diff --git "a/4066.jsonl" "b/4066.jsonl" new file mode 100644--- /dev/null +++ "b/4066.jsonl" @@ -0,0 +1,729 @@ +{"seq_id":"402566792","text":"\"\"\"\nPlots MaxF1 score.\n\n-------------------------------------------------\n\nThe MIT License (MIT)\n\nCopyright (c) 2017 Marvin Teichmann\n\nMore details: https://github.com/MarvinTeichmann/KittiSeg/blob/master/LICENSE\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport numpy as np\nimport sys\n\nimport matplotlib.pyplot as plt\n\nrunfolder = '/media/srg/Work/git/avm_dataset/dataset/AVM6414_augmented' #'D:/git/KittiSeg/RUNS' \nanafile = 'output.log'\n\noutput_folder = os.path.join(runfolder, 'analyse')\nif os.path.isdir(output_folder) == False:\n os.mkdir(output_folder)\n\nfilename = os.path.join(runfolder, anafile)\nbegin_iter = 55000\neval_iters = 5000\nmax_iters = 95000\n\n\ndef read_values(class_num, prop):\n regex_string = \"\\[class\\s%d\\]\\s%s\\:\\s+(\\d+\\.\\d+)\" % (class_num, prop) \n regex = re.compile(regex_string)\n\n value_list = [regex.search(line).group(1) for line in open(filename)\n if regex.search(line) is not None]\n\n float_list = [float(value) for value in value_list]\n\n return float_list\n\ndef plot_training_result(prop, begin_iter, unit_iter, max_iter, n):\n label_list = range(begin_iter, max_iter+1, unit_iter)\n \n plt.figure(figsize=(8, 5))\n plt.rcParams.update({'font.size': 14})\n \n class_num = range(0,n)\n for c in class_num:\n values = read_values(c, prop)\n max_value = max(values)\n max_iter = values.index(max(values))* unit_iter + begin_iter\n plt.plot(label_list, values, label='class {}'.format(c), marker=\".\", linestyle='-')\n plt.text(65000, 35-c*5, '[calss {}] iter: {}, val: {}'.format(c, max_iter, max_value), size='x-small') \n plt.xlabel('Iteration')\n plt.ylim([0,100])\n plt.ylabel('{} [%]'.format(prop))\n plt.legend(loc=0)\n \n plt.savefig(output_folder + \"/{}\".format(prop) + \".pdf\")\n plt.show()\n\n# MaxF1\nclass_num = 4\nprop = 'MaxF1'\nplot_training_result(prop, begin_iter, eval_iters, max_iters, class_num)\n\n# Average Precision\nprop = 'Average Precision'\nplot_training_result(prop, begin_iter, eval_iters, max_iters, class_num)\n","sub_path":"utils/plot_eval_results.py","file_name":"plot_eval_results.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"621745301","text":"import time, random\nfrom threading import *\n\n\ndef run():\n # 定义一个私有变量\n local_var = local()\n local_var.numbers = [1] # 给定初始值为1\n # 给定休眠时间,模拟不同线程的执行\n time.sleep(random.random())\n for i in range(8):\n local_var.numbers.append(random.choice(range(10)))\n # 打印当前线程的私有变量值\n print(current_thread(), local_var.numbers)\n\n\nif __name__ == '__main__':\n thread_list = []\n\n for i in range(5):\n t1 = Thread(target=run)\n t1.start()\n thread_list.append(t1)\n\n for j in thread_list:\n j.join()\n","sub_path":"Python多线程和多进程/线程/同步/线程独立的私有变量.py","file_name":"线程独立的私有变量.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"195129254","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 23 11:48:01 2018\r\n\r\n@author: cindy\r\n\"\"\"\r\nFOV='GOS FOV [arcmin]'\r\nR='Radius [solar radii]'\r\nT='T [degrees]'\r\nROTATION='field rotation [degrees]'\r\nOCCULTER='Occulter'\r\nMOD_STATE='Modulator 
State'\r\nON='On'\r\nOFF='Off'\r\nOUT='Out'\r\nMOD_TYPE='Modulator Type'\r\nMOD_POS='Modulator position'\r\nSKY_BRIGHT='sky brightness [millionths of disk]'\r\nND_FILTER='Neutral Density Filter'\r\nSP_SLIT='SP slit length [arcsec]'\r\nSP_FILTER='SP filter'\r\nSP_CENTER_WL='SP center wavelength [nm]'\r\nSP_EXP_TIME='SP exposure time [ms]'\r\nSP_CO_ADD='SP number of coadds'\r\nSP_NUM_REPEATS='SP number of repeats'\r\nSP_STEP_SIZE='SP step size [arcsec]'\r\nSP_NUM_STEPS='SP number of steps'\r\nSP_SECS='SP duration [s]'\r\nSP_DV='SP data volume [MB]'\r\n\r\nCI_FILTER='CI filter'\r\nCI_CENTER_WL='CI center wavelength [nm]'\r\nCI_EXP_TIME='CI exposure time [ms]'\r\nCI_CO_ADD='CI number of coadds'\r\nCI_NUM_POSITIONS='CI number of positions'\r\nCI_NUM_REPEATS='CI number of repeats'\r\nCI_SECS='CI duration [s]'\r\nCI_DV='CI data volume [MB]'\r\nCI_POS_NUMS='CI positions values'\r\nSP_CFG_NAME='SP config name'\r\nCI_CFG_NAME='CI config name'\r\n\r\nCORONAL_FLUX=\"Coronal Flux spectrum\"\r\nDISK_FLUX=\"Disk Flux spectrum\"\r\nDISK_SPECTRUM_1=\"Disk low resolution SP filter & spectrum\"\r\nDISK_SPECTRUM_2=\"Disk high resolution SP filter & spectrum\"\r\nDISK_SPECTRUM_3=\"Disk high resolution CI filter & spectrum\"\r\nCORONAL_SPECTRUM_1=\"Corona low resolution SP filter & spectrum\"\r\nCORONAL_SPECTRUM_2=\"Corona high resolution SP filter & spectrum\"\r\nCORONAL_SPECTRUM_3=\"Corona high resolution CI filter & spectrum\"\r\nLINE_COUNTS=\"SP coronal line counts in filter passband\"\r\nV_SENSITIVITY=\"SP velocity sensitivity for lines in filter passband\"\r\nB_SENSITIVITY=\"SP magnetic sensitivity lines in filter passband\"\r\n\r\nDISCRETE=\"Discrete\"\r\nCONTINIOUS=\"Continuous\"\r\nDEFAULT_CONFIG=\"Default Configuration\"\r\n\r\nSP=\"SP\"\r\nCI=\"CI\"\r\nSP_CI=\"SP&CI\"\r\nDUO=\"duo\"\r\nDELIM=' - '\r\nSP_METRICS=[\r\nR,\r\nSKY_BRIGHT,\r\nND_FILTER,\r\nSP_CENTER_WL,\r\nSP_EXP_TIME,\r\nSP_CO_ADD,\r\nSP_SECS,\r\nSP_DV\r\n]\r\n\r\nCI_METRICS=[\r\nR,\r\nSKY_BRIGHT,\r\nND_FILTER,\r\nCI_EXP_TIME,\r\nCI_CO_ADD,\r\nCI_SECS,\r\nCI_DV]\r\n\r\n\r\nSUNMAP_PARAMS=['tSB','rSB','slitRotationSB','fovCB',\r\n 'occulterCB','stepSizeSB_SP','numStepsSB_SP','numPosCB_CI']\r\n\r\n\r\nPARAMS_CHANGED_SP=\"Saturation Check changed SP exposure time and coadds\"\r\nPARAMS_CHANGED_CI=\"Saturation Check changed CI exposure time and coadds\"\r\n\r\nCI_SAT_LEVEL=\"CI Saturation Level\"\r\nSP_SAT_LEVEL=\"SP Saturation Level\"\r\n\r\nAUTO_SAT_CHECK_SP_CANCELLED=\"SP camera has reached minimum exposure time. Please insert ND filter. Automatic Saturation Check has been disabled.\"\r\nAUTO_SAT_CHECK_CI_CANCELLED=\"CI camera has reached minimum exposure time. Please insert ND filter. Automatic Saturation Check has been disabled.\"\r\n\r\nAUTO_SAT_CHECK_CI=\"Automatic Saturation Check CI\"\r\nAUTO_SAT_CHECK_SP=\"Automatic Saturation Check SP\"\r\n\r\nINPUT_MODEL=\"Input Models\"\r\n\r\nDEL_ZANNA_QS=\"QS\"\r\nDEL_ZANNA_QR=\"QR\"\r\nDEL_ZANNA_AR=\"AR\"\r\nDEL_ZANNA_QS_LABEL=\"DelZanna QS Model\"\r\nDEL_ZANNA_QR_LABEL=\"DelZanna QR Model\"\r\nDEL_ZANNA_AR_LABEL=\"DelZanna AR Model\"\r\n\r\nUSE_BOTH_MESSAGE=\"Using Spectrograph and Context Imager simultanously restricts the instrument configurations. 
+{"seq_id":"433677585","text":"def intersection(arrays):\n    \"\"\"\n    YOUR CODE HERE\n    \"\"\"\n    # Your code here\n\n    dct = {}\n    result = []\n\n    # Looping thru the lists\n\n    for inner_array in arrays:\n\n        for item in inner_array:\n\n            if item not in dct:\n\n                dct[item] = 0\n\n            # Counting each appearance\n\n            dct[item] += 1\n\n    # If the count matches the number of inner arrays, it means\n    # this number is in all arrays.\n\n    for key, value in dct.items():\n\n        if value == len(arrays):\n\n            result.append(key)\n\n    return result\n\n\nif __name__ == \"__main__\":\n    arrays = []\n\n    arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n    arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n    arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n    print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"349587833","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 02:49:19 2019\n\n@author: Stella\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport scipy.optimize\nimport math\nimport os\nfrom fun import *\n\nnamelist = ['China','Common', 'ETF', 'OTC', 'Tech', 'Value']\nnamedict = dict({'China':\"中概股基金\",'Common':\"一般股票型基金\", \"ETF\": \"指數型基金\", \"OTC\":\"店頭市場基金\", \"Tech\":\"科技型基金\", \"Value\":\"價值型基金\"})\n\n\ndflist = []\nreturn_list = []\nq_list = []\n\nfor i in range(len(namelist)):\n    inputFileName = namelist[i] + \".csv\"\n    dflist.append(formatting(inputFileName))\n    return_list.append(return_calculate(dflist[i], 1))\n    q_list.append(riskiness(return_list[i], 1, namelist[i]))\n\n\ntestname = \"0050 元大台灣50\"\n\nport_qlist = []\nindbest_list = best_fund(q_list)\n\nfor i in range(len(indbest_list)):\n    port_qlist.append(portfolio(testname, indbest_list[i], dflist))\n\n\nbest_port(port_qlist)\n","sub_path":"subfunction/port_best.py","file_name":"port_best.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"220578568","text":"import glob\nimport pickle\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\n\n\ndef extract_vocabulary(folder):\n    files = glob.glob(folder + \"*.txt\")\n    vocabulary = {}\n    for filename in files:\n        with open(filename) as f:\n            text = f.readlines()\n        for line in text:\n            words = word_tokenize(line)\n            for word in words:\n                word = word.lower()\n                if word in vocabulary.keys():\n                    vocabulary[word] += 1\n                else:\n                    vocabulary[word] = 1\n    vocabulary.pop(\"\", None)\n    return vocabulary\n\n\ndef merge_vocabulary(old_vocab, new_vocab):\n    for word in new_vocab.keys():\n        if word in list(old_vocab.keys()):\n            old_vocab[word] += new_vocab[word]\n        else:\n            old_vocab[word] = new_vocab[word]\n    return old_vocab\n\n\ndef stop_word_removal(vocabulary):\n    stop_words = set(stopwords.words('english'))\n    for word in stop_words:\n        vocabulary.pop(word, None)\n    return vocabulary\n\n\ndef lemmatize(vocabulary):\n    lemmatizer = WordNetLemmatizer()\n    for word in list(vocabulary.keys()):\n        lemmatized_word = lemmatizer.lemmatize(word)\n        if lemmatized_word != word:\n            try:\n                vocabulary[lemmatized_word] += vocabulary[word]\n            except KeyError:\n                pass\n            vocabulary.pop(word)\n    return vocabulary\n\n\ndef threshold(vocabulary, lower_percentile, upper_percentile):\n    lower_bound = np.percentile(np.fromiter(vocabulary.values(), dtype=float), lower_percentile)\n    upper_bound = np.percentile(np.fromiter(vocabulary.values(), dtype=float), 100 - upper_percentile)\n    print(lower_bound, upper_bound)\n    vocabulary = {key: value for key, value in vocabulary.items() if value > lower_bound and value < upper_bound}\n    return vocabulary\n\n\nif __name__ == '__main__':\n    vocabulary = {}\n    for i in range(1, 11):\n        vocabulary = merge_vocabulary(vocabulary, extract_vocabulary(\"lingspam/part\" + str(i) + \"/\"))\n    vocabulary = stop_word_removal(vocabulary)\n    vocabulary = lemmatize(vocabulary)\n    vocabulary = threshold(vocabulary, 92, 0.15)\n    with open(\"vocabulary.pickle\", \"wb\") as f:\n        pickle.dump(vocabulary, f)\n","sub_path":"vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"15701855","text":"import unittest\nfrom obisqc import taxonomy\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\", datefmt=\"%H:%M:%S\")\nlogging.getLogger(\"urllib3\").setLevel(logging.INFO)\nlogging.getLogger(\"obisqc.util.aphia\").setLevel(logging.INFO)\n\n\nclass DummyCache:\n    def store(self, aphiaid, aphia_info):\n        pass\n    def fetch(self, aphiaid):\n        if str(aphiaid) == \"141433\":\n            return {\n                \"record\": {\n                    \"AphiaID\": 141433,\n                    \"scientificname\": \"Abra alba\",\n                    \"status\": \"accepted\",\n                    \"valid_AphiaID\": 141433,\n                    \"isMarine\": False,  # modified for testing purposes\n                    \"isBrackish\": False,  # modified for testing purposes\n                    \"isFreshwater\": None,\n                    \"isTerrestrial\": None\n                },\n                \"classification\": {}\n            }\n\n\nclass TestTaxonomyCache(unittest.TestCase):\n\n    def setUp(self):\n        self.cache = DummyCache()\n\n    def test_cache(self):\n        records = [\n            { \"id\": 0, \"scientificNameID\": \"urn:lsid:marinespecies.org:taxname:141433\" }\n        ]\n\n        results_nocache = taxonomy.check(records)\n        self.assertTrue(results_nocache[0][\"annotations\"][\"aphia\"] == 141433)\n        self.assertFalse(results_nocache[0][\"dropped\"])\n        self.assertNotIn(\"not_marine\", results_nocache[0][\"flags\"])\n\n        results_cache = taxonomy.check(records, self.cache)\n        self.assertTrue(results_cache[0][\"annotations\"][\"aphia\"] == 141433)\n        self.assertTrue(results_cache[0][\"dropped\"])\n        self.assertIn(\"not_marine\", results_cache[0][\"flags\"])\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"test/test_taxonomy_cache.py","file_name":"test_taxonomy_cache.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"287780978","text":"import numpy as np\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\n\ndocs = ['Well done!',\n\t\t'Good work',\n\t\t'Great effort',\n\t\t'nice work',\n\t\t'Excellent!',\n\t\t'Weak',\n\t\t'Poor effort!',\n\t\t'not good',\n\t\t'poor work',\n\t\t'Could have done better.']\n# define class labels\nlabels = np.array([1,1,1,1,1,0,0,0,0,0])\n# prepare tokenizer\nt = Tokenizer()\nt.fit_on_texts(docs)\nvocab_size = len(t.word_index) + 1\nencode_dict = t.word_index\n# integer encode the documents\nencoded_docs = t.texts_to_sequences(docs)\nprint(encoded_docs)\n# pad documents to a max length of 5 words\nmax_length = 5\npadded_docs = sequence.pad_sequences(encoded_docs, maxlen=max_length, padding='post')\nprint(padded_docs)\n\nimport pandas as pd\ndata_train = pd.read_csv('reddit_train.csv', usecols = ['BODY','REMOVED'])\ndata_test = pd.read_csv('reddit_test.csv', usecols = ['BODY','REMOVED'])\n","sub_path":"demo_mark0.py","file_name":"demo_mark0.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"49239255","text":"import cv2\nimport numpy as np\nfrom tools.segmentation import Segmentation\n\nclass IsolatePlayerFilter(object):\n\n    def filter(self, rgb, depth, globalargs, args = {}):\n\n        if not 'player_seed_closest' in args:\n            return rgb, depth\n\n        x, y = args['player_seed_closest']\n\n        segmenter = Segmentation(depth)\n\n        segment = segmenter.segment((x, y))\n\n        rgb = np.zeros(rgb.shape, dtype=rgb.dtype)\n\n        cond = (segmenter.markers == 255)\n        np.copyto(rgb[:,:,0], segment, 'same_kind', cond)\n        np.copyto(rgb[:,:,1], segment, 'same_kind', cond)\n        np.copyto(rgb[:,:,2], segment, 'same_kind', cond)\n\n        overlay = globalargs['overlay']\n\n        color = 255, 100, 100\n        overlay.circle((y, x), 10, color, 2)\n\n        x_raw, y_raw = args['player_seed_smooth']\n        color = 100, 255, 100\n        overlay.circle((y_raw, x_raw), 60, color, 2)\n\n        return rgb, depth","sub_path":"filters/IsolatePlayerFilter.py","file_name":"IsolatePlayerFilter.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"515382315","text":"# Load Data\nimport numpy as np\nimport scipy\nimport scipy.io\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nrawDict = {}\nrawDict['data'] = scipy.io.loadmat('videoData.mat')\ndata = rawDict['data']['videodata']\n\n# Now data is the numpy array of shape 401 X 401 X 3 X 300\n# Compute the average red channel of each frame and plot the result\n\navg_red = np.zeros(np.shape(data)[3])\nfor i in range(0, np.shape(data)[3]):\n    avg_red[i] = np.mean(data[:, :, 0, i])\nplt.figure(1)\nplt.plot(avg_red)\nplt.title(\"Average value of Red Channel with respect to time\")\n\ntaps = 91\nnyqf = 30\nb = signal.firwin(taps, [50/60, 140/60], pass_zero=False, nyq=nyqf)\nw, h = signal.freqz(b)\nplt.figure(2)\nplt.plot(nyqf*w / np.pi, 20 * np.log10(abs(h)), 'b')\nplt.ylabel('Amplitude [dB]', color='b')\nplt.grid()\nplt.axis('tight')\nidxS = 80\nidxF = 280\nfiltered = signal.convolve(b, avg_red)\n\n\n\npeaks = []\nfor i in range(idxS, idxF):\n    if filtered[i] > filtered[i-1] and filtered[i] > filtered[i+1]:\n        peaks.append((i, filtered[i]))\npeaks = np.array(peaks)\nplt.figure(3)\nplt.subplot(2, 1, 1)\nplt.plot(filtered)\nplt.title('Filtered Signal')\nplt.subplot(2, 1, 2)\nplt.plot(filtered[idxS:idxF])\nplt.scatter(peaks[:,0]-idxS, peaks[:,1], color = 'red')\nplt.title('Relevant segment of Filtered Signal and peaks')\nplt.show()\n\nprint(\"BPM = \", len(peaks)*6)","sub_path":"lab7/lab7_jonathan_leal.py","file_name":"lab7_jonathan_leal.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"256992508","text":"# Tuple: a read-only list; it can be iterated over, searched, and sliced.\r\n# The \"children\" cannot be changed, but the \"grandchildren\" may be mutable.\r\n# tu = (1,2,3,'alex',[2,3,4,'taibai'],'egon')\r\n# # print(tu[3])\r\n# # print(tu[0:4])\r\n# # for i in tu:\r\n# #     print(i)\r\n# tu[4][3]=tu[4][3].upper()\r\n# print(tu)\r\n# tu[4].append('sb')\r\n# print(tu)\r\n\r\n# s = 'alex'\r\n# s1 = 'sb'.join(s)\r\n# print(s1)\r\n# Convert a list to a string: list -----> str, via join\r\n# li = ['taibai','alex','wusir','egon','女神',]\r\n# s = '++++'.join(li)\r\n# str -----> list, via split()\r\n# print(s)\r\n\r\n\r\n# range [1,2,3,4,5,6,.......100........]\r\n\r\n# for i in range(3,10):\r\n#     print(i)\r\n# for i in range(10):\r\n#     print(i)\r\n# for i in range(0,10,3):\r\n#     print(i)\r\n# for i in range(10,0,-2):\r\n#     print(i)\r\n# for i in range(10,-1,-2):\r\n#     print(i)\r\n\r\nli = [1,2,3,5,'alex',[2,3,4,5,'taibai'],'afds']\r\n# for i in li:\r\n#     if type(i) == list:\r\n#         for k in i:\r\n#             print(k)\r\n#     else: print(i)\r\n\r\nfor i in range(len(li)):\r\n    if type(li[i]) == list:\r\n        for j in li[i]:\r\n            print(j)\r\n    else:\r\n        print(li[i])","sub_path":"python/day4课堂笔记/4,元祖.py","file_name":"4,元祖.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"420441120","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport re\n\nfrom constraint import *\n\nclass EquipStruct:\n    serial = 0        # serial number, int\n    name = ''         # str\n    intro = ''        # str\n    # Stat modifiers\n    dmgType = -1      # damage type, int\n    atkNum = 1        # number of attacks, int\n    atkRange = 0      # 0 = melee, 1 = golden feather, 2 = ranged\n    race = 0          # race, int\n    health = 0        # health, int\n    dmg = 0           # attack power, int\n    power = [0 for i in range(PowerNum)]\n    dodge = 0         # dodge, int\n    # Trigger-related fields\n    opportunity = 0   # trigger timing, int\n    trig = 0          # trigger constraints, list[str]\n    possiblity = 0    # probability, int\n    operMethod = 0    # operation name, int\n    operAttr = 0      # operation attributes, list[str]\n    operTarget = 0    # operation target, int\n    operTrig = 0      # operation constraints, list[str]\n    operValue = 0     # base values, list[int]\n    operValueAdd = 0  # attribute bonus, int\n    reg = 0           # register, int\n    cnt = 0           # counter\n\n    # Fit excel\n    def __init__(self, serial=0, name='', intro='', dmgType=-1, atkNum=1, atkRange=0, race=0,\n                 health=0, dmg=0, power=[0 for i in range(PowerNum)], dodge=0, opportunity=0,\n                 trig=0, trigReg=0, trigCnt=0, possiblity=0, operMethod=0, operAttr=0,\n                 operTarget=0, operTrig=0, operValue=0, operValueAdd=0):\n        self.serial = int(serial)\n        self.name = str(name)\n        self.intro = str(intro)\n        if (type(dmgType) == str):\n            self.dmgType = DmgTypeDict[dmgType]\n        self.atkNum = int(atkNum)\n        if (type(atkRange) == str):\n            self.atkRange = RangeDict[atkRange]\n        if (type(race) == str):\n            self.race = RaceDict[race]\n        self.health = int(health)\n        self.dmg = int(dmg)\n        self.power = power\n        self.dodge = int(dodge)\n        self.opportunity = int(opportunity)\n        if type(trig) == str:\n            self.trig = re.split(',', trig)\n        self.possiblity = possiblity\n        if (type(operMethod) == str):\n            self.operMethod = OperMethodDict[operMethod]\n        if (type(operAttr) == str):\n            self.operAttr = re.split(',', operAttr)\n        if (type(operTarget) == str):\n            self.operTarget = OperTargetDict[operTarget]\n        if type(operTrig) == str:\n            self.operTrig = re.split(',', operTrig)\n        self.operValue = [int(x) for x in re.split(',', str(operValue))]\n        if (type(operValueAdd) == str):\n            self.operValueAdd = DmgTypeDict[operValueAdd]\n        self.reg = int(trigReg)\n        self.cnt = int(trigCnt)\n","sub_path":"equip.py","file_name":"equip.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"407970551","text":"\"\"\" Filters out putative operons that are extremely similar to one another. \"\"\"\n\nimport sys\n\nfrom operon_analyzer import analyze\n\noperons = analyze.load_operons(sys.stdin)\nunique = analyze.deduplicate_operons_approximate(operons)\nunique = analyze.dedup_supersets(unique)\n\nfor operon in unique:\n    print(operon.as_str())\n","sub_path":"src/nontn7/dedup.py","file_name":"dedup.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
\"\"\"\n\nimport sys\n\nfrom operon_analyzer import analyze\n\noperons = analyze.load_operons(sys.stdin)\nunique = analyze.deduplicate_operons_approximate(operons)\nunique = analyze.dedup_supersets(unique)\n\nfor operon in unique:\n print(operon.as_str())\n","sub_path":"src/nontn7/dedup.py","file_name":"dedup.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"598890730","text":"import time\nimport rethinkdb as r\nfrom ntp.data.init_db import db_name\nfrom ntp.data import main\nfrom pprint import pprint\n\n\ndef test_main():\n \"\"\"\n Tests primary data acquisition API\n \"\"\"\n\n # Zero is the initial condition for main, where there are no existing timestamps to compare against\n out = main.main(0)\n pprint(out)\n assert out\n assert isinstance(out, dict)\n assert all(isinstance(out[elem], list) for elem in out)\n assert all(out[key] for key in out)\n\n must_fail = int(time.time())\n failing = main.main(must_fail)\n assert not failing\n assert isinstance(failing, bool)\n r.db_drop(db_name).run(r.connect())\n","sub_path":"ntp/data/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492709547","text":"import aes128\nimport Hex\nfrom binascii import hexlify as hx, unhexlify as uhx\nfrom struct import pack as pk, unpack as upk\nfrom Fs.File import File\nfrom Fs.File import MemoryFile\nfrom hashlib import sha256\nimport Fs.Type\nimport os\nimport re\nimport pathlib\n\nimport Keys\nimport Print\n\nMEDIA_SIZE = 0x200\n\n\n\nclass Header(File):\n\tdef __init__(self, path = None, mode = None, cryptoType = -1, cryptoKey = -1, cryptoCounter = -1, nca = None):\n\t\tself.size = 0\n\t\tself.offset = 0\n\t\tself.nca = nca\n\t\tself.bktr_offset = 0\n\t\tself.bktr_size = 0\n\t\tself.magic = None\n\t\tself.version = None\n\t\tself.enctryCount = 0\n\t\tself.reserved = None\n\t\tself.buffer = None\n\t\tsuper(Header, self).__init__(path, mode, cryptoType, cryptoKey, cryptoCounter)\n\n\tdef open(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):\n\t\tsuper(Header, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)\n\t\tself.rewind()\n\n\t\tself.bktr_offset = self.readInt64()\n\t\tself.bktr_size = self.readInt64()\n\t\tself.magic = self.read(0x4)\n\t\tself.version = self.readInt32()\n\t\tself.enctryCount = self.readInt32()\n\t\tself.reserved = self.readInt32()\n\n\tdef printInfo(self, maxDepth = 3, indent = 0):\n\t\tif not self.bktr_size:\n\t\t\treturn\n\n\t\ttabs = '\\t' * indent\n\t\tPrint.info('\\n%sBKTR' % (tabs))\n\t\tPrint.info('%soffset = %d' % (tabs, self.bktr_offset))\n\t\tPrint.info('%ssize = %d' % (tabs, self.bktr_size))\n\t\tPrint.info('%sentry count = %d' % (tabs, self.enctryCount))\n\n\t\tPrint.info('\\n')\n\nclass BktrRelocationEntry:\n\tdef __init__(self, f):\n\t\tself.virtualOffset = f.readInt64()\n\t\tself.physicalOffset = f.readInt64()\n\t\tself.isPatch = f.readInt32()\n\n\tdef printInfo(self, maxDepth = 3, indent = 0):\n\t\ttabs = '\\t' * indent\n\t\tPrint.info('%sRelocation Entry %s %x = %x' % (tabs, 'Patch' if self.isPatch else 'Base', self.physicalOffset, self.virtualOffset))\n\nclass BktrSubsectionEntry:\n\tdef __init__(self, f):\n\t\tself.virtualOffset = f.readInt64()\n\t\tself.size = 0\n\t\tself.padding = f.readInt32()\n\t\tself.ctr = f.readInt32()\n\n\tdef printInfo(self, maxDepth = 3, indent = 
0):\n\t\ttabs = '\\t' * indent\n\t\tPrint.info('%sSubsection Entry %d, CTR = %x' % (tabs, self.virtualOffset, self.ctr))\n\nclass BktrBucket:\n\tdef __init__(self, f):\n\t\tself.padding = f.readInt32()\n\t\tself.entryCount = f.readInt32()\n\t\tself.endOffset = f.readInt64()\n\t\tself.entries = []\n\n\tdef getEntry(self, offset):\n\t\tindex = 0\n\t\tlast = self.entries[index]\n\t\tfor entry in self.entries:\n\t\t\tif entry.virtualOffset > offset:\n\t\t\t\tbreak\n\n\t\t\tlast = self.entries[index]\n\t\t\tindex += 1\n\n\t\treturn last\n\n\n\tdef printInfo(self, maxDepth = 3, indent = 0):\n\t\ttabs = '\\t' * indent\n\t\tPrint.info('\\n%sBKTR Bucket' % tabs)\n\t\tPrint.info('%sentries: %d' % (tabs, self.entryCount))\n\t\tPrint.info('%send offset: %d' % (tabs, self.endOffset))\n\n\t\tfor entry in self.entries:\n\t\t\tentry.printInfo(maxDepth, indent + 1)\n\nclass BktrSubsectionBucket(BktrBucket):\n\tdef __init__(self, f):\n\t\tsuper(BktrSubsectionBucket, self).__init__(f)\n\n\t\tfor i in range(self.entryCount):\n\t\t\tself.entries.append(BktrSubsectionEntry(f))\n\n\nclass BktrRelocationBucket(BktrBucket):\n\tdef __init__(self, f):\n\t\tsuper(BktrRelocationBucket, self).__init__(f)\n\n\t\tif self.entryCount > 0xFFFF:\n\t\t\traise IOError('Too many entries')\n\n\t\tfor i in range(self.entryCount):\n\t\t\tself.entries.append(BktrRelocationEntry(f))\n\n\nclass Bktr(Header):\n\tdef __init__(self, path = None, mode = None, cryptoType = -1, cryptoKey = -1, cryptoCounter = -1, nca = None):\n\t\tself.basePhysicalOffsets = []\n\t\tsuper(Bktr, self).__init__(path, mode, cryptoType, cryptoKey, cryptoCounter, nca)\n\n\n\tdef open(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):\n\t\tsuper(Bktr, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)\n\n\t\tif self.bktr_size:\n\t\t\tself.nca.seek(self.bktr_offset)\n\t\t\tself.nca.readInt32() # padding\n\t\t\tself.bucketCount = self.nca.readInt32()\n\t\t\tself.totalPatchImageSize = self.nca.readInt64()\n\t\t\tself.basePhysicalOffsets = []\n\t\t\tfor i in range(int(0x3FF0 / 8)):\n\t\t\t\tself.basePhysicalOffsets.append(self.nca.readInt64())\n\n\tdef isValid(self):\n\t\treturn True if self.bktr_size > 0 else False\n\n\tdef getBucket(self, offset):\n\t\tif len(self.buckets) == 0:\n\t\t\treturn None\n\n\t\tindex = 0\n\t\tlast = self.buckets[0]\n\n\t\tfor virtualOffset in self.basePhysicalOffsets:\n\t\t\tif index >= len(self.buckets):\n\t\t\t\tbreak\n\n\t\t\tif offset > virtualOffset:\n\t\t\t\tbreak\n\n\t\t\tlast = self.buckets[index]\n\t\t\tindex += 1\n\n\t\treturn last\n\n\tdef printInfo(self, maxDepth = 3, indent = 0):\n\t\tsuper(Bktr, self).printInfo(maxDepth, indent)\n\t\ttabs = '\\t' * indent\n\t\tPrint.info('%sOffsets' % (tabs))\n\n\t\ti = 0\n\t\tfor off in self.basePhysicalOffsets:\n\t\t\ti += 1\n\t\t\tif off == 0 and i != 1:\n\t\t\t\tbreak\n\t\t\tPrint.info('%s %x' % (tabs, off))\n\n\n\nclass Bktr1(Bktr):\n\tdef __init__(self, path = None, mode = None, cryptoType = -1, cryptoKey = -1, cryptoCounter = -1, nca = None):\n\t\tself.buckets = []\n\t\tsuper(Bktr1, self).__init__(path, mode, cryptoType, cryptoKey, cryptoCounter, nca)\n\n\tdef open(self, file = None, mode = 'rb', cryptoType = -1, cryptoKey = -1, cryptoCounter = -1):\n\t\tsuper(Bktr1, self).open(file, mode, cryptoType, cryptoKey, cryptoCounter)\n\n\t\tself.buckets = []\n\n\t\tif self.bktr_size:\n\t\t\tfor i in range(self.bucketCount):\n\t\t\t\tself.buckets.append(BktrRelocationBucket(self.nca))\n\n\tdef getRelocationEntry(self, offset):\n\t\tif 
+{"seq_id":"135274563","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\ndef collatz_next(num):\n    if num % 2 == 0:\n        return num // 2\n    else:\n        return 3*num + 1\n\ndef len_of_collatz_chain(num):\n    length = 1\n    n = num\n    while n != 1:\n        n = collatz_next(n)\n        length += 1\n    return length\n\ndef main():\n    result = 0\n    length_max = 0\n    for i in range(1, 1000000):\n        length_i = len_of_collatz_chain(i)\n        if length_i > length_max:\n            length_max = length_i\n            result = i\n        if (i-1) % 10000 == 0:\n            print(i)\n    print(result)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"001-100/011-020/014.py","file_name":"014.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"383237600","text":"\"\"\"This program outputs the binary (base 2) representation of\nthe decimal number typed as the input. It is based on decomposing the number into\na sum of powers of 2.\n\n@author Amit Kumar\n@version 1.0\n@since 03/01/2019\n\"\"\"\n# importing important modules\nimport utility.Utility\nimport util.Util\n\nglobal deci\n\nprint()  # putting one white-space line before printing anything on the console\ntry:\n    deci = utility.Utility.get_integer()  # calling the function\nexcept Exception as e:\n    print(e)\n\nprint(\"Binary representation of \", deci, \": \", util.Util.to_binary(deci))  # printing the output\n","sub_path":"AlgorithmPrograms/Problem15_BinaryConversion.py","file_name":"Problem15_BinaryConversion.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"236757845","text":"from PIL import Image\nfrom pytesseract import *\nfrom deepLearningEngine.bayesianEngine import BayesianFilter\nfrom urllib.request import urlopen\nimport boto3\nfrom server.mongoDBCon import *\n\nbf = BayesianFilter()\n\ns3Client = boto3.client('s3')\n\n\n# Train the filter after OCR extraction\ndef ocrToTxt(fileId, fileName, lang='eng'):\n    img = Image.open(urlopen(fileName))\n\n    # Extract the text\n    outText = image_to_string(img, lang=lang, config='--psm 6 -c preserve_interword_spaces=1')\n\n    # Training\n    bf.fit(outText, fileId)\n\n\nif __name__ == \"__main__\":\n\n    books = dbCon('books', {})\n    bookIds = []\n    for book in books:\n        bookIds.append(book.get('_id'))\n\n    for bookId in bookIds:\n        # bookId = 4\n        probImgeUrl = []\n        probId = []\n        problems = dbCon({\"content.picture\": {\"$regex\": \"https\"}, \"bookId\": bookId})\n        for problem in problems:\n            probId.append(problem.get('_id'))\n            probImgeUrl.append(problem.get('content').get('picture'))\n\n            try:\n                prefix = 'problem/' + str(problem.get('_id')) + '/'\n                result = s3Client.list_objects(Bucket='deeplearning-training-data-classification', Prefix=prefix,\n                                               Delimiter='/')\n                for obj in result.get(\"Contents\"):\n                    pathSplit = obj.get('Key').split('/')\n                    if pathSplit[2] != '':\n                        urlPath = 'https://s3.ap-northeast-2.amazonaws.com/deeplearning-training-data-classification/' + obj.get(\n                            'Key')\n                        probId.append(pathSplit[1])\n                        probImgeUrl.append(urlPath)\n                        # print('sub folder : ', obj.get('Key'))\n            except:\n                pass\n        for i in range(len(probId)):\n            print(probId[i], ':', probImgeUrl[i])\n\n        # Main OCR extraction loop\n        for fullName in range(len(probImgeUrl)):\n            # Extract Korean+English text (kor, eng, kor+eng)\n            ocrToTxt(probId[fullName], probImgeUrl[fullName], 'kor+eng')\n            print(probId[fullName], ':', probImgeUrl[fullName])\n\n        bf.word_save(bookId)\n        # Job-complete message\n        print('+++ Text Convert Complete! +++', bookId)\n","sub_path":"ocrTraining/bookIdTraining.py","file_name":"bookIdTraining.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"273160058","text":"import numpy as np\n\ndef ID(W,D):\n\treturn np.log2(2*D/W)\n\ndef ID_shannon(W,D):\n\treturn np.log2((D+W)/W)\n\ndef inches_to_cm(inches):\n\tto_cm=2.54\n\treturn inches*to_cm\n\ndef pixel_to_cm(num_pixel):\n\t#Each pixel on the screen was 0.02055 cm wide\n\teach_pixel_cm=0.02055\n\treturn each_pixel_cm*num_pixel\n\ndef experiments(exp):\n\tif exp==1:\n\t\texp_str='Fitts1954_Table1'\n\t\tWs=np.array([2,1,0.5,0.25])\n\t\tDs=np.array([2,4,8,16])\n\telif exp==2:\n\t\texp_str='Fitts1954 Table2 Disc Transfer'\n\t\tWs=np.array([0.5,0.25,0.125,0.0625])\n\t\tDs=np.array([4,8,16,32])\n\telif exp==3:\n\t\texp_str='Fitts1954 Table3 Pin Transfer'\n\t\tWs=np.array([0.25,0.125,0.0625,0.03125])\n\t\tDs=np.array([1,2,4,8,16])\n\telif exp==4:\n\t\texp_str='Jagacinski 1983 Helmet'\n\t\tWs=[1.22,0.7,0.4]\n\t\tDs=[2.45,4.28,7.5]\n\telif exp==5:\n\t\texp_str='Jagacinski 1983 Joystick'\n\t\tWs=[0.92,0.52,0.3]\n\t\tDs=[2.45,4.28,7.5]\n\telse:\n\t\texp_str='Zhai2004'\n\t\tWs=np.array([72,36,12])\n\t\tDs=np.array([120,360,840])\n\n\n\tWs=np.round(Ws/(np.max(Ds)*(1/0.5)),3)\n\tDs=np.round(Ds/(np.max(Ds)*(1/0.5)),3)\n\n\treturn Ws,Ds,exp_str\n\nif __name__ == '__main__':\n\timport itertools\n\tfor exp in [1,2,3,4,5,6]:\n\t\tWs,Ds,exp_str=experiments(exp)\n\t\tprint(exp_str)\n\t\tprint(f'Ws={Ws}')\n\t\tprint(f'Ds={Ds}')\n","sub_path":"data/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"317974177","text":"import argparse\nimport os\n\ndef new_dir(string):\n    if os.path.exists(string):\n        raise argparse.ArgumentTypeError(\"must specify a new directory for the hop project\")\n    return string\n\ndef create_parser():\n    config_parser = argparse.ArgumentParser(add_help=False)\n    config_parser.add_argument('--hop-config', help='path to hop.yml file (defaults to ./hop.yml)')\n    parser = argparse.ArgumentParser()\n    sparser = parser.add_subparsers(dest='command')\n\n    init_parser = sparser.add_parser('init', help='initializes hop')\n    init_parser.add_argument('dest_dir', help='destination directory for hop')\n\n    sparser.add_parser('provision', help='provisions gocd', parents=[config_parser])\n\n    configure_parser = sparser.add_parser('configure', help='configures gocd', parents=[config_parser])\n    configure_parser.add_argument('context', help='A folder with a set of yml files for app definitions')\n    configure_parser.add_argument('--host', help='GoCD host. e.g: localhost:8153')\n    configure_parser.add_argument('--user', help='User with admin role')\n    configure_parser.add_argument('--password', help='Password for user')\n\n    return parser\n","sub_path":"hop/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"75942232","text":"import socket\n\ns = socket.socket()\nhost = '192.168.1.48'  # ip of raspberry pi\nport = 13000\ns.bind((host, port))\n\nprint(\"Server Listening at\", host)\n\ns.listen(5)\nwhile True:\n    c, addr = s.accept()\n    print('Got connection from', addr)\n    c.send(b'Thank you for connecting')\n    c.close()\n","sub_path":"Tester/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"50935456","text":"import functools\n\nfrom chainer import function, variable\n\n\ndef _init_with_name_scope(self, *args, **kargs):\n    self.name_scope = kargs['name_scope']\n    org_init = kargs['org_init']\n    del kargs['name_scope']\n    del kargs['org_init']\n    org_init(self, *args, **kargs)\n\n\n_org_func_init = function.Function.__init__\n_org_val_init = variable.VariableNode.__init__\n\n\nclass name_scope(object):\n    \"\"\"Class that creates hierarchical names for operations and variables.\n    Args:\n        name (str): Name for setting namespace.\n        values (list): Variable in the namespace.\n    Example:\n        You can set namespace using \"with\" statement.\n        In the following example, no namespace is set for the variable 'X', but\n        the variable 'Y' and the relu function are set to the namespace \"test\".\n\n        x = chainer.Variable(...)\n        with name_scope('test'):\n            y = F.relu(x)\n    \"\"\"\n    stack = []\n\n    def __init__(self, name, values=list()):\n        self.stack.append(name)\n        for v in values:\n            v.node.name_scope = '/'.join(self.stack)\n\n    def __enter__(self):\n        self._org_func_init = function.Function.__init__\n        function.Function.__init__ = functools.partialmethod(_init_with_name_scope,\n                                                             name_scope='/'.join(self.stack),\n                                                             org_init=_org_func_init)\n        self._org_val_init = variable.VariableNode.__init__\n        variable.VariableNode.__init__ = functools.partialmethod(_init_with_name_scope,\n                                                                 name_scope='/'.join(self.stack),\n                                                                 org_init=_org_val_init)\n        return self\n\n    def __exit__(self, exec_type, exec_value, traceback):\n        function.Function.__init__ = self._org_func_init\n        variable.VariableNode.__init__ = self._org_val_init\n        self.stack.pop(-1)\n\ndef within_name_scope(name):\n    \"\"\"Decorator for link class methods.\n    Args:\n        name (str): Name for setting namespace.\n    \"\"\"\n    def decorator(func):\n        import functools\n        @functools.wraps(func)\n        def wrapper(self, *args, **kwargs):\n            with name_scope(name, self.params()):\n                res = func(self, *args, **kwargs)\n            return res\n        return wrapper\n    return decorator\n","sub_path":"tensorboard/name_scope.py","file_name":"name_scope.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"112875978","text":"lower = int(input(\"enter lower bound\"))\nupper = int(input(\"enter upper bound\"))\n\nfor n in range(lower, upper):\n\n    if n == 1: continue\n\n    flag = 1\n    for val in range(2, n // 2 + 1):\n        if n % val == 0:\n            flag = 0\n            break\n\n    if flag == 1:\n        print(\"Prime number: \", n)\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
n)\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"49742253","text":"from django import forms\nfrom django.core.mail import send_mail\nfrom django.core.validators import validate_email\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom booking.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom karspexet.settings import DEBUG\nimport datetime, math\nimport uuid\n\nfrom .forms import registerForm\n\ndef verify_coupon(coupon_code):\n '''\n @param: A code for a coupon. Max 8 characters (str)\n @return: Tuple with:\n - statuscode (int)\n - coupon if found, otherwise None (DiscountCode)\n Statuscodes:\n 1: All good\n -1: DiscountCode doesn't exist\n -2: Coupon is used\n '''\n try:\n coupon = DiscountCode.objects.get(code=coupon_code)\n except models.ObjectDoesNotExist:\n return (-1, None)\n if coupon.is_used():\n return (-2, coupon)\n return (1, coupon)\n\n\ndef determine_price(spex, nachspex, guest_type, alcohol_free, coupon=None):\n prices = {\n 'phux': 10,\n 'student': 15,\n 'not_student': 25\n }\n price = 0\n cheaper_used = False\n if spex:\n '''\n Use coupon if and only if:\n - It exists\n - It's still valid (still has uses left)\n - The price is less than it otherwise would be\n '''\n if coupon and not coupon.is_used() and coupon.price < prices[guest_type]:\n price += coupon.price\n else:\n price += prices[guest_type]\n if coupon and coupon.price >= prices[guest_type]:\n cheaper_used = True\n\n # Price for nachspex is 15€ if also spex and 18€ if only nachspex\n if nachspex:\n if spex:\n price += 15\n else:\n price += 18\n # If alcoholfree price is reduced by 3€\n if alcohol_free:\n price -= 3\n return (price, cheaper_used)\n\n\n# Create your views here.\n@login_required(login_url='/admin')\ndef enrolled(request):\n return render(request, 'enrolled.html', {'participants': Participant.objects.all()})\n\n@login_required(login_url='/admin')\ndef teater(request):\n return render(request, 'teater.html', {'participants': Participant.objects.all()})\n\n\ndef ticket(request, participant_id):\n if Participant.objects.filter(uuid = participant_id).exists():\n participant = Participant.objects.get(uuid = participant_id)\n\n context = {\n 'name': participant.name,\n 'student': participant.student,\n 'spex': participant.spex,\n 'nachspex': participant.nachspex,\n 'price': participant.price,\n }\n\n return render(request, 'ticket.html', context)\n else:\n return render(request, 'index.html', {'error_message': \"Det existerar inte biljett med denna id\"})\n\ndef thanks(request):\n return render(request, \"thanks.html\")\n\ndef form_page_view(request):\n return render(request, \"index.html\")\n\ndef register(request):\n if request.method == 'POST':\n form = registerForm(request.POST)\n if form.is_valid():\n form = form.cleaned_data\n # Check type of ticket\n if form['register_choice'] == 'only_spex':\n register_choice = \"Endast spex\"\n spex = True\n nachspex = False\n elif form['register_choice'] == 'only_nachspex':\n register_choice = \"Endast nachspex\"\n spex = False\n nachspex = True\n else:\n register_choice = \"Spex och nachspex\"\n spex = True\n nachspex = True\n\n # Check validity of coupon if code was entered\n status_code, coupon = None, None\n if form['coupon']:\n status_code, coupon = 
verify_coupon(form['coupon'])\n\n # Check price\n price, cheaper_used = determine_price(spex, nachspex, form['student'], form['alcoholFree'], coupon)\n if cheaper_used:\n status_code = 0\n\n context = {\n 'name': form['name'],\n 'email': form['email'],\n 'avec': form['avec'],\n 'diet': form['diet'],\n 'comment': form['comment'],\n 'alcohol_free' : form['alcoholFree'],\n 'register_choice': register_choice,\n 'spex': spex,\n 'student': form['student'],\n 'nachspex': nachspex,\n 'price': price,\n 'coupon_status': status_code,\n 'coupon_code': form['coupon']\n }\n return render(request, 'confirm.html', context)\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = registerForm()\n return render(request, 'register.html', {'form': form})\n\ndef send(request):\n if request.method == 'POST':\n spex = request.POST['spex']\n nachspex = request.POST['nachspex']\n coupon_code = request.POST['coupon']\n alcohol_free = request.POST['alcohol_free']\n guest_type = request.POST['student']\n email = request.POST['email']\n\n # Apparently the 'True' is not a Boolean and needs to be converted, wtf\n # TODO: Change to 0 and 1 to avoid this step\n if nachspex == 'True':\n nachspex = True\n else:\n nachspex = False\n\n if spex == 'True':\n spex = True\n else:\n spex = False\n\n if alcohol_free == 'True':\n alcohol_free = True\n else:\n alcohol_free = False\n\n status_code, coupon = None, None\n if request.POST['coupon']:\n status_code, coupon = verify_coupon(request.POST['coupon'])\n price, cheaper_used = determine_price(spex, nachspex, guest_type, alcohol_free, coupon)\n\n\n new_participant = Participant(\n name=request.POST['name'],\n email=email,\n spex=spex,\n nachspex=nachspex,\n alcoholfree=alcohol_free,\n diet=request.POST['diet'],\n avec=request.POST['avec'],\n comment=request.POST['comment'],\n student=guest_type,\n price=price,\n uuid=uuid.uuid4()\n )\n new_participant.save()\n\n if status_code == 1 and not cheaper_used:\n coupon.times_used += 1\n coupon.save()\n\n ticket_url = 'https://karspex.teknologforeningen.fi/ticket/{}'.format(new_participant.uuid)\n subject, sender = 'Anmälan till Kårspexets föreställning', 'Kårspexambassaden '\n content = \"Tack för din anmälan till Kårspexets Finlandsföreställning den 22 februari.\\nDin biljett hittar du på {}. Vänligen ta fram biljetten när du går in i teatern för att försnabba inträdet.\\nBetala {}€ till konto FI34 1124 3500 2457 77 (mottagare Kårspexambassaden) eller med MobilePay till numret +358504948298 med meddelandet \\\"Kårspex, Förnamn Efternamn\\\". Betalningen ska vara framme senast 22.2.2020. 
+{"seq_id":"646626011","text":"#\n# * Provide transaction management for clients,\n# * ensuring that all transactions are serializable, recoverable,\n# * and in general satisfy the ACID properties.\n# * @author Edward Sciore\n#\nimport threading\n\nfrom simpledb.file.BlockId import BlockId\nfrom simpledb.tx.BufferList import BufferList\nfrom simpledb.tx.concurrency.ConcurrencyMgr import ConcurrencyMgr\nfrom simpledb.tx.recovery.RecoveryMgr import RecoveryMgr\nfrom simpledb.util.Synchronized import synchronized\n\n\nclass Transaction(object):\n    nextTxNum = 0\n    END_OF_FILE = -1\n    lock = threading.Lock()\n\n    #\n    # * Create a new transaction and its associated\n    # * recovery and concurrency managers.\n    # * This constructor depends on the file, log, and buffer\n    # * managers that it gets from the class\n    # * {@link simpledb.server.SimpleDB}.\n    # * Those objects are created during system initialization.\n    # * Thus this constructor cannot be called until either\n    # * {@link simpledb.server.SimpleDB#init(String)} or\n    # * {@link simpledb.server.SimpleDB#initFileLogAndBufferMgr(String)} or\n    # * is called first.\n    #\n    def __init__(self, fm, lm, bm):\n        self.fm = fm\n        self.bm = bm\n        self.txnum = self.nextTxNumber()\n        self.recoveryMgr = RecoveryMgr(self, self.txnum, lm, bm)\n        self.concurMgr = ConcurrencyMgr()\n        self.mybuffers = BufferList(bm)\n\n    #\n    # * Commit the current transaction.\n    # * Flush all modified buffers (and their log records),\n    # * write and flush a commit record to the log,\n    # * release all locks, and unpin any pinned buffers.\n    #\n    def commit(self):\n        self.recoveryMgr.commit()\n        print(\"transaction \" + str(self.txnum) + \" committed\")\n        self.concurMgr.release()\n        self.mybuffers.unpinAll()\n\n    #\n    # * Rollback the current transaction.\n    # * Undo any modified values,\n    # * flush those buffers,\n    # * write and flush a rollback record to the log,\n    # * release all locks, and unpin any pinned buffers.\n    #\n    def rollback(self):\n        self.recoveryMgr.rollback()\n        print(\"transaction \" + str(self.txnum) + \" rolled back\")\n        self.concurMgr.release()\n        self.mybuffers.unpinAll()\n\n    #\n    # * Flush all modified buffers.\n    # * Then go through the log, rolling back all\n    # * uncommitted transactions. Finally,\n    # * write a quiescent checkpoint record to the log.\n    # * This method is called during system startup,\n    # * before user transactions begin.\n    #\n    def recover(self):\n        self.bm.flushAll(self.txnum)\n        self.recoveryMgr.recover()\n\n    #\n    # * Pin the specified block.\n    # * The transaction manages the buffer for the client.\n    # * @param blk a reference to the disk block\n    #\n    def pin(self, blk):\n        self.mybuffers.pin(blk)\n\n    #\n    # * Unpin the specified block.\n    # * The transaction looks up the buffer pinned to this block,\n    # * and unpins it.\n    # * @param blk a reference to the disk block\n    #\n    def unpin(self, blk):\n        self.mybuffers.unpin(blk)\n\n    #\n    # * Return the integer value stored at the\n    # * specified offset of the specified block.\n    # * The method first obtains an SLock on the block,\n    # * then it calls the buffer to retrieve the value.\n    # * @param blk a reference to a disk block\n    # * @param offset the byte offset within the block\n    # * @return the integer stored at that offset\n    #\n    def getInt(self, blk, offset):\n        self.concurMgr.sLock(blk)\n        buff = self.mybuffers.getBuffer(blk)\n        return buff.contents().getInt(offset)\n\n    #\n    # * Return the string value stored at the\n    # * specified offset of the specified block.\n    # * The method first obtains an SLock on the block,\n    # * then it calls the buffer to retrieve the value.\n    # * @param blk a reference to a disk block\n    # * @param offset the byte offset within the block\n    # * @return the string stored at that offset\n    #\n    def getString(self, blk, offset):\n        self.concurMgr.sLock(blk)\n        buff = self.mybuffers.getBuffer(blk)\n        return buff.contents().getString(offset)\n\n    #\n    # * Store an integer at the specified offset\n    # * of the specified block.\n    # * The method first obtains an XLock on the block.\n    # * It then reads the current value at that offset,\n    # * puts it into an update log record, and\n    # * writes that record to the log.\n    # * Finally, it calls the buffer to store the value,\n    # * passing in the LSN of the log record and the transaction's id.\n    # * @param blk a reference to the disk block\n    # * @param offset a byte offset within that block\n    # * @param val the value to be stored\n    #\n    def setInt(self, blk, offset, val, okToLog):\n        self.concurMgr.xLock(blk)\n        buff = self.mybuffers.getBuffer(blk)\n        lsn = -1\n        if okToLog:\n            lsn = self.recoveryMgr.setInt(buff, offset, val)\n        p = buff.contents()\n        p.setInt(offset, val)\n        buff.setModified(self.txnum, lsn)\n\n    #\n    # * Store a string at the specified offset\n    # * of the specified block.\n    # * The method first obtains an XLock on the block.\n    # * It then reads the current value at that offset,\n    # * puts it into an update log record, and\n    # * writes that record to the log.\n    # * Finally, it calls the buffer to store the value,\n    # * passing in the LSN of the log record and the transaction's id.\n    # * @param blk a reference to the disk block\n    # * @param offset a byte offset within that block\n    # * @param val the value to be stored\n    #\n    def setString(self, blk, offset, val, okToLog):\n        self.concurMgr.xLock(blk)\n        buff = self.mybuffers.getBuffer(blk)\n        lsn = -1\n        if okToLog:\n            lsn = self.recoveryMgr.setString(buff, offset, val)\n        p = buff.contents()\n        p.setString(offset, val)\n        buff.setModified(self.txnum, lsn)\n\n    #\n    # * Return the number of blocks in the specified file.\n    # * This method first obtains an SLock on the\n    # * \"end of the file\", before asking the file manager\n    # * to return the file size.\n    # * @param filename the name of the file\n    # * @return the number of blocks in the file\n    #\n    def size(self, filename):\n        dummyblk = BlockId(filename, self.END_OF_FILE)\n        self.concurMgr.sLock(dummyblk)\n        return self.fm.length(filename)\n\n    #\n    # * Append a new block to the end of the specified file\n    # * and returns a reference to it.\n    # * This method first obtains an XLock on the\n    # * \"end of the file\", before performing the append.\n    # * @param filename the name of the file\n    # * @return a reference to the newly-created disk block\n    #\n    def append(self, filename):\n        dummyblk = BlockId(filename, self.END_OF_FILE)\n        self.concurMgr.xLock(dummyblk)\n        return self.fm.append(filename)\n\n    def blockSize(self):\n        return self.fm.blockSize()\n\n    def availableBuffs(self):\n        return self.bm.available()\n\n    @staticmethod\n    def nextTxNumber():\n        with Transaction.lock:\n            Transaction.nextTxNum += 1\n            return Transaction.nextTxNum\n","sub_path":"simpledb/tx/Transaction.py","file_name":"Transaction.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"430868086","text":"from direct.showbase.DirectObject import DirectObject\nfrom panda3d.core import NodePath, Point3, Vec3\nimport math\n\n\nclass CameraController(DirectObject):\n\n    def __init__(self, base, r, theta, phi):\n        DirectObject.__init__(self)\n        self.base = base\n\n        # Parameters\n        self.rotateMag = 0.5\n        self.moveMag = 50\n        self.zoomMag = 100\n\n        # Camera properties\n        self.r = r\n        self.theta = theta\n        self.phi = phi\n        self.target = NodePath(\"target\")\n        self.target.reparentTo(self.base.render)\n        self.base.camera.reparentTo(self.target)\n        self.followingObject = None\n\n        # Controls\n        self.mouseDown1 = False\n        self.mouseDown2 = False\n        self.mouseDown3 = False\n        self.mousePrevX = 0.0\n        self.mousePrevY = 0.0\n        self.accept(\"mouse1\", self.onMouse1, [True])\n        self.accept(\"mouse1-up\", self.onMouse1, [False])\n        self.accept(\"mouse2\", self.onMouse2, [True])\n        self.accept(\"mouse2-up\", self.onMouse2, [False])\n        self.accept(\"mouse3\", self.onMouse3, [True])\n        self.accept(\"mouse3-up\", self.onMouse3, [False])\n\n        # Run task that updates camera\n        self.base.taskMgr.add(self.updateCamera, \"UpdateCameraTask\", priority=1)\n\n    def setTarget(self, parent):\n        self.target.reparentTo(parent)\n\n    def follow(self, obj):\n        self.followingObject = obj\n\n    def stopFollowing(self):\n        self.followingObject = None\n\n    def zoom(self, dR):\n        self.r += dR\n        if self.r < 0.0:\n            self.r = 0.0\n\n    def rotateTheta(self, dTheta):\n        self.theta += dTheta\n        if self.theta < 0.0:\n            self.theta += 2 * math.pi\n        if self.theta > 2 * math.pi:\n            self.theta -= 2 * math.pi\n\n    def rotatePhi(self, dPhi):\n        self.phi += dPhi\n        if self.phi < 0.1:\n            self.phi = 0.1\n        if self.phi > math.pi-0.1:\n            self.phi = math.pi-0.1\n\n    def onMouse1(self, down):\n        if not self.mouseDown2 and not self.mouseDown3:\n            if down and self.base.mouseWatcherNode.hasMouse():\n                self.mouseDown1 = True\n                self.mousePrevX = self.base.mouseWatcherNode.getMouseX()\n                self.mousePrevY = self.base.mouseWatcherNode.getMouseY()\n            else:\n                self.mouseDown1 = False\n\n    def onMouse2(self, down):\n        if not self.mouseDown1 and not self.mouseDown3:\n            if down and self.base.mouseWatcherNode.hasMouse():\n                self.mouseDown2 = True\n                self.mousePrevX = self.base.mouseWatcherNode.getMouseX()\n                self.mousePrevY = self.base.mouseWatcherNode.getMouseY()\n            else:\n                self.mouseDown2 = False\n\n    def onMouse3(self, down):\n        if not self.mouseDown1 and not self.mouseDown2:\n            if down and self.base.mouseWatcherNode.hasMouse():\n                self.mouseDown3 = True\n                self.mousePrevX = self.base.mouseWatcherNode.getMouseX()\n                self.mousePrevY = self.base.mouseWatcherNode.getMouseY()\n            else:\n                self.mouseDown3 = False\n\n    def updateCamera(self, task):\n        if self.base.mouseWatcherNode.hasMouse():\n            # Register camera controls\n            mouseX = self.base.mouseWatcherNode.getMouseX()\n            mouseY = self.base.mouseWatcherNode.getMouseY()\n            dX = self.mousePrevX - mouseX\n            dY = self.mousePrevY - mouseY\n\n            if self.mouseDown1:\n                self.rotateTheta(dX * math.pi * self.rotateMag)\n                self.rotatePhi(-dY * math.pi * self.rotateMag)\n\n            if self.mouseDown2 and self.followingObject is None:\n                vecX = self.target.getRelativeVector(self.base.camera, Vec3.right())\n                vecY = self.target.getRelativeVector(self.base.camera, Vec3.forward())\n                vecY.setZ(0.0)\n                vecY.normalize()\n                offset = (vecX * dX * self.moveMag) + (vecY * dY * self.moveMag)\n                self.target.setPos(self.target, offset)\n\n            if self.followingObject is not None:\n                self.target.setPos(self.followingObject.getPos())\n\n            if self.mouseDown3:\n                self.zoom(dY * self.zoomMag)\n\n            self.mousePrevX = mouseX\n            self.mousePrevY = mouseY\n\n        # Update camera position\n        position = Point3(0.0, 0.0, 0.0)\n        position.setX(self.r * math.cos(self.theta) * math.sin(self.phi))\n        position.setY(self.r * math.sin(self.theta) * math.sin(self.phi))\n        position.setZ(self.r * math.cos(self.phi))\n        self.base.camera.setPos(position)\n        self.base.camera.lookAt(self.target)\n\n        return task.cont\n","sub_path":"python/_bak/9/OrbitCamera.py","file_name":"OrbitCamera.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"507168104","text":"from dataclasses import dataclass\nimport pymysql\n\n\n@dataclass\nclass TransaccionDTO:\n    monto: float\n    id_movimiento: int\n    id_categoria: int\n    descripcion: str\n    fecha: str\n\n@dataclass\nclass MovimientoDTO:\n    nombre: str\n    descripcion: str\n\n@dataclass\nclass CategoriaDTO:\n    nombre: str\n    descripcion: str\n\n\nclass Service:\n    def __init__(self):\n        self.conexion = pymysql.connect(host=\"localhost\", port=3306, user=\"usuario\",\n                                        passwd=\"1234\", db=\"finanzas\")\n        self.cursor = self.conexion.cursor()\n\n    def buscar(self, tabla, campo=None, condicion=None, valor=None):\n        if campo:\n            self.cursor.execute(\n                \"SELECT * FROM {} WHERE {} {} %s\".format(tabla, campo, condicion), (valor)\n            )\n        else:\n            self.cursor.execute(\n                \"SELECT * FROM {}\".format(tabla)\n            )\n        return self.cursor.fetchall()\n\n    def eliminar(self, tabla, id):\n        self.cursor.execute(\n            \"DELETE FROM {} WHERE id = %s\".format(tabla), (id)\n        )\n        self.conexion.commit()\n\n    def editar(self, tabla, campo, valor):\n        self.cursor.execute(\n            \"UPDATE {} SET {} = %s\".format(tabla, campo), (valor)\n        )\n        self.conexion.commit()\n\n    def registrar_movimiento(self, data=MovimientoDTO):\n        self.cursor.execute(\n            \"INSERT INTO movimientos VALUES (%s, %s, %s)\", (None, data.nombre, data.descripcion)\n        )\n        self.conexion.commit()\n\n    def registrar_categoria(self, tipo, data=CategoriaDTO):\n        self.cursor.execute(\n            \"INSERT INTO {} VALUES (%s, %s, %s)\".format(tipo), (None, data.nombre, data.descripcion)\n        )\n        self.conexion.commit()\n\n    def registrar_transaccion(self, tipo, data=TransaccionDTO):\n        self.cursor.execute(\n            \"INSERT INTO {} VALUES (%s, %s, %s, %s, %s, %s)\".format(tipo), (None, data.monto, data.id_movimiento,\n                                                                            data.id_categoria, data.descripcion, data.fecha,)\n        )\n        self.conexion.commit()\n\n    def calcular_balance(self):\n        balance = 0\n        for ingreso in self.buscar(\"ingresos\"):\n            balance += ingreso[1]\n        for egreso in self.buscar(\"egresos\"):\n            balance -= egreso[1]\n        return balance\n\n    def cerrar_database(self):\n        self.conexion.close()\n\nif __name__ == \"__main__\":\n    transaccion = TransaccionDTO(0.50, 1, 1, \"\", \"2019\")\n    modelo = Service()\n    print(modelo.buscar(\"movimientos\"))\n    modelo.cerrar_database()","sub_path":"modelo/finanzas.py","file_name":"finanzas.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
self.buscar(\"ingresos\"):\n balance += ingreso[1]\n for egreso in self.buscar(\"egresos\"):\n balance -= egreso[1]\n return balance\n\n def cerrar_database(self):\n self.conexion.close()\n\nif __name__ == \"__main__\":\n transaccion = TransaccionDTO(0.50, 1, 1, \"\", \"2019\")\n modelo = Service()\n print(modelo.buscar(\"movimientos\"))\n modelo.cerrar_database()","sub_path":"modelo/finanzas.py","file_name":"finanzas.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"309535044","text":"from django.contrib.auth.decorators import login_required\nfrom app.DatabaseWorker import WellDataWorker\nfrom app.views import get_relative_part\nfrom django.http import JsonResponse\n\n@login_required(login_url=\"/\")\ndef get_counties(request):\n if request.is_ajax():\n database_worker = WellDataWorker()\n counties = database_worker.get_all_counties()\n context = {}\n context[\"counties\"] = counties\n return JsonResponse(context)\n\n@login_required(login_url=\"/\")\ndef get_formations(request):\n if request.is_ajax():\n database_worker = WellDataWorker()\n formations = database_worker.get_all_formations()\n context = {}\n context[\"formations\"] = formations\n return JsonResponse(context)\n\n@login_required(login_url=\"/\")\ndef get_townships(request):\n database_worker = WellDataWorker()\n return get_relative_part(request, database_worker.get_all_townships,'range','meridian','townships')\n\n\n@login_required(login_url=\"/\")\ndef get_ranges(request):\n database_worker = WellDataWorker()\n return get_relative_part(request, database_worker.get_all_ranges,'township','meridian','ranges')\n\n\n@login_required(login_url=\"/\")\ndef get_meridians(request):\n database_worker = WellDataWorker()\n return get_relative_part(request, database_worker.get_all_meridians,'township','range','meridians')","sub_path":"app/views_location.py","file_name":"views_location.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562181770","text":"import pip\nimport os\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', '-U',package])\n else:\n pip._internal.main(['install', package])\n\nos.system(\"python3 setup.py bdist bdist_wheel\")\n# Example\nif __name__ == '__main__':\n install('dist/PLC-0.2.0-py3-none-any.whl')\n","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"269665130","text":"from tqdm import tqdm\nimport pymongo\nfrom lxml import etree\nimport io\nimport json\nimport asyncio\nimport aiohttp\n\nloop = asyncio.new_event_loop()\nclient = pymongo.MongoClient(\"localhost\", 27017)\ndb = client.testdb\ncol = db.data\nerrors = []\n\n\nasync def get_data(url):\n try:\n async with aiohttp.ClientSession() as client:\n async with client.get(url=url) as raw_response:\n response_text = await raw_response.text()\n parser = etree.HTMLParser(encoding=\"utf-8\")\n doc = etree.parse(io.StringIO(str(response_text)), parser)\n all_h1 = doc.xpath(\"//h1//text()\")\n \"\"\"\n \n Scrapping code goes here\n \n At the end, either return the data or store it in pymongo or any other db. 
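\n\n                A minimal sketch of that last step (illustrative only; it reuses the module-level col handle and the url/all_h1 names already in scope):\n\n                    col.insert_one({\"url\": url, \"h1\": all_h1})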
\n \"\"\"\n\n except:\n errors.append(url)\n\n\nasync def main(links):\n q = asyncio.Queue()\n producers = [loop.create_task(get_data(link)) for link in links]\n await asyncio.gather(*producers)\n await q.join()\n\n\nif __name__ == \"__main__\":\n\n # Load all the urls.\n with open(\"urls.txt\") as f:\n js = f.read()\n f.close()\n\n ids = js.split(\"\\n\")\n\n # Feed urls in group of 100 urls. This way, the scrapping code will make 100 concurrent requests to get data from\n # respective url. You can change this number according to your laptop speed and internet connectivity. A simple\n # trial and error approach could be used to find the most efficient number for your machine.\n for i in tqdm(range(0, len(ids), 100)):\n loop.run_until_complete(main(ids[i:i + 100]))\n\n print(f\"Total {round((len(ids) - len(errors)) / len(ids),2)} Urls successfully scraped! Remaining urls are \"\n f\"stored in errors.txt file.\")\n\n with open(\"errors.txt\", \"w\") as f:\n f.write(\"\\n\".join(errors))\n f.close()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"basic_template.py","file_name":"basic_template.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"653580785","text":"while True:\n prompt = \"Hi. How many are going to be in your party today?\\n\"\n ans = int(input(prompt))\n if ans >= 8 and ans > 0:\n print(\"Sorry, you are going to have to wait a little bit!\")\n elif ans == -1:\n print(\"Goodbye!\")\n break\n elif ans < -1:\n print(\"Sir, that doesn't really make any sense. Please enter a better number!\")\n else:\n print(\"Your table is ready, right this way!\")\n","sub_path":"pythoncode/table_waiting.py","file_name":"table_waiting.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"446562783","text":"import numpy as np\n\ncm_per_meter = 100\nmeters_per_cm = 0.01\n\ninches_per_foot = 12\nfeet_per_inch = 0.083333\n\ninches_per_meter = 39.3701\nmeters_per_inch = 0.0254\n\nfeet_per_meter = 3.28084\nmeters_per_foot = 0.3048\n\nradians_per_degree = np.pi / 180\ndegrees_per_radian = 180 / np.pi\n\npounds_per_kilogram = 2.20462\nkilograms_per_pound = 0.453592\n\ngrams_per_kilogram = 1000\nkilograms_per_gram = 0.001\n","sub_path":"src/utils/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"392870407","text":"#!/usr/bin/env python3\n\n# For more info see: https://regexone.com/references/python\n\nimport imports_standardized as imps\nimport re # Regular Expression\nfrom ATHPy3 import StrUtl\nfrom ATHPy3 import GetOpts\n\nfrom ATHBox import Box\nfrom ATHRegx import Regx\n\ndef basicRegex(): # {\n regex = \"[A-Za-z]+\"\n input = \"Hello World\"\n if ( re.search( regex, input ) ):\n print(\"Found it\")\n# }\n\ndef matchRegex(): # {\n regex = \"[A-Za-z]+\"\n input = \"Hello World\"\n\n match = re.search( regex, input )\n print ( \"First match start=%s, end=%s\" % (match.start(), match.end()) )\n# }\n\ndef groupRegex(): # {\n regex = \"[A-Za-z]+\"\n input = \"Hello World\"\n\n match = re.search( regex, input ) # search finds only the first, its not a global search\n print( \"Group0='%s'\" % (match.group(0)) )\n # print( \"Group1='%s'\" % (match.group(1)) ) # ERR, there is no group 1\n# }\n\ndef findallRegex(): # {\n regex = \"[A-Za-z]+\"\n input = \"Hello World\"\n\n # global search returns a 
collection\n # BE CAREFUL this will load the entire contents into memory and all matches into memory\n # for you to process. If the input is large or many matches expected, finditr may be better\n matches = re.findall( regex, input ) \n for match in matches: # match is a string\n print ( \"found %s\" % (match) )\n# }\n\ndef finditerRegex(): # {\n regex = \"[A-Za-z]+\"\n input = \"Hello World\"\n\n # global search returns a collection\n # BE CAREFUL this will load the entire contents into memory and all matches into memory\n # for you to process. If the input is large or many matches expected, finditr may be better\n matches = re.finditer( regex, input ) \n for match in matches: # match is a match obj\n # input[#1:#2] // this is the synta\n print ( \"found match %s @ start=%s, end=%s\" % (input[match.start():match.end()], match.start(), match.end()) )\n# }\n\ndef finditerRegexAdvanced(): # {\n regex = r\"(\\d{1,2})/(\\d{1,2})/(\\d{2}|\\d{4})\"\n #regex = r\"(\\d{2}|\\d{4})\"\n input = \"7/5/79, 8/16/82, 2/29/16\"\n\n # global search returns a collection\n # BE CAREFUL this will load the entire contents into memory and all matches into memory\n # for you to process. If the input is large or many matches expected, finditr may be better\n matches = re.finditer( regex, input ) \n for match in matches: # match is a match obj\n # input[#1:#2] // this is the synta\n #print ( \"found match %s @ start=%s, end=%s\" % (input[match.start():match.end()], match.start(), match.end()) )\n print( \"whole group match='%s', mo='%s', day='%s', yr='%s'\" % (\n match.group(0),\n match.group(1),\n match.group(2),\n match.group(3)\n ) )\n# }\n\ndef searchAndReplaceRegexMultiline(): # {\n regex = r\"Bob\"\n replace = \"Aaron\"\n input = \"Hello my name is Bob\\nDid I mention my name is Bob\"\n result = re.sub(regex, replace, input, 0, re.MULTILINE)\n print(result)\n# }\n\ndef searchAndReplaceRegexMultilineAdvanced(): # {\n input = \"Hello my name is Bob\"\n input += \"\\n\"\n input += \"Did I mention my name is Bob\"\n input += \"\\n\"\n input += \"Even though I am Bob, he is also Bob\"\n input += \"\\n\"\n input += \"We are Bob\"\n\n output = ''\n lastPos = 0\n regex = re.compile(r\"(Bob)\", re.MULTILINE)\n matches = regex.finditer(input)\n for match in matches:\n if match:\n repl = \"Aaron\"\n output += input[lastPos:match.start()]\n output += repl\n lastPos = match.start() + len(match.group(1))\n print(\"--\")\n print(output)\n# }\n\n\ndef searchAndReplaceSimplified(): # {\n input = \"Hello my name is Bob\"\n input += \"\\n\"\n input += \"Did I mention my name is Bob\"\n input += \"\\n\"\n input += \"Even though I am Bob, he is also Bob\"\n input += \"\\n\"\n input += \"We are Bob\"\n\n rx = Regx(input)\n # Super simple\n print(\"---\")\n print(rx.replace(r\"(Bob)\", \"Joe\"))\n print(\"---\")\n \n # Customizable via delegate\n print(\"---\")\n print(rx.replaceDelegate(r\"(Bob)\", searchAndReplaceSimplifiedSearchResultDelegate))\n print(\"---\")\n\n# }\n\n# Delegate receives a ATHRegx.Match object\n# which contains a re.Match + some additional useful bits\n# the returned value is what gets substituded for this particular match\ndef searchAndReplaceSimplifiedSearchResultDelegate( match ): # {\n # return \"_\" + match.match.group(0) + \"_\" + str(match.idx) + \"_\"\n return \"_%s_%s_\" % (match.match.group(0), match.idx) # same as above but in sprintf stylee\n# }\n\n\ndef test(): # {\n if ( StrUtl.isFloatString(\"123\") ):\n print(\"Yes\")\n else:\n print(\"No\")\n# }\n\ndef testBox(): # {\n box = Box(2,3)\n 
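# describe() is assumed to print the Box's state (ATHBox helper)\n    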
box.describe()\n# }\n\ndef opts(): # {\n opts = GetOpts()\n opts.addDescription(\"Description\")\n\n opts.add(\"path\", \"p\", \"THEPATH\", False, \"path description\")\n\n if ( opts.buildSafe( None ) ): # {\n print(opts.get(\"path\", \"empty\"))\n # }\n# }\n\ndef __main(): # {\n basicRegex()\n matchRegex()\n groupRegex()\n findallRegex()\n finditerRegex()\n finditerRegexAdvanced()\n searchAndReplaceRegexMultiline()\n searchAndReplaceRegexMultilineAdvanced()\n searchAndReplaceSimplified()\n test()\n opts()\n testBox()\n# }\n\nif __name__ == '__main__': __main()","sub_path":"examples3/example_regex.py","file_name":"example_regex.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311389975","text":"#! /usr/bin/env python\n\"\"\"\nWeek 01 Exercise 06\n\nPython script that creates a list. One of the elements of the list is a\ndictionary with three keys.\n\nThe list is written to files using both YAML and JSON formats. The YAML file\nis in the expanded form.\n\"\"\"\n\nimport yaml\nimport json\n\ntest_list = [x+1 for x in range(5)]\ntest_dict = {\"key01\": \"value01\", \"key02\": \"value02\", \"key03\": \"value03\"}\ntest_list.append(test_dict)\n\nwith open(\"test_file.yml\", 'w') as yml_file:\n # yml_file.write(yaml.dump(test_list))\n yaml.dump(test_list, yml_file, default_flow_style=False)\n\nwith open(\"test_file.json\", 'w') as json_file:\n json_file.write(json.dumps(test_list, sort_keys=True))\n","sub_path":"week_01/ex06.py","file_name":"ex06.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"532642548","text":"import numpy as np\nfrom sklearn import svm\n\n# Calculate and return true positive, true negative, false positive, and false negative\ndef perf_measure(y_actual, y_hat):\n TP = 0\n FP = 0\n TN = 0\n FN = 0\n\n for i in range(len(y_hat)): \n if y_actual[i]==y_hat[i]==1:\n TP += 1\n if y_hat[i]==1 and y_actual[i]!=y_hat[i]:\n FP += 1\n if y_actual[i]==y_hat[i]==-1:\n TN += 1\n if y_hat[i]==-1 and y_actual[i]!=y_hat[i]:\n FN += 1\n\n return(TP, FP, TN, FN)\n\n\ndef run(k, X, y, algorithmType, **kwargs):\n \"\"\"\n Perform k fold cross validation and return an array representing the\n results\n\n Input:\n folds:\n The value of k, the number of folds in k-fold cross validation.\n\n X:\n The samples, as an n x d numpy array\n\n y:\n The labels in parallel with the samples above\n\n Keyword arguments, **kwargs:\n C:\n The slack variable for both primal and dual svm\n\n algorithmType:\n A python string with value as primal or dual for the\n appropriate model. We are using the radial basis kernel\n in the case of the dual mode\n\n gamma:\n Read svm.LinearSVC for details. For the radial basis kernel, it\n is the hyperparameter to be tuned\n\n Return:\n The function would return a matrix z of dimensions k x 1, containing\n the error percentage (%) for each of the k trials.\n\n \"\"\"\n\n n = X.shape[0]\n\n # this is a list containing all the k subsets. So it is of size k in the\n # once we ready it, in a for loop\n allSets = []\n\n # there is a parallel list for labels\n ySets = []\n # this loop is not the main loop to iterate and fill z. 
It is just to make\n # the sets\n for i in range(k):\n # we use float to perform normal division (not integer division)\n lower = int(float(n) * i / float(k))\n upper = int(float(n) * (i+1) / float(k)) - 1\n subsetForX = X[lower:upper+1, ]\n subsetForY = y[lower:upper+1, ]\n allSets.append(subsetForX)\n ySets.append(subsetForY)\n\n # z contains the resulting mean square error for each set according to k\n # fold cross validation\n z = np.zeros(shape=(k, 1))\n\n # the main loop matching the pseudo code given in the question\n for i in range(k):\n T = allSets[i]\n yForT = ySets[i]\n\n # we try and add all rows that are not in T, into setForS\n setForS = allSets[0:i]\n setForS.extend(allSets[i + 1:])\n\n # we convert rows or sets of rows, into a common array S\n S = np.concatenate(setForS)\n\n # parralel conversion for making a set of corresponding labels\n yForS = ySets[0:i]\n yForS.extend(ySets[i + 1:])\n yForS_1 = np.concatenate(yForS)\n\n yForT = yForT.reshape( (yForT.shape[0],) )\n yS = yForS_1.reshape( (yForS_1.shape[0],) )\n # for dual svm using sklearn\n if algorithmType == \"primal\":\n # clf is the classifier object for performing learning, testing and\n # more\n clf = svm.LinearSVC(C=kwargs[\"C\"], dual=False)\n clf.fit(S, yS)\n # get the error on the set in T\n # (see documentation for details)\n z[i] = 1 - clf.score(T, yForT)\n\n # for primal svm using sklearn\n elif algorithmType == \"dual\":\n clf = svm.SVC(\n kernel=\"rbf\",\n gamma=kwargs[\"gamma\"],\n C=kwargs[\"C\"])\n\n clf.fit(S, yS)\n z[i] = 1 - clf.score(T, yForT)\n return z","sub_path":"scripts/kfoldcv.py","file_name":"kfoldcv.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"229228202","text":"from grid import Grid\nfrom matrix import Matrix\nfrom pixel import Pixel\n\n# Draw Lines Function\ndef draw_lines(grid, matrix):\n end = len( matrix.matrix )\n i = 0\n\n while ( i < end ):\n draw_line(grid,\n matrix.matrix[i][0],\n matrix.matrix[i][1],\n matrix.matrix[i + 1][0],\n matrix.matrix[i + 1][1],\n matrix.matrix[i + 2])\n\n i += 3\n\n# Draw Line Function\ndef draw_line(grid, x0, y0, x1, y1, pixel):\n # Intial Computations\n delta_x = 1.0 * (x1 - x0) # Ensure Floating Point\n delta_y = 1.0 * (y1 - y0) # Ensure Floating Point\n\n # When Delta X is NOT 0\n if (delta_x != 0):\n slope = 1.0 * (delta_y / delta_x) # Ensure Floating Point\n else:\n draw_line_deltax_zero(grid, x0, y0, x1, y1, pixel)\n return\n\n # When Delta Y is 0\n if (delta_y == 0):\n draw_line_deltay_zero(grid, x0, y0, x1, y1, pixel)\n return\n\n A = 2 * delta_y\n B = -2 * delta_x\n x = x0\n y = y0\n\n # NOTE: Sorry for the weird commenting (conditons) :(\n # I usually comment above the \"if\" or \"elif\"\n # However, tabbing breaks if I try to put a comment on the line\n # before the \"elif\" :'(\n # (I have no idea why...)\n\n if ( (slope > 0) and (slope <= 1) ):\n # Octants 1 and 5\n # Condtion(s): 0 < slope <= 1\n\n diff = A + (B / 2) # Difference\n\n while ( x <= x1 ):\n grid.plot(x, y, pixel)\n\n if ( diff > 0 ):\n y += 1\n diff += B\n\n x += 1\n diff += A\n elif ( slope > 1 ):\n # Octants 2 and 6\n # Condition(s): 1 < slope < infinity\n\n diff = (A / 2) + B # Difference\n\n while ( y <= y1 ):\n grid.plot(x, y, pixel)\n\n if ( diff < 0 ):\n x += 1\n diff += A\n\n y += 1\n diff += B\n elif ( slope < -1 ):\n # Octants 3 and 7\n # Condition(s): -infinity < slope < -1\n\n diff = (A / 2) - B # Difference\n\n while ( y >= y1 ):\n grid.plot(x, y, pixel)\n\n if 
( diff > 0 ):\n x += 1\n diff += A\n\n y -= 1\n diff -= B\n else:\n # Everything Else (Octants 4 and 8)\n\n diff = A - (B / 2) # Difference\n\n while ( x <= x1 ):\n grid.plot(x, y, pixel)\n\n if ( diff < 0 ):\n y -= 1\n diff -= B\n\n x += 1\n diff += A\n\n# Draw Line (When Delta X = 0)\ndef draw_line_deltax_zero(grid, x0, y0, x1, y1, pixel):\n # Initial Variables\n x = x0\n y = y0\n\n # Reverse if y1 < y0\n if (y1 < y0):\n draw_line_deltax_zero(grid, x1, y1, x0, y0, pixel)\n return\n\n # Draw the Line\n while (y <= y1):\n grid.plot(x, y, pixel)\n\n y += 1\n\n# Drawl Line (When Delta Y = 0)\ndef draw_line_deltay_zero(grid, x0, y0, x1, y1, pixel):\n # Initial Variables\n x = x0\n y = y0\n\n # Reverse if x1 < x0\n if (x1 < x0):\n draw_line_deltay_zero(grid, x1, y1, x0, y0, pixel)\n return\n\n # Draw the Line\n while (x <= x1):\n grid.plot(x, y, pixel)\n\n x += 1\n","sub_path":"graphics/transformations/4/norman_li/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"324373343","text":"\"\"\"\r\nZach Vincent 2019\r\nAdam's Crossword Program\r\nLast updated 03/22/19\r\nv1.0\r\nFuture updates: Save matched pairs to an array and remove one of a pair that appears twice\r\n\"\"\"\r\nimport sys\r\n\r\nx, user_input_first_letter, user_input_second_letter = sys.argv\r\nwords = ['aaron', 'baron', 'packer', 'packed', 'packers', 'xkhd', 'xkhc']\r\ntemp_word_index = 0\r\ntemp_new_searchable_word = ''\r\ntemp_selected_word_index = 0\r\ntemp_selected_word = ''\r\npossible_words = []\r\n\r\ndef search_found_letter(selected_letter, unselected_letter, selected_index, selected_word):\r\n\tfor other_word in words:\r\n\t\ttemp_word_index = words.index(other_word)\r\n\t\ttemp_selected_word_index = words.index(selected_word)\r\n\t\ttemp_new_searchable_word = words[temp_word_index][:selected_index] + words[temp_word_index][selected_index+1:]\r\n\t\ttemp_selected_word = words[temp_selected_word_index][:selected_index] + words[temp_selected_word_index][selected_index+1:]\r\n\r\n\t\tif (len(words[temp_word_index]) == len(selected_word)):\r\n\t\t\tif (words[temp_word_index][selected_index] == unselected_letter):\r\n\t\t\t\tif (selected_word == other_word):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telif (temp_selected_word == temp_new_searchable_word):\r\n\t\t\t\t\tprint(selected_word + ' ==> ' + other_word)\r\n\t\t\t\r\n\r\ndef find_usable_words(first_letter, second_letter):\r\n\tfor word in words:\r\n\t\tindex = 0\r\n\t\tfor letter in word:\r\n\t\t\tif (letter == first_letter):\r\n\t\t\t\tsearch_found_letter(first_letter, second_letter, index, word)\r\n\t\t\t\tindex += 1\r\n\t\t\telif (letter == second_letter):\r\n\t\t\t\tsearch_found_letter(second_letter, first_letter, index, word)\r\n\t\t\t\tindex += 1\r\n\t\t\telse:\r\n\t\t\t\tindex += 1\r\n\r\nfind_usable_words(user_input_first_letter, user_input_second_letter)","sub_path":"AdamsCrosswordProgram_FUNCTIONAL.py","file_name":"AdamsCrosswordProgram_FUNCTIONAL.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"370832102","text":"import json\n\n''' This class takes path to config file and creates ConfConfig\nobject, has useful methods for retrieving some information about\nvhosts and ip/port tuples\n'''\n\n\nclass Config:\n \"\"\" Initialize config object and json data\n in dictionary\n \"\"\"\n\n def __init__(self, config_file_path):\n\n with 
open(config_file_path) as data_file:\n            self.data = json.load(data_file)\n\n        # keep log path\n        self.log = self.data[\"log\"]\n\n        # keep all vhost objects identified by\n        # (ip,port) tuple key\n        self.virtual_hosts = {}\n\n        # keep all ip,port tuples\n        self.ip_ports = list()\n\n        for vhost in self.data[\"server\"]:\n            ip = str(vhost[\"ip\"])\n            port = str(vhost[\"port\"])\n            self.ip_ports.append((ip, port))\n            if ip + \":\" + port not in self.virtual_hosts:\n                self.virtual_hosts[ip + \":\" + port] = list()\n            self.virtual_hosts[ip + \":\" + port].append(vhost)\n\n        # make unique ip,port tuples\n        self.ip_ports = list(set(self.ip_ports))\n\n    def get_log(self):\n        ''' Return the path to the log directory. '''\n        return self.log\n\n    def get_vhost(self, ip, port):\n        ''' Return the ip, port, vhost and logfile parameters for a specific\n        ip/port tuple.\n        '''\n        return self.virtual_hosts[str(ip) + \":\" + str(port)]\n\n    def get_vhost_names(self):\n        ''' Return all vhost names this server has. '''\n        result = list()\n        for key in self.virtual_hosts:\n            vhost_list = self.virtual_hosts[key]\n            for vhost in vhost_list:\n                if \"vhost\" in vhost:\n                    result.append(vhost[\"vhost\"])\n        result = list(set(result))\n\n        return result\n\n    def get_ip_ports(self):\n        ''' Return all distinct (ip, port) tuples. '''\n        return self.ip_ports\n\n    def get_domain_vhost(self, ip, port, host):\n        ''' Return the vhost body matching the given ip, port and domain name. '''\n\n        # get list of vhost bodies by ip,port\n        vhosts = self.get_vhost(ip, port)\n\n        # choose the one with host domain name from this list\n        for vhost in vhosts:\n            if vhost[\"vhost\"] == host:\n                return vhost\n\n        # if nothing was found\n        return None\n\n    def vhost_exists(self, vhost):\n        ''' Check whether this server owns the given vhost name. '''\n        lst = self.get_vhost_names()\n\n        return vhost in lst","sub_path":"HTTP Server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"155238578","text":"bind = '0.0.0.0:5000'\n# worker type. 
comment out to use async gevent instead of the default sync\n#worker_class = 'gevent'\naccess_log_format = '\"%({x-real-ip}i)s\" %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\"'\naccesslog = '/var/log/proxy/proxy.access.log'\nerrorlog = '/var/log/proxy/proxy.error.log'\nloglevel = 'warning'\ntimeout = 120\ngraceful_timeout = 90\nlimit_request_line = 8096\n","sub_path":"docker_files/gunicorn.py","file_name":"gunicorn.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"316109273","text":"import socket\n\nMAXBYTES = 65535\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('127.0.0.1', 50000))\n while True:\n data, address = sock.recvfrom(MAXBYTES)\n new_data = data.decode()\n n = int(new_data) % 2\n if n == 0:\n new_data = 'Par'\n else:\n new_data = 'Impar'\n sock.sendto(new_data.encode(), address)\n sock.close()\n return 0\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Servidor_echo_exe_02.py","file_name":"Servidor_echo_exe_02.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"1788522","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport os\n\nfor i in range(1, 37):\n pageUrl = 'http://rating.chgk.info/api/teams/?page='+str(i)\n r = requests.get(pageUrl)\n \n with open('F:\\\\test5.xml', 'w') as output_file:\n output_file.write(r.text)\n\n lines = []\n with open('F:\\\\test5.xml', 'r+') as f:\n lines = f.readlines()\n\n IDTeamList = []\n teamNameList = []\n teamTownList = []\n regionNameList = []\n countryNameList = []\n\n for line in lines:\n if(line.startswith(' \"idteam\": \"')):\n print(line[len(' \"idteam\": \"'):-3])\n IDTeamList.append(line[len(' \"idteam\": \"'):-3])\n continue\n if(line.startswith(' \"town\": \"')):\n print(line[len(' \"town\": \"'):-3])\n teamTownList.append(line[len(' \"town\": \"'):-3])\n continue\n if(line.startswith(' \"name\": \"')):\n print(line[len(' \"name\": \"'):-3])\n teamNameList.append(line[len(' \"name\": \"'):-3])\n continue\n if(line.startswith(' \"region_name\": \"')):\n print(line[len(' \"region_name\": \"'):-3])\n regionNameList.append(line[len(' \"region_name\": \"'):-3])\n continue\n if(line.startswith(' \"country_name\": \"')):\n print(line[len(' \"country_name\": \"'):-2])\n countryNameList.append(line[len(' \"country_name\": \"'):-2])\n continue\n \n data = {'IdTeam' : IDTeamList,\n 'Name': teamNameList,\n 'Town': teamTownList,\n 'Region name': regionNameList,\n 'Country name': countryNameList}\n \n dataFrame = pd.DataFrame(data = data)\n dataFrame.set_index('IdTeam', drop=True, inplace=True)\n\n filePath = 'F:\\\\teams.csv'\n if not os.path.isfile(filePath):\n dataFrame.to_csv(filePath, sep='\\t', encoding='utf-16')\n else:\n dataFrame.to_csv(filePath, encoding='utf-16', mode='a', sep='\\t', header=False)\n","sub_path":"_0. DWH/Projects/Viktoriya_Gruzitskaya/dwso/sources/scripts for data extract/TeamParser.py","file_name":"TeamParser.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"339720031","text":"#!/usr/bin/env python2\n\n''' Youtube-dlG module to download videos & handle each download. 
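\n\nMinimal usage sketch (illustrative; the urls list and opt_manager object are assumptions, the constructors are as defined below):\n\n    threads = [DownloadThread(url, i, opt_manager) for i, url in enumerate(urls)]\n    manager = DownloadManager(threads)\n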
'''\n\nimport time\nfrom threading import Thread\n\nfrom wx import CallAfter\nfrom wx.lib.pubsub import setuparg1\nfrom wx.lib.pubsub import pub as Publisher\n\nfrom .OptionsParser import OptionsParser\nfrom .DownloadObject import DownloadObject\n\nfrom .utils import (\n get_youtubedl_filename,\n fix_path\n)\n\n\nclass DownloadManager(Thread):\n\n '''\n Manage youtube-dlG download list.\n\n Params\n threads_list: Python list that contains DownloadThread objects.\n\n update_thread: UpdateThread.py thread.\n \n Accessible Methods\n close()\n Params: None\n\n Return: None\n\n add_thread()\n Params: DownloadThread object\n\n Return: None\n\n alive_threads()\n Params: None\n\n Return: Number of alive threads.\n\n not_finished()\n Params: None\n\n Return: Number of threads not finished yet.\n\n Properties\n successful_downloads: Number of successful downloads.\n time: Time (seconds) it took for all downloads to complete.\n '''\n\n PUBLISHER_TOPIC = 'dlmanager'\n MAX_DOWNLOAD_THREADS = 3\n\n def __init__(self, threads_list, update_thread=None):\n super(DownloadManager, self).__init__()\n self.threads_list = threads_list\n self.update_thread = update_thread\n self._successful_downloads = 0\n self._running = True\n self._time = 0\n self.start()\n\n def run(self):\n if self.update_thread is not None:\n self.update_thread.join()\n \n self._time = time.time()\n\n # Main loop\n while self._running and not self._threads_finished():\n for thread in self.threads_list:\n if not self._running:\n break\n\n self._start_thread(thread)\n \n time.sleep(0.1)\n\n # Make sure no child thread is alive\n for thread in self.threads_list:\n if thread.is_alive():\n thread.join()\n\n # Collect thread status\n if thread.status == 0:\n self._successful_downloads += 1\n\n self._time = time.time() - self._time\n\n if not self._running:\n self._callafter('closed')\n else:\n self._callafter('finished')\n\n @property\n def time(self):\n ''' Return time it took for every download to finish. '''\n return self._time\n\n @property\n def successful_downloads(self):\n ''' Return number of successful downloads. '''\n return self._successful_downloads\n\n def close(self):\n ''' Close DownloadManager. '''\n self._callafter('closing')\n self._running = False\n for thread in self.threads_list:\n thread.close()\n\n def add_thread(self, thread):\n ''' Add new DownloadThread on self.threads_list. '''\n self.threads_list.append(thread)\n\n def alive_threads(self):\n ''' Return number of alive threads in self.threads_list. '''\n counter = 0\n\n for thread in self.threads_list:\n if thread.is_alive():\n counter += 1\n\n return counter\n\n def not_finished(self):\n ''' Return number of threads not finished. '''\n counter = 0\n\n for thread in self.threads_list:\n if thread.ident is None or thread.is_alive():\n counter += 1\n\n return counter\n\n def _start_thread(self, thread):\n ''' Start given thread if not download queue full. '''\n while self.alive_threads() >= self.MAX_DOWNLOAD_THREADS:\n time.sleep(1)\n\n if not self._running:\n break\n\n # If thread has not started\n if thread.ident is None and self._running:\n thread.start()\n\n def _threads_finished(self):\n ''' Return True if all threads in self.threads_list have finished. '''\n for thread in self.threads_list:\n # If thread has not started or thread is alive\n if thread.ident is None or thread.is_alive():\n return False\n\n return True\n\n def _callafter(self, data):\n ''' CallAfter wrapper. 
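Ensures the pubsub message is delivered on the wx main thread. 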
'''\n CallAfter(Publisher.sendMessage, self.PUBLISHER_TOPIC, data)\n\n\nclass DownloadThread(Thread):\n\n '''\n DownloadObject Thread wrapper for youtube-dlg.\n\n Params\n url: Video url to download.\n index: ListCtrl corresponding row for current thread.\n opt_manager: OptionsManager.OptionsManager object.\n log_manager: Any logger which implements log().\n\n Accessible Methods\n close()\n Params: None\n\n Return: None\n\n Properties\n status: Thread status.\n '''\n\n PUBLISHER_TOPIC = 'dlthread'\n\n def __init__(self, url, index, opt_manager, log_manager=None):\n super(DownloadThread, self).__init__()\n self.url = url\n self.index = index\n self.opt_manager = opt_manager\n self.log_manager = log_manager\n self._downloader = None\n self._status = 0\n\n def run(self):\n self._downloader = DownloadObject(\n self._get_youtubedl_path(),\n self._data_hook,\n self.log_manager\n )\n\n options = OptionsParser(self.opt_manager).parse()\n\n return_code = self._downloader.download(self.url, options)\n\n if return_code == DownloadObject.OK:\n self._callafter({'status': 'Finished'})\n elif return_code == DownloadObject.ERROR:\n self._callafter({'status': 'Error', 'speed': '', 'eta': ''})\n self._status = 1\n elif return_code == DownloadObject.STOPPED:\n self._callafter({'status': 'Stopped', 'speed': '', 'eta': ''})\n self._status = 1\n elif return_code == DownloadObject.ALREADY:\n self._callafter({'status': 'Already-Downloaded'})\n\n @property\n def status(self):\n ''' Return thread status. Use this property after\n thread has joined. (self._status != 0) indicates there was\n an error.\n '''\n return self._status\n\n def close(self):\n ''' Close download thread. '''\n if self._downloader is not None:\n self._downloader.stop()\n\n def _data_hook(self, data):\n ''' Merge playlist_info with data['status'] and\n pass data to self._callafter.\n '''\n playlist_info = ''\n\n if data['playlist_index'] is not None:\n playlist_info = data['playlist_index']\n playlist_info += '/'\n playlist_info += data['playlist_size']\n\n if data['status'] is not None:\n data['status'] = data['status'] + ' ' + playlist_info\n\n self._callafter(data)\n\n def _callafter(self, data):\n ''' Add self.index on data and send data back to caller. '''\n data['index'] = self.index\n CallAfter(Publisher.sendMessage, self.PUBLISHER_TOPIC, data)\n\n def _get_youtubedl_path(self):\n ''' Retrieve youtube-dl path. 
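Joins the configured directory with the platform-specific youtube-dl filename. 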
'''\n path = self.opt_manager.options['youtubedl_path']\n path = fix_path(path) + get_youtubedl_filename()\n return path\n","sub_path":"youtube_dl_gui/DownloadThread.py","file_name":"DownloadThread.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"137777724","text":"import unittest\r\n\r\nclass Validator:\r\n\tdef __init__(self,validation_dictionary):\r\n\t\t\"\"\"\r\n\t\tconstructor set validation specifications\r\n\t\t\"\"\"\r\n\t\tself.__validation_dictionary=validation_dictionary\r\n\t\t\r\n\tdef validate(self,record):\r\n\t\t\"\"\"\r\n\t\tcheck if content of record is in the specification format\r\n\t\trecord -- input data to validate\r\n\t\t\"\"\"\r\n\t\t\r\n\t\tif type(record) != type(self.__validation_dictionary):\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\tfor key in self.__validation_dictionary.keys():\r\n\t\t\tif self.__validation_dictionary[key]!=None:\r\n\t\t\t\tif len(record[key])!= self.__validation_dictionary[key]:\r\n\t\t\t\t\treturn False\t\t\t\r\n\t\t\telse:\t\r\n\t\t\t\tif record[key]=='':\r\n\t\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\r\nclass Test(unittest.TestCase):\t\t\r\n\tdef test_Validator(self):\r\n\t\t\"\"\"\r\n\t\tunittest for validator class test coverage \r\n\t\t\"\"\"\r\n\t\tselected_fields_validation={'CMTE_ID': None,'NAME': None,'ZIP_CODE':5,'TRANSACTION_DT':4,'TRANSACTION_AMT':None,'OTHER_ID': 0}\r\n\t\tvalidator=Validator(selected_fields_validation)\r\n\t\t\r\n\t\t#record has different data type of specifications \r\n\t\trecord=0\r\n\t\tself.assertFalse(validator.validate(record))\r\n\r\n\t\trecord=''\r\n\t\tself.assertFalse(validator.validate(record))\r\n\t\t\r\n\t\t#empty dictionary\r\n\t\trecord={}\r\n\t\tself.assertFalse(validator.validate(record))\r\n\r\n\t\t#different format\r\n\t\trecord={'x':0,'y':1}\r\n\t\tself.assertFalse(validator.validate(record))\r\n\t\t\r\n\t\t#dictionary with missing keys \r\n\t\trecord={'CMTE_ID': 'C00384818','ZIP_CODE':'23529','TRANSACTION_DT':'2018','TRANSACTION_AMT':'333','OTHER_ID': ''}\r\n\t\tself.assertFalse(validator.validate(record))\r\n\t\t\r\n\t\t#record has ZIP_CODE of size less than cofiguration specifications \r\n\t\trecord={'CMTE_ID': 'C00384818','NAME': 'Wessam','ZIP_CODE':'3529','TRANSACTION_DT':'2018','TRANSACTION_AMT':'333','OTHER_ID': ''}\r\n\t\tself.assertFalse(validator.validate(record))\r\n\t\t\r\n\t\t#record has ZIP_CODE of size less than cofiguration specifications \r\n\t\trecord={'CMTE_ID': 'C00384818','NAME': 'Wessam','ZIP_CODE':'3529','TRANSACTION_DT':'2018','TRANSACTION_AMT':'333','OTHER_ID': ''}\r\n\t\tself.assertFalse(validator.validate(record))\r\n\r\n\t\t#record in format \r\n\t\trecord={'CMTE_ID': 'C00384818','NAME': 'Wessam','ZIP_CODE':'23529','TRANSACTION_DT':'2018','TRANSACTION_AMT':'333','OTHER_ID': ''}\r\n\t\tself.assertTrue(validator.validate(record))\r\n\t\r\nif __name__==\"__main__\":\r\n\tunittest.main()\r\n\t\t\t\t","sub_path":"insight_testsuite/temp/src/Analytic/Validator/Validator.py","file_name":"Validator.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"111790511","text":"from rdflib import Graph, Literal, Namespace, URIRef\nfrom rdflib.namespace import RDF, FOAF, RDFS, OWL\nimport pandas as pd\n\ng = Graph()\nex = Namespace(\"http://example.org/\")\nRecipe = Namespace(\"http://schema.org/Recipe/\")\n\ng.bind(\"ex\", ex)\ng.bind(\"recipe\", Recipe)\n\n#Read data\ncsv_data = 
pd.read_csv(\"recipes.csv\")\n\n#Replace quotations\ncsv_data = csv_data.replace(to_replace='\"', value=\"\", regex=True) \n\n#Replace commas\ncsv_data = csv_data.replace(to_replace=\",\", value=\"\", regex=True) \n\n#Replace space with underscore\ncsv_data[\"Title\"] = csv_data[\"Title\"].replace(to_replace=\" \", value=\"_\", regex=True) \n\n#Replace underscore with space for column Directions\n#csv_data[\"Directions\"] = csv_data[\"Directions\"].replace(to_replace=\"_\", value=\" \", regex=True) \n\n#Fill empty cells with unknown\ncsv_data = csv_data.fillna(\"unknown\")\n\n#Add ingredient-, quantity-, and unitstring to list \ningredientList = []\nquantityList = []\nunitList = []\n\nfor i in range(1, 20):\n ingredientList.append(\"Ingredient\" + str(i)) \n quantityList.append(\"Quantity\" + str(i))\n unitList.append(\"Unit\" + str(i))\n\n#1. Title\n#2. Directions\n#3. Quantity01\n#4. Unit01\n#5. Ingredient01\n#6. Quantity02\n#7. Unit02\n#8. Ingredient02\n# ...\n#20. Category\n\n\n#TODO \n#Place the values into approptriate RDF literals (replace ex)\n\n# RECIPE SETUP:\n# Give title an appropriate namespace (Recipe or such)\n# Give each ingredient in the recipe its own node (Ingredient has a literal)\n# Place quantity and unit in the ingredient node (Give them each literals)\n\n#Example (Usage pf schema.org/Recipe):\n# name:Zucchi Patties\n# recipeIngredient: 1 garlic clove\n# recipeIngredient: 1 tablespoon parmesan cheese\n# recipeInstructions: description of recipe\n\n#Example: \n#g.add((ex.Zucchi_Patties, RDF.type, Thing.name))\n#g.add((ex.Zucchi_Patties, Recipe.recipeIngredient, Literal(\"1 garlic clove\")))\n\n#Adding ingredient, quantity and unit to one same node \ndef oneNode(subject, row):\n for i in range(19):\n if (row[ingredientList[i]] != \"unknown\"):\n if (row[quantityList[i]] != \"unknown\"):\n if (row[unitList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(Recipe.recipeIngredient), \n Literal(row[quantityList[i]] + \" \" + row[unitList[i]] + \" \" + row[ingredientList[i]])))\n else:\n g.add((URIRef(ex + subject), URIRef(Recipe.recipeIngredient), \n Literal(row[quantityList[i]] + \" \" + row[ingredientList[i]])))\n\n#Adding ingredient to one node, and unit and quantity to another node\ndef twoNodes(subject, row):\n for i in range(0, 19): \n if (row[ingredientList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(Recipe.recipeIngredient), Literal(row[ingredientList[i]])))\n \n if (row[quantityList[i]] != \"unknown\"):\n if (row[unitList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(ex + \"quantity\"), Literal(row[quantityList[i]] + \" \" + row[unitList[i]])))\n else:\n g.add((URIRef(ex + subject), URIRef(ex + \"quantity\"), Literal(row[quantityList[i]])))\n\n#Adding all of them to three different nodes \ndef threeNodes(subject, row):\n for i in range(0, 19): \n if (row[ingredientList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(Recipe.recipeIngredient), Literal(row[ingredientList[i]])))\n \n if (row[quantityList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(ex + \"quantity\"), Literal(row[quantityList[i]])))\n if (row[unitList[i]] != \"unknown\"):\n g.add((URIRef(ex + subject), URIRef(ex + \"quantity\"), Literal(row[unitList[i]])))\n\n\nfor index, row in csv_data.iterrows():\n subject = row[\"Title\"]\n\n #g.add((URIRef(ex + subject), URIRef(ex + \"title\"), Literal(subject)))\n\n #g.add((URIRef(ex + subject), URIRef(ex + \"directions\"), Literal(row[\"Directions\"])))\n\n g.add((URIRef(ex + subject), RDF.type, 
Recipe.name))\n\n g.add((URIRef(ex + subject), Recipe.recipeInstructions, Literal(row[\"Directions\"])))\n\n oneNode(subject, row)\n\n#Remove nodes marked unknown \ng.remove((None, None, URIRef(\"http://example.org/unknown\")))\n\n# Writing the graph to a file on your system. Possible formats = turtle, n3, xml, nt.\ng.serialize(destination=\"recipe_triples.txt\", format=\"turtle\")\n\nprint(g.serialize(format=\"turtle\").decode())\n\n\n\n","sub_path":"python/csv_to_rdf.py","file_name":"csv_to_rdf.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215041105","text":"f = open('./input/rosalind_gc.txt', 'r')\n\nd={}\nfirst=True\n\nfor line in f:\n if line[0]=='>':\n if not(first):\n d[s]=t\n first=False\n s=line[1:-1]\n t=''\n else:\n t+=line.strip()\nd[s]=t\n\nmaxi=0.\n\ndef comp(s):\n return (s.count('C')+s.count('G'))/float(len(s))\n\nfor k in d:\n if comp(d[k])>maxi:\n maxi=comp(d[k])\n res=k\n\nprint('%s\\n%f%%' % (res, maxi*100))\n\n","sub_path":"Rosalind/AC/GC.py","file_name":"GC.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"145059699","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom tkinter import messagebox\nimport openpyxl\nimport tkinter\nfrom tkinter import *\nimport os\nimport win32com.client as win32\npath_of_file = os.path.dirname(__file__)\nnew_path = os.path.dirname(os.path.abspath(__file__))\nprint(path_of_file)\ncurrent_path = os.getcwd()\n\nwindow = tk.Tk()\nwindow.geometry('800x400')\nwindow.title(\"Automation Sprint Plan Generator\")\n#########################################################################################################\nframe_1 = ttk.Frame()\nlb1 = ttk.Label(master=frame_1,\n text=\"Follow Below Steps to Generate Automaion Sprint Plan\", foreground=\"white\", background=\"black\")\n# , foreground=\"white\", background=\"black\"\nlb1.pack()\n# l.grid(column=0, row=0)\nlb2 = ttk.Label(master=frame_1, relief=RAISED,\n text=\"S1.Will extract all test cases and Planned Test cases for each platform from Sprint Plan workook to a new Temp workook.\", background=\"#2F4F4F\", foreground=\"white\")\n# lb2.config(anchor=LEFT)\nlb2.pack()\n#lb2.place(x=0, y=20)\n# lb2.place(anchor=E)\n# frame_1.pack()\n# l1.grid(column=0, row=1)\n# , background=\"#2F4F4F\"\n\n#######################################################################################################\nframe_2 = ttk.Frame()\nlb3 = ttk.Label(master=frame_2, text=\"Enter the Name of Sprint Plan Worbook\")\nlb3.grid(column=0, row=0)\n# lbl.pack()\nplan_workook = ttk.Entry(master=frame_2, width=40)\nplan_workook.grid(column=1, row=0)\n# plan_workook.pack()\nlb4 = ttk.Label(\n master=frame_2, text=\"Enter Some Random Temp workbook name eg:Veera.xlsx format\")\nlb4.grid(column=0, row=1)\n# 2.pack()\ntemp = ttk.Entry(master=frame_2, width=40)\ntemp.grid(column=1, row=1)\n# temp.pack()\n\n########################################################\n#Will exract all tcs into temp sheet\n#####################################################\ndef testcases():\n wb1 = openpyxl.Workbook()\n wb1.save(filename=current_path + \"\\\\\" + temp.get())\n wb = load_workbook(\n filename=current_path + \"\\\\\" + plan_workook.get())\n plan = wb[\"Automation\"]\n new_sheet = wb1.create_sheet(\"Testcases with Compoent\")\n new_sheet[\"A1\"] = \"Test Script 
ID\"\n new_sheet[\"B1\"] = \"Component\"\n cur_automation_row = 2\n new_sheet_row = 2\n automation_rows = plan.max_row\n while cur_automation_row <= automation_rows:\n component = plan.cell(row=cur_automation_row, column=7).value\n test_script_id = plan.cell(row=cur_automation_row, column=6).value\n if type(test_script_id) == str:\n if test_script_id.find(\",\") >= 0:\n mul_tcs = []\n mul_tcs = test_script_id.split(\",\")\n for each in mul_tcs:\n new_sheet.cell(row=new_sheet_row, column=1).value = each\n new_sheet.cell(row=new_sheet_row,\n column=2).value = component\n new_sheet_row += 1\n else:\n new_sheet.cell(row=new_sheet_row,\n column=1).value = test_script_id\n new_sheet.cell(row=new_sheet_row, column=2).value = component\n new_sheet_row += 1\n cur_automation_row += 1\n wb1.save(filename=current_path + \"\\\\\" +\n temp.get())\n\n###########################################################################\n#Will extract planned TCs across all platforms\n##########################################################################\ndef extract():\n # print(txt.get(), txt1.get())\n testcases()\n #wb1 = openpyxl.Workbook()\n wb1 = load_workbook(filename=current_path + \"\\\\\" + temp.get())\n wb = load_workbook(\n filename=current_path + \"\\\\\" + plan_workook.get())\n # s1=wb.active\n # print(s1)\n print(wb.sheetnames)\n plan = wb[\"Automation\"]\n for row in plan.iter_rows(max_row=1, values_only=True):\n fr = row\n fr = list(fr)\n start = fr.index(\"AXB6\")\n end = start+8\n for platform in range(start, end):\n print(fr[platform])\n for col in plan.iter_cols(min_col=platform+1, max_col=platform+1, values_only=True):\n planned = col\n # print(planned)\n planned = planned[1:]\n new_sheet = wb1.create_sheet(fr[platform])\n new_sheet[\"A1\"] = \"Test Script ID\"\n new_row = 2\n old_row = 1\n for i in planned:\n if i == \"Yes\" or i == \"Planned\":\n # print(plan.cell(row=old_row+1,column=6).value)\n if type(plan.cell(row=old_row+1, column=6).value) == str:\n new_sheet.cell(row=new_row, column=1).value = plan.cell(\n row=old_row+1, column=6).value\n new_row += 1\n old_row += 1\n wb1.save(filename=current_path + \"\\\\\" +\n temp.get())\n\n messagebox.showinfo('Message title', 'Succesfully done S1')\n\n#####################################################################################################\n\n\nframe_3 = ttk.Frame()\nbtn1 = ttk.Button(master=frame_3, text=\"Proceed1\",\n command=extract)\nbtn1.pack()\n\n\nlb5 = ttk.Label(master=frame_3, relief=RAISED, justify=LEFT,\n text=\"S2.Will make sure that each cell has only one Test Case and Removes Dulpicates platform wise in temp workook.\", foreground=\"white\", background=\"#2F4F4F\")\nlb5.pack()\n\n\ndef Remove_Dup():\n wb = load_workbook(filename=current_path + \"\\\\\" +\n temp.get())\n sheets = wb.sheetnames\n sheets = sheets[2:]\n for each_sheet in sheets:\n current_sheet = wb[each_sheet]\n for col in current_sheet.iter_cols(max_col=1, values_only=True):\n tcs = col\n tcs = tcs[1:]\n # print(tcs)\n r = 2\n for each in tcs:\n var = each.find(\",\")\n print(each)\n print(var)\n multi_tcs = []\n if var > 0:\n multi_tcs = each.split(\",\")\n print(multi_tcs)\n current_sheet.cell(row=r, column=1).value = multi_tcs[0]\n print(current_sheet.cell(row=r, column=1).value)\n r += 1\n print(r, \" inside\")\n current_sheet.insert_rows(idx=r, amount=len(multi_tcs)-1)\n for i in range(1, len(multi_tcs)):\n print(i)\n current_sheet.cell(row=r, column=1).value = multi_tcs[i]\n print(current_sheet.cell(row=r, column=1).value)\n 
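# debug: also echo the raw token taken from the comma-split list\n                    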
print(multi_tcs[i])\n r += 1\n print(r, \" ineer loop\")\n else:\n r += 1\n print(r, \" outside\")\n for col in current_sheet.iter_cols(max_col=1, values_only=True):\n tcs = col\n tcs = tcs[1:]\n current_sheet.delete_cols(idx=1)\n # print(cols)\n unique_tc = []\n for each in tcs:\n if each in unique_tc:\n pass\n else:\n unique_tc.append(each)\n r = 2\n current_sheet.cell(row=1, column=1).value = each_sheet + \\\n \"(\" + str(len(unique_tc))+\")\"\n for each in unique_tc:\n current_sheet.cell(row=r, column=1).value = each\n r += 1\n wb.save(filename=current_path + \"\\\\\" +\n temp.get())\n messagebox.showinfo('Message title', 'Succesfully done S2')\n\n\nbtn2 = ttk.Button(master=frame_3, text=\"Proceed2\",\n command=Remove_Dup)\nbtn2.pack()\n\nlb6 = ttk.Label(master=frame_3, relief=RAISED, justify=LEFT,\n text=\"S3.Will map component to all unique TCs to new \\\"Unique_TC\\\" sheet in Planned workbook from temp.xlsx workbook.\", foreground=\"white\", background=\"#2F4F4F\")\nlb6.pack()\n\n\ndef Copy_Unique():\n wb_from = load_workbook(filename=current_path + \"\\\\\" +\n temp.get())\n wb_to = load_workbook(\n filename=current_path + \"\\\\\" + plan_workook.get())\n sheets = wb_from.sheetnames\n sheets = sheets[1:]\n print(sheets)\n # new_sheet=wb1.create_sheet(fr[platform])\n Unique = wb_to.create_sheet(\"Unique_TC\")\n planned_tcs = []\n component_sheet = wb_from[sheets[0]]\n \n all_unique_tc = []\n compo_row=2\n compo_max=component_sheet.max_row\n while compo_row<=compo_max:\n t=component_sheet.cell(row=compo_row,column=1).value\n if t in all_unique_tc:\n pass\n else:\n all_unique_tc.append(t)\n compo_row+=1\n tc_with_component_rows = component_sheet.max_row\n Unique.cell(row=1, column=1).value = \"Test Script ID\"\n Unique.cell(row=1, column=2).value = \"Component\"\n #Compoent mapping to testcases\n r = 2\n for each in all_unique_tc:\n Unique.cell(row=r, column=1).value = each\n final_comp = []\n cur_row = 2\n while cur_row <= tc_with_component_rows:\n tc = component_sheet.cell(row=cur_row, column=1).value\n comp = component_sheet.cell(row=cur_row, column=2).value\n if tc == each:\n #final_comp = final_comp+\",\" + comp\n final_comp.append(comp)\n cur_row += 1\n final_comp = set(final_comp)\n final_comp = list(final_comp)\n final_comp = \",\".join(final_comp)\n Unique.cell(row=r, column=2).value = final_comp\n r += 1\n wb_to.save(\n filename=current_path + \"\\\\\" + plan_workook.get())\n #mapping planed testcases across all platforms\n #print(all_unique_tc)\n sheets = sheets[1:]\n unique_rows = Unique.max_row\n c = 4\n for each_sheet in sheets:\n print(each_sheet)\n current_sheet = wb_from[each_sheet]\n print(current_sheet)\n tcs = []\n for col in current_sheet.iter_cols(max_col=1, min_col=1, values_only=True):\n tcs = col\n\n Unique.cell(row=1, column=c).value = tcs[0]\n tcs = list(tcs[1:])\n print(tcs)\n for each_tc in tcs:\n indx = all_unique_tc.index(each_tc)\n Unique.cell(row=indx+2, column=c).value = \"Yes\"\n c += 1\n wb_to.save(\n filename=current_path + \"\\\\\" + plan_workook.get())\n messagebox.showinfo('Message title', 'Succesfully done S3')\n\n\nbtn3 = ttk.Button(master=frame_3, text=\"Proceed3\",\n command=Copy_Unique)\nbtn3.pack()\n\nlb7 = ttk.Label(master=frame_3, relief=RAISED, justify=LEFT,\n text=\"S4.Will convert xls workook to xlsx format and will map each TCs to respective Test Type and Applicable or Not Applicabe for respective platform.\", foreground=\"white\", 
background=\"#2F4F4F\")\nlb7.pack()\n###########################################################################################################\nframe_4 = ttk.Frame()\nlb8 = ttk.Label(\n master=frame_4, text=\"Enter the Name of Script Mapping Workook i.e downloaded from Automaics\")\nlb8.grid(column=0, row=0)\n# lbl.pack()\nscript_mapping_from = ttk.Entry(master=frame_4, width=40)\nscript_mapping_from.grid(column=1, row=0)\nlb9 = ttk.Label(master=frame_4,\n text=\"Enter the Conversion workook name with xlsx format eg:Veera.xlsx\", foreground=\"black\")\nlb9.grid(column=0, row=1)\nscript_mapping_to = ttk.Entry(master=frame_4, width=40)\nscript_mapping_to.grid(column=1, row=1)\n\n\ndef Conversion():\n file_name = current_path + \"\\\\\" + script_mapping_from.get()\n print(file_name)\n final_name = current_path + \"\\\\\" + script_mapping_to.get()\n print(final_name)\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = excel.Workbooks.Open(file_name)\n wb.SaveAs(final_name, FileFormat=51)\n wb.Close()\n messagebox.showinfo(\n 'Message title', 'Succesfully Converted to xlsx format')\n\n\ndef Mapping():\n Conversion()\n wb_from = load_workbook(\n filename=current_path + \"\\\\\" + plan_workook.get())\n wb_to = load_workbook(filename=current_path + \"\\\\\" +\n script_mapping_to.get())\n print(wb_to.sheetnames)\n #mapping Testtype of component \n map_sheet = wb_to[\"Sheet1\"]\n Auto_tcs=[]\n for col in map_sheet.iter_cols(max_col=1, min_col=1, values_only=True):\n col = col\n Auto_tcs = list(col)\n planned_sheet = wb_from[\"Unique_TC\"]\n planned_sheet.cell(row=1,column=3).value=\"Test Type\"\n unq_row=2\n unq_max=planned_sheet.max_row\n t=\"\"\n while unq_row<=unq_max:\n t=planned_sheet.cell(row=unq_row,column=1).value\n if t in Auto_tcs:\n indx=Auto_tcs.index(t)\n planned_sheet.cell(row=unq_row,column=3).value=map_sheet.cell(row=indx,column=5).value\n else:\n planned_sheet.cell(row=unq_row,column=3).value=\"NA\"\n unq_row+=1\n wb_from.save(\n filename=current_path + \"\\\\\" + plan_workook.get())\n #mapping A/NA for planned TCs accross all platforms\n map_App = wb_to[\"Script_Mapping\"]\n for col in map_App.iter_cols(max_col=1, min_col=1, values_only=True):\n all_TC = col\n for row in map_App.iter_rows(max_row=1, min_row=1, values_only=True):\n headings = row\n run_on_models_col=headings.index(\"RUN ON MODELS\")+1\n print(run_on_models_col)\n platforms = [\"ARRIS-XB6\", \"TECH-XB6\", \"CISCO-XB3\",\n \"ARRIS-XB3\", \"PACE-XF3\", \"PACE-CFG3\", \"TECH-CBR\", \"TECH-XB7\"]\n # print(Auto_tcs)\n \n # print(wb_from.sheetnames)\n overall_column = 4\n for platform in platforms:\n print(\"*************Starting\"+platform +\n \"****************************************\")\n for each in planned_sheet.iter_cols(max_col=overall_column, min_col=overall_column, values_only=True):\n plan_tc = each\n plan_tc=list(plan_tc)\n print(plan_tc)\n li=[]\n li=plan_tc[0].split(\"(\")\n plan_tc=plan_tc[1:]\n planned_sheet.insert_cols(idx=overall_column+1, amount=1)\n r = 2\n counter = 0\n for each in plan_tc:\n tc=\"\"\n if type(each)==str and each==\"Yes\":\n tc=planned_sheet.cell(row=r,column=1).value\n test_type=planned_sheet.cell(row=r,column=3).value\n if tc in all_TC and test_type != \"NA\":\n ind = all_TC.index(tc)\n final = ind+1\n print(final)\n print(map_App.cell(row=final, column=1).value)\n Run_models = map_App.cell(row=final, column=run_on_models_col).value\n # print(Run_models)\n # print(Run_models)\n print(type(Run_models))\n final_list_platform = []\n if type(Run_models) == str:\n 
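# the \"RUN ON MODELS\" cell lists one platform per line, so split it into a list\n                        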
final_list_platform = list(Run_models.split(\"\\n\"))\n print(final_list_platform)\n if platform in final_list_platform:\n planned_sheet.cell(row=r, column=overall_column+1).value = \"A\"\n counter += 1\n else:\n planned_sheet.cell(row=r, column=overall_column+1).value = \"NA\"\n else:\n planned_sheet.cell(row=r, column=overall_column+1).value = \"NA\"\n if test_type == \"NA\":\n planned_sheet.cell(row=r, column=overall_column+1).value = \"NA\"\n r += 1\n planned_sheet.cell(row=1, column=overall_column +\n 1).value = \"A in \"+li[0] + \"(\" + str(counter) + \")\"\n overall_column = overall_column+2\n wb_from.save(\n filename=current_path + \"\\\\\" + plan_workook.get())\n messagebox.showinfo(\n 'Message title', 'Succesfully done S4 and Generated Automation Plan Successfully')\n window.destroy()\n\n\nframe_5 = ttk.Frame()\nbtn4 = ttk.Button(master=frame_5, text=\"Proceed4\",\n command=Mapping)\nbtn4.pack()\n####################################################################################################################\nframe_1.pack()\nframe_2.pack()\nframe_3.pack()\nframe_4.pack()\nframe_5.pack()\nwindow.mainloop()\n","sub_path":"Sprint_Plan_Final.py","file_name":"Sprint_Plan_Final.py","file_ext":"py","file_size_in_byte":15973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"581622522","text":"# free sprites courtesy of Sithjester\n\nimport pygame\nimport gamebox\n\npygame.mixer.music.load(\"sound.wav\")\npygame.mixer.music.play(-1)\n\nflag_sound = pygame.mixer.Sound(\"sound.wav\")\ngoal_sound = pygame.mixer.Sound(\"sound.wav\")\n\ncamera = gamebox.Camera(800,600)\nplayer_one_sheet = gamebox.load_sprite_sheet(\"http://untamed.wild-refuge.net/images/rpgxp/avengers/blackwidow.png\", 4, 4)\nplayer_two_sheet = gamebox.load_sprite_sheet(\"http://untamed.wild-refuge.net/images/rpgxp/avengers/steverogers.png\", 4, 4)\n\n#add monster sprite sheet\nmonster_sheet = gamebox.load_sprite_sheet(\"http://untamed.wild-refuge.net/images/rpgxp/avengers/hulk.png\", 4, 4)\n\n\nplayer_one_frame = 8\nplayer_two_frame = 4\n\n#starting frame of monster looking downards\nmonster_frame = 12\n\ntimer = 30*60\ndirection = 0\ncounter = 0\n\nplayer_one = gamebox.from_image(350, 300, player_one_sheet[player_one_frame])\nplayer_two = gamebox.from_image(450, 300, player_two_sheet[player_two_frame])\n\n#set monster between the two\nmonster = gamebox.from_image(400, 300, monster_sheet[monster_frame])\n# a monster should be bigger than both of them\nmonster.scale_by(1.5)\n\ntouched_monster = 0\n\n\n#blue.png is essentially the blue flag\nplayer_one_flag = [gamebox.from_image(50, 50, \"blue.png\")]\nplayer_one_flag[0].scale_by(0.15)\nplayer_one_goal = gamebox.from_color(100, 300, \"blue\", 10, 600) # goal line for player 1\nplayer_one_counter = 0\n\n\n#red.png is the red flag\nplayer_two_flag = [gamebox.from_image(750, 550, \"red.png\")]\nplayer_two_flag[0].scale_by(0.15)\nplayer_two_counter = 0\nplayer_two_goal = gamebox.from_color(700, 300, \"red\", 10, 600) # goal line for player 2\n\nplayer_one_has_flag = False\nplayer_two_has_flag = False\n\ngame_start = True\ngame_end = False\n\ndef tick(keys):\n global player_one_frame\n global player_two_frame\n global player_one_has_flag\n global player_two_has_flag\n global player_one_counter\n global player_two_counter\n global game_start\n global game_end\n global direction\n global counter\n global timer\n global monster_frame\n global monster_sheet\n global touched_monster\n\n if game_start:\n camera.clear(\"black\")\n 
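# title screen: draw the menu text until the player presses the spacebar\n        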
beginning_title = gamebox.from_text(400, 250, \"CAPTURE THE FLAG\", \"Arial\", 46, \"white\")\n beginning_description = gamebox.from_text(400, 325, \"PRESS SPACEBAR TO BEGIN\", \"Arial\", 46, \"white\")\n\n camera.draw(beginning_title)\n camera.draw(beginning_description)\n\n if pygame.K_SPACE in keys:\n game_start = False\n\n else:\n timer -= 1\n\n camera.clear(\"green\")\n\n player_one_frame += 1\n player_two_frame += 1\n counter += 1\n\n #animate the monster so that he walks in place\n monster_frame +=1\n\n #adjusts hulks speed here.\n #make the monster move ahead\n monster.y +=20\n\n if monster.y == 800:\n monster.y = 0\n\n if monster_frame == 16:\n monster_frame = 12\n if player_one_frame == 12:\n player_one_frame = 8\n if player_two_frame == 8:\n player_two_frame = 4\n if counter % 5 == 0:\n player_one.image = player_one_sheet[player_one_frame+direction*10]\n player_two.image = player_two_sheet[player_two_frame+direction*10]\n #place the monster on screen\n monster.image = monster_sheet[monster_frame + direction*10]\n\n if pygame.K_RIGHT in keys:\n player_one.x += 10\n if pygame.K_LEFT in keys:\n player_one.x -= 10\n if pygame.K_UP in keys:\n player_one.y -= 10\n if pygame.K_DOWN in keys:\n player_one.y += 10\n\n if pygame.K_d in keys:\n player_two.x += 10\n if pygame.K_a in keys:\n player_two.x -= 10\n if pygame.K_w in keys:\n player_two.y -= 10\n if pygame.K_s in keys:\n player_two.y += 10\n\n\n # detect if player 2 touched monster\n if player_two.touches(monster):\n game_end = True\n touched_monster = 2\n\n # detect if player 1 touched monster\n if player_one.touches(monster):\n game_end = True\n touched_monster = 1\n\n\n for flag in player_one_flag:\n if player_two.touches(flag):\n flag_sound.play()\n player_two_has_flag = True\n player_one_flag.remove(flag)\n camera.draw(flag)\n for flag in player_two_flag:\n if player_one.touches(flag):\n flag_sound.play()\n player_one_has_flag = True\n player_two_flag.remove(flag)\n camera.draw(flag)\n\n if player_one.touches(player_one_goal) and player_one_has_flag:\n goal_sound.play()\n new_flag = gamebox.from_image(750, 550, \"red.png\")\n player_two_flag.append(new_flag)\n player_two_flag[0].scale_by(0.15)\n player_one_counter += 1\n player_one_has_flag = False\n\n if player_two.touches(player_two_goal) and player_two_has_flag:\n goal_sound.play()\n new_flag = gamebox.from_image(50, 50, \"blue.png\")\n player_one_flag.append(new_flag)\n player_one_flag[0].scale_by(0.15)\n player_two_counter += 1\n player_two_has_flag = False\n\n player_one_counter_text = gamebox.from_text(75, 75, str(player_one_counter), \"Arial\", 28, \"blue\", italic = True)\n player_two_counter_text = gamebox.from_text(775, 575, str(player_two_counter), \"Arial\", 28, \"red\", italic = True)\n timer_display = gamebox.from_text(400, 25, str(timer // 30) + \" seconds\", \"Arial\", 46, \"black\")\n\n camera.draw(player_one)\n camera.draw(player_two)\n #draw the monster on screen\n camera.draw(monster)\n camera.draw(player_one_goal)\n camera.draw(player_two_goal)\n camera.draw(player_one_counter_text)\n camera.draw(player_two_counter_text)\n camera.draw(timer_display)\n\n if timer == 0:\n game_end = True\n\n if game_end:\n camera.clear(\"black\")\n game_over_text = gamebox.from_text(400, 250, \"GAME OVER\", \"Arial\", 46, \"white\")\n continue_text = gamebox.from_text(400, 400, \"PRESS ENTER TO REPLAY\", \"Arial\", 46, \"white\")\n\n\n # player one had touched monster\n if touched_monster == 1:\n win_text = gamebox.from_text(400, 325, \"PLAYER 2 wins as PLAYER 1 was eaten 
by monster\", \"Arial\", 30, \"white\")\n # player 2 had touched monster\n elif touched_monster == 2:\n win_text = gamebox.from_text(400, 325, \"PLAYER 2 wins as PLAYER 1 was eaten by monster\", \"Arial\", 30, \"white\")\n elif player_one_counter > player_two_counter:\n win_text = gamebox.from_text(400, 325, \"PLAYER 1 WINS WITH \" + str(player_one_counter) + \" FLAGS\", \"Arial\", 46, \"white\")\n elif player_two_counter > player_one_counter:\n win_text = gamebox.from_text(400, 325, \"PLAYER 2 WINS WITH \" + str(player_two_counter) + \" FLAGS\", \"Arial\", 46, \"white\")\n else:\n win_text = gamebox.from_text(400, 325, \"TIE\", \"Arial\", 46, \"white\")\n\n camera.draw(game_over_text)\n camera.draw(win_text)\n camera.draw(continue_text)\n\n # reset the numbers\n if pygame.K_RETURN in keys:\n game_end = False\n game_start = True\n timer = 30 * 60\n player_one_counter = 0\n player_two_counter = 0\n touched_monster = 0\n player_one.x = 350\n player_one.y = 300\n player_two.x = 450\n player_two.y = 300\n\n camera.display()\n\nticks_per_second = 30\n\ngamebox.timer_loop(ticks_per_second, tick)\n\n\n","sub_path":"sprites/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"131092345","text":"# coding: utf-8\nfrom __future__ import division, print_function\nfrom numpy import pi\nimport numpy as np\nfrom numpy import random \nimport time\nfrom collections import deque\nimport os\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.layers import Dense, Conv1D\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.optimizers import Adam\nfrom keras.utils import plot_model\nfrom collections import deque\nfrom keras import backend as K\nimport tensorflow as tf \n\nclass DQN:\n def __init__(self,current_id,learning_rate=0.01, state_size=4, action_size=2, hidden_size=64):\n self.state_size = state_size\n self.action_size = action_size\n self.model = Sequential()\n self.model.add(Dense(state_size,activation='relu',input_shape=(1,state_size)))\n self.model.add(Dense(hidden_size, activation='relu'))\n self.model.add(Dense(action_size, activation ='linear'))\n self.optimizer = Adam(lr=learning_rate) # 誤差を減らす学習方法はAdam\n self.model.compile(loss='mean_squared_error', optimizer=self.optimizer)\n self.id = current_id\n def replay(self,memory,batch_num,gamma,targetQN):\n state_minibatch = np.zeros((batch_num,self.state_size))\n y_minibatch = np.zeros((batch_num,self.action_size))\n batch = memory.sample(batch_num)\n\n for i in range(batch_num):\n #[ seq..., action, reward, seq_new]\n s_j= batch[i,0:self.state_size]\n a_j = int(batch[i,self.state_size])\n r_j = batch[i, self.state_size+1]\n s_dash_j= batch[i,(self.state_size+2):(self.state_size*2+2)].reshape(-1,1,self.state_size)\n y_j = self.model.predict(s_j.reshape(-1, 1,self.state_size))[0,0]\n y_j[a_j]=( r_j+ gamma * np.max(targetQN.model.predict(s_dash_j)))\n \n state_minibatch[i,:]=s_j\n y_minibatch[i,:]=y_j\n state_minibatch=state_minibatch.reshape(batch_num,1,self.state_size)\n y_minibatch=y_minibatch.reshape(batch_num,1,self.action_size)\n self.model.fit(state_minibatch, y_minibatch,batch_size=int(batch_num/10),epochs=10,verbose=0)\n loss = self.model.evaluate(state_minibatch, y_minibatch,batch_size=int(batch_num/10),verbose=0)\n return loss\n def load_model(self, name_y, name_w):\n f_model = '../data/'+self.id+'/trained_model'\n print('load model')\n 
json_string = open(os.path.join(f_model, name_y)).read()\n self.model = model_from_json(json_string)\n self.model.load_weights(os.path.join(f_model, name_w))\n def save_model(self,num_episode):\n f_model = '../data/'+self.id+'/trained_model'\n name_j = 'model%d.json'%num_episode\n name_y = 'model%d.yaml'%num_episode\n name_w = 'weights%d.hdf5'%num_episode\n json_string = self.model.to_json()\n yaml_string = self.model.to_yaml()\n print('save the architecture of a model')\n open(os.path.join(f_model,name_j), 'w').write(json_string)\n open(os.path.join(f_model,name_y), 'w').write(yaml_string)\n print('save weights')\n self.model.save_weights(os.path.join(f_model,name_w))","sub_path":"dqn/model/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"64918534","text":"ldict={\r\n 'hello':'otai',\r\n 'hi':'oty',\r\n 'hey':'oai',\r\n 'meet':'reask',\r\n 'whatsup':'imitzio',\r\n 'good':'saiie',\r\n 'how':'roke',\r\n 'you':'oka',\r\n 'do':'irru',\r\n 'next':'iep',\r\n 'first':'oa',\r\n 'second':'ob',\r\n 'bye':'sotai',\r\n 'tell':'sapi',\r\n 'end':'rdn',\r\n 'and':'iu',\r\n 'see':'ryz',\r\n 'sea':'oli',\r\n 'water':'rias',\r\n 'building':'rumpa',\r\n 'attack':'shar',\r\n 'kill':'oiep',\r\n 'destroy':'iros',\r\n 'capture':'suz',\r\n 'save':'imma',\r\n 'survive':'rive',\r\n 'hold':'oos',\r\n 'fly':'sakoz',\r\n 'to':'ot',\r\n}\r\nnew_key = input(\"Enter a word to translate from English to Alien or Alien to English: \")\r\noutput = 0\r\nfor key_word, value_word in ldict.items():\r\n if (key_word == new_key):\r\n output = value_word\r\n elif (value_word == new_key):\r\n output = key_word\r\nprint(output)\r\nif output == 0:\r\n # the word was not found in either direction: ask for its meaning and add it\r\n new_value = input(\"Enter the meaning of the word: \")\r\n ldict[new_key] = new_value\r\n print(\"new word:\", new_key, \"\\nmeaning: \", new_value)","sub_path":"dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"147978788","text":"'''\nExercise : Assignment-2\nimplement the function hangman, which takes one parameter - the secret_word_giv\nthe user is to guess. This starts up an interactive game of Hangman between\nthe user and the computer. Be sure you take advantage of the three helper functions,\nisWordGuessed, getGuessedWord, and getAvailableLetters,\nthat you've defined in the previous part.\n'''\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\nimport random\ndef get_guessed_word(secret_word, letters_guessed):\n \"\"\"This function returns a string that is comprised of letters and underscores, based on what\n letters in letters_guessed are in secret_word.\"\"\"\n str1 = \"\"\n for i in secret_word:\n if i not in letters_guessed:\n str1 += \" _\"\n else:\n str1 += i\n return str1\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''is word guessed? 
'''\n secret_set = set(secret_word)\n intersect = secret_set.intersection(set(letters_guessed))\n return bool(len(intersect) == len(secret_set))\ndef get_available_letters(letters_guessed):\n \"\"\"gives the available letters for guess \"\"\"\n import string\n a_list = list(string.ascii_lowercase)\n \n for i in letters_guessed:\n if i in a_list:\n a_list.remove(i)\n return \"\".join(a_list)\n\nWORD_LIST_FILENAME = \"words.txt\"\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # in_file: file\n in_file = open(WORD_LIST_FILENAME, 'r')\n # line: string\n line = in_file.readline()\n # WORD_LIST: list of strings\n word_list = line.split()\n print(\" \", len(word_list), \"words loaded.\")\n return word_list\n\ndef choose_word(word_list):\n \"\"\"\n WORD_LIST (list): list of words (strings)\n\n Returns a word from WORD_LIST at random\n \"\"\"\n return random.choice(word_list)\n\n# end of helper code\n# -----------------------------------\n\n# Load the list of words into the variable WORD_LIST\n# so that it can be accessed from anywhere in the program\nWORD_LIST = load_words()\n\ndef hangman(secret_word_giv):\n '''\n secret_word_giv: string, the secret word to guess.\n Starts up an interactive game of Hangman.\n * At the start of the game, let the user know how many\n letters the secret_word_giv contains.\n * Ask the user to supply one guess (i.e. letter) per round.\n * The user should receive feedback immediately after each guess\n about whether their guess appears in the computers word.\n * After each round, you should also display to the user the\n partially guessed word so far, as well as letters that the\n user has not yet guessed.\n Follows the other limitations detailed in the problem write-up.'''\n print(\"Welcome to the game, Hangman! \\n\")\n print(\"I am thinking of a word that is\", len(secret_word_giv), \"letters long.\")\n print(\"Please enter only one guess(letter) per round \\n-------------\")\n guess_count = 8\n letters_guessed = []\n while guess_count > 0 and (not is_word_guessed(secret_word_giv, letters_guessed)):\n print(\"You have\", guess_count, \"guesses left.\")\n print(\"Available letters:\", get_available_letters(letters_guessed))\n inp_user = input(\"Please guess a letter:\")\n the_guessed_word = get_guessed_word(secret_word_giv, letters_guessed)\n letters_guessed.append(inp_user)\n if inp_user in the_guessed_word:\n the_guessed_word = get_guessed_word(secret_word_giv, letters_guessed)\n print(\"Oops! You've already guessed that letter:\", the_guessed_word)\n print(\"\\n------------\")\n elif inp_user in secret_word_giv:\n #letters_guessed.append(inp_user)\n the_guessed_word = get_guessed_word(secret_word_giv, letters_guessed)\n print(\"Good guess\", the_guessed_word)\n print(\"\\n------------\")\n #guess_count -= 1\n elif inp_user not in secret_word_giv:\n #letters_guessed.append(inp_user)\n the_guessed_word = get_guessed_word(secret_word_giv, letters_guessed)\n print(\"Oops! That letter is not in my word:\", the_guessed_word)\n print(\"\\n------------\")\n guess_count -= 1\n if is_word_guessed(secret_word_giv, letters_guessed):\n print(\"Congratulations, you won!\")\n print(\"\\n------------\")\n else:\n print(\"Sorry, you ran out of guesses. 
The word was \" + secret_word_giv + \"\\n------------\")\ndef main():\n '''\n Main function for the given program\n When you've completed your hangman function, uncomment these two lines\n and run this file to test! (hint: you might want to pick your own\n secret_word_giv while you're testing)\n '''\n secret_word_giv = choose_word(WORD_LIST).lower()\n secret_word_giv = \"supriya\"\n hangman(secret_word_giv)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cspp1-assignments/m10/p2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221719341","text":"han#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 21 10:49:08 2018\n\n@author: garethjones\n\"\"\"\n\n''' Import Dataset '''\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndirectory = '/Users/garethjones/Documents/Data Analysis/MRRH Oncology Analysis/'\nfile1 = 'Data/Oncology Dataset.csv'\nfile2 = 'Data/Screening Dataset.csv'\norigindata = pd.read_csv(directory+file1, encoding='utf-8')\nscreeningdata = pd.read_csv(directory+file2, encoding='ISO-8859-1')\n\n''' Create Global Variables '''\nbigcancers = ['Breast','KS','Prostate','Stomach','Esophagus']\npalette10 = ['#094675','#AD2E32','#F6E8A5','#A2CFAB','#FCC46B','#E7F5F6','#F8C2CD','#C9CACB','#713F6E','#DBC757']\nplt.rc('font',family='Calibri')\n\n'''Select Relevant Columns'''\ncolumns = ['enrol_d','gender','age','district','cancer','othercancertype','chemotype']\ndata = origindata[columns]\n\n'''Rename Column Headers and Capitalize'''\ndata.columns = ['Appt_Date','Gender','Age','District','Cancer_Type','Other_Cancer_Type','Chemo_Type']\ndata.Cancer_Type = data.Cancer_Type.str.title()\n\n''' Change date format and substring year/month info '''\ndata.loc[:,'Appt_Date'] = pd.to_datetime(data.loc[:,'Appt_Date'])\ndata = data.dropna(how='any',subset=['Appt_Date','Cancer_Type'])\ndata.Appt_Date = data.Appt_Date.apply(lambda x: x.strftime('%d%m%Y'))\nappt_year = []\nappt_month = []\nfor i in data.Appt_Date:\n appt_year.append(i[slice(4,8)])\n appt_month.append(i[slice(2,4)])\ndata['Appt_Year'] = appt_year\ndata['Appt_Month'] = appt_month\n\n''' Remove irrelevant years '''\ndata = data[data['Appt_Year']!='2011']\ndata = data[data['Appt_Year']!='2012']\ndata = data[data['Appt_Year']!='2018']\n\n''' Reorder Columns '''\ndata = data[['Appt_Date','Appt_Year','Appt_Month','Gender','Age','District','Cancer_Type','Other_Cancer_Type','Chemo_Type']]\n\n''' Reformat cancer names '''\nrename_dict = {'Ks':'KS',\n 'Non_Hodgkins_Lymphoma':'Non Hodgkins Lymphoma',\n 'Hodgkins_Lymphoma':'Hodgkins Lymphoma',\n 'Headneck':'Head & Neck',\n 'Rhaydomyo_Sarcoma':'Rhaydomyo Sarcoma',\n 'Yolk_Sac':'Yolk Sac',\n 'Castle_Mans':'Castle Mans',\n np.nan:'Unknown'}\ndata['Cancer_Type'] = data['Cancer_Type'].replace(rename_dict)\n\ndel columns,i,appt_year,appt_month,rename_dict\n\n","sub_path":"01. Data Preparation.py","file_name":"01. 
Data Preparation.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"284609434","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport services.controlers.loggControler\nfrom services.controlers.loggControler import LoggControler\nfrom services.exceptions import *\nfrom services.controlers.controler import Controler\nimport services.querys.damageReportQuery\nfrom services.querys.damageReportQuery import DamageReportQuery\nimport os\nimport hashlib\nimport re\nimport cgi\nimport urllib\nfrom services.tools import *\n\nclass DamageReportsControler(Controler):\n\tdef __init__(self):\n\t\ttry:\n\t\t\tqueryPath=\"services.querys.damageReportQuery.DamageReportQuery()\"\n\t\t\tloggMsg=\"DamageReportsControler\"\n\t\t\tinfoMsg=\"REPORTE_DE_DAÑOS\"\n\t\t\tsuper(DamageReportsControler, self).__init__(queryPath, loggMsg, infoMsg)\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: DamageReportsControler-__init__()', ERROR_NO_DEFINIDO, e.message)\n\n\tdef getByEventId(self, eventId):\n\t\tdamageReportList=[]\n\t\tstate = 300\n\t\ttry:\n\t\t\tobjs = self.queryObj.getByEventId(eventId)\n\t\t\tif objs.count():\n\t\t\t\tfor obj in objs:\n\t\t\t\t\tdamageReportList.append(obj.toDictFront())\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getByEventId()', ERROR_NO_DEFINIDO, e.message)\t\n\t\treturn damageReportList, state\n\t\t\n\tdef\tgetByResource(self, resourceRef, resourceCode, sessionJson):\n\t\tdamageReportList=[]\n\t\tstate = 300\n\t\ttry:\n\t\t\tsessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\tobjs = self.queryObj.getByResource(resourceRef, resourceCode, sessionJson[\"companyIdSession\"])\n\t\t\tif objs.count():\n\t\t\t\tfor obj in objs:\n\t\t\t\t\tdamageReportList.append(obj.toDictFront())\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getByEventId()', ERROR_NO_DEFINIDO, e.message)\t\n\t\treturn damageReportList, state\n\n\n\tdef add(self, eventId, resourceRef, resourceCode, date, responsible, description, amount, \n\t\t\tstatusReportFound, cause, where, withWhat, how, signatureResponsable, \n\t\t\tsignatureCoordinator, sessionJson):\n\t\tmessage=self.infoMsg+\"_NO_REGISTRADO\"\n\t\tstate = 300\n\t\ttry:\n\t\t\tsessionJson[\"userTypeId\"] = int(sessionJson[\"userTypeId\"])\n\t\t\tsessionJson[\"companyIdSession\"] = int(sessionJson[\"companyIdSession\"])\n\t\t\t# Validaciones\n\t\t\tdoRegister, validateMsg = self.getValidate()\n\t\t\t# Registro\n\t\t\tif doRegister == True:\n\t\t\t\tstate, objId = self.queryObj.add(eventId, resourceRef, resourceCode, date, responsible, description, amount, \n\t\t\t\t\tstatusReportFound, cause, where, withWhat, how, signatureResponsable, \n\t\t\t\t\tsignatureCoordinator, sessionJson[\"companyIdSession\"])\n\t\t\t\tif state == OK:\n\t\t\t\t\tmessage = self.infoMsg+\"_REGISTRADO\"\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-add()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn message, state\n\n\tdef edit(self, eventId, resourceRef, resourceCode, date, responsible, description, 
amount, \n\t\t\tstatusReportFound, cause, where, withWhat, how, signatureResponsable, \n\t\t\tsignatureCoordinator, identifier):\n\t\tmessage=self.infoMsg+"_NO_MODIFICADO"\n\t\tstate = 300\n\t\ttry:\n\t\t\tobj = self.queryObj.getById(identifier)\n\t\t\tif eventId is not None:\n\t\t\t\tobj.eventId=eventId\n\t\t\tif resourceRef is not None:\n\t\t\t\tobj.resourceRef=resourceRef\n\t\t\tif resourceCode is not None:\n\t\t\t\tobj.resourceCode=resourceCode\n\t\t\tif date is not None:\n\t\t\t\tobj.date=date\n\t\t\tif responsible is not None:\n\t\t\t\tobj.responsible=responsible\n\t\t\tif description is not None:\n\t\t\t\tobj.description=description\n\t\t\tif amount is not None:\n\t\t\t\tobj.amount=amount\n\t\t\tif statusReportFound is not None:\n\t\t\t\tobj.statusReportFound=statusReportFound\n\t\t\tif cause is not None:\n\t\t\t\tobj.cause=cause\n\t\t\tif where is not None:\n\t\t\t\tobj.where=where\n\t\t\tif withWhat is not None:\n\t\t\t\tobj.withWhat=withWhat\n\t\t\tif how is not None:\n\t\t\t\tobj.how=how\n\t\t\tif signatureResponsable is not None:\n\t\t\t\tobj.signatureResponsable=signatureResponsable\n\t\t\tif signatureCoordinator is not None:\n\t\t\t\tobj.signatureCoordinator=signatureCoordinator\n\t\t\tself.queryObj.edit(obj)\n\t\t\tmessage=self.infoMsg+"_MODIFICADO"\n\t\t\tstate = OK\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-edit()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn message, state\n\n\tdef getValidate(self, **kwargs):\n\t\tvalidate = False\n\t\tvalidateMsg = \"ERROR_EN_VALIDACIÓN\"\n\t\ttry:\n\t\t\t# Add any required validations here\n\t\t\t# If they all pass...\n\t\t\tvalidate = True\n\t\t\tvalidateMsg = \"VALIDACIÓN EXITOSA\"\n\t\texcept Exception as e:\n\t\t\tloggControler = LoggControler()\n\t\t\tloggControler.addLogg('Controler: '+self.loggMsg+'-getValidate()', ERROR_NO_DEFINIDO, e.message)\n\t\treturn validate, validateMsg","sub_path":"services/controlers/damageReportsControler.py","file_name":"damageReportsControler.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"591733397","text":"import os\nimport time\n\nfrom src.color_class import ColorClass\nfrom src.graph import Graph\nfrom src.graph_io import load_graph\nfrom src.partition_refinement import *\n\n\n# def test_branching():\n# graphs = read_list_of_graphs_from_file('../graphs/branching/cubes3.grl')\n# disjoint_graph = graphs[0] + graphs[1]\n#\n# highest_colornum = set_up_colornum(disjoint_graph)\n#\n# start = time.time()\n# print(count_isomorphism([], [], disjoint_graph, highest_colornum))\n# end = time.time()\n#\n# print(\"TIME: \", end - start)\n\n\ndef count_isomorphism(D, I, disjoint_union, highest_colornum):\n for i in range(len(D)):\n D[i].colornum = highest_colornum\n I[i].colornum = highest_colornum\n highest_colornum += 1\n\n disjoint_union = partition_refinement_two(disjoint_union, len(disjoint_union.vertices))\n\n all_color_classes = get_color_classes(disjoint_union)\n\n if not is_balanced(disjoint_union):\n return 0\n if is_bijection(disjoint_union):\n return 1\n num = 0\n\n previous_coloring = get_previous_coloring(disjoint_union)\n\n for C in list_of_colour_classes_bigger_eq_than_4(all_color_classes):\n x = C.vertices[0]\n for i in range(int(len(C.vertices) / 2), len(C.vertices)):\n num += count_isomorphism([x], [C.vertices[i]], disjoint_union,\n get_highest_colornum(all_color_classes) + 1)\n 
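# restore the saved coloring after each recursive call so the next branch starts from a clean refinement\n 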
set_previous_coloring(disjoint_union, previous_coloring)\n break\n\n return num\n\n\ndef set_up_colornum(graph):\n highest_colornum = 0\n for vertex in graph.vertices:\n vertex.colornum = vertex.degree\n if vertex.degree > highest_colornum:\n highest_colornum = vertex.degree + 1\n return highest_colornum\n\n\ndef get_coloring(vertices):\n coloring = {}\n for v in vertices:\n if v.colornum not in coloring:\n coloring[v.colornum] = 1\n else:\n coloring[v.colornum] += 1\n return coloring\n\n\ndef get_color_classes(g):\n all_color_classes = {}\n for v in g.vertices:\n if v.colornum not in all_color_classes:\n all_color_classes[v.colornum] = ColorClass(v.colornum)\n all_color_classes[v.colornum].add_vertex(v)\n return all_color_classes\n\n\ndef is_bijection(graph):\n left_graph = graph.vertices[:int(len(graph.vertices) / 2)]\n right_graph = graph.vertices[-int(len(graph.vertices) / 2):]\n left_coloring, right_coloring = get_coloring(left_graph), get_coloring(right_graph)\n if left_coloring != right_coloring:\n return False\n else:\n for key, value in left_coloring.items():\n if value != 1:\n return False\n return True\n\n\ndef get_highest_colornum(color_classes):\n highest_colornum = 0\n for color_class in color_classes.values():\n if color_class.colornum > highest_colornum:\n highest_colornum = color_class.colornum\n return highest_colornum\n\n\ndef is_balanced(graph):\n left_graph = graph.vertices[:int(len(graph.vertices) / 2)]\n right_graph = graph.vertices[-int(len(graph.vertices) / 2):]\n g_coloring, h_coloring = get_coloring(left_graph), get_coloring(right_graph)\n if g_coloring == h_coloring:\n return True\n else:\n return False\n\n\ndef list_of_colour_classes_bigger_eq_than_4(all_color_classes):\n temp = []\n for color_class in all_color_classes.values():\n if color_class.size() >= 4:\n temp.append(color_class)\n return temp\n\n\ndef get_previous_coloring(graph):\n previous_coloring = {}\n for vertex in graph.vertices:\n previous_coloring[vertex] = vertex.colornum\n return previous_coloring\n\n\ndef set_previous_coloring(graph, previous_coloring):\n for vertex in graph.vertices:\n vertex.colornum = previous_coloring[vertex]\n\n\ndef read_list_of_graphs_from_file(path):\n open_path = os.path.relpath(path, os.path.dirname(__file__))\n with open(open_path, 'r') as file:\n problematic_file = load_graph(file, Graph, True)\n\n return problematic_file[0]\n","sub_path":"src/branching.py","file_name":"branching.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"94829171","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport decimal\nimport datetime\nfrom decimal import Decimal as D\nfrom subprocess import Popen, PIPE\n\ndef main (arg0, argv):\n symbol = \"aapl\"\n close_price = D(\"218.37\")\n cost = D(\"221.27\")\n n = D(4)\n starting_balance = D(\"1003.82\")\n\n file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"data\", symbol)\n\n with Popen([\"tail\", \"-fn0\", file], stdout=PIPE, encoding=\"ascii\", bufsize=0) as p:\n for line in p.stdout:\n dt, price = line.split()\n dt = datetime.datetime.strptime(dt[:len(\"1234-00-00 00:00:00.123456\")], \"%Y-%m-%dT%H:%M:%S.%f\")\n price = round(decimal.Decimal(price), 2)\n print(\n f\"{dt:%l:%M:%S%p}\",\n f\"{price}\",\n f\"{price/close_price-1:.3%}\",\n f\"{price/cost-1:.3%}\",\n f\"{(price-cost)*n/starting_balance:.3%}\",\n sep=\" \",\n )\n\nif __name__ == \"__main__\":\n try:\n c = main(sys.argv[0], sys.argv[1:])\n except 
KeyboardInterrupt:\n c = 1\n except BrokenPipeError:\n sys.stderr.close()\n c = 0\n if c:\n sys.exit(c)\n","sub_path":"projects/apis/iex/sell.py","file_name":"sell.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"497095086","text":"from file_util import *\nfrom midi_util import *\n# from main import *\nfrom model import *\n\n\ncurrent_run_cnt = '0'\ndata_dir = \"./\"\ndata_set = \"data_for_model\"\nruns_dir = \"./\"\nbi = True\nforward_only = False # keep false for training, True for forward pass only\nload_model = None\nload_last = None\n\n\ndef setup_dir():\n print('[*] Setting up directory...')\n\n main_path = runs_dir\n current_run = os.path.join(main_path, current_run_cnt)\n\n files_path = data_dir\n files_path = os.path.join(files_path, data_set)\n\n x_path = os.path.join(files_path, 'inputs')\n y_path = os.path.join(files_path, 'velocities')\n eval_path = os.path.join(files_path, 'eval')\n\n model_path = os.path.join(current_run, 'model')\n logs_path = os.path.join(current_run, 'tmp')\n png_path = os.path.join(current_run, 'png')\n pred_path = os.path.join(current_run, 'predictions')\n\n if not os.path.exists(current_run):\n os.makedirs(current_run)\n if not os.path.exists(logs_path):\n os.makedirs(logs_path)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n if not os.path.exists(png_path):\n os.makedirs(png_path)\n if not os.path.exists(pred_path):\n os.makedirs(pred_path)\n\n dirs = {\n 'main_path': main_path,\n 'current_run': current_run,\n 'model_path': model_path,\n 'logs_path': logs_path,\n 'png_path': png_path,\n 'eval_path': eval_path,\n 'pred_path': pred_path,\n 'x_path': x_path,\n 'y_path': y_path\n }\n\n return dirs\n\n\ndef load_training_data(x_path, y_path, genre):\n X_data = []\n Y_data = []\n names = []\n print('[*] Loading data...')\n\n x_path = os.path.join(x_path, genre)\n y_path = os.path.join(y_path, genre)\n\n for i, filename in enumerate(os.listdir(x_path)):\n if filename.split('.')[-1] == 'npy':\n names.append(filename)\n\n for i, filename in enumerate(names):\n abs_x_path = os.path.join(x_path, filename)\n abs_y_path = os.path.join(y_path, filename)\n loaded_x = np.load(abs_x_path)\n\n X_data.append(loaded_x)\n\n loaded_y = np.load(abs_y_path)\n loaded_y = loaded_y / 127\n Y_data.append(loaded_y)\n assert X_data[i].shape[0] == Y_data[i].shape[0]\n\n return X_data, Y_data\n\n\ndef prepare_data():\n dirs = setup_dir()\n data = {}\n data[\"classical\"] = {}\n data[\"jazz\"] = {}\n\n c_train_X, c_train_Y = load_training_data(dirs['x_path'], dirs['y_path'], \"classical\")\n\n data[\"classical\"][\"X\"] = c_train_X\n data[\"classical\"][\"Y\"] = c_train_Y\n\n j_train_X, j_train_Y = load_training_data(dirs['x_path'], dirs['y_path'], \"jazz\")\n\n data[\"jazz\"][\"X\"] = j_train_X\n data[\"jazz\"][\"Y\"] = j_train_Y\n return dirs, data\n\n\ndirs, data = prepare_data()\n\nnetwork = GenreLSTM(dirs, input_size=176, mini=True, bi=bi)\nnetwork.prepare_model()\n\nif not forward_only:\n if load_model:\n loaded_epoch = load_model.split('.')[0]\n loaded_epoch = loaded_epoch.split('-')[-1]\n loaded_epoch = loaded_epoch[1:]\n print(\"[*] Loading \" + load_model + \" and continuing from \" + loaded_epoch + \".\")\n loaded_epoch = int(loaded_epoch)\n network.train(data, model=load_model, starting_epoch=loaded_epoch + 1)\n else:\n network.train(data)\nelse:\n 
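# forward-only mode: load a previously trained model instead of training\n 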
network.load(load_model)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540993650","text":"# Parse a DEPS file and git checkout all of the dependencies.\n\nimport os\nimport subprocess\nimport sys\nimport threading\n\nDEFAULT_DEPS_PATH = os.path.normpath(\n os.path.join(os.path.dirname(__file__), os.pardir, 'DEPS'))\n\n\n# Find the git executable.\ndef git_executable():\n env_git = os.environ.get('GIT_EXECUTABLE')\n search_list = ['git']\n if env_git:\n search_list.insert(0, env_git)\n with open(os.devnull, 'w') as devnull:\n for git in search_list:\n try:\n subprocess.call([git, '--version'], stdout=devnull)\n except (OSError,):\n continue\n return git\n return None\n\n\ndef status(directory, commit_hash, change):\n def truncate(s, size):\n return s if len(s) <= size else s[:(size - 3)] + '...'\n\n length = 36\n directory = truncate(directory, length)\n commit_hash = truncate(commit_hash, 40)\n symbol = '>' if change else '@'\n sys.stdout.write('%-*s %s %s\\n' % (length, directory, symbol, commit_hash))\n\n\n# Checkout (and clone if needed) a git repository.\n# repo (string) - the location of the repository, suitable for passing to `git clone`.\n# commit_hash (string) - a commit, suitable for passing to `git checkout`\n# directory (string) - the path into which the repository should be checked out.\ndef git_checkout_to_directory(git, repo, commit_hash, directory, verbose):\n if not os.path.isdir(directory):\n subprocess.check_call(\n [git, 'clone', '--no-checkout', repo, directory])\n subprocess.check_call([git, 'checkout', '--quiet', commit_hash],\n cwd=directory)\n if verbose:\n status(directory, commit_hash, True)\n return\n\n with open(os.devnull, 'w') as devnull:\n # If this fails, we will fetch before trying again.\n # Don't spam user with error information.\n if 0 == subprocess.call([git, 'checkout', '--quiet', commit_hash],\n cwd=directory, stderr=devnull):\n # If this succeeds, skip slow `git fetch`.\n if verbose:\n status(directory, commit_hash, False) # success\n return\n\n # If the repo has changed, always force use of the correct repo.\n # If origin already points to repo, this is a quick no-op.\n subprocess.check_call(\n [git, 'remote', 'set-url', 'origin', repo], cwd=directory)\n subprocess.check_call([git, 'fetch', '--quiet'], cwd=directory)\n subprocess.check_call([git, 'checkout', '--quiet', commit_hash], cwd=directory)\n if verbose:\n status(directory, commit_hash, True) # success\n\n\ndef parse_file_to_dict(path):\n dictionary = {}\n exec(open(path).read(), dictionary)\n return dictionary\n\n\n# SHA1 sums are 160 bits, encoded as lowercase hexadecimal.\ndef is_sha1_sum(s):\n return len(s) == 40 and all(c in '0123456789abcdef' for c in s)\n\n\n# Grab dependencies, with optional platform support.\ndef git_sync_deps(deps_file_path, command_line_os_requests, verbose):\n git = git_executable()\n assert git\n\n deps_file_directory = os.path.dirname(deps_file_path)\n deps_file = parse_file_to_dict(deps_file_path)\n dependencies = deps_file['deps'].copy()\n os_specific_dependencies = deps_file.get('deps_os', dict())\n if 'all' in command_line_os_requests:\n # use values() rather than itervalues() so this also runs under Python 3\n for value in os_specific_dependencies.values():\n dependencies.update(value)\n else:\n for os_name in command_line_os_requests:\n # Add OS-specific dependencies.\n if os_name in os_specific_dependencies:\n dependencies.update(os_specific_dependencies[os_name])\n for 
directory in dependencies:\n for other_dir in dependencies:\n if directory.startswith(other_dir + '/'):\n raise Exception('%r is parent of %r' % (other_dir, directory))\n list_of_arg_lists = []\n for directory in sorted(dependencies):\n if not isinstance(dependencies[directory], str):\n if verbose:\n print('skipping \"%s\"' % directory)\n continue\n if '@' in dependencies[directory]:\n repo, commit_hash = dependencies[directory].split('@', 1)\n else:\n raise Exception(\"please specify commit\")\n if not is_sha1_sum(commit_hash):\n raise Exception(\"poorly formed commit hash: %r\" % commit_hash)\n\n relative_directory = os.path.join(deps_file_directory, directory)\n\n list_of_arg_lists.append(\n (git, repo, commit_hash, relative_directory, verbose))\n\n multi_thread(git_checkout_to_directory, list_of_arg_lists)\n\n\ndef multi_thread(function, list_of_arg_lists):\n threads = []\n for args in list_of_arg_lists:\n thread = threading.Thread(None, function, None, args)\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n\n\ndef main(argv):\n deps_file_path = os.environ.get('GIT_SYNC_DEPS_PATH', DEFAULT_DEPS_PATH)\n verbose = not bool(os.environ.get('GIT_SYNC_DEPS_QUIET', False))\n git_sync_deps(deps_file_path, argv, verbose)\n return 0\n\n\nif __name__ == '__main__':\n exit(main(sys.argv[1:]))\n","sub_path":"tools/git_sync_deps.py","file_name":"git_sync_deps.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"534356160","text":"import socket\nimport numpy as np\nfrom keras import models\nimport cv2 as cv\nimport struct\nfrom PIL import Image\nimport io\nimport time\nimport keyboard\n\ndef main():\n port_number = 5002\n\n server = socket.socket()\n server.bind(('0.0.0.0', port_number))\n server.listen(0)\n ack = server.accept()\n connection = ack[0].makefile('rb')\n \n cnnForBack = models.load_model('forward_backward.h5')\n cnnForLeftRight = models.load_model('left_right_forward.h5')\n\n counter = 1\n\n currentAction = 5\n while 1:\n try:\n t1 = time.time()\n im_len = struct.unpack(':`. 
The\ngiven path on the host machine will be mounted under the given path in\nthe command container.\n\"\"\"\n\n\n_CONFIG_PATHS_COERCE_TO_LIST = {\n \"bind_mounts\",\n}\n\nUUID_REGEX = re.compile(\n \"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$\", re.IGNORECASE\n)\n\nCommandTableHeader = OrderedDict(\n [\n (\"id\", \"id\"),\n (\"username\", \"username\"),\n (\"description\", \"description\"),\n (\"state\", \"state\"),\n (\"exitStatus\", \"exitStatus\"),\n (\"resourcePool\", \"resourcePool\"),\n ]\n)\n\nTensorboardTableHeader = OrderedDict(\n [\n (\"id\", \"id\"),\n (\"username\", \"username\"),\n (\"description\", \"description\"),\n (\"state\", \"state\"),\n (\"experimentIds\", \"experimentIds\"),\n (\"trialIds\", \"trialIds\"),\n (\"exitStatus\", \"exitStatus\"),\n (\"resourcePool\", \"resourcePool\"),\n ]\n)\n\nTaskTypeNotebook = \"notebook\"\nTaskTypeCommand = \"command cmd\"\nTaskTypeShell = \"shell\"\nTaskTypeTensorBoard = \"tensorboard\"\n\nRemoteTaskName = {\n TaskTypeNotebook: \"notebook\",\n TaskTypeCommand: \"command\",\n TaskTypeShell: \"shell\",\n TaskTypeTensorBoard: \"tensorboard\",\n}\n\nRemoteTaskLogName = {\n TaskTypeNotebook: \"Notebook\",\n TaskTypeCommand: \"Command\",\n TaskTypeShell: \"Shell\",\n TaskTypeTensorBoard: \"TensorBoard\",\n}\n\nRemoteTaskNewAPIs = {\n TaskTypeNotebook: \"notebooks\",\n TaskTypeCommand: \"commands\",\n TaskTypeShell: \"shells\",\n TaskTypeTensorBoard: \"tensorboards\",\n}\n\nRemoteTaskOldAPIs = {\n TaskTypeNotebook: \"notebooks\",\n TaskTypeCommand: \"commands\",\n TaskTypeShell: \"shells\",\n TaskTypeTensorBoard: \"tensorboard\",\n}\n\nRemoteTaskListTableHeaders = {\n \"notebook\": CommandTableHeader,\n \"command cmd\": CommandTableHeader,\n \"shell\": CommandTableHeader,\n \"tensorboard\": TensorboardTableHeader,\n}\n\nRemoteTaskGetIDsFunc = {\n \"notebook\": lambda args: args.notebook_id,\n \"command cmd\": lambda args: args.command_id,\n \"shell\": lambda args: args.shell_id,\n \"tensorboard\": lambda args: args.tensorboard_id,\n}\n\n\nCommand = namedtuple(\n \"Command\",\n [\n \"id\",\n \"owner\",\n \"registered_time\",\n \"config\",\n \"state\",\n \"addresses\",\n \"exit_status\",\n \"misc\",\n \"agent_user_group\",\n ],\n)\n\n\ndef expand_uuid_prefixes(\n args: Namespace, prefixes: Optional[Union[str, List[str]]] = None\n) -> Union[str, List[str]]:\n if prefixes is None:\n prefixes = RemoteTaskGetIDsFunc[args._command](args) # type: ignore\n\n was_single = False\n if isinstance(prefixes, str):\n was_single = True\n prefixes = [prefixes]\n\n # Avoid making a network request if everything is already a full UUID.\n if not all(UUID_REGEX.match(p) for p in prefixes):\n api_path = RemoteTaskNewAPIs[args._command]\n api_full_path = \"api/v1/{}\".format(api_path)\n res = api.get(args.master, api_full_path).json()[api_path]\n all_ids: List[str] = [x[\"id\"] for x in res]\n\n def expand(prefix: str) -> str:\n if UUID_REGEX.match(prefix):\n return prefix\n\n # Could do better algorithmically than repeated linear scans, but let's not complicate\n # the code unless it becomes an issue in practice.\n ids = [x for x in all_ids if x.startswith(prefix)]\n if len(ids) > 1:\n raise api.errors.BadRequestException(f\"partial UUID '{prefix}' not unique\")\n elif len(ids) == 0:\n raise api.errors.BadRequestException(f\"partial UUID '{prefix}' not found\")\n return ids[0]\n\n prefixes = [expand(p) for p in prefixes]\n\n if was_single:\n prefixes = prefixes[0]\n return prefixes\n\n\n@authentication.required\ndef list_tasks(args: Namespace) 
-> None:\n api_path = RemoteTaskNewAPIs[args._command]\n api_full_path = \"api/v1/{}\".format(api_path)\n table_header = RemoteTaskListTableHeaders[args._command]\n\n if args.all:\n params = {} # type: Dict[str, Any]\n else:\n params = {\"users\": [authentication.must_cli_auth().get_session_user()]}\n\n res = api.get(args.master, api_full_path, params=params).json()[api_path]\n\n if args.quiet:\n for command in res:\n print(command[\"id\"])\n return\n\n for item in res:\n if item[\"state\"].startswith(\"STATE_\"):\n item[\"state\"] = item[\"state\"][6:]\n\n if getattr(args, \"json\", None):\n print(json.dumps(res, indent=4))\n return\n\n values = render.select_values(res, table_header)\n\n render.tabulate_or_csv(table_header, values, getattr(args, \"csv\", False))\n\n\n@authentication.required\ndef kill(args: Namespace) -> None:\n task_ids = expand_uuid_prefixes(args)\n name = RemoteTaskName[args._command]\n\n for i, task_id in enumerate(task_ids):\n try:\n _kill(args.master, args._command, task_id)\n print(colored(\"Killed {} {}\".format(name, task_id), \"green\"))\n except api.errors.APIException as e:\n if not args.force:\n for ignored in task_ids[i + 1 :]:\n print(\"Cowardly not killing {}\".format(ignored))\n raise e\n print(colored(\"Skipping: {} ({})\".format(e, type(e).__name__), \"red\"))\n\n\ndef _kill(master_url: str, taskType: str, taskID: str) -> None:\n api_full_path = \"api/v1/{}/{}/kill\".format(RemoteTaskNewAPIs[taskType], taskID)\n api.post(master_url, api_full_path)\n\n\n@authentication.required\ndef set_priority(args: Namespace) -> None:\n task_id = expand_uuid_prefixes(args)\n name = RemoteTaskName[args._command]\n\n try:\n api_full_path = \"api/v1/{}/{}/set_priority\".format(\n RemoteTaskNewAPIs[args._command], task_id\n )\n api.post(args.master, api_full_path, {\"priority\": args.priority})\n print(colored(\"Set priority of {} {} to {}\".format(name, task_id, args.priority), \"green\"))\n except api.errors.APIException as e:\n print(colored(\"Skipping: {} ({})\".format(e, type(e).__name__), \"red\"))\n\n\n@authentication.required\ndef config(args: Namespace) -> None:\n task_id = expand_uuid_prefixes(args)\n api_full_path = \"api/v1/{}/{}\".format(RemoteTaskNewAPIs[args._command], task_id)\n res_json = api.get(args.master, api_full_path).json()\n print(render.format_object_as_yaml(res_json[\"config\"]))\n\n\ndef _set_nested_config(config: Dict[str, Any], key_path: List[str], value: Any) -> Dict[str, Any]:\n current = config\n for key in key_path[:-1]:\n current = current.setdefault(key, {})\n current[key_path[-1]] = value\n return config\n\n\ndef parse_config_overrides(config: Dict[str, Any], overrides: Iterable[str]) -> None:\n for config_arg in overrides:\n if \"=\" not in config_arg:\n raise ValueError(\n \"Could not read configuration option '{}'\\n\\n\"\n \"Expecting:\\n{}\".format(config_arg, CONFIG_DESC)\n )\n\n key, value = config_arg.split(\"=\", maxsplit=1) # type: Tuple[str, Any]\n\n # Separate values if a comma exists. 
Use yaml.safe_load() to cast\n # the value(s) to the type YAML would use, e.g., \"4\" -> 4.\n if \",\" in value:\n value = [yaml.safe_load(v) for v in value.split(\",\")]\n else:\n value = yaml.safe_load(value)\n\n # Certain configurations keys are expected to have list values.\n # Convert a single value to a singleton list if needed.\n if key in _CONFIG_PATHS_COERCE_TO_LIST:\n value = [value]\n\n # TODO(#2703): Consider using full JSONPath spec instead of dot\n # notation.\n config = _set_nested_config(config, key.split(\".\"), value)\n\n\ndef parse_config(\n config_file: Optional[IO],\n entrypoint: Optional[List[str]],\n overrides: Iterable[str],\n volumes: Iterable[str],\n) -> Dict[str, Any]:\n config = {} # type: Dict[str, Any]\n if config_file:\n with config_file:\n config = util.safe_load_yaml_with_exceptions(config_file)\n\n parse_config_overrides(config, overrides)\n\n for volume_arg in volumes:\n if \":\" not in volume_arg:\n raise ValueError(\n \"Could not read volume option '{}'\\n\\n\"\n \"Expecting:\\n{}\".format(volume_arg, VOLUME_DESC)\n )\n\n host_path, container_path = volume_arg.split(\":\", maxsplit=1)\n bind_mounts = config.setdefault(\"bind_mounts\", [])\n bind_mounts.append({\"host_path\": host_path, \"container_path\": container_path})\n\n # Use the entrypoint command line argument if an entrypoint has not already been\n # defined by previous settings.\n if not config.get(\"entrypoint\") and entrypoint:\n config[\"entrypoint\"] = entrypoint\n\n return config\n\n\ndef launch_command(\n master: str,\n endpoint: str,\n config: Dict[str, Any],\n template: str,\n context_path: Optional[Path] = None,\n data: Optional[Dict[str, Any]] = None,\n preview: Optional[bool] = False,\n default_body: Optional[Dict[str, Any]] = None,\n) -> Any:\n user_files = [] # type: List[Dict[str, Any]]\n if context_path:\n user_files, _ = context.read_context(context_path)\n\n body = {} # type: Dict[str, Any]\n if default_body:\n body.update(default_body)\n\n body[\"config\"] = config\n\n if template:\n body[\"template_name\"] = template\n\n if len(user_files) > 0:\n body[\"files\"] = user_files\n\n if data is not None:\n message_bytes = json.dumps(data).encode(\"utf-8\")\n base64_bytes = base64.b64encode(message_bytes)\n body[\"data\"] = base64_bytes\n\n if preview:\n body[\"preview\"] = preview\n\n return api.post(\n master,\n endpoint,\n body,\n ).json()\n\n\ndef render_event_stream(event: Any) -> None:\n description = event[\"description\"]\n if event[\"scheduled_event\"] is not None:\n print(\n colored(\"Scheduling {} (id: {})...\".format(description, event[\"parent_id\"]), \"yellow\")\n )\n elif event[\"assigned_event\"] is not None:\n print(colored(\"{} was assigned to an agent...\".format(description), \"green\"))\n elif event[\"container_started_event\"] is not None:\n print(colored(\"Container of {} has started...\".format(description), \"green\"))\n elif event[\"service_ready_event\"] is not None:\n pass # Ignore this message.\n elif event[\"terminate_request_event\"] is not None:\n print(colored(\"{} was requested to terminate...\".format(description), \"red\"))\n elif event[\"exited_event\"] is not None:\n # TODO: Non-success exit statuses should be red\n stat = event[\"exited_event\"]\n print(colored(\"{} was terminated: {}\".format(description, stat), \"green\"))\n pass\n elif event[\"log_event\"] is not None:\n print(event[\"log_event\"], flush=True)\n else:\n raise ValueError(\"unexpected event: 
{}\".format(event))\n","sub_path":"harness/determined/cli/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":11864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"19385428","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nimport uproot\nimport random\nimport os\nimport shutil\nimport argparse\n\nfrom config import *\n\nrandom.seed(30)\n\ndata_folder = root_folder + 'user.eschanet.allTrees_v2_0_2_signal_1Lbb_skim.root'\nroot = uproot.open(data_folder)\n\nsignal_id = []\ncount = 0\nfor n in root.keys():\n if 'nosys' not in str(n).lower():\n n = str(n)\n signal_id.append('_'.join(n.split('Wh_hbb_')[1].split('_')[0:2])) \n \nunique_name = np.unique(signal_id)\n\ndef main(direction, depth, additional_cuts):\n \n if direction == 'down':\n syst_path = numpy_sig_syst_down\n elif direction == 'up':\n syst_path = numpy_sig_syst_up\n \n signal_dict = {}\n for n in unique_name:\n for names in root.keys():\n if n in str(names):\n if 'nosys' not in str(names).lower():\n if str(names).lower().split(';')[0].endswith(direction):\n # print(('_'.join(str(names).split('_')[5:])).split('__')[0])\n if ('_'.join(str(names).split('_')[5:])).split('__')[0] in syst:\n # print(('_'.join(str(names).split('_')[5:])).split('__')[0])\n try:\n signal_dict[n].append(str(names).split(\";\")[0].split(\"'\")[1])\n except:\n signal_dict[n] = [str(names).split(\";\")[0].split(\"'\")[1]] \n\n\n entrysteps=3000000\n tot = 0\n\n for folder in unique_name:\n dir = syst_path + '{}'.format(folder)\n if os.path.exists(dir):\n # continue\n shutil.rmtree(dir)\n os.makedirs(dir)\n else:\n os.makedirs(dir)\n\n for name in signal_dict[folder]:\n\n events = uproot.open(data_folder)[name]\n array = events.lazyarray('met')\n\n print('lunghezza array', len(array))\n file_split = len(array)//entrysteps\n start_name_file = 0\n entrystart = start_name_file*entrysteps\n\n print(name)\n\n batches = events.iterate(columns_sig, entrystart=entrystart,\n entrysteps=entrysteps, \n outputtype=pd.DataFrame)\n\n for ix in range(start_name_file, file_split+1):\n\n print(ix) \n batch = next(batches)\n print('adding luminosity')\n batch['luminosity'] = 139000\n print(len(batch))\n\n batch = batch[batch['nLep_signal'].astype(int)==1]\n print('after signal {}'.format(len(batch)))\n\n batch = batch[batch['trigMatch_metTrig'].astype(int)==1]\n print('after trig {}'.format(len(batch)))\n\n batch = batch[((batch['nBJet30_MV2c10']>=1)&(batch['nBJet30_MV2c10']<4))]\n print('after bjet {}'.format(len(batch)))\n\n batch = batch[batch['met']>=220] \n print('after met {}'.format(len(batch)))\n\n batch = batch[batch['mt']>=50] \n print('after mt {}'.format(len(batch)))\n\n if depth == 'middle':\n batch = batch[((batch['mbb']>=100)&(batch['mbb']<=140))] \n print('after mbb {}'.format(len(batch))) \n\n batch = batch[batch['mct2']>100] \n print('after mct2 {}'.format(len(batch)))\n\n if additional_cuts:\n\n print('cutting below 0 and above 1000')\n batch = batch[((batch['mct2']>=0)&(batch['mct2']<1000))]\n batch = batch[((batch['mt']>=0)&(batch['mt']<1000))]\n batch = batch[((batch['met']>=0)&(batch['met']<1000))]\n batch = batch[((batch['mlb1']>=0)&(batch['mlb1']<1000))]\n batch = batch[((batch['lep1Pt']>=0)&(batch['lep1Pt']<1000))]\n\n if len(batch) > 0:\n\n batch['weight'] = batch['genWeight']*batch['eventWeight']*batch['pileupWeight']*\\\n batch['leptonWeight']*batch['bTagWeight']*batch['jvtWeight']*batch['luminosity']\n\n # batch['weight'] = batch.apply(lambda row: 
row['genWeight']*row['eventWeight']*row['pileupWeight']*\n # row['leptonWeight']*row['bTagWeight']*row['jvtWeight']*row['luminosity'], axis=1)\n\n batch_fin = batch.iloc[:,:8]\n\n batch_fin['weight'] = batch['weight']\n\n batch_fin = batch_fin[['met', 'mt', 'mbb', 'mct2',\n 'mlb1','lep1Pt', 'nJet30', 'nBJet30_MV2c10', 'weight']]\n\n tot = len(batch)\n print('tot = {}'.format(tot))\n print(\"\\x1b[31m\\\"saving {}_{}\"\"\\x1b[0m\".format(name,ix))\n np.save(syst_path + '{}/{}.npy'.format(folder,name), batch_fin.values)\n \n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser(description='arguments for systematics cuts: define if up or down and if additional cuts are required')\n\n parser.add_argument('--direction', nargs=\"?\", type = str, default = 'down', help= 'up or down')\n parser.add_argument('--depth', nargs=\"?\", type = str, default = 'middle', help= 'depth of the cuts')\n parser.add_argument('--clean_data', nargs=\"?\", type = bool, default = True, help= 'remove events with variable <0 and >1000')\n\n args = parser.parse_args()\n additional_cuts = args.clean_data\n \n main(args.direction, args.depth, additional_cuts)\n","sub_path":"013_signal_sys.py","file_name":"013_signal_sys.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432572402","text":"try:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nfrom .service import Service\nfrom .options import DEFAULT_OPTIONS\nfrom .utils import getPidPath\n\nfrom os import environ\nfrom socket import AF_INET\nfrom twisted.application.internet import TCPServer, SSLServer\nfrom twisted.internet import reactor\n\nimport chalk\nimport os\nimport time\n\n\n__version__ = \"0.0.0\"\n__all__ = ['start', 'stop', 'restart']\n\n\ndef start(resource, options):\n \"sets up the desired services and runs the requested action\"\n neji = Neji(resource, options=options)\n neji.spinUp(options['fd'])\n chalk.blue(\n 'Ready and Listening on port %d...' 
% options.get('port')\n )\n neji.reactor.run()\n\n\ndef stop(resource, options, sig=9):\n neji = Neji(resource, options=options)\n with open(neji.pid) as pid_file:\n pids = pid_file.readlines()\n for pid in pids:\n try:\n os.kill(int(pid), sig)\n except OSError:\n # OSError raised when it trys to kill the child processes\n pass\n os.remove(neji.pid)\n\n\ndef restart(resource, options, fd=None):\n stop(resource, options)\n time.sleep(1) # wait a second to ensure the port is closed\n start(resource, options, fd)\n\n\nclass Neji(object):\n \"\"\"\n HendrixDeploy encapsulates the necessary information needed to deploy\n the HendrixService on a single or multiple processes.\n \"\"\"\n\n def __init__(self, resource, options={}, reactor=reactor):\n self.options = DEFAULT_OPTIONS\n self.options.update(options)\n self.reactor = reactor\n self.service = Service(resource, self.options['port'])\n\n self.servers = []\n for service in self.service.services:\n if isinstance(service, (TCPServer, SSLServer)):\n self.servers.append(service.name)\n # self.is_secure = self.options['key'] and self.options['cert']\n\n @property\n def pid(self):\n \"The default location of the pid file for process management\"\n return getPidPath(self.options)\n\n def getSpawnArgs(self):\n _args = [\n 'ng',\n 'start', # action\n '--port', str(self.options['port']),\n '--workers', '0',\n '--fd', pickle.dumps(self.fds),\n ]\n\n # args/signals\n if self.options['dev']:\n _args.append('--dev')\n return _args\n\n def spinUp(self, fd=None):\n if fd is None:\n # anything in this block is only run once\n self.service.startService()\n self.launchWorkers()\n else:\n fds = pickle.loads(fd)\n factories = {}\n for name in self.servers:\n factory = self.disownService(name)\n factories[name] = factory\n self.service.startService()\n for name, factory in factories.iteritems():\n self.addSubprocesses(fds, name, factory)\n\n def launchWorkers(self):\n pids = [str(os.getpid())] # script pid\n if self.options['workers']:\n # Create a new listening port and several other processes to\n # help out.\n childFDs = {0: 0, 1: 1, 2: 2}\n self.fds = {}\n for name in self.servers:\n port = self.service.getPort(name)\n fd = port.fileno()\n childFDs[fd] = fd\n self.fds[name] = fd\n args = self.getSpawnArgs()\n transports = []\n for i in range(self.options['workers']):\n transport = self.reactor.spawnProcess(\n None, 'ng', args, childFDs=childFDs, env=environ\n )\n transports.append(transport)\n pids.append(str(transport.pid))\n with open(self.pid, 'w') as pid_file:\n pid_file.write('\\n'.join(pids))\n\n def addSubprocesses(self, fds, name, factory):\n self.reactor.adoptStreamPort( # outputs port\n fds[name], AF_INET, factory\n )\n\n def disownService(self, name):\n \"\"\"\n disowns a service on hendirix by name\n returns a factory for use in the adoptStreamPort part of setting up\n multiple processes\n \"\"\"\n _service = self.service.getServiceNamed(name)\n _service.disownServiceParent()\n return _service.factory\n","sub_path":"neji/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"320269446","text":"from lua_state import LuaState\nimport sys\n\n\ndef py_print(ls):\n nargs = ls.get_top()\n for i in range(1, nargs+1):\n if ls.is_boolean(i):\n print('%t', ls.to_boolean(i), end='')\n elif ls.is_string(i):\n print(ls.to_string(i), end='')\n else:\n print(ls.type_name(ls.type(i)), end='')\n\n if i < nargs:\n print('\\t', 
end='')\n\n print()\n return 0\n\n\ndef main():\n with open(sys.argv[1], 'rb') as f:\n data = f.read()\n ls = LuaState()\n ls.register('print', py_print)\n ls.load(data)\n ls.call(0, 0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/python/ch10/src/test_closure_upvalue.py","file_name":"test_closure_upvalue.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539552406","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\nDoubly Robust Learner. The method uses the doubly robust correction to construct doubly\nrobust estimates of all the potential outcomes of each samples. Then estimates a CATE model\nby regressing the potential outcome differences on the heterogeneity features X.\n\nReferences\n----------\n\nDylan Foster, Vasilis Syrgkanis (2019).\n Orthogonal Statistical Learning.\n ACM Conference on Learning Theory. https://arxiv.org/abs/1901.09036\n\nRobins, J.M., Rotnitzky, A., and Zhao, L.P. (1994).\n Estimation of regression coefficients when some regressors are not always observed.\n Journal of the American Statistical Association 89,846–866.\n\nBang, H. and Robins, J.M. (2005).\n Doubly robust estimation in missing data and causal inference models.\n Biometrics 61,962–972.\n\nTsiatis AA (2006).\n Semiparametric Theory and Missing Data.\n New York: Springer; 2006.\n\n\"\"\"\n\nfrom warnings import warn\n\nimport numpy as np\nfrom sklearn.base import clone\nfrom sklearn.linear_model import (LassoCV, LinearRegression,\n LogisticRegressionCV)\n\nfrom ._ortho_learner import _OrthoLearner\nfrom .cate_estimator import (DebiasedLassoCateEstimatorDiscreteMixin,\n ForestModelFinalCateEstimatorDiscreteMixin,\n StatsModelsCateEstimatorDiscreteMixin)\nfrom .inference import GenericModelFinalInferenceDiscrete\nfrom .sklearn_extensions.ensemble import SubsampledHonestForest\nfrom .sklearn_extensions.linear_model import (\n DebiasedLasso, StatsModelsLinearRegression, WeightedLassoCVWrapper)\nfrom .utilities import (_deprecate_positional, check_high_dimensional,\n check_input_arrays, filter_none_kwargs,\n fit_with_groups, inverse_onehot)\n\n\nclass _ModelNuisance:\n def __init__(self, model_propensity, model_regression, min_propensity):\n self._model_propensity = model_propensity\n self._model_regression = model_regression\n self._min_propensity = min_propensity\n\n def _combine(self, X, W):\n return np.hstack([arr for arr in [X, W] if arr is not None])\n\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n if Y.ndim != 1 and (Y.ndim != 2 or Y.shape[1] != 1):\n raise ValueError(\"The outcome matrix must be of shape ({0}, ) or ({0}, 1), \"\n \"instead got {1}.\".format(len(X), Y.shape))\n if (X is None) and (W is None):\n raise AttributeError(\"At least one of X or W has to not be None!\")\n if np.any(np.all(T == 0, axis=0)) or (not np.any(np.all(T == 0, axis=1))):\n raise AttributeError(\"Provided crossfit folds contain training splits that \" +\n \"don't contain all treatments\")\n XW = self._combine(X, W)\n filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight)\n\n fit_with_groups(self._model_propensity, XW, inverse_onehot(T), groups=groups, **filtered_kwargs)\n fit_with_groups(self._model_regression, np.hstack([XW, T]), Y, groups=groups, **filtered_kwargs)\n return self\n\n def score(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n XW = self._combine(X, W)\n filtered_kwargs = 
filter_none_kwargs(sample_weight=sample_weight)\n\n if hasattr(self._model_propensity, 'score'):\n propensity_score = self._model_propensity.score(XW, inverse_onehot(T), **filtered_kwargs)\n else:\n propensity_score = None\n if hasattr(self._model_regression, 'score'):\n regression_score = self._model_regression.score(np.hstack([XW, T]), Y, **filtered_kwargs)\n else:\n regression_score = None\n\n return propensity_score, regression_score\n\n def predict(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):\n XW = self._combine(X, W)\n propensities = np.maximum(self._model_propensity.predict_proba(XW), self._min_propensity)\n n = T.shape[0]\n Y_pred = np.zeros((T.shape[0], T.shape[1] + 1))\n T_counter = np.zeros(T.shape)\n Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)\n Y_pred[:, 0] += (Y.reshape(n) - Y_pred[:, 0]) * np.all(T == 0, axis=1) / propensities[:, 0]\n for t in np.arange(T.shape[1]):\n T_counter = np.zeros(T.shape)\n T_counter[:, t] = 1\n Y_pred[:, t + 1] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)\n Y_pred[:, t + 1] += (Y.reshape(n) - Y_pred[:, t + 1]) * (T[:, t] == 1) / propensities[:, t + 1]\n return Y_pred.reshape(Y.shape + (T.shape[1] + 1,))\n\n\nclass _ModelFinal:\n # Coding Remark: The reasoning around the multitask_model_final could have been simplified if\n # we simply wrapped the model_final with a MultiOutputRegressor. However, because we also want\n # to allow even for model_final objects whose fit(X, y) can accept X=None\n # (e.g. the StatsModelsLinearRegression), we cannot take that route, because the MultiOutputRegressor\n # checks that X is 2D array.\n def __init__(self, model_final, featurizer, multitask_model_final):\n self._model_final = clone(model_final, safe=False)\n self._featurizer = clone(featurizer, safe=False)\n self._multitask_model_final = multitask_model_final\n return\n\n def fit(self, Y, T, X=None, W=None, *, nuisances, sample_weight=None, sample_var=None):\n Y_pred, = nuisances\n self.d_y = Y_pred.shape[1:-1] # track whether there's a Y dimension (must be a singleton)\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.fit_transform(X)\n filtered_kwargs = filter_none_kwargs(sample_weight=sample_weight, sample_var=sample_var)\n if self._multitask_model_final:\n ys = Y_pred[..., 1:] - Y_pred[..., [0]] # subtract control results from each other arm\n if self.d_y: # need to squeeze out singleton so that we fit on 2D array\n ys = ys.squeeze(1)\n self.model_cate = self._model_final.fit(X, ys, **filtered_kwargs)\n else:\n self.models_cate = [clone(self._model_final, safe=False).fit(X, Y_pred[..., t] - Y_pred[..., 0],\n **filtered_kwargs)\n for t in np.arange(1, Y_pred.shape[-1])]\n return self\n\n def predict(self, X=None):\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.transform(X)\n if self._multitask_model_final:\n pred = self.model_cate.predict(X)\n if self.d_y: # need to reintroduce singleton Y dimension\n return pred[:, np.newaxis, :]\n return pred\n else:\n preds = np.array([mdl.predict(X) for mdl in self.models_cate])\n return np.moveaxis(preds, 0, -1) # move treatment dim to end\n\n def score(self, Y, T, X=None, W=None, *, nuisances, sample_weight=None, sample_var=None):\n if (X is not None) and (self._featurizer is not None):\n X = self._featurizer.transform(X)\n Y_pred, = nuisances\n if self._multitask_model_final:\n return np.mean(np.average((Y_pred[..., 1:] - Y_pred[..., [0]] - 
self.model_cate.predict(X))**2,\n weights=sample_weight, axis=0))\n else:\n return np.mean([np.average((Y_pred[..., t] - Y_pred[..., 0] -\n self.models_cate[t - 1].predict(X))**2,\n weights=sample_weight, axis=0)\n for t in np.arange(1, Y_pred.shape[-1])])\n\n\nclass DRLearner(_OrthoLearner):\n \"\"\"\n CATE estimator that uses doubly-robust correction techniques to account for\n covariate shift (selection bias) between the treatment arms. The estimator is a special\n case of an :class:`._OrthoLearner` estimator, so it follows the two\n stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting\n manner and a final stage estimates the CATE model. See the documentation of\n :class:`._OrthoLearner` for a description of this two stage process.\n\n In this estimator, the CATE is estimated by using the following estimating equations. If we let:\n\n .. math ::\n Y_{i, t}^{DR} = E[Y | X_i, W_i, T_i=t]\\\n + \\\\frac{Y_i - E[Y | X_i, W_i, T_i=t]}{Pr[T_i=t | X_i, W_i]} \\\\cdot 1\\\\{T_i=t\\\\}\n\n Then the following estimating equation holds:\n\n .. math ::\n E\\\\left[Y_{i, t}^{DR} - Y_{i, 0}^{DR} | X_i\\\\right] = \\\\theta_t(X_i)\n\n Thus if we estimate the nuisance functions :math:`h(X, W, T) = E[Y | X, W, T]` and\n :math:`p_t(X, W)=Pr[T=t | X, W]` in the first stage, we can estimate the final stage cate for each\n treatment t, by running a regression, regressing :math:`Y_{i, t}^{DR} - Y_{i, 0}^{DR}` on :math:`X_i`.\n\n The problem of estimating the nuisance function :math:`p` is a simple multi-class classification\n problem of predicting the label :math:`T` from :math:`X, W`. The :class:`.DRLearner`\n class takes as input the parameter ``model_propensity``, which is an arbitrary scikit-learn\n classifier, that is internally used to solve this classification problem.\n\n The second nuisance function :math:`h` is a simple regression problem and the :class:`.DRLearner`\n class takes as input the parameter ``model_regression``, which is an arbitrary scikit-learn regressor that\n is internally used to solve this regression problem.\n\n The final stage is a multi-task regression problem with outcomes the labels :math:`Y_{i, t}^{DR} - Y_{i, 0}^{DR}`\n for each non-baseline treatment t. The :class:`.DRLearner` takes as input parameter\n ``model_final``, which is any scikit-learn regressor that is internally used to solve this multi-task\n regression problem. If the parameter ``multitask_model_final`` is False, then this model is assumed\n to be a mono-task regressor, and separate clones of it are used to solve each regression target\n separately.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. 
If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n model_final :\n estimator for the final cate model. Trained on regressing the doubly robust potential outcomes\n on (features X).\n\n - If X is None, then the fit method of model_final should be able to handle X=None.\n - If featurizer is not None and X is not None, then it is trained on the outcome of\n featurizer.fit_transform(X).\n - If multitask_model_final is True, then this model must support multitasking\n and it is trained by regressing all doubly robust target outcomes on (featurized) features simultaneously.\n - The output of the predict(X) of the trained model will contain the CATEs for each treatment compared to\n baseline treatment (lexicographically smallest). If multitask_model_final is False, it is assumed to be a\n mono-task model and a separate clone of the model is trained for each outcome. Then predict(X) of the t-th\n clone will be the CATE of the t-th lexicographically ordered treatment compared to the baseline.\n\n multitask_model_final : bool, optional, default False\n Whether the model_final should be treated as a multi-task model. See description of model_final.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n n_splits: int, cross-validation generator or an iterable, optional (default is 2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`cv splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all\n W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. 
testcode::\n\n import numpy as np\n import scipy.special\n from econml.drlearner import DRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n sigma = 0.001\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))\n est = DRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.const_marginal_effect(X[:2])\n array([[0.511640..., 1.144004...],\n [0.378140..., 0.613143...]])\n >>> est.effect(X[:2], T0=0, T1=1)\n array([0.511640..., 0.378140...])\n >>> est.score_\n 5.11238581...\n >>> est.score(y, T, X=X)\n 5.78673506...\n >>> est.model_cate(T=1).coef_\n array([0.434910..., 0.010226..., 0.047913...])\n >>> est.model_cate(T=2).coef_\n array([ 0.863723..., 0.086946..., -0.022288...])\n >>> est.cate_feature_names()\n \n >>> [mdl.coef_ for mdl in est.models_regression]\n [array([ 1.472104...e+00, 1.984419...e-03, -1.103451...e-02, 6.984376...e-01,\n 2.049695...e+00]), array([ 1.455654..., -0.002110..., 0.005488..., 0.677090..., 1.998648...])]\n >>> [mdl.coef_ for mdl in est.models_propensity]\n [array([[-0.747137..., 0.153419..., -0.018412...],\n [ 0.083807..., -0.110360..., -0.076003...],\n [ 0.663330..., -0.043058... , 0.094416...]]),\n array([[-1.048348...e+00, 2.248997...e-04, 3.228087...e-02],\n [ 1.911900...e-02, 1.241337...e-01, -8.196211...e-02],\n [ 1.029229...e+00, -1.243586...e-01, 4.968123...e-02]])]\n\n Beyond default models:\n\n .. testcode::\n\n import scipy.special\n import numpy as np\n from sklearn.linear_model import LassoCV\n from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n from econml.drlearner import DRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n sigma = 0.01\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))\n est = DRLearner(model_propensity=RandomForestClassifier(n_estimators=100, min_samples_leaf=10),\n model_regression=RandomForestRegressor(n_estimators=100, min_samples_leaf=10),\n model_final=LassoCV(cv=3),\n featurizer=None)\n est.fit(y, T, X=X, W=None)\n\n >>> est.score_\n 1.7...\n >>> est.const_marginal_effect(X[:3])\n array([[0.68..., 1.10...],\n [0.56..., 0.79...],\n [0.34..., 0.10...]])\n >>> est.model_cate(T=2).coef_\n array([0.74..., 0. , 0. ])\n >>> est.model_cate(T=2).intercept_\n 1.9...\n >>> est.model_cate(T=1).coef_\n array([0.24..., 0.00..., 0. ])\n >>> est.model_cate(T=1).intercept_\n 0.94...\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. 
math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n\n \"\"\"\n\n def __init__(self, model_propensity='auto',\n model_regression='auto',\n model_final=StatsModelsLinearRegression(),\n multitask_model_final=False,\n featurizer=None,\n min_propensity=1e-6,\n categories='auto',\n n_splits=2,\n random_state=None):\n if model_propensity == 'auto':\n model_propensity = LogisticRegressionCV(cv=3, solver='lbfgs', multi_class='auto',\n random_state=random_state)\n if model_regression == 'auto':\n model_regression = WeightedLassoCVWrapper(cv=3, random_state=random_state)\n self._multitask_model_final = multitask_model_final\n super().__init__(_ModelNuisance(model_propensity, model_regression, min_propensity),\n _ModelFinal(model_final, featurizer, multitask_model_final),\n n_splits=n_splits, discrete_treatment=True,\n discrete_instrument=False, # no instrument, so doesn't matter\n categories=categories,\n random_state=random_state)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference=None):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each sample\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the n_splits argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from _OrthoLearner, to enforce Z=None and improve the docstring\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=sample_var, groups=groups,\n inference=inference)\n\n def score(self, Y, T, X=None, W=None):\n \"\"\"\n Score the fitted CATE model on a new data set. 
Generates nuisance parameters\n for the new data set based on the fitted residual nuisance models created at fit time.\n It uses the mean prediction of the models fitted by the different crossfit folds.\n Then calculates the MSE of the final doubly robust potential outcome regressions.\n\n If model_final does not have a score method, then it raises an :exc:`.AttributeError`\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n\n Returns\n -------\n score: float\n The MSE of the final CATE model on the new data.\n \"\"\"\n # Replacing score from _OrthoLearner, to enforce Z=None and improve the docstring\n return super().score(Y, T, X=X, W=W)\n\n @property\n def multitask_model_cate(self):\n \"\"\"\n Get the fitted final CATE model.\n\n Returns\n -------\n multitask_model_cate: object of type(`model_final`)\n An instance of the model_final object that was fitted after calling fit, whose\n vector of outcomes corresponds to the CATE model for each treatment, compared to baseline.\n Available only when multitask_model_final=True.\n \"\"\"\n if not self._multitask_model_final:\n raise AttributeError(\"Separate CATE models were fitted for each treatment! Use model_cate.\")\n return super().model_final.model_cate\n\n def model_cate(self, T=1):\n \"\"\"\n Get the fitted final CATE model.\n\n Parameters\n ----------\n T: alphanumeric\n The treatment with respect to which we want the fitted CATE model.\n\n Returns\n -------\n model_cate: object of type(model_final)\n An instance of the model_final object that was fitted after calling fit which corresponds\n to the CATE model for treatment T=t, compared to baseline. Available when multitask_model_final=False.\n \"\"\"\n if self._multitask_model_final:\n raise AttributeError(\"A single multitask model was fitted for all treatments! Use multitask_model_cate.\")\n _, T = self._expand_treatments(None, T)\n ind = inverse_onehot(T).item() - 1\n assert ind >= 0, \"No model was fitted for the control\"\n return super().model_final.models_cate[ind]\n\n @property\n def models_propensity(self):\n \"\"\"\n Get the fitted propensity models.\n\n Returns\n -------\n models_propensity: list of objects of type(`model_propensity`)\n A list of instances of the `model_propensity` object. Each element corresponds to a crossfitting\n fold and is the model instance that was fitted for that training fold.\n \"\"\"\n return [mdl._model_propensity for mdl in super().models_nuisance]\n\n @property\n def models_regression(self):\n \"\"\"\n Get the fitted regression models.\n\n Returns\n -------\n model_regression: list of objects of type(`model_regression`)\n A list of instances of the model_regression object. 
Each element corresponds to a crossfitting\n fold and is the model instance that was fitted for that training fold.\n \"\"\"\n return [mdl._model_regression for mdl in super().models_nuisance]\n\n @property\n def nuisance_scores_propensity(self):\n \"\"\"Gets the score for the propensity model on out-of-sample training data\"\"\"\n return self.nuisance_scores_[0]\n\n @property\n def nuisance_scores_regression(self):\n \"\"\"Gets the score for the regression model on out-of-sample training data\"\"\"\n return self.nuisance_scores_[1]\n\n @property\n def featurizer(self):\n \"\"\"\n Get the fitted featurizer.\n\n Returns\n -------\n featurizer: object of type(`featurizer`)\n An instance of the fitted featurizer that was used to preprocess X in the final CATE model training.\n Available only when featurizer is not None and X is not None.\n \"\"\"\n return super().model_final._featurizer\n\n def cate_feature_names(self, input_feature_names=None):\n \"\"\"\n Get the output feature names.\n\n Parameters\n ----------\n input_feature_names: list of strings of length X.shape[1] or None\n The names of the input features\n\n Returns\n -------\n out_feature_names: list of strings or None\n The names of the output features :math:`\\\\phi(X)`, i.e. the features with respect to which the\n final CATE model for each treatment is linear. It is the names of the features that are associated\n with each entry of the :meth:`coef_` parameter. Available only when the featurizer is not None and has\n a method: `get_feature_names(input_feature_names)`. Otherwise None is returned.\n \"\"\"\n if self.featurizer is None:\n return input_feature_names\n elif hasattr(self.featurizer, 'get_feature_names'):\n return self.featurizer.get_feature_names(input_feature_names)\n else:\n raise AttributeError(\"Featurizer does not have a method: get_feature_names!\")\n\n\nclass LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner):\n \"\"\"\n Special case of the :class:`.DRLearner` where the final stage\n is a Linear Regression on a low dimensional set of features. In this case, inference\n can be performed via the asymptotic normal characterization of the estimated parameters.\n This is computationally faster than bootstrap inference. To do this, just leave the setting ``inference='auto'``\n unchanged, or explicitly set ``inference='statsmodels'`` or alter the covariance type calculation via\n ``inference=StatsModelsInferenceDiscrete(cov_type='HC1')``.\n\n More concretely, this estimator assumes that the final cate model for each treatment takes a linear form:\n\n .. math ::\n \\\\theta_t(X) = \\\\left\\\\langle \\\\theta_t, \\\\phi(X) \\\\right\\\\rangle + \\\\beta_t\n\n where :math:`\\\\phi(X)` is the outcome features of the featurizers, or `X` if featurizer is None. :math:`\\\\beta_t`\n is an intercept of the CATE, which is included if ``fit_cate_intercept=True`` (Default). It fits this by\n running a standard ordinary linear regression (OLS), regressing the doubly robust outcome differences on X:\n\n .. math ::\n \\\\min_{\\\\theta_t, \\\\beta_t}\\\n E_n\\\\left[\\\\left(Y_{i, t}^{DR} - Y_{i, 0}^{DR}\\\n - \\\\left\\\\langle \\\\theta_t, \\\\phi(X_i) \\\\right\\\\rangle - \\\\beta_t\\\\right)^2\\\\right]\n\n Then inference can be performed via standard approaches for inference of OLS, via asymptotic normal approximations\n of the estimated parameters. The default covariance estimator used is heteroskedasticity robust (HC1).\n For other methods see :class:`.StatsModelsInferenceDiscrete`. 
You can invoke them by setting:\n ``inference=StatsModelsInferenceDiscrete(cov_type=...)``.\n\n This approach is valid even if the CATE model is not linear in :math:`\\\\phi(X)`. In this case it performs\n inference on the best linear approximation of the CATE model.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n fit_cate_intercept : bool, optional, default True\n Whether the linear CATE model should have a constant term.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n n_splits: int, cross-validation generator or an iterable, optional (default is 2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`cv splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(X,T)` to generate the splits.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. 
testcode::\n\n import numpy as np\n import scipy.special\n from econml.drlearner import DRLearner, LinearDRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(size=(1000,))\n est = LinearDRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.effect(X[:3])\n array([ 0.409743..., 0.312604..., -0.127394...])\n >>> est.effect_interval(X[:3])\n (array([ 0.120682..., -0.102543..., -0.663246...]), array([0.698803..., 0.727753..., 0.408458...]))\n >>> est.coef_(T=1)\n array([ 0.450779..., -0.003214... , 0.063884... ])\n >>> est.coef__interval(T=1)\n (array([ 0.202646..., -0.207195..., -0.104558...]), array([0.698911..., 0.200767..., 0.232326...]))\n >>> est.intercept_(T=1)\n 0.88425066...\n >>> est.intercept__interval(T=1)\n (0.68655813..., 1.08194320...)\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n \"\"\"\n\n def __init__(self,\n model_propensity='auto',\n model_regression='auto',\n featurizer=None,\n fit_cate_intercept=True,\n min_propensity=1e-6,\n categories='auto',\n n_splits=2, random_state=None):\n self.fit_cate_intercept = fit_cate_intercept\n super().__init__(model_propensity=model_propensity,\n model_regression=model_regression,\n model_final=StatsModelsLinearRegression(fit_intercept=fit_cate_intercept),\n featurizer=featurizer,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n n_splits=n_splits,\n random_state=random_state)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each samples\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the n_splits argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. 
This estimator supports ``'bootstrap'``\n (or an instance of :class:`.BootstrapInference`) and ``'statsmodels'``\n (or an instance of :class:`.StatsModelsInferenceDiscrete`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from DRLearner, to add statsmodels inference in docstring\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=sample_var, groups=groups,\n inference=inference)\n\n @property\n def multitask_model_cate(self):\n # Replacing this method which is invalid for this class, so that we make the\n # docstring empty and not appear in the docs.\n return super().multitask_model_cate\n\n @property\n def model_final(self):\n return super().model_final._model_final\n\n @property\n def fitted_models_final(self):\n return super().model_final.models_cate\n\n\nclass SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner):\n \"\"\"\n Special case of the :class:`.DRLearner` where the final stage\n is a Debiased Lasso Regression. In this case, inference can be performed via the debiased lasso approach\n and its asymptotic normal characterization of the estimated parameters. This is computationally\n faster than bootstrap inference. Leave the default ``inference='auto'`` unchanged, or explicitly set\n ``inference='debiasedlasso'`` at fit time to enable inference via asymptotic normality.\n\n More concretely, this estimator assumes that the final cate model for each treatment takes a linear form:\n\n .. math ::\n \\\\theta_t(X) = \\\\left\\\\langle \\\\theta_t, \\\\phi(X) \\\\right\\\\rangle + \\\\beta_t\n\n where :math:`\\\\phi(X)` is the outcome features of the featurizers, or `X` if featurizer is None. :math:`\\\\beta_t`\n is an intercept of the CATE, which is included if ``fit_cate_intercept=True`` (Default). It fits this by\n running a debiased lasso regression (i.e. :math:`\\\\ell_1`-penalized regression with debiasing),\n regressing the doubly robust outcome differences on X: i.e. first solves the penalized square loss problem\n\n .. math ::\n \\\\min_{\\\\theta_t, \\\\beta_t}\\\n E_n\\\\left[\\\\left(Y_{i, t}^{DR} - Y_{i, 0}^{DR}\\\n - \\\\left\\\\langle \\\\theta_t, \\\\phi(X_i) \\\\right\\\\rangle - \\\\beta_t\\\\right)^2\\\\right]\\\n + \\\\lambda \\\\left\\\\lVert \\\\theta_t \\\\right\\\\rVert_1\n\n and then adds a debiasing correction to the solution. If alpha='auto' (recommended), then the penalty\n weight :math:`\\\\lambda` is set optimally via cross-validation.\n\n This approach is valid even if the CATE model is not linear in :math:`\\\\phi(X)`. In this case it performs\n inference on the best sparse linear approximation of the CATE model.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier or 'auto', optional (default='auto')\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.\n\n model_regression : scikit-learn regressor or 'auto', optional (default='auto')\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. 
If different models per treatment arm are desired, see the\n :class:`.MultiModelWrapper` helper class.\n If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.\n\n featurizer : :term:`transformer`, optional, default None\n Must support fit_transform and transform. Used to create composite features in the final CATE regression.\n It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).\n If featurizer=None, then CATE is trained on X.\n\n fit_cate_intercept : bool, optional, default True\n Whether the linear CATE model should have a constant term.\n\n alpha: string | float, optional., default 'auto'.\n CATE L1 regularization applied through the debiased lasso in the final model.\n 'auto' corresponds to a CV form of the :class:`DebiasedLasso`.\n\n max_iter : int, optional, default 1000\n The maximum number of iterations in the Debiased Lasso\n\n tol : float, optional, default 1e-4\n The tolerance for the optimization: if the updates are\n smaller than ``tol``, the optimization code checks the\n dual gap for optimality and continues until it is smaller\n than ``tol``.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n n_splits: int, cross-validation generator or an iterable, optional, default 2\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`cv splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(X,T)` to generate the splits.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random`.\n\n Examples\n --------\n A simple example with the default models:\n\n .. testcode::\n\n import numpy as np\n import scipy.special\n from econml.drlearner import DRLearner, SparseLinearDRLearner\n\n np.random.seed(123)\n X = np.random.normal(size=(1000, 3))\n T = np.random.binomial(2, scipy.special.expit(X[:, 0]))\n y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(size=(1000,))\n est = SparseLinearDRLearner()\n est.fit(y, T, X=X, W=None)\n\n >>> est.effect(X[:3])\n array([ 0.418400..., 0.306400..., -0.130733...])\n >>> est.effect_interval(X[:3])\n (array([ 0.056783..., -0.206438..., -0.739296...]), array([0.780017..., 0.819239..., 0.477828...]))\n >>> est.coef_(T=1)\n array([0.449779..., 0.004807..., 0.061954...])\n >>> est.coef__interval(T=1)\n (array([ 0.242194... 
, -0.190825..., -0.139646...]), array([0.657365..., 0.200440..., 0.263556...]))\n >>> est.intercept_(T=1)\n 0.88436847...\n >>> est.intercept__interval(T=1)\n (0.68683788..., 1.08189907...)\n\n Attributes\n ----------\n score_ : float\n The MSE in the final doubly robust potential outcome regressions, i.e.\n\n .. math::\n \\\\frac{1}{n_t} \\\\sum_{t=1}^{n_t} \\\\frac{1}{n} \\\\sum_{i=1}^n (Y_{i, t}^{DR} - \\\\hat{\\\\theta}_t(X_i))^2\n\n where n_t is the number of treatments (excluding control).\n\n If `sample_weight` is not None at fit time, then a weighted average across samples is returned.\n\n \"\"\"\n\n def __init__(self,\n model_propensity='auto',\n model_regression='auto',\n featurizer=None,\n fit_cate_intercept=True,\n alpha='auto',\n max_iter=1000,\n tol=1e-4,\n min_propensity=1e-6,\n categories='auto',\n n_splits=2, random_state=None):\n self.fit_cate_intercept = fit_cate_intercept\n model_final = DebiasedLasso(\n alpha=alpha,\n fit_intercept=fit_cate_intercept,\n max_iter=max_iter,\n tol=tol)\n super().__init__(model_propensity=model_propensity,\n model_regression=model_regression,\n model_final=model_final,\n featurizer=featurizer,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n n_splits=n_splits,\n random_state=random_state)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n,) vector of length n\n Outcomes for each sample\n T: (n,) vector of length n\n Treatments for each sample\n X: optional(n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional(n, d_w) matrix or None (Default=None)\n Controls for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each samples\n sample_var: optional(n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the n_splits argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports ``'bootstrap'``\n (or an instance of :class:`.BootstrapInference`) and ``'debiasedlasso'``\n (or an instance of :class:`.LinearModelInferenceDiscrete`).\n\n Returns\n -------\n self: DRLearner instance\n \"\"\"\n # Replacing fit from DRLearner, to add debiasedlasso inference in docstring\n # TODO: support sample_var\n if sample_weight is not None and inference is not None:\n warn(\"This estimator does not yet support sample variances and inference does not take \"\n \"sample variances into account. This feature will be supported in a future release.\")\n Y, T, X, W, sample_weight, sample_var = check_input_arrays(Y, T, X, W, sample_weight, sample_var)\n check_high_dimensional(X, T, threshold=5, featurizer=self.featurizer,\n discrete_treatment=self._discrete_treatment,\n msg=\"The number of features in the final model (< 5) is too small for a sparse model. 
\"\n \"We recommend using the LinearDRLearner for this low-dimensional setting.\")\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=None, groups=groups,\n inference=inference)\n\n @property\n def multitask_model_cate(self):\n # Replacing this method which is invalid for this class, so that we make the\n # docstring empty and not appear in the docs.\n return super().multitask_model_cate\n\n @property\n def model_final(self):\n return super().model_final._model_final\n\n @property\n def fitted_models_final(self):\n return super().model_final.models_cate\n\n\nclass ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner):\n \"\"\" Instance of DRLearner with a :class:`~econml.sklearn_extensions.ensemble.SubsampledHonestForest`\n as a final model, so as to enable non-parametric inference.\n\n Parameters\n ----------\n model_propensity : scikit-learn classifier\n Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.\n Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,\n where T is a shape (n, ) array.\n\n model_regression : scikit-learn regressor\n Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)\n concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and\n `predict` methods. If different models per treatment arm are desired, see the\n :class:`~econml.utilities.MultiModelWrapper` helper class.\n\n min_propensity : float, optional, default ``1e-6``\n The minimum propensity at which to clip propensity estimates to avoid dividing by zero.\n\n categories: 'auto' or list, default 'auto'\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n n_crossfit_splits: int, cross-validation generator or an iterable, optional (Default=2)\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`cv splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all\n W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.\n\n n_estimators : integer, optional (default=1000)\n The total number of trees in the forest. The forest consists of a\n forest of sqrt(n_estimators) sub-forests, where each sub-forest\n contains sqrt(n_estimators) trees.\n\n criterion : string, optional (default=\"mse\")\n The function to measure the quality of a split. Supported criteria\n are \"mse\" for the mean squared error, which is equal to variance\n reduction as feature selection criterion, and \"mae\" for the mean\n absolute error.\n\n max_depth : integer or None, optional (default=None)\n The maximum depth of the tree. 
If None, then nodes are expanded until\n all leaves are pure or until all leaves contain less than\n min_samples_split samples.\n\n min_samples_split : int, float, optional (default=2)\n The minimum number of splitting samples required to split an internal node.\n\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a fraction and\n `ceil(min_samples_split * n_samples)` are the minimum\n number of samples for each split.\n\n min_samples_leaf : int, float, optional (default=1)\n The minimum number of samples required to be at a leaf node.\n A split point at any depth will only be considered if it leaves at\n least ``min_samples_leaf`` splitting samples in each of the left and\n right branches. This may have the effect of smoothing the model,\n especially in regression. After construction the tree is also pruned\n so that there are at least min_samples_leaf estimation samples on\n each leaf.\n\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a fraction and\n `ceil(min_samples_leaf * n_samples)` are the minimum\n number of samples for each node.\n\n min_weight_fraction_leaf : float, optional (default=0.)\n The minimum weighted fraction of the sum total of weights (of all\n splitting samples) required to be at a leaf node. Samples have\n equal weight when sample_weight is not provided. After construction\n the tree is pruned so that the fraction of the sum total weight\n of the estimation samples contained in each leaf node is at\n least min_weight_fraction_leaf\n\n max_features : int, float, string or None, optional (default=\"auto\")\n The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=n_features`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\n Note: the search for a split does not stop until at least one\n valid partition of the node samples is found, even if it requires to\n effectively inspect more than ``max_features`` features.\n\n max_leaf_nodes : int or None, optional (default=None)\n Grow trees with ``max_leaf_nodes`` in best-first fashion.\n Best nodes are defined as relative reduction in impurity.\n If None then unlimited number of leaf nodes.\n\n min_impurity_decrease : float, optional (default=0.)\n A node will be split if this split induces a decrease of the impurity\n greater than or equal to this value.\n\n The weighted impurity decrease equation is the following::\n\n N_t / N * (impurity - N_t_R / N_t * right_impurity\n - N_t_L / N_t * left_impurity)\n\n where ``N`` is the total number of split samples, ``N_t`` is the number of\n split samples at the current node, ``N_t_L`` is the number of split samples in the\n left child, and ``N_t_R`` is the number of split samples in the right child.\n\n ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\n if ``sample_weight`` is passed.\n\n subsample_fr : float or 'auto', optional (default='auto')\n The fraction of the half-samples that are used on each tree. 
Each tree\n will be built on subsample_fr * n_samples/2.\n\n If 'auto', then the subsampling fraction is set to::\n\n (n_samples/2)**(1-1/(2*n_features+2))/(n_samples/2)\n\n which is sufficient to guarantee asymptotically valid inference.\n\n honest : boolean, optional (default=True)\n Whether to use honest trees, i.e. half of the samples are used for\n creating the tree structure and the other half for the estimation at\n the leaves. If False, then all samples are used for both parts.\n\n n_jobs : int or None, optional (default=None)\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``None`` means 1 unless in a :func:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n verbose : int, optional (default=0)\n Controls the verbosity when fitting and predicting.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random`.\n \"\"\"\n\n def __init__(self,\n model_regression, model_propensity,\n min_propensity=1e-6,\n categories='auto',\n n_crossfit_splits=2,\n n_estimators=1000,\n criterion=\"mse\",\n max_depth=None,\n min_samples_split=5,\n min_samples_leaf=5,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n subsample_fr='auto',\n honest=True,\n n_jobs=None,\n verbose=0,\n random_state=None):\n model_final = SubsampledHonestForest(n_estimators=n_estimators,\n criterion=criterion,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n subsample_fr=subsample_fr,\n honest=honest,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose)\n super().__init__(model_regression=model_regression, model_propensity=model_propensity,\n model_final=model_final, featurizer=None,\n multitask_model_final=False,\n min_propensity=min_propensity,\n categories=categories,\n n_splits=n_crossfit_splits, random_state=random_state)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X=None, W=None, *, sample_weight=None, sample_var=None, groups=None, inference='auto'):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates functions τ(·,·,·), ∂τ(·,·).\n\n Parameters\n ----------\n Y: (n × d_y) matrix or vector of length n\n Outcomes for each sample\n T: (n × dₜ) matrix or vector of length n\n Treatments for each sample\n X: optional (n × dₓ) matrix\n Features for each sample\n W: optional (n × d_w) matrix\n Controls for each sample\n sample_weight: optional (n,) vector\n Weights for each row\n sample_var: optional (n, n_y) vector\n Variance of sample, in case it corresponds to summary of many samples. 
Currently\n not in use by this method (as inference method does not require sample variance info).\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the n_splits argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n inference: string, `Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`) and 'blb'\n (for Bootstrap-of-Little-Bags based inference)\n\n Returns\n -------\n self\n \"\"\"\n return super().fit(Y, T, X=X, W=W,\n sample_weight=sample_weight, sample_var=None, groups=groups,\n inference=inference)\n\n def multitask_model_cate(self):\n # Replacing to remove docstring\n super().multitask_model_cate()\n\n @property\n def model_final(self):\n return super().model_final._model_final\n\n @property\n def fitted_models_final(self):\n return super().model_final.models_cate\n","sub_path":"econml/drlearner.py","file_name":"drlearner.py","file_ext":"py","file_size_in_byte":59356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"330191582","text":"import sys\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nsys.stdin.reconfigure(encoding='utf-8-sig')\ncolors = ['lawngreen', 'g', 'teal', 'cyan', 'deepskyblue', 'dodgerblue', 'navy', 'indigo', 'violet', 'm', 'b']\nmatplotlib.use('gtk3agg')\n\nfor line in sys.stdin:\n if line[0] != '@':\n print(line, end='')\n elif line[1] == 'W':\n n, *coord = line[2:].split()\n n, (x, y) = int(n), map(float, coord)\n\n if n == -1:\n plt.plot(x, y, marker='.', mfc='black', mew=0, alpha=0.5, label=n)\n else:\n plt.plot(x, y, marker='.', mfc=colors[n % 11], mew=0, alpha=0.5, label=n)\n \n elif line[1] == 'E':\n n, *coord = line[2:].split()\n n, (x1, y1, x2, y2) = int(n), map(float, coord)\n dx, dy = x2-x1, y2-y1\n \n plt.plot(x1, y1, 'yo', alpha=0.5)\n plt.plot(x2, y2, 'yo', alpha=0.5)\n plt.arrow(x1, y1, dx, dy, length_includes_head=True, width=0.1, fc='black', \n shape='left', label='1', alpha=0.2, ec=None)\n plt.annotate(n, (x1 + dx/2, y1 + dy/2), c='red')\n\nplt.show()\n","sub_path":"COMP20003 A1 code/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"37676878","text":"from django.db import models\nfrom target_object_database.models import TargetObject\nfrom hx_lti_initializer.models import LTICourse\nimport uuid\n\n\nclass AssignmentTargets(models.Model):\n assignment = models.ForeignKey(\n \"Assignment\",\n verbose_name='Assignment',\n )\n target_object = models.ForeignKey(\n TargetObject,\n verbose_name='Source Material',\n unique=False,\n )\n order = models.IntegerField(\n verbose_name='Order',\n )\n target_external_css = models.CharField(\n max_length=255,\n blank=True,\n help_text='Only input a URL to an externally hosted CSS file.'\n )\n target_instructions = models.TextField(\n blank=True,\n null=True,\n help_text='Add instructions for this object in this assignment.'\n )\n target_external_options = models.TextField(\n blank=True,\n null=True,\n )\n\n class Meta:\n verbose_name = \"Assignment Target\"\n verbose_name_plural = \"Assignment Targets\"\n ordering = [\n 'order',\n ]\n\n def get_target_external_options_list(self):\n \"\"\"\n Returns a list of options that are saved to the target_external_options model attribute\n 
in CSV format.\n \n Notes:\n - since the model attribute could be null in the database, we have to\n check if it's None before trying parse it.\n - this field does not contain user-supplied values, so we don't need industrial-strength\n CSV parsing.\n \"\"\"\n if self.target_external_options is None:\n return []\n return self.target_external_options.split(',') \n\n def get_view_type_for_mirador(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) == 1:\n return \"ImageView\"\n else:\n return options[0] if options[0] != '' else \"ImageView\"\n\n def get_canvas_id_for_mirador(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) == 1:\n return None\n else:\n return options[1] if options[1] != '' else None\n\n def get_dashboard_hidden(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) < 3:\n return \"false\"\n else:\n return options[2] if options[2] != '' else \"false\"\n\n def get_transcript_hidden(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) < 4:\n return \"false\"\n else:\n return options[3] if options[3] != '' else \"false\"\n\n def get_transcript_download(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) < 5:\n return \"false\"\n else:\n return options[4] if options[4] != '' else \"false\"\n\n def get_video_download(self):\n \"\"\"\n \"\"\"\n options = self.get_target_external_options_list()\n if len(options) < 6:\n return \"false\"\n else:\n return options[5] if options[5] != '' else \"false\"\n\n\nclass Assignment(models.Model):\n \"\"\"\n This object will contain the objects and settings for the annotation tool\n \"\"\"\n\n assignment_id = models.CharField(\n max_length=100,\n blank=True,\n unique=True,\n default=uuid.uuid4\n )\n assignment_name = models.CharField(\n max_length=255,\n blank=False,\n default=\"No Assignment Name Given\"\n )\n assignment_objects = models.ManyToManyField(\n TargetObject,\n through=\"AssignmentTargets\"\n )\n annotation_database_url = models.CharField(max_length=255)\n annotation_database_apikey = models.CharField(max_length=255)\n annotation_database_secret_token = models.CharField(max_length=255)\n include_instructor_tab = models.BooleanField(\n help_text=\"Include a tab for instructor annotations.\",\n default=False\n )\n include_mynotes_tab = models.BooleanField(\n help_text=\"Include a tab for user's annotations. Warning: Turning this off will not allow students to make annotations.\",\n default=True\n )\n include_public_tab = models.BooleanField(\n help_text=\"Include a tab for public annotations. Used for private annotations. 
If you want users to view each other's annotations.\",\n default=True\n )\n allow_highlights = models.BooleanField(\n help_text=\"Allow predetermined tags with colors.\",\n default=False\n )\n highlights_options = models.CharField(\n max_length=255,\n blank=True\n )\n allow_touch = models.BooleanField(\n help_text=\"Allow touch devices to use tool (warning, experimental).\",\n default=False\n )\n pagination_limit = models.IntegerField(\n help_text=\"How many annotations should show up when you hit the 'More' button?\" # noqa\n )\n allow_flags = models.BooleanField(\n help_text=\"Allow users to flag items as inappropriate/offensive.\",\n default=False\n )\n\n TABS = (\n ('Instructor', 'Instructor'),\n ('MyNotes', 'My Notes'),\n ('Public', 'Public'),\n )\n\n default_tab = models.CharField(\n choices=TABS,\n default=\"Public\",\n max_length=20\n )\n course = models.ForeignKey(LTICourse)\n hidden = models.BooleanField(default=False)\n\n def __str__(self):\n return self.assignment_name\n\n def __unicode__(self):\n return u\"%s\" % self.assignment_name\n\n def object_before(self, id):\n if len(self.assignment_objects.all()) > 1:\n try:\n obj = TargetObject.objects.get(pk=id)\n assignment_target = AssignmentTargets.objects.get(\n assignment=self,\n target_object=obj\n )\n if assignment_target.order == 1:\n return None\n else:\n new_order = assignment_target.order-1\n return AssignmentTargets.objects.get(\n assignment=self,\n order=new_order\n )\n except:\n return None\n return None\n\n def object_after(self, id):\n if len(self.assignment_objects.all()) > 1:\n try:\n obj = TargetObject.objects.get(pk=id)\n assignment_target = AssignmentTargets.objects.get(\n assignment=self,\n target_object=obj\n )\n if assignment_target.order == len(self.assignment_objects.all()): # noqa\n return None\n else:\n new_order = assignment_target.order+1\n return AssignmentTargets.objects.get(\n assignment=self,\n order=new_order\n )\n except:\n return None\n return None\n","sub_path":"hx_lti_assignment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429746072","text":"'''\nmu, lambda\nCreated on 09/09/2014\n@author: azu\n'''\nfrom practices.first.evolutionaryStrategies.fitness import *\n\n\ndef initialize(number):\n return [[np.float(random.gauss(0, 100)) for i in range(number)] for u in range(mu)]\n\n\ndef mutate(variables, generation, numberOfVariables):\n return [[variables[i] + sigma[generation] * random.gauss(0, 1) for i in range(numberOfVariables)] for u in\n range(mu)]\n\n\ndef success(replacement, generation, numberOfVariables):\n ps = replacement / float(generation)\n if (generation % numberOfVariables == 0):\n if (ps > 1 / 5.0):\n return sigma[generation - numberOfVariables] * 0.817\n if (ps < 1 / 5.0):\n return sigma[generation - numberOfVariables] / 0.817\n if (ps == 1 / 5.0):\n return sigma[generation - numberOfVariables]\n else:\n return sigma[generation - 1]\n\n\ndef select(fitnessArray, mode):\n f = fitnessArray[:]\n f.sort()\n middleValue = f.pop(len(f) / 2)\n secondBestValue = f.pop(1)\n selected = {\n 'best': fitnessArray.index(min(fitnessArray)),\n '2nd': fitnessArray.index(secondBestValue),\n 'middle': fitnessArray.index(middleValue),\n 'worst': fitnessArray.index(max(fitnessArray))}\n return selected[mode]\n\n\ndef muCommaLambda(func):\n print(\"\\nES: u+1 \\tFunction: %s\" % (func))\n generation = 0\n replacement = 0\n ps = 0\n num = numberOfVariables[func]\n 
comparison = 1\n    variables = initialize(num)\n    fitnessArray = [function[func](variables[u]) for u in range(mu)]\n    best = select(fitnessArray, 'best')\n    while (generation < maxGenerations and sigma[generation] > epsilon and min(fitnessArray) > float('-inf')):\n        best = select(fitnessArray, 'best')\n        actualBest = fitnessArray[best]\n        offspring = mutate(variables[best], generation, num)\n        fitnessSon = [function[func](offspring[u]) for u in range(mu)]\n        variables = offspring[:]\n        fitnessArray = fitnessSon\n        if (actualBest > min(fitnessArray)):\n            replacement += 1\n            comparison = abs(actualBest - min(fitnessArray))\n        generation += 1\n        if (generation < maxGenerations):\n            sigma[generation] = success(replacement, generation, num)\n        print(variables[best], fitnessArray[best], sigma[generation], generation, comparison)\n    if (num > 1):\n        print(imageMaker(number_of_variables=num, function_id=func, name=str(func) + \"_muCommaLambda\",\n                         point=([variables[best][0]], [variables[best][1]], [fitnessArray[best]])))\n    else:\n        print(imageMaker(number_of_variables=num, function_id=func, name=str(func) + \"_muCommaLambda\",\n                         point=(variables[best], [fitnessArray[best]])))\n    return \"Vars: %s Fitness: %s Generations: %d\" % (variables[best], fitnessArray[best], generation)\n\n    # muCommaLambda(0)","sub_path":"practices/first/evolutionaryStrategies/muCommaLambda.py","file_name":"muCommaLambda.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329595664","text":"# 5.\tThe user enters two letters. Determine the positions of the letters\n# in the alphabet and how many letters lie between them.\n\nimport string\n\nalphabet = list(string.ascii_lowercase)\n\nfirst_letter = input(\"Type a first letter: \")\nsecond_letter = input(\"Type a second letter: \")\n\nif len(first_letter) == 1 and len(second_letter) == 1 \\\n        and alphabet.count(first_letter) == 1 and alphabet.count(second_letter) == 1:\n    print(f\"Position of the letter '{first_letter}' is {alphabet.index(first_letter) + 1}.\")\n    print(f\"Position of the letter '{second_letter}' is {alphabet.index(second_letter) + 1}.\")\n\n    if second_letter < first_letter:\n        temp_value = first_letter\n        first_letter = second_letter\n        second_letter = temp_value\n\n    print(f\"Letters between them: {alphabet.index(second_letter) - alphabet.index(first_letter)}.\")\nelse:\n    print(\"Invalid input!\")\n","sub_path":"Lesson_1/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"249845828","text":"import friendly_traceback\n\n\ndef test_lookup_error():\n    try:\n        # LookupError is the base class for KeyError and IndexError.\n        # It should normally not be raised by user code,\n        # other than possibly codecs.lookup(), which is why we raise\n        # it directly here for our example.\n        raise LookupError\n    except Exception:\n        friendly_traceback.explain(redirect=\"capture\")\n    result = friendly_traceback.get_output()\n    assert \"LookupError\" in result\n    if friendly_traceback.get_lang() == 'en':\n        assert \"LookupError is the base class for\" in result\n    return result\n\n\nif __name__ == \"__main__\":\n    print(test_lookup_error())\n","sub_path":"tests/except/test_lookup_error.py","file_name":"test_lookup_error.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"409238855","text":"# 
Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\n\nfrom oslo_utils import timeutils\nimport six\nfrom sqlalchemy import MetaData, Table\n\nfrom cinder.volume import volume_types\n\n\ndef upgrade(migrate_engine):\n    \"\"\"Create default volume type\"\"\"\n\n    meta = MetaData(bind=migrate_engine)\n    now = timeutils.utcnow()\n\n    # create a default volume type during cinder DB migration\n    vtypes = Table(\"volume_types\", meta, autoload=True)\n    # chained .where() calls AND the clauses together at the SQL level;\n    # combining them with Python's `and`/`is` would not build a SQL filter\n    results = list(vtypes.select()\n                   .where(vtypes.c.name == volume_types.DEFAULT_VOLUME_TYPE)\n                   .where(vtypes.c.deleted == False)  # noqa: E712\n                   .execute())\n    if not results:\n        vtype_id = six.text_type(uuid.uuid4())\n        volume_type_dict = {\n            'id': vtype_id,\n            'name': volume_types.DEFAULT_VOLUME_TYPE,\n            'description': 'Default Volume Type',\n            'created_at': now,\n            'updated_at': now,\n            'deleted': False,\n            'is_public': True,\n        }\n        vtype = vtypes.insert()\n        vtype.execute(volume_type_dict)\n","sub_path":"cinder/db/sqlalchemy/migrate_repo/versions/132_create_default_volume_type.py","file_name":"132_create_default_volume_type.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"447095838","text":"import pathlib\nthis_path = pathlib.Path().absolute()\ndata_path = this_path.parent / \"data\"\n\nimport pandas as pd\n\ndef get_dataframes():\n    '''\n    function to retrieve the data for this project as dataframes\n    \n    --returns:\n    a tuple containing pandas dataframes in the format (x_train, x_test, y_train)\n    '''\n    x_train_filename = 'Pump_it_Up_Data_Mining_the_Water_Table_-_Training_set_values.csv'\n    x_test_filename = 'Pump_it_Up_Data_Mining_the_Water_Table_-_Test_set_values.csv'\n    y_train_filename = 'Pump_it_Up_Data_Mining_the_Water_Table_-_Training_set_labels.csv'\n    \n    x_train = __open_local_csv(x_train_filename)\n    x_test = __open_local_csv(x_test_filename)\n    y_train = __open_local_csv(y_train_filename)\n    \n    return (x_train, x_test, y_train)\n\ndef __open_local_csv(filename):\n    '''\n    checks that the csv filepath exists for given filename and returns a dataframe containing its\n    values if it does exist\n    \n    --parameters:\n    \n    filename: should be a string containing the name of the csv to be opened\n    \n    --returns:\n    \n    pandas DataFrame object if csv_path exists, else prints error msg and returns None\n    '''\n    \n    csv_path = data_path / filename\n    if csv_path.exists():\n        return pd.read_csv(csv_path, index_col = 'id')\n    else:\n        print(f'the specified filepath does not exist: {csv_path}')\n        return None\n\ndef get_strict_features():\n    '''\n    returns list of features used for strict dataset\n    '''\n    strict_features = ['amount_tsh', 'gps_height', 'installer', 'basin', 'region',\n                       'lga', 'population', 'construction_year', 'extraction_type_group', 'payment_type',\n                       'quality_group', 'quantity', 'source_type', 'waterpoint_type']\n    return strict_features\n\ndef get_loose_features():\n    '''\n    returns list of features used for loose dataset\n    '''\n    loose_features = ['amount_tsh', 'gps_height', 'installer', 
'basin', 'region', 'lga', 'ward', 'population',\n 'public_meeting', 'scheme_management', 'permit', 'construction_year', 'extraction_type_group',\n 'payment_type', 'water_quality', 'quantity', 'source', 'waterpoint_type']\n return loose_features\n\ndef get_numeric_features(f_names):\n '''\n returns list of numeric features within given feature set\n '''\n numeric = ['amount_tsh', 'population', 'construction_year', 'gps_height']\n num_features = [x for x in f_names if x in numeric]\n return num_features\n\ndef get_categorical_features(f_names):\n '''\n returns list of categorical features within given feature set\n '''\n categorical = ['installer', 'basin', 'region', 'lga', 'ward',\n 'public_meeting', 'scheme_management', 'permit','extraction_type_group',\n 'payment_type', 'water_quality', 'quantity', 'source',\n 'waterpoint_type', 'source_type', 'quality_group']\n cat_features = [x for x in f_names if x in categorical]\n return cat_features","sub_path":"src/data_functions.py","file_name":"data_functions.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"368013567","text":"import numpy as np\n@nrp.MapSpikeSink(\"motors_down_stage_one\", nrp.brain.down_stage_one, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_left_stage_one\", nrp.brain.left_stage_one, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_up_stage_one\", nrp.brain.up_stage_one, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_right_stage_one\", nrp.brain.right_stage_one, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_down_stage_two\", nrp.brain.down_stage_two, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_left_stage_two\", nrp.brain.left_stage_two, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_up_stage_two\", nrp.brain.up_stage_two, nrp.leaky_integrator_alpha)\n@nrp.MapSpikeSink(\"motors_right_stage_two\", nrp.brain.right_stage_two, nrp.leaky_integrator_alpha)\n@nrp.MapRobotPublisher('eye_tilt_pos', Topic('/robot/eye_tilt/pos', std_msgs.msg.Float64))\n@nrp.MapRobotPublisher('eye_pan_pos', Topic('/robot/left_eye_pan/pos', std_msgs.msg.Float64))\n@nrp.MapRobotPublisher('eye_tilt_vel', Topic('/robot/eye_tilt/vel', std_msgs.msg.Float64))\n@nrp.MapRobotPublisher('eye_pan_vel', Topic('/robot/left_eye_pan/vel', std_msgs.msg.Float64))\n@nrp.MapRobotSubscriber(\"joint_state_sub\", Topic(\"/robot/joints\", sensor_msgs.msg.JointState))\n@nrp.MapRobotSubscriber(\"shuffle_status_sub\", Topic(\"/group_3/shuffling\", std_msgs.msg.Bool))\n@nrp.Neuron2Robot()\ndef center_on_green(t, motors_down_stage_one, motors_left_stage_one, motors_up_stage_one, motors_right_stage_one, \n motors_down_stage_two, motors_left_stage_two, motors_up_stage_two, motors_right_stage_two,\n eye_tilt_pos, eye_pan_pos, eye_tilt_vel, eye_pan_vel, joint_state_sub, shuffle_status_sub):\n\n stage_two = shuffle_status_sub.value.data if shuffle_status_sub.value is not None else False\n\n if not stage_two:\n # Stage one: Velocity-controlled motion to green ball\n scaling_factor = 3\n tilt = scaling_factor * (motors_up_stage_one.voltage - motors_down_stage_one.voltage)\n pan = scaling_factor * ( motors_left_stage_one.voltage - motors_right_stage_one.voltage)\n eye_tilt_vel.send_message(std_msgs.msg.Float64(tilt))\n eye_pan_vel.send_message(std_msgs.msg.Float64(pan))\n\n else:\n # Stage two: Position-controlled motion to red cup\n scaling_factor = 0.03\n joint_names = joint_state_sub.value.name\n joint_positions = 
joint_state_sub.value.position\n current_tilt = joint_positions[joint_names.index(\"eye_tilt\")]\n current_pan = joint_positions[joint_names.index(\"left_eye_pan\")]\n\n tilt = current_tilt + scaling_factor * (motors_up_stage_two.voltage - motors_down_stage_two.voltage)\n pan = current_pan + scaling_factor * ( motors_left_stage_two.voltage - motors_right_stage_two.voltage)\n\n eye_tilt_pos.send_message(std_msgs.msg.Float64(tilt))\n eye_pan_pos.send_message(std_msgs.msg.Float64(pan))\n","sub_path":"follow_object.py","file_name":"follow_object.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"576986963","text":"a=15\narr = []\ni=2\nwhile(i=', datetime.datetime(int(year), int(month), int(day)))\n\n docs = ref.stream()\n\n cache = {}\n count = 0\n\n for doc in docs:\n count = count + 1\n data = doc.to_dict()\n\n fixtureID = data['fixture']\n print(\"ID:\", fixtureID)\n\n if fixtureID in cache:\n winner = cache[fixtureID]\n else:\n winner = get_winner_from_fixture(fixtureID)\n cache[fixtureID] = winner\n\n print(\"query count:\", count, \"cache size:\", len(cache))\n\n if winner is not None:\n hit = winner == data['tipValue']\n\n doc_ref = db.collection(u'eventTips').document(doc.id)\n\n doc_ref.set({\n 'isHit': hit\n }, merge=True)\n\n retval = 'date:{0}-{1}-{2}, query count:{3}, cache size:{4}'.format(year, month, day, count, len(cache))\n return retval\n\n\ndef DB_name_to_CSV_name(name):\n \"\"\"\n string in expression is from DB\n string in return is from csv files\n \"\"\"\n\n if name == 'Wolves':\n return 'Wolverhampton Wanderers'\n if name == 'Atletico Madrid':\n return 'Atlético Madrid'\n if name == 'Athletic Club':\n return 'Athletic Bilbao'\n if name == 'Deportivo La Coruna':\n return 'Deportivo La Coruña'\n if name == 'Malaga':\n return 'Málaga CF'\n if name == 'Alaves':\n return 'CD Alavés'\n if name == 'Leganes':\n return 'CD Leganés'\n if name == 'Sporting Gijon':\n return 'Sporting Gijón'\n if name == 'Sheffield Utd':\n return 'Sheffield United'\n if name == 'QPR':\n return 'Queens Park Rangers'\n if name == 'Almeria':\n return 'UD Almería'\n if name == 'Cordoba':\n return 'Córdoba CF'\n if name == 'Cadiz':\n return 'Cádiz CF'\n return name\n\n\n# to deploy function from console: 'gcloud functions deploy storage'\ndef storage(request):\n request_args = request.args\n\n if request_args and 'id' in request_args:\n fixtureID = request_args['id']\n\n db = google.cloud.firestore.Client()\n\n doc_ref = db.collection(u'fixtures').document(fixtureID)\n doc = doc_ref.get()\n data = doc.to_dict()\n if data is None:\n return \"fixture id not found\"\n else:\n print('fixture %s found' % doc.id)\n\n home = data['teams']['home']['name']\n away = data['teams']['away']['name']\n league = 'PL' if data['league']['id'] == 39 else 'PD'\n season = data['league']['season']\n season_round = int(data['league']['round'].split(\"-\")[1])\n print(\"season_round:\", season_round)\n\n full_league = 'Premier League' if league == 'PL' else 'Primera División'\n file_name = '%s %d-%d - %d.csv' % (full_league, season, season + 1, int(season_round) - 1)\n print(\"file_name:\", file_name)\n if season_round != 1 and season_round != 0:\n client = google.cloud.storage.Client()\n bucket = client.get_bucket('better-gsts.appspot.com')\n blob = bucket.get_blob(file_name)\n file_text = blob.download_as_text()\n # print(file_text)\n lines = file_text.split(\"\\r\\n\")\n # print(\"lines:\", lines)\n for line in lines:\n if 
line.__contains__(DB_name_to_CSV_name(home)):\n # print(\"line: \", line)\n line_values = line.split(',')\n HR = line_values[0]\n HW = line_values[3]\n HD = line_values[4]\n HL = line_values[5]\n HGF = line_values[6].split(':')[0]\n HGA = line_values[6].split(':')[1]\n HS = line_values[8]\n if line.__contains__(DB_name_to_CSV_name(away)):\n # print(\"line: \", line)\n line_values = line.split(',')\n AR = line_values[0]\n AW = line_values[3]\n AD = line_values[4]\n AL = line_values[5]\n AGF = line_values[6].split(':')[0]\n AGA = line_values[6].split(':')[1]\n AS = line_values[8]\n\n else:\n HR = HW = HL = HD = HGF = HGA = HS = AR = AW = AL = AD = AGF = AGA = AS = '0'\n\n vector = '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % (\n HR, HW, HL, HD, HGF, HGA, HS, AR, AW, AL, AD, AGF, AGA, AS)\n\n vector_np = np.array([np.fromstring(vector, dtype=int, sep=',')])\n print(\"vector_np:\", vector_np)\n\n loaded_model = pickle.load(bucket.get_blob(\"model.sav\").open(mode='rb'))\n result = loaded_model.predict_proba(vector_np)[0]\n print(\"model result:\", result)\n\n doc_ref.set({\n 'vector': vector,\n 'prediction': result.tolist()\n }, merge=True)\n\n return str(result)\n else:\n return \"Error: bad args\"\n\n\ndef hello_http(request):\n \"\"\"HTTP Cloud Function.\n Args:\n request (flask.Request): The request object.\n \n Returns:\n The response text, or any set of values that can be turned into a\n Response object using `make_response`\n .\n \"\"\"\n request_json = request.get_json(silent=True)\n request_args = request.args\n\n if request_json and 'name' in request_json:\n name = request_json['name']\n elif request_args and 'name' in request_args:\n name = request_args['name']\n else:\n name = 'World'\n return 'Hello {}!'.format(escape(name))\n","sub_path":"cloud_scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"199209788","text":"import argparse\nimport configparser\nimport os\nimport shutil\nimport numpy as np\n\nimport dataset\nimport evaluate\nimport word2vec\n\nnp.set_printoptions(precision=3)\nos.environ['CHAINER_TYPE_CHECK'] = '0'\nimport chainer\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('config_file')\n parser.add_argument('--batch', '-b', type=int, default=30)\n parser.add_argument('--epoch', '-e', type=int, default=10)\n parser.add_argument('--pretrain_epoch', '-pe', type=int, default=10)\n parser.add_argument('--gpu', '-g', type=int, default=-1)\n parser.add_argument('--model', '-m', choices=['multi', 'label', 'encdec', 'pretrain'], default='multi')\n parser.add_argument('--pretrain_w2v', '-p', action='store_true')\n parser.add_argument('--data_path', '-d', choices=['local', 'server', 'test'], default='server')\n parser.add_argument('--load_model', '-l', type=str)\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n config = configparser.ConfigParser()\n \"\"\"ARGS DETAIL\"\"\"\n config_file = args.config_file\n batch_size = args.batch\n n_epoch = args.epoch\n pretrain_epoch = args.pretrain_epoch\n gpu_id = args.gpu\n model_type = args.model\n pretrain_w2v = args.pretrain_w2v\n data_path = args.data_path\n load_model = args.load_model\n\n \"\"\"DIR PREPARE\"\"\"\n config.read(config_file)\n vocab_size = int(config['Parameter']['vocab_size'])\n coefficient = float(config['Parameter']['coefficient'])\n shuffle_data = bool(config['Parameter']['shuffle'])\n\n if pretrain_w2v:\n 
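        # the 'p' prefix below only tags the run directory as using pretrained word2vec embeddings; the numeric vocab_size is re-read from the config afterwards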
vocab_size = 'p' + str(vocab_size)\n\n if model_type == 'multi':\n if shuffle_data:\n base_dir = './pseudo_{}_{}_{}_c{}_shuffle/'.format(model_type, vocab_size, data_path[0], coefficient)\n else:\n base_dir = './pseudo_{}_{}_{}_c{}/'.format(model_type, vocab_size, data_path[0], coefficient)\n else:\n if shuffle_data:\n base_dir = './pseudo_{}_{}_{}_shuffle/'.format(model_type, vocab_size, data_path[0])\n else:\n base_dir = './pseudo_{}_{}_{}/'.format(model_type, vocab_size, data_path[0])\n model_save_dir = base_dir\n\n if not os.path.exists(base_dir):\n os.mkdir(base_dir)\n shutil.copyfile(config_file, base_dir + config_file)\n config_file = base_dir + config_file\n config.read(config_file)\n\n \"\"\"PARAMATER\"\"\"\n embed_size = int(config['Parameter']['embed_size'])\n hidden_size = int(config['Parameter']['hidden_size'])\n class_size = int(config['Parameter']['class_size'])\n dropout_ratio = float(config['Parameter']['dropout'])\n weight_decay = float(config['Parameter']['weight_decay'])\n gradclip = float(config['Parameter']['gradclip'])\n vocab_size = int(config['Parameter']['vocab_size'])\n valid_num = int(config['Parameter']['valid_num'])\n shuffle_data = bool(config['Parameter']['shuffle'])\n \"\"\"LOGGER\"\"\"\n log_file = model_save_dir + 'log.txt'\n logger = dataset.prepare_logger(log_file)\n logger.info(args) # 引数を記録\n logger.info('[Training start] logging to {}'.format(log_file))\n\n \"\"\"DATASET\"\"\"\n train_src_file = config[data_path]['train_src_file']\n train_trg_file = config[data_path]['train_trg_file']\n valid_src_file = config[data_path]['valid_src_file']\n valid_trg_file = config[data_path]['valid_trg_file']\n test_src_file = config[data_path]['single_src_file']\n test_trg_file = config[data_path]['single_trg_file']\n src_w2v_file = config[data_path]['src_w2v_file']\n trg_w2v_file = config[data_path]['trg_w2v_file']\n\n train_data = dataset.load_label_corpus_file(train_src_file, train_trg_file)\n qa_data_sub_lit = dataset.split_valid_data(train_data, valid_num)\n valid_data = dataset.load_label_corpus_file(valid_src_file, valid_trg_file)\n test_data = dataset.load_label_corpus_file(test_src_file, test_trg_file)\n test_data_sub_lit = dataset.split_valid_data(test_data, valid_num)\n\n \"\"\"VOCABULARY\"\"\"\n src_vocab, trg_vocab, sos, eos = dataset.prepare_vocab(base_dir, train_data, vocab_size, gpu_id)\n src_vocab_size = len(src_vocab.vocab)\n trg_vocab_size = len(trg_vocab.vocab)\n\n src_initialW, trg_initialW = None, None\n if pretrain_w2v:\n w2v = word2vec.Word2Vec()\n src_initialW, vector_size, src_match_word_count = w2v.make_initialW(src_vocab.vocab, src_w2v_file)\n trg_initialW, vector_size, trg_match_word_count = w2v.make_initialW(trg_vocab.vocab, trg_w2v_file)\n logger.info('Initialize w2v embedding. 
Match: src {}/{}, trg {}/{}'.format(src_match_word_count, src_vocab_size, trg_match_word_count, trg_vocab_size))\n\n logger.info('src_vocab size: {}, trg_vocab size: {}'.format(src_vocab_size, trg_vocab_size))\n\n evaluater = evaluate.Evaluate()\n\n \"\"\"GPU\"\"\"\n if gpu_id >= 0:\n logger.info('Use GPU')\n chainer.cuda.get_device_from_id(gpu_id).use()\n\n cross_valid_result = []\n for ite in range(1, valid_num + 1):\n model_valid_dir = base_dir + 'valid{}/'.format(ite)\n if not os.path.exists(model_valid_dir):\n os.mkdir(model_valid_dir)\n\n qa_train_data, qa_dev_data, qa_test_data = dataset.separate_train_dev_test(qa_data_sub_lit, ite)\n train_data, dev_data, test_data = dataset.separate_train_dev_test(test_data_sub_lit, ite)\n test_data_id = [t['id'] for t in test_data]\n\n qa_iter = dataset.Iterator(qa_train_data, src_vocab, trg_vocab, batch_size, gpu_id, sort=True, shuffle=True)\n valid_iter = dataset.Iterator(valid_data, src_vocab, trg_vocab, batch_size, gpu_id, sort=False, shuffle=False)\n train_iter = dataset.Iterator(train_data, src_vocab, trg_vocab, batch_size, gpu_id, sort=True, shuffle=True)\n dev_iter = dataset.Iterator(dev_data, src_vocab, trg_vocab, batch_size, gpu_id, sort=False, shuffle=False)\n test_iter = dataset.Iterator(test_data, src_vocab, trg_vocab, batch_size, gpu_id, sort=False, shuffle=False)\n\n qa_size = len(qa_train_data)\n train_size = len(train_data)\n logger.info('V{} ## QA:{}, train:{}, dev:{} ,test:{}'.format(ite, qa_size, train_size, len(dev_data), len(test_data)))\n\n \"\"\"MODEL\"\"\"\n if model_type == 'multi':\n model = model.Multi(src_vocab_size, trg_vocab_size, embed_size, hidden_size, class_size, dropout_ratio, coefficient, src_initialW, trg_initialW)\n elif model_type in ['label', 'pretrain']:\n model = model.Label(src_vocab_size, trg_vocab_size, embed_size, hidden_size, class_size, dropout_ratio, src_initialW, trg_initialW)\n else:\n model = model.EncoderDecoder(src_vocab_size, trg_vocab_size, embed_size, hidden_size, dropout_ratio, src_initialW, trg_initialW)\n\n if gpu_id >= 0:\n model.to_gpu()\n\n \"\"\"OPTIMIZER\"\"\"\n optimizer = chainer.optimizers.Adam()\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.GradientClipping(gradclip))\n optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))\n\n \"\"\"PRETRAIN\"\"\"\n if model_type == 'pretrain' and load_model is None:\n logger.info('Pre-train start')\n pretrain_loss_dic = {}\n for epoch in range(1, pretrain_epoch + 1):\n train_loss = 0\n for i, batch in enumerate(train_iter.generate(), start=1):\n try:\n loss = model.pretrain(*batch)\n train_loss += loss.data\n optimizer.target.cleargrads()\n loss.backward()\n optimizer.update()\n\n except Exception as e:\n logger.info('P{} ## train iter: {}, {}'.format(epoch, i, e))\n chainer.serializers.save_npz(model_save_dir + 'p_model_epoch_{}.npz'.format(epoch), model)\n\n \"\"\"EVALUATE\"\"\"\n valid_loss = 0\n for batch in valid_iter.generate():\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n valid_loss += model.pretrain(*batch).data\n logger.info('P{} ## train loss: {}, val loss:{}'.format(epoch, train_loss, valid_loss))\n pretrain_loss_dic[epoch] = valid_loss\n\n \"\"\"MODEL SAVE & LOAD\"\"\"\n best_epoch = min(pretrain_loss_dic, key=(lambda x: pretrain_loss_dic[x]))\n logger.info('best_epoch:{}, val loss: {}'.format(best_epoch, pretrain_loss_dic[best_epoch]))\n shutil.copyfile(model_save_dir + 'p_model_epoch_{}.npz'.format(best_epoch),\n model_save_dir + 'p_best_model.npz')\n 
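            # the checkpoint with the lowest validation loss has been copied to p_best_model.npz and is the starting point for the main training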
logger.info('Pre-train finish')\n\n if load_model:\n logger.info('load model: {}'.format(load_model))\n chainer.serializers.load_npz(base_dir + load_model, model)\n\n \"\"\"TRAIN\"\"\"\n epoch_info = {}\n for epoch in range(1, n_epoch + 1):\n train_loss = 0\n mix_train_iter = dataset.MixIterator(qa_iter, train_iter, seed=0, shuffle=shuffle_data)\n for i, batch in enumerate(mix_train_iter.generate(), start=1):\n try:\n loss = optimizer.target(*batch[0])\n train_loss += loss.data\n optimizer.target.cleargrads()\n loss.backward()\n optimizer.update()\n\n except Exception as e:\n logger.info('V{} ## E{} ## train iter: {}, {}'.format(ite, epoch, i, e))\n chainer.serializers.save_npz(model_valid_dir + 'model_epoch_{}.npz'.format(epoch), model)\n\n \"\"\"DEV\"\"\"\n labels, alignments = [], []\n for i, batch in enumerate(dev_iter.generate(), start=1):\n try:\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n _, label, align = model.predict(batch[0], sos, eos)\n except Exception as e:\n logger.info('V{} ## E{} ## dev iter: {}, {}'.format(ite, epoch, i, e))\n\n if model_type == 'multi':\n for l, a in zip(label, align):\n labels.append(chainer.cuda.to_cpu(l))\n alignments.append(chainer.cuda.to_cpu(a))\n elif model_type in ['label', 'pretrain']:\n for l in label:\n labels.append(chainer.cuda.to_cpu(l))\n else:\n for a in align:\n alignments.append(chainer.cuda.to_cpu(a))\n\n best_param_dic = evaluater.param_search(labels, alignments, dev_data)\n param = max(best_param_dic, key=lambda x: best_param_dic[x]['macro'])\n init, mix = evaluate.key_to_param(param)\n dev_score = round(best_param_dic[param]['macro'], 3)\n\n \"\"\"TEST\"\"\"\n outputs, labels, alignments = [], [], []\n for i, batch in enumerate(test_iter.generate(), start=1):\n try:\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n output, label, align = model.predict(batch[0], sos, eos)\n except Exception as e:\n logger.info('V{} ## E{} ## test iter: {}, {}'.format(ite, epoch, i, e))\n\n if model_type == 'multi':\n for l, a in zip(label, align):\n labels.append(chainer.cuda.to_cpu(l))\n alignments.append(chainer.cuda.to_cpu(a))\n elif model_type in ['label', 'pretrain']:\n for l in label:\n labels.append(chainer.cuda.to_cpu(l))\n else:\n for a in align:\n alignments.append(chainer.cuda.to_cpu(a))\n\n rate, count, tf_lit, macro, micro = evaluater.eval_param(labels, alignments, test_data, init, mix)\n test_macro_score = round(macro, 3)\n test_micro_score = round(micro, 3)\n logger.info('V{} ## E{} ## loss: {}, dev: {}, param: {}, micro: {}, macro: {}'.format(ite, epoch, train_loss, dev_score, param, test_micro_score, test_macro_score))\n\n epoch_info[epoch] = {\n 'id': test_data_id,\n 'label': labels,\n 'align': alignments,\n 'hypo': outputs,\n 'epoch': epoch,\n 'dev_score': dev_score,\n 'param': param,\n 'rate': rate,\n 'count': count,\n 'tf': tf_lit,\n 'macro': test_macro_score,\n 'micro': test_micro_score\n }\n dataset.save_output(model_valid_dir, epoch_info[epoch])\n\n \"\"\"MODEL SAVE\"\"\"\n best_epoch = max(epoch_info, key=(lambda x: epoch_info[x]['dev_score']))\n cross_valid_result.append(epoch_info[best_epoch])\n logger.info('V{} ## best_epoch: {}, dev: {}, micro: {}, macro: {}'.format(ite, best_epoch, epoch_info[best_epoch]['dev_score'], epoch_info[best_epoch]['micro'], epoch_info[best_epoch]['macro']))\n shutil.copyfile(model_valid_dir + 'model_epoch_{}.npz'.format(best_epoch), model_valid_dir + 'best_model.npz')\n\n logger.info('')\n\n ave_dev_score, ave_macro_score, ave_micro_score = 
0, 0, 0\n ave_test_score = [0 for _ in range(len(cross_valid_result[0]['rate']))]\n id_total, label_total, align_total, tf_total = [], [], [], []\n\n for v, r in enumerate(cross_valid_result, start=1):\n ave_dev_score += r['dev_score']\n ave_macro_score += r['macro']\n ave_micro_score += r['micro']\n for i, rate in enumerate(r['rate']):\n ave_test_score[i] += rate\n logger.info(' {}: e{}, {}\\tdev: {}, micro: {}, macro: {} {}'.format(v, r['epoch'], r['param'], r['dev_score'], r['micro'], dataset.float_to_str(r['rate']), r['macro']))\n\n id_total.extend(r['id'])\n label_total.extend(r['label'])\n align_total.extend(r['align'])\n tf_total.extend(r['tf'])\n ave_dev_score = round(ave_dev_score / valid_num, 3)\n ave_macro_score = round(ave_macro_score / valid_num, 3)\n ave_micro_score = round(ave_micro_score / valid_num, 3)\n ave_test_score = [ave_test_score[i] / valid_num for i in range(len(ave_test_score))]\n logger.info('dev: {}, micro: {}, macro: {} {}'.format(ave_dev_score, ave_micro_score, dataset.float_to_str(ave_test_score), ave_macro_score))\n\n label, align, tf = dataset.sort_multi_list(id_total, label_total, align_total, tf_total)\n dataset.save_list(base_dir + 'label.txt', label)\n dataset.save_list(base_dir + 'align.txt', align)\n dataset.save_list(base_dir + 'tf.txt', tf)\n\n\nif __name__ == '__main__':\n main()","sub_path":"train_pseudo.py","file_name":"train_pseudo.py","file_ext":"py","file_size_in_byte":14746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"460046837","text":"import sys\nfrom troposphere import GetAtt, Join, Output, Parameter, Ref, Template\nfrom troposphere.cognito import UserPool, SchemaAttribute, Policies, PasswordPolicy, AdminCreateUserConfig, \\\n UserPoolClient, IdentityPool, CognitoIdentityProvider, IdentityPoolRoleAttachment\nfrom troposphere.iam import Role, Policy\n\nenv = sys.argv[1]\n\nCOMPONENT_NAME = \"YaegarBooksCognito\"\n\nt = Template(COMPONENT_NAME)\n\nt.add_version(\"2010-09-09\")\n\nt.add_description(COMPONENT_NAME + \" stacks for env \" + env)\n\nuserPoolUsers = t.add_resource(\n UserPool(\n \"UserPool\" + COMPONENT_NAME + \"Users\",\n UserPoolName=\"UserPool\" + COMPONENT_NAME + \"Users\",\n AliasAttributes=[\"email\"],\n Schema=[SchemaAttribute(\n Name=\"email\",\n AttributeDataType=\"String\",\n Required=\"True\",\n Mutable=\"True\"\n )],\n Policies=Policies(\n PasswordPolicy=PasswordPolicy(\n MinimumLength=6,\n RequireLowercase=\"True\",\n RequireUppercase=\"True\",\n RequireNumbers=\"True\"\n )\n ),\n AdminCreateUserConfig=AdminCreateUserConfig(\n AllowAdminCreateUserOnly=\"False\",\n UnusedAccountValidityDays=7\n ),\n AutoVerifiedAttributes=[\"email\"],\n EmailVerificationMessage='''Welcome to YaegarBooks,\nThanks for registering for a YaegarBooks account, please click the link to verify your email address. 
{####}\n\nWelcome\nYaegarBooks''',\n EmailVerificationSubject=\"Your verification link\"\n )\n)\n\nuserPoolClientUsers = t.add_resource(\n UserPoolClient(\n \"UserPoolClient\" + COMPONENT_NAME + \"Users\",\n ClientName=\"UserPoolClient\" + COMPONENT_NAME + \"Users\",\n UserPoolId=Ref(userPoolUsers),\n GenerateSecret=\"False\"\n )\n)\n\nidentityPoolUsers = t.add_resource(\n IdentityPool(\n \"IdentityPool\" + COMPONENT_NAME + \"Users\",\n IdentityPoolName=\"IdentityPool\" + COMPONENT_NAME + \"Users\",\n AllowUnauthenticatedIdentities=False,\n CognitoIdentityProviders=[\n CognitoIdentityProvider(\n ClientId=Ref(userPoolClientUsers),\n ProviderName=GetAtt(userPoolUsers, \"ProviderName\")\n )\n ]\n )\n)\n\ncognitoUnAuthorizedRole = t.add_resource(\n Role(\n \"CognitoUnAuthorizedRole\",\n RoleName=\"Cognito_YaegarBooksUnauth_Role\",\n AssumeRolePolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": [\"cognito-identity.amazonaws.com\"]\n },\n \"Action\": [\"sts:AssumeRoleWithWebIdentity\"],\n \"Condition\": {\n \"StringEquals\": {\n \"cognito-identity.amazonaws.com:aud\": Ref(identityPoolUsers)\n },\n \"ForAnyValue:StringLike\": {\n \"cognito-identity.amazonaws.com:amr\": \"unauthenticated\"\n }\n }\n }]\n },\n Policies=[\n Policy(\n PolicyName=\"cognitounauth\",\n PolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": [\n \"mobileanalytics:PutEvents\",\n \"cognito-sync:*\"\n ],\n \"Resource\": [\"*\"]\n }]\n }\n )]\n )\n)\n\ncognitoAuthorizedRole = t.add_resource(\n Role(\n \"CognitoAuthorizedRole\",\n RoleName=\"Cognito_YaegarBooksAuth_Role\",\n AssumeRolePolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": [\"cognito-identity.amazonaws.com\"]\n },\n \"Action\": [\"sts:AssumeRoleWithWebIdentity\"],\n \"Condition\": {\n \"StringEquals\": {\n \"cognito-identity.amazonaws.com:aud\": Ref(identityPoolUsers)\n },\n \"ForAnyValue:StringLike\": {\n \"cognito-identity.amazonaws.com:amr\": \"authenticated\"\n }\n }\n }]\n },\n Policies=[\n Policy(\n PolicyName=\"cognitoauth\",\n PolicyDocument={\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": [\n \"mobileanalytics:PutEvents\",\n \"cognito-sync:*\",\n \"cognito-identity:*\"\n ],\n \"Resource\": [\"*\"]\n }]\n }\n )]\n )\n)\n\nidentityPoolRoleAttachment = t.add_resource(\n IdentityPoolRoleAttachment(\n \"IdentityPoolRoleAttachment\",\n IdentityPoolId=Ref(identityPoolUsers),\n Roles={\n \"authenticated\": GetAtt(cognitoAuthorizedRole, \"Arn\"),\n \"unauthenticated\": GetAtt(cognitoUnAuthorizedRole, \"Arn\")\n }\n )\n\n)\nt.add_output([\n Output(\n \"UserPoolArn\",\n Value=GetAtt(userPoolUsers, \"Arn\"),\n Description=\"UserPool arn for YaegarBooks\"\n ),\n Output(\n \"UserPoolClientId\",\n Value=Ref(userPoolClientUsers),\n Description=\"UserPoolClient id for YaegarBooks\"\n ),\n Output(\n \"IdentityPoolId\",\n Value=Ref(identityPoolUsers),\n Description=\"IdentityPool id for YaegarBooks\"\n )\n])\n\nprint(t.to_json())\n","sub_path":"stacks/src/cognito.py","file_name":"cognito.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617195420","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 21:27:22 2017\n\n@author: yolandatiao\n\"\"\"\n\n#####------------------ Import START 
------------------#####\nimport os # For changing directory\nimport csv # For using csv writer\nimport string # For using string replace\nfrom astropy.io import ascii # For using ascii table to open csv\nfrom astropy.table import Table, Column, join # For using astropy table functions\nimport glob # For finding filenames under a directory\n#####------------------ Import END ------------------#####\n\n\n\n#####------------------ Config START ------------------#####\ncode_dir=\"/Volumes/Huitian/Exp174/codes\"\nwk_dir=\"/Volumes/Huitian/Exp174/1_Refseq\"\n\npeak_file=\"/Volumes/Huitian/Exp174/1_Refseq/Exp122_Exp169_GSE88987_mergedPeaks_cord.csv\"\nref_file=\"mm10_refseq_match_mg_e-100000.csv\"\n\n#####------------------ Config END ------------------#####\n\n\n\n#####------------------ Self defined function START ------------------######\nos.chdir(code_dir)\nimport fc_basic_astropy_subprocess as fc\n#####------------------ Self defined function END ------------------######\n\n\n#####------------------ Main function START ------------------#####\nos.chdir(wk_dir)\n\npeak_file_nf=fc.filenamenoformat(fc.Getfilename(peak_file))\nref_file_nf=fc.filenamenoformat(fc.Getfilename(ref_file))\nout_file_name=\"%s_ann-%s.csv\"%(peak_file_nf,ref_file_nf)\n\nref_data=ascii.read(ref_file)\nref_data=fc.setcolnames(ref_data)\nref_data_len=len(ref_data)\n\n\n###----- Find gene name for each peak\nwith open(out_file_name,\"w\") as fout:\n    outwriter=csv.writer(fout, delimiter=\",\")\n\n    with open(peak_file,\"r\") as fin:\n        inreader=csv.reader(fin,delimiter=\",\")\n\n        in_colnames=next(inreader)\n        in_colnames.append(\"gene_number\")\n        in_colnames.append(\"gene_name\")\n        outwriter.writerow(in_colnames)\n\n        row_out=[]\n        for row in inreader:\n            row_out=row\n            row_chr=row[1]\n            row_s=int(row[2]) # cast coordinates to int so they compare numerically\n            row_e=int(row[3])\n            row_genelist=[]\n            ref_data_x=[]\n            for x in xrange(0, ref_data_len):\n                ref_data_x=list(ref_data[x])\n                if ref_data_x[1]==row_chr:\n                    if (ref_data_x[2]<=row_s and ref_data_x[3]>row_e):\n                        row_genelist.append(ref_data_x[0]) # gene name of the matching refseq row\n\n            row_out.append(len(row_genelist))\n            if len(row_genelist)>0:\n                row_out.append(\",\".join(row_genelist))\n            else:\n                row_out.append(\"NA\")\n            outwriter.writerow(row_out)\n\n\n#####------------------ Main function END ------------------#####","sub_path":"codes/Func_2_refseq_match_peak.py","file_name":"Func_2_refseq_match_peak.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"153178072","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 1 10:01:06 2021\r\n\r\n@author: YOSO_WANHH\r\n\"\"\"\r\n#a263: number of days between two dates\r\n\r\nimport datetime\r\ny1, m1, d1 = map(int,(input().split()))\r\ny2, m2, d2 = map(int,(input().split()))\r\ndate1 = datetime.datetime(y1, m1, d1)\r\ndate2 = datetime.datetime(y2, m2, d2)\r\nprint(abs((date1-date2).days))","sub_path":"263日期差幾天.py","file_name":"263日期差幾天.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"467515074","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 8 09:50:46 2019\r\n\r\n@author: lenovo\r\n\"\"\"\r\nimport random\r\nimport torch\r\nimport torch.nn as nn\r\nimport scipy.io as sio\r\nimport numpy as np\r\nimport time\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nfrom sklearn.metrics import confusion_matrix\r\nimport torchvision.models as 
models\r\n\r\n#load the data\r\ndataF = 'C://Users//lenovo//Desktop//毕设//资料//Salinas_corrected.mat'\r\ndataRow = sio.loadmat(dataF)[\"salinas_corrected\"]\r\ndataTruthF = 'C://Users//lenovo//Desktop//毕设//资料//Salinas_gt.mat'\r\ndataTruth = sio.loadmat(dataTruthF)[\"salinas_gt\"]\r\n\r\n#=============PCA=============#\r\ndef pca(dataMat,topNfeat=9999999):\r\n #dataMat:input\r\n #topNfeat:output top N features\r\n meanVals = np.mean(dataMat, axis=0) #to compute the mean value\r\n meanRemoved = dataMat - meanVals #to remove the mean value\r\n covMat = np.cov(meanRemoved, rowvar=0) #to compute the covariance matrix\r\n eigVals,eigVects = np.linalg.eig(np.mat(covMat)) #to compute the eigenvalues and eigenvectors\r\n eigValInd = np.argsort(eigVals) #to sort from the smallest to the largest\r\n eigValInd = eigValInd[:-(topNfeat+1):-1] #to get the nth largest eigenvalues and eigenvectors\r\n redEigVects = eigVects[:, eigValInd]\r\n lowDDataMat = meanRemoved * redEigVects #to transform into low dimensions\r\n reconMat = (lowDDataMat * redEigVects.T) + meanVals\r\n cumCont = sum(eigVals[eigValInd])/sum(eigVals) #to compute cumulative contribution\r\n return lowDDataMat,reconMat,cumCont\r\n\r\n# to draw principal component\r\nimData = dataRow.reshape([111104, 204]) #reshape the data into 2_D\r\nlowDataMat,reconMat,cumCont = pca(imData, 3)\r\nprinCom = np.zeros([3, 512, 217])\r\nfor i in range(3):\r\n prinCom[i,:,:] = lowDataMat[:,i].reshape([512, 217]) \r\n[nBand,nRow, nColumn] = prinCom.shape\r\n\r\n#to flip and expand the size of data\r\ndef flip(data):\r\n y_4 = np.zeros_like(data)\r\n y_1 = y_4\r\n y_2 = y_4\r\n first = np.concatenate((y_1, y_2, y_1), axis=1)\r\n second = np.concatenate((y_4, data, y_4), axis=1)\r\n third = first\r\n Data = np.concatenate((first, second, third), axis=0)\r\n return Data\r\n\r\nnTrain = 200 #the size of training data \r\nnTest = 500 #the size of testing data\r\ntraEpo = 500 #training epoches\r\nbatSize = 200 #batch size\r\nnz = 100 #the band number of noise\r\nngf = 64 \r\nnc = 3 #the bands number of the output of genetator\r\nnumLabel = int(np.max(dataTruth)) #the number of labels\r\nprinCom = (torch.from_numpy(prinCom)).permute(1,2,0)\r\nprinCom = flip(prinCom)\r\nprinCom = (torch.from_numpy(prinCom)).permute(2,0,1)\r\ndatatruth = flip(dataTruth)\r\n\r\n#to pick up the training and testing data\r\nHalfWidth = 32\r\nWid = 2 * HalfWidth\r\nG = datatruth[nRow - HalfWidth:2 * nRow + HalfWidth, nColumn - HalfWidth:2 * nColumn + HalfWidth]\r\ndata = prinCom[:,nRow - HalfWidth:2 * nRow + HalfWidth, nColumn - HalfWidth:2 * nColumn + HalfWidth]\r\n[row, col] = G.shape\r\n\r\nNotZeroMask = np.zeros([row, col])\r\nNotZeroMask[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1] = 1\r\nG = G * NotZeroMask\r\n\r\n[Row, Column] = np.nonzero(G)\r\nnSample = np.size(Row)\r\n\r\nimdb = {}\r\nimdb['trainData'] = np.zeros([nTrain, nBand, 2 * HalfWidth, 2 * HalfWidth], dtype=np.float32)\r\nimdb['trainLabels'] = np.zeros([nTrain], dtype=np.int64)\r\nimdb['testData'] = np.zeros([nTest, nBand, 2 * HalfWidth, 2 * HalfWidth], dtype=np.float32)\r\nimdb['testLabels'] = np.zeros([nTest], dtype=np.int64)\r\n\r\nRandPerm = np.random.permutation(nSample)\r\n\r\nfor iSample in range(nTrain):\r\n imdb['trainData'][iSample, :, :, :] = data[:,Row[RandPerm[iSample]] - HalfWidth: Row[RandPerm[iSample]] + HalfWidth, \\\r\n Column[RandPerm[iSample]] - HalfWidth: Column[RandPerm[iSample]] + HalfWidth]\r\n imdb['trainLabels'][iSample] = G[Row[RandPerm[iSample]],\r\n 
Column[RandPerm[iSample]]].astype(np.int64)\r\nfor iSample in range(nTest):\r\n imdb['testData'][iSample, :, :, :] = data[:,Row[RandPerm[(iSample+nTrain)]] - HalfWidth: Row[RandPerm[(iSample+nTrain)]] + HalfWidth, \\\r\n Column[RandPerm[(iSample+nTrain)]] - HalfWidth: Column[RandPerm[(iSample+nTrain)]] + HalfWidth]\r\n imdb['testLabels'][iSample] = G[Row[RandPerm[(iSample+nTrain)]],\r\n Column[RandPerm[(iSample+nTrain)]]].astype(np.int64)\r\nprint('Data is READY.')\r\nimdb['trainLabels'] = imdb['trainLabels'] - 1\r\nimdb['testLabels'] = imdb['testLabels'] - 1\r\n\r\n#=============3D GAN=============#\r\n \r\n#=============3D GENERATOR=============#\r\nclass G(nn.Module):\r\n def __init__(self, nz, ngf, nc):\r\n super(G, self).__init__()\r\n self.layer1 = nn.ConvTranspose2d(nz, ngf*8, 4, stride=1, padding=0, bias=False)\r\n self.bn1 = nn.BatchNorm2d(ngf*8)\r\n self.layer2 = nn.ConvTranspose2d(ngf*8, ngf*4, 4, stride=2, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(ngf*4)\r\n self.layer3 = nn.ConvTranspose2d(ngf*4, ngf*2, 4, stride=2, padding=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(ngf*2)\r\n self.layer4 = nn.ConvTranspose2d(ngf*2, ngf, 4, stride=2, padding=1, bias=False)\r\n self.bn4 = nn.BatchNorm2d(ngf)\r\n self.layer5 = nn.ConvTranspose2d(ngf, nc, 4, stride=2, padding=1, bias=False)\r\n self.ac = nn.ReLU(True)\r\n self.ac1 = nn.Tanh()\r\n\r\n def forward(self, z):\r\n #1024*1*1 to 512*4*4\r\n op = self.layer1(z)\r\n op = self.bn1(op)\r\n op = self.ac(op)\r\n \r\n #512*4*4 to 256*8*8\r\n op = self.layer2(op)\r\n op = self.bn2(op)\r\n op = self.ac(op)\r\n \r\n #256*8*8 to 128*16*16\r\n op = self.layer3(op)\r\n op = self.bn3(op)\r\n op = self.ac(op)\r\n \r\n #128*16*16 to 64*32*32\r\n op = self.layer4(op)\r\n op = self.bn4(op)\r\n op = self.ac(op)\r\n \r\n #64*32*32 to 3*64*64\r\n op = self.layer5(op)\r\n output = self.ac1(op)\r\n return output\r\n\r\n#=============3D DISCRIMINATOR=============# \r\nclass D(nn.Module):\r\n def __init__(self, ngf, nc, numLabel):\r\n super(D, self).__init__()\r\n self.layer1 = nn.Conv2d(nc, ngf, 4, stride=2, padding=1, bias=False)\r\n self.layer2 = nn.Conv2d(ngf, ngf*2, 4, stride=2, padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(ngf*2)\r\n self.layer3 = nn.Conv2d(ngf*2, ngf*4, 4, stride=2, padding=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(ngf*4)\r\n self.layer4 = nn.Conv2d(ngf*4, ngf*8, 4, stride=2, padding=1, bias=False)\r\n self.bn4 = nn.BatchNorm2d(ngf*8)\r\n self.layer5 = nn.Conv2d(ngf*8, ngf, 4, stride=1, padding=0, bias=False)\r\n self.ac = nn.LeakyReLU(0.2,inplace=True)\r\n self.ac1 = nn.Linear(ngf, 1)\r\n self.ac2 = nn.Linear(ngf, numLabel)\r\n self.layer6 = nn.Sigmoid()\r\n self.layer7 = nn.LogSoftmax()\r\n \r\n def forward(self, g):\r\n #3*64*64 to 64*32*32\r\n op = self.layer1(g)\r\n op = self.ac(op)\r\n\r\n #64*32*32 to 128*16*16\r\n op = self.layer2(op)\r\n op = self.bn2(op)\r\n op = self.ac(op)\r\n \r\n #128*16*16 to 256*8*8\r\n op = self.layer3(op)\r\n op = self.bn3(op)\r\n op = self.ac(op)\r\n \r\n #256*8*8 to 512*4*4\r\n op = self.layer4(op)\r\n op = self.bn4(op)\r\n op = self.ac(op)\r\n \r\n #512*4*4 to 64*1*1\r\n op = self.layer5(op)\r\n op = op.view(-1, ngf)\r\n \r\n #to distinguish the real or the fake \r\n RorF = self.ac1(op)\r\n RorF = self.layer6(RorF)\r\n #to predict the class labels\r\n Classes = self.ac2(op)\r\n Classes = self.layer7(Classes)\r\n return RorF,Classes\r\n \r\n#to compute the kappa\r\ndef kappa(testData, k):\r\n dataMat = np.mat(testData)\r\n P0 = 0.0\r\n for i in range(k):\r\n P0 += dataMat[i, 
i]*1.0\r\n xsum = np.sum(dataMat, axis=1)\r\n ysum = np.sum(dataMat, axis=0)\r\n Pe = float(ysum * xsum) / np.sum(dataMat) ** 2\r\n P0 = float(P0 / np.sum(dataMat) * 1.0) #OA\r\n cohens_coefficient = float((P0 - Pe) / (1 - Pe))\r\n return cohens_coefficient\r\n\r\ninput = torch.FloatTensor(batSize, 3, 64, 64)\r\nz = torch.FloatTensor(batSize, 100, 1, 1)\r\ns_label = torch.FloatTensor(batSize)\r\nc_label = torch.LongTensor(batSize)\r\ninLabel= torch.LongTensor(nTest)\r\nlossDD = np.zeros(50)\r\nlossGG = np.zeros(50)\r\nOA = np.zeros(50)\r\nAA = np.zeros(50)\r\nK = np.zeros(50)\r\n\r\nreal_label = 1\r\nfake_label = 0\r\n \r\nSCrit = nn.BCELoss() #use Binary Cross Entropy Loss to compute the TorF loss\r\nCCrit = nn.NLLLoss() #use Negative Log Likelihood Loss to compute the HSI loss \r\n \r\nz = Variable(z)\r\ninput = Variable(input)\r\ns_label = Variable(s_label)\r\nc_label = Variable(c_label)\r\ninLabel = Variable(inLabel)\r\nG = G(nz, ngf, nc)\r\nD = D(ngf, nc, numLabel)\r\n\r\noptimizerD = optim.Adam(D.parameters(), lr=0.0002) #to optimize with the Adam algorithm\r\noptimizerG = optim.Adam(G.parameters(), lr=0.0002)\r\n\r\ndef train():\r\n plt.ion() #to plot continously \r\n for epoch in range(traEpo):\r\n #to train discriminator with fake data\r\n optimizerD.zero_grad() #to clear the gradients of all optimized torch.Tensor s.\r\n z = torch.rand(batSize,100,1,1) #to input the noise\r\n c = torch.randint(0, 16, (batSize,)) #to input the label\r\n gen = G(z)\r\n fakeSP,fakeCP = D(gen.detach()) #to input the fake data into discriminator \r\n s_label.data.fill_(fake_label)\r\n c_label.copy_(c)\r\n lossDFS = SCrit(fakeSP, s_label)\r\n lossDFC = CCrit(fakeCP, c_label)\r\n lossDF = lossDFS + lossDFC\r\n lossDF.backward()\r\n \r\n #to train discriminator with REAL data \r\n realSP,realCP = D(torch.from_numpy(imdb['trainData'])) #to input the real data into discriminator \r\n s_label.fill_(real_label)\r\n c_label.copy_(torch.from_numpy(imdb['trainLabels'])) \r\n lossDRS = SCrit(realSP, s_label)\r\n lossDRC = CCrit(realCP, c_label)\r\n lossDR = lossDRS + lossDRC\r\n lossDR.backward()\r\n lossD = lossDR + lossDF \r\n optimizerD.step()\r\n\r\n #to train generator\r\n optimizerG.zero_grad()\r\n fakeSP,fakeCP = D(gen) #to input the fake data into discriminator \r\n c_label.copy_(c)\r\n lossGS = SCrit(fakeSP, s_label)\r\n lossGC = CCrit(fakeCP, c_label)\r\n lossG = lossGS + lossGC \r\n lossG.backward()\r\n optimizerG.step()\r\n\r\n #to output the result every 10 trainings\r\n if epoch % 10 == 0: \r\n print('[%d] Loss_D: %.4f Loss_G: %.4f '\r\n % (epoch, lossD.data.numpy(), lossG.data.numpy()))\r\n lossDD[int(epoch/10)] = lossD / 10\r\n lossGG[int(epoch/10)] = lossG / 10\r\n \r\n #testing…… \r\n if epoch % 10 == 0:\r\n D.eval()\r\n G.eval()\r\n testLoss = 0\r\n right = 0\r\n predict = np.array([], dtype=np.int64)\r\n labels = np.array([], dtype=np.int64)\r\n \r\n inLabel.copy_(torch.from_numpy(imdb['testLabels']))\r\n data, label = Variable(torch.from_numpy(imdb['testData'])), Variable(torch.from_numpy(imdb['testLabels']))\r\n y_l = label.data.cpu().numpy()\r\n output = D(data)\r\n testLoss += CCrit(output[1], label)\r\n pred = output[1].data.max(1)[1] # get the index of the max log-probability\r\n right += pred.cpu().eq(inLabel).sum()\r\n predict = np.append(predict, pred.cpu().numpy())\r\n labels = np.append(labels, y_l)\r\n \r\n acc =100. * right / 500\r\n C = confusion_matrix(labels, predict)\r\n k = 100. * kappa(C, np.shape(C)[0])\r\n AA_ACC = np.diag(C) / np.sum(C, 1)\r\n aa = 100. 
* np.mean(AA_ACC, 0)\r\n print('OA= %.5f AA= %.5f k= %.5f' % (acc, aa, k))\r\n OA[int(epoch/10)] = acc\r\n AA[int(epoch/10)] = aa\r\n K[int(epoch/10)] = k\r\n \r\n #to plot the score of loss\r\n plt.figure(figsize=(7,10)) \r\n plt.subplot(2,1,1)\r\n x = np.arange(0,500,10) \r\n plt.plot(x, lossDD, c='k', label='lossD')\r\n plt.plot(x, lossGG, c='r', label='lossG')\r\n plt.title('score of loss')\r\n plt.xlabel('epoch')\r\n plt.ylabel('loss')\r\n plt.ylim(0,1)\r\n plt.legend(['lossD', 'lossG'])\r\n \r\n #to plot the score of OA、AA、kappa\r\n plt.subplot(2,1,2)\r\n plt.plot(x, OA, c='k', label='OA')\r\n plt.plot(x, AA, c='r', label='AA')\r\n plt.plot(x, K, c='b', label='kappa')\r\n plt.title('score of OA/AA/kappa')\r\n plt.xlabel('epoch')\r\n plt.ylabel('accuracy(%)') \r\n plt.ylim(0,100)\r\n plt.legend(['OA', 'AA', 'kappa'])\r\n \r\n plt.show()\r\n \r\n \r\nt_begin = time.time() \r\ntrain()\r\nprint(\"Total Elapse: {:.2f}\".format(time.time() - t_begin))\r\n","sub_path":"3D-GAN.py","file_name":"3D-GAN.py","file_ext":"py","file_size_in_byte":12978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"383822893","text":"import os\nimport json\n\nclass DataJson():\n\n FILE = 'data.json'\n \n def __init__(self, path):\n\n self.path = os.path.join(path, self.FILE)\n self.create_if_not_exist()\n\n def create_if_not_exist(self):\n\n if not os.path.isfile(self.path):\n with open(self.path, 'w') as data:\n data.write('{}')\n data.close()\n\n def get_data(self, key=None):\n with open(self.path) as data:\n json_data = json.load(data)\n data.close()\n if key:\n if key in json_data:\n return json_data[key]\n return ''\n return json_data\n\n def set_data(self, key, value):\n\n json_data = self.get_data()\n\n json_data[key] = value\n \n with open(self.path, 'w') as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n \n\n\n\n\n\n\n","sub_path":"catalogo/tools/DataJson.py","file_name":"DataJson.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539694545","text":"from admin.verification.viewsajax import VerificationRequestMutateView\r\n\r\n\r\n\r\nfrom django.conf.urls import patterns, include, url\r\nfrom views import *\r\n\r\nurlpatterns = patterns('',\r\n url(r'^nric/$', VerificationRequestListView.as_view(), name='verification_requests'),\r\n url(r'^nric/(?P[0-9]+)/mutate/$', VerificationRequestMutateView.as_view(), name='verification_mutate_details'),\r\n url(r'^nric/(?P[0-9]+)/details/$', VerificationDetailsView.as_view(), name='verification_details_view'),\r\n url(r'^cert/$', CertificateVerfication.as_view(), name='cert_verification_requests'),\r\n url(r'^cert/(?P[0-9]+)/details/$', VerificationDetailsView.as_view(), name='cert_verification_details_view'),\r\n )\r\n\r\n","sub_path":"verification/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"328228992","text":"from sqlalchemy import exc\nfrom findfiles import run\nfrom app import Provincias\nfrom models.filesql import FileSql, Workflow, make_fileql\n\n\n\nprovs = Provincias()\nlogger = provs.logger()\nlogger.info('Comenzando con el main')\nrdb = provs.setup_redis()\nlogger.debug('Se configura redis en el host: {}'.format(provs.conf.redis['host']))\nsql = provs.setup_sql()\nlogger.debug('Se configura sql: {}'.format(provs.conf.get_sql))\nwork_dir = 
provs.get_workdir\nlogger.debug('env workdir = {}'.format(work_dir))\nvalidador=\"ARG-(0[0-9]|1[0-9]|2[0-5])-[a-z A-Z]{3}-(h-tabla|tabla)([1-2]|[1-2]_[1-2])-(0?[1-9]|1[012])-[0-9]{4}.*\\\.(csv|txt)\" # regex that incoming file names must match\n\n\ndef to_session(elements, row, sql):\n    for e in elements:\n        fileql = make_fileql(e, row.prov_id, row.id, 1)\n        logger.debug(\"Adding file: {}\".format(fileql.orig_name))\n        sql.session.add(fileql)\n\ndef to_db(sql):\n    try:\n        sql.session.commit()\n    except exc.IntegrityError as e:\n        logger.error(\"Error while inserting into the database\")\n        logger.error(\"{}\".format(e))\n        sql.session.rollback()\n\ndef search_files():\n    for row in sql.session.query(provs.institucion).all():\n\n        pathToFile = \"{}/{}\".format(work_dir, row.folder)\n        logger.info(\"Checking directory {}\".format(pathToFile))\n        response = run(pathToFile, rdb, row.r_key, validador)\n        logger.debug(\"Number of files: {}\"\\\n            .format(len(response['newfiles'])))\n        if response['newfiles']:\n            to_session(response['newfiles'], row, sql)\n\n    to_db(sql)\n\nif __name__ == '__main__':\n    search_files()\n","sub_path":"src/scanfiles.py","file_name":"scanfiles.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"249147796","text":"# Copyright (c) 2016 Uber Technologies, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import\n\nfrom . import common\nfrom .. 
import rw\nfrom .call_continue import CallContinueMessage\nfrom .types import Types\n\n\nclass CallRequestContinueMessage(CallContinueMessage):\n \"\"\"Represent a continuation of a call request (across multiple frames).\"\"\"\n message_type = Types.CALL_REQ_CONTINUE\n\n def __init__(\n self,\n flags=0,\n checksum=None,\n args=None,\n id=0,\n ):\n super(CallRequestContinueMessage, self).__init__(\n flags, checksum, args, id)\n\n def fragment(self, space_left):\n fragment_msg = CallRequestContinueMessage(\n flags=self.flags,\n checksum=self.checksum,\n )\n return super(CallRequestContinueMessage, self).\\\n fragment(space_left, fragment_msg)\n\n\ncall_req_c_rw = rw.instance(\n CallRequestContinueMessage,\n (\"flags\", rw.number(1)), # flags:1\n (\"checksum\", common.checksum_rw), # csumtype:1 (csum:4){0, 1}\n (\"args\", rw.args(rw.number(2))), # [arg1~2, arg2~2, arg3~2]\n)\n","sub_path":"tchannel/messages/call_request_continue.py","file_name":"call_request_continue.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"415997153","text":"#!/usr/bin/env python3.8\n\nimport numpy as np\nimport dakota.interfacing as di\nimport sys\n#import pysgpp\n\nsys.path.append('/home/rehmemk/git/anugasgpp/Okushiri') # nopep8\nfrom sgppOkushiri import maxOkushiri1Out # nopep8\n\n\ndef maxOkushiri1Out_forDakota(v):\n dim = 6\n okushiri_func = maxOkushiri1Out(dim, gridResolution=64, normalization=1, residual=0)\n dv = np.array(v)\n result = okushiri_func.eval(dv)\n okushiri_func.cleanUp()\n return [-result]\n\n\nparams, results = di.read_parameters_file()\nnum_params = params.num_variables\ncontinuous_vars = [0]*num_params\nfor k in range(num_params):\n continuous_vars[k] = params[params.descriptors[k]]\n\nevaluations = maxOkushiri1Out_forDakota(continuous_vars)\n\nfor i, r in enumerate(results.responses()):\n if r.asv.function:\n r.function = evaluations[i]\n\nresults.write()\n","sub_path":"dakota/maxOkushiri_for_dakota.py","file_name":"maxOkushiri_for_dakota.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"598830394","text":"__author__ = 'f5xs'\n\nfrom level1 import url_make, currenttime, Sleep, toOrdinal, strftime, Action\n\nfrom requests import get, post, put, delete, ConnectionError, HTTPError, Timeout\nfrom requests.exceptions import ChunkedEncodingError\n\n\nToday = lambda: toOrdinal(strftime(\"%Y%m%d\"))\n\n\ndef factor(date_i, date_f, logprice, expprice, spread, shift):\n factorized = (1-(1/spread))**(date_f-date_i)\n factorized = ((1-factorized) * logprice) + (factorized * expprice)\n factorized = factorized * (1 - shift) + shift * logprice\n return factorized\n\n\nclass Req(object):\n # An object that downloads a given URL continuously and will not return errors.\n\n def __init__(self, url, method=\"get\", str_data=True, json=False, cooldown=1, error_limit=4, timeout=(4, 2), clip=54,\n print_=True, params={}, **kwargs):\n\n if method not in [\"get\", \"post\", \"put\", \"delete\"]:\n raise RuntimeError(\"The method must be one of the following:\\nget post put delete\")\n if method == \"get\":\n method = get\n elif method == \"post\":\n method = post\n elif method == \"put\":\n method = put\n else:\n method = delete\n link = url_make(url, params)\n sc_counter = 0\n raised = False\n if print_:\n print(\"%sDownloading %s...\" % (currenttime(), link[:(lambda: clip if clip else None)()]))\n while True:\n sleep_ 
= Sleep(cooldown, True)\n try:\n self.request = method(url, timeout=timeout, stream=True, params=params, **kwargs)\n self.request.raise_for_status()\n self.status_code = self.request.status_code\n self.url = self.request.url\n self.content = self.request.content\n if str_data:\n self.text = self.request.text\n if json:\n self.json = self.request.json()\n break\n except (ChunkedEncodingError, ConnectionError, Timeout) as err:\n raised, errmsg = True, err\n except HTTPError as err:\n raised, errmsg = True, err\n if int(self.request.status_code/100) == 4:\n if self.request.status_code == 429:\n pass\n else:\n raise\n else:\n sc_counter += 1\n if sc_counter >= error_limit:\n raise\n finally:\n if raised:\n print(\"%sError: %s\" % (currenttime(), errmsg))\n sleep_.join()\n\n\nclass History(object):\n def __init__(self, *args):\n self.dates = []\n self.entries = {}\n self.headers = tuple([arg for arg in args])\n\n def __add__(self, other, override=False):\n if self.headers != other.headers:\n raise ArithmeticError(\"Headers of the two History objects are not the same.\")\n\n history = History(*self.headers)\n a = {_ for _ in self.entries}.union({_ for _ in other.entries})\n\n for _ in a:\n s = None\n o = None\n\n try: # Self value identified\n s = self.entries[_]\n except KeyError:\n pass\n\n try: # Other value identified\n o = other.entries[_]\n except KeyError:\n pass\n\n if not s:\n s = o\n elif s and o: # \"Collision\" testing\n if s != o:\n if not override:\n raise BaseException(\"The elements of the two objects are inequal: %s != %s\" % (s, o))\n else:\n s = o\n\n history.entries[_] = s\n\n history.d_recheck()\n return history\n\n def __str__(self):\n _2 = len(self.entries)\n _ = \"%s in length.\" % _2\n if _2 == 0:\n return _\n else:\n return _ + \" %s\" % self.entries[self.dates[-1]]\n\n def copy(self):\n history = History()\n history.dates = self.dates\n history.entries = self.entries\n history.headers = self.headers\n return history\n\n def append(self, date, *args):\n # Add an entry/date\n if len(args) != len(self.headers):\n raise IndexError(\"Argument length is not equal to %s\" % len(self.headers))\n self.entries[date] = [arg for arg in args]\n self.d_recheck()\n\n def d_recheck(self):\n # Recheck the date list\n self.dates = sorted([date for date in self.entries])\n\n def del_(self, date):\n # Delete an entry\n del(self.entries[date])\n\n def get(self, date, mode=\"=\"):\n if mode == \"=\":\n return self.entries[date]\n elif mode == \"<\":\n pass\n elif mode == \">\":\n pass\n else:\n raise Exception(\"Invalid mode \\\"%s\\\"\" % mode)\n\n\nclass Buy(Action):\n def __repr__(self):\n return \"Buy \"\n\nclass Sell(Action):\n def __repr__(self):\n return \"Sell\"\n\nclass Hold(Action):\n def __repr__(self):\n return \"Hold\"","sub_path":"level2.py","file_name":"level2.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"28541284","text":"from arcade import Sprite,load_spritesheet\nimport platform\n\nif platform.system() == \"Windows\": slash = \"\\\\\"\nelse: slash = \"/\"\n\nbrokenSprites = []\nbrokenSprites.append(load_spritesheet(r\"Sprite\"+slash+\"RedChipBroken 720.png\",sprite_width=720,sprite_height=720,columns=2,count=4))\nbrokenSprites.append(load_spritesheet(r\"Sprite\"+slash+\"YellowChipBroken 720.png\",sprite_width=720,sprite_height=720,columns=2,count=4))\nfallingSprites = []\nfallingSprites.append(load_spritesheet(r\"Sprite\"+slash+\"ChipFallRed 
720.png\",sprite_width=720,sprite_height=720,columns=3,count=8))\nfallingSprites.append(load_spritesheet(r\"Sprite\"+slash+\"ChipFallYellow 720.png\",sprite_width=720,sprite_height=720,columns=3,count=8))\nwin = []\nwin.append(load_spritesheet(r\"Sprite\"+slash+\"ChipLose 720.png\",sprite_width=720,sprite_height=720,columns=3,count=9))\nwin.append(load_spritesheet(r\"Sprite\"+slash+\"ChipWin 720.png\",sprite_width=720,sprite_height=720,columns=3,count=9))\n\nHEIGHT = 6\nWIDTH = 7\nSCREEN_WIDTH = 1080\nSCREEN_HEIGHT = 720\nSHIFT = 0\nratio = 0\nheight = 0\nwidth = 0\nwith open(\"gamesettings.txt\", \"r\") as f:\n ratio = float(f.readline())\n height = int(f.readline())\n width = int(ratio*height)\nclass Pion:\n # changé\n def __init__(self,x,y,id):\n self.x = x\n self.y = y\n self.id = id\n self.sprite = Sprite()\n\n self.sprite.texture = fallingSprites[id-1][0]\n self.sprite.scale = width * 0.19 / 853\n self.sprite.center_x = self.x * (width*64/853) + (width*32/853) + ((width )//2 - (WIDTH*(width*64/853) // 2)) + (width*5/853) + SHIFT\n self.sprite.center_y = 0\n self.sprite.change_x = 0\n self.sprite.change_y = 0\n self.accel = 1\n self.pose = False\n self.gagnant = False\n self.textI = 0\n self.compteur = 0\n self.fall = False\n \n def __str__(self):\n return '['+str(self.x)+','+str(self.y)+']'\n #retourne le nombre de pions alignés dans la direction (direction)\n def alignement(self,direction,pions):\n for pion in self.pionsVoisins(pions):\n if(pion.x == self.x + direction[0] and pion.y == self.y + direction[1]):\n return 1 + pion.alignement(direction,pions) \n return 1\n def alignement2(self,direction,pions,grille):\n for pion in self.pionsVoisins(pions):\n if(pion.x == self.x + direction[0] and pion.y == self.y + direction[1]):\n return (1 + pion.alignement2(direction,pions,grille)[0],pion.alignement2(direction,pions,grille)[1])\n for coup in grille.coupsPossibles():\n if(self.x+direction[0] == coup and self.y+direction[1] == grille.colonnes[self.x+direction[0]].caseDisponible() ):\n return (1,True)\n return (1,False)\n def pionsAlignes(self,direction,pions):\n alignement = []\n alignement.append(self)\n for pion in self.pionsVoisins(pions):\n if(pion.x == self.x + direction[0] and pion.y == self.y + direction[1]):\n return alignement + pion.pionsAlignes(direction,pions)\n return alignement\n #retourne tous les pions voisins appartenant à \"pions\"\n def pionsVoisins(self,pions):\n voisins = []\n for pion in pions: \n voisinage = (abs(pion.y - self.y) < 2) and (abs(pion.x - self.x) < 2)\n samePosition = (pion.x == self.x and pion.y == self.y )\n if (voisinage and not samePosition): voisins.append(pion)\n return voisins\n # ajouté\n def settled(self,grille):\n derniereCase = self.y+1 > grille.height-1\n return derniereCase or not grille.colonnes[self.x].cases[self.y+1] == 0\n # ajouté\n def update(self,grille):\n while(not self.settled(grille)):\n grille.colonnes[self.x].cases[self.y] = 0\n self.y+=1\n grille.colonnes[self.x].cases[self.y] = self.id\n def nowfall(self):\n self.fall = True\n self.y = 10\n self.sprite.change_y = 0\n def spriteUpdate(self,grille):\n casey = grille.height - (self.sprite.center_y+(width*32/853)) // (width*64//853)\n if(not self.pose or self.fall):\n if(casey < self.y+1):\n self.accel = 0.2\n self.sprite.change_y -= self.accel*2\n self.sprite.center_y += self.sprite.change_y\n else:\n self.sprite.center_y = (grille.height - self.y) * (width*64/853) - (width*32/853) + (width*5/853)\n \n self.accel = 0\n self.pose = True\n\n def breakPions(self,grille):\n i = 
0\n while(not self.y-i <= 0 and i<=3):\n if(not grille.colonnes[self.x].cases[self.y-i-1] == 0):\n self.sprite.texture = brokenSprites[self.id-1][i]\n self.sprite.scale = width * 0.19 / 853\n i+=1\n def brille(self):\n self.compteur += 1\n if(self.compteur%3 == 0): self.textI +=1\n if(self.textI >= 9): self.textI = 0\n self.sprite.texture = win[self.id-1][self.textI]\n self.sprite.scale = width * 0.19 / 853\n","sub_path":"bitmapped/P4 Classes + Sprites/Pion.py","file_name":"Pion.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"588341254","text":"#regular expression starting with a ending with b\n\nimport re\n\nn=input(\"enter\")\nx=\"^a[a-zA-Z0-9\\W]*b$\"\nmatch=re.fullmatch(x,n)\nif match is not None:\n print(\"valid\")\nelse:\n print(\"invalid\")","sub_path":"regular_exprssion/abex.py","file_name":"abex.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"565822785","text":"import subprocess\nimport logging\nimport datetime\nimport os.path\nimport shlex\nimport time\nimport lib.connection as Connection\nfrom gi.repository import GObject\nimport copy\n\n# TODO: move templates to config file\nffmpegtemplate = \"\"\"ffmpeg -y -nostdin \\\n{inputs} \\\n-ac 2 -aspect 16:9 \\\n{videotracks} \\\n{audiotracks} \\\n-flags +global_header -flags +ilme+ildct \\\n-f segment -segment_time 180 -segment_format mpegts {filename}-%d.ts\"\"\"\nvideotracktemplate = \"-map {id}:v -c:v:{id} mpeg2video -pix_fmt:v:{id} yuv420p -qscale:v:{id} 2 -qmin:v:{id} 2 -qmax:v:{id} 7 -keyint_min 0 -bf:{id} 0 -g:{id} 0 -intra:{id} -maxrate:{id} 90M \"\naudiotracktemplate = \"-map {id}:a -c:a:{id} mp2 -b:a:{id} 192k -ac:a:{id} 2 -ar:a:{id} 48000 \"\ninputtemplate = \"-i tcp://{host}:{port} \"\nfilenameTemplate = \"voctorec_{year}-{month}-{day}_{hour}-{minute}-{second}\"\n\nclass MultiTrackRec:\n def __init__(self):\n self.recording = False\n self.curTime = \"\"\n self.curBitrate = \"\"\n self.curSize = \"\"\n self.ffmpegProcess = None\n self.videotracks = list()\n self.audiotracks = list()\n self.segmented = True\n self.segment_time = 180\n self.log = logging.getLogger(\"multitrackrec\")\n self.log.info(\"MultiTrackRecorder Initialized\")\n self.basepath = \"/home/zoadmin/record/\"\n self.folderpath = None\n\n def add_video_track(self, port, id, name):\n track = {\"id\": int(id), \"name\": str(name), \"port\": int(port)}\n self.videotracks.append(track)\n self.log.info(\"Added Videotrack {name}\".format(name=name))\n self.log.debug(\"Track: \" + str(track))\n\n def add_audio_track(self, id, name):\n track = {\"id\": int(id), \"name\": str(name)}\n self.audiotracks.append(track)\n self.log.info(\"Added Audiotrack {name}\".format(name=name))\n self.log.debug(\"Track: \" + str(track))\n\n def start_recording(self):\n\n self.log.info('starting recording')\n\n date = datetime.datetime.now()\n\n foldername = filenameTemplate.format(year=date.year, month=date.month, day=date.day, hour=date.hour,\n minute=date.minute, second=date.second)\n foldernamecopy = copy.copy(foldername)\n self.log.debug(foldername)\n self.log.debug(self.basepath)\n i = 0\n folderpath = self.basepath + foldername\n while os.path.exists(folderpath):\n folderpath = self.basepath + foldernamecopy + \"_\" + str(i)\n i += 1\n\n self.log.info(\"Creating Folder \" + folderpath)\n os.mkdir(folderpath)\n\n\n cmd = self.get_ffmpeg_str(folderpath + \"/segment\")\n parsed = 
shlex.split(cmd)\n self.log.debug(\"Parsed cmd: \" + str(parsed))\n self.folderpath = folderpath\n\n if not self.ffmpegProcess:\n self.log.info(\"Starting FFmpeg Recording Process\")\n self.ffmpegProcess = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n\n else:\n self.log.error(\"Error: Process already running / exited unexpectedly. Restarting instead.\")\n self.ffmpegProcess.terminate()\n self.ffmpegProcess = None\n self.start_recording()\n self.recording = True\n Connection.send(\"message\", \"rec_running\")\n GObject.io_add_watch(self.ffmpegProcess.stderr, GObject.IO_IN, self.on_data)\n\n def stop_recording(self):\n self.ffmpegProcess.terminate()\n self.ffmepgProcess = None\n self.recording = False\n Connection.send(\"message\", \"rec_stop\")\n\n def on_data(self, source, _, *args):\n line = source.readline()\n line = line.split()\n self.log.debug(line)\n for l in line:\n if \"time=\" in l:\n self.curTime = l[5:]\n a = subprocess.run(['du', '-h', '-s', self.folderpath], stdout=subprocess.PIPE)\n b = subprocess.run(['df', '-h', '--output=avail', '/dev/sda1'], stdout=subprocess.PIPE)\n self.curSize = str(a.stdout.split()[0], encoding='utf-8').replace(',', '.')\n self.availSize = str(b.stdout.split()[1], encoding='utf-8')\n self.log.debug(\"Time: {}, Avail: {}, Size: {}\".format(self.curTime, self.availSize, self.curSize))\n Connection.send(\"message\", \"recstatus,{},{},{}\".format(self.curTime, self.availSize, self.curSize))\n return True\n\n def get_ffmpeg_str(self, name):\n audioStr = \"\"\n videoStr = \"\"\n inputStr = \"\"\n for track in self.videotracks:\n videoStr += videotracktemplate.format(id=track[\"id\"])\n inputStr += inputtemplate.format(host=\"localhost\", port=track[\"port\"])\n for track in self.audiotracks:\n audioStr += audiotracktemplate.format(id=track[\"id\"])\n date = datetime.date.today()\n time = datetime.time()\n\n ffstr = ffmpegtemplate.format(inputs=inputStr, videotracks=videoStr, audiotracks=audioStr, filename=name)\n self.log.debug(\"FFmpeg String generated: \" + ffstr)\n return ffstr\n","sub_path":"lib/multitrackrec.py","file_name":"multitrackrec.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"154021946","text":"#!/usr/bin/python python\n# -*- coding:utf-8 -*-\n\nimport urllib2\nimport urlparse\n\nproxy = ''\nurl = ''\nrequest = ''\n\nopener = urllib2.build_opener()\n\nproxy_params = {urlparse.urlparse(url).scheme:proxy}\n\nopener.add_handler(urllib2.ProxyHandler(proxy_params))\n\nresponse = opener.open(request)\n","sub_path":"Chapter 1: Introduction to Web Scraping/support_proxy_crawler.py","file_name":"support_proxy_crawler.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"1980242","text":"#!/usr/bin/env python\n\nimport jirautil\nimport config\n\nimport jinja2\n\nimport argparse\nfrom datetime import *\n\ndef get_command_line_arguments():\n parser = argparse.ArgumentParser(description='Changelog generator for JIRA.', prefix_chars='-')\n parser.add_argument('version', help='versions to get issues from')\n parser.add_argument('-t', metavar='template', dest='template', default='changelog.tmpl',\n help='template file in templates/ directory')\n parser.add_argument('-o', metavar='output_file', dest='output_file', default='changelog.html',\n help='path to output file')\n\n return parser.parse_args()\n\n\ndef 
write_to_file(filename, content):\n with open(filename, 'w') as the_file:\n the_file.write(content)\n\n\ndef sort_on_issuetype(issue):\n types = ['New Feature', 'Improvement', 'Bug', 'Sub-task', 'Test']\n return types.index(issue.issuetype)\n\n\ndef sort_on_priority(issue):\n return int(issue.jira_issue.fields.priority.id)\n\n\ndef issue_wanted_in_changelog(issue):\n return (issue.issuetype in ['New Feature', 'Improvement', 'Bug']\n and issue.release_notes\n and issue.release_notes.lower() not in ['ingen', 'none'])\n\n\ndef translate(string):\n words = {'New Feature': 'Ny funksjonalitet',\n 'Improvement': 'Forbredring',\n 'Bug': 'Feilretting',\n 'Task': 'Oppgave'}\n\n return words[string]\n\n\nif __name__ == '__main__':\n\n args = get_command_line_arguments()\n\n print('Generating change log for version \"' + args.version + '\"...')\n\n jirautil = jirautil.JiraUtil(config.SERVER_URL, config.USER_NAME, config.USER_PASSWORD)\n\n env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'), trim_blocks=True, lstrip_blocks=True)\n env.filters['translate'] = translate\n template = env.get_template(args.template)\n\n issues = jirautil.issues_for_version(args.version)\n\n issues = list(filter(issue_wanted_in_changelog, issues))\n issues = sorted(issues, key=lambda issue: (sort_on_issuetype(issue), sort_on_priority(issue)))\n\n html = template.render(\n issues=issues,\n version=args.version,\n time_generated=datetime.now())\n\n write_to_file(args.output_file, html)\n","sub_path":"jira/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"382336014","text":"#!/Users/florianpilz/gocept/python2.7/bin/python\n\nimport subprocess\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser(description='Remove all PostgreSQL whose name'\n ' begins with the given prefix')\nparser.add_argument('prefix', help='Prefix used to match DBs')\nparser.add_argument(\n '-n', '--dry', action=\"store_true\",\n help='Only show which DBs would be removed, but do not perform removal')\n\noptions = parser.parse_args()\ndb_list = subprocess.check_output(\"psql -l\", shell=True)\nfor entry in db_list.split('\\n'):\n if not entry:\n continue\n db_name = re.sub('\\s+', ' ', entry).strip().split()[0]\n if not db_name.startswith(options.prefix):\n continue\n print('dropdb {}'.format(db_name))\n if not options.dry:\n subprocess.check_output(['dropdb', db_name])\n","sub_path":"bin/dropdbs-with-prefix.py","file_name":"dropdbs-with-prefix.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"322634632","text":"import os\nimport argparse\n\n\nimport tensorflow as tf\nimport opennmt as onmt\n\nfrom opennmt import constants\nfrom opennmt.utils import decay\nfrom opennmt.utils import losses\nfrom opennmt.utils import misc\nfrom opennmt.utils import optim\n\nimport sys\n\nmode = tf.estimator.ModeKeys.PREDICT\n\ndef show_usage():\n print(\"Runs the specified model interactively.\")\n print()\n print(\"Usage :\")\n print()\n print(\"python.exe infer_test.py RUN_DIR\")\n print()\n print(\"E.g.\")\n print()\n print(\"python.exe infer_test.py run-eng-logic\")\n\nif(len(sys.argv) == 2):\n RUN_DIR = str(sys.argv[1])\nelse:\n show_usage()\n exit()\n\nBASE_EXPORT_PATH = \".\\\\\" + RUN_DIR + \"\\\\export\\\\latest\"\n\nif not os.path.exists(BASE_EXPORT_PATH) :\n print(f\"Path {BASE_EXPORT_PATH} doesn't exist. 
Exiting.\")\n exit()\n\nexport_folders = os.listdir(BASE_EXPORT_PATH)\n\nif len(export_folders) == 0 :\n print(f\"Path {BASE_EXPORT_PATH} doesn't contain any export folders. Exiting.\")\n exit()\n\n# Find latest export folder.\nlatest_export_folder_as_int = 0\nlatest_export_folder = \"\"\n\nfor export_folder in export_folders :\n folder_name_as_int = int(export_folder)\n \n if folder_name_as_int > latest_export_folder_as_int:\n latest_export_folder_as_int = folder_name_as_int\n latest_export_folder = export_folder\n\nif latest_export_folder_as_int == 0:\n print(\"Couldn't find latest export folder. Exiting.\")\n exit()\n\nexport_dir_path = BASE_EXPORT_PATH + \"\\\\\" + latest_export_folder\n\nprint(f\"Using export path {export_dir_path}.\")\n\nwith tf.Session() as sess:\n meta_graph_def = tf.saved_model.loader.load(\n sess, [tf.saved_model.tag_constants.SERVING], export_dir_path)\n \n signature_def = meta_graph_def.signature_def[\"serving_default\"]\n\n input_tokens = signature_def.inputs[\"tokens\"].name\n input_length = signature_def.inputs[\"length\"].name\n output_tokens = signature_def.outputs[\"tokens\"].name\n output_length = signature_def.outputs[\"length\"].name\n\n print(\"\\exit or \\quit to exit\")\n\n while True:\n text = input(\"Input > \")\n \n if(text == \"\\exit\" or text == \"\\quit\"):\n break\n \n split = text.split(' ')\n\n inputs = {\n input_tokens: [\n split],\n input_length: [len(split)]\n }\n \n batch_tokens, batch_length = sess.run(\n [output_tokens, output_length], feed_dict=inputs)\n \n for tokens, length in zip(batch_tokens, batch_length):\n tokens, length = tokens[0], length[0] # Take the best hypothesis.\n length -= 1 # Ignore token.\n \n token_list = tokens[:length].tolist()\n translation=\"\"\n for token in token_list:\n translation += token.decode(\"utf-8\") + \" \"\n \n translation = translation.strip()\n \n print(\"Input > \" + translation)\n \n \n","sub_path":"OpenNMT-tf/7-logic-no-examples/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"333716524","text":"class Solution:\n def findLength(self, nums1: List[int], nums2: List[int]) -> int:\n if nums1 == nums2: #The trivial case where the two are equal\n return len(nums1)\n else:\n maximum = 0 #Storage used to find the longest subarray\n \n #This next section of code initializes the dp array which will contain values of the two given arrays plus enough space to check for subarrays.\n dp = [[0 for i in range(len(nums1) + 1)] for j in range(len(nums2) + 1)]\n for i in range(len(nums1)):\n dp[0][i + 1] = nums1[i]\n for i in range(len(nums2)):\n dp[i + 1][0] = nums2[i]\n \n #In order to populate the subarray we have to check two things. First, if the values of the row and column header are equal, we know that we have a match.\n #We also need to check whether that set is part of an existing subarray. In order to do this, we check the relationship between the diagonally-placed cell.\n #This cell would represent the last number or section of the subarray, if this has a number, we add it to the current cell since we know that is a continuation of\n #an existing array. If it is not, we know that it starts a new subarray. 
We fill out the entire dp matrix and use it to find the maximum length.\n for i in range(1, len(nums1) + 1):\n for j in range(1, len(nums2) + 1):\n if dp[0][i] == dp[j][0]:\n dp[j][i] = 1\n if i - 1 > 0 and j - 1 > 0:\n dp[j][i] += dp[j-1][i-1]\n if dp[j][i] > maximum:\n maximum = dp[j][i]\n return maximum #We return maximum to solve the solution.\n","sub_path":"MaximumLengthofRepeatedSubarray.py","file_name":"MaximumLengthofRepeatedSubarray.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214341107","text":"#\n# Copyright (c) 2013-2020, Dell Inc. or its subsidiaries.\n# All rights reserved.\n# See file LICENSE for licensing information.\n#\n# Module Name:\n#\n# session.py\n#\n# Abstract:\n#\n# Session setup tests\n#\n# Authors: Brian Koropoff (brian.koropoff@emc.com)\n#\n\nimport pike.test\n\n\nclass SessionTest(pike.test.PikeTest):\n # Log off a session\n def test_session_logoff(self):\n chan, tree = self.tree_connect()\n chan.logoff()\n\n def test_session_multiplex(self):\n chan, tree = self.tree_connect()\n chan2 = chan.connection.session_setup(self.creds)\n chan3 = chan.connection.session_setup(self.creds)\n self.assertEqual(chan.connection, chan2.connection)\n self.assertEqual(chan2.connection, chan3.connection)\n self.assertNotEqual(chan.session, chan2.session)\n self.assertNotEqual(chan2.session, chan3.session)\n self.assertNotEqual(chan.session.session_id, chan2.session.session_id)\n self.assertNotEqual(chan2.session.session_id, chan3.session.session_id)\n self.assertNotEqual(chan.session.session_key, chan2.session.session_key)\n self.assertNotEqual(chan2.session.session_key, chan3.session.session_key)\n tree2 = chan2.tree_connect(self.share)\n tree3 = chan3.tree_connect(self.share)\n chan3.logoff()\n chan2.logoff()\n chan.logoff()\n","sub_path":"src/pike/test/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"418730021","text":"# File: writeWave.py\n# Author: Wayne Snyder\n# Date: 1/5/14\n# Description: This is a prototypical Python 3 program for writing mono wave file.\n# You must create an array of shorts and then you can write them out to a file.\n# If you want to use this for Python 2.7 just change input to raw_input in main(). 
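# ---------------------------------------------------------------------------
# Editor's sketch (hypothetical, not part of the original program): the same
# 440 Hz tone computed with numpy instead of a per-sample Python loop; numpy
# is assumed to be available, and 32767 matches MAX_AMP for 16-bit samples.
import numpy as np

def sine_samples(freq_hz=440.0, seconds=6, rate=44100, max_amp=32767):
    # angle at sample i is 2*pi*freq_hz*i/rate; scale sin() into int16 range
    t = np.arange(rate * seconds)
    return (max_amp * np.sin(2 * np.pi * freq_hz * t / rate)).astype(np.int16)

# e.g. array.array("h", sine_samples().tolist()) gives an equivalent `data`
# array to the loop in main() below.
# ---------------------------------------------------------------------------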
\n# Citation: This is a modified version of the program synth.py from A Concise Introduction\n# to Programming in Python, p.129.\n\nimport array\nimport contextlib\nimport wave\nfrom math import sin, pi\n\n# Global parameters\n\nnumChannels = 1 # mono\nsampleWidth = 2 # in bytes, a 16-bit short\nsampleRate = 44100\nMAX_AMP = 2**(8*sampleWidth - 1) - 1 #maximum amplitude is 2**15 - 1 = 32767 \n\n# Take an array of shorts and write it out to a mono wave file\n\ndef writewav(fname, data, params):\n with contextlib.closing(wave.open(fname, \"w\")) as f:\n f.setparams(params)\n f.writeframes(data.tostring())\n print(fname + \" written.\")\n \n\ndef main():\n\n # parameters for this file\n\n lengthSeconds = 6 \n numSamples = sampleRate * lengthSeconds\n\n outfileName = input(\"Enter the name of the output .wav file: \")\n\n # Here is where you can create an array of shorts which will be written out\n\n data = array.array(\"h\")\n\n # Example: Create a sin wave of frequency A = 440 Hz\n\n for i in range( numSamples ):\n \n # 2 * pi * frequency is the angular velocity in radians/sec\n # multiplying this by i / sampleRate incrementally creates angle at each sample\n # and then sin ( angle ) => amplitude at this sample\n\n sample = MAX_AMP * sin( 2 * pi * 440.0 * i / sampleRate ) \n\n data.append( int( sample ) )\n\n\n params = [numChannels, sampleWidth, sampleRate , len(data), \"NONE\", None]\n writewav(outfileName, data, params)\n\n\nmain()\n","sub_path":"Audio/writeWave.py","file_name":"writeWave.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"230057085","text":"import io\nimport itertools\nimport json\nimport os\n\n# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\nimport random\nimport re\nimport shutil\nimport textwrap\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.metrics import confusion_matrix\nfrom termcolor import colored\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport imgaug as ia\nfrom scipy.special import softmax\nfrom sklearn.metrics import classification_report\n\ndef check_manual_seed(seed):\n \"\"\"\n If manual seed is not specified, choose a random one and notify it to the user\n \"\"\"\n seed = seed\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n ia.seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n print('Using manual seed: {seed}'.format(seed=seed))\n return\n\n\ndef check_log_dir(log_dir):\n # check if log dir exist\n if os.path.isdir(log_dir):\n color_word = colored('WARMING', color='red', attrs=['bold', 'blink'])\n print('%s: %s exist!' % (color_word, colored(log_dir, attrs=['underline'])))\n while (True):\n print('Select Action: d (delete)/ q (quit)', end='')\n key = input()\n if key == 'd':\n shutil.rmtree(log_dir)\n break\n elif key == 'q':\n exit()\n else:\n color_word = colored('ERR', color='red')\n print('---[%s] Unrecognized character!' 
% color_word)\n return\n\n\ndef plot_confusion_matrix(conf_mat, label):\n \"\"\"\n Parameters:\n title='Confusion matrix' : Title for your matrix\n tensor_name = 'MyFigure/image' : Name for the output summay tensor\n Returns:\n summary: image of plot figure\n Other items to note:\n - Depending on the number of category and the data , you may have to modify the figzie, font sizes etc.\n - Currently, some of the ticks dont line up due to rotations.\n \"\"\"\n\n cm = conf_mat\n\n np.set_printoptions(precision=2) # print numpy array with 2 decimal places\n\n fig = matplotlib.figure.Figure(figsize=(7, 7), dpi=320, facecolor='w', edgecolor='k')\n ax = fig.add_subplot(1, 1, 1)\n im = ax.imshow(cm, cmap='Oranges')\n\n classes = [re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\\1 ', x) for x in label]\n classes = ['\\n'.join(textwrap.wrap(l, 40)) for l in classes]\n\n tick_marks = np.arange(len(classes))\n\n ax.set_xlabel('Predicted', fontsize=7)\n ax.set_xticks(tick_marks)\n c = ax.set_xticklabels(classes, fontsize=4, rotation=-90, ha='center')\n ax.xaxis.set_label_position('bottom')\n ax.xaxis.tick_bottom()\n\n ax.set_ylabel('True Label', fontsize=7)\n ax.set_yticks(tick_marks)\n ax.set_yticklabels(classes, fontsize=4, va='center')\n ax.yaxis.set_label_position('left')\n ax.yaxis.tick_left()\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n ax.text(j, i, format(cm[i, j], 'd') if cm[i, j] != 0 else '.',\n horizontalalignment=\"center\", fontsize=6,\n verticalalignment='center', color=\"black\")\n fig.set_tight_layout(True)\n\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n\n # get PNG data from the figure\n png_buffer = io.BytesIO()\n fig.canvas.print_png(png_buffer)\n png_encoded = png_buffer.getvalue()\n png_buffer.close()\n\n return png_encoded\n\n\n####\ndef update_log(output, epoch, net_name, prefix, color, tfwriter, log_file, logging):\n # print values and convert\n max_length = len(max(output.keys(), key=len))\n for metric in output:\n key = colored(prefix + '-' + metric.ljust(max_length), color)\n print('------%s : ' % key, end='')\n if metric not in ['conf_mat_c', 'conf_mat_r', 'box_plot_data']:\n print('%0.7f' % output[metric])\n elif metric == 'conf_mat_c':\n conf_mat_c = output['conf_mat_c'] # use pivot to turn back\n conf_mat_c_df = pd.DataFrame(conf_mat_c)\n conf_mat_c_df.index.name = 'True'\n conf_mat_c_df.columns.name = 'Pred'\n output['conf_mat_c'] = conf_mat_c_df\n print('\\n', conf_mat_c_df)\n elif metric == 'conf_mat_r':\n conf_mat_r = output['conf_mat_r'] # use pivot to turn back\n conf_mat_r_df = pd.DataFrame(conf_mat_r)\n conf_mat_r_df.index.name = 'True'\n conf_mat_r_df.columns.name = 'Pred'\n output['conf_mat_r'] = conf_mat_r_df\n print('\\n', conf_mat_r_df)\n elif metric == 'box_plot_data':\n box_plot_data = output['box_plot_data'] # use pivot to turn back\n box_plot_data_df = pd.DataFrame(box_plot_data)\n box_plot_data_df.columns.name = 'Pred'\n output['box_plot_data'] = box_plot_data_df\n\n if not logging:\n return\n\n # create stat dicts\n stat_dict = {}\n for metric in output:\n if metric not in ['conf_mat_c', 'conf_mat_r', 'box_plot_data']:\n metric_value = output[metric]\n elif metric == 'conf_mat_c':\n conf_mat_df = output['conf_mat_c'] # use pivot to turn back\n conf_mat_df = conf_mat_df.unstack().rename('value').reset_index()\n conf_mat_df = pd.Series({'conf_mat_c': conf_mat_c}).to_json(orient='records')\n metric_value = conf_mat_df\n elif metric == 'conf_mat_r':\n conf_mat_regres_df = output['conf_mat_r'] # use pivot to 
turn back\n conf_mat_regres_df = conf_mat_regres_df.unstack().rename('value').reset_index()\n conf_mat_regres_df = pd.Series({'conf_mat_r': conf_mat_r}).to_json(orient='records')\n metric_value = conf_mat_regres_df\n elif metric == 'box_plot_data':\n box_plot_data_df = pd.Series({'box_plot_data': box_plot_data}).to_json(orient='records')\n metric_value = box_plot_data_df\n stat_dict['%s-%s' % (prefix, metric)] = metric_value\n\n # json stat log file, update and overwrite\n with open(log_file) as json_file:\n json_data = json.load(json_file)\n\n current_epoch = str(epoch)\n current_model = str(net_name)\n if current_epoch in json_data:\n old_stat_dict = json_data[current_model]\n stat_dict.update(old_stat_dict)\n current_epoch_dict = {current_model: stat_dict}\n json_data.update(current_epoch_dict)\n\n with open(log_file, 'w') as json_file:\n json.dump(json_data, json_file)\n\n # log values to tensorboard\n for metric in output:\n if metric not in ['conf_mat_c', 'conf_mat_r', 'box_plot_data']:\n tfwriter.add_scalar(prefix + '-' + metric, output[metric], current_epoch)\n\n\n####\ndef log_train_ema_results(engine, info):\n \"\"\"\n running training measurement\n \"\"\"\n training_ema_output = engine.state.metrics #\n training_ema_output['lr'] = float(info['optimizer'].param_groups[0]['lr'])\n update_log(training_ema_output, engine.state.epoch, 'train-ema', 'green',\n info['tfwriter'], info['json_file'], info['logging'])\n\n\n####\ndef process_accumulated_output_multi(output, batch_size, nr_classes):\n #\n def uneven_seq_to_np(seq):\n item_count = batch_size * (len(seq) - 1) + len(seq[-1])\n cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)\n # BUG: odd len even\n if len(seq) < 2:\n return seq[0]\n for idx in range(0, len(seq) - 1):\n cat_array[idx * batch_size:\n (idx + 1) * batch_size] = seq[idx]\n cat_array[(idx + 1) * batch_size:] = seq[-1]\n return cat_array\n\n proc_output = dict()\n true = uneven_seq_to_np(output['true'])\n # threshold then get accuracy\n if 'logit_c' in output.keys():\n logit_c = uneven_seq_to_np(output['logit_c'])\n pred_c = np.argmax(logit_c, axis=-1)\n # pred_c = [covert_dict[pred_c[idx]] for idx in range(len(pred_c))]\n acc_c = np.mean(pred_c == true)\n print(acc_c)\n # confusion matrix\n conf_mat_c = confusion_matrix(true, pred_c, labels=np.arange(nr_classes))\n proc_output.update(acc_c=acc_c, conf_mat_c=conf_mat_c,)\n if 'logit_r' in output.keys():\n logit_r = uneven_seq_to_np(output['logit_r'])\n label = np.transpose(np.array([[0., 1., 2., 3.]]).repeat(len(true), axis=0), (1, 0))\n pred_r = np.argmin(abs((logit_r - label)), axis=0)\n # pred_r = [covert_dict[pred_r[idx]] for idx in range(len(pred_r))]\n acc_r = np.mean(pred_r == true)\n # print(acc_r)\n # confusion matrix\n conf_mat_r = confusion_matrix(true, pred_r, labels=np.arange(nr_classes))\n proc_output.update(acc_r=acc_r, conf_mat_r=conf_mat_r)\n\n # proc_output.update(box_plot_data=np.concatenate(\n # [true[np.newaxis, :], pred_c[np.newaxis, :], pred_r[np.newaxis, :], logit_r.transpose(1, 0)], 0))\n return proc_output\n\ndef process_accumulated_output_multi_mix(output, batch_size, nr_classes):\n #\n def uneven_seq_to_np(seq):\n item_count = batch_size * (len(seq) - 1) + len(seq[-1])\n cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)\n # BUG: odd len even\n if len(seq) < 2:\n return seq[0]\n for idx in range(0, len(seq) - 1):\n cat_array[idx * batch_size:\n (idx + 1) * batch_size] = seq[idx]\n cat_array[(idx + 1) * batch_size:] = seq[-1]\n return cat_array\n\n 
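    # -----------------------------------------------------------------------
    # Editor's note (illustrative, not part of the original file): `seq` is a
    # list of per-batch numpy arrays with identical trailing dimensions, so
    # the hand-rolled copy in the helper above is equivalent to
    #
    #     cat_array = np.concatenate(seq, axis=0)
    #
    # which also covers a single-batch `seq` and the odd/even length edge
    # case flagged by the "BUG" comment, with no special-casing.
    # -----------------------------------------------------------------------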
proc_output = dict()\n true = uneven_seq_to_np(output['true'])\n # threshold then get accuracy\n if 'logit_c' in output.keys():\n logit_c = uneven_seq_to_np(output['logit_c'])\n\n pred_c = np.argmax(logit_c, axis=-1)\n # pred_c = [covert_dict[pred_c[idx]] for idx in range(len(pred_c))]\n acc_c = np.mean(pred_c == true)\n print('acc_c',acc_c)\n # print(classification_report(true, pred_c, labels=[0, 1, 2, 3]))\n # confusion matrix\n conf_mat_c = confusion_matrix(true, pred_c, labels=np.arange(nr_classes))\n proc_output.update(acc_c=acc_c, conf_mat_c=conf_mat_c,)\n if 'logit_r' in output.keys():\n logit_r = uneven_seq_to_np(output['logit_r'])\n label = np.transpose(np.array([[0., 1., 2., 3.]]).repeat(len(true), axis=0), (1, 0))\n pred_r = np.argmin(abs((logit_r - label)), axis=0)\n # pred_r = [covert_dict[pred_r[idx]] for idx in range(len(pred_r))]\n acc_r = np.mean(pred_r == true)\n print('acc_r',acc_r)\n # print(classification_report(true, pred_r, labels=[0, 1, 2, 3]))\n # confusion matrix\n conf_mat_r = confusion_matrix(true, pred_r, labels=np.arange(nr_classes))\n proc_output.update(acc_r=acc_r, conf_mat_r=conf_mat_r)\n\n # if ('logit_r' in output.keys()) and ('logit_c' in output.keys()):\n # a = abs((logit_r - label)).transpose(1, 0)\n # prob_r = softmax(-a, 1)\n # logit_c +=prob_r\n # pred_c = np.argmax(logit_c, axis=-1)\n # acc_c = np.mean(pred_c == true)\n # print('acc_mix',acc_c)\n\n # proc_output.update(box_plot_data=np.concatenate(\n # [true[np.newaxis, :], pred_c[np.newaxis, :], pred_r[np.newaxis, :], logit_r.transpose(1, 0)], 0))\n return proc_output\n\ndef process_accumulated_output_multi_testAUG(output, batch_size, nr_classes):\n #\n def uneven_seq_to_np(seq):\n item_count = batch_size * (len(seq) - 1) + len(seq[-1])\n cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)\n # BUG: odd len even\n for idx in range(0, len(seq) - 1):\n cat_array[idx * batch_size:\n (idx + 1) * batch_size] = seq[idx]\n cat_array[(idx + 1) * batch_size:] = seq[-1]\n return cat_array\n\n proc_output = dict()\n true = uneven_seq_to_np(output['true'])\n # threshold then get accuracy\n if 'pred_c' in output.keys():\n pred_c = uneven_seq_to_np(output['pred_c'])\n acc_c = np.mean(pred_c == true)\n # confusion matrix\n conf_mat_c = confusion_matrix(true, pred_c, labels=np.arange(nr_classes))\n proc_output.update(acc_c=acc_c, conf_mat_c=conf_mat_c,)\n if 'pred_r' in output.keys():\n pred_r = uneven_seq_to_np(output['pred_r'])\n acc_r = np.mean(pred_r == true)\n # confusion matrix\n conf_mat_r = confusion_matrix(true, pred_r, labels=np.arange(nr_classes))\n proc_output.update(acc_r=acc_r, conf_mat_r=conf_mat_r)\n return proc_output\n\n\n####\ndef inference(engine, inferer, prefix, dataloader, info):\n \"\"\"\n inference measurement\n \"\"\"\n inferer.accumulator = {metric: [] for metric in info['metric_names']}\n inferer.run(dataloader)\n output_stat = process_accumulated_output_multi(inferer.accumulator,\n info['infer_batch_size'], info['nr_classes'])\n update_log(output_stat, engine.state.epoch, prefix, 'red',\n info['tfwriter'], info['json_file'], info['logging'])\n return\n\n\n####\ndef accumulate_outputs(engine):\n batch_output = engine.state.output\n for key, item in batch_output.items():\n engine.accumulator[key].extend([item])\n return\n\n\ndef accumulate_predict(pred_patch):\n unique, counts = np.unique(pred_patch.cpu(), return_counts=True)\n pred_count = dict(zip(unique, counts))\n patch_label = max(pred_count, key=pred_count.get)\n return 
patch_label\n","sub_path":"misc/train_ultils_validator.py","file_name":"train_ultils_validator.py","file_ext":"py","file_size_in_byte":13460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"276719485","text":"#!/usr/bin/python3\n\"\"\"Request commits via GitHub API\n\"\"\"\n\nimport requests\nfrom sys import argv\n\nif __name__ == '__main__':\n\n repo = argv[1]\n owner = argv[2]\n\n url = 'https://api.github.com/repos/{}/{}/\\\ncommits?per_page=10'.format(owner, repo)\n\n r = requests.get(url)\n\n r = r.json()\n\n for commit in r:\n print('{}: {}'.format(commit.get('sha'),\n commit.get('commit').get('author').get('name')))\n","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471122816","text":"import io\nimport os\nimport pickle\nimport zipfile\nimport pandas as pd\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\n\n\nclass ModelDumper(object):\n \"\"\"Helper to persist a model using pickle.\n\n Parameters\n ----------\n estimator : estimator object\n An estimator object implementing `fit` and one of `decision_function`\n or `predict_proba`.\n \"\"\"\n\n def __init__(self, estimator):\n super(ModelDumper, self).__init__()\n self.estimator_ = estimator\n\n def dump(self, file):\n \"\"\"Persists model.\n\n Parameters\n ----------\n file : str or IOBase\n when str is given, file path is exepected\n \"\"\"\n if isinstance(file, str):\n f = open(file, 'wb')\n elif isinstance(file, io.IOBase):\n f = file\n else:\n raise Exception(\"Unexpected type.\")\n\n s = pickle.dumps(self.estimator_)\n\n f.write(s)\n\n f.close()\n\n\nclass ModelLoader(object):\n \"\"\"Helper to load a persisted a model using pickle.\n\n Attributes\n ----------\n estimator_ : loaded estimator\n Estimator loaded by method `load`.\n \"\"\"\n\n def __init__(self):\n super(ModelLoader, self).__init__()\n\n def load(self, file):\n \"\"\"Loads model.\n\n Parameters\n ----------\n file : str or IOBase\n when str is given, file path is exepected\n \"\"\"\n if isinstance(file, str):\n basefile, ext = os.path.splitext(file)\n\n if ext == '.zip':\n zf = zipfile.ZipFile(file, 'r')\n f = zf.open(zf.namelist()[0])\n else:\n f = open(file, 'rb')\n elif isinstance(file, io.IOBase):\n f = file\n else:\n raise Exception(\"Unexpected type.\")\n\n self.estimator_ = pickle.loads(f.read())\n\n f.close()\n\n return self.estimator_\n\n\ndef get_default_vars(func):\n func_args = func.__code__.co_varnames[:func.__code__.co_argcount]\n\n defaults_size = 0 if func.__defaults__ is None else len(func.__defaults__)\n\n return dict(zip(func_args[-defaults_size:], func.__defaults__))\n\n\ndef feature_importance(model, feature_names):\n\n if isinstance(model, GridSearchCV):\n model = model.best_estimator_\n\n if isinstance(model, Pipeline):\n model = model.steps[-1][-1]\n\n f_import = model.feature_importances_\n\n df_import = pd.DataFrame(\n list(zip(f_import, feature_names)),\n columns=['importance', 'feature']\n )\n\n df_import.sort_values('importance', inplace=True)\n df_import['feature'] = pd.Categorical(df_import['feature'],\n categories=df_import['feature'],\n ordered=True)\n\n return 
df_import\n","sub_path":"garson/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"372056369","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\nfrom pprint import pprint\nimport collections\nimport operator\n\nimport func\n\n\nINPUTFILE = './task08.input'\n\n\nclass Instruction():\n instructions = []\n\n def __init__(self, row):\n self.register_to_modify = row[0]\n self.action = row[1]\n self.action_value = int(row[2])\n # row[3] == if\n self.condition_register = row[4]\n self.condition_type = row[5]\n self.condition_value = int(row[6])\n\n self.instructions.append(self)\n\n def __repr__(self):\n return(\"{} {} {} if {} {} {}\".format(\n self.register_to_modify,\n self.action,\n self.action_value,\n self.condition_register,\n self.condition_type,\n self.condition_value\n ))\n\n @classmethod\n def solve(cls, registers):\n\n operators = {\n '>': (lambda x, y: x > y),\n '<': (lambda x, y: x < y),\n '<=': (lambda x, y: x <= y),\n '>=': (lambda x, y: x >= y),\n '==': (lambda x, y: x == y),\n '!=': (lambda x, y: x != y),\n 'inc': (lambda x, y: x + y),\n 'dec': (lambda x, y: x - y)\n }\n\n for instruction in cls.instructions:\n condition_check_value = registers[instruction.condition_register]\n\n if operators[instruction.condition_type](condition_check_value, instruction.condition_value):\n registers[instruction.register_to_modify] = operators[instruction.action](registers[instruction.register_to_modify], instruction.action_value)\n\n\n sort_registers = sorted(registers.items(), key=operator.itemgetter(1))\n pprint(sort_registers)\n max_register = sort_registers[-1][0]\n max_value = sort_registers[-1][1]\n print(\"register {}: {}\".format(max_register, max_value))\n\n\n\ndef main():\n\n with open(file=INPUTFILE, mode='r') as fileinput:\n reader = csv.reader(fileinput, delimiter=\" \")\n\n for row in reader:\n Instruction(row)\n\n # default i dicten er 0 (int)\n registers = collections.defaultdict(int)\n\n Instruction.solve(registers)\n #pprint(Instruction.instructions)\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n INPUTFILE = sys.argv[1]\n main()\n","sub_path":"task08_a.py","file_name":"task08_a.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"107545784","text":"#函数create_str_to_txt(str_data, path_file_name)调用时需传入两个参数,第一个参数str_data用于接受写入的字符串,第二个参数path_file_name用于接受字符串要写入的目标文件\ndef create_str_to_txt(str_data,path_file_name):\n f1 = open(path_file_name, \"a+\")\n f1.write(str_data)\n f1.close()\n#将规范化关系人物网址.txt的所有元素按行存入列表list2中\nlist2 = []\nwith open('./网址信息_人物名称.txt') as f:\n for line in f:\n mid = line.split('\\t')\n for item in mid:\n list2.append(item)\npath_file_name = './去重最终版网址信息_人物名称.txt'\n#提取列表list2中关系人物百度百科链接的部分\nlinklist2 = []\nname2 = []\nj1 = 1\nwhile j1 < len(list2):\n linklist2.append(list2[j1])\n j1 += 2\n#提取列表list2中名字的部分\ni1 = 0\nwhile i1 < len(list2):\n name2.append(list2[i1])\n i1 += 2\n#对于所有三国人物以链接是否相同为标准进行去重,去重后加入去重最终版网址信息_人物名称.txt中\nwhole = []\nwhole = list(zip(name2, linklist2))\nall = []\nname = []\nfor item in whole:\n if not item[1] in all:\n str = \"\\t\".join([item[0], item[1]])\n print(str)\n all.append(item[1])\n name.append(item[0])\n create_str_to_txt(str,path_file_name)\n else:\n 
continue\n\n","sub_path":"src/人物网址去重.py","file_name":"人物网址去重.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"192179520","text":"import inspect\nfrom functools import wraps\n\nfrom tdb.trace.tracer import Tracer\n\n\ndef trace(function=None, *,\n session_name=None,\n log_to_memory=False,\n repl_on_exception=False,\n included_modules: list[str] = None,\n **decorator_kws):\n caller_frame = inspect.currentframe().f_back\n module_name = inspect.getmodule(caller_frame).__name__\n\n if not included_modules:\n included_modules = [module_name]\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kws):\n with Tracer(\n session_name or f'{module_name}::{str(fn.__name__)}',\n included_modules=included_modules,\n repl_on_exception=repl_on_exception,\n log_to_memory=log_to_memory) as t:\n wrapper.trace_log = t.trace_log\n return fn(*args, **kws)\n\n return wrapper\n\n if callable(function):\n return decorator(function)\n else:\n return decorator\n","sub_path":"tdb/trace/trace_decorator.py","file_name":"trace_decorator.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"63066576","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtility/common code of library.\n\"\"\"\n\n__author__ = 'Grzegorz Latuszek, Michal Ernst, Marcin Usielski'\n__copyright__ = 'Copyright (C) 2018, Nokia'\n__email__ = 'grzegorz.latuszek@nokia.com, michal.ernst@nokia.com, marcin.usielski@nokia.com'\n\nimport copy\nimport datetime\nimport importlib\nimport logging\nimport re\n\nimport deepdiff\n\nif datetime.time not in deepdiff.diff.numbers:\n deepdiff.diff.numbers = deepdiff.diff.numbers + (datetime.time,)\n\ntry:\n import collections.abc as collections\nexcept ImportError:\n import collections\n\n\nclass ClassProperty(property):\n def __get__(self, cls, owner):\n return classmethod(self.fget).__get__(None, owner)()\n\n\ndef copy_list(src, deep_copy=False):\n \"\"\"\n Copies list, if None then returns empty list\n :param src: List to copy\n :param deep_copy: if False then shallow copy, if True then deep copy\n :return: Copied list\n \"\"\"\n if src is None:\n return list()\n if deep_copy:\n return copy.deepcopy(src)\n return list(src)\n\n\ndef copy_dict(src, deep_copy=False):\n \"\"\"\n Copies dict, if None then returns empty dict\n :param src: List to copy\n :param deep_copy: if False then shallow copy, if True then deep copy\n :return: Copied dict\n \"\"\"\n if src is None:\n return dict()\n if deep_copy:\n return copy.deepcopy(src)\n return dict(src)\n\n\ndef instance_id(instance):\n \"\"\"\n Return id of instance in hex form.\n Helps in logs/debugs/development troubleshooting.\n \"\"\"\n instance_id = hex(id(instance))[2:] # remove leading 0x\n return instance_id\n\n\ndef camel_case_to_lower_case_underscore(string):\n \"\"\"\n Split string by upper case letters.\n F.e. 
useful to convert camel case strings to underscore separated ones.\n @return words (list)\n \"\"\"\n words = []\n from_char_position = 0\n for current_char_position, char in enumerate(string):\n if char.isupper() and from_char_position < current_char_position:\n words.append(string[from_char_position:current_char_position].lower())\n from_char_position = current_char_position\n words.append(string[from_char_position:].lower())\n return '_'.join(words)\n\n\n_re_escape_codes = re.compile(r\"\\x1B\\[[0-?]*[ -/]*[@-~]\") # Regex to remove color codes from command output\n\n\ndef remove_escape_codes(line):\n \"\"\"\n :param line: line from terminal\n :return: line without terminal escape codes\n \"\"\"\n line = re.sub(_re_escape_codes, \"\", line)\n return line\n\n\ndef create_object_from_name(full_class_name, constructor_params):\n name_splitted = full_class_name.split('.')\n module_name = \".\".join(name_splitted[:-1])\n class_name = name_splitted[-1]\n\n imported_module = importlib.import_module(module_name)\n class_imported = getattr(imported_module, class_name)\n obj = class_imported(**constructor_params)\n return obj\n\n\ndef update_dict(target_dict, expand_dict):\n for key, value in expand_dict.items():\n if (key in target_dict and isinstance(target_dict[key], dict) and isinstance(expand_dict[key],\n collections.Mapping)):\n update_dict(target_dict[key], expand_dict[key])\n else:\n target_dict[key] = expand_dict[key]\n\n\ndef compare_objects(first_object, second_object, ignore_order=False, report_repetition=False, significant_digits=None,\n exclude_paths=None, exclude_types=None, verbose_level=2):\n \"\"\"\n Return difference between two objects.\n :param first_object: first object to compare\n :param second_object: second object to compare\n :param ignore_order: ignore difference in order\n :param report_repetition: report when is repetition\n :param significant_digits: use to properly compare numbers(float arithmetic error)\n :param exclude_paths: path which be excluded from comparison\n :param exclude_types: types which be excluded from comparison\n :param verbose_level: higher verbose level shows you more details - default 0.\n :return: difference between two objects\n \"\"\"\n if exclude_paths is None:\n exclude_paths = set()\n if exclude_types is None:\n exclude_types = set()\n\n diff = deepdiff.DeepDiff(first_object, second_object, ignore_order=ignore_order,\n report_repetition=report_repetition, significant_digits=significant_digits,\n exclude_paths=exclude_paths, exclude_types=exclude_types, verbose_level=verbose_level)\n return diff\n\n\nclass ForwardingHandler(logging.Handler):\n \"\"\"\n Take log record and pass it to target_logger\n \"\"\"\n\n def __init__(self, target_logger_name):\n super(ForwardingHandler, self).__init__(level=1)\n self.target_logger_name = target_logger_name\n self.target_logger = logging.getLogger('moler')\n\n def emit(self, record):\n \"\"\"\n Emit a record.\n\n Output the record to the target_logger, catering for rollover as described\n in doRollover().\n \"\"\"\n record.name = self.target_logger_name\n\n if (record.levelno == logging.INFO) or (record.levelname == \"INFO\"):\n record.levelno = logging.DEBUG\n record.levelname = \"DEBUG\"\n\n self.target_logger.handle(record)\n","sub_path":"moler/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"204879341","text":"__author__ = 'xy'\n\nimport socket\nimport 
threading\n\nbind_ip = '127.0.0.1'\nbind_port = 9999\n\n# 设置监听端口\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((bind_ip, bind_port))\n\n# 开始监听,设置最大连接数量为5\nserver.listen(5)\nprint(\"[*] Listening on %s:%d\" % (bind_ip, bind_port))\n\n\n# 客户处理线程\ndef handle_client(client_socket):\n # 接受客户端数据并打印出来\n requset = client_socket.recv(1024)\n print(\"[*] Received: %s\" % requset)\n # 返还客户端一个数据包\n client_socket.send(\"ACK!\")\n\n client_socket.close()\n\n\nwhile True:\n # 将客户端的socket对象保存到client,将信息细节保存到addr中\n client, addr = server.accept()\n\n print(client)\n print(addr)\n\n print(\"[*] Accepted connection from: %s:%d\" % (addr[0], addr[1]))\n\n # 以线程启动handle_client(client)\n client_handler = threading.Thread(target=handle_client, args=(client,))\n client_handler.start()\n","sub_path":"TCPserver.py","file_name":"TCPserver.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"246678381","text":"class K:\r\n def __init__(self, k):\r\n self.k = k\r\n\r\n # adding two objects\r\n def __add__(self, o):\r\n return self.k + o.k\r\nob1 = K(1)\r\nob2 = K(2)\r\nob3 = K(\"Geeks\")\r\nob4 = K(\"For\")\r\n\r\nprint(ob1 + ob2)\r\nprint(ob3 + ob4)","sub_path":"InnekePuspitasari/Overloading.py","file_name":"Overloading.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"218900101","text":"import sqlite3\nbanco = sqlite3.connect('banco_dados.db')\ncursor = banco.cursor()\n\ntry:\n cursor.execute(f\"SELECT * from conta WHERE login = '{login}' AND senha = '{senha}'\")\nexcept sqlite3.Error as erro:\n print(f'Digite a senha ou login Corretamente {erro}')\nbanco.commit()\nresultado = cursor.fetchall()\nif len(resultado) != 0:\n print(f'Olá {login}, Bem Vindo')\nelse:\n print('Digite a senha ou login corretamente')","sub_path":"SQLite/Pratica/ex002/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"311900561","text":"import glob\nimport re\nfrom os.path import join\n\nfile_lists = glob.glob(join(\".\", \"file_indexer_folder\", \"*.txt\"))\n\ndic = dict()\n\nfor file_path in file_lists:\n with open(file_path, \"r+\") as f:\n data = f.read()\n r = re.compile(r\"\\w+\")\n\n extracted_data = r.findall(data)\n\n for d in extracted_data:\n if d not in dic:\n dic[d] = [file_path]\n else:\n dic[d].append(file_path)\n\nprint(dic)","sub_path":"preprocess_data/practice/file_indexer.py","file_name":"file_indexer.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"539067986","text":"# Copyright (c) 2017 Cisco Systems, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom networking_vpp import config_opts\nfrom networking_vpp import constants as nvpp_const\nfrom networking_vpp import exceptions as nvpp_exc\n\nfrom networking_vpp.compat import context as n_context\nfrom networking_vpp.compat import driver_api\nfrom networking_vpp.compat import n_exc\nfrom networking_vpp.db.models import GpeAllocation\n\nfrom neutron.db import api as db_api\nfrom neutron.plugins.ml2.drivers import helpers\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom six import moves\n\nLOG = logging.getLogger(__name__)\n\n\nclass GpeTypeDriver(helpers.SegmentTypeDriver):\n \"\"\"A GPE network type driver.\n\n This class implements a type driver for tenant networks\n of type GPE. This driver is responsible for managing the\n VNI allocations and deallocations from a pool of VNIs.\n It enables overlay network connectivity between tenant\n instances using the GPE protocol.\n \"\"\"\n\n def __init__(self):\n super(GpeTypeDriver, self).__init__(GpeAllocation)\n self.initialize()\n\n def get_type(self):\n return nvpp_const.TYPE_GPE\n\n def initialize(self):\n try:\n config_opts.register_vpp_opts(cfg.CONF)\n gpe_vni_ranges = cfg.CONF.ml2_vpp.gpe_vni_ranges\n self.segmentation_key = next(iter(self.primary_keys))\n # A list of VNI tuples:(min_vni, max_vni) available for allocation\n self.gpe_ranges = []\n self.gpe_ranges.extend(self._parse_gpe_vni_ranges(gpe_vni_ranges))\n self.sync_allocations()\n except nvpp_exc.GpeVNIRangeError(vni_range=gpe_vni_ranges):\n LOG.exception(\"Failed to parse gpe_vni_ranges from config. \"\n \"Service terminated!\")\n raise SystemExit()\n\n def _parse_gpe_vni_ranges(self, gpe_vni_ranges):\n \"\"\"Parses a well formed GPE VNI range string.\n\n The GPE VNI ranges string is a comma-separated list of:\n : tuples that are available for tenant network\n allocation.\n It is set in the ml2_conf.ini file using the config option:\n gpe_vni_ranges = : (or)\n gpe_vni_ranges = :, :\n :param: gpe_vni_range: The GPE VNI range string to parse.\n :returns: A list of valid gpe_vni_range tuples\n \"\"\"\n vni_ranges = []\n # String format: gpe_vni_ranges = 2000:3000,4000:5000\n LOG.debug('GPE driver parsing vni ranges: %s', gpe_vni_ranges)\n for entry in gpe_vni_ranges:\n try:\n min_vni, max_vni = entry.strip().split(':')\n vni_range = int(min_vni.strip()), int(max_vni.strip())\n self._verify_gpe_vni(vni_range)\n vni_ranges.append(vni_range)\n except ValueError:\n raise nvpp_exc.GpeVNIRangeError(vni_range=gpe_vni_ranges)\n return vni_ranges\n\n def _verify_gpe_vni(self, vni_or_vni_range):\n \"\"\"Verify if the GPE VNI is valid.\n\n :param: vni_or_vni_range: An integer vni value or a tuple containing\n a vni_range value in the format (vni_min, vni_max).\n \"\"\"\n if isinstance(vni_or_vni_range, tuple):\n vnis = list(vni_or_vni_range)\n else:\n vnis = []\n vnis.append(vni_or_vni_range)\n for vni in vnis:\n if not (nvpp_const.MIN_GPE_VNI <= int(vni)\n <= nvpp_const.MAX_GPE_VNI):\n LOG.error(\"Invalid GPE VNI %s\", vni)\n raise ValueError\n\n @db_api.retry_db_errors\n def sync_allocations(self):\n \"\"\"Determine the currently allocatable GPE VNIs in the DB.\"\"\"\n\n LOG.debug('gpe_type-driver: Syncing DB VNI allocations')\n ctx = n_context.get_admin_context()\n session = ctx.session\n valid_gpe_vnis = set()\n for min_vni, max_vni in self.gpe_ranges:\n valid_gpe_vnis |= set(moves.range(min_vni, max_vni + 1))\n\n with 
session.begin(subtransactions=True):\n            # Current VNI allocations in DB\n            allocs = (session.query(self.model).with_lockmode(\n                'update').all())\n            all_db_vnis = set([a.gpe_vni for a in allocs])\n            # VNIs not in DB but newly added in the ML2 config\n            missing_vnis = valid_gpe_vnis - all_db_vnis\n            # Set of unallocated VNIs in the DB\n            unallocated_vnis = set([a.gpe_vni for a in allocs if\n                                    not a.allocated])\n            # Remove unallocated VNIs from the DB that are invalid\n            vnis_to_remove = unallocated_vnis - valid_gpe_vnis\n            LOG.debug(\"gpe_type_driver: vnis to remove: %s\", vnis_to_remove)\n            LOG.debug(\"gpe_type_driver: vnis to add: %s\", missing_vnis)\n            # Remove any invalid VNIs\n            for alloc in allocs:\n                if alloc.gpe_vni in vnis_to_remove:\n                    session.delete(alloc)\n            # Add the missing GPE VNIs to the DB\n            for vni in sorted(missing_vnis):\n                alloc = self.model(gpe_vni=vni, allocated=False)\n                session.add(alloc)\n\n    def is_partial_segment(self, segment):\n        return segment.get(driver_api.SEGMENTATION_ID) is None\n\n    def validate_provider_segment(self, segment):\n        network_type = segment.get(driver_api.NETWORK_TYPE)\n        segmentation_id = segment.get(driver_api.SEGMENTATION_ID)\n        for key, value in segment.items():\n            if value and key not in [driver_api.NETWORK_TYPE,\n                                     driver_api.SEGMENTATION_ID]:\n                msg = _(\"%(key)s prohibited for %(gpe)s network\") % {\n                    'key': key, 'gpe': network_type}\n                raise n_exc.InvalidInput(error_message=msg)\n        self._verify_gpe_vni(segmentation_id)\n\n    def reserve_provider_segment(self, session, segment):\n        if self.is_partial_segment(segment):\n            alloc = self.allocate_partially_specified_segment(session)\n            if not alloc:\n                raise n_exc.NoNetworkAvailable()\n        else:\n            segmentation_id = segment.get(driver_api.SEGMENTATION_ID)\n            alloc = self.allocate_fully_specified_segment(\n                session, **{self.segmentation_key: segmentation_id})\n            if not alloc:\n                raise nvpp_exc.GpeVNIInUse(vni_id=segmentation_id)\n\n        return {driver_api.NETWORK_TYPE: self.get_type(),\n                driver_api.PHYSICAL_NETWORK: None,\n                driver_api.SEGMENTATION_ID: getattr(alloc,\n                                                    self.segmentation_key),\n                driver_api.MTU: self.get_mtu()}\n\n    def allocate_tenant_segment(self, session):\n        alloc = self.allocate_partially_specified_segment(session)\n        if not alloc:\n            return\n        return {driver_api.NETWORK_TYPE: self.get_type(),\n                driver_api.PHYSICAL_NETWORK: None,\n                driver_api.SEGMENTATION_ID: getattr(alloc,\n                                                    self.segmentation_key),\n                driver_api.MTU: self.get_mtu()}\n\n    def release_segment(self, context, segment):\n        vni_id = segment[driver_api.SEGMENTATION_ID]\n        LOG.debug('Releasing GPE segment %s', vni_id)\n        valid = any(lo <= vni_id <= hi for lo, hi in self.gpe_ranges)\n\n        info = {'type': self.get_type(), 'id': vni_id}\n        with context.session.begin(subtransactions=True):\n            query = (context.session.query(self.model).\n                     filter_by(**{self.segmentation_key: vni_id}))\n            if valid:\n                count = query.update({\"allocated\": False})\n                if count:\n                    LOG.debug(\"Releasing %(type)s VNI %(id)s to pool\",\n                              info)\n            else:\n                count = query.delete()\n                if count:\n                    LOG.debug(\"Releasing %(type)s VNI %(id)s outside pool\",\n                              info)\n            if not count:\n                LOG.warning(\"%(type)s VNI %(id)s not found\", info)\n\n    def get_allocation(self, context, gpe_vni_id):\n        return (context.session.query(self.model).\n                filter_by(**{self.segmentation_key: gpe_vni_id}).first())\n\n    def get_mtu(self, physical_network=None):\n        mtu = super(GpeTypeDriver, self).get_mtu()\n        return mtu - nvpp_const.GPE_ENCAP_OVERHEAD if mtu else 
0\n","sub_path":"networking_vpp/types/type_gpe.py","file_name":"type_gpe.py","file_ext":"py","file_size_in_byte":9070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"270296643","text":"from __future__ import print_function\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.utils import np_utils, generic_utils\nfrom six.moves import range\nimport numpy as np\nimport scipy as sp\nfrom keras import backend as K \nimport random\nimport scipy.io\nfrom scipy.stats import mode\n# custom module for cnn model\nfrom cnn_architecture1 import *\n\nbatch_size = 128\nnb_classes = 10\n#use a large number of epochs\nnb_epoch = 50\n# input image dimensions\nimg_rows, img_cols = 28, 28\ninput_shape = (img_rows, img_cols, 1)\n\nacquisition_iterations = 98\nnum_of_queries = 10\n\n# the data, shuffled and split between tran and test sets\n(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()\n\nX_train_All = X_train_All.reshape(X_train_All.shape[0], img_rows, img_cols, 1)\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n\nrandom_split = np.asarray(random.sample(range(0, X_train_All.shape[0]), X_train_All.shape[0]))\n\nX_train_All = X_train_All[random_split]\ny_train_All = y_train_All[random_split]\nX_Pool = X_train_All[20000:60000]\ny_Pool = y_train_All[20000:60000]\n\nX_train_All = X_train_All[0:10000]\ny_train_All = y_train_All[0:10000]\n\nX_train, y_train = np.array([], dtype=np.int64), np.array([], dtype=np.int64)\n\nfor index in range(10):\n idx = np.array(np.where(y_train_All==index)).T\n idx = idx[0:2, 0]\n X = X_train_All[idx, :, :, :]\n y = y_train_All[idx]\n \n X_train = np.concatenate((X_train, X), axis=0 ) if X_train.size else X\n y_train = np.concatenate((y_train, y), axis=0 ) if y_train.size else y\n\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\n\nprint('Distribution of Training Classes:', np.bincount(y_train))\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_Pool = X_Pool.astype('float32')\nX_train /= 255\nX_Pool /= 255\nX_test /= 255\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\nY_Pool = np_utils.to_categorical(y_Pool, nb_classes)\n\nmodel.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=0)\n\nprint('Evaluating Test Accuracy Without Acquisition')\nscore, acc = model.evaluate(X_test, Y_test, verbose=0)\nall_accuracy = acc\n\nprint('Starting Active Learning in Experiment ')\n\n# Deterministic CNN\nfor i in range(acquisition_iterations):\n print('ACQUISITION ITERATION ' + str(i+1) + ' of ' + str(acquisition_iterations))\n\n pool_subset_count = 2000\n pool_subset_random_index = np.asarray(random.sample(range(0, X_Pool.shape[0]), pool_subset_count))\n X_Pool_subset = X_Pool[pool_subset_random_index]\n y_Pool_subset = y_Pool[pool_subset_random_index]\n\n print('Search over Pool of Unlabeled Data')\n \n # Var ratio active learning acquisition function\n D_probs = model.predict_proba(X_Pool_subset) \n acquired_index = np.argsort(np.max(D_probs, axis=1))[:num_of_queries]\n acquired_X = X_Pool_subset[acquired_index]\n acquired_Y = y_Pool_subset[acquired_index]\t\n\n # Remove the acquired data from the unlabeled Pool\n X_Pool = np.delete(X_Pool, (pool_subset_random_index[acquired_index]), axis=0)\n y_Pool = np.delete(y_Pool, (pool_subset_random_index[acquired_index]), axis=0)\n\n print('Acquired Points added to the training set')\n 
X_train = np.concatenate((X_train, acquired_X), axis=0)\n y_train = np.concatenate((y_train, acquired_Y), axis=0)\n print('Train Data size: ' + str(X_train.shape)) \n print('Unlabeled Pool size: ' + str(X_Pool.shape))\n\n print('Train Again with the added points')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, nb_classes)\n\n model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, verbose=0)\n \n print('Evaluate Model Test Accuracy after training')\n score, acc = model.evaluate(X_test, Y_test, verbose=0)\n # print('Test score:', score)\n print('Test accuracy:', acc)\n all_accuracy = np.append(all_accuracy, acc)\n print()\n\n\nprint('Storing Accuracy Values over experiments')\nnp.save('test_acc.npy', all_accuracy)\n","sub_path":"deterministic_cnn.py","file_name":"deterministic_cnn.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"616655922","text":"import os\nimport sys\nimport json\nimport jinja2\nimport logging\nimport tempfile\nimport subprocess\nimport urllib.parse\n\nlogger = logging.getLogger(__name__)\n\n# javascript to pass to node and take a screen shot\nJS_TEMPLATE_FILE=os.path.join(os.path.dirname(__file__), 'screen.js')\n\ndef filename_from_url(url):\n u = urllib.parse.urlparse(url)\n host = u.netloc\n if ':' in host:\n host, port = host.split(':')\n else:\n if u.scheme.lower() == 'http':\n port = 80\n else:\n port = 443\n return '{}-{}-{}'.format(u.scheme, host, port).replace('/', '').replace('\\\\', '')\n\ndef build(url, timeout=5000, mobile=False, headers={}, screen_wait_ms=2000):\n js_h, js_tmp = tempfile.mkstemp(prefix='script.', suffix='.js', dir='.')\n os.close(js_h)\n\n img_h, img_tmp = tempfile.mkstemp(prefix=filename_from_url(url)+'.', suffix='.png', dir='.')\n os.close(img_h)\n\n inf_h, inf_tmp = tempfile.mkstemp(prefix='info.', suffix='.json', dir='.')\n os.close(inf_h)\n\n script = jinja2.Template(open(JS_TEMPLATE_FILE, 'rb').read().decode())\n rendered = script.render(\n url=url,\n image=img_tmp.replace('\\\\', '\\\\\\\\'),\n timeout=timeout,\n mobile=str(mobile).lower(),\n screen_wait=screen_wait_ms,\n headers=json.dumps(headers),\n page_info_file=inf_tmp.replace('\\\\', '\\\\\\\\'))\n open(js_tmp, 'wb').write(rendered.encode())\n return js_tmp, img_tmp, inf_tmp\n\ndef run(script_path, node_path='node'):\n cmd = [node_path, script_path]\n try:\n subprocess.check_call(cmd, timeout=60)\n except Exception as e:\n logger.error('Failed to call node: '+str(e))\n return False\n return True\n","sub_path":"js/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634390549","text":"def make_dict(train_dir):\n emails = [os.path.join(train_dir, f) for f in os.listdir(train_dir)]\n all_words = []\n for mail in emails:\n with open(mail, encoding='latin-1') as m:\n for i, line in enumerate(m):\n if i == 1: # Body of email is only 3rd line of text file\n words = line.split()\n all_words += words\n\n dictionary = Counter(all_words)\n\n for item in list(dictionary):\n if item.isalpha() == False:\n del dictionary[item]\n elif len(item) == 1:\n del dictionary[item]\n dictionary = dictionary.most_common(100)\n return dictionary\n\nimport os\nimport numpy as np\nfrom collections import Counter\n\nham_dict = make_dict('./ham')\nspam_dict = 
make_dict('./spam')\n\nprint(ham_dict)\nprint(spam_dict)\n","sub_path":"data/spam_feature_extractor.py","file_name":"spam_feature_extractor.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"186188559","text":"def higherVersion2(ver1, ver2):\n list1 = list(ver1)\n list2 = list(ver2)\n sub = ord(\"0\")\n check = 0\n while(len(list1)!=0):\n log = 1\n sum1 = 0\n tmp = []\n while(len(list1)!=0 and list1[0]!=\".\"):\n tmp.append(list1.pop(0))\n n = len(tmp)\n for i in range(0,n):\n sum1 = sum1 + log*(ord(tmp[n-i-1])-sub)\n log = log*10\n log = 1\n sum2 = 0\n tmp = []\n while( len(list2)!=0 and list2[0]!=\".\"):\n tmp.append(list2.pop(0))\n n = len(tmp)\n for i in range(0,n):\n sum2 = sum2 + log*(ord(tmp[n-i-1])-sub)\n log = log*10\n if sum1 > sum2:\n check =1\n break\n elif sum2 > sum1:\n check = -1\n break\n if len(list1)!=0:\n list1.pop(0)\n list2.pop(0)\n return check\n","sub_path":"InterviewPractice/Sorting&Searching/Sorting/higherVersion2.py","file_name":"higherVersion2.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"531674951","text":"import datetime\nimport pandas as pd\nfrom Scrapper import Scrapper\n\n\ndef get_date(days: int):\n date = datetime.date.today() - datetime.timedelta(days=days)\n return date.strftime('%Y-%m-%d')\n\n\ndef merge_news_old(new_df, old_df):\n return pd.concat([new_df, old_df], sort=False).drop_duplicates().reset_index(drop=True)\n\n\ndef get_data(url, datasets_name):\n HEADERS = {\n 'Referer': 'https://stats.nba.com',\n 'Origin': 'https://stats.nba.com',\n 'x-nba-stats-token': 'true',\n 'x-nba-stats-origin': 'stats',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'\n }\n\n scrapper = Scrapper(headers=HEADERS, max_call_errors=5)\n json = scrapper.retrieve_json_api_from_url(url=url)\n\n if json == None:\n return None\n\n dfs = {}\n for elem in json['resultSets']:\n if elem['name'] not in datasets_name:\n continue\n\n df = pd.DataFrame(elem['rowSet'], columns=elem['headers'])\n dfs[elem['name']] = df\n\n return dfs\n\n\ndef get_game_detail(game_id):\n if type(game_id) != type(str()):\n game_id = '00' + str(game_id)\n\n url = 'https://stats.nba.com/stats/boxscoretraditionalv2?EndPeriod=10&EndRange=0&GameID='+str(game_id) \\\n + '&RangeType=0&Season=2019-20&SeasonType=Regular+Season&StartPeriod=1&StartRange=0'\n\n print(url)\n df = get_data(url, datasets_name=['PlayerStats'])\n return df['PlayerStats']\n","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"474681663","text":"\n\"\"\"Training method\"\"\"\n\nimport argparse\nimport json\nimport os\nimport pathlib\nfrom typing import Union\n\nimport numpy as np\nimport torch\nfrom torch.backends import cudnn\nimport pytorch_lightning as pl\n\nimport dgmvae.models as dvm\n\nfrom experiment import VAEUpdater\n\n\ndef main():\n\n # -------------------------------------------------------------------------\n # 1. 
Settings\n    # -------------------------------------------------------------------------\n\n    # Command-line arguments\n    args = init_args()\n\n    # Configs\n    config_path = os.getenv(\"CONFIG_PATH\", \"./src/config_ch1.json\")\n    with pathlib.Path(config_path).open() as f:\n        config = json.load(f)\n\n    # Path\n    root = pathlib.Path(os.getenv(\"DATA_ROOT\", \"./data/mnist/\"))\n    save_path = pathlib.Path(os.getenv(\"SAVE_PATH\", \"./logs/\"),\n                             os.getenv(\"EVALUATION_NAME\", \"dev\"))\n    model_path = save_path / \"representation\"\n    dataset = os.getenv(\"DATASET_NAME\", \"mnist\")\n\n    # Cuda setting\n    use_cuda = torch.cuda.is_available() and args.cuda != \"null\"\n    gpus = args.cuda if use_cuda else None\n\n    # Random seed\n    torch.manual_seed(args.seed)\n    np.random.seed(args.seed)\n    cudnn.deterministic = True\n    cudnn.benchmark = False\n\n    # -------------------------------------------------------------------------\n    # 2. Training\n    # -------------------------------------------------------------------------\n\n    # VAE model\n    model_dict = {\n        \"beta\": dvm.BetaVAE,\n        \"factor\": dvm.FactorVAE,\n        \"dipi\": dvm.DIPVAE,\n        \"dipii\": dvm.DIPVAE,\n        \"joint\": dvm.JointVAE,\n        \"tcvae\": dvm.TCVAE,\n        \"aae\": dvm.AAE,\n        \"avb\": dvm.AVB,\n    }\n    model = model_dict[args.model](**config[f\"{args.model}_params\"])\n\n    # Updater\n    updater = VAEUpdater(model, args, dataset, root, args.batch_size)\n\n    # Trainer\n    params = {\n        \"default_save_path\": save_path,\n        \"gpus\": gpus,\n        \"early_stop_callback\": None,\n        \"max_steps\": args.steps,\n        \"log_save_interval\": args.log_save_interval,\n    }\n    trainer = pl.Trainer(**params)\n\n    # Run\n    trainer.fit(updater)\n\n    # Export model\n    model_path.mkdir(parents=True, exist_ok=True)\n    ch_num = config[f\"{args.model}_params\"][\"channel_num\"]\n    export_model(updater.model, str(model_path / \"pytorch_model.pt\"),\n                 input_shape=(1, ch_num, 64, 64))\n\n\ndef export_model(model: Union[torch.nn.Module, torch.jit.ScriptModule],\n                 path: Union[str, pathlib.Path],\n                 input_shape: tuple = (1, 3, 64, 64),\n                 use_script_module: bool = True\n                 ) -> Union[str, pathlib.Path]:\n    \"\"\"Exports model.\n\n    Args:\n        model (torch.nn.Module or torch.jit.ScriptModule): Saved model.\n        path (str or pathlib.Path): Path to file.\n        input_shape (tuple, optional): Tuple of input data shape.\n        use_script_module (bool, optional): Boolean flag for using script\n            module.\n\n    Returns:\n        path (str or pathlib.Path): Path to saved file.\n    \"\"\"\n\n    model = model.cpu().eval()\n    if isinstance(model, torch.jit.ScriptModule):\n        assert use_script_module, \\\n            \"Provided model is a ScriptModule, set use_script_module to True.\"\n    if use_script_module:\n        if not isinstance(model, torch.jit.ScriptModule):\n            assert input_shape is not None\n            traced_model = torch.jit.trace(model, torch.zeros(*input_shape))\n        else:\n            traced_model = model\n        torch.jit.save(traced_model, path)\n    else:\n        torch.save(model, path) # saves model as a nn.Module\n    return path\n\n\ndef init_args():\n    parser = argparse.ArgumentParser(description=\"VAE training\")\n    parser.add_argument(\"--model\", type=str, default=\"beta\")\n    parser.add_argument(\"--cuda\", type=str, default=\"0\")\n    parser.add_argument(\"--seed\", type=int, default=0)\n    parser.add_argument(\"--steps\", type=int, default=100)\n    parser.add_argument(\"--batch-size\", type=int, default=64)\n    parser.add_argument(\"--log-save-interval\", type=int, default=100)\n\n    return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"408968786","text":"#!/usr/bin/env python\nimport argparse\nimport sys\nimport numpy as np\nimport h5py\nimport pygame\nimport json\nimport matplotlib.pyplot as plt\nfrom keras.models import model_from_json\n\npygame.init()\nsize = (320*2, 160*2)\npygame.display.set_caption(\"Viewer\")\nscreen = pygame.display.set_mode(size, pygame.DOUBLEBUF)\n\ncamera_surface = pygame.surface.Surface((320,160),0,24).convert()\n\n# ***** get perspective transform for images *****\nfrom skimage import transform as tf\n\nrsrc = \\\n [[43.45456230828867, 118.00743250075844],\n [104.5055617352614, 69.46865203761757],\n [114.86050156739812, 60.83953551083698],\n [129.74572757609468, 50.48459567870026],\n [132.98164627363735, 46.38576532847949],\n [301.0336906326895, 98.16046448916306],\n [238.25686790036065, 62.56535881619311],\n [227.2547443287154, 56.30924933427718],\n [209.13359962247614, 46.817221154818526],\n [203.9561297064078, 43.5813024572758]]\nrdst = \\\n [[10.822125594094452, 1.42189132706374],\n [21.177065426231174, 1.5297552836484982],\n [25.275895776451954, 1.42189132706374],\n [36.062291434927694, 1.6376192402332563],\n [40.376849698318004, 1.42189132706374],\n [11.900765159942026, -2.1376192402332563],\n [22.25570499207874, -2.1376192402332563],\n [26.785991168638553, -2.029755283648498],\n [37.033067044190524, -2.029755283648498],\n [41.67121717733509, -2.029755283648498]]\n\ntform3_img = tf.ProjectiveTransform()\ntform3_img.estimate(np.array(rdst), np.array(rsrc))\n\ndef perspective_tform(x, y):\n p1, p2 = tform3_img((x,y))[0]\n return p2, p1\n\n# ***** functions to draw lines *****\ndef draw_pt(img, x, y, color, sz=1):\n row, col = perspective_tform(x, y)\n if row >= 0 and row < img.shape[0] and \\\n col >= 0 and col < img.shape[1]:\n img[int(row-sz):int(row+sz), int(col-sz):int(col+sz)] = color\n\ndef draw_path(img, path_x, path_y, color):\n for x, y in zip(path_x, path_y):\n draw_pt(img, x, y, color)\n\n# ***** functions to draw predicted path *****\n\ndef calc_curvature(v_ego, angle_steers, angle_offset=0):\n deg_to_rad = np.pi/180.\n slip_fator = 0.0014 # slip factor obtained from real data\n steer_ratio = 15.3 # from http://www.edmunds.com/acura/ilx/2016/road-test-specs/\n wheel_base = 2.67 # from http://www.edmunds.com/acura/ilx/2016/sedan/features-specs/\n\n angle_steers_rad = (angle_steers - angle_offset) * deg_to_rad\n curvature = angle_steers_rad/(steer_ratio * wheel_base * (1. + slip_fator * v_ego**2))\n return curvature\n\ndef calc_lookahead_offset(v_ego, angle_steers, d_lookahead, angle_offset=0):\n #*** this function returns the lateral offset given the steering angle, speed and the lookahead distance\n curvature = calc_curvature(v_ego, angle_steers, angle_offset)\n\n # clip is to avoid arcsin NaNs due to too sharp turns\n y_actual = d_lookahead * np.tan(np.arcsin(np.clip(d_lookahead * curvature, -0.999, 0.999))/2.)\n return y_actual, curvature\n\ndef draw_path_on(img, speed_ms, angle_steers, color=(0,0,255)):\n path_x = np.arange(0., 50.1, 0.5)\n path_y, _ = calc_lookahead_offset(speed_ms, angle_steers, path_x)\n draw_path(img, path_x, path_y, color)\n\n# ***** main loop *****\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Path viewer')\n parser.add_argument('model', type=str, help='Path to model definition json. 
Model weights should be on the same path.')\n parser.add_argument('--dataset', type=str, default=\"2016-06-02--21-39-29\", help='Dataset/video clip name')\n parser.add_argument('--start', type=int, default=\"300\", help='Video testing start point (in sec)')\n parser.add_argument('--end', type=int, default=\"3000\", help='Video testing end point (in sec)') #need to set a valid end point\n parser.add_argument('--time', type=int, default=\"1\", help='Time Sequence length')\n args = parser.parse_args()\n out_file=list()\n dataset = args.dataset\n skip = args.start\n\n with open(args.model, 'r') as jfile:\n model = model_from_json(json.load(jfile))\n\n model.compile(\"sgd\", \"mse\")\n weights_file = args.model.replace('json', 'keras')\n model.load_weights(weights_file)\n print(model.summary())\n\n # default dataset is the validation data on the highway\n log = h5py.File(\"dataset/log/\"+dataset+\".h5\", \"r\") #log = h5py.File(\"dataset/log/2016-06-02--21-39-29.h5\", \"r\")\n cam = h5py.File(\"dataset/camera/\"+dataset+\".h5\", \"r\")\n print (log.keys())\n\n # skip to highway\n for i in range(skip*100, min(log['times'].shape[0], args.end*100)):\n if i%100 == 0:\n print (\"%.2f seconds elapsed\" % (i/100.0))\n\n '''For CNN model. However the image is used for viewer'''\n img = cam['X'][log['cam1_ptr'][i]].swapaxes(0,2).swapaxes(0,1)\n '''This is for CNN-LSTM Model.'''\n if args.time>1 :\n #img_2 = np.array((cam['X'][log['cam1_ptr'][i]]))\n #speed_ms = np.array([log['speed'][i]])\n #predicted_steers = model.predict([img_2[None, :, :, :], speed_ms[None, :, None]])[0][0][0]\n img_2 = np.array((cam['X'][log['cam1_ptr'][i]], cam['X'][log['cam1_ptr'][i+7]]))\n speed_ms = np.array((log['speed'][i],log['speed'][i+7]))\n predicted_steers = model.predict([img_2[None, :, :, :, :], speed_ms[None, :, None]])[0][0][0]\n\n else :\n predicted_steers = model.predict(img[None, :, :, :].transpose(0, 3, 1, 2))[0][0]\n\n print(predicted_steers)\n angle_steers = log['steering_angle'][i]\n speed_ms = log['speed'][i]\n out_file.append([i, predicted_steers, angle_steers, speed_ms])\n\n draw_path_on(img, speed_ms, -angle_steers/10.0)\n draw_path_on(img, speed_ms, -predicted_steers/10.0, (0, 255, 0))\n # draw on\n pygame.surfarray.blit_array(camera_surface, img.swapaxes(0,1))\n camera_surface_2x = pygame.transform.scale2x(camera_surface)\n screen.blit(camera_surface_2x, (0,0))\n pygame.display.flip()\n\n x=np.array(out_file)\n np.savetxt(\"./outputs/steering_model/out\"+args.dataset+\"---\"+ str(args.start)+ \"-\"+ str(args.end)+\".txt\",x, fmt=\"%4.3f\")\n\n #View both actual vs real\n x=x.transpose()\n plt.plot(x[0],x[1], '-g')\n plt.plot(x[0],x[2], '-b')\n plt.xlabel('Time (in ms)')\n plt.title(\"Plot of Actual and Predicted Steering -- dataset -- \" + args.dataset)\n plt.ylabel('Angle Value (in degrees)')\n plt.savefig(\"./outputs/steering_model/act_vs_predict\"+args.dataset+\"---start-\"+ str(args.start)+ \"-end-\"+ str(args.end)+\".png\",format='eps', dpi=1000)\n plt.show()\n\n #View Error\n plt.plot(x[0], x[1]-x[2], '-r')\n plt.title(\"Plot of Error between Actual-Predicted -- dataset -- \" + args.dataset)\n plt.xlabel('Time (in ms)')\n plt.ylabel('Error (in degrees)')\n plt.savefig(\"./outputs/steering_model/error_plot\"+args.dataset+\"---start-\"+ str(args.start)+ \"-end-\"+ str(args.end)+\".png\",format='eps', dpi=1000)\n plt.show()\n'''\nplt.scatter(dates,values)\nplt.plot(dates, values)\nplt.show()\n'''\n 
","sub_path":"view_steering_model.py","file_name":"view_steering_model.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"605947946","text":"\"\"\"The main program \"\"\"\nfrom classes import *\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.clock import Clock\nfrom kivy.graphics import Rectangle\n \n\nclass RootWidget(BoxLayout):\n\n def __init__(self,imageOfPuzzleChoosed,**kwargs):\n super(RootWidget, self).__init__(**kwargs)\n self.orientation=\"vertical\"\n self.labelTimer=Label(text=\"0 : 00\",size_hint=(0.25,0.1))\n self.seconds=0\n self.minutes=0\n Clock.schedule_interval(self.timer,1)\n self.buttonGiveUp=Button(text=\"Abandonner\",size_hint=(0.25,0.1),on_press=self.giveUp)\n self.boxOfPieces=GridLayout(cols=NB_PIECE_ROW_BOARD,size_hint=(1, 0.25))\n #self.boardPuzzle=Rectangle(pos=self.pos,size=self.size)\n self.add_widget(KvImage(source=\"images/foret.jpg\"))\n self.add_widget(self.labelTimer)\n self.add_widget(self.buttonGiveUp)\n #self.add_widget(self.boardPuzzle)\n self.add_widget(self.boxOfPieces)\n self.puzzle=Puzzle(imageOfPuzzleChoosed,WIDTH_PUZZLE,HEIGHT_PUZZLE,WIDTH_PIECE,HEIGHT_PIECE)\n self.click=ListProperty([0, 0])\n\n def timer(self,dt):\n self.seconds+=1\n if self.seconds==60:\n self.minutes+=1\n self.seconds=0\n self.labelTimer.text=f\"{self.minutes} : {self.seconds}\" \n\n def on_touch_down(self,touch):\n if self.collide_point(*touch.pos):\n self.click=touch.pos\n return True\n return super(RootWidget, self).on_touch_down(touch)\n\n def on_click(self,instance,pos):\n print('pressed at {pos}'.format(pos=pos))\n\n def giveUp(self,button):\n print(\"perdu\")\n\n\ndef displayHomeScreen():\n \"\"\"Display the home window of the game with the main menu\n #Button(text=\"Jouer\")\n Button(text=\"Importer 1 puzzle\")\n Button(text=\"Statistiques\")\n Button(text=\"Quitter\")\"\"\"\n pass\n\n\nclass MyApp(App):\n def build(self):\n imageOfPuzzleChoosed=\"images/foret.jpg\"\n root=RootWidget(imageOfPuzzleChoosed)\n displayHomeScreen()\n root.puzzle.makePartition(root.boxOfPieces)\n return root\n\n\nif __name__==\"__main__\":\n\tMyApp().run()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"150851124","text":"import os\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\n\ndef read_ratings(file_path, sep='::'):\n \"\"\"\n Reads the ratings file into a user x item DataFrame. 
Ratings are stored in 'database' form.\n    Where each line is in the form: <userId::movieId::rating::timestamp>\n    Unknown values are 0 and ratings are on a 1-5 scale\n    :param file_path: The ratings file path\n    :param sep: The separator between items\n    :return: The user x item ratings DataFrame\n    \"\"\"\n    ratings_file = os.path.abspath(file_path)\n    column_names = ['userId', 'movieId', 'rating', 'timestamp']\n    ratings = pd.read_csv(ratings_file, names=column_names, sep=sep, engine='python')\n    ratings = ratings.drop('timestamp', axis=1)\n    ratings[['userId', 'movieId']] = ratings[['userId', 'movieId']].astype('int32')\n    ratings[['rating']] = ratings[['rating']].astype('int8')\n    ratings = ratings.pivot('userId', 'movieId', 'rating').fillna(value=0)\n    return ratings\n\n\ndef get_ratings_sparsity(ratings):\n    \"\"\"\n    Calculates the sparsity of the ratings matrix\n    :param ratings: The user x item ratings DataFrame\n    :type ratings: DataFrame\n    :return: The percentage sparsity of the DataFrame\n    \"\"\"\n    sparsity = float(len(ratings.values.nonzero()[0]))\n    sparsity /= (ratings.shape[0] * ratings.shape[1])\n    sparsity *= 100\n\n    return sparsity\n\n\ndef split_train_test(ratings, test_ratio=0.2):\n    \"\"\"\n    Split the ratings matrix into test and train matrices.\n    :param ratings: The original user x item ratings DataFrame\n    :type ratings: DataFrame\n    :param test_ratio: The ratio of ratings to take for the test dataset\n    :type test_ratio: float\n    :return: The train and test ratings DataFrames\n    \"\"\"\n    test = pd.DataFrame(np.zeros(ratings.shape), index=ratings.index, columns=ratings.columns)\n    train = pd.DataFrame(np.zeros(ratings.shape), index=ratings.index, columns=ratings.columns)\n\n    for user in xrange(ratings.shape[0]):\n        user_ratings_indexes = ratings.iloc[user, :].nonzero()[0]\n        train_indexes, test_indexes = train_test_split(user_ratings_indexes, test_size=test_ratio)\n        train.iloc[user, train_indexes] = ratings.iloc[user, train_indexes]\n        test.iloc[user, test_indexes] = ratings.iloc[user, test_indexes]\n\n    return train, test\n\n\ndef get_rmse(predicted, actual):\n    \"\"\"\n    Calculates the root mean squared error between the predicted and actual ratings DataFrames\n    :param predicted: The predicted ratings\n    :type predicted: DataFrame\n    :param actual: The actual ratings\n    :type actual: DataFrame\n    :return: root mean squared error\n    \"\"\"\n    return np.sqrt(mean_squared_error(actual.values[actual.values.nonzero()].flatten(),\n                                      predicted.values[actual.values.nonzero()].flatten()))\n\n\ndef clamp(x, floor=1, ceiling=5):\n    \"\"\"\n    Clamps a value between the values floor and ceiling\n    :param x: The value to be clamped\n    :param floor: The minimum value for x\n    :param ceiling: The maximum value for x\n    :return: The clamped value of x\n    \"\"\"\n    if x > ceiling:\n        x = ceiling\n    elif x < floor:\n        x = floor\n    return x","sub_path":"py/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"74986074","text":"# Given a sequence of non-negative integers, where each number is written on a separate line. The sequence ends with 0. 
Determine the length of the widest fragment where all the elements are equal to each other.\n\ndef widest_fragment(nums):\n    max_fragment = 1\n    fragment = 1\n    for i in range(len(nums)-1):\n        if nums[i] == nums[i+1]:\n            fragment += 1\n            if fragment > max_fragment:\n                max_fragment = fragment\n        else:\n            fragment = 1\n    return max_fragment\n    \n\nprint(widest_fragment([2,2,7,7,7,7,7,7,9,9,9,9,9])) # 6","sub_path":"Warm-Ups/While_Loops/widest_fragment.py","file_name":"widest_fragment.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"431134801","text":"from __future__ import absolute_import\n\nfrom functools import partial\nimport cPickle as pickle\nimport socket\nimport threading\nimport zlib\nimport re\n\nfrom utils.misc import check_type, ignored, consume\n\n\nclass Packet(object):\n    \"\"\"This class is responsible for the packing and unpacking of data. A packet consists of a\n    simple fixed-length header containing the size (base 10 integer, in bytes) of the packet's\n    payload, followed by the actual payload. The payload is nothing but the pickle dump (a string)\n    of a given object. Use Packet.pack(obj) to create a packet from an object, or Packet.append()\n    to accumulate the pickle of an object and then Packet.unpack() it.\"\"\"\n    HEADER_SIZE = 15\n    HEADER_FORMAT = \"<P{payload_size:010d}C{compressed:d}>\"\n    HEADER_REGEX = re.compile(\"^<P(?P<payload_size>\\d{10})C(?P<compressed>\\d)>\")\n\n    __slots__ = (\"header\", \"compressed\", \"payload_size\", \"payload\")\n\n    def __init__(self):\n        self.header = \"\"\n        self.compressed = None\n        self.payload_size = None\n        self.payload = None\n\n    @classmethod\n    def from_data(cls, data):\n        packet = cls()\n        packet.append(data)\n        return packet\n\n    def clear(self):\n        self.header = \"\"\n        self.compressed = None\n        self.payload_size = None\n        self.payload = None\n\n    reset = clear\n\n    def append(self, data):\n        check_type(data, basestring)\n        if self.payload_size is None:\n            self.header += data\n            self.parse_header()\n        else:\n            self.payload += data\n        if self.payload_size is not None and len(self.payload) > self.payload_size:\n            raise Exception(\"current payload size exceeds size declared in header\")\n\n    def parse_header(self):\n        if self.payload_size is None and len(self.header) >= Packet.HEADER_SIZE:\n            buff = self.header\n            match = type(self).HEADER_REGEX.match(buff)\n            self.compressed = bool(int(match.group(\"compressed\")))\n            self.payload_size = int(match.group(\"payload_size\"))\n            self.header = buff[:Packet.HEADER_SIZE]\n            self.payload = buff[Packet.HEADER_SIZE:]\n\n    @property\n    def remaining(self):\n        \"\"\"Returns the number of bytes missing from the packet's payload, or, if the header is\n        still incomplete (so we don't know the size of the payload), returns the number of bytes\n        remaining in the header.\"\"\"\n        if self.payload_size is None:\n            return Packet.HEADER_SIZE - len(self.header)\n        else:\n            return self.payload_size - len(self.payload)\n\n    @property\n    def is_empty(self):\n        return len(self.header) == 0\n\n    @property\n    def is_complete(self):\n        if self.payload_size is None:\n            return False\n        assert len(self.payload) <= self.payload_size\n        return len(self.payload) == self.payload_size\n\n    @classmethod\n    def pack(cls, data, compress=False):\n        payload = to_pickle(data)\n        if compress:\n            payload = zlib.compress(payload)\n        packet = cls()\n        packet.header = cls.HEADER_FORMAT.format(payload_size=len(payload), compressed=compress)\n        packet.compressed = compress\n        packet.payload_size = len(payload)\n        packet.payload = payload\n        return packet\n\n    def 
unpack(self):\n if not self.is_complete:\n raise Exception(\"cannot unpack incomplete packet\")\n pickled_data = self.payload if not self.compressed else zlib.decompress(self.payload)\n return from_pickle(pickled_data)\n\n\nclass PacketDispatcher(object):\n \"\"\"This class uses a socket to communicate with other PacketDispatchers via TCP/IP (lan, wan,\n localhost, you name it :P). Sent data is wrapped with the Packet class, which adds a fixed-\n -length header to the data.\"\"\"\n TIMEOUT = 1.0 # second\n\n __slots__ = (\"socket\", \"packet\", \"running\", \"on_data_received\")\n\n def __init__(self, socket, on_data_received=None):\n self.socket = socket # the socket used by the packet dispatcher\n self.socket.settimeout(self.TIMEOUT) # make socket asynchronous\n self.packet = Packet() # used for accumulating messages\n self.running = False # running flag\n self.on_data_received = on_data_received # callback run when a packet is complete\n\n def wait_for_data(self, separate_thread=False):\n if separate_thread:\n thread = threading.Thread(target=partial(consume, self._wait_for_data))\n thread.start()\n else:\n return self._wait_for_data()\n\n def _wait_for_data(self):\n if self.running:\n raise Exception(\"already waiting for data\")\n self.running = True\n while self.running:\n while self.running and not self.packet.is_complete:\n with ignored(socket.timeout):\n received = self.socket.recv(self.packet.remaining)\n if len(received) == 0: # connection closed from the other side\n self.shutdown()\n break\n self.packet.append(received)\n if self.packet.is_complete:\n data = self.packet.unpack()\n self.packet.clear()\n if self.on_data_received is not None:\n self.on_data_received(data)\n yield data\n if not self.packet.is_empty:\n raise Exception(\"incomplete packet remains unhandled\")\n\n def stop(self):\n self.running = False\n\n def shutdown(self):\n self.stop()\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n\n def send(self, data, compress=False):\n packet = data if isinstance(data, Packet) else Packet.pack(data, compress=compress)\n self.socket.sendall(packet.header)\n self.socket.sendall(packet.payload)\n\n\nPacket.Dispatcher = PacketDispatcher\n\n\ndef to_pickle(obj):\n \"\"\"Creates a pickle from an object, calling __preserialize__() and __postserialize__() on the\n object if available. Data can be temporarily modified/removed from the object and returned by\n __preserialize__(). Any data returned by __preserialize__() will then be passed on to\n __postserialize__() so that the object's original state may be restored.\"\"\"\n cls = type(obj)\n pre_serialize = getattr(cls, \"__preserialize__\", None)\n temp_data = None\n if callable(pre_serialize):\n temp_data = pre_serialize(obj)\n pickled_obj = pickle.dumps(obj)\n post_serialize = getattr(cls, \"__postserialize__\", None)\n if callable(post_serialize):\n post_serialize(obj, temp_data)\n return pickled_obj\n\n\ndef from_pickle(pickled_obj):\n \"\"\"Opposite of to_pickle() (obviously). 
Calls __postdeserialize__() on the object if its class\n    provides it and if it is a callable.\"\"\"\n    obj = pickle.loads(pickled_obj)\n    cls = type(obj)\n    post_deserialize = getattr(cls, \"__postdeserialize__\", None)\n    if callable(post_deserialize):\n        post_deserialize(obj)\n    return obj\n","sub_path":"utils/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"360767562","text":"import sys\nfrom collections import deque\n\n# Finding the calf (BFS: state-tree search)\n\n# sys.stdin = open(\"input.txt\", 'r')\n\nMAX = 10000\nch = [0] * (MAX + 1) # visited check\ndis = [0] * (MAX + 1) # distance record\n\nn, m = map(int, input().split())\nch[n] = 1\ndis[n] = 0\ndQ = deque()\ndQ.append(n)\n\nwhile dQ:\n    now = dQ.popleft()\n    if now == m:\n        break\n\n    for next in (now - 1, now + 1, now + 5):\n        if 0 < next <= MAX and ch[next] == 0:\n            dQ.append(next)\n            ch[next] = 1\n            dis[next] = dis[now] + 1\n\n\nprint(dis[m])","sub_path":"section7/7-7.py","file_name":"7-7.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"23989528","text":"from PySide2 import QtCore\nfrom PySide2.QtWebEngineWidgets import QWebEngineView\nfrom PySide2.QtWidgets import (QMainWindow, QWidget, QApplication, QAction,\n                               QPushButton, QLineEdit, QTextEdit, QVBoxLayout,\n                               QGridLayout, QSplitter, QLabel, QFileDialog,\n                               QMessageBox, QComboBox, QScrollArea, QStyle,\n                               QGroupBox, QCheckBox, QTabWidget)\nfrom nwb_conversion_tools.gui.classes.console_widget import ConsoleWidget\nfrom nwb_conversion_tools.gui.classes.forms_general import GroupNwbfile, GroupSubject\nfrom nwb_conversion_tools.gui.classes.forms_ophys import GroupOphys\nfrom nwb_conversion_tools.gui.classes.forms_ecephys import GroupEcephys\nfrom nwb_conversion_tools.gui.classes.forms_behavior import GroupBehavior\nfrom nwb_conversion_tools.gui.classes.forms_ogen import GroupOgen\nfrom nwb_conversion_tools.gui.utils.name_references import name_to_gui_class\n\nimport numpy as np\nimport nbformat as nbf\nfrom pathlib import Path\nimport tempfile\nimport socket\nimport psutil\nimport shutil\nimport datetime\nimport importlib\nimport yaml\nimport sys\nimport os\n\n\nclass Application(QMainWindow):\n    def __init__(self, metafile=None, conversion_module=None, source_paths=None,\n                 kwargs_fields=None, extension_modules=None, extension_forms=None,\n                 show_add_del=False, nwbfile_loc=None, conversion_class=None, nwbwidgets=True):\n        super().__init__()\n        # Dictionary storing source file paths\n        self.source_paths = source_paths\n        # Path to conversion module .py file\n        self.conversion_module_path = conversion_module\n        # Dictionary storing custom boolean options (to form checkboxes)\n        self.kwargs_fields = kwargs_fields\n        # Boolean control to either show/hide the option for add/del Groups\n        self.show_add_del = show_add_del\n        # Extension modules\n        self.extension_modules = extension_modules\n        # Updates name_to_gui_class with extension classes\n        self.name_to_gui_class = name_to_gui_class\n        if extension_forms:\n            self.name_to_gui_class.update(extension_forms)\n        # Temporary folder path\n        self.temp_dir = tempfile.mkdtemp()\n        # default nwbfile save location:\n        self.nwbfile_loc = nwbfile_loc\n        # conversion_class:\n        self.conversion_class = conversion_class\n\n        self.resize(1200, 900)\n        self.setWindowTitle('NWB:N conversion tools')\n\n        # Initialize GUI elements\n        self.init_gui()\n        self.init_meta_tab()\n        
self.load_meta_file(filename=metafile)\n if nwbwidgets:\n self.init_nwb_explorer()\n self.show()\n\n def init_gui(self):\n \"\"\"Initiates GUI elements.\"\"\"\n mainMenu = self.menuBar()\n\n fileMenu = mainMenu.addMenu('File')\n action_choose_conversion = QAction('Choose conversion module', self)\n fileMenu.addAction(action_choose_conversion)\n action_choose_conversion.triggered.connect(self.load_conversion_module)\n\n helpMenu = mainMenu.addMenu('Help')\n action_about = QAction('About', self)\n helpMenu.addAction(action_about)\n action_about.triggered.connect(self.about)\n\n self.tabs = QTabWidget()\n self.setCentralWidget(self.tabs)\n\n def init_meta_tab(self):\n # Center panels -------------------------------------------------------\n self.groups_list = []\n\n # Left-side panel: forms\n self.btn_load_meta = QPushButton('Load metafile')\n self.btn_load_meta.setIcon(self.style().standardIcon(QStyle.SP_ArrowDown))\n self.btn_load_meta.clicked.connect(lambda: self.load_meta_file(filename=None))\n self.btn_load_meta.setToolTip(\"The YAML file with metadata for this conversion.\\n\"\n \"You can customize the metadata in the forms below.\")\n self.btn_save_meta = QPushButton('Save metafile')\n self.btn_save_meta.setIcon(self.style().standardIcon(QStyle.SP_DriveFDIcon))\n self.btn_save_meta.clicked.connect(self.save_meta_file)\n self.btn_run_conversion = QPushButton('Run conversion')\n self.btn_run_conversion.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.btn_run_conversion.clicked.connect(self.run_conversion)\n self.btn_form_editor = QPushButton('Form -> Editor')\n self.btn_form_editor.clicked.connect(self.form_to_editor)\n\n self.lbl_nwb_file = QLabel('Output nwb file:')\n self.lbl_nwb_file.setToolTip(\"Path to the NWB file that will be created.\")\n self.lin_nwb_file = QLineEdit('')\n if self.nwbfile_loc is not None:\n self.lin_nwb_file = QLineEdit(self.nwbfile_loc)\n self.btn_nwb_file = QPushButton()\n self.btn_nwb_file.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton))\n self.btn_nwb_file.clicked.connect(self.load_nwb_file)\n\n l_grid1 = QGridLayout()\n l_grid1.setColumnStretch(3, 1)\n l_grid1.addWidget(self.btn_load_meta, 0, 0, 1, 1)\n l_grid1.addWidget(self.btn_save_meta, 0, 1, 1, 1)\n l_grid1.addWidget(self.btn_run_conversion, 0, 2, 1, 1)\n l_grid1.addWidget(QLabel(), 0, 3, 1, 1)\n l_grid1.addWidget(self.btn_form_editor, 0, 4, 1, 2)\n l_grid1.addWidget(self.lbl_nwb_file, 1, 0, 1, 1)\n l_grid1.addWidget(self.lin_nwb_file, 1, 1, 1, 3)\n l_grid1.addWidget(self.btn_nwb_file, 1, 4, 1, 1)\n\n # Adds custom files/dir paths fields\n if self.source_paths is None:\n self.lbl_source_file = QLabel('source files:')\n self.lin_source_file = QLineEdit('')\n self.btn_source_file = QPushButton()\n self.btn_source_file.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton))\n self.btn_source_file.clicked.connect(self.load_source_files)\n l_grid1.addWidget(self.lbl_source_file, 3, 0, 1, 1)\n l_grid1.addWidget(self.lin_source_file, 3, 1, 1, 3)\n l_grid1.addWidget(self.btn_source_file, 3, 4, 1, 1)\n else:\n self.group_source_paths = QGroupBox('Source paths')\n self.grid_source = QGridLayout()\n self.grid_source.setColumnStretch(3, 1)\n ii = -1\n for k, v in self.source_paths.items():\n ii += 1\n lbl_src = QLabel(k + ':')\n setattr(self, 'lbl_src_' + str(ii), lbl_src)\n lin_src = QLineEdit(v['path'])\n setattr(self, 'lin_src_' + str(ii), lin_src)\n btn_src = QPushButton()\n btn_src.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton))\n setattr(self, 'btn_src_' + 
str(ii), btn_src)\n if v['type'] == 'file':\n btn_src.clicked.connect((lambda x: lambda: self.load_source_files(x[0], x[1]))([ii, k]))\n else:\n btn_src.clicked.connect((lambda x: lambda: self.load_source_dir(x[0], x[1]))([ii, k]))\n self.grid_source.addWidget(lbl_src, ii, 0, 1, 1)\n self.grid_source.addWidget(lin_src, ii, 1, 1, 3)\n self.grid_source.addWidget(btn_src, ii, 4, 1, 1)\n self.group_source_paths.setLayout(self.grid_source)\n l_grid1.addWidget(self.group_source_paths, 3, 0, 1, 6)\n\n # Adds custom kwargs checkboxes\n if self.kwargs_fields:\n self.group_kwargs = QGroupBox('KWARGS')\n self.grid_kwargs = QGridLayout()\n self.grid_kwargs.setColumnStretch(4, 1)\n ii = -1\n for k, v in self.kwargs_fields.items():\n ii += 1\n chk_kwargs = QCheckBox(k)\n chk_kwargs.setChecked(v)\n chk_kwargs.clicked.connect((lambda x: lambda: self.update_kwargs(x[0], x[1]))([ii, k]))\n setattr(self, 'chk_kwargs_' + str(ii), chk_kwargs)\n self.grid_kwargs.addWidget(chk_kwargs, ii // 4, ii % 4, 1, 1)\n self.group_kwargs.setLayout(self.grid_kwargs)\n l_grid1.addWidget(self.group_kwargs, 4, 0, 1, 6)\n\n self.l_vbox1 = QVBoxLayout()\n self.l_vbox1.addStretch()\n scroll_aux = QWidget()\n scroll_aux.setLayout(self.l_vbox1)\n l_scroll = QScrollArea()\n l_scroll.setWidget(scroll_aux)\n l_scroll.setWidgetResizable(True)\n\n self.l_vbox2 = QVBoxLayout()\n self.l_vbox2.addLayout(l_grid1)\n self.l_vbox2.addWidget(l_scroll)\n\n # Right-side panel\n # Metadata text\n editor_label = QLabel('Metafile preview:')\n r_grid1 = QGridLayout()\n r_grid1.setColumnStretch(1, 1)\n r_grid1.addWidget(editor_label, 0, 0, 1, 1)\n r_grid1.addWidget(QLabel(), 0, 1, 1, 1)\n self.editor = QTextEdit()\n r_vbox1 = QVBoxLayout()\n r_vbox1.addLayout(r_grid1)\n r_vbox1.addWidget(self.editor)\n\n # Logger\n log_label = QLabel('Log:')\n r_grid2 = QGridLayout()\n r_grid2.setColumnStretch(1, 1)\n r_grid2.addWidget(log_label, 0, 0, 1, 1)\n r_grid2.addWidget(QLabel(), 0, 1, 1, 1)\n self.logger = QTextEdit()\n self.logger.setReadOnly(True)\n r_vbox2 = QVBoxLayout()\n r_vbox2.addLayout(r_grid2)\n r_vbox2.addWidget(self.logger)\n\n r_vsplitter = QSplitter(QtCore.Qt.Vertical)\n ru_w = QWidget()\n ru_w.setLayout(r_vbox1)\n rb_w = QWidget()\n rb_w.setLayout(r_vbox2)\n r_vsplitter.addWidget(ru_w)\n r_vsplitter.addWidget(rb_w)\n\n # Metadata/conversion tab Layout\n self.left_w = QWidget()\n self.left_w.setLayout(self.l_vbox2)\n self.splitter = QSplitter(QtCore.Qt.Horizontal)\n self.splitter.addWidget(self.left_w)\n self.splitter.addWidget(r_vsplitter)\n\n self.metadata_layout = QVBoxLayout()\n self.metadata_layout.addWidget(self.splitter)\n self.tab_metadata = QWidget()\n self.tab_metadata.setLayout(self.metadata_layout)\n self.tabs.addTab(self.tab_metadata, 'Metadata/Conversion')\n\n # Background color\n p = self.palette()\n p.setColor(self.backgroundRole(), QtCore.Qt.white)\n self.setPalette(p)\n\n def init_nwb_explorer(self):\n \"\"\"Initializes NWB file explorer tab\"\"\"\n # Layout Widgets\n self.btn_load_nwbexp = QPushButton('Load NWB')\n self.btn_load_nwbexp.setIcon(self.style().standardIcon(QStyle.SP_ArrowDown))\n self.btn_load_nwbexp.clicked.connect(self.load_nwb_explorer)\n self.btn_load_nwbexp.setToolTip(\"Choose NWB file to explore!\")\n self.btn_close_nwbexp = QPushButton('Close')\n self.btn_close_nwbexp.setIcon(self.style().standardIcon(QStyle.SP_DialogCloseButton))\n self.btn_close_nwbexp.clicked.connect(self.close_nwb_explorer)\n self.btn_close_nwbexp.setToolTip(\"Close current file view.\")\n self.html = QWebEngineView()\n\n 
self.grid_widgets = QGridLayout()\n self.grid_widgets.setColumnStretch(2, 1)\n self.grid_widgets.addWidget(self.btn_load_nwbexp, 0, 0, 1, 1)\n self.grid_widgets.addWidget(self.btn_close_nwbexp, 0, 1, 1, 1)\n self.grid_widgets.addWidget(QLabel(), 0, 2, 1, 1)\n self.vbox_widgets = QVBoxLayout()\n self.vbox_widgets.addLayout(self.grid_widgets)\n self.vbox_widgets.addWidget(self.html)\n\n # Layout Console\n console_label = QLabel('Ipython console:')\n self.explorer_console = ConsoleWidget(par=self)\n self.explorer_console.setToolTip(\"nwbfile --> NWB file data\")\n\n self.grid_console = QGridLayout()\n self.grid_console.addWidget(console_label, 0, 0, 1, 1)\n self.grid_console.addWidget(self.explorer_console, 1, 0, 1, 1)\n\n hsplitter = QSplitter(QtCore.Qt.Horizontal)\n left_w = QWidget()\n left_w.setLayout(self.vbox_widgets)\n right_w = QWidget()\n right_w.setLayout(self.grid_console)\n hsplitter.addWidget(left_w)\n hsplitter.addWidget(right_w)\n\n # Add tab to GUI\n self.tabs.addTab(hsplitter, 'NWB widgets')\n\n def write_to_logger(self, txt):\n time = datetime.datetime.now().time().strftime(\"%H:%M:%S\")\n full_txt = \"[\" + time + \"] \" + txt\n self.logger.append(full_txt)\n\n def run_conversion(self):\n \"\"\"Runs conversion function.\"\"\"\n self.write_to_logger('Converting data to NWB... please wait.')\n self.toggle_enable_gui(enable=False)\n self.thread = ConversionFunctionThread(self)\n self.thread.finished.connect(lambda: self.finish_conversion(error=self.thread.error))\n self.thread.start()\n\n def finish_conversion(self, error):\n if error:\n self.write_to_logger('ERROR:')\n self.write_to_logger(str(error))\n else:\n self.write_to_logger('Data successfully converted to NWB.')\n self.toggle_enable_gui(enable=True)\n\n def toggle_enable_gui(self, enable):\n self.editor.setEnabled(enable)\n self.left_w.setEnabled(enable)\n\n def save_meta_file(self):\n \"\"\"Saves metadata to .yml file.\"\"\"\n filename, _ = QFileDialog.getSaveFileName(self, 'Save file', '', \"(*.yml);;(*.yaml)\")\n if filename:\n data = {}\n for grp in self.groups_list:\n info, error = grp.read_fields()\n if error is None:\n data[grp.group_type] = info\n else:\n return\n with open(filename, 'w') as f:\n yaml.dump(data, f, default_flow_style=False)\n\n def read_metadata_from_form(self):\n \"\"\"Loads metadata from form.\"\"\"\n metadata = {}\n for grp in self.groups_list:\n info, error = grp.read_fields()\n if error is None:\n metadata[grp.group_type] = info\n else:\n return\n return metadata\n\n def form_to_editor(self):\n \"\"\"Loads data from form to editor.\"\"\"\n metadata = self.read_metadata_from_form()\n txt = yaml.dump(metadata, default_flow_style=False)\n self.editor.setText(txt)\n\n def update_kwargs(self, ind, key):\n \"\"\"Updates the boolean values for keyword arguments.\"\"\"\n chk_kw = getattr(self, 'chk_kwargs_' + str(ind))\n self.kwargs_fields[key] = chk_kw.isChecked()\n\n def load_source_files(self, ind, key):\n \"\"\"Browser to source file location.\"\"\"\n filenames, ftype = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Open file',\n directory='',\n filter=\"(*)\"\n )\n if len(filenames):\n all_names = ''\n for fname in filenames:\n all_names += fname + ', '\n lin_src = getattr(self, 'lin_src_' + str(ind))\n lin_src.setText(all_names[:-2])\n self.source_paths[key]['path'] = all_names[:-2]\n\n def load_source_dir(self, ind, key):\n \"\"\"Browser to source directory location.\"\"\"\n dirname = QFileDialog.getExistingDirectory(\n parent=self,\n caption='Source directory',\n directory=''\n 
)\n        if len(dirname):\n            lin_src = getattr(self, 'lin_src_' + str(ind))\n            lin_src.setText(dirname)\n            self.source_paths[key]['path'] = dirname\n\n    def load_meta_file(self, filename=None):\n        \"\"\"\n        Opens (or browses to) a .yml file containing metadata for NWB. Then:\n        1. loads the internal variable self.metadata with the content\n        2. writes content to editor\n        3. updates forms\n        \"\"\"\n        if filename is None:\n            filename, ftype = QFileDialog.getOpenFileName(\n                parent=self,\n                caption='Open file',\n                directory='',\n                filter=\"(*.yml);;(*.yaml)\"\n            )\n            if ftype != '(*.yml)' and ftype != '(*.yaml)':\n                return\n        with open(filename) as f:\n            self.metadata = yaml.safe_load(f)\n        txt = yaml.dump(self.metadata, default_flow_style=False)\n        self.editor.setText(txt)\n        self.update_forms()\n\n    def load_conversion_module(self):\n        \"\"\"Browse to conversion script file location.\"\"\"\n        filename, ftype = QFileDialog.getOpenFileName(\n            parent=self,\n            caption='Open file',\n            directory='',\n            filter=\"(*py)\"\n        )\n        if filename != '':\n            self.conversion_module_path = filename\n\n    def load_nwb_file(self):\n        \"\"\"Browse to nwb file location.\"\"\"\n        filename, ftype = QFileDialog.getSaveFileName(\n            parent=self,\n            caption='Save file',\n            directory='',\n            filter=\"(*nwb)\"\n        )\n        if filename is not None:\n            self.lin_nwb_file.setText(filename)\n\n    def load_nwb_explorer(self):\n        \"\"\"Browse to nwb file location.\"\"\"\n        filename, ftype = QFileDialog.getOpenFileName(\n            parent=self,\n            caption='Load file',\n            directory='',\n            filter=\"(*nwb)\"\n        )\n        if filename != '':\n            # Opens file on Ipython console\n            self.run_console(fname=filename)\n            # Opens file on NWBWidgets\n            self.run_voila(fname=filename)\n\n    def close_nwb_explorer(self):\n        \"\"\"Close current NWB file view on explorer\"\"\"\n        if hasattr(self, 'voilathread'):\n            # Stop Voila thread\n            self.voilathread.stop()\n            # Closes nwb file on console\n            self.explorer_console._execute('io.close()', True)\n            self.explorer_console.clear()\n\n    def run_console(self, fname):\n        \"\"\"Loads NWB file on Ipython console\"\"\"\n        # Imports extension modules\n        imports_text = \"\"\n        if self.extension_modules:\n            for k, v in self.extension_modules.items():\n                imports_text += \"\\nfrom \" + k + \" import \" + \", \".join(v)\n        code = \"\"\"\n        import pynwb\n        import os\n        \"\"\" + imports_text + \"\"\"\n        fpath = os.path.join(r'\"\"\" + str(fname) + \"\"\"')\n        io = pynwb.NWBHDF5IO(fpath, 'r', load_namespaces=True)\n        nwbfile = io.read()\n        \"\"\"\n        self.explorer_console._execute(code, True)\n        self.explorer_console.clear()\n        self.explorer_console.print_text('nwbfile --> Loaded NWB file\\n')\n\n    def run_voila(self, fname):\n        \"\"\"Set up notebook and run it with a dedicated Voila thread.\"\"\"\n        # Stop any current Voila thread\n        self.close_nwb_explorer()\n        # Write Figure + ipywidgets to a .ipynb file\n        nb = nbf.v4.new_notebook()\n        # Imports extension modules\n        imports_text = \"\"\n        for k, v in self.extension_modules.items():\n            imports_text += \"\\nfrom \" + k + \" import \" + \", \".join(v)\n        code = \"\"\"\n        from nwbwidgets import nwb2widget\n        import pynwb\n        import os\n        \"\"\" + imports_text + \"\"\"\n        fpath = os.path.join(r'\"\"\" + str(fname) + \"\"\"')\n        io = pynwb.NWBHDF5IO(fpath, 'r', load_namespaces=True)\n        nwb = io.read()\n        nwb2widget(nwb)\n        \"\"\"\n        nb['cells'] = [nbf.v4.new_code_cell(code)]\n        nbpath = os.path.join(self.temp_dir, Path(fname).stem + '.ipynb')\n        nbf.write(nb, nbpath)\n        # Run instance of Voila with the just saved .ipynb file\n        port = get_free_port()\n        self.voilathread = voilaThread(parent=self, 
port=port, nbpath=nbpath)\n self.voilathread.start()\n # Load Voila instance on GUI\n self.update_html(url='http://localhost:' + str(port))\n # self.parent.write_to_logger(txt=self.name + \" ready!\")\n\n def update_html(self, url):\n \"\"\"Loads temporary HTML file and render it.\"\"\"\n self.html.load(QtCore.QUrl(url))\n self.html.show()\n\n def clean_groups(self):\n \"\"\"Removes all groups widgets.\"\"\"\n for grp in self.groups_list:\n nWidgetsVbox = self.l_vbox1.count()\n for i in range(nWidgetsVbox):\n if self.l_vbox1.itemAt(i) is not None:\n if grp == self.l_vbox1.itemAt(i).widget():\n self.l_vbox1.itemAt(i).widget().setParent(None) # deletes widget\n self.groups_list = [] # deletes all list items\n\n def update_forms(self):\n \"\"\"Updates forms fields with values in metadata.\"\"\"\n self.clean_groups()\n for grp in self.metadata:\n if grp == 'NWBFile':\n item = GroupNwbfile(parent=self, metadata=self.metadata['NWBFile'])\n item.write_fields(data=self.metadata['NWBFile'])\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n if grp == 'Subject':\n item = GroupSubject(parent=self)\n item.write_fields(data=self.metadata['Subject'])\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n if grp == 'Ophys':\n item = GroupOphys(self)\n for subgroup in self.metadata[grp]:\n # if many items of same class, in list\n if isinstance(self.metadata[grp][subgroup], list):\n for subsub in self.metadata[grp][subgroup]:\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=subsub\n )\n else: # if it's just one item of this class\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=self.metadata[grp][subgroup]\n )\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n if grp == 'Ecephys':\n item = GroupEcephys(self)\n for subgroup in self.metadata[grp]:\n # if many items of same class, in list\n if isinstance(self.metadata[grp][subgroup], list):\n for subsub in self.metadata[grp][subgroup]:\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=subsub\n )\n else: # if it's just one item of this class\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=self.metadata[grp][subgroup]\n )\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n if grp == 'Behavior':\n item = GroupBehavior(self)\n for subgroup in self.metadata[grp]:\n # if many items of same class, in list\n if isinstance(self.metadata[grp][subgroup], list):\n for subsub in self.metadata[grp][subgroup]:\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=subsub\n )\n else: # if it's just one item of this class\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=self.metadata[grp][subgroup]\n )\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n if grp == 'Ogen':\n item = GroupOgen(self)\n for subgroup in self.metadata[grp]:\n # if many items of same class, in list\n if isinstance(self.metadata[grp][subgroup], list):\n for subsub in self.metadata[grp][subgroup]:\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=subsub\n )\n else: # if it's just one item of this class\n item.add_group(\n group=self.name_to_gui_class[subgroup](parent=item),\n metadata=self.metadata[grp][subgroup]\n )\n self.groups_list.append(item)\n self.l_vbox1.addWidget(item)\n nItems = self.l_vbox1.count()\n self.l_vbox1.addStretch(nItems)\n\n def about(self):\n \"\"\"About dialog.\"\"\"\n msg = 
QMessageBox()\n    msg.setWindowTitle(\"About NWB conversion\")\n    msg.setIcon(QMessageBox.Information)\n    msg.setText(\"Version: 0.2.0 \\n\"\n                \"Shared tools for converting data from various formats to NWB:N 2.0.\\n \")\n    msg.setInformativeText(\"NWB conversion tools Github page\")\n    msg.setStandardButtons(QMessageBox.Ok)\n    msg.exec_()\n\n    def closeEvent(self, event):\n        \"\"\"Before exiting, executes these actions.\"\"\"\n        # Stop any current Voila thread\n        self.close_nwb_explorer()\n        # Remove any remaining temporary directory/files\n        shutil.rmtree(self.temp_dir, ignore_errors=False, onerror=None)\n        event.accept()\n\n\ndef get_free_port():\n    not_free = True\n    while not_free:\n        port = np.random.randint(7000, 7999)\n        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n            res = sock.connect_ex(('localhost', port))\n            if res != 0:\n                not_free = False\n    return port\n\n\ndef is_listening_to_port(process, port):\n    is_listening = False\n    # iterate over the process's children\n    for child in process.children(recursive=True):\n        # iterate over child connections\n        for con in child.connections():\n            if con.status == 'LISTEN':\n                if isinstance(con.laddr.port, int):\n                    is_listening = con.laddr.port == port\n                elif isinstance(con.laddr.port, list):\n                    is_listening = port in con.laddr.port\n                if is_listening:\n                    return True\n    # fixed: the early return used to sit inside the outer loop, so only the\n    # first child process was ever inspected\n    return is_listening\n\n\nclass voilaThread(QtCore.QThread):\n    def __init__(self, parent, port, nbpath):\n        super().__init__()\n        self.parent = parent\n        self.port = port\n        self.nbpath = nbpath\n\n    def run(self):\n        os.system(\"voila \" + self.nbpath + \" --no-browser --port \" + str(self.port))\n\n    def stop(self):\n        pid = os.getpid()\n        process = psutil.Process(pid)\n        proc_list = []\n        for child in process.children(recursive=True):\n            is_listening = is_listening_to_port(child, self.port)\n            if is_listening:\n                proc_list.append(child)\n        # fixed: kill only the matched process trees; previously every child of\n        # the GUI process was killed once per match\n        for proc in proc_list:\n            for child in proc.children(recursive=True):\n                child.kill()\n            proc.kill()\n\n\n# Runs conversion function, useful to wait for thread\nclass ConversionFunctionThread(QtCore.QThread):\n    def __init__(self, parent):\n        super().__init__()\n        self.parent = parent\n        self.error = None\n\n    def run(self):\n        if not self.parent.lin_nwb_file.text():\n            error = ValueError('select a save location for nwbfile')\n            self.error = error.__class__.__name__ + ':' + str(error)\n            raise error\n        if self.parent.conversion_module_path:  # not an empty string, i.e. a value was selected from the gui\n            try:\n                mod_file = self.parent.conversion_module_path\n                # fixed: str.strip('.py') strips characters, not the suffix, and\n                # mangles names like 'copy.py'; use splitext instead\n                spec = importlib.util.spec_from_file_location(os.path.splitext(os.path.basename(mod_file))[0], mod_file)\n                conv_module = importlib.util.module_from_spec(spec)\n                spec.loader.exec_module(conv_module)\n                metadata = self.parent.read_metadata_from_form()\n                conv_module.conversion_function(source_paths=self.parent.source_paths,\n                                                f_nwb=self.parent.lin_nwb_file.text(),\n                                                metadata=metadata,\n                                                **self.parent.kwargs_fields)\n            except Exception as error:\n                self.error = error.__class__.__name__ + ':' + str(error)\n        else:\n            try:\n                metadata = self.parent.read_metadata_from_form()\n                fileloc = list(self.parent.source_paths.values())[0]['path']\n                conversion_obj = self.parent.conversion_class(fileloc, None, metadata)\n                conversion_obj.run_conversion()\n                conversion_obj.save(self.parent.lin_nwb_file.text())\n            except Exception as error:\n                self.error = error.__class__.__name__ + ':' + str(error)\n\n\nclass CustomComboBox(QComboBox):\n    def __init__(self):\n        \"\"\"Class created to ignore mouse wheel events on combobox.\"\"\"\n        super().__init__()\n\n    def wheelEvent(self, event):\n        
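# Editor's note (assumption): ignoring the wheel event makes Qt hand the\n        # scroll to the parent widget, so scrolling a long form cannot\n        # accidentally change the selected combobox value.\n        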
event.ignore()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv) # instantiate a QtGui (holder for the app)\n ex = Application()\n sys.exit(app.exec_())\n\n\n# If it is imported as a module\ndef nwb_conversion_gui(metafile=None, conversion_module=None, source_paths=None,\n kwargs_fields=None, extension_modules=None, extension_forms=None,\n show_add_del=False, nwbfile_loc=None, conversion_class=None,load_nwbwidgets=True):\n \"\"\"Sets up QT application.\"\"\"\n if conversion_module:\n raise DeprecationWarning('use of conversion_module will be replaced by conversion_class'\n 'with its \\'run_conversion\\' method called directly')\n app = QtCore.QCoreApplication.instance()\n if conversion_class is None and conversion_module is None:\n raise Exception('provide one of conversion_module:str or conversion_class:class')\n elif conversion_class is not None and conversion_module is not None:\n raise Exception('provide either of conversion_module:str or conversion_class:class')\n if app is None:\n app = QApplication(sys.argv) # instantiate a QtGui (holder for the app)\n Application(\n metafile=metafile,\n conversion_module=conversion_module,\n source_paths=source_paths,\n kwargs_fields=kwargs_fields,\n extension_modules=extension_modules,\n extension_forms=extension_forms,\n show_add_del=show_add_del,\n nwbfile_loc=nwbfile_loc,\n conversion_class=conversion_class,\n nwbwidgets=load_nwbwidgets\n )\n sys.exit(app.exec_())\n","sub_path":"nwb_conversion_tools/gui/nwb_conversion_gui.py","file_name":"nwb_conversion_gui.py","file_ext":"py","file_size_in_byte":30714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"397096966","text":"from config import TEST_DIR\nfrom parsing import parse\nfrom graph import Graph\nfrom clustering import BoruvkaMST, ClusterNumberException\nfrom predictor import Predictor\nfrom evaluator import Evaluator\n\nfrom datetime import datetime\n\nimport csv\nimport os\n\n\nclass ProblemInformation:\n def __init__(self, **kwargs):\n total_start = datetime.now()\n test = kwargs['filename'] or input('\\nEnter test name: ')\n test_rating = parse(os.path.join(TEST_DIR, test + '.test'))\n base_rating = parse(os.path.join(TEST_DIR, test + '.base'))\n graph = Graph(base_rating=base_rating)\n while True:\n try:\n cluster_number = kwargs['cluster_num'] or int(input('\\nEnter number of clusters: '))\n\n component = BoruvkaMST(graph, cluster_number)\n break\n except (ClusterNumberException, ValueError):\n print('Number of clusters must be > 0')\n continue\n\n predictor = Predictor(component, base_rating)\n\n from copy import deepcopy\n\n predicted = deepcopy(test_rating)\n unpredictables = 0\n\n start = datetime.now()\n with open(os.path.join(TEST_DIR, test + '.res'), newline='', mode='w') as csv_file:\n data_writer = csv.writer(csv_file, delimiter=' ', quotechar='|')\n for user in test_rating.keys():\n for movie in test_rating[user].keys():\n predicted[user][movie] = None\n predicted[user][movie] = predictor.predict(user, movie)\n if not predicted[user][movie]:\n unpredictables += 1\n data_writer.writerow([user, movie, predicted[user][movie]])\n self.predicting_time = datetime.now() - start\n print('Time for predicting:', self.predicting_time, end='\\n\\n')\n\n self.total_time = datetime.now() - total_start\n print('Total time:', self.total_time)\n\n print('Number of unpredictable users:', unpredictables)\n\n self.evaluator = Evaluator(test_rating, predicted)\n\nif __name__ == '__main__':\n import sys\n\n print('Recommendation 
algorithm based on modified MST clustering with Boruvka\'s algorithm')\n    print('Created by Andriy Trotsyuk, IS-33\\n')\n\n    while True:\n        try:\n            if len(sys.argv) > 1:\n                ProblemInformation(filename=sys.argv[1], cluster_num=int(sys.argv[2]))\n                break\n            else:\n                ProblemInformation(filename=None, cluster_num=None)\n        except FileNotFoundError:\n            print('Test is not found')\n            continue\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"412270852","text":"#!/usr/bin/python\n# coding=utf-8\nimport pymysql as MySQLdb\n\n\nclass SinaDB():\n    dbConn = ''\n    cursor = ''\n    def __init__(self):\n        self.dbConn = MySQLdb.connect(host='localhost', user='root', passwd=\"\", db='sina_spider', charset=\"utf8\")\n        self.cursor = self.dbConn.cursor()\n        self.cursor.execute(\"SET NAMES utf8\")\n        self.cursor.execute(\"SET CHARACTER_SET_CLIENT=utf8\")\n        self.cursor.execute(\"SET CHARACTER_SET_RESULTS=utf8\")\n\n    def getUserDetail(self, uid):\n        query = \"SELECT uname, sex, fans, uid, icon FROM user_detail WHERE uid = %s\" % uid\n        self.cursor.execute(query)\n        data = self.cursor.fetchall()\n        return data\n\n    def getUserInfo(self, condition):\n        if type(condition) != str:\n            condition = str(condition)\n        condition = \"%\" + condition + \"%\"\n        query = 'SELECT uname, sex, fans, uid, icon FROM user_detail WHERE uname LIKE \"%s\" OR fans LIKE \"%s\" OR uid LIKE \"%s\" OR icon LIKE \"%s\"' % (condition, condition, condition, condition)\n        self.cursor.execute(query)\n        result = self.cursor.fetchall()\n        if result != ():\n            return result\n        else:\n            return None\n\n    def getAllGirlInfo(self, page, limit):\n        page = page - 1\n        page = page * limit\n        query = 'SELECT uname, sex, fans, uid, icon FROM user_detail WHERE sex = \\'女\\' LIMIT %d, %d' % (page, limit)\n        self.cursor.execute(query)\n        result = self.cursor.fetchall()\n        rows = self.cursor.execute('SELECT uname, sex, fans, uid, icon FROM user_detail WHERE sex = \\'女\\'')\n        # fixed: floor division keeps the page count an int under Python 3\n        if rows % limit > 0:\n            rows = rows // limit + 1\n        else:\n            rows = rows // limit\n        if result != ():\n            return (result, rows)\n        else:\n            return None\n\n    def getAllBoyInfo(self, page, limit):\n        page = page - 1\n        page = page * limit\n        query = 'SELECT uname, sex, fans, uid, icon FROM user_detail WHERE sex = \\'男\\' LIMIT %d, %d' % (page, limit)\n        self.cursor.execute(query)\n        result = self.cursor.fetchall()\n        rows = self.cursor.execute('SELECT uname, sex, fans, uid, icon FROM user_detail WHERE sex = \\'男\\'')\n        if rows % limit > 0:\n            rows = rows // limit + 1\n        else:\n            rows = rows // limit\n        if result != ():\n            return (result, rows)\n        else:\n            return None\n\n    def getUserImageLinks(self, uid, page, limit):\n        page = page - 1\n        page = page * limit\n        query = 'SELECT img_src FROM user_img WHERE uid = %s LIMIT %d, %d' % (uid, page, limit)\n        self.cursor.execute(query)\n        result = self.cursor.fetchall()\n        query = 'SELECT img_src FROM user_img WHERE uid = %s' % uid\n        rows = self.cursor.execute(query)\n        if rows % limit > 0:\n            rows = rows // limit + 1\n        else:\n            rows = rows // limit\n        if result != ():\n            return (result, rows)\n        else:\n            return None\n\n    def getColumnsNames(self):\n        columns = [index[0] for index in self.cursor.description]\n        return columns\n\n    def isHasImg(self, uid):\n        query = 'SELECT * FROM user_img WHERE uid = %s' % uid\n        self.cursor.execute(query)\n        result = self.cursor.fetchall()\n        if result != ():\n            return 1\n        else:\n            return 0\n\n    def getAllUserUid(self):\n        query = 'SELECT uid FROM user_detail WHERE uid 
!= ALL (SELECT uid FROM user_img GROUP BY uid)'\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n if result != ():\n return result\n else:\n return None\n\n @property\n def getDataAnalysis(self):\n query = \"SELECT count(uid) FROM user_detail\"\n self.cursor.execute(query)\n all_users_count = self.cursor.fetchall()[0][0]\n\n query = \"SELECT count(id) FROM user_img\"\n self.cursor.execute(query)\n all_imgs = self.cursor.fetchone()[0]\n\n query = \"SELECT count(uid) FROM user_detail WHERE sex = '男'\"\n self.cursor.execute(query)\n male_users_count = self.cursor.fetchall()[0][0]\n\n query = \"SELECT count(uid) FROM user_detail WHERE sex = '女'\"\n self.cursor.execute(query)\n female_users_count = self.cursor.fetchall()[0][0]\n\n query = \"SELECT uname, sex, fans, uid, icon FROM user_detail ORDER BY CAST(fans AS UNSIGNED) DESC\"\n self.cursor.execute(query)\n greater_fans_user_info = self.cursor.fetchone()\n\n query = \"SELECT uname, sex, fans, uid, icon FROM user_detail ORDER BY CAST(fans AS UNSIGNED)\"\n self.cursor.execute(query)\n smaller_fans_user_info = self.cursor.fetchone()\n\n query = \"SELECT count(uid) AS a, uid FROM user_img GROUP BY uid ORDER BY a DESC\"\n self.cursor.execute(query)\n greater_send_img_user = self.cursor.fetchone()\n\n query = \"SELECT uname, sex, fans, uid, icon FROM user_detail WHERE uid = %s\" % greater_send_img_user[1]\n self.cursor.execute(query)\n greater_send_img_user_info = self.cursor.fetchone()\n\n query = \"SELECT count(uid) AS a, uid FROM user_img GROUP BY uid ORDER BY a\"\n self.cursor.execute(query)\n smaller_send_img_user = self.cursor.fetchone()\n\n query = \"SELECT uname, sex, fans, uid, icon FROM user_detail WHERE uid = %s\" % smaller_send_img_user[1]\n self.cursor.execute(query)\n smaller_send_img_user_info = self.cursor.fetchone()\n\n return {\n 'all':all_users_count,\n 'all_imgs':all_imgs,\n 'male':male_users_count,\n 'female':female_users_count,\n 'greater_fans_user_info':greater_fans_user_info,\n 'smaller_fans_user_info':smaller_fans_user_info,\n 'greater_send_img':greater_send_img_user[0],\n 'smaller_send_img':smaller_send_img_user[0],\n 'greater_send_img_user_info':greater_send_img_user_info,\n 'smaller_send_img_user_info':smaller_send_img_user_info,\n }\n","sub_path":"libs/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"549016992","text":"import pypsdier\r\nimport os\r\n\"\"\"\r\nDescription: This is the simulation and experimental data for the \r\nHydrolysis Reaction of Penicilin G.\r\n\"\"\"\r\n\r\ndef PenG_HydrolysisReaction(Cs, E0, k, K, Ks, K1, K2):\r\n \"\"\"\r\n ReaccionPenG(Cs, E0, inputs)\r\n Cs = PenG [mM], AFA [mM], 6-APA [mM]\r\n E0 [mM]\r\n inputs = k [1/s], K [mM], Ks [mM], K1 [mM], K2 [mM]\r\n \"\"\"\r\n PenG, AFA, APA6 = Cs\r\n mcd = K + PenG + PenG*PenG/Ks + K*AFA/K1 + K*APA6/K2 + PenG*APA6/K2 + K*AFA*APA6/(K1*K2)\r\n v_S = (k*E0*PenG) / mcd\r\n v = (-v_S, v_S, v_S )\r\n return v\r\n\r\ninputs = {}\r\ninputs[\"SeedFile\"] = \"PGA400R72.rde\" # filename where the simulation will be stored\r\ninputs[\"SimulationTime\"] = 120.*60 # [s], total time to be simulated \r\ninputs[\"SavingTimeStep\"] = 60. 
# [s], saves only one data per second\r\ninputs[\"CatalystVolume\"] = 0.0350 # [mL], total volume of all catalyst particles in reactor\r\ninputs[\"BulkVolume\"] = 40.3 # [mL], bulk volume of the liquid phase\r\ninputs[\"Names\"] = ('PenG', 'AFA', '6-APA') # legend for the xls, reports and plots\r\ninputs[\"InitialConcentrations\"] = (10.0, 0., 0.) # [mM], initial concentration of substrates and products\r\ninputs[\"EffectiveDiffusionCoefficients\"] = (5.30E-10, 7.33E-10, 5.89E-10) # [m2/s], effective diffusion coefficient for substrates and products\r\ninputs[\"CatalystParticleRadius\"] = (75.7E-6,) # [m], list of possible catalyst particle radiuses\r\ninputs[\"CatalystParticleRadiusFrequency\"] = (1.0,) # [], list of corresponding frequencies of catalyst particle radiuses\r\ninputs[\"ReactionFunction\"] = PenG_HydrolysisReaction # function defining the reaction \r\ninputs[\"ReactionParameters\"] = 41., 0.13, 821., 1.82, 48. #[1/s] and [mM] parameters to be used in the reaction function \r\ninputs[\"CatalystEnzymeConcentration\"] = 0.106 # [mM] can be a float, int or a function returning float or int.\r\n\r\nplot_options = {}\r\nplot_options[\"t_exp\"] = [0, 2, 4, 6, 8, 10, 20, 30, 45, 60, 80, 100, 120] # Time in mins\r\nplot_options[\"PenG_exp\"] = [10.00, 9.46, 9.01, 8.56, 8.17, 7.79, 5.63, 3.90, 2.06, 0.95, 0.35, 0.12, 0] # Concentration\r\n\r\n# Define filename for storing the simulation and plots\r\nfilename = \"PGA400R72.rde\"\r\n\r\n# Simulate only if file not found\r\nSI = pypsdier.SimulationInterface()\r\nif os.path.exists(filename):\r\n SI.load(filename)\r\nelse:\r\n SI.new(inputs, plot_options)\r\n SI.simulate(\"ode\")\r\n SI.simulate(\"pde\")\r\n SI.save(filename)\r\nSI.status()\r\nSI.plot(\"plot\", display=True)\r\nSI.export_xls(filename.replace(\".rde\", \".xls\"))","sub_path":"experiments/PGA400R72.py","file_name":"PGA400R72.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"373701696","text":"#!/usr/bin/python2\n\n#Only work in ArchLinux\n\nimport os\nimport smtplib\nimport mimetypes\nimport sys\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email.MIMEText import MIMEText\nfrom email.MIMEAudio import MIMEAudio\nfrom email.MIMEImage import MIMEImage\nfrom email.Encoders import encode_base64\n\ndef sendMail(subject, text, *attachmentFilePaths):\n gmailUser = 'yangling1984@gmail.com'\n gmailPassword = 'enurcamuahyparfb'\n recipient = 'yangling1984@free.kindle.cn'\n\n msg = MIMEMultipart()\n msg['From'] = gmailUser\n msg['To'] = recipient\n msg['Subject'] = subject\n msg.attach(MIMEText(text))\n\n for attachmentFilePath in attachmentFilePaths:\n msg.attach(getAttachment(attachmentFilePath))\n\n mailServer = smtplib.SMTP('smtp.gmail.com', 587)\n mailServer.ehlo()\n mailServer.starttls()\n mailServer.ehlo()\n mailServer.login(gmailUser, gmailPassword)\n mailServer.sendmail(gmailUser, recipient, msg.as_string())\n mailServer.close()\n\n print('Sent email to %s' % recipient)\n\ndef getAttachment(attachmentFilePath):\n contentType, encoding = mimetypes.guess_type(attachmentFilePath)\n\n if contentType is None or encoding is not None:\n contentType = 'application/octet-stream'\n\n mainType, subType = contentType.split('/', 1)\n file = open(attachmentFilePath, 'rb')\n\n if mainType == 'text':\n attachment = MIMEText(file.read())\n elif mainType == 'message':\n attachment = email.message_from_file(file)\n elif mainType == 'image':\n attachment = 
MIMEImage(file.read(), _subtype=subType)  # fixed: the keyword is _subtype, not _subType\n    elif mainType == 'audio':\n        attachment = MIMEAudio(file.read(), _subtype=subType)\n    else:\n        attachment = MIMEBase(mainType, subType)\n        attachment.set_payload(file.read())\n        encode_base64(attachment)\n\n    file.close()\n\n    attachment.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachmentFilePath))\n    return attachment\n\ndef clearKindleFolder(*kindleFolderFiles):\n    if len(kindleFolderFiles) == 0:\n        print('No file to clear')\n        return\n    for onefile in kindleFolderFiles:\n        os.remove(onefile)\n        print('%s is deleted' % onefile)\n\nkindleFileFolder = '/home/yangling/Documents/Kindle/'\nchecksum_files = []\nfor theFile in os.listdir(kindleFileFolder):\n    print(theFile)\n    fullpath = os.path.join(kindleFileFolder, theFile)\n    print(fullpath)\n    if os.path.isfile(fullpath):\n        checksum_files += [fullpath]\nprint(','.join(checksum_files))\nif len(sys.argv) == 1:\n    # No argument, load file from kindlefilefolder\n    if len(checksum_files) == 0:\n        print('There is no file in %s' % kindleFileFolder)\n    else:\n        sendMail('convert','convert',*checksum_files)\n        clearKindleFolder(*checksum_files)\nelif len(sys.argv) == 2:\n    # The argument is either \"-n\" or file path\n    if str(sys.argv[1]) == '-n':\n        #load file from kindlefilefolder, but not convert\n        if len(checksum_files) == 0:\n            print('There is no file in %s' % kindleFileFolder)\n        else:\n            sendMail(' ',' ',*checksum_files)\n            clearKindleFolder(*checksum_files)\n    else:\n        # the argument is file path\n        sendMail('convert','convert',str(sys.argv[1]))\nelif len(sys.argv) == 3:\n    # the argument is file path and \"-n\"\n    if str(sys.argv[1]) == '-n':\n        sendMail(' ',' ',str(sys.argv[2]))\n    elif str(sys.argv[2]) == '-n':\n        sendMail(' ',' ',str(sys.argv[1]))\n    else:\n        print('Invalid argument.')\nelse:\n    print('Invalid argument. You must specify a file. Append \"-n\" if you do not want conversion')\n","sub_path":"send-mail.py","file_name":"send-mail.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"574304141","text":"import csv\nfrom dataframe import DataFrame\n\ndef empty(l):\n    for c in l:\n        if len(c.strip()) > 0: return False\n    return True\n\ndef loadCSV(path):\n    csvFile = open(path)\n\n    players = dict()\n    colNames = []\n    firstRow = True\n    for line in csv.reader(csvFile.readlines()):\n        if empty(line):\n            continue\n        if firstRow:\n            for name in line:\n                name = name.strip()\n                colNames.append(name)\n                players[name] = []\n            firstRow = False\n            continue\n        for i, value in enumerate(line):\n            players[colNames[i]].append(value.strip())\n\n    return DataFrame(players, colNames)\n\ndef exportCSV(dataframe, path):\n    csvFile = open(path, 'w')\n    csvFile.write(','.join(dataframe.colNames) + '\\n')\n    for i in range(dataframe.nRows()):\n        csvFile.write(','.join(dataframe.getRow(i)) + '\\n')\n    csvFile.close()","sub_path":"csvutil.py","file_name":"csvutil.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"433693377","text":"from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\nbrand = Table('brand', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('name', String),\n)\n\nfood = Table('food', post_meta,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('name', String),\n    Column('category', String),\n    Column('cal', Float),\n    Column('fat', Float),\n    Column('sfat', Float),\n    Column('tfat', Float),\n    Column('chol', Float),\n    Column('salt', Float),\n    Column('carb', Float),\n    Column('fiber', Float),\n    Column('sugar', Float),\n    Column('protein', Float),\n    Column('brand_id', Integer),\n)\n\n\ndef upgrade(migrate_engine):\n    # Upgrade operations go here. 
Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['brand'].create()\n post_meta.tables['food'].columns['brand_id'].create()\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['brand'].drop()\n post_meta.tables['food'].columns['brand_id'].drop()\n","sub_path":"db_repository/versions/002_migration.py","file_name":"002_migration.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"360428843","text":"########################################\n# EMR 2021 PROJEKT - SPRACH EREKENNUNG #\n########################################\n\n# Mitglieder:\n\n# Marcel Heinen\n# Sergey Rogachevsky\n# Yosua Kurniawan\n\n# se_youbot-real_demo1.py Python 3.8.10 tested and works (18.08.2021) \n\n# Known Issues:\n# time delay between Computer due to wireless connection (Real Robot when using wireless connectivity)\n# awful sphinx audio accuracy\n# sphinx audio could not recieve any numerical audio input\n\n# Library and Packages:\n\n# Speech Recognition:\n\n# $ pip install SpeechRecognition\n\n# FOR WINDOWS USER NEED TO ADD:\n\n# $ pip install pipwin\n# $ pipwin install pyaudio\n\n# FOR UBUNTU USER NEED TO ADD:\n\n# $ sudo apt-get install portaudio19-dev python3-pyaudio\n# $ pip install PyAudio\n\n# pocketsphinx:\n\n# $ sudo apt install -y libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev\n# $ sudo apt-get install swig3.0 or $ sudo apt-get install swig (only god knows)\n# $ sudo pip install pocketsphinx\n\n# rospy:\n\n# $ sudo apt install python-rospy\n\n# pyaudio:\n\n# $ sudo apt install python3-pyaudio\n\n# In real youBot (the same as roscore terminal):\n\n# roslaunch youbot_driver_ros_interface youbot_driver.launch\n\n######################################################################\n# import all required libraries\nimport speech_recognition as sr\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nimport rospy\nimport time\nimport math\n\n# define initial position of the robot\nx=0\ny=0\nyaw=0\n\n# initiate ros node\ndef init():\n global msg, velocity_publisher, pose_subscriber\n # define initial speed to zero\n msg = Twist()\n msg.linear.x = 0\n msg.linear.y = 0\n msg.angular.x = 0\n msg.angular.y = 0\n # create ros node\n try:\n \n rospy.init_node('se_youbot', anonymous=True)\n\n # declare velocity publisher\n cmd_vel_topic = \"/cmd_vel\"\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\n\n # #declare pose subscriber\n position_topic = \"/odom\"\n pose_subscriber = rospy.Subscriber(position_topic, Odometry, poseCallback) \n\n time.sleep(2)\n \n except rospy.ROSInterruptException:\n rospy.loginfo(\"node terminated.\")\n\n# function to get position \ndef poseCallback(pose_message):\n global x\n global y, yaw\n x= pose_message.pose.pose.position.x\n y= pose_message.pose.pose.position.y\n\n# function to get audio input using microphone as source\ndef get_audio():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.listen(source)\n text = \"\"\n try:\n # text = r.recognize_google(audio)\n text = r.recognize_sphinx(audio) # using pocket sphinx instead of google\n print(text)\n except:\n print(\"Unrecognizeable\")\n return text\n\n# make a list of each words and phrasing audio input into string data; return the direction as 
string value\ndef phrasing_audio_direction():\n text_lst_direction = \"\"\n direction = \"\"\n determine_direction = True\n\n while determine_direction: \n print(\"Give direction input (forward, backward, left, right) :\")\n text_lst_direction = get_audio()\n if \"forward\" in text_lst_direction:\n print(\"Moving Forward\")\n direction = \"forward\"\n determine_direction = False\n return direction\n elif \"backward\" in text_lst_direction:\n print(\"Moving Backward\")\n direction = \"backward\"\n determine_direction = False\n return direction\n elif \"left\" in text_lst_direction:\n print(\"Turning Left\")\n direction = \"left\"\n determine_direction = False\n return direction\n elif \"right\" in text_lst_direction:\n print(\"Turning Right\")\n direction = \"right\"\n determine_direction = False\n return direction \n else:\n print(\"UNABLE TO RECOGNIZE COMMAND TRY AGAIN, error: 1\")\n determine_direction = True\n\n# make a list of each words and phrasing audio input into string data and check whether it is convertable into int; return the data as string data\ndef phrasing_audio_distance():\n text_lst_distance = \"\"\n distance = \"\"\n determine_distance = True\n max_distance = 3 # declare max distance\n\n while determine_distance: \n print(\"Give distance value (number):\")\n text_lst_distance = input('Type a int value number 1 - 3') # use string input instead of audio input\n\n if isinstance(text_lst_distance, str) == True and str.isdigit(text_lst_distance) == False: # check if text_lst_distance recieve any string and if it can be converted to an int\n print(\"UNABLE TO RECOGNIZE COMMAND TRY AGAIN, error: 2\")\n determine_distance = True\n\n elif int(text_lst_distance) < max_distance: # text_lst_distance must recieve string value that is able to be converted to an int\n distance = text_lst_distance\n print(distance + \" m\")\n determine_distance = False\n return distance\n\n else:\n print(\"UNABLE TO RECOGNIZE COMMAND TRY AGAIN, error: 3\")\n determine_distance = True\n\n# function to move the robot based on audio input\ndef move():\n # declare a Twist message to send velocity commands\n msg = Twist()\n # get current location \n global x, y, speed\n x0=x\n y0=y\n speed = 0.1 # define the speed of turtle\n direction = phrasing_audio_direction()\n distance = phrasing_audio_distance()\n\n if direction == 'forward':\n msg.linear.x = abs(speed)\n elif direction == 'backward':\n msg.linear.x = -abs(speed)\n elif direction == 'left':\n msg.linear.y = abs(speed)\n elif direction == 'right':\n msg.linear.y = -abs(speed) \n \n distance_moved = 0.0\n loop_rate = rospy.Rate(10) # we publish the velocity at 10 Hz (10 times a second) \n cmd_vel_topic = '/cmd_vel'\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\n\n while True :\n rospy.loginfo(\"youBot move: \" + direction +\" for \" + distance + \" m\")\n velocity_publisher.publish(msg)\n\n loop_rate.sleep()\n # calculate the distance between current and initial position\n distance_moved = abs(0.5 * math.sqrt(((x-x0) ** 2) + ((y-y0) ** 2)))\n print(distance_moved) \n if not (distance_moved < int(distance)): # distance(str) will be converted into int\n rospy.loginfo(\"reached\")\n break\n \n # finally, stop the robot when desiered distance reached\n msg.linear.x =0\n msg.linear.y =0\n velocity_publisher.publish(msg)\n\nif __name__ == '__main__':\n\n init()\n 
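# Editor's note (assumption): move() handles one voice command; wrapping it in\n    # a loop such as `while not rospy.is_shutdown(): move()` would let the robot\n    # accept repeated commands without restarting the node.\n    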
move()\n\n","sub_path":"main/se_youbot-real_demo1.py","file_name":"se_youbot-real_demo1.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"185993786","text":"juliet = {\n 'first_name': 'Adams smith',\n 'school': 'st johns grammer',\n 'age': 22,\n}\nevans = {\n 'first_name': 'evans opoku',\n 'school': 'west africa',\n 'age': 26\n}\npeople = [juliet, evans]\nfor i in people:\n print(i)\nschool = input('the name of the school is: ')\nprint(f'\\nthe name of the school is {school}')\n\n# Sometimes you’ll want to write a prompt that’s longer\n# than one line. You can assign your prompt to a variable and\n# pass that variable to the input() function\n\nprompt = \"\\nIf you tell us who you are, we can personalize the messages you see.\"\nprompt += \"\\nWhat is your first name? \"\n\nname = input(prompt)\nprint(f'\\nHello, {name}')\n\nheight = int(input('\\nplease input your height: \\n'))\nif height >= 48:\n print('you can ride roller coaster')\nelif height <= 47:\n print('may be next year')\n\n# determining if number is even or odd\nnum = int(input('\\nplease enter number: '))\n\nwhile num < 0 or num == 0:\n if num == 0:\n print('TraceBack: value input error')\n num = int(input('\\nplease enter number: '))\n elif num < 0:\n print('please input +ve values ')\n num = int(input('\\nplease enter number: '))\nelse:\n if num % 2 == 0:\n print('Number given is even')\n elif num % 2 == 1:\n print('number given maybe odd')\n else:\n print('cant determine number')\n\nprompt = \"\\nTell me something, and I will repeat it back to you:\"\nprompt += \"\\nEnter 'quit' to end the program. \"\n\nmessage = ' '\nwhile message != 'quit':\n message = input(f'\\n{prompt}')\n if message != 'quit':\n print(message)\n# adding flags to a program, below program performs similarly\n# like the above code\nactive = True\nwhile active:\n message = input(prompt)\n if message == 'quit':\n active = False\n else:\n print(message)\n\n# using break statements in code\nprompt = '\\nPlease enter the name of a city you have visited: '\nprompt += '\\n(Enter quit when done) '\nwhile True:\n city = input(prompt)\n if city == 'quit':\n break\n else:\n print(f'I would love to go to {city.title()}!')\n\n# using the continue statement, printing odd numbers\ncurrent_number = 0\nwhile current_number < 10:\n current_number += 1\n if current_number % 2 == 0:\n continue\n print(current_number)\n\n# loops with Lists and Dictionaries\nusers = ['abena', 'nyamesem', 'frema opare']\nnew_users = []\nwhile users:\n current_user = users.pop()\n print(f\"verifying user: {current_user.title()}\")\n new_users.append(current_user)\nprint('\\nThe following users have been confirmed: ')\nfor u in new_users:\n print(u.title())\n# Removing All Instances of Specific Values from a List\npets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']\nprint(f'\\n{pets}')\n\nwhile 'cat' in pets:\n pets.remove('cat')\nprint(pets)\n\n# Filling a Dictionary with User Input\nresponse = {}\n# Set a flag to indicate that polling is active.\npolling_active = True\nwhile polling_active:\n name = input('\\nWhat is your name: ')\n name2 = input('Where do you intend going: ')\n\n response[name] = name2\n repeat = input(\"Would you like to let another person respond?\"\n \" (yes/ no) \")\n if repeat == \"no\":\n polling_active = False\nprint(f'\\n{response}')\n\nprint(\"\\n--- Poll Results ---\")\nfor name, response in response.items():\n print(f\"{name} would like to climb 
{response}.\")\nprint('')\n\n\ndef greet_user(username):\n \"\"\"Display a simple greeting.\"\"\"\n print(f\"Hello, {username.title()}!\")\n\n\ngreet_user('jesse')\n\n\ndef favorite_book(title):\n '''displays title of a book'''\n print(f'one of my favourite books is {title}')\n\n\nfavorite_book('Alice wonderland')\n\n\ndef animal(type, name):\n \"\"\"describes animal type and name\"\"\"\n print(f'I have a {type}')\n print(f'My {type}\\'s name is {name}')\n\n\nanimal('fowls', 'meneku')\n\n\ndef function(x, y):\n \"\"\"fxn to add two numbers\"\"\"\n return x + y\n\n\nsummf = function(y=8, x=12)\nprint(summf)\n\n\ndef display_info(work, age, ple):\n \"\"\"dunction to display name and age\"\"\"\n age = input('please age must be more than 18: ')\n\n print(f'your work is {work}, name is {ple} and age is {age}')\n\n\ndisplay_info('developer', 18, 'ernest lipson')\n\n\ndef describe_pet(animal_type, pet_name):\n \"\"\"Display information about a pet.\"\"\"\n\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")\n\n\ndescribe_pet(animal_type='hamster', pet_name='harry')\ndescribe_pet(pet_name='harry', animal_type='hamster')\n# both function calls above are equivalent\n\n\ndef describee_pet(self, parameter_list='dog'):\n \"\"\"function to name animal type setting default value to dog\"\"\"\n print(f'\\nI have animal {self}')\n print(f'My animals name is {parameter_list}')\n\n\ndescribee_pet('cow')\n\n# Avoiding Argument Errors\n# Unmatched arguments occur when you\n# provide fewer or more arguments than a function needs to do its work.\ntext = input('please input text you want printed')\n\n\ndef make_shirt(size, text):\n \"\"\"to make a shirt\"\"\"\n print(f'the size of your shirt is {size} and the message'\n f'printed on it is {text}')\n\n\nmake_shirt(18, text)\n\n\ndef get_formatted_name(first_name, middle_name, last_name):\n \"\"\"Return a full name, neatly formatted.\"\"\"\n full_name = f\"{first_name} {middle_name} {last_name}\"\n return full_name.title()\n\n\nmusician = get_formatted_name('john', 'lee', 'hooker')\nprint(musician)\n\n# making an argument optional\n\n\ndef get_formatted_name(first_name, last_name, middle_name=''):\n '''return a full name fullt formatted'''\n if middle_name:\n fullname = f'{first_name} {middle_name} {last_name}'\n else:\n fullname = f'{first_name} {last_name}'\n return fullname\n\n\nname = get_formatted_name('lipson', 'kwabena', 'boateng')\nprint(name)\nname_two = get_formatted_name('ernest'.capitalize(),\n 'ayew'.capitalize())\n\n\ndef squares(a, b):\n \"\"\"Python code to print all the perfect\n square numbers between a and b\"\"\"\n Lists = []\n for i in range(a, b+1):\n j = 1\n while j*j <= i:\n if j*j == i:\n Lists.append(i)\n j = j+1\n i = i+1\n return Lists\n\n\nprint(squares(1, 500))\n","sub_path":"python_works/pcceg/pcceg_four.py","file_name":"pcceg_four.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"634420738","text":"# Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport re\nimport time\nfrom multiprocessing import Queue, synchronize\nfrom queue import Empty\nfrom typing import Any, Dict, Optional, cast\n\nimport boto3\nimport yaml\nfrom kubernetes import dynamic\nfrom kubernetes.dynamic import exceptions as k8s_exceptions\nfrom orbit_controller import ORBIT_API_GROUP, ORBIT_API_VERSION, dump_resource, dynamic_client, logger\nfrom urllib3.exceptions import ReadTimeoutError\n\n\ndef _verbosity() -> int:\n try:\n return int(os.environ.get(\"ORBIT_CONTROLLER_LOG_VERBOSITY\", \"0\"))\n except Exception:\n return 0\n\n\ndef _generate_buildspec(repo_host: str, repo_prefix: str, src: str, dest: str) -> Dict[str, Any]:\n repo = dest.replace(f\"{repo_host}/\", \"\").split(\":\")[0]\n build_spec = {\n \"version\": 0.2,\n \"phases\": {\n \"install\": {\n \"runtime-versions\": {\"python\": 3.7, \"docker\": 19},\n \"commands\": [\n (\n \"nohup /usr/sbin/dockerd --host=unix:///var/run/docker.sock \"\n \"--host=tcp://0.0.0.0:2375 --storage-driver=overlay&\"\n ),\n 'timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"',\n ],\n },\n \"pre_build\": {\n \"commands\": [\n \"/var/scripts/retrieve_docker_creds.py && echo 'Docker logins successful' \"\n \"|| echo 'Docker logins failed'\",\n f\"aws ecr get-login-password | docker login --username AWS --password-stdin {repo_host}\",\n (\n f\"aws ecr create-repository --repository-name {repo} \"\n f\"--tags Key=Env,Value={repo_prefix} || echo 'Already exists'\"\n ),\n ]\n },\n \"build\": {\"commands\": [f\"docker pull {src}\", f\"docker tag {src} {dest}\", f\"docker push {dest}\"]},\n },\n }\n logger.debug(\"BuildSpec: %s\", build_spec)\n return build_spec\n\n\ndef _replicate_image(config: Dict[str, Any], src: str, dest: str) -> str:\n logger.info(\"Replicating Image: %s -> %s\", src, dest)\n\n buildspec = yaml.safe_dump(_generate_buildspec(config[\"repo_host\"], config[\"repo_prefix\"], src, dest))\n logger.debug(\"BuildSpec:\\n%s\", buildspec)\n\n client = boto3.client(\"codebuild\")\n build_id = client.start_build(\n projectName=config[\"codebuild_project\"],\n sourceTypeOverride=\"NO_SOURCE\",\n buildspecOverride=buildspec,\n timeoutInMinutesOverride=config[\"codebuild_timeout\"],\n privilegedModeOverride=True,\n imageOverride=config[\"codebuild_image\"],\n )[\"build\"][\"id\"]\n\n logger.info(\"Started CodeBuild Id: %s\", build_id)\n\n while True:\n build = client.batch_get_builds(ids=[build_id])[\"builds\"][0]\n status: str = build[\"buildStatus\"]\n phase: str = build[\"currentPhase\"]\n\n logger.debug(\"CodeBuild Id: %s, Phase: %s, Status: %s\", build_id, phase, status)\n\n if status == \"IN_PROGRESS\":\n time.sleep(10)\n continue\n else:\n return status\n\n\ndef get_config(workers: Optional[int] = None) -> Dict[str, Any]:\n config = {\n \"repo_host\": os.environ.get(\"IMAGE_REPLICATIONS_REPO_HOST\", \"\"),\n \"repo_prefix\": os.environ.get(\"IMAGE_REPLICATIONS_REPO_PREFIX\", \"\"),\n \"codebuild_project\": 
os.environ.get(\"IMAGE_REPLICATIONS_CODEBUILD_PROJECT\", \"\"),\n \"codebuild_timeout\": int(os.environ.get(\"IMAGE_REPLICATIONS_CODEBUILD_TIMEOUT\", \"30\")),\n \"codebuild_image\": os.environ.get(\"ORBIT_CODEBUILD_IMAGE\", \"\"),\n \"replicate_external_repos\": os.environ.get(\"IMAGE_REPLICATIONS_REPLICATE_EXTERNAL_REPOS\", \"False\").lower()\n in [\"true\", \"yes\", \"1\"],\n \"workers\": workers if workers else int(os.environ.get(\"IMAGE_REPLICATIONS_WATCHER_WORKERS\", \"2\")),\n }\n return config\n\n\ndef get_desired_image(config: Dict[str, Any], image: str) -> str:\n external_ecr_match = re.compile(r\"^[0-9]{12}\\.dkr\\.ecr\\..+\\.amazonaws.com/\")\n public_ecr_match = re.compile(r\"^public.ecr.aws/.+/\")\n\n if image.startswith(config[\"repo_host\"]):\n return image\n elif external_ecr_match.match(image):\n if config[\"replicate_external_repos\"]:\n return external_ecr_match.sub(\n f\"{config['repo_host']}/{config['repo_prefix']}/\", image.replace(\"@sha256\", \"\")\n )\n else:\n return image\n elif public_ecr_match.match(image):\n return public_ecr_match.sub(f\"{config['repo_host']}/{config['repo_prefix']}/\", image.replace(\"@sha256\", \"\"))\n else:\n return f\"{config['repo_host']}/{config['repo_prefix']}/{image.replace('@sha256', '')}\"\n\n\ndef get_replication_status(\n lock: synchronize.Lock,\n queue: Queue, # type: ignore\n statuses: Dict[str, str],\n image: str,\n desired_image: str,\n) -> str:\n with lock:\n status = statuses.get(desired_image, \"Unknown\")\n\n if status == \"Unknown\":\n if image_replicated(desired_image):\n logger.debug(\"Skipping previously completed Replication Task: %s -> %s\", image, desired_image)\n status = \"Complete\"\n statuses[desired_image] = status\n else:\n logger.debug(\"Queueing Replication Task: %s -> %s\", image, desired_image)\n status = \"Pending:1\"\n statuses[desired_image] = status\n queue.put({\"src\": image, \"dest\": desired_image})\n elif status.startswith(\"Failed\"):\n attempt = int(status.split(\":\")[1])\n if attempt < 3:\n attempt = attempt + 1\n logger.debug(\"Queueing Failed Replication Task Attemp %s: %s -> %s\", attempt, image, desired_image)\n statuses[desired_image] = f\"Pending:{attempt}\"\n queue.put({\"src\": image, \"dest\": desired_image})\n else:\n logger.error(\"Too many failed replication attempts: %s -> %s\", image, desired_image)\n\n return status\n\n\ndef image_replicated(image: str) -> bool:\n try:\n repo, tag = image.split(\":\")\n repo = \"/\".join(repo.split(\"/\")[1:])\n client = boto3.client(\"ecr\")\n paginator = client.get_paginator(\"list_images\")\n for page in paginator.paginate(repositoryName=repo):\n for imageId in page[\"imageIds\"]:\n if imageId.get(\"imageTag\", None) == tag:\n logger.debug(\"ECR Repository contains Image: %s\", image)\n return True\n logger.debug(\"Tag %s not found in ECR Repository %s\", tag, repo)\n return False\n except Exception as e:\n logger.exception(e)\n return False\n\n\ndef create_image_replication(\n namespace: str,\n images: Dict[str, str],\n client: dynamic.DynamicClient,\n request_logger: Optional[logging.Logger] = None,\n) -> None:\n _logger = request_logger if request_logger else logger\n api = client.resources.get(api_version=ORBIT_API_VERSION, group=ORBIT_API_GROUP, kind=\"ImageReplication\")\n image_replication = {\n \"apiVersion\": f\"{ORBIT_API_GROUP}/{ORBIT_API_VERSION}\",\n \"kind\": \"ImageReplication\",\n \"metadata\": {\n \"generateName\": \"image-replication-\",\n },\n \"spec\": {\"images\": [{\"destination\": k, \"source\": v} for k, v in 
images.items()]},\n }\n api.create(namespace=namespace, body=image_replication)\n _logger.debug(\"Created image_replication: %s\", dump_resource(image_replication))\n\n\ndef delete_image_replication(\n image_replication: Dict[str, Any],\n client: dynamic.DynamicClient,\n request_logger: Optional[logging.Logger] = None,\n) -> None:\n _logger = request_logger if request_logger else logger\n api = client.resources.get(api_version=ORBIT_API_VERSION, group=ORBIT_API_GROUP, kind=\"ImageReplication\")\n api.delete(namespace=image_replication[\"metadata\"][\"namespace\"], name=image_replication[\"metadata\"][\"name\"])\n _logger.debug(\"Deleted image_replication: %s\", dump_resource(image_replication))\n\n\ndef watch(\n lock: synchronize.Lock,\n queue: Queue, # type: ignore\n state: Dict[str, Any],\n statuses: Dict[str, Any],\n config: Dict[str, str],\n) -> int:\n while True:\n try:\n client = dynamic_client()\n api = client.resources.get(api_version=ORBIT_API_VERSION, group=ORBIT_API_GROUP, kind=\"ImageReplication\")\n\n logger.info(\"Monitoring ImageReplications\")\n\n kwargs = {\n \"resource_version\": state.get(\"lastResourceVersion\", 0),\n }\n for event in api.watch(**kwargs):\n if _verbosity() > 2:\n logger.debug(\"event object: %s\", event)\n image_replication = event[\"raw_object\"]\n state[\"lastResourceVersion\"] = image_replication.get(\"metadata\", {}).get(\"resourceVersion\", 0)\n logger.debug(\"watcher state: %s\", state)\n\n if event[\"type\"] == \"ADDED\":\n for image in image_replication.get(\"spec\", {}).get(\"images\", []):\n status = get_replication_status(\n lock=lock,\n queue=queue,\n statuses=statuses,\n image=image[\"source\"],\n desired_image=image[\"destination\"],\n )\n logger.info(\"Replication Status: %s %s\", image, status)\n delete_image_replication(image_replication=image_replication, client=client)\n else:\n logger.debug(\n \"Skipping ImageReplication event for processing type: %s image_replication: %s\",\n event[\"type\"],\n dump_resource(image_replication),\n )\n except ReadTimeoutError:\n logger.warning(\n \"There was a timeout error accessing the Kubernetes API. Retrying request.\",\n exc_info=True,\n )\n time.sleep(1)\n except k8s_exceptions.ApiException as ae:\n if ae.reason.startswith(\"Expired: too old resource version\"):\n logger.warning(ae.reason)\n state[\"lastResourceVersion\"] = 0\n else:\n logger.exception(\"Unknown ApiException in ImageReplicationWatcher. Failing\")\n raise\n except Exception:\n logger.exception(\"Unknown error in ImageReplicationWatcher. 
Failing\")\n raise\n else:\n state[\"lastResourceVersion\"] = \"0\"\n logger.warning(\n \"Watch died gracefully, starting back up with a reset resource_version: %s\",\n state[\"lastResourceVersion\"],\n )\n\n\ndef process_image_replications(\n lock: synchronize.Lock,\n queue: Queue, # type: ignore\n state: Dict[str, Any],\n statuses: Dict[str, Any],\n config: Dict[str, str],\n replicator_id: int,\n timeout: Optional[int] = None,\n) -> int:\n logger.info(\"Started ImageReplication Processor Id: %s\", replicator_id)\n replication_task: Optional[Dict[str, str]] = None\n\n while True:\n try:\n queue_size = queue.qsize()\n logger.info(f\"Queue Size: {queue_size}\")\n\n replication_task = cast(Dict[str, str], queue.get(block=True, timeout=timeout))\n src, dest = replication_task[\"src\"], replication_task[\"dest\"]\n\n with lock:\n logger.info(\"Got Replication Task: %s -> %s\", src, replication_task[\"dest\"])\n\n status = statuses[dest]\n if status == \"Complete\":\n logger.info(\"Skipping Completed Task: %s -> %s\", src, dest)\n continue\n elif status.startswith(\"Failed\"):\n logger.info(\"Skipping Failed Task: %s -> %s\", src, dest)\n continue\n elif status.startswith(\"Replicating\"):\n logger.info(\"Skipping Replicating Task: %s -> %s\", src, dest)\n continue\n else:\n attempt = int(status.split(\":\")[1])\n statuses[dest] = f\"Replicating:{attempt}\"\n\n result = _replicate_image(config, src, dest)\n\n with lock:\n if result == \"SUCCEEDED\":\n logger.info(\"Replication Complete: %s -> %s\", src, dest)\n statuses[dest] = \"Complete\"\n else:\n logger.error(\n \"Image Replication Attempt %s Failed: %s -> %s\",\n attempt,\n src,\n dest,\n )\n statuses[dest] = f\"Failed:{attempt}\"\n queue.put(replication_task)\n except Empty:\n logger.debug(\"Queue Empty, processing Complete\")\n return 0\n except Exception as e:\n with lock:\n status = statuses[dest]\n attempt = int(status.split(\":\")[1])\n logger.error(\n \"Image Replication Attempt %s Failed: %s -> %s\",\n attempt,\n src,\n dest,\n )\n logger.exception(e)\n statuses[dest] = f\"Failed:{attempt}\"\n finally:\n replication_task = None\n time.sleep(3)\n","sub_path":"images/orbit-controller/src/orbit_controller/image_replication.py","file_name":"image_replication.py","file_ext":"py","file_size_in_byte":14062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"221091987","text":"from django.shortcuts import render, redirect\n\nfrom .forms import UserForm, UserProfileForm\n\n# Create your views here.\n\ndef home(request):\n\treturn render(request, 'base.html')\n\ndef cadastro_usuario(request):\n\tif request.method == 'POST':\n\t\tuser_form = UserForm(request.POST)\n\t\tprofile_form = UserProfileForm(request.POST)\n\n\t\tif user_form.is_valid() and profile_form.is_valid():\n\t\t\tuser = user_form.save()\n\t\t\tuser.set_password(user.password)\n\t\t\tuser.save()\n\t\t\tprofile = profile_form.save(commit=False)\n\t\t\tprofile.user = user\n\n\t\t\tif 'foto' in request.FILES:\n\t\t\t\tprofile.foto = request.FILES['foto']\n\n\t\t\tprofile.save()\n\t\t\treturn redirect('core:usuariosucesso')\n\t\telse:\n\t\t\tprint(user_form.errors, profile_form.errors)\n\telse:\n\t\tuser_form = UserForm()\n\t\tprofile_form = UserProfileForm()\n\n\treturn render(request, 'core/cadastro_usuario.html',\\\n\t\t{'user_form': user_form, 'profile_form': profile_form})\n\ndef sucesso_usuario(request):\n\treturn render(request, 
'core/sucesso_usuario.html')","sub_path":"mutirao/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"537207765","text":"#!/usr/bin/env python\r\n# Practice Python Ex 6 String Lists\r\n# http://www.practicepython.org/exercise/2014/03/12/06-string-lists.html\r\n\r\n__author__ = \"N Langley\"\r\n__version__ = \"4/4/18\"\r\n\r\npalin = str(input(\"Enter a palindrome: \"))\r\nrpalin = palin[::-1]\r\n\r\nif palin == rpalin:\r\n print(palin + \" is a Palindrome!\")\r\nelse:\r\n print(palin + \" is not a Palindrome\")","sub_path":"PP_Ex6_String_Lists.py","file_name":"PP_Ex6_String_Lists.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479505920","text":"from typing import List\n\n\"\"\"\nhttps://leetcode.com/problems/two-sum-ii-input-array-is-sorted/submissions/\n\"\"\"\n\n\nclass Solution:\n def twoSum(self, numbers: List[int], target: int) -> List[int]:\n def binarysearch(gte_idx, lte_idx, t_val):\n if gte_idx <= lte_idx:\n mid_idx = (gte_idx + lte_idx) // 2\n mid_val = numbers[mid_idx]\n\n if t_val < mid_val:\n return binarysearch(gte_idx, mid_idx - 1, t_val)\n elif t_val > mid_val:\n return binarysearch(mid_idx + 1, lte_idx, t_val)\n else:\n return mid_idx\n else:\n return -1\n\n for idx, n in enumerate(numbers):\n least = target - n\n\n res_idx = binarysearch(0, len(numbers) - 1, least)\n\n if res_idx != idx and res_idx != -1:\n return sorted([idx + 1, res_idx + 1])\n\n return [-1, -1]\n","sub_path":"archive-dhkim/leetcode/ch18_binary_search/prob68_two-sum-ii-input-array-is-sorted.py","file_name":"prob68_two-sum-ii-input-array-is-sorted.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"260939443","text":"# -*- coding: utf-8 -*-\nfrom copy import deepcopy\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models.query import QuerySet\n\nfrom ralph.lib.mixins.forms import RequestFormMixin\nfrom ralph.lib.permissions.models import PermissionsForObjectMixin\n\n\nclass PermissionPerFieldAdminMixin(object):\n # TODO: required permissions for add and change\n def get_fieldsets(self, request, obj=None):\n new_fieldsets = []\n fieldsets = super().get_fieldsets(\n request, obj\n )\n\n def condition(field):\n can_view = self.model.has_access_to_field(\n field, request.user, 'view'\n )\n can_change = self.model.has_access_to_field(\n field, request.user, 'change'\n )\n return can_view or can_change\n for fieldset in deepcopy(fieldsets):\n fields = [\n field for field in fieldset[1]['fields']\n if condition(field)\n ]\n if not fields:\n continue\n fieldset[1]['fields'] = fields\n new_fieldsets.append(fieldset)\n return new_fieldsets\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Return read only fields respects user permissions.\"\"\"\n can_view = self.model.allowed_fields(request.user, 'view')\n can_change = self.model.allowed_fields(request.user, 'change')\n return list(\n (can_view - can_change) |\n set(super().get_readonly_fields(request, obj))\n )\n\n def get_form(self, request, obj=None, **kwargs):\n \"\"\"Return form with fields which user have access.\"\"\"\n form = super().get_form(request, obj, **kwargs)\n user_allowed_fields = self.model.allowed_fields(request.user)\n forbidden_fields = set(form._meta.fields or 
[]) - user_allowed_fields\n if forbidden_fields:\n for field in forbidden_fields:\n form.Meta.fields.remove(field)\n return form\n\n def get_list_display(self, request):\n \"\"\"Return fields with respect to user permissions.\"\"\"\n list_display = [\n field for field in self.list_display\n if self.model.has_access_to_field(\n field, request.user, action='view'\n )\n ]\n\n return list_display or ['__str__']\n\n\nclass PermissionsPerObjectFormMixin(RequestFormMixin):\n def _check_foreign_keys_permissions(self):\n \"\"\"\n Check if user has permission to save chosen related models\n (ex. ForeignKeys).\n \"\"\"\n for field_name, field in self.fields.items():\n value = []\n if (\n isinstance(field, forms.ModelChoiceField) and\n issubclass(field.queryset.model, PermissionsForObjectMixin)\n ):\n value = self.cleaned_data.get(field_name)\n if value and not isinstance(value, (list, tuple, QuerySet)):\n value = [value]\n if field_name in self.fields and value:\n for obj in value:\n if not obj.has_permission_to_object(self._user):\n self.add_error(field_name, ValidationError(\n \"You don't have permissions to select this value\"\n ))\n\n def clean(self):\n super().clean()\n self._check_foreign_keys_permissions()\n\n\nclass PermissionPerObjectAdminMixin(object):\n \"\"\"\n Admin mixin cooperating with\n `ralph.lib.permissions.models.PermissionsForObjectMixin`\n \"\"\"\n def _check_obj_permission(self, request, obj=None):\n \"\"\"\n Returns True if user has access to object.\n \"\"\"\n obj_permission = True\n if obj and isinstance(obj, PermissionsForObjectMixin):\n obj_permission = obj.has_permission_to_object(request.user)\n return obj_permission\n\n def has_change_permission(self, request, obj=None):\n return (\n super().has_change_permission(request, obj) and\n self._check_obj_permission(request, obj)\n )\n\n def has_delete_permission(self, request, obj=None):\n return (\n super().has_change_permission(request, obj) and\n self._check_obj_permission(request, obj)\n )\n\n def get_queryset(self, request):\n \"\"\"\n If model has object-level permissions, narrow queryset to object\n to which user has permissions.\n \"\"\"\n queryset = super().get_queryset(request)\n if issubclass(self.model, PermissionsForObjectMixin):\n queryset = self.model._get_objects_for_user(request.user, queryset)\n return queryset\n\n def get_field_queryset(self, db, db_field, request):\n \"\"\"\n For each related field (foreign key) which has object-level permissions\n narrow result to objects for which user has permissions.\n \"\"\"\n queryset = super().get_field_queryset(db, db_field, request)\n related_model = db_field.rel.to\n if issubclass(related_model, PermissionsForObjectMixin):\n queryset = related_model._get_objects_for_user(\n request.user, queryset\n )\n return queryset\n\n\nclass PermissionAdminMixin(\n PermissionPerFieldAdminMixin,\n PermissionPerObjectAdminMixin\n):\n pass\n","sub_path":"src/ralph/lib/permissions/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"295492153","text":"# coding: utf-8\n\nimport tensorflow as tf\n\nfrom utils import get_shape\n\n\ndef doubleQ(qf1, qf2):\n actions = get_shape(qf1)[-1]\n a = tf.argmax(qf2, axis=-1)\n a_onehot = tf.one_hot(a, depth=actions, dtype=tf.float32)\n q = tf.reduce_sum(qf1 * a_onehot, axis=-1)\n return 
q\n","sub_path":"module/doubleQ.py","file_name":"doubleQ.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"424290241","text":"import math,time, itertools\ndef isPrime(n):\n if n == 1:\n return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if n%i == 0:\n return False\n return True\n\ndef gen_n_digit_pandigit_prime(n):\n result = []\n arr = ''\n for i in range(1,n+1):\n arr += str(i)\n lst = list(itertools.permutations(arr, n))\n for i in range(len(lst)):\n num = ''\n for j in range(n):\n num += lst[i][j]\n if isPrime(int(num)):\n result.append(int(num))\n result.sort()\n if len(result) == 0:\n return None\n return result[-1]\n\ndef problem_41():\n result = []\n for i in range(4, 9):\n added = gen_n_digit_pandigit_prime(i)\n if added != None:\n result.append(added)\n return result[-1]\n\nstart = time.time()\nprint(problem_41())\nelapsed = time.time() - start\nprint('Result found in %f seconds'%(elapsed))","sub_path":"Problem 41.py","file_name":"Problem 41.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"644854096","text":"import itertools\nimport random\nimport re\n\nimport category_encoders as ce\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom fancyimpute import KNN\nfrom gensim import corpora\nfrom gensim.models import KeyedVectors\nfrom keras.preprocessing.text import Tokenizer\nfrom nltk import ngrams, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import *\nfrom nltk.tag import AffixTagger\nfrom scipy.spatial import distance\nfrom scipy.stats import boxcox\nfrom sklearn.decomposition import PCA, LatentDirichletAllocation, TruncatedSVD\nfrom sklearn.ensemble import RandomTreesEmbedding\nfrom sklearn.feature_extraction import FeatureHasher\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.manifold import TSNE, LocallyLinearEmbedding, SpectralEmbedding\nfrom sklearn.preprocessing import (MinMaxScaler, Normalizer,\n PolynomialFeatures, StandardScaler)\nfrom textstat.textstat import textstat\n\nseed = 1337\n\n\n# Vol 1\ndef label_encode(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n df[categorical_features] = df[categorical_features].apply(\n lambda x: x.cat.codes)\n return df\n\n\ndef hash_encode1(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n hashing_encoder = ce.HashingEncoder(n_components=len(\n categorical_features), cols=categorical_features.tolist())\n df[categorical_features] = hashing_encoder.fit_transform(\n df[categorical_features])\n return df\n\n\ndef hash_encode2(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n hashing_encoder = FeatureHasher(n_features=len(\n categorical_features), input_type='string')\n df[categorical_features] = pd.DataFrame(hashing_encoder.fit_transform(\n df[categorical_features].as_matrix()).toarray())\n return df\n\n\ndef count_encode(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n for i in categorical_features:\n df[i] = df[i].astype('object').replace(df[i].value_counts())\n return df\n\n\ndef labelcount_encode(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n for 
cat_feature in categorical_features:\n cat_feature_value_counts = df[cat_feature].value_counts()\n value_counts_list = cat_feature_value_counts.index.tolist()\n value_counts_range_rev = list(\n reversed(range(len(cat_feature_value_counts)))) # for ascending ordering\n # for descending ordering\n value_counts_range = list(range(len(cat_feature_value_counts)))\n labelcount_dict = dict(zip(value_counts_list, value_counts_range))\n df[cat_feature] = df[cat_feature].map(labelcount_dict)\n return df\n\n\ndef target_encode(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n for cat_feature in categorical_features:\n group_target_mean = df.groupby([cat_feature])['target'].mean()\n df[cat_feature] = df[cat_feature].astype(\n 'object').replace(group_target_mean)\n return df\n\n\n# Vol 2\ndef polynomial_encode(df2):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n df[categorical_features] = df[categorical_features].apply(\n lambda x: x.cat.codes)\n poly = PolynomialFeatures(degree=2, interaction_only=False)\n df = pd.DataFrame(poly.fit_transform(df))\n return df\n\n\ndef nan_encode(df2):\n df = df2.copy()\n missing_cols = np.sum(pd.isnull(df))[np.sum(\n pd.isnull(df)) >= 1].index.tolist()\n for i in missing_cols:\n df[i] = df[i].replace(df[i].cat.categories.tolist(), 0)\n df[i] = df[i].replace(np.nan, 1)\n return df\n\n\ndef group_featurebyfeature_encode(df2, newvar_name, var1, var2, transformation):\n df = df2.copy()\n categorical_features = df.select_dtypes(\n include=['category']).columns.values\n # label encode categorical features if to be used on categorical features too\n df[categorical_features] = df[categorical_features].apply(\n lambda x: x.cat.codes)\n # determine groups based on var1, then apply a chosen transformation to the groups based on values of var2\n df['{}'.format(newvar_name)] = (df.groupby(var1))[\n var2].transform('{}'.format(transformation))\n return df\n\n\ndef impute_explicit_numerical(df2):\n df = df2.copy()\n df.fillna(-999, inplace=True) # impute with a specified value\n return df\n\n\ndef impute_mean_numerical(df2):\n df = df2.copy()\n numerical_features = df.select_dtypes(include=['number']).columns.values\n for i in numerical_features:\n # impute with mean of each column\n mean = df[i][~np.isnan(df[i])].mean()\n df[i] = df[i].replace(np.nan, mean)\n return df\n\n\ndef impute_median_numerical(df2):\n df = df2.copy()\n numerical_features = df.select_dtypes(include=['number']).columns.values\n for i in numerical_features:\n # impute with median of each column\n mean = df[i][~np.isnan(df[i])].median()\n df[i] = df[i].replace(np.nan, mean)\n return df\n\n\ndef impute_knn_numerical(df2):\n df = df2.copy()\n numerical_features = df.select_dtypes(include=['number']).columns.values\n # impute with mean using KNN algorithm for 5 closest rows\n dfknn = pd.DataFrame(KNN(k=5).complete(df), columns=df2.columns)\n return dfknn\n\n\ndef round_numerical(df2, precision):\n df = df2.copy()\n df = df.round(precision)\n return df\n\n\ndef bin_numerical(df2, step):\n df = df2.copy()\n numerical_features = df.select_dtypes(include=['number']).columns.values\n for i in numerical_features:\n feature_range = np.arange(0, np.max(df[i]), step)\n df[i] = pd.cut(df[i], feature_range, right=True)\n df[i] = pd.factorize(df[i], sort=True)[0]\n return df\n\n\ndef scale_standard_numerical(df2):\n df = df2.copy()\n df = pd.DataFrame(StandardScaler().fit_transform(df), columns=df2.columns)\n 
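# StandardScaler returns a bare ndarray, so the DataFrame wrapper above restores the original column labels\n    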
return df\n\n\ndef scale_minmax_numerical(df2):\n    df = df2.copy()\n    df = pd.DataFrame(MinMaxScaler().fit_transform(df), columns=df2.columns)\n    return df\n\n\n# Vol 3\ndef locally_linear_embedding_others(df2, n):\n    df = df2.copy()\n    # specifying the number of manifold dimensions, to which data is mapped\n    lle = LocallyLinearEmbedding(n_components=n, random_state=seed)\n    df = pd.DataFrame(lle.fit_transform(df))\n    return df\n\n\ndef spectral_embedding_others(df2, n):\n    df = df2.copy()\n    # specifying the number of manifold dimensions, to which data is mapped\n    se = SpectralEmbedding(n_components=n, random_state=seed)\n    df = pd.DataFrame(se.fit_transform(df))\n    return df\n\n\ndef tsne_embedding(df2, n):\n    df = df2.copy()\n    # specifying the number of manifold dimensions, to which data is mapped\n    tsne = TSNE(n_components=n, random_state=seed)\n    df = pd.DataFrame(tsne.fit_transform(df))\n    return df\n\n\ndef randomtrees_embedding_others(df2):\n    df = df2.copy()\n    rte = RandomTreesEmbedding(random_state=seed)\n    df = pd.DataFrame(rte.fit_transform(df).toarray())\n    return df\n\n\ndef row_statistics_others(df2):\n    df = df2.copy()\n    df['zeros'] = np.sum(df == 0, axis=1)\n    df['non-zeros'] = np.sum(df != 0, axis=1)\n    df['NaNs'] = np.sum(np.isnan(df), axis=1)\n    df['negatives'] = np.sum(df < 0, axis=1)\n    df['sum_row'] = df.sum(axis=1)\n    df['mean_row'] = df.mean(axis=1)\n    df['std_row'] = df.std(axis=1)\n    df['max_row'] = np.amax(df, axis=1)\n    return df\n\n\ndef interactions_others(df2):\n    df = df2.copy()\n    cols = df2.columns\n    for comb in itertools.combinations(cols, 2):\n        feat = comb[0] + \"_plus_\" + comb[1]\n        # addition can be changed to any other interaction like subtraction, multiplication, division\n        df[feat] = df[comb[0]] + df[comb[1]]\n    return df\n\n\ndef target_engineering_others(df2):\n    df = df2.copy()\n    df['target'] = np.log(df['target']) # log-transform\n    df['target'] = (df['target'] ** 0.25) + 1\n    df['target'] = df['target'] ** 2 # square-transform\n    df['target'], _ = boxcox(df['target']) # Box-Cox transform\n\n    # Bin target variable in case of regression\n    target_range = np.arange(0, np.max(df['target']), 100)\n    df['target'] = np.digitize(df.target.values, bins=target_range)\n    return df\n\n\n# Vol 4 - Text\n\nstemmer = snowball.SnowballStemmer('english')\nlemmatizer = WordNetLemmatizer()\nstopwords_eng = stopwords.words('english')\nwords = re.compile(r\"\\\w+\", re.I)\n\nmodel = KeyedVectors.load_word2vec_format(\n    '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/embeddings/GoogleNews-vectors-negative300.bin', binary=True)\n\n# Cleaning\n\n\ndef lowercase(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].str.lower()\n    return df\n\n\ndef unidecode(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].str.encode('ascii', 'ignore')\n    return df\n\n\ndef remove_nonalpha(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].str.replace('\\\W+', ' ')\n    return df\n\n\ndef repair_words(df2):\n    # https://www.analyticsvidhya.com/blog/2014/11/text-data-cleaning-steps-python/\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: (''.join(''.join(s)[:2]\n                            for _, s in itertools.groupby(x))))\n    return df\n\n# Tokenizing\n\n\ndef tokenize(df2):\n    df = df2.copy()\n    text_feats &#13;
= df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: word_tokenize(x))\n    return df\n\n\ndef ngram(df2, n):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [i for i in ngrams(word_tokenize(x), n)])\n    return df\n\n\ndef skipgram(df2, ngram_n, skip_n):\n    def random_sample(words_list, skip_n):\n        return [words_list[i] for i in sorted(random.sample(range(len(words_list)), skip_n))]\n\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(\n            lambda x: [i for i in ngrams(word_tokenize(x), ngram_n)])\n        df[i] = df[i].apply(lambda x: random_sample(x, skip_n))\n    return df\n\n\ndef chargram(df2, n):\n    # http://stackoverflow.com/questions/18658106/quick-implementation-of-character-n-grams-using-python\n    def chargram_generate(string, n):\n        return [string[i:i + n] for i in range(len(string) - n + 1)]\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [i for i in chargram_generate(x, n)])\n    return df\n\n# Removing\n\n\ndef remove_stops(df2, stopwords):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(\n            lambda x: [i for i in word_tokenize(x) if i not in stopwords])\n    return df\n\n\ndef remove_extremes(df2, stopwords, min_count=3, max_frequency=0.75):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(\n            lambda x: [i for i in word_tokenize(x) if i not in stopwords])\n    tokenized = []\n    for i in text_feats:\n        tokenized += df[i].tolist()\n    dictionary = corpora.Dictionary(tokenized)\n    dictionary.filter_extremes(no_below=min_count, no_above=max_frequency)\n    dictionary.compactify()\n    df = df2.copy()\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [i for i in word_tokenize(x) if i not in stopwords and i not in\n                            list(dictionary.token2id.keys())])\n    return df\n\n# Roots\n\n\ndef chop(df2, n):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [i[:n] for i in word_tokenize(x)])\n    return df\n\n\ndef stem(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [stemmer.stem(i)\n                            for i in word_tokenize(x)])\n    return df\n\n\ndef lemmat(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: [lemmatizer.lemmatize(i)\n                            for i in word_tokenize(x)])\n    return df\n\n# Enriching\n\n\ndef extract_entity(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i in text_feats:\n        df[i] = df[i].apply(lambda x: word_tokenize(x))\n        df[i] = df[i].apply(lambda x: nltk.pos_tag(x))\n        df[i] = df[i].apply(lambda x: [i[1:] for i in x])\n    return df\n\n\ndef doc_features(df2):\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    for i, col in enumerate(text_feats):\n        df['num_characters_{}'.format(i)] = df[col].map(\n            lambda x: len(str(x))) # length of sentence\n        df['num_words_{}'.format(i)] = df[col].map(\n            lambda x: len(str(x).split())) # number of words\n        df['num_spaces_{}'.format(i)] = df[col].map(lambda x: x.count(' '))\n        &#13;
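# counts of alphabetic vs. non-alphabetic characters in each document\n        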
df['num_alpha_{}'.format(i)] = df[col].apply(\n lambda x: sum(i.isalpha()for i in x))\n df['num_nonalpha_{}'.format(i)] = df[col].apply(\n lambda x: sum(1 - i.isalpha()for i in x))\n return df\n\n\ndef get_readability(df2):\n df = df2.copy()\n text_feats = df.select_dtypes(include=['object']).columns.values\n for i, col in enumerate(text_feats):\n df['flesch_reading_ease{}'.format(i)] = df[col].apply(\n lambda x: textstat.flesch_reading_ease(x))\n df['smog_index{}'.format(i)] = df[col].apply(\n lambda x: textstat.smog_index(x))\n df['flesch_kincaid_grade{}'.format(i)] = df[col].apply(\n lambda x: textstat.flesch_kincaid_grade(x))\n df['coleman_liau_index{}'.format(i)] = df[col].apply(\n lambda x: textstat.coleman_liau_index(x))\n df['automated_readability_index{}'.format(i)] = df[col].apply(\n lambda x: textstat.automated_readability_index(x))\n df['dale_chall_readability_score{}'.format(i)] = df[col].apply(\n lambda x: textstat.dale_chall_readability_score(x))\n df['difficult_words{}'.format(i)] = df[col].apply(\n lambda x: textstat.difficult_words(x))\n df['linsear_write_formula{}'.format(i)] = df[col].apply(\n lambda x: textstat.linsear_write_formula(x))\n df['gunning_fog{}'.format(i)] = df[col].apply(\n lambda x: textstat.gunning_fog(x))\n df['text_standard{}'.format(i)] = df[col].apply(\n lambda x: textstat.text_standard(x))\n return df\n\n# Similarities & transformations\n\n\ndef token_similarity(df2):\n\n # https://www.kaggle.com/the1owl/quora-question-pairs/matching-que-for-quora-end-to-end-0-33719-pb\n def word_match_share(row, col1, col2, stopwords):\n q1words = {}\n q2words = {}\n for word in str(row[col1]).lower().split():\n if word not in stopwords:\n q1words[word] = 1\n for word in str(row[col2]).lower().split():\n if word not in stopwords:\n q2words[word] = 1\n if len(q1words) == 0 or len(q2words) == 0:\n return 0\n shared_words_in_q1 = [w for w in q1words.keys() if w in q2words]\n shared_words_in_q2 = [w for w in q2words.keys() if w in q1words]\n R = (len(shared_words_in_q1) + len(shared_words_in_q2)) / \\\n (len(q1words) + len(q2words))\n return R\n\n df = df2.copy()\n df['word_match_share'] = df.apply(lambda x: word_match_share(x, 'question1', 'question2', stopwords_eng),\n axis=1, raw=True)\n return df\n\n\ndef word2vec_embedding(df2, model, num_words, num_dims):\n df = df2.copy()\n text_feats = df.select_dtypes(include=['object']).columns.values\n for i in text_feats:\n df[i] = df[i].apply(lambda x: \" \".join(\n [stemmer.stem(i) for i in word_tokenize(x)]))\n tokenizer = Tokenizer(num_words=num_words)\n tokenizer.fit_on_texts(df['question1'] + df['question2'])\n word_index = tokenizer.word_index\n embedding_matrix = np.zeros((num_words, num_dims))\n for word, i in word_index.items():\n if word in model.vocab:\n embedding_matrix[i] = model.word_vec(word)\n return pd.DataFrame(embedding_matrix)\n\n\ndef distances(df2, model):\n\n # https://github.com/abhishekkrthakur/is_that_a_duplicate_quora_question/blob/master/feature_engineering.py\n def sent2vec(s):\n words = str(s).lower().encode().decode('utf-8')\n words = word_tokenize(words)\n words = [w for w in words if w not in stopwords_eng]\n words = [w for w in words if w.isalpha()]\n M = []\n for w in words:\n try:\n M.append(model[w])\n except Exception as e:\n print(e)\n continue\n M = np.array(M)\n v = M.sum(axis=0)\n return v / np.sqrt((v ** 2).sum())\n\n df = df2.copy()\n question1_vectors = np.zeros((df.shape[0], 300))\n for i, q in (enumerate(df.question1.values)):\n question1_vectors[i, :] = sent2vec(q)\n 
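# build the matching sentence-embedding matrix for question2\n    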
question2_vectors = np.zeros((df.shape[0], 300))\n    for i, q in (enumerate(df.question2.values)):\n        question2_vectors[i, :] = sent2vec(q)\n    df['cosine_distance'] = [distance.cosine(x, y) for (x, y) in zip(np.nan_to_num(question1_vectors),\n                                                                     np.nan_to_num(question2_vectors))]\n    df['jaccard_distance'] = [distance.jaccard(x, y) for (x, y) in zip(np.nan_to_num(question1_vectors),\n                                                                       np.nan_to_num(question2_vectors))]\n    df['hamming_distance'] = [distance.hamming(x, y) for (x, y) in zip(np.nan_to_num(question1_vectors),\n                                                                       np.nan_to_num(question2_vectors))]\n    return df\n\n\ndef bag_of_words(df2):\n    df = df2.copy()\n    cv = CountVectorizer()\n    bow = cv.fit_transform(df.question1 + df.question2).toarray()\n    return pd.DataFrame(bow, columns=cv.get_feature_names())\n\n\ndef tf_idf(df2):\n    df = df2.copy()\n    tf = TfidfVectorizer()\n    tfidf = tf.fit_transform(df.question1 + df.question2).toarray()\n    return pd.DataFrame(tfidf, columns=tf.get_feature_names())\n\n\ndef PCA_text(df2, ndims):\n    df = df2.copy()\n    bow = CountVectorizer().fit_transform(df.question1 + df.question2).toarray()\n    pca_bow = PCA(ndims, random_state=seed).fit_transform(bow)\n    return pd.DataFrame(pca_bow)\n\n\ndef SVD_text(df2, ndims):\n    df = df2.copy()\n    bow = CountVectorizer().fit_transform(df.question1 + df.question2)\n    svd_bow = TruncatedSVD(ndims, random_state=seed).fit_transform(bow)\n    return pd.DataFrame(svd_bow)\n\n\ndef LDA_text(df2, ntopics):\n    df = df2.copy()\n    bow = CountVectorizer().fit_transform(df.question1 + df.question2)\n    lda_bow = LatentDirichletAllocation(\n        ntopics, random_state=seed).fit_transform(bow)\n    return pd.DataFrame(lda_bow)\n\n\ndef LDA_text2(df2, ntopics):\n    cv = CountVectorizer(stop_words='english', min_df=1, max_df=0.999)\n    lda = LatentDirichletAllocation(ntopics, random_state=seed, n_jobs=1)\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    cv.fit(df.question1 + df.question2)\n    bow = cv.transform(df.question1 + df.question2)\n    lda.fit(bow)\n    ldas = []\n    for i in text_feats:\n        bow_i = cv.transform(df[i])\n        ldas.append(pd.DataFrame(lda.transform(bow_i), index=df[i]))\n    return ldas\n\n\ndef LSA_text(df2, ndims):\n    cv = CountVectorizer(stop_words='english', min_df=1, max_df=0.999)\n    svd = TruncatedSVD(ndims, random_state=1337)\n    normalizer = Normalizer(copy=False)\n    df = df2.copy()\n    text_feats = df.select_dtypes(include=['object']).columns.values\n    cv.fit(df.question1 + df.question2)\n    bow = cv.transform(df.question1 + df.question2)\n    svd.fit(bow)\n    transformed_bow = svd.transform(bow)\n    normed_bow = normalizer.fit(transformed_bow)\n    svds = []\n    for i in text_feats:\n        bow_i = cv.transform(df[i])\n        svd_i = svd.transform(bow_i)\n        normed_i = pd.DataFrame(normalizer.transform(svd_i), index=df[i])\n        svds.append(normed_i)\n    return svds\n\n\n# Projection onto circle\ndef polar_coords_column(df2, colname, normalize=True):\n    df = df2.copy()\n    max_val = np.max(df['{}'.format(colname)])\n    val_range = np.linspace(0, 360, max_val + 1)\n    cat_feature_value_counts = df['{}'.format(colname)].value_counts()\n    value_counts_list = cat_feature_value_counts.index.tolist()\n    angle_dict = dict(zip(value_counts_list, val_range))\n\n    df['{}_raw'.format(colname)] = df['{}'.format(colname)].map(angle_dict)\n    df['{}_sin'.format(colname)] = np.sin(np.deg2rad(df['{}_raw'.format(colname)]))\n    df['{}_cos'.format(colname)] = np.cos(np.deg2rad(df['{}_raw'.format(colname)]))\n    df.drop(['{}_raw'.format(colname)], axis=1, inplace=True)\n    if normalize:\n        df['{}_sin'.format(colname)] = (df['{}_sin'.format(colname)] - &#13;
np.min(df['{}_sin'.format(colname)])) / \\\n            ((np.max(df['{}_sin'.format(colname)])) -\n             np.min(df['{}_sin'.format(colname)]))\n        df['{}_cos'.format(colname)] = (df['{}_cos'.format(colname)] - np.min(df['{}_cos'.format(colname)])) / \\\n            ((np.max(df['{}_cos'.format(colname)])) -\n             np.min(df['{}_cos'.format(colname)]))\n    return df\n","sub_path":"scripts/fe_functions.py","file_name":"fe_functions.py","file_ext":"py","file_size_in_byte":22149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"508999749","text":"\n\n# class header\nclass _CULPRIT():\n\tdef __init__(self,): \n\t\tself.name = \"CULPRIT\"\n\t\tself.definitions = [u'someone who has done something wrong: ', u'a fact or situation that is the reason for something bad happening: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_culprit.py","file_name":"_culprit.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"370150994","text":"import os\nimport torch.optim as optim\nfrom models.ImageAutoEncoder import ImageDecoder\nfrom models.ImgDiscriminator import *\n\nfrom losses import *\nfrom trainers.trainer import trainer\nfrom utils import *\nfrom itertools import chain\n\nclass wgan_trainer(trainer):\n\n    def __init__(self, args, embedding, vocab):\n        super(wgan_trainer, self).__init__(args, embedding, vocab)\n\n        # Setting up the networks\n        self.networks[\"generator\"] = ImageDecoder(feature_dimension=args.latent_size,\n                                                   img_dimension= args.crop_size)\n\n        self.networks[\"discriminator\"] = ImgDiscriminator(args.crop_size)\n\n\n        # Setting up the optimizers\n        self.optimizers[\"generator\"] = optim.Adam(self.networks[\"generator\"].parameters(),\\\n            lr=args.learning_rate, betas=(0.5, 0.999), weight_decay=0.00001)\n\n        # self.optimizers[\"discriminator\"] = optim.RMSprop(self.networks[\"discriminator\"].parameters(),\n        #                                                  lr=args.learning_rate)\n        self.optimizers[\"discriminator\"] = optim.Adam(self.networks[\"discriminator\"].parameters(),\\\n            lr=args.learning_rate, betas=(0.5, 0.999), weight_decay=0.00001)\n\n        self.one = torch.FloatTensor([1])\n        self.mone = self.one * -1\n        if args.cuda:\n            self.one = self.one.cuda()\n            self.mone = self.mone.cuda()\n\n\n        # self.gen_loss = Variable(torch.FloatTensor([1]* args.batch_size))\n        # self.dis_loss = Variable(torch.FloatTensor([1]* args.batch_size))\n        self.nets_to_cuda()\n\n        self.step = 0\n\n        # Setting up the losses\n        self.create_losses_meter([\"Ls_D\", \"Ls_G\", \"Ls_D_rl\", \"Ls_D_fk\",\"Ls_GP\"])\n\n        # Setting up the noise\n        self.noise = torch.FloatTensor(args.batch_size, args.latent_size, 1, 1).cuda()\n        self.fixed_noise = Variable(torch.FloatTensor(args.batch_size, args.latent_size, 1, 1).normal_(0, 1).cuda())\n\n    def forward(self, epoch, images, captions, lengths, save_images):\n\n        if self.iteration < 2500:\n            cycle = 101\n        else:\n            cycle = 6\n\n        # train_gen = self.iteration > 500 and self.iteration % 6\n        if not self.iteration % cycle:\n            #\n            for p in self.networks[\"discriminator\"].parameters(): # reset requires_grad\n                p.requires_grad = False # they are set to False below in netG update\n\n            self.train_G(epoch, images, captions, lengths)\n            # self.optimizers[\"generator\"].step()\n            #\n            for p in self.networks[\"discriminator\"].parameters(): # reset requires_grad\n                &#13;
p.requires_grad = True # they are set to False below in netG update\n else:\n\n self.train_D(epoch, images, captions, lengths)\n\n self.iteration += 1\n # Log Losses:\n # self.losses[\"GEN_loss\"].update(self.gen_loss.data[0],self.args.batch_size)\n # self.losses[\"DIS_loss\"].update(self.dis_loss.data[0],self.args.batch_size)\n\n if save_images:\n self.save_samples(images[0], self.networks[\"generator\"](self.fixed_noise)[0], captions[0], captions[0])\n\n\n\n def train_D(self,epoch, images, captions, lengths):\n\n # clamp parameters to a cube\n for p in self.networks[\"discriminator\"].parameters():\n p.data.clamp_(-0.01, 0.01)\n\n # train with real\n self.networks[\"discriminator\"].zero_grad()\n\n errD_real = self.networks[\"discriminator\"](images)\n errD_real.backward(self.one)\n\n # train with fake\n self.noise.resize_(self.args.batch_size, self.args.latent_size, 1, 1).normal_(0, 1)\n noisev = Variable(self.noise, volatile=True) # totally freeze netG\n fake = Variable(self.networks[\"generator\"](noisev).data)\n\n inputv = fake\n errD_fake = self.networks[\"discriminator\"](inputv)\n errD_fake.backward(self.mone)\n\n errGP = calc_gradient_penalty(self.networks[\"discriminator\"], images, fake)\n errGP.backward()\n\n errD = errD_real - errD_fake + errGP\n self.optimizers[\"discriminator\"].step()\n\n self.losses[\"Ls_D\"].update(errD.data[0], self.args.batch_size)\n self.losses[\"Ls_D_fk\"].update(errD_fake.data[0], self.args.batch_size)\n self.losses[\"Ls_D_rl\"].update(errD_real.data[0], self.args.batch_size)\n self.losses[\"Ls_GP\"].update(errGP.data[0], self.args.batch_size)\n\n # Gradient Penalty\n\n # img_gp = calc_gradient_penalty(self.networks[\"discriminator\"], images, img_gen)\n\n\n def train_G(self,epoch, images, captions, lengths):\n self.networks[\"generator\"].zero_grad()\n # in case our last batch was the tail batch of the dataloader,\n # make sure we feed a full batch of noise\n self.noise.resize_(self.args.batch_size, self.args.latent_size, 1, 1).normal_(0, 1)\n noisev = Variable(self.noise)\n fake = self.networks[\"generator\"](noisev)\n errG = self.networks[\"discriminator\"](fake)\n errG.backward(self.one)\n self.optimizers[\"generator\"].step()\n\n self.losses[\"Ls_G\"].update(errG.data[0], self.args.batch_size)\n\n def backpropagate(self, loss):\n loss.backward()\n for opt in self.optimizers.values():\n opt.step()\n self.step += 1\n\n\n","sub_path":"trainers/wgan_trainer.py","file_name":"wgan_trainer.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13635821","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n\n def rob(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n\n if not root:\n return 0\n if not root.left and not root.right:\n return root.val\n # case1:\n case1 = root.val\n if root.left:\n if root.left.left:\n case1 += self.rob(root.left.left)\n if root.left.right:\n case1 += self.rob(root.left.right)\n if root.right:\n if root.right.left:\n case1 += self.rob(root.right.left)\n if root.right.right:\n case1 += self.rob(root.right.right)\n # case2:\n case2 = 0\n if root.left:\n case2 += self.rob(root.left)\n if root.right:\n case2 += self.rob(root.right)\n\n return max(case1, 
case2)\n","sub_path":"HouseRobberIII.py","file_name":"HouseRobberIII.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"269457697","text":"import urllib.request as req\r\nimport bs4\r\nimport json\r\n\r\n# --------------- fetch the url and return the parsed JSON response as a list ---------------\r\ndef getdata(url):\r\n    re = req.Request(url , headers = {\r\n        \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\"\r\n    })\r\n\r\n    with req.urlopen(re) as response:\r\n        jsondata = response.read().decode(\"utf-8\")\r\n\r\n    jsondata = json.loads(jsondata)\r\n\r\n    return jsondata\r\n\r\n\r\n\r\n\r\n\r\n# --------------- build a request object with request headers attached so the scraper is not blocked ---------------\r\nurl = \"https://ibus.tbkc.gov.tw/cms/driving-map\"\r\n\r\nre = req.Request(url , headers = {\r\n    \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\"\r\n})\r\n\r\nwith req.urlopen(re) as response:\r\n    data = response.read().decode(\"utf-8\")\r\n\r\n\r\n\r\n# --------------- grab the html source, keep the script tags with type = text/javascript, then convert the result to str ---------------\r\nroot = bs4.BeautifulSoup(data , \"html.parser\")\r\ntitle = root.find_all(\"script\" , {\"type\":\"text/javascript\"})\r\ntitle1 = (str(title))\r\n\r\n\r\n\r\n# --------------- slice out part of the filtered string (the data from provider up to bus_type) ---------------\r\ntitle2 = 'provider:'\r\ntitle3 = 'bus_type'\r\ndata = title1[title1.index(title2):title1.index(title3)]\r\n\r\n\r\n\r\n# --------------- slice the extracted data again and use eval() to turn the str into Python objects ---------------\r\ntitle2 = '{\"ProviderId\"'\r\ntitle3 = '\"}],'\r\ndata = data[data.index(title2):data.index(title3)+2]\r\nb = eval(data)\r\n\r\n\r\n\r\n#--------------- collect the routeMapImageUrl values (urls) contained in the processed data ---------------\r\nurlstr = []\r\nnamezh = []\r\nfor i in b:\r\n    if(i[\"routeMapImageUrl\"] != \"\"):\r\n        carname = i[\"NameZh\"]\r\n        str1 = i[\"routeMapImageUrl\"]\r\n        str2 = ':\\/\\/ibus.tbkc.gov.tw\\/cms\\/api\\/route\\/' \r\n        str3 = 'map'\r\n\r\n        jsonurl = \"https\" + str1[str1.index(str2):str1.index(str3)] + \"estimate\"\r\n        jsonurl = jsonurl.replace('\\/','/')\r\n        urlstr.append(jsonurl)\r\n        namezh.append(carname)\r\n\r\n\r\n\r\n# --------------- main ---------------\r\nindex=0\r\nfor url in urlstr:\r\n    jsondata=getdata(url)\r\n    \r\n    count = 0\r\n    while count<2: # count<2 means fetch the info of 2 buses\r\n        print(\"[bus name] : \" , namezh[index])\r\n        \r\n        for i in jsondata:\r\n            print(\"[stop name] : \" , i[\"StopName\"])\r\n            print(\"[arrival time] : \",i[\"ComeTime\"])\r\n            print()\r\n            print(\"===============================================================================\")\r\n        index+=1\r\n        count+=1\r\n        break\r\n","sub_path":"KHbus.py","file_name":"KHbus.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"483653442","text":"import hashlib\nimport random\nimport datetime\nfrom django.db import transaction, IntegrityError\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render_to_response, get_object_or_404, render, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.utils import timezone\nfrom accounts.forms import RegistrationForm\nfrom accounts.models import Profile\n\nimport logging\n\nlog = &#13;
logging.getLogger(__name__)\n\n\ndef register(request):\n if request.user.is_authenticated():\n return redirect(home)\n form = RegistrationForm()\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n data={}\n data['username'] = form.cleaned_data['username']\n data['email'] = form.cleaned_data['email']\n data['password1'] = form.cleaned_data['password1']\n\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n user_name_salt = data['username']\n if isinstance(user_name_salt, unicode):\n user_name_salt = user_name_salt.encode('utf8')\n data['activation_key'] = hashlib.sha1(salt+user_name_salt).hexdigest()\n data['email_subject'] = \"Activation mail\"\n data['host_name'] = request.get_host()\n form.sendEmail(data)\n form.save(data)\n\n request.session['registered']=True\n return redirect(home)\n else:\n registration_form = form\n return render(request, 'registration/register.html', locals())\n\n\ndef register_success(request):\n return render_to_response(\n 'registration/success.html',\n )\n\n\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n\n@login_required\ndef home(request):\n return render_to_response('home.html',{'user': request.user})\n\n\ndef activation(request, key):\n activation_expired = False\n already_active = False\n profile = get_object_or_404(Profile, activation_key=key)\n if not profile.user.is_active:\n if timezone.now() > profile.key_expires:\n activation_expired = True\n id_user = profile.user.id\n else:\n profile.user.is_active = True\n profile.user.save()\n\n else:\n already_active = True\n return render(request, 'registration/success.html', locals())\n\n\ndef new_activation_link(request, user_id):\n form = RegistrationForm()\n data = {}\n user = User.objects.get(id=user_id)\n if user is not None and not user.is_active:\n data['username'] = user.username\n data['email'] = user.email\n data['email_subject'] = \"New activation link\"\n\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n user_name_salt = data['username']\n if isinstance(user_name_salt, unicode):\n user_name_salt = user_name_salt.encode('utf8')\n data['activation_key'] = hashlib.sha1(salt+user_name_salt).hexdigest()\n\n profile = Profile.objects.get(user=user)\n profile.activation_key = data['activation_key']\n profile.key_expires = datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=2), \"%Y-%m-%d %H:%M:%S\")\n profile.list_of_friends = []\n profile.save()\n\n form.sendEmail(data)\n request.session['new_link'] = True\n\n return redirect(home)\n\n\ndef add_nothing(request):\n return render_to_response('friends.html', {'user': request.user})\n\n\ndef add_friend(request, friend_id):\n if request.method == 'POST':\n user = User.objects.get(id=request.POST['user'])\n friend = User.objects.get(id=friend_id)\n if user is not None and friend is not None and (user.is_active and friend.is_active):\n user_profile = Profile.objects.get(user=user)\n user_profile.add_friend(friend_id)\n user_profile.save()\n friend_profile = Profile.objects.get(user=friend)\n friend_profile.add_friend(user.id)\n friend_profile.save()\n\n\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"499961971","text":"from threading import Thread\nimport unittest\nimport os\nimport glob\nimport re\n\nimport sublime\nimport sublime_plugin\n\nif 
bool(os.getenv('SUBLIME_PHP_GRAMMAR_DEBUG')):\n def debug_message(message):\n print('DEBUG php-grammar test: %s' % str(message))\nelse:\n def debug_message(message):\n pass\n\n\nclass Configuration():\n def on_load(self):\n self.package_root_path = os.path.dirname(__file__)\n self.package_name = os.path.basename(self.package_root_path)\n self.tests_root_path = os.path.join(self.package_root_path, 'tests')\n\n if int(sublime.version()) >= 3092:\n self.syntax_file_path = os.path.join('Packages', self.package_name, 'PHP.sublime-syntax')\n else:\n self.syntax_file_path = os.path.join('Packages', self.package_name, 'PHP.tmLanguage')\n\n\nconfiguration = Configuration()\n\n\ndef plugin_loaded():\n configuration.on_load()\n\n\nclass SublimeViewAPI():\n\n def __init__(self, view):\n self.view = view\n\n def to_str(self):\n return self.view.substr(sublime.Region(0, self.view.size()))\n\n def to_scope_name_repr(self, region=None):\n \"\"\"\n Return a string scope name representation of the view content.\n\n Each point in the view is converted to a scope name. A newline is\n appended to each scope name.\n \"\"\"\n content = ''\n\n if region is None:\n in_range = range(self.view.size())\n else:\n in_range = range(region.begin(), region.end())\n\n for point in in_range:\n content += self.view.scope_name(point).strip() + \"\\n\"\n\n return content.strip()\n\n\nclass GeneratePhpGrammarSyntaxTestExpectation(sublime_plugin.TextCommand):\n\n def run(self, edit):\n\n test_begin_line_region = self.view.find('^--TEST--$', 0)\n file_begin_line_region = self.view.find('^--FILE--$', 0)\n expect_begin_line_region = self.view.find('^--EXPECT--$', 0)\n\n if test_begin_line_region.empty() or file_begin_line_region.empty() or expect_begin_line_region.empty():\n return\n\n file_desciption_region = sublime.Region(test_begin_line_region.end() + 1, file_begin_line_region.begin() - 1)\n file_content_region = sublime.Region(file_begin_line_region.end() + 1, expect_begin_line_region.begin() - 1)\n expect_content_region = sublime.Region(expect_begin_line_region.end() + 1, self.view.size())\n\n if file_desciption_region.empty() or file_content_region.empty():\n return\n\n file_content_scope_repr = SublimeViewAPI(self.view).to_scope_name_repr(file_content_region)\n\n self.view.replace(edit, expect_content_region, file_content_scope_repr.strip())\n\n def is_enabled(self):\n if not self.view.file_name():\n return False\n return bool(re.match('.*[a-z][a-z0-9_]*[a-z0-9]_test.php$', self.view.file_name()))\n\n\nif bool(os.getenv('SUBLIME_PHP_GRAMMAR_DEBUG')):\n\n class PhpGrammarShowCursorScopeNameInStatusLine(sublime_plugin.EventListener):\n \"\"\"Update the status line with the scope name under the cursor.\"\"\"\n\n def on_post_text_command(self, view, command_name, args):\n self.update_scope(view)\n\n def on_post_window_command(self, window, command_name, args):\n view = window.active_view()\n if not view:\n return\n\n self.update_scope(view)\n\n if 3000 <= int(sublime.version()) < 3070:\n # Works around issue where on_post_window_command never gets called\n # See https://github.com/SublimeTextIssues/Core/issues/141\n def on_window_command(self, window, command_name, args):\n self.on_post_window_command(window, command_name, args)\n\n def update_scope(self, view):\n for region in view.sel():\n scope_name = view.scope_name(region.begin()).strip()\n view.set_status('scope', 'Scope: \"' + scope_name + '\"')\n return\n\n\nclass __php_grammar_test_view_replace(sublime_plugin.TextCommand):\n\n def run(self, edit, text):\n 
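# test helper: overwrite the entire view buffer with the given text\n        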
self.view.replace(edit, sublime.Region(0, self.view.size()), text)\n\n\nclass __php_grammar_test_view_replace_cursor(sublime_plugin.TextCommand):\n\n def run(self, edit, reverse=False):\n\n if reverse:\n cursor_position = self.view.sel()[0]\n self.view.replace(edit, cursor_position, '|')\n else:\n cursor_placeholder = self.view.find('\\|', 0)\n\n if not cursor_placeholder or cursor_placeholder.empty():\n return\n\n self.view.sel().clear()\n self.view.sel().add(cursor_placeholder.begin())\n self.view.replace(edit, cursor_placeholder, '')\n\n\nclass ViewTestCase(unittest.TestCase):\n\n def setUp(self):\n self.view = sublime.active_window().new_file()\n self.view.set_scratch(True)\n self.view.settings().set('auto_indent', False)\n self.view.settings().set('indent_to_bracket', False)\n self.view.settings().set('tab_size', 4)\n self.view.settings().set('trim_automatic_white_space', False)\n self.view.settings().set('smart_indent', True)\n self.view.settings().set('tab_size', 4)\n self.view.settings().set('translate_tabs_to_spaces', True)\n self.view.set_syntax_file(configuration.syntax_file_path)\n self.maxDiff = None\n\n def tearDown(self):\n if self.view:\n self.view.close()\n\n def set_view_content(self, content, replace_cursor_position=False):\n self.view.run_command('__php_grammar_test_view_replace', {'text': content})\n if replace_cursor_position:\n self.view.run_command('__php_grammar_test_view_replace_cursor')\n\n def get_view_content(self, replace_cursor_position=False):\n if replace_cursor_position:\n self.view.run_command('__php_grammar_test_view_replace_cursor', {'reverse': True})\n return SublimeViewAPI(self.view).to_str()\n\n def view_to_scope_name_repr(self):\n return SublimeViewAPI(self.view).to_scope_name_repr()\n\n\nclass TestFile():\n\n def __init__(self, description, actual_content, expected_content, syntax=None):\n self.description = description\n self.actual_content = actual_content\n self.expected_content = expected_content\n self.syntax = syntax\n\n def from_file(file_name):\n with open(file_name) as f:\n actual_content = f.read()\n\n if '--TEST--' in actual_content:\n res = re.split('--([A-Z]+)--\\n', actual_content)\n\n error_message = 'invalid test file: %s' % file_name\n\n if 'TEST' not in res:\n raise RuntimeError(error_message)\n\n if 'FILE' not in res:\n raise RuntimeError(error_message)\n\n if 'EXPECT' not in res:\n raise RuntimeError(error_message)\n\n description = res[res.index('TEST') + 1].strip()\n actual_content = res[res.index('FILE') + 1].strip()\n expected_content = res[res.index('EXPECT') + 1].strip()\n syntax = None\n if 'SYNTAX' in res:\n syntax = res[res.index('SYNTAX') + 1].strip()\n else:\n description = file_name\n with open(file_name.replace('_test.php', '_test_expect.php')) as f:\n expected_content = f.read()\n syntax = None\n\n return TestFile(description, actual_content, expected_content, syntax)\n\n\nclass TestIndentation(ViewTestCase):\n\n def test_indentation_file_tests(self):\n test_files = glob.glob(os.path.join(configuration.tests_root_path, 'indentation') + '/*_test.php')\n for test_file_name in test_files:\n\n test_file = TestFile.from_file(test_file_name)\n self.set_view_content(test_file.actual_content)\n self.view.run_command('reindent', {'force_indent': True, 'single_line': False})\n self.assertEqual(test_file.expected_content, self.get_view_content(), \"\\n\\ntest:\" + test_file_name)\n\n\nclass TestSyntax(ViewTestCase):\n\n def test_syntax_file_tests(self):\n if int(sublime.version()) >= 3092:\n # newer versions use the new 
syntax test system provided by the ST core\n return\n\n test_files = glob.glob(os.path.join(configuration.tests_root_path, 'syntax') + '/lt_3092/*_test.php')\n for test_file_name in test_files:\n\n test_file = TestFile.from_file(test_file_name)\n if test_file.syntax:\n self.view.set_syntax_file(test_file.syntax)\n\n self.set_view_content(test_file.actual_content)\n\n if ':' not in test_file.expected_content:\n self.assertEqual(\n test_file.expected_content,\n self.view_to_scope_name_repr(),\n \"\\n\\ntest:\" + test_file_name\n )\n else:\n assertions = test_file.expected_content.splitlines()\n for assertion in assertions:\n if not len(assertion) > 0:\n # allow blank lines\n continue\n\n assertion = assertion.split(':')\n\n name = assertion[0]\n line = int(assertion[1])\n offset = int(assertion[2])\n selector = assertion[3]\n\n if name == 'match':\n self.assertMatchSelector(line, offset, selector)\n elif name == 'equal':\n self.assertEqualsScope(line, offset, selector)\n else:\n raise RuntimeError('Invalid syntax test file: %s' % test_file_name)\n\n def assertMatchSelector(self, line, offset, selector):\n point = self.view.text_point(line, offset)\n selector_score = self.view.score_selector(point, selector)\n actual_scope = self.view.scope_name(point).strip()\n self.assertGreater(\n selector_score,\n 0,\n 'Expected selector score greater than 0 for (line:{}, offset:{}, point:{}, selector:{}) *** ACTUAL: \"{}\"'\n .format(line, offset, point, selector, actual_scope)\n )\n\n def assertEqualsScope(self, line, offset, expected_scope):\n point = self.view.text_point(line, offset)\n actual_scope = self.view.scope_name(point).strip()\n self.assertEqual(expected_scope, actual_scope)\n\n\nclass OutputPanel(object):\n\n def __init__(self, window, name):\n self.window = window\n self.name = name\n self.view = self.window.create_output_panel(self.name)\n self.view.settings().set('word_wrap', False)\n self.view.settings().set('line_numbers', False)\n self.view.settings().set('gutter', False)\n self.view.settings().set('scroll_past_end', False)\n\n def write(self, s):\n sublime.set_timeout(lambda: self.view.run_command('append', {'characters': s}), 0)\n\n def flush(self):\n pass\n\n def show(self):\n self.window.run_command('show_panel', {'panel': 'output.' 
+ self.name})\n\n def close(self):\n pass\n\n\nclass TextTestRunner():\n\n def __init__(self, window):\n self.window = window\n self.test_loader = unittest.TestLoader()\n self.suite = unittest.TestSuite()\n self.syntax_tests_loaded = False\n self.indentation_tests_loaded = False\n self.syntax_tests_loaded = False\n\n def run(self, syntax_tests=False, indentation_tests=False):\n if not syntax_tests and not indentation_tests:\n return\n\n if indentation_tests:\n self.suite.addTest(self.test_loader.loadTestsFromTestCase(TestIndentation))\n self.indentation_tests_loaded = True\n\n if syntax_tests:\n if int(sublime.version()) < 3092:\n # newer versions use the new syntax test system provided by the ST core\n self.suite.addTest(self.test_loader.loadTestsFromTestCase(TestSyntax))\n self.syntax_tests_loaded = True\n\n self.display = OutputPanel(self.window, 'php-grammar.tests')\n self.display.show()\n\n runner = unittest.TextTestRunner(stream=self.display, verbosity=2)\n\n def run_and_display():\n\n if self.syntax_tests_loaded:\n\n import sublime_api\n\n syntax_tests = sublime.find_resources(\"syntax_test*\")\n\n tests = []\n for syntax_test in syntax_tests:\n if \"php-grammar\" in syntax_test:\n tests.append(syntax_test)\n\n self.append_string(\"Run syntax tests...\\n\")\n\n output = \"\"\n total_assertions = 0\n failed_assertions = 0\n for test in tests:\n # self.append_string('syntax test: ' + test + \"... ok\\n\")\n\n assertions, test_output_lines = sublime_api.run_syntax_test(test)\n total_assertions += assertions\n if len(test_output_lines) > 0:\n failed_assertions += len(test_output_lines)\n for line in test_output_lines:\n output += line + \"\\n\"\n\n self.append_string(output)\n\n if failed_assertions > 0:\n self.append_string(\n \"FAILED: %d of %d assertions in %d files failed\\n\" %\n (failed_assertions, total_assertions, len(tests))\n )\n else:\n self.append_string(\n \"Syntax test success: %d assertions in %s files passed\\n\" %\n (total_assertions, len(tests))\n )\n\n self.append_string(\"OK\\n\")\n\n if self.indentation_tests_loaded:\n self.append_string(\"Run indentation tests...\\n\")\n runner.run(self.suite)\n\n Thread(target=run_and_display).start()\n\n def append_string(self, string):\n self.display.view.run_command('append', {\n 'characters': string,\n 'force': True,\n 'scroll_to_end': True\n })\n\n\nclass RunPhpGrammarIndentationTests(sublime_plugin.WindowCommand):\n \"\"\"Run the indetation tests.\"\"\"\n\n def run(self):\n TextTestRunner(self.window).run(indentation_tests=True)\n\n\nclass RunPhpGrammarSyntaxTests(sublime_plugin.WindowCommand):\n \"\"\"Run the syntax tests.\"\"\"\n\n def run(self):\n TextTestRunner(self.window).run(syntax_tests=True)\n\n\nclass RunPhpGrammarTests(sublime_plugin.WindowCommand):\n \"\"\"Run all the tests.\"\"\"\n\n def run(self):\n TextTestRunner(self.window).run(syntax_tests=True, indentation_tests=True)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":14688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595753348","text":"# -*- coding:utf-8 -*-\n# ST2/ST3 compat\nfrom __future__ import print_function\nimport sublime\nimport sublime_plugin\n\nimport os\nimport re\nimport json\n\ntry:\n from latextools_utils.is_tex_file import get_tex_extensions\nexcept ImportError:\n from .latextools_utils.is_tex_file import get_tex_extensions\n\nif sublime.version() < '3000':\n # we are on ST2 and Python 2.X\n _ST3 = False\n import getTeXRoot\n from latextools_utils 
import get_setting\nelse:\n _ST3 = True\n from . import getTeXRoot\n from .latextools_utils import get_setting\n\n\n# Only work for \\include{} and \\input{} and \\includegraphics\nTEX_INPUT_FILE_REGEX = re.compile(\n r'([^{}\\[\\]]*)\\{edulcni\\\\'\n + r'|([^{}\\[\\]]*)\\{tupni\\\\'\n + r'|([^{}\\[\\]]*)\\{(?:\\][^{}\\[\\]]*\\[)?scihpargedulcni\\\\'\n + r'|([^{}\\[\\]]*)\\{(?:\\][^{}\\[\\]]*\\[)?ecruoserbibdda\\\\'\n + r'|([^{}\\[\\]]*)\\{yhpargoilbib\\\\'\n + r'|([^{}\\[\\]]*)\\{(?:\\][^{}\\[\\]]*\\[)?ssalctnemucod\\\\'\n + r'|([^{}\\[\\]]*)\\{(?:\\][^{}\\[\\]]*\\[)?egakcapesu\\\\'\n + r'|([^{}\\[\\]]*)\\{elytsyhpargoilbib\\\\'\n)\n\n# Get all file by types\ndef get_file_list(root, types, filter_exts=[]):\n path = os.path.dirname(root)\n\n def file_match(f):\n filename, extname = os.path.splitext(f)\n # ensure file has extension and its in the list of types\n if extname and not extname[1:].lower() in types:\n return False\n\n return True\n\n completions = []\n for dir_name, dirs, files in os.walk(path):\n files = [f for f in files if f[0] != '.' and file_match(f)]\n dirs[:] = [d for d in dirs if d[0] != '.']\n for f in files:\n full_path = os.path.join(dir_name, f)\n # Exclude image file have the same name of root file,\n # which may be the pdf file of the root files,\n # only pdf format.\n if os.path.splitext(root)[0] == os.path.splitext(full_path)[0]:\n continue\n\n for ext in filter_exts:\n if f.endswith(ext):\n f = f[:-len(ext)]\n\n completions.append((os.path.relpath(dir_name, path), f))\n\n return completions\n\n\ndef parse_completions(view, line):\n # reverse line, copied from latex_cite_completions, very cool :)\n line = line[::-1]\n\n # Do matches!\n search = TEX_INPUT_FILE_REGEX.match(line)\n\n installed_cls = []\n installed_bst = []\n installed_pkg = []\n input_file_types = None\n\n if search is not None:\n ( include_filter,\n input_filter,\n image_filter,\n addbib_filter,\n bib_filter,\n cls_filter,\n pkg_filter,\n bst_filter) = search.groups()\n else:\n return '', []\n\n # it isn't always correct to include the extension in the output filename\n # esp. with \\bibliography{}; here we provide a mechanism to permit this\n filter_exts = []\n\n if include_filter is not None:\n # if is \\include\n prefix = include_filter[::-1]\n # filter the . from the start of the extention\n input_file_types = [e[1:] for e in get_tex_extensions()]\n # only cut off the .tex extension\n filter_exts = ['.tex']\n elif input_filter is not None:\n # if is \\input search type set to tex\n prefix = input_filter[::-1]\n # filter the . 
from the start of the extension\n        input_file_types = [e[1:] for e in get_tex_extensions()]\n        # only cut off the .tex extension\n        filter_exts = ['.tex']\n    elif image_filter is not None:\n        # if is \\includegraphics\n        prefix = image_filter[::-1]\n        # Load image types from configurations\n        # In order to user input, \"image_types\" must be set in\n        # LaTeXTools.sublime-settings configuration file or the\n        # project settings for the current view.\n        input_file_types = get_setting('image_types', [\n            'pdf', 'png', 'jpeg', 'jpg', 'eps'\n        ])\n    elif addbib_filter is not None or bib_filter is not None:\n        # For bibliography\n        if addbib_filter is not None:\n            prefix = addbib_filter[::-1]\n        else:\n            prefix = bib_filter[::-1]\n        filter_exts = ['.bib']\n        input_file_types = ['bib']\n    elif cls_filter is not None or pkg_filter is not None or bst_filter is not None:\n        # for packages, classes and bsts\n        if _ST3:\n            cache_path = os.path.normpath(\n                os.path.join(\n                    sublime.cache_path(),\n                    \"LaTeXTools\"\n                ))\n        else:\n            cache_path = os.path.normpath(\n                os.path.join(\n                    sublime.packages_path(),\n                    \"User\"\n                ))\n\n        pkg_cache_file = os.path.normpath(\n            os.path.join(cache_path, 'pkg_cache.cache' if _ST3 else 'latextools_pkg_cache.cache'))\n\n        cache = None\n        if not os.path.exists(pkg_cache_file):\n            gen_cache = sublime.ok_cancel_dialog(\"Cache files for installed packages, \"\n                + \"classes and bibliographystyles do not exist, \"\n                + \"would you like to generate them? After generating is complete, please re-run this completion action!\"\n            )\n\n            if gen_cache:\n                sublime.active_window().run_command(\"latex_gen_pkg_cache\")\n                completions = []\n        else:\n            with open(pkg_cache_file) as f:\n                cache = json.load(f)\n\n            if cache is not None:\n                if cls_filter is not None:\n                    installed_cls = cache.get(\"cls\")\n                elif bst_filter is not None:\n                    installed_bst = cache.get(\"bst\")\n                else:\n                    installed_pkg = cache.get(\"pkg\")\n\n        prefix = ''\n    else:\n        prefix = ''\n\n    if len(installed_cls) > 0:\n        completions = installed_cls\n    elif len(installed_bst) > 0:\n        completions = installed_bst\n    elif len(installed_pkg) > 0:\n        completions = installed_pkg\n    elif input_file_types is not None:\n        root = getTeXRoot.get_tex_root(view)\n        if root:\n            completions = get_file_list(root, input_file_types, filter_exts)\n        else:\n            # file is unsaved\n            completions = []\n\n    return prefix, completions\n\ndef add_closing_bracket(view, edit):\n    caret = view.sel()[0].b\n    view.insert(edit, caret, \"}\")\n    view.sel().subtract(view.sel()[0])\n    view.sel().add(sublime.Region(caret, caret))\n\nclass LatexFillInputCompletions(sublime_plugin.EventListener):\n    def on_query_completions(self, view, prefix, locations):\n        if not view.match_selector(0, 'text.tex.latex'):\n            return []\n\n        results = []\n\n        for location in locations:\n            _, completions = parse_completions(\n                view,\n                view.substr(sublime.Region(view.line(location).a, location))\n            )\n\n            if len(completions) == 0:\n                continue\n            elif not type(completions[0]) is tuple:\n                pass\n            else:\n                completions = [\n                    # Replace backslash with forward slash to fix Windows paths\n                    # LaTeX does not support backslashes in paths\n                    os.path.normpath(os.path.join(relpath, filename)).replace('\\\\\\\\', '/')\n                    for relpath, filename in completions\n                ]\n\n            line_remainder = view.substr(sublime.Region(location, view.line(location).b))\n            if not line_remainder.startswith('}'):\n                results.extend([(completion, completion + '}') \n                    for completion in completions\n                ])\n            else:\n                results.extend([(completion, completion)\n                    for completion in completions\n                ])\n\n        if results:\n            return &#13;
(\n                results, \n                sublime.INHIBIT_WORD_COMPLETIONS |\n                sublime.INHIBIT_EXPLICIT_COMPLETIONS\n            )\n        else:\n            return []\n\nclass LatexFillInputCommand(sublime_plugin.TextCommand):\n    def run(self, edit, insert_char=\"\"):\n        view = self.view\n        point = view.sel()[0].b\n        # Only trigger within LaTeX\n        # Note using score_selector rather than match_selector\n        if not view.score_selector(point, \"text.tex.latex\"):\n            return\n\n        if insert_char:\n            # append the insert_char to the end of the current line if it\n            # is given so this works when being triggered by pressing \"{\"\n            point += view.insert(edit, point, insert_char)\n\n        do_completion = get_setting(\"fill_auto_trigger\", True)\n        \n        if not do_completion:\n            add_closing_bracket(view, edit)\n            return\n\n        prefix, completions = parse_completions(\n            view,\n            view.substr(sublime.Region(view.line(point).a, point)))\n\n        if len(completions) == 0:\n            result = []\n        elif not type(completions[0]) is tuple:\n            result = completions\n        else:\n            tex_root = getTeXRoot.get_tex_root(self.view)\n            if tex_root:\n                root_path = os.path.dirname(tex_root)\n            else:\n                print(\"Can't find TeXroot. Assuming current directory is {0}\".format(os.curdir))\n                root_path = os.curdir\n\n            result = [[\n                # Replace backslash with forward slash to fix Windows paths\n                # LaTeX does not support backslashes in paths\n                os.path.normpath(os.path.join(relpath, filename)).replace('\\\\\\\\', '/'),\n                os.path.normpath(os.path.join(root_path, relpath, filename))\n            ] for relpath, filename in completions]\n\n        def on_done(i):\n            # Doing Nothing\n            if i < 0:\n                return\n            if type(result[i]) is list: # if result[i] is a list, it comes from input, include and includegraphics\n                key = result[i][0]\n            else:\n                key = result[i]\n\n            # close bracket\n            if insert_char:\n                key += \"}\"\n\n            startpos = point - len(prefix)\n            view.run_command(\"latex_tools_replace\", {\"a\": startpos, \"b\": point, \"replacement\": key})\n            caret = view.sel()[0].b\n            view.sel().subtract(view.sel()[0])\n            view.sel().add(sublime.Region(caret, caret))\n\n        # autocomplete bracket if we aren't doing anything\n        if not result and insert_char:\n            add_closing_bracket(view, edit)\n        else:\n            view.window().show_quick_panel(result, on_done)\n","sub_path":"latex_input_completions.py","file_name":"latex_input_completions.py","file_ext":"py","file_size_in_byte":10594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"424856008","text":"\ndef merge(l1,l2):\n    count=0\n    i=0\n    while (i<=len(l1)-1) and (count<=len(l2)-1):\n        if(l1[i]<=l2[count]):\n            l3.append(l1[i])\n            i=i+1\n        else:\n            l3.append(l2[count])\n            count=count+1\n    # append the elements left over in either list\n    l3.extend(l1[i:])\n    l3.extend(l2[count:])\n    return l3\n\n\n\n\n\n# l1 is list1 and l2 is list2\nl1=[int(x)for x in input().split()]\nl2=[int(x)for x in input().split()]\nl3=list()\ns=merge(l1,l2)\nprint(s)\n\n\n# If lists are not sorted then combine those 2 lists and perform bubble sort or selection sort or any sorting algorithm","sub_path":"merge_two_sorted_lists.py","file_name":"merge_two_sorted_lists.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"61993151","text":"import numpy as np \nimport math\nimport time\n\n\ndef QPSO(swarm,localbcosts,beta,maxiter,costfunc,verbose,logfile):\n\tf = open(logfile,'a')\n\tf.truncate(0)\t\t\t\t\t\t\t\t\t\n\tnum_dim = swarm.shape[1]\n\tnum_partcles = swarm.shape[0]\n\tpos_best_g= np.zeros(num_dim)\n\tpos_mbest = np.zeros(num_dim)\n\terr_best_g= - 1\n\ti = 0;\n\tt_init = time.time()\n\tsat_count = 0\n\terr_best_g_prev = 0\n\tt_total_old = &#13;
time.time()\n\twhile (i49:\n\t\t\tprint('saturated')\n\t\t\treturn pos_best_g,err_best_g,i\n\t\tt_old = time.time()\n\t\tpos_best_g,err_best_g,swarm = costfunc(swarm,localbcosts)\n\t\tdiff = round(int(err_best_g_prev),2)-round(int(err_best_g),2)\n\t\tif int(diff)==0:\n\t\t\tsat_count+=1\n\t\telse:\n\t\t\tsat_count = 0\n\t\tpos_mbest = np.mean(swarm[:,1],axis=0) #only one column?\n\t\tc1 = np.random.random_sample((num_partcles,1))\n\t\tc2 = np.random.random_sample((num_partcles,1))\n\t\tu = np.random.random_sample((num_partcles,1))\n\t\tk = np.random.random_sample((num_partcles,1))\n\t\t\n\t\tp = (c1*swarm[:,1]+c2*pos_best_g)/(c1+c2)\n\t\tXfactor = beta*abs(pos_mbest-swarm[:,0])*np.log(1/u) #shouldn't this be mbest?\n\n\t\tswarm[:,0] = p+np.where(k>=0.5,1,-1)*Xfactor\n\t\ti=i+1\n\t\terr_best_g_prev = err_best_g\n\t\tt_new = time.time()\n\t\tif verbose: print('iter: {}, best solution: {} time elapsed in secs:{} Tot: {}'.format(i,err_best_g,float(t_new-t_old),float(t_new-t_init)))\n\t\tf.write(str(float(err_best_g)) + '\\n')\n\t\t\n\tprint('\\nFINAL SOLUTION:')\n\t#print(' > {}'.format(self.pos_best_g))\n\tprint(' > {}\\n'.format(err_best_g))\n\tt_total_new = time.time()\n\tprint('total time elapsed:{}secs'.format(t_total_new-t_total_old))\n\treturn pos_best_g,err_best_g,maxiter\n\n\n\n\n\n\n","sub_path":"qpso_vec.py","file_name":"qpso_vec.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"571616523","text":"from pyrameter.models.random_search import RandomSearchModel\n\nimport numpy as np\nfrom scipy.stats import norm\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF\n\n\nclass GPBayesModel(RandomSearchModel):\n \"\"\"Gaussian process-based hyperparameter optimizer.\n\n Based on the Spearmint\n\n Paramaters\n ----------\n id :\n \"\"\"\n\n TYPE = 'gp'\n\n def __init__(self, id=None, domains=None, results=None,\n update_complexity=True, priority_update_freq=10, n_samples=10,\n warm_up=10, **gp_kws):\n super(GPBayesModel, self).__init__(id=id,\n domains=domains,\n results=results,\n update_complexity=update_complexity,\n priority_update_freq= \\\n priority_update_freq)\n self.n_samples = n_samples\n self.warm_up = warm_up\n self.gp_kws = gp_kws\n if 'kernel' not in self.gp_kws:\n self.gp_kws['kernel'] = RBF()\n\n def generate(self):\n if len(self.results) < self.warm_up or len(self.results) % self.warm_up == 0:\n params = super(GPBayesModel, self).generate()\n else:\n vec = self.results_to_feature_vector()\n features, losses = np.copy(vec[:, :-1]), np.copy(vec[:, -1])\n #features = features.T\n losses = np.reshape(losses, (-1, 1))\n\n gp = GaussianProcessRegressor(**self.gp_kws)\n gp.fit(features, losses)\n\n potentials = np.zeros((self.n_samples, len(self.domains)))\n for i in range(self.n_samples):\n for j in range(len(self.domains)):\n val = self.domains[j].generate(index=True)\n if isinstance(val, tuple):\n val = val[1]\n potentials[i, j] += val\n\n mu, sigma = gp.predict(potentials, return_std=True)\n best = np.min(losses)\n with np.errstate(divide='ignore'):\n gamma = (mu - best) / sigma\n ei = (mu - gamma) * norm.cdf(gamma) + sigma * norm.pdf(gamma)\n ei[sigma == 0] = 0\n\n best = potentials[np.argmax(ei, axis=1)]\n\n params = np.zeros((len(self.domains),))\n for i in range(len(self.domains)):\n domain = self.domains[i]\n params[i] += domain.map_to_domain(best[i][0],\n bound=True)\n\n return 
params\n","sub_path":"pyrameter/models/gp.py","file_name":"gp.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410994039","text":"\"\"\"Setuptools installation script for triarray package.\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\nfrom distutils.util import convert_path\n\n\n# Directory of script\nroot_dir = os.path.dirname(__file__)\n\n\n# Get package version without importing it\nversion_ns = dict()\nwith open(convert_path('triarray/version.py')) as fobj:\n\texec(fobj.read(), version_ns)\nversion = version_ns['__version__']\n\n\n# Dynamic download URL based off current version - git tag should match\ndownload_url = (\n\t'https://github.com/jlumpe/triarray/archive/{}.tar.gz'\n\t.format(version)\n)\n\n\n# Read readme file for long description\nwith open(os.path.join(root_dir, 'README.md')) as fobj:\n\tlong_description = fobj.read()\n\n\nsetup(\n\tname='triarray',\n\tversion=version,\n\tdescription='Tools for working with symmetric matrices in non-redundant format.',\n\tlong_description=long_description,\n\tauthor='Jared Lumpe',\n\turl='https://github.com/jlumpe/triarray',\n\tdownload_url=download_url,\n\tlicense='MIT',\n\tclassifiers=[\n\t\t'Development Status :: 4 - Beta',\n\t\t'Intended Audience :: Developers',\n\t\t'Intended Audience :: Science/Research',\n\t\t'License :: OSI Approved :: MIT License',\n\t\t'Operating System :: OS Independent',\n\t\t'Programming Language :: Python :: 3',\n\t\t'Programming Language :: Python :: 3.5',\n\t\t'Programming Language :: Python :: 3.6',\n\t\t'Topic :: Utilities',\n\t\t'Topic :: Scientific/Engineering',\n\t\t'Topic :: Scientific/Engineering :: Mathematics',\n\t],\n\tkeywords='numpy array matrix symmetric pairwise distance similarity',\n\tpackages=find_packages(),\n\tinstall_requires=[\n\t\t'numpy>=1.11',\n\t\t'numba>=0.30',\n\t],\n)\n","sub_path":"pypi_install_script/triarray-0.2.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385571514","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth import login, logout\nfrom django.contrib import messages\nfrom .models import Category, Article\nfrom .forms import *\n\n# Category.objects.annotate(num_products=Count('products'))\n\ndef index(request, slug=None):\n categories = Category.objects.all()\n context = {\n 'categories': categories,\n }\n if not slug:\n articles = Article.objects.all()\n else:\n category = get_object_or_404(Category, slug=slug)\n articles = Article.objects.filter(category=category)\n context = {**context, 'articles': articles}\n return render(request, 'index.html', context)\n\n\ndef add_article(request):\n return render(request, 'add_article.html')\n\n\ndef article_details(request, article_id=None, slug=None):\n article = get_object_or_404(Article, pk=article_id, slug=slug)\n categories = Category.objects.all()\n context = {\n 'article': article,\n 'categories': categories\n }\n return render(request, 'article_details.html', context)\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = UserLoginForm(data=request.POST)\n if form.is_valid():\n messages.success(request, 'Вы успешно выполнили вход в систему')\n user = form.get_user()\n login(request, user)\n return redirect('/')\n else:\n messages.error(request, 'Вы ввели некорректные данные')\n else:\n form = 
UserLoginForm()\n return render(request, 'login.html', {'form': form})\n\n\ndef user_register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Вы успешно зарегистрировались')\n return redirect('/login')\n else:\n messages.error(request, 'Ошибка регистрации')\n else:\n form = UserRegisterForm()\n return render(request, 'register.html',{'form': form})\n\n\ndef user_logout(request):\n logout(request)\n return redirect('/login')","sub_path":"python/django-blog-heroku/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"168319153","text":"def largest_palindrome(string):\n\tmaxLength = 1\n\n\tstart = 0\n\tlength = len(string)\n\n\tlow = 0\n\thigh = 0\n\n\tfor i in range(1, length):\n\t\tlow = i - 1\n\t\thigh = i\n\t\twhile low >= 0 and high < length and string[low] == string[high]:\n\t\t\tif high - low + 1 > maxLength:\n\t\t\t\tstart = low\n\t\t\t\tmaxLength = high - low + 1\n\t\t\tlow -= 1\n\t\t\thigh += 1\n\n\t\tlow = i - 1\n\t\thigh = i + 1\n\t\twhile low >= 0 and high < length and string[low] == string[high]:\n\t\t\tif high - low + 1 > maxLength:\n\t\t\t\tstart = low\n\t\t\t\tmaxLength = high - low + 1\n\t\t\tlow -= 1\n\t\t\thigh += 1\n \n\treturn(string[start:start + maxLength])\n\n\nif __name__ == '__main__':\n string = input()\n print(str(largest_palindrome(string))) \n \n\n\n","sub_path":"Dynamic Programming/largest_palindromic_substring.py","file_name":"largest_palindromic_substring.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"287016190","text":"\"\"\"\n1385. 
Lucky Number 8\n8 is Xiao Jiu's lucky number, and Xiao Jiu wants to know how many of the numbers from 1 to n contain the digit 8.\n\nExample\nGiven n = 20, return 2.\n\nExplanation:\nOnly 8 and 18 contain 8.\nGiven n = 100, return 19.\n\"\"\"\n\n\nclass Solution:\n    \"\"\"\n    @param n: count lucky numbers from 1 ~ n\n    @return: the number of lucky numbers\n    \"\"\"\n    # time:2038 ms\n    def luckyNumber(self, n):\n        # Write your code here\n        count = 0\n        # count over 1..n inclusive, as the problem statement asks\n        for i in range(1, n + 1):\n            if '8' in str(i):\n                count += 1\n        return count\n","sub_path":"简单入门/1385.幸运数字8.py","file_name":"1385.幸运数字8.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"55150252","text":"'''\n    NAMA = RIVALDO ISMIR\n    NIM = A11.2019.12106\n    KELP = A11.4118\n    TGL = 24/11/2019\n'''\nbat = int(input(\"Masukan Angka Pembuat Pola : \")) # ask the user for a number and store it in the variable bat\nbar = 1 # initialise the variable bar with the value 1\nx = bat # initialise the variable x with the value of bat\nwhile bar <= bat: # loop bar from 1 up to bat\n    kol = 1 # initialise the variable kol with the value 1\n    while kol <= bat: # loop kol from 1 up to bat\n        if(kol == x): # check whether kol equals x\n            print(bar, end=\" \") # print bar with end=\" \" so the next print stays on the same line, separated by a space\n            x -= 1 # decrement x by 1 on each pass\n        elif(bar==kol): # check whether bar equals kol\n            if bar > 10: # check whether bar is still greater than 10\n                print(bar, end=\" \") # print bar with end=\" \" so the next print stays on the same line, separated by a space\n            else: # otherwise run the else branch\n                print(bar, end=\" \") # print bar with end=\" \" so the next print stays on the same line, separated by a space\n        else: # otherwise run the else branch\n            print(\" \", end=\" \") # print a blank space with end=\" \" so the next print stays on the same line, separated by a space\n        kol += 1 # increment kol by 1 on each pass of the inner loop\n    print(\"\") # print an empty string to start a new row, since the previous prints used end=\" \"\n    bar += 1 # increment bar by 1 on each pass of the outer loop\n","sub_path":"daspro_8/tugasku/nloop14.py","file_name":"nloop14.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"45491014","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.db.models.functions import Lower\nfrom .models import Product, Category\n\n\n# Create your views here.\n\n\ndef all_products(request):\n    \"\"\" A view to search all products, including sorting and search queries \"\"\"\n\n    products = Product.objects.all()\n    # initialising variable query\n    query = None\n    categories = None\n    sort = None\n    direction = None\n\n    ''' The double underscore syntax\n    is common when making queries in django (e.g. 
category__name__in)\n    Using it here means we're looking for the name field of the category model.\n    And we're able to do this because category and\n    product are related with a foreign key.'''\n\n    if request.GET:\n        if 'sort' in request.GET:\n            # store value of URL in the variable sortkey\n            sortkey = request.GET['sort']\n            # replace value of sort with value above\n            sort = sortkey\n            # allow case insensitive sort on the name field\n            if sortkey == 'name':\n                sortkey = 'lower_name'\n                products = products.annotate(lower_name=Lower('name'))\n            if sortkey == 'category':\n                sortkey = 'category__name'\n\n            if 'direction' in request.GET:\n                direction = request.GET['direction']\n                if direction == 'desc':\n                    sortkey = f'-{sortkey}'\n\n            products = products.order_by(sortkey)\n\n        # checking for category in get request (URL)\n        if 'category' in request.GET:\n            # store value of URL in the variable categories, remove comma\n            categories = request.GET['category'].split(',')\n            # search for products whose category name is in the list\n            products = products.filter(category__name__in=categories)\n            # display categories the user selected\n            categories = Category.objects.filter(name__in=categories)\n\n\n        # if q is in the request, store it in a variable query\n        if 'q' in request.GET:\n            query = request.GET['q']\n            # if query is blank, return error message, then redirect back to product url\n            if not query:\n                messages.error(request, \"You didn't enter any search criteria\")\n                return redirect(reverse('products'))\n\n            # if query has a value, search for product and description using Q\n            # the i in front of contains makes the lookup case-insensitive\n            queries = Q(name__icontains=query) | Q(description__icontains=query)\n            products = products.filter(queries)\n\n    current_sorting = f'{sort}_{direction}'\n\n    context = {\n        'products': products,\n        'search_term': query,\n        'current_categories': categories,\n        'current_sorting': current_sorting,\n    }\n\n    return render(request, 'products/products.html', context)\n\n\ndef product_detail(request, product_id):\n    \"\"\" A view to show individual product details \"\"\"\n\n    product = get_object_or_404(Product, pk=product_id)\n\n    context = {\n        'product': product,\n    }\n\n    return render(request, 'products/product_detail.html', context)\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"367708390","text":"import pygame, sys\nfrom pygame.locals import *\n\nclass ChatBox(object):\n    '''chatBox(scale=1, parent=None, sizeRect=None)\n    Create a chatbox with the given scale factor, parent\n    rectangle, and if parent rectangle is given sizeRect\n    is the size of the parent rectangle'''\n    \n    def __init__(self, scale=1, parent=None, sizeRect=None):\n        self.rightEdge = 1920*scale\n        self.bottomEdge = 1080*scale\n        self.width = - sizeRect.left + sizeRect.right\n        self.height = sizeRect.bottom - sizeRect.top\n        self.lineWidth = self.width\n        self.lineHeight = self.height/9\n        self.scale = scale\n        self.chatLines = [] #List of each line entered into chat\n        if parent is not None:\n            self.area = parent.subsurface(sizeRect)\n\n        self.drawChat()\n\n\n    def drawChat(self):\n        '''drawChat()\n        drawChat creates the entered chat area, and displays\n        the blank chatbox'''\n        self.area.fill((255,255,255))\n        self.chatEnt = ChatEnter(self.area,Rect(0, self.lineHeight*8,\n                            self.lineWidth, self.lineHeight), None)\n        '''for i in range(7,-1,-1):\n            line_rect = Rect(3, i*self._line_height+1,\n                                self._line_width-3, 
self._line_height-1)\n try:\n ChatLine( self._area, line_rect, self._chatlines[i])\n except IndexError:\n ChatLine(self._area, line_rect, \"Line {}\".format(i+1))'''\n pygame.draw.rect(self.area,(0,0,0), (0,0, self.area.get_width(),\n self.area.get_height()), 2)\n def redraw(self):\n self.drawChat()\n lineNum = 0\n for i in range(7):\n lineRect = Rect(7, lineNum*self.lineHeight+5,\n self.lineWidth-15, self.lineHeight-1)\n try:\n ChatLine( self.area, lineRect, self.chatLines[i])\n except IndexError:\n ChatLine(self.area, lineRect)\n lineNum += 1\n \n \n def getLeft(self):\n '''getLeft() returns the location of left end of the chatbox'''\n #print(self._area.get_rect().left)\n #print(self._RightEdge - self._area.get_width())\n return self.rightEdge - self.area.get_width()\n\n def getRight(self):\n '''getRight() returns the location of the right end of the chatbox'''\n #print(self._RightEdge)\n return self.rightEdge\n\n def getTopType(self):\n '''getTopType() returns the location of the top of the chatbox'''\n #print(self._BottomEdge - self._chatenter.getArea().get_rect().top)\n return self.bottomEdge - self.chatEnt.getArea().get_height()\n\n def getBottomType(self):\n '''getBottomType() returns the location of the bottom of the chatbox'''\n #print(self._BottomEdge)\n return self.bottomEdge\n\n def typeText(self, text):\n '''typeText(text) appends new text to the enter chat box'''\n self.chatEnt.appendText(text)\n\n def deleteText(self):\n '''deleteText() removes one character from the enter chat box'''\n self.chatEnt.removeText()\n\n def submitText(self):\n '''submitText() gets the text from the enter chat box, and displays\n the most recent seven lines entered by the user'''\n newText = self.chatEnt.getText()\n #print(len(newText))\n for i in newText:\n self.chatLines.append(i)\n self.chatEnt.setText(\"\")\n lineNum = 0\n for i in range(max(len(self.chatLines)-7, 0), len(self.chatLines)):\n lineRect = Rect(7, lineNum*self.lineHeight+5,\n self.lineWidth-15, self.lineHeight-1)\n ChatLine( self.area, lineRect, self.chatLines[i])\n lineNum += 1\n \n \nclass ChatLine(object):\n def __init__(self, parent, rect ,string= \" \"):\n '''ChatLine(parent, rect, string = \" \")\n ChatLine takes a parent surface, a rectangular area\n and a string, then displays the string in the given\n rectangular area.'''\n self.text = string.format(len(string))\n self.width = rect.right - rect.left \n self.height = rect.bottom - rect.top\n self.area = parent.subsurface(rect)\n self.area.fill((255,255,255))\n self.font = pygame.font.Font( None, self.height)\n self.textArea = self.font.render( self.text, 1, (0,0,0) )\n self.area.blit(self.textArea,(0,0))\n \n\n def getArea(self):\n '''getArea() returns the size of the area the text is\n being displayed in'''\n return self.area\n\n \n\n\n\nclass ChatEnter(object):\n def __init__ (self, parent,rect, action):\n '''ChatEnter(parent, rect, action)\n ChatEnter takes a parent surface, and a rectangular area\n which text will be displayed in'''\n self.lineIndex = 0\n self.currLineLen = 0\n self.height = rect.bottom - rect.top\n self.font = pygame.font.Font( None, self.height) #For testing width\n self.lines = ['']\n self.area = parent.subsurface(rect)\n self.area.fill((0xFF, 0xFF, 0xFF, 0xFF))\n pygame.draw.rect(self.area,(0,0,0), (0,0, self.area.get_width(),\n self.area.get_height()), 3)\n\n\n def chat(self):\n #Probably needs removal\n self.chatLines.append(chatLine(self.text))\n\n def curlyRemove(self):\n '''curlyRemove() returns the total size of doubled\n curly braces that 
were used as escape characters, so\n        that they can be ignored in determining the length of\n        a line of text.'''\n        sizeL = 0\n        sizeR = 0\n        for i in self.lines[self.lineIndex]:\n            if i == '{':\n                sizeL += self.font.size('{')[0]\n            elif i == '}':\n                sizeR += self.font.size('}')[0]\n        sizeTot = sizeL/2 + sizeR/2\n        return sizeTot\n\n\n    def appendText(self, newText):\n        '''appendText(newText) takes a new string, and adds it to\n        the current text being displayed'''\n        self.lines[self.lineIndex] += newText\n        self.currLineLen += len(newText)\n        if self.lines[self.lineIndex][-1] in ['{', '}']:\n            self.currLineLen -= 1 #Ignoring doubled curly braces needed as escape characters\n        if self.font.size(self.lines[self.lineIndex])[0] - self.curlyRemove() > self.area.get_width()-5: #Reached end of line\n            self.currLineLen = 0\n            lastSpace = self.lines[self.lineIndex].rfind(' ')\n            if lastSpace > -1: #If there was a space, wrap any text after the last space to a new line\n                newLineStart = self.lines[self.lineIndex][lastSpace+1:]\n                self.lines[self.lineIndex] = self.lines[self.lineIndex][:lastSpace]\n                self.lineIndex += 1\n                self.lines.append(newLineStart)\n                self.currLineLen = len(newLineStart)\n            else: #If no space in the current line, simply wrap the last character\n                nextLine = self.lines[self.lineIndex][-1]\n                if self.lines[self.lineIndex][-1] in ['{', '}']: #Double braces for escape character\n                    self.lines[self.lineIndex] = self.lines[self.lineIndex][:len(self.lines[self.lineIndex])-2]\n                    self.lines.append(nextLine + nextLine)\n                else:\n                    self.lines[self.lineIndex] = self.lines[self.lineIndex][:len(self.lines[self.lineIndex])-1]\n                    self.lines.append(nextLine)\n                self.currLineLen = 1\n                self.lineIndex += 1\n        \n        ChatLine(self.area, self.area.get_rect(), self.lines[self.lineIndex])\n        pygame.draw.rect(self.area,(0,0,0), (0,0, self.area.get_width(),\n                                self.area.get_height()), 3)\n\n    def removeText(self):\n        '''removeText() deletes a character from the existing text being entered'''\n        if len(self.lines[self.lineIndex]) < 1 and self.lineIndex == 0: #If no text left to delete\n            return\n        if self.lines[self.lineIndex][-1] in ['{', '}']: #Delete two characters if a brace\n            self.lines[self.lineIndex] = self.lines[self.lineIndex][0:-2]\n        else: #Otherwise simply delete the character\n            self.lines[self.lineIndex] = self.lines[self.lineIndex][0:-1]\n        self.currLineLen -= 1\n        if len(self.lines[self.lineIndex]) < 1 and self.lineIndex > 0: #Remove the line if backspaced to beginning of line\n            if self.lineIndex >= 0:\n                self.lineIndex -= 1\n            self.lines.pop()\n        ChatLine(self.area, self.area.get_rect(), self.lines[self.lineIndex])\n        pygame.draw.rect(self.area,(0,0,0), (0,0, self.area.get_width(),\n                                self.area.get_height()), 3)\n\n    def getArea(self):\n        '''getArea() returns the size of the enter chat box'''\n        return self.area\n\n    def getText(self):\n        '''getText() returns the lines of text needing to be entered into chat'''\n        return self.lines\n\n    def setText(self, newText):\n        '''setText(newText) takes a string and sets the current text field to that string'''\n        self.lines = [newText]\n        self.lineIndex = 0\n        self.currLineLen = 0\n        ChatLine(self.area, self.area.get_rect(), self.lines[self.lineIndex])\n        pygame.draw.rect(self.area,(0,0,0), (0,0, self.area.get_width(),\n                                self.area.get_height()), 3)\n    \n    \n\n\ndef main():\n    pass\n\n\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"ChatBoxCopy.py","file_name":"ChatBoxCopy.py","file_ext":"py","file_size_in_byte":9514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"572587792","text":"import glob\nfrom distutils.core import setup\n\ncpca_cpp_so = glob.glob('cpca_cpp*.so')[0]\nccpca_cpp_so = glob.glob('ccpca_cpp*.so')[0]\n\nsetup(\n name='ccpca',\n version=0.13,\n packages=[''],\n package_dir={'': '.'},\n package_data={'': [cpca_cpp_so, ccpca_cpp_so]},\n py_modules=['cpca_cpp', 'ccpca_cpp', 'cpca', 'ccpca'])\n","sub_path":"server/ccpca/ccpca/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545009866","text":"\"\"\"_version.py\n\nCanonical location for authorship information\n__version__ is a PEP-386 compliant version string,\nmaking use of distutils.version.StrictVersion\n\"\"\"\n\nfrom distutils.version import LooseVersion\n\n__author__ = \"Michael Greene\"\n__copyright__ = \"Copyright (c) 2014 Michael Greene\"\n__license__ = \"MIT\"\n__version__ = str(LooseVersion(\"0.1.0\"))\n","sub_path":"pgpy/_author.py","file_name":"_author.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"652940868","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, TextAreaField, BooleanField, SelectField, \\\n SubmitField, SelectMultipleField\nfrom wtforms.validators import Required, Length, Email, Regexp\nfrom wtforms import ValidationError\nfrom ..models import Role, User\nfrom flask_table import Table, Col\n\n\nclass EditUserForm(FlaskForm):\n name = StringField('Real name', validators=[Length(1, 64),\n Regexp('^[A-Za-z][A-Za-z\\s]*$', 0,\n 'Names must have only letters, '\n 'or space')])\n email = StringField('Email', validators=[Required(), Length(1, 64), Email()])\n device = TextAreaField('Device')\n submit = SubmitField('Submit')\n\n\nclass EditDeviceForm(FlaskForm):\n name = StringField('Device name', validators=[Required(), Length(1, 64)])\n users = TextAreaField('User names(separated by comma)')\n status = SelectField('Device status', choices=[('Normal', 'Normal'), ('Broken', 'Broken'), ('Fixing', 'Fixing'), ('Terminated', 'Terminated')])\n device_type = SelectField('Device type')\n details = TextAreaField('Details')\n submit = SubmitField('Submit')\n\n def __init__(self, device_types, *args, **kwargs):\n super(EditDeviceForm, self).__init__(*args, **kwargs)\n self.device_type.choices = [(device[\"type\"], device[\"type\"]) for device in device_types]\n\n\nclass ItemTable(Table):\n name = Col('Name')\n id = Col('id')\n classes = ['table', 'table-bordered']\n status = Col('status')\n details = Col('details')\n # users = Col('users')\n\n\nclass Item(object):\n def __init__(self, name, id, status, details):\n self.name = name\n self.id = id\n self.status = status\n self.details = details\n # ud = db.session.query(user_device).filter_by(device_id=id).all()\n # self.users = ud\n\n\nclass DeleteDeviceTypeForm(FlaskForm):\n device_type = SelectField('Device type')\n submit = SubmitField('Delete Confirm')\n\n def __init__(self, device_types, *args, **kwargs):\n super(DeleteDeviceTypeForm, self).__init__(*args, **kwargs)\n self.device_type.choices = [(device[\"type\"], device[\"type\"]) for device in device_types]\n","sub_path":"test_device_appointment_system/app/main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"330299792","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 19 
11:12:06 2021\n\n@author: gualandi\n\"\"\"\n\nimport pyomo\n\nfrom pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory\nfrom pyomo.environ import maximize, Binary, RangeSet, PositiveIntegers, NonNegativeReals\n\n# Problem Description\n\"\"\"\nThe industrial steel can be easily recycled, since it is possible to burn any\nscrap to get only liquid steel (without plastics, glasses, ...).\nHowever, it is hard to separate each single metal presents in the scrap,\nand as a consequence, beside iron, we can get also chromium, nichel, and\nother impurities in the liquid steel.\nDepending on the type of production, some metals are desiderable, while others\nare not. For example, the stainless steel 18/10 must have 18% of chromium and\n10% of nichel (consider that chromium and nichel are very expensive, much more \nthan the steel itself). \n\nSuppose that the Rossi's Steel company of Voghera can choose to buy some iron\nscrap block with different properties regarding the different metals contained in \neach block. The company want to produce at minimum cost 100 quintals of stainless \nsteel 18/10, which must have at least 65% of iron and at most 1% of \nimpurity materials. Which fraction of each block is going to buy?\n\nThe data of the problem are given below.\n\n\"\"\"\n\n# Data of the problem (in theory, read data from .csv or excel file)\n\n# Blocks you can byu\nBlocks = ['Block1', 'Block2', 'Block3', 'Block4', 'Block5', 'Block6']\n\nWeights = [30, 90, 50, 70, 60, 50] # In quintal\nCosts = [50, 100, 80, 85, 92, 115] # Thousand of euros\n\n# Componets of metal in each block (given in percetange)\nCs = [\n [93, 76, 74, 65, 72, 68], # Ferro\n [5, 13, 11, 16, 6, 23], # Cromo\n [0, 11, 12, 14, 20, 8], # Nichel\n [2, 0, 3, 5, 2, 1]\n] # Impurità\n\n# Create concrete model\nm = ConcreteModel()\n\n# Set of indices\nm.I = RangeSet(0, len(Blocks) - 1)\n\n\n# Variables\ndef fb(m, i):\n return 0, Weights[i]\n\n\nm.x = Var(m.I, domain=NonNegativeReals, bounds=fb)\n\n# Objective Function\nm.obj = Objective(expr=sum(Costs[i] * m.x[i] for i in m.I))\n\n# Production Constraints\nm.c1 = Constraint(expr=sum(Cs[0][i] / 100 * m.x[i] for i in m.I) >= 65)\n\nm.c2 = Constraint(expr=sum(Cs[1][i] / 100 * m.x[i] for i in m.I) == 18)\n\nm.c3 = Constraint(expr=sum(Cs[2][i] / 100 * m.x[i] for i in m.I) == 10)\n\nm.c4 = Constraint(expr=sum(Cs[3][i] / 100 * m.x[i] for i in m.I) <= 1)\n\n# Overall production\nm.c5 = Constraint(expr=sum(m.x[i] for i in m.I) == 100)\n\n# Write the LP model in standard format\nm.write(\"misc.lp\")\n\n# Solve the model\nsol = SolverFactory('glpk').solve(m, tee=True)\n#sol = SolverFactory('gurobi').solve(m, tee=True)\n\n# CHECK SOLUTION STATUS\n\n# Get a JSON representation of the solution\nsol_json = sol.json_repn()\n\nif sol_json['Solver'][0]['Status'] == 'ok':\n print(\"Optimal solution value:\", round(m.obj(), 1))\n\n print(\"\\tValues of the decision variables:\")\n for i, b in enumerate(Blocks):\n print(b, m.x[i]())\nelse:\n print('Error in solving the model')\n","sub_path":"aa2022/scripts/blending_LP.py","file_name":"blending_LP.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"230141601","text":"from .dst_dprint import *\nfrom .dst_scene_plot import *\nfrom .dst_lc_pulse import *\nfrom .dst_sorted_lcs import *\nfrom .dst_deltat_hist import *\nfrom .dst_toff_overlay import *\nfrom .dst_toff_overlay_we import *\nfrom .dst_toff_subsample import *\nfrom .dst_large_steps 
import *\nfrom .dst_toff_regions import *\nfrom .dst_surge import *\nfrom .dst_iid_draw import *\n\n# -------- \n# Generate the figure for the first lightscape project\n#\n# 2014/03/13 - Written by Greg Dobler (CUSP/NYU)\n# -------- \n\ndef lightscape_figures(figs=None):\n\n    # -- defaults\n    if figs is None:\n        figs=[1,2,3,4,6,7,8,9,10,11,12,13,14]\n    elif not isinstance(figs, list):\n        figs = [figs]\n\n\n    # -- generate the figures for the main text\n    # 1. scene from MetroTech (a) and lightcurve (b)\n    if 1 in figs:\n        scene_plot(xsize=183., dpi=150, otype='pdf') # mm\n\n\n    # 2. pulse\n    if 2 in figs:\n        lc_pulse(xsize=183., dpi=150, otype='pdf') # mm\n\n\n    # 3. sorted (Mon/Mon, Tue/Mon, & Tue/Tue) light curve matrices\n    if 3 in figs:\n        sorted_lcs(xsize=89., dpi=150, otype='pdf') # mm\n\n\n    # 4. single source dynamics histogram\n    if 4 in figs:\n        deltat_hist(xsize=89., dpi=150, otype='pdf') # mm\n\n\n    # -- generate the figures for the supplementary data\n    # 6. overlay of weekday t_off curves\n    if 6 in figs:\n        toff_overlay(xsize=183., dpi=150, otype='eps')\n\n\n    # sorted lcs Mon->Mon\n    # 7. sorted +7 days light curve matrices\n    if 7 in figs:\n        sorted_lcs(9, 16, obase='sorted_lcs_09_16', xsize=89., dpi=150, \n                   otype='eps')\n\n\n    # 8. random sub-samplings of the data\n    if 8 in figs:\n        toff_subsample(xsize=89., dpi=150, otype='eps')\n\n\n    # 9. threshold on transition height\n    if 9 in figs:\n        large_steps(xsize=183., dpi=150, otype='eps')\n\n\n    # 10. subdivide into regions\n    if 10 in figs:\n        toff_regions(xsize=164., dpi=150, otype='eps')\n\n\n    # 11. overlay of weekday commercial t_off curves\n    if 11 in figs:\n        toff_overlay(xsize=183., dpi=150, otype='eps', commercial=True)\n\n\n    # 12. overlay of weekend t_off curves\n    if 12 in figs:\n        toff_overlay_we(xsize=183., dpi=150, otype='eps')\n\n\n    # 13. surge plot\n    if 13 in figs:\n        surge(xsize=183., dpi=150, otype='eps')\n\n\n    # 14. 
IID draw\n if 14 in figs:\n iid_draw(xsize=183., dpi=150, otype='eps')\n","sub_path":"py/dst_lightscape_figures.py","file_name":"dst_lightscape_figures.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"529668549","text":"'''\nCreated on Dec 5, 2017\n\n@author: iuan\n'''\nfrom domain.foodProduct import FoodProduct\nfrom infrastructure.foodProductRepository import FoodProductRepository\nfrom application.foodProductController import FoodProductController\nfrom ui.console import FoodProductUI\n\ndef start():\n '''\n Starts the program.\n IN: - \n OUT: - \n CONDIS: -\n '''\n repo = FoodProductRepository()\n repo.addFood( FoodProduct(\"paine\", 5.0) )\n repo.addFood( FoodProduct(\"dulceata\", 0.0) )\n repo.addFood( FoodProduct(\"salam\", 5) )\n repo.addFood( FoodProduct(\"carne\", 4.9) )\n\n #create controller, provide repository\n ctrl = FoodProductController(repo)\n \n #create ui, provide controller\n ui = FoodProductUI(ctrl)\n ui.run()\n \nstart()","sub_path":"Fundamentals of Programming/Lab 10 Test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362882152","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\n\nclass Question(models.Model):\n title = models.TextField()\n content = models.TextField()\n author = models.ForeignKey(User)\n timestamp = models.DateTimeField(default=datetime.now, blank=True)\n\n def __unicode__(self):\n return self.title\n\n class Meta:\n db_table = 'questions'\n ordering = ['-timestamp']\n\n\nclass Comment(models.Model):\n content = models.TextField()\n question = models.ForeignKey(Question)\n author = models.ForeignKey(User)\n timestamp = models.DateTimeField(default=datetime.now, blank=True)\n\n class Meta:\n db_table = 'comments'\n ordering = ['-timestamp']\n\n","sub_path":"faq/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"123097467","text":"from base.models import BaseAction\nfrom serializers import NewClientSerializer\nfrom django.conf import settings\n\n\nclass NewClient(BaseAction):\n \"\"\"\"\"\"\n\n required = [\n 'business_name',\n 'billing_address',\n 'billing_phone',\n 'billing_email'\n ]\n\n def _pre_approve(self):\n self.action.valid = True\n self.action.need_token = False\n self.action.save()\n return []\n\n def _post_approve(self):\n self.action.valid = True\n self.action.need_token = False\n self.action.save()\n return []\n\n def _submit(self, token_data):\n pass\n\n\naction_classes = {\n 'NewClient': (NewClient, NewClientSerializer)\n}\n\nsettings.ACTION_CLASSES.update(action_classes)\n","sub_path":"stacktask/openerp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"526160540","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport ganet_lib\nimport numpy as np\nimport utils\nfrom torch.utils.data import DataLoader, random_split, Subset\nfrom CSPN.cspn import CSPN\nfrom GANet.GANet_small import GANetSmall\nimport os\n\ndef l1_normalize():\n x = torch.randn((3, 3)).cuda()\n y = x.clone()\n print(x)\n x = F.normalize(x, p=1, dim=0)\n # ganet_lib.cuda_test(x)\n print(x)\n 
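# after F.normalize(..., p=1, dim=0) each column of x has unit L1 norm, so this should print ~1.0\n    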
print(x[:, 0].abs().sum())\n\n    for i in range(3):\n        d = y[:, i].abs().sum()\n        y[:, i] /= d\n\n    print(y)\n\ndef train_test_split(full_dataset):\n    x = torch.arange(0, 9).view(3, 3)\n    print(x[0])\n\n    train_size = int(0.8 * len(full_dataset))\n    test_size = len(full_dataset) - train_size\n\n    print('Train test size:', (train_size, test_size))\n\n    train_dataset, test_dataset = random_split(full_dataset, [train_size, test_size])\n\ndef epe_loss():\n    x = torch.ones((4, 3))\n    loss = utils.EPE_loss(x, 0)\n    print(loss)\n\ndef deconv():\n    # size = stride * (x - 1) + k - 2*p\n    x = torch.ones((1, 1, 5))\n    w = torch.ones((1, 1, 4))\n    y = F.conv_transpose1d(x, w, stride=2, padding=1)\n    print(y[0, 0])\n\ndef test_interpolate():\n    x = torch.arange(0, 9, dtype=torch.float).view(1, 1, 3, 3)\n    print(x)\n    y = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)\n    print(y)\n    y = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)\n    print(y)\n\ndef test_pad_memory():\n    x = torch.ones((2, 32, 1000, 1000), dtype=torch.float)\n    li = []\n    for i in range(10):\n        os.system('nvidia-smi')\n        t = F.pad(x, [1, 1, 1, 1])\n        li.append(t)\n\ndef test_probability_volume():\n    x = torch.zeros((2, 3, 5, 5), dtype=torch.float)\n    y = torch.zeros((2, 5, 5), dtype=torch.float)\n\n    y[0, 0, 0] = 0.8\n    y = y.unsqueeze(1)\n    index = y.long()\n    mid = y - index\n\n    assert torch.all(y < 3 - 1), f'disparity must be lower than max disparity {3 - 1}'\n\n    print(index)\n    x.scatter_(1, index, mid)\n    x.scatter_(1, index + 1, 1 - mid)\n    print(x[0, :, 0, 0].view(-1))\n\n    # x = torch.zeros((5, 5), dtype=torch.float)\n    # index = torch.zeros((5, 1), dtype=torch.long)\n    # index[0] = 3\n    # index[1] = 2\n    #\n    # print(index)\n    # x.scatter_(1, index, 1)\n    # print(x)\n\ndef test_cross_entropy():\n    y = torch.randn((10,), dtype=torch.float)\n    t = torch.zeros((10,), dtype=torch.float)\n\n    y = F.softmax(y, dim=0)\n    t[0] = 1\n\n    epsilon = 1e-06\n    loss = torch.sum(- t * torch.log(y + epsilon))\n    print(y)\n    print(t)\n    print(loss)\n\ndef test_press_probability():\n    x = torch.randn((2, 10, 5, 5), dtype=torch.float)\n    y = torch.zeros((2, 5, 5), dtype=torch.long)\n    mask = torch.zeros((2, 10, 5, 5), dtype=torch.float)\n\n    kernel = 3\n\n    p = torch.zeros((1, kernel, 1, 1), dtype=torch.long)\n    p[0, :, 0, 0] = torch.arange(0, kernel)\n    p = p.repeat(2, 1, 5, 5)\n\n    y[0, 0, 0] = 5\n    y[0, 1, 1] = 9\n\n    mid = (y - kernel//2).unsqueeze(1)\n    p = p + mid\n\n    p[p <= 0] = 0\n    p[p >= 10] = 9\n\n    print(p[0, :, 0, 0])\n\n    mask.scatter_(1, p, 1)\n    x2 = x*mask\n    print(x[0, :, 0, 0].view(-1))\n    print(x2[0, :, 0, 0].view(-1))\n    print(x2[0, :, 1, 1].view(-1))\n\ndef test_mask_cost_volume():\n    x = torch.randn((2, 10, 5, 5), dtype=torch.float)\n    y = torch.zeros((2, 5, 5), dtype=torch.long)\n\n    y[0, 0, 0] = 5\n    y[0, 1, 1] = 5\n    mask = y == 5\n    x = x.permute(1, 0, 2, 3)\n    print(x[:, mask])\n\ndef test_confidence():\n    x = torch.randn((2, 10, 5, 5), dtype=torch.float)\n    y = torch.zeros((2, 5, 5), dtype=torch.long)\n    mask = torch.zeros((2, 10, 5, 5), dtype=torch.bool)\n\n    y[0, 0, 0] = 1\n    y[0, 0, 1] = 0\n    y = y.unsqueeze(1)\n    mask.scatter_(1, y, 1)\n\n    x_mask = (x*mask).sum(dim=1)\n\n    print(mask[0, :, 0, 0])\n    print(x[0, :, 0, 0])\n    print(x[0, :, 0, 1])\n    print(x_mask)\n\ntest_confidence()\n\n\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"520816910","text":"from django.conf.urls.defaults import patterns, include, 
url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^api/', include('dhapi.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n \n (r'^(?P.*)$', 'django.views.static.serve', {'document_root': 'dhui/static/'}),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"253431332","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\nfrom .forms import UploadFileForm\nfrom .forms import TypeInTextForm\nfrom django.views.decorators.csrf import csrf_protect\n# from .forms import FileFieldForm\n# from django.views.generic.edit import FormView\n\nimport requests\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n\nHSE_API_ROOT = \"http://hse-api-web/\"\n\n\n# def save_user_text(text):\n# # with open( HSE_API_ROOT + 'user_text.txt', 'w') as fo:\n# with open('/opt/code/tmp/user_text.txt', 'w') as fo:\n# fo.write(text)\ndef web_index(request):\n return render(request, 'index.html',\n context={})\n# def web_index(request):\n# form = ProcessTextForm()\n# if request.method == 'POST':\n# form = ProcessTextForm(request.POST)\n# if form.is_valid():\n# # save_user_text(form.cleaned_data['text'])\n# ner = form.cleaned_data['ner']\n# term_extraction = form.cleaned_data['term_extraction']\n# text_classification = form.cleaned_data['text_classification']\n# readability = form.cleaned_data['readability']\n# methods = select_methods_string(ner, term_extraction,\n# text_classification, readability)\n#\n# post_form_data(methods)\n# return render(request, 'index.html',\n# context={'form':form})\n\n\n\n# def post_form_data(methods):\n# return requests.post(url=HSE_API_ROOT + 'process', data=methods)\n\ndef web_about(request):\n return render(request, 'about.html',\n context={})\n\ndef web_documentation(request):\n return render(request, 'documentation.html',\n context={})\n\ndef web_contact(request):\n return render(request, 'contact.html',\n context={})\n\n\ndef web_main(request):\n return render(request, 'main.html',\n context={\"status\": request.GET.get('status')})\n\n\ndef web_status(request):\n task_id = request.GET.get('task_id')\n if task_id:\n url = HSE_API_ROOT + \"status/\" + task_id\n content = requests.get(url)\n result = content.json()\n if result.get('status') == 'SUCCESS':\n content = requests.get(HSE_API_ROOT + 'files/' + result.get('result', [\"\"])[0])\n result['raw'] = content.content.decode('utf-8')\n return JsonResponse(result)\n return JsonResponse({\"error\": \"No task id\"})\n\n\ndef handle_uploaded_file(f, modules):\n\n files = {'file': f}\n url = HSE_API_ROOT + \"upload\"\n content = requests.post(url, files=files)\n file_id = content.json().get(\"file_id\")\n\n if file_id:\n file_id = file_id[7:]\n url = HSE_API_ROOT + \"process/\" + file_id\n content = requests.post(url, data=modules)\n\n\n else:\n raise Exception(content.json())\n response = list(content.json().values())\n\n return response\n\n# def web_process_file(request):\n\n@csrf_protect\ndef web_upload_file(request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n modules = list(filter(lambda t: t[0] in form.cleaned_data['modules'], form.fields['modules'].choices))\n modules = [f[0] for f in modules]\n modules = ','.join(modules)\n task_ids = 
handle_uploaded_file(request.FILES['file'], modules)\n task_ids = '&'.join(task_ids)\n return HttpResponseRedirect('main?task_id=' + str(task_ids))\n else:\n form = UploadFileForm()\n return render(request, 'main.html', {'form_upload': form})\n\ndef web_type_in(request):\n if request.method == 'POST':\n form = TypeInTextForm(request.POST, request.FILES)\n if form.is_valid():\n modules = list(filter(lambda t: t[0] in form.cleaned_data['modules'], form.fields['modules'].choices))\n modules = [f[0] for f in modules]\n modules = ','.join(modules)\n file = open('test.txt', 'rb+')\n subject = form.cleaned_data['text']\n subject = bytes(subject, encoding='utf-8')\n file.write(subject)\n # file.close()\n task_ids = handle_uploaded_file(file, modules)\n file.close()\n\n return HttpResponseRedirect('main?task_id=' + str(task_ids))\n else:\n form = TypeInTextForm()\n return render(request, 'main.html', {'form_text': form})\n\n\n# class FileFieldView(FormView):\n# form_class = FileFieldForm\n# template_name = 'main.html' # Replace with your template.\n# success_url = 'main' # Replace with your URL or reverse().\n#\n# def post(self, request, *args, **kwargs):\n# form_class = self.get_form_class()\n# form = self.get_form(form_class)\n# files = request.FILES.getlist('file_field')\n# if form.is_valid():\n# for f in files:\n# task_id = handle_uploaded_file(f)\n#\n# ... # Do something with each file.\n# return self.form_valid(form)\n# else:\n#\n# return self.form_invalid(form)","sub_path":"src/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"562368707","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/travis/build/honeynet/beeswarm/beeswarm/drones/honeypot/tests/test_http.py\n# Compiled at: 2016-11-12 07:38:04\nimport gevent.monkey\ngevent.monkey.patch_all()\nfrom gevent.server import StreamServer\nfrom beeswarm.drones.honeypot.capabilities import http\nimport unittest, httplib, base64, tempfile, shutil, os\nfrom beeswarm.drones.honeypot.honeypot import Honeypot\n\nclass HttpTests(unittest.TestCase):\n\n def setUp(self):\n self.work_dir = tempfile.mkdtemp()\n Honeypot.prepare_environment(self.work_dir)\n\n def tearDown(self):\n if os.path.isdir(self.work_dir):\n shutil.rmtree(self.work_dir)\n\n def test_connection(self):\n \"\"\" Tests if the capability is up, and sending\n HTTP 401 (Unauthorized) headers.\n \"\"\"\n options = {'enabled': 'True', 'port': 0, 'users': {'test': 'test'}}\n cap = http.Http(options, self.work_dir)\n srv = StreamServer(('0.0.0.0', 0), cap.handle_session)\n srv.start()\n client = httplib.HTTPConnection('127.0.0.1', srv.server_port)\n client.request('GET', '/')\n response = client.getresponse()\n self.assertEqual(response.status, 401)\n srv.stop()\n\n def test_login(self):\n \"\"\" Tries to login using the username/password as test/test.\n \"\"\"\n options = {'enabled': 'True', 'port': 0, 'users': {'test': 'test'}}\n cap = http.Http(options, self.work_dir)\n srv = StreamServer(('0.0.0.0', 0), cap.handle_session)\n srv.start()\n client = httplib.HTTPConnection('127.0.0.1', srv.server_port)\n client.putrequest('GET', '/')\n client.putheader('Authorization', 'Basic ' + base64.b64encode('test:test'))\n client.endheaders()\n response = client.getresponse()\n self.assertEqual(response.status, 200)\n 
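# valid credentials were accepted; stop the capability server again\n        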
srv.stop()","sub_path":"pycfiles/Beeswarm-0.7.18.tar/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"548007784","text":"\nimport json, os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\nfrom tensorflow.keras import layers, models, optimizers, losses\n\nfrom MyModels import CreateModel\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n 'dataset/seg_train',\n target_size=(150, 150),\n color_mode='rgb',\n batch_size=16,\n class_mode='categorical',\n shuffle=True)\n\nvalidation_generator = test_datagen.flow_from_directory(\n 'dataset/seg_test',\n target_size=(150, 150),\n color_mode='rgb',\n batch_size=16,\n class_mode='categorical',\n shuffle=False)\n\nlabels = {}\nfor k, v in validation_generator.class_indices.items():\n labels[v] = k\n \njs = json.dumps(labels)\nf = open(\"labels.json\",\"w\")\nf.write(js)\nf.close()\n\ncheckpoint_history = os.listdir(\"training/\")\nos.mkdir(\"training/\"+str(len(checkpoint_history)))\ncheckpoint_path = \"training/\"+str(len(checkpoint_history))+\"/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n checkpoint_path, verbose=1, save_weights_only=True,\n period=5)\n\nmodel = CreateModel()\n\nSTEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size\nSTEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size\n\nhist = model.fit_generator(generator=train_generator,\n steps_per_epoch=STEP_SIZE_TRAIN,\n validation_data=validation_generator,\n validation_steps=STEP_SIZE_VALID,\n epochs=20, callbacks=[cp_callback])\n\njs = json.dumps(hist.history)\nf = open(\"history_no_preprocess.json\",\"w\")\nf.write(js)\nf.close()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"203647650","text":"import numpy as np\nimport pynbody\n\nm_h = 1.6733 * 10**-24 # g\ngalaxies = np.arange(0,300,1)\n\nsim = '/nobackupp2/mtremmel/Romulus/cosmo25/cosmo25p.768sg1bwK1BHe75.006912'\ns = pynbody.load(sim)\nh = s.halos(dosort=True)\n#s.physical_units()\n\nhalomass_array = []\nstellarmass_array = []\nid_array = []\nR_vir_array = []\nsSFR_array = []\nO_H_nden = []\nerrored = []\nfor i in range(1,100):\n try:\n h1 = h.load_copy(galaxies[i])\n h1.physical_units()\n\n pynbody.analysis.angmom.faceon(h1)\n H1_mass = h1['mass'].sum()\n stellar_mass = h1.s['mass'].sum()\n R_vir = pynbody.analysis.halo.virial_radius(h1) \n #R_max = h1.g['r'].max()\n central_gxy = h1[h1['r'].in_units('kpc') < 0.1*R_vir]#kpc]\n \n #### GET sSFR #### for red/blue designations\n sfr = pynbody.plot.stars.sfh(central_gxy,massform=False)#,trange=[13.7,13.75])\n np.savetxt('sfh_data/H'+str(i)+'_sfh_z017.txt',sfr[0]) #Msun/yr\n np.savetxt('sfh_data/H'+str(i)+'_age_z017.txt',sfr[1]) #Gyr\n sSFR = sfr[0]/(0.6*central_gxy.s['mass'].sum())\n\n \n #### GET O/H ### for mass metallicity relation\n hetot = 0.236 + (2.1 * central_gxy.g['metals'])\n hydrogen = 1.0 - central_gxy.g['metals'] - hetot\n \n O_H = 
np.average(((central_gxy.g['rho']*central_gxy.g['OxMassFrac'])/(16*m_h))/((central_gxy.g['rho']*hydrogen)/m_h))\n\n except:\n errored.append(i)\n np.savetxt('errored_halo_ids.txt',errored)\n continue\n\n print('Halo ',galaxies[i],'has mass =',H1_mass,' and stellar mass:',stellar_mass)\n print('Rvir = ',R_vir)#,'and R_max = ',R_max)\n print('Final sSFR values:',sSFR[-1:])\n print('12 + log(O/H) (number density):',12+np.log10(O_H))\n\n halomass_array.append(H1_mass)\n stellarmass_array.append(stellar_mass)\n id_array.append(i)\n R_vir_array.append(R_vir)\n sSFR_array.append(sSFR[-1:])\n O_H_nden.append(O_H)\n\n np.savetxt('ROM_halomasses_z017.txt',halomass_array)\n np.savetxt('ROM_stellarmass_z017.txt',stellarmass_array)\n np.savetxt('ROM_id_z017.txt',id_array)\n np.savetxt('ROM_Rvir_z017.txt',R_vir_array)\n np.savetxt('ROM_sSFR_z017.txt',sSFR_array)\n np.savetxt('ROM_O_H_nden_z017.txt',O_H_nden)\n","sub_path":"ROM_masses.py","file_name":"ROM_masses.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"184933516","text":"class Solution:\n def containsNearbyAlmostDuplicate(self, nums, k, t):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :type t: int\n :rtype: bool\n \"\"\"\n if t==0 and len(nums)== len(set(nums)): \n return False\n for i in range(len(nums)):\n for j in range(i+1, i+k+1):\n if j >= len(nums):\n break\n if abs(nums[i]-nums[j]) <= t:\n return True\n return False","sub_path":"220存在重复元素.py","file_name":"220存在重复元素.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"140435792","text":"import sqlite3\n\nclass PDTB:\n def __init__(self,file='pdtb.sqlite3'):\n self.conn = sqlite3.connect(file)\n self.conn.text_factory = str\n self.cursor = self.conn.cursor()\n\n def get_causal(self, section=None, file=None):\n query = \"select relation, firstsemfirst, arg1rawtext,arg2rawtext from annotations_test where firstsemfirst like '%Cause%'\"\n if section is not None and file is not None:\n query += \" and section = ? and file = ? 
\"\n query += \" order by section,file\"\n\n #print(query)\n if section is not None and file is not None:\n self.cursor.execute(query, (section, file))\n else:\n self.cursor.execute(query)\n\n return [Tag(*i) for i in self.cursor]\n\nclass WSJ:\n def __init__(self, file):\n self.file = file\n\n def read_relations(self):\n sentences = []\n with open(self.file) as f:\n for line in f:\n if '.START' in f or f == '\\n':\n continue\n sentences.append(line)\n return sentences\n\nclass TaggedPDTB:\n def __init__(self, file):\n self.file = file\n\n def read_relations(self):\n intra_tags = []\n adjacent_tags = []\n tags = intra_tags\n with open(self.file) as f:\n for line in f:\n line = line.strip()\n if len(line) == 0 or line == 'INTRA_SENTENCE':\n continue\n if line == 'ADJACENT_SENTENCES':\n tags = adjacent_tags\n\n r = line.split('\\t')\n tags.append(Tag(None, r[0], '\\t'.join(r[1:])))\n return intra_tags,adjacent_tags\n\nclass Tag:\n def __init__(self, relation, tag, text, text2=None):\n self.relation = relation\n self.tag = tag\n self.text = text\n self.text2 = text2\n\n def set_alt_tag(self, tag):\n self.alt_tag = tag\n\n def __repr__(self):\n return str(self.__dict__)\n","sub_path":"pdtb/pd.py","file_name":"pd.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540645279","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom pathlib import Path\nimport numpy as np\n\n\n\"\"\"\nby morphi01\n\"\"\"\n\n\n # TASK 1: Import both data tables into python using pandas. Set the index column to \"MESS_DATUM\" and parse the column values as dates. [1P]\n\nbasedir = Path(\"C:/Users/HomeBase/git/exercise-4-morphi01/data\")\ngarmisch_dir = basedir / \"produkt_klima_tag_20171010_20190412_01550.txt\"\nzugspitze_dir = basedir / \"produkt_klima_tag_20171010_20190412_05792.txt\"\n# data index identification based on online source: https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/daily/kl/recent/\n# 01550 as Garmisch-Partenkirchen in Bavaria\n# 05792 as Zugspitze in Bavaria\n# Date: 3rd June 2019\n\ngarmisch = pd.read_csv(garmisch_dir, parse_dates = [\"MESS_DATUM\"], index_col = \"MESS_DATUM\", sep = \";\", na_values = \"-999\")\nzugspitze = pd.read_csv(zugspitze_dir, parse_dates = [\"MESS_DATUM\"], index_col = \"MESS_DATUM\", sep = \";\", na_values = \"-999\")\n\n\n # TASK 2: Clip the tables to the year 2018: [1P]\n \ngarmisch_2018 = garmisch.loc[\"2018-01\":\"2018-12\"]\nzugspitze_2018 = zugspitze.loc[\"2018-01\":\"2018-12\"]\n# set new variable names (x_2018) to keep meta data sets (instead of overwriting var \"garmisch\" and var \"zugspitze\")\n\n\n # TASK 3: Resample the temperature data to monthly averages (\" TMK\") and store them in simple lists: [1P]\n \ngarmisch_agg = garmisch_2018.resample(\"1M\").agg({\" TMK\" : \"mean\"}).values\nzugspitze_agg = zugspitze_2018.resample(\"1M\").agg({\" TMK\" : \"mean\"}).values\n# using \"garmisch_2018\" and \"zugspitze_2018\" as both \"garmisch\" and \"zugspitze\" would have been clipped by now (in TASK 2)\n# to_list() does not work as DataFrame has not 'to_list' attribute .. 
so .values is used instead\n# .values returns the monthly average temperatures as an array; values are in chronological order (Jan to Dec)\n\n\n    # TASK 4: Define a plotting function that draws a simple climate diagram\n    # Add the arguments as mentioned in the docstring below [1P]\n    # Set the default temperature range from -15°C to 20°C and the precipitation range from 0mm to 370mm [1P]\n\ngarmisch_agg1 = garmisch_2018.resample(\"1M\").agg({\" TMK\" : \"mean\", \" RSK\" : \"mean\"})\nzugspitze_agg1 = zugspitze_2018.resample(\"1M\").agg({\" TMK\" : \"mean\", \" RSK\" : \"mean\"})\n# adding mean of RSK to the data frame\n# column names identified via online source: https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/daily/kl/recent/DESCRIPTION_obsgermany_climate_daily_kl_recent_en.pdf\n\ngarmisch_agg1 = garmisch_agg1.rename(columns={' TMK': 'TMK'})\ngarmisch_agg1 = garmisch_agg1.rename(columns={' RSK': 'RSK'})\nzugspitze_agg1 = zugspitze_agg1.rename(columns={' TMK': 'TMK'})\nzugspitze_agg1 = zugspitze_agg1.rename(columns={' RSK': 'RSK'})\n#print(garmisch_agg1.columns)\n#print(zugspitze_agg1.columns)\n# renaming column names since nothing works with spaces\n\n\n\nx = 0\ny = zugspitze_agg1\n\ndef create_climate_diagram(df = garmisch_agg1, temp_col = \"TMK\", prec_col = \"RSK\", \n                           title = \"Klimadiagramm Garmisch Partenkirchen\", filename = \"Climate_Diagram\", temp_min = \"-15\",\n                           temp_max = \"20\", prec_min = \"0\", prec_max = \"370\"):\n    \n    fig = plt.figure(figsize=(10,8))\n    plt.rcParams['font.size'] = 16\n\n    ax2 = fig.add_subplot(111)\n    ax1 = ax2.twinx()\n\n    label = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n    index = np.arange(len(label))\n    \n    # TASK 4.1: Draw temperature values as a red line and precipitation values as blue bars: [1P]\n    # Hint: Check out the matplotlib documentation how to plot barcharts. Try to directly set the correct\n    # x-axis labels (month shortnames).\n\n    # use the df argument (not the module-level garmisch_agg1) so the function works for any station\n    ax2.bar(index, df[prec_col], color = \"blue\")\n    ax1.plot(index, df[temp_col], color = \"red\")\n    plt.xticks(index, label, fontsize=5, rotation=30)\n    \n    # TASK 4.2: Set appropriate limits to each y-axis using the function arguments: [1P]\n\n    # the default limits arrive as strings, so cast to float before applying\n    ax1.set_ylim(float(temp_min), float(temp_max))\n    ax2.set_ylim(float(prec_min), float(prec_max))\n    \n    # TASK 4.3: Set appropriate labels to each y-axis: [1P]\n\n    ax2.set_ylabel(\"Niederschlagsfaktor\")\n    ax1.set_ylabel(\"Temperatur\")\n    \n    # TASK 4.4: Give your diagram the title from the passed arguments: [1P]\n    \n    plt.title(title) \n    \n    # TASK 4.5: Save the figure as png image in the \"output\" folder with the given filename. 
[1P]\n \n ######################################################################################\n return fig\n\n \n\nplt.show(create_climate_diagram(garmisch_agg1))\n \n\n\n # TASK5: Use this function to draw a climate diagram for 2018 for both stations and save the result: [1P]\n \ncreate_climate_diagram(...)#######################\ncreate_climate_diagram(...)#######################\n\n","sub_path":"create_climate_diagrams.py","file_name":"create_climate_diagrams.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"492005244","text":"import inputs\nimport math\n\nclass Joystick:\n \n connected = False\n \n gamepad = None\n killSwitch = False\n x = 0\n y = 0\n \n forward = 0.0\n rotate = 0.0\n\n maxJoyPos = 32768\n deadZone = 0.15 # In percent\n \n \n def __init__(self):\n self.connect()\n \n def connect(self):\n if inputs.devices.gamepads:\n self.gamepad = inputs.devices.gamepads[0]\n self.connected = True\n else:\n self.gamepad = None\n \n def run(self):\n \n if self.gamepad == None:\n return False\n # Get current waiting gamepad events\n \n # collect all events\n events = []\n\n while(True):\n try:\n events.append(inputs.get_gamepad(blocking=False)[0])\n except inputs.UnpluggedError:\n self.forward = 0.0\n self.rotate = 0.0\n self.connected = False\n break\n except inputs.NoDataError:\n break\n\n if events:\n \n # If a event is received, the controller must be connected\n self.connected = True\n \n # Loop through received gamepad events and act upon them\n for event in events:\n if(event.code == \"BTN_SOUTH\"):\n self.killSwitch = ( True if event.state == 1 else False)\n elif(event.code == \"ABS_X\"):\n self.x = event.state/self.maxJoyPos;\n elif(event.code == \"ABS_Y\"):\n self.y = event.state/self.maxJoyPos \n \n if (self.killSwitch):\n self.forward = ( self.y if abs(self.y) > self.deadZone else 0.0)\n self.rotate = (self.x if abs(self.x) > self.deadZone else 0.0)\n \n else:\n self.forward = 0.0\n self.rotate = 0.0\n #print( \"Trans: \" + \"{:.2f}\".format(self.forward) + \"m/s\", \"Rot: \" + \"{:.2f}\".format(self.rotate) + \"m/s\") \n \n def getForward(self):\n return round(self.forward,4);\n \n def getRotate(self):\n return round(self.rotate,4);\n \n \n","sub_path":"joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"334249342","text":"# -*- coding: utf-8 -*-\n\"\"\" TcEx Framework Request Module \"\"\"\nfrom builtins import str\nimport json\nimport socket\nimport time\nfrom base64 import b64encode\n\nfrom requests import (exceptions, packages, Request, Session)\npackages.urllib3.disable_warnings() # disable ssl warning message\n\n\nclass TcExRequest(object):\n \"\"\"Wrapper on Python Requests Module with API logging.\"\"\"\n\n def __init__(self, tcex):\n \"\"\" \"\"\"\n self._tcex = tcex\n\n self._authorization_method = None\n self._basic_auth = None\n self._body = None\n self._content_type = None\n self._headers = {}\n self._http_method = 'GET'\n self._json = None\n self._payload = {}\n self._proxies = {}\n self._url = None\n self._files = None\n self.user_agent = 'ThreatConnect'\n\n # request properties\n self._retries = 4\n self._sleep = 5\n self._session = Session()\n self._timeout = 300\n self._verify_ssl = False\n\n #\n # Body\n #\n\n @property\n def body(self):\n \"\"\"The POST/PUT body content for this request.\"\"\"\n return self._body\n\n 
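# NOTE: the setter below also refreshes the Content-Length header to match the new body\n    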
@body.setter\n def body(self, data):\n \"\"\"The POST/PUT body content for this request.\"\"\"\n if data is not None:\n self._body = data\n self.add_header('Content-Length', str(len(self._body)))\n\n @property\n def json(self):\n \"\"\"The POST/PUT body content in JSON format for this request.\"\"\"\n return self._body\n\n @json.setter\n def json(self, data):\n \"\"\"The POST/PUT body content in JSON format for this request.\"\"\"\n if data is not None:\n self._body = json.dumps(data)\n self.add_header('Content-Type', 'application/json')\n\n #\n # HTTP Headers\n #\n\n @property\n def headers(self):\n \"\"\"The header values for this request.\"\"\"\n return self._headers\n\n def reset_headers(self):\n \"\"\"Reset header dictionary for this request.\"\"\"\n self._headers = {}\n\n def add_header(self, key, val):\n \"\"\"Add a key value pair to header.\n\n Args:\n key (string): The header key\n val (string): The header value\n \"\"\"\n self._headers[key] = str(val)\n\n @property\n def authorization(self):\n \"\"\"The \"Authorization\" header value for this request.\"\"\"\n return self._headers.get('Authorization')\n\n @authorization.setter\n def authorization(self, data):\n \"\"\"The \"Authorization\" header value for this request.\"\"\"\n self.add_header('Authorization', data)\n\n def authorization_method(self, method):\n \"\"\"Method to create Authorization header for this request.\n\n Args:\n method (method): The method to use to generate the authorization header(s).\n \"\"\"\n self._authorization_method = method\n\n @property\n def basic_auth(self):\n \"\"\"The basic auth settings for this request.\"\"\"\n return self._basic_auth\n\n @basic_auth.setter\n def basic_auth(self, data):\n \"\"\"The basic auth settings for this request.\"\"\"\n self._basic_auth = data\n\n @property\n def content_type(self):\n \"\"\"The Content-Type header value for this request.\"\"\"\n return self._content_type\n\n @content_type.setter\n def content_type(self, data):\n \"\"\"The Content-Type header value for this request.\"\"\"\n self._content_type = str(data)\n self.add_header('Content-Type', str(data))\n\n def set_basic_auth(self, username, password):\n \"\"\"Manually set basic auth in the header when normal method does not work.\"\"\"\n credentials = str(\n b64encode('{}:{}'.format(username, password).encode('utf-8')), 'utf-8')\n self.authorization = 'Basic {}'.format(credentials)\n\n @property\n def user_agent(self):\n \"\"\"The the User-Agent header value for this request.\"\"\"\n return self._headers.get('User-agent')\n\n @user_agent.setter\n def user_agent(self, data):\n \"\"\"The the User-Agent header value for this request.\"\"\"\n self.add_header('User-agent', data)\n\n #\n # HTTP Payload\n #\n\n @property\n def payload(self):\n \"\"\"The payload values for this request.\"\"\"\n return self._payload\n\n def reset_payload(self):\n \"\"\"Reset payload dictionary\"\"\"\n self._payload = {}\n\n def add_payload(self, key, val, append=False):\n \"\"\"Add a key value pair to payload for this request.\n\n Args:\n key (string): The payload key\n val (string): The payload value\n append (bool): Indicate whether the value should be appended\n \"\"\"\n if append:\n self._payload.setdefault(key, []).append(val)\n else:\n self._payload[key] = val\n\n #\n # HTTP Method\n #\n\n @property\n def http_method(self):\n \"\"\"The HTTP method for this request.\"\"\"\n return self._http_method\n\n @http_method.setter\n def http_method(self, data):\n \"\"\"The HTTP method for this request.\"\"\"\n data = data.upper()\n if 
data in ['DELETE', 'GET', 'POST', 'PUT']:\n self._http_method = data\n\n # set content type for commit methods (best guess)\n if self._content_type is None and data in ['POST', 'PUT']:\n self.add_header('Content-Type', 'application/json')\n else:\n raise AttributeError(\n 'Request Object Error: {} is not a valid HTTP method.'.format(data))\n\n #\n # Send Properties\n #\n\n @property\n def proxies(self):\n \"\"\"The proxy settings for this request.\"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, data):\n \"\"\"The proxy settings for this request.\"\"\"\n self._proxies = data\n\n @property\n def timeout(self):\n \"\"\"The HTTP timeout value for this request.\"\"\"\n return self._timeout\n\n @timeout.setter\n def timeout(self, data):\n \"\"\"The HTTP timeout value for this request.\"\"\"\n if isinstance(data, int):\n self._timeout = data\n\n @property\n def verify_ssl(self):\n \"\"\"The SSL validation setting for this request.\"\"\"\n return self._verify_ssl\n\n @verify_ssl.setter\n def verify_ssl(self, data):\n \"\"\"The SSL validation setting for this request.\"\"\"\n if isinstance(data, bool):\n self._verify_ssl = data\n\n @property\n def files(self):\n \"\"\"Files setting for this request\"\"\"\n return self._files\n\n @files.setter\n def files(self, data):\n \"\"\"Files setting for this request\"\"\"\n if isinstance(data, dict):\n self._files = data\n #\n # Send\n #\n\n def send(self, stream=False):\n \"\"\"Send the HTTP request via Python Requests modules.\n\n This method will send the request to the remote endpoint. It will try to handle\n temporary communications issues by retrying the request automatically.\n\n Args:\n stream (boolean): Boolean to enable stream download.\n\n Returns:\n (Requests.Response) The Request response\n \"\"\"\n #\n # prepare request\n #\n\n api_request = Request(\n method=self._http_method, url=self._url, data=self._body, files=self._files,\n params=self._payload)\n\n request_prepped = api_request.prepare()\n\n # add authorization header returned by authorization method\n if self._authorization_method is not None:\n self._headers.update(self._authorization_method(request_prepped))\n request_prepped.prepare_headers(self._headers)\n # self._tcex.log.debug(u'Request URL: {}'.format(self._url))\n\n if self._basic_auth is not None:\n request_prepped.prepare_auth(self._basic_auth)\n\n #\n # api request (gracefully handle temporary communications issues with the API)\n #\n for i in range(1, self._retries + 1, 1):\n try:\n response = self._session.send(\n request_prepped, proxies=self._proxies, timeout=self._timeout,\n verify=self._verify_ssl, stream=stream)\n break\n except exceptions.ReadTimeout as e:\n self._tcex.log.error(u'Error: {}'.format(e))\n self._tcex.log.error(u'The server may be experiencing delays at the moment.')\n self._tcex.log.info(\n u'Pausing for {} seconds to give server time to catch up.'.format(\n self._sleep))\n time.sleep(self._sleep)\n self._tcex.log.info(u'Retry {} ....'.format(i))\n\n if i == self._retries:\n self._tcex.log.critical(u'Exiting: {}'.format(e))\n raise RuntimeError(e)\n except exceptions.ConnectionError as e:\n self._tcex.log.error(u'Error: {}'.format(e))\n self._tcex.log.error(u'Connection Error. 
The server may be down.')\n self._tcex.log.info(\n u'Pausing for {} seconds to give server time to catch up.'.format(\n self._sleep))\n time.sleep(self._sleep)\n self._tcex.log.info(u'Retry {} ....'.format(i))\n if i == self._retries:\n self._tcex.log.critical(u'Exiting: {}'.format(e))\n raise RuntimeError(e)\n except socket.error as e:\n self._tcex.log.critical(u'Socket Error: {}'.format(e))\n raise RuntimeError(e)\n\n self._tcex.log.info(u'URL ({}): {}'.format(self._http_method, response.url))\n self._tcex.log.info(u'Status Code: {}'.format(response.status_code))\n return response\n\n #\n # URL\n #\n\n @property\n def url(self):\n \"\"\"The URL for this request.\"\"\"\n return self._url\n\n @url.setter\n def url(self, data):\n \"\"\"The URL for this request.\"\"\"\n self._url = data\n\n def __str__(self):\n \"\"\"Print this request instance configuration.\"\"\"\n printable_string = '\\n{0!s:_^80}\\n'.format('Request')\n\n #\n # http settings\n #\n printable_string += '\\n{0!s:40}\\n'.format('HTTP Settings')\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format('HTTP Method', self.http_method)\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format('Request URL', self.url)\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format('Content Type', self.content_type)\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format('Body', self.body)\n\n #\n # headers\n #\n if self.headers:\n printable_string += '\\n{0!s:40}\\n'.format('Headers')\n for k, v in self.headers.items():\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format(k, v)\n\n #\n # payload\n #\n if self.payload:\n printable_string += '\\n{0!s:40}\\n'.format('Payload')\n for k, v in self.payload.items():\n printable_string += ' {0!s:<29}{1!s:<50}\\n'.format(k, v)\n\n return printable_string\n","sub_path":"tcex/tcex_request.py","file_name":"tcex_request.py","file_ext":"py","file_size_in_byte":11087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"383930281","text":"import os\r\nimport uuid\r\nfrom django.shortcuts import render, HttpResponse\r\nfrom django.db.models import Q\r\nfrom django.forms.models import model_to_dict\r\nfrom django.db import transaction\r\nfrom django.core import serializers\r\nfrom task.templatetags.admin_tags import change_to_task_type,change_to_staff,change_to_department\r\nfrom personnel.server import staff_db\r\nfrom .forms.form import KnowledgeForm\r\nfrom common.functions import filter_fields,build_attachment_info,compare_fields,compare_json\r\n\r\nimport json\r\nfrom task.server import task_submit_record_db,task_submit_attach_db,\\\r\n task_submit_tag_db,task_assign_db,task_map_tag_db,task_map_db\r\nfrom .server import *\r\nfrom common.functions import CJSONEncoder\r\n\r\n# Create your views here.\r\n\r\n\r\ndef collect(request):\r\n \"\"\"知识收录\"\"\"\r\n origin_id = request.GET.get(\"tsid\",None)\r\n ret = {\"status\": False,\"data\":\"\"}\r\n user = request.user.staff.sid\r\n origin_obj = coll_record_db.query_record_by_origin(origin_id)\r\n if not origin_obj:\r\n try:\r\n with transaction.atomic():\r\n # 获取记录信息\r\n record_obj = task_submit_record_db.query_record_by_id(origin_id)\r\n if record_obj:\r\n record_tags = task_submit_tag_db.query_task_tag_by_tsid(origin_id)\r\n record_attach = task_submit_attach_db.query_task_submit_attachment_by_tsid(origin_id)\r\n # 获取工单信息\r\n task_assgin_obj = task_assign_db.query_task_assign_by_tasid(record_obj.tasid_id)\r\n task_map_obj = task_map_db.query_task_by_tmid(task_assgin_obj.first().tmid_id)\r\n map_tags = 
task_map_tag_db.query_tag_by_tmid(task_map_obj.tmid)\r\n contributor = task_assgin_obj.first().member_id_id\r\n # 在构造收录信息\r\n tag = \"\"\r\n for item in record_tags:\r\n tag += item.name + ';'\r\n relate_tag = \"\"\r\n for item in map_tags:\r\n relate_tag += item.name + ';'\r\n relate_title = task_map_obj.title\r\n insert_info = {\r\n \"origin\": origin_id,\r\n \"title\": record_obj.title,\r\n \"summary\": record_obj.summary,\r\n \"remark\": record_obj.remark,\r\n \"tag\": tag,\r\n \"relate_tag\": relate_tag,\r\n \"relate_title\": relate_title,\r\n \"recorder_id\": user,\r\n \"contributor_id\": contributor,\r\n \"type_id\": task_map_obj.type_id,\r\n \"tid_id\": task_map_obj.tid_id\r\n }\r\n tsid = coll_record_db.insert_record(insert_info)\r\n # 收录附件\r\n att_list = []\r\n for obj in record_attach:\r\n att_json = {}\r\n att_json[\"tsid_id\"] = tsid\r\n att_json[\"attachment\"] = obj.attachment\r\n att_json[\"description\"] = obj.description\r\n att_json[\"name\"] = obj.name\r\n att_list.append(att_json)\r\n if att_list:\r\n record_attach_db.mutil_insert_attachment(att_list)\r\n # 原数据更改为收录状态\r\n task_submit_record_db.update_status({\"tsid\":origin_id,\"is_collected\":1})\r\n ret['status'] = True\r\n except Exception as e:\r\n print(e)\r\n else:\r\n ret = {\"status\": True, \"data\": \"\"}\r\n return HttpResponse(json.dumps(ret))\r\n\r\n\r\ndef collect_delete(request):\r\n \"\"\"删除收录\"\"\"\r\n ret = {'status': False, \"data\": \"\", \"message\": \"\"}\r\n ids = request.GET.get(\"ids\", '')\r\n ids = ids.split(\"|\")\r\n # 转化成数字\r\n id_list = []\r\n for item in ids:\r\n if item:\r\n id_list.append(int(item))\r\n try:\r\n with transaction.atomic():\r\n coll_record_db.multi_delete(id_list)\r\n # 删除附件\r\n record_attach_db.mutil_delete(id_list)\r\n ret['status'] = True\r\n except Exception as e:\r\n print(e)\r\n ret['message'] = \"删除失败\"\r\n return HttpResponse(json.dumps(ret))\r\n\r\n\r\ndef collections(request):\r\n \"\"\"知识库中心\"\"\"\r\n type_ = request.GET.get(\"type\",0)\r\n query_sets = coll_record_db.query_record_list()\r\n if type_:\r\n query_sets = coll_record_db.query_record_by_type(type_)\r\n return render(request,\"collections/collections.html\",{\"query_sets\":query_sets,\"type\":type_})\r\n\r\n\r\ndef knowledge(request):\r\n \"\"\"指引匹配\"\"\"\r\n tmid = int(request.GET.get(\"tmid\",0))\r\n ret = {\"status\":False,\"data\":[]}\r\n if tmid:\r\n map_obj = task_map_db.query_task_by_tmid(tmid)\r\n map_tag_list = task_map_tag_db.query_tag_by_tmid(tmid)\r\n title = map_obj.title\r\n type = map_obj.type_id\r\n query_sets = coll_record_db.query_record_by_type(type)\r\n if query_sets:\r\n q_obj = Q()\r\n q_obj.connector = \"OR\"\r\n # 构造标签Q\r\n if map_tag_list:\r\n _tag_fields = CollRecord._tag_field\r\n for item in map_tag_list:\r\n q_obj.children.append((\"%s__icontains\" % _tag_fields, item.name))\r\n # 构造标题Q\r\n _title_field = CollRecord._title_field\r\n q_obj.children.append((\"%s__icontains\" % _title_field, title))\r\n db_result = query_sets.filter(q_obj).order_by(\"favor\").all()\r\n data = serializers.serialize(\"json\", db_result)\r\n data = json.loads(data)\r\n for item in data:\r\n item[\"fields\"][\"type\"] = change_to_task_type(item[\"fields\"][\"type\"])\r\n ret[\"status\"] = True\r\n ret[\"data\"] = data\r\n return HttpResponse(json.dumps(ret))\r\n\r\n\r\ndef knowledge_detail(request):\r\n \"\"\"指引详细\"\"\"\r\n id = request.GET.get(\"id\", None)\r\n uid = request.user.staff.sid\r\n ret = {\"status\": False, \"data\": \"\", \"signal\":False}\r\n if id:\r\n try:\r\n # record_obj = 
coll_record_db.query_record_by_id(id)\r\n record_obj = CollRecord.objects.filter(nid=id).first()\r\n if record_obj:\r\n # 格式化数据\r\n record_json = {}\r\n # del record_json['_state']\r\n record_json[\"nid\"] = record_obj.nid\r\n record_json[\"type_id\"] = record_obj.type.name\r\n record_json[\"recorder_id\"] = record_obj.recorder.name\r\n contributor_id = record_obj.contributor_id\r\n staff_obj = staff_db.query_staff_by_id(contributor_id)\r\n record_json[\"contributor_id\"] = record_obj.contributor.name\r\n record_json[\"depart\"] = change_to_department(staff_obj.department_id)\r\n record_json[\"phone\"] = staff_obj.phone\r\n record_json[\"title\"]=record_obj.title\r\n record_json[\"tag\"]=record_obj.tag\r\n record_json[\"create_time\"]=record_obj.create_time\r\n record_json[\"favor\"] = record_obj.favor\r\n record_json[\"summary\"]=record_obj.summary\r\n record_json[\"remark\"] = record_obj.remark\r\n record_attach = record_attach_db.query_record_attach_by_tsid(id)\r\n if record_attach:\r\n record_json['attach'] = serializers.serialize(\"json\", record_attach)\r\n else:\r\n record_json['attach'] = ''\r\n # 获取点赞状态\r\n is_exist = coll_favor_db.is_exist({\"tsid_id\":id, \"uid_id\":uid})\r\n if is_exist:\r\n obj = is_exist.first()\r\n if obj.status:\r\n ret['signal'] = True\r\n ret['status'] = True\r\n ret['data'] = record_json\r\n return HttpResponse(json.dumps(ret,cls=CJSONEncoder))\r\n except Exception as e:\r\n print(e)\r\n return render(request, '404.html')\r\n\r\n\r\ndef knowledge_edit(request):\r\n \"\"\"自定义添加知识\"\"\"\r\n method = request.method\r\n if method == \"GET\":\r\n id = request.GET.get(\"id\", '')\r\n if id:\r\n query_set = coll_record_db.query_record_by_id(id)\r\n know_attach = record_attach_db.query_record_attach_by_tsid(id)\r\n if not know_attach:\r\n know_attach = ''\r\n return render(request,\"collections/knowledge_edit.html\",{\"query_set\":query_set ,\"nid\": id, \"know_attach\": know_attach})\r\n else:\r\n return render(request,'404.html')\r\n else:\r\n ret = {'status': False, \"data\": '', \"message\": \"\"}\r\n form = KnowledgeForm(data=request.POST)\r\n if form.is_valid():\r\n data = request.POST\r\n data = data.dict()\r\n k_attach = data.get(\"attach\", '')\r\n id = data.get(\"id\",None)\r\n k_attach = list(json.loads(k_attach))\r\n if not id:\r\n # 创建\r\n try:\r\n with transaction.atomic():\r\n # 插入收录知识信息\r\n coll_info = filter_fields(CollRecord._insert, data)\r\n nid = coll_record_db.insert_record(coll_info)\r\n # 插入知识附件\r\n if k_attach:\r\n k_attach = build_attachment_info({\"tsid_id\": nid}, k_attach)\r\n record_attach_db.mutil_insert_attachment(k_attach)\r\n ret['status'] = True\r\n ret['data'] = nid\r\n except Exception as e:\r\n print(e)\r\n ret[\"message\"] = \"添加失败\"\r\n else:\r\n try:\r\n with transaction.atomic():\r\n # 更新收录信息\r\n record = coll_record_db.query_record_by_id(id)\r\n know_info = compare_fields(CollRecord._update, record, data)\r\n if know_info:\r\n know_info[\"nid\"] = id\r\n coll_record_db.update_info(know_info)\r\n # 更新附件\r\n if k_attach:\r\n att_record = record_attach_db.query_record_attach_by_tsid(id)\r\n # 数据对比\r\n insert_att, update_att, delete_id_att = compare_json(att_record, k_attach, \"nid\")\r\n\r\n if insert_att:\r\n insert_att = build_attachment_info({\"tsid_id\": id}, insert_att)\r\n record_attach_db.mutil_insert_attachment(insert_att)\r\n if update_att:\r\n record_attach_db.mutil_update_attachment(update_att)\r\n if delete_id_att:\r\n record_attach_db.mutil_delete(delete_id_att)\r\n else:\r\n 
record_attach_db.mutil_delete_by_tsid(id)\r\n ret['data'] = id\r\n ret['status'] = True\r\n except Exception as e:\r\n print(e)\r\n else:\r\n errors = form.errors.as_data().values()\r\n firsterror = str(list(errors)[0][0])\r\n ret['message'] = firsterror\r\n return HttpResponse(json.dumps(ret))\r\n\r\n\r\ndef knowledge_favor(request):\r\n \"\"\"点赞收录\"\"\"\r\n print(\"id\",request.GET.get(\"id\",0))\r\n id = int(request.GET.get(\"id\",0))\r\n uid = int(request.GET.get(\"user\",0))\r\n ret = {\"status\":False,\"data\":\"\"}\r\n signal = True\r\n if id and uid:\r\n info = {\"tsid_id\":id, \"uid_id\":uid}\r\n is_exist = coll_favor_db.is_exist(info)\r\n if is_exist:\r\n obj = is_exist.first()\r\n if not obj.status:\r\n obj.status = 1\r\n obj.save()\r\n else:\r\n obj.status = 0\r\n signal = False\r\n obj.save()\r\n else:\r\n coll_favor_db.insert_favor(info)\r\n # 更新点赞数\r\n row = coll_favor_db.count_favor(id)\r\n favor_count = int(row[0])\r\n refer_obj = coll_record_db.query_record_by_id(id)\r\n refer_obj.favor = favor_count\r\n refer_obj.save()\r\n ret[\"data\"] = {\"count\":favor_count,\"signal\":signal}\r\n ret[\"status\"] = True\r\n return HttpResponse(json.dumps(ret))\r\n\r\n\r\ndef know_attach(request):\r\n \"\"\"收录附件上传\"\"\"\r\n ret = {\"status\": False, \"data\": {\"path\": \"\", \"name\": \"\"}, \"summary\": \"\"}\r\n # 保存路径\r\n target_path = \"media/upload/collection/\"\r\n if not os.path.exists(target_path):\r\n os.makedirs(target_path)\r\n try:\r\n # 获取文件对象\r\n file_obj = request.FILES.get(\"file\")\r\n raw_name = file_obj.name\r\n postfix = raw_name.split(\".\")[-1]\r\n if not file_obj:\r\n pass\r\n else:\r\n file_name = str(uuid.uuid4()) + \".\" + postfix\r\n # 查看路径是否存在,没有则生成\r\n if not os.path.exists(os.path.dirname(target_path)):\r\n os.makedirs(target_path)\r\n file_path = os.path.join(target_path, file_name)\r\n # os.path.join()在Linux/macOS下会以斜杠(/)分隔路径,而在Windows下则会以反斜杠(\\)分隔路径,\r\n # 故统一路径将'\\'替换成'/'\r\n file_path = file_path.replace('\\\\', \"/\")\r\n with open(file_path, \"wb\") as f:\r\n for chunk in file_obj.chunks():\r\n f.write(chunk)\r\n ret[\"status\"] = True\r\n ret[\"data\"]['path'] = file_path\r\n ret[\"data\"]['name'] = raw_name\r\n except Exception as e:\r\n ret[\"summary\"] = str(e)\r\n return HttpResponse(json.dumps(ret))\r\n\r\n","sub_path":"apps/collection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"269850394","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# adbshellpy_home.py\n# By : 神郭\n# Version : 0.6.2.2\nimport sys,os,datetime\n#Core Function\ntry:from adbshell import errexit,update,checkinternet,clear,ParseArguments,adbcommand,install,changes,github,version,builddate,who,nowdevice,shellex\nexcept:from adbshell_alpha import errexit,update,checkinternet,clear,ParseArguments,adbcommand,install,changes,github,version,builddate,who,nowdevice,shellex\nclass adbshellpyinformation:\n p=sys.platform\n branch=None\n uselinuxpkgmanagertoinstalladb=None\n adbfile=None\n aapt=None\n def __init__(self):\n try:from adbshell_alpha import conf\n except:from adbshell import conf\n self.conf=conf\n Permissionshow=True\n#HelperView\nimport adbshellpy_libhelper\n\ndef home():\n print('''\n **********************************Welcome*****************************************\n * ADBSystemTOOLBOX *\n * 基于Python3&GoogleADB的安卓系统工具箱 *\n * Develop: CoolApkUser:白曦 Github:AEnjoy *\n * 如果你链接了多个设备,请先使用输入who命令再输入其它命令哦! 
*\n **********************************Welcome*****************************************\n '''+'Version:'+version +' buildDate:'+builddate) \n print('''\n _____________________________ADBSystemTOOLBOX____________________________________\n ┃ 工具箱指令: ┃ help> back cls set> who> home exit FixGithub ┃\n ┃ re-install update environment changes clean-data ┃\n ---------------------------------------------------------------------------------\n ┃ ADB指令集 : ┃ shell root(√) ┃\n ┃ 设备链接选项: ┃ start_server(√) kill_server devices tcpipconnect usb(√) ┃\n ┃ 设备高级重启: ┃ reboot shutdown rec bl edl sideload download(SamsumgDevices) ┃\n ---------------------------------------------------------------------------------\n ┃ 应用 专区 : ┃ install> uninstall> disable> enable> clear> applist> ┃ \n ┃ 系统 优化 : ┃ 编译优化compile> ┃\n ┃ 文件 传输 : ┃ pull> push> screencap> ┃\n ┃ 系统 调节 : ┃ windowmode> input> settings> dumpsys> ┃\n ┃ 应用 激活 : ┃ piebridge(黑域) shizuku icebox(冰箱) kfmark ┃\n ┃ 其它 功能 : ┃ APP安装关联:relatedapk ┃\n -------------------------------ADBSystemTOOLBOX----------------------------------\n ''')\n print('当前adbshellpy控制的设备:'+nowdevice+' \\n 你可以使用who切换目标设备.(仅有一个设备时不会显示,但功能依然可用)')\nclass func_():\n def __init__(self):\n global nowdevice\n self.adb=adbcommand(nowdevice)\n global changes,github,version,builddate\n self.p=adbshellpyinformation.p\n self.adbfile=adbshellpyinformation.adbfile\n self.changes=changes\n def kfmark(self):\n try:import adbshellpy_libroot\n except: \n update().download_lib('adbshellpy_libroot')\n import adbshellpy_libroot\n adbshellpy_libroot.Activate_KFMark()\n def icebox(self):\n self.adb.shell('dpm set-device-owner com.catchingnow.icebox/.receiver.DPMReceiver')\n def relatedapk(self):\n import adbshellpy_libapkfile\n adbshellpy_libapkfile.relatedApkfile()\n def update(self):\n update().githubopen()\n update().updatecheck()\n def changes_(self):\n print(self.changes)\n def piebridge(self):\n self.adb.shell('sh /data/data/me.piebridge.brevent/brevent.sh')\n def shizuku(self):\n self.adb.shell('shizuku sh /sdcard/Android/data/moe.shizuku.privileged.api/files/start.sh')\n def push(self):\n print('push:从本地中复制一个文件(夹)至手机')\n urlp=input('远端路径>>>')\n if urlp=='':\n print('默认使用 /sdcard')\n urlc=input('本地文件或文件夹>>>')\n urlc=urlc.replace(\" \", \"\")\n if urlc=='':\n print('本地文件或文件夹为空')\n errexit(4)\n return\n self.adb.push(urlc=urlc,urlp=urlp)\n def pull(self):\n print('pull:从手机中拉取一个文件(夹)至本地')\n urlp=input('远端路径>>>')\n urlp=urlp.replace(\" \", \"\")\n if urlp=='':\n print('E:请输入有效远端路径')\n errexit(4)\n return\n urlc=input('本地路径>>>')\n if urlc=='':\n print('默认使用当前路径')\n urlc=os.getcwd()\n self.adb.pull(urlp=urlp,urlc=urlc) \n def screencap(self):\n print('screencap:对手机执行截屏命令,并可选择是否传输至电脑并立即查看')\n h=input('传输至电脑并打开查看>>>[Y/N 默认N]')\n h=h.replace(\" \", \"\")\n self.adb.shell(command='screencap -p /sdcard/sc.png')\n if h=='y' or h=='Y':\n self.adb.pull(urlp='/sdcard/sc.png',urlc='sc.png')\n if self.p == 'Windows':\n os.system('explorer sc.png')\n if self.p=='Linux':\n h=input('...LINUX查看?需要提前安装imagemagick>>>[Y/N 默认N]')\n if h=='y' or h=='Y':\n os.system('display sc.png')\n def dumpsys(self):\n print('dumpsys:获取或设置一些调试信息(转储所有服务)。在adbmode→help→dumpsys查询命令列表')\n inputtext=input('dumpsys>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='' or inputtext=='back':\n return\n self.adb.adb_shell().shell_dumpsys()\n return\n def settings(self):\n print('通过ADB读取/更改系统设置 在adbmode→help→settings查询命令列表')\n print('''\n get [--user | current] NAMESPACE KEY 设置键值\n ...检索KEY的当前值。\n put [--user | current] NAMESPACE KEY VALUE [TAG] 
[default]\n ...将KEY的内容更改为VALUE\n ...设置为默认值,仅对全局/安全名称空间不区分大小写\n delete NAMESPACE KEY\n ...删除NAMESPACE KEY键值\n reset [--user | current] NAMESPACE {PACKAGE_NAME | RESET_MODE}\n ...重置具有全局/安全模式的程序包的表。\n ...RESET_MODE:{untrusted_defaults,untrusted_clear,trusted_defaults},不区分大小写\n list NAMESPACE\n ...列出所有设置的值 NAMESPACE:{system, secure, global}\n ''')\n inputtext=input('Command>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='' or inputtext=='back':\n return\n self.adb.adb_shell().shell_setting(func=inputtext)\n def input(self):\n print('''command:(Only Enter To Return)\n input_text: 向手机输入一串字符(不支持中文)\n input_keyevent:模拟输入内容(在adbshell→help→input中可查询指令)\n input_tap: 模拟点击屏幕上的一个像素点\n input_swipe: 模拟滑动屏幕(从一个像素点到另一像素点)\n ''')\n inputtext=input('command>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='input_text':\n inputtext=input('Text>>>')\n self.adb.adb_shell().shell_input_text(func=inputtext)\n return\n if inputtext=='input_keyevent':\n inputtext=input('Keyevent>>>')\n self.adb.adb_shell().shell_input_keyevent(func=inputtext)\n return\n if inputtext=='input_tap':\n x=input('X>>>')\n y=input('Y>>>')\n self.adb.adb_shell().shell_input_tap(x=x,y=y)\n return\n if inputtext=='input_swipe':\n x1=input('X1>>>')\n y1=input('Y1>>>')\n x2=input('X2>>>')\n y2=input('Y2>>>')\n d =input('D>>>')\n self.adb.adb_shell().shell_input_swipe(x1=x1,x2=x2,y1=y1,y2=y2,d=d)\n return\n def windowmode(self):\n inputtext=input('欲查看或设置的信息>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='':\n self.adb.adb_shell().shell_wm()\n return\n if inputtext=='overscan':\n inputtext=input('...overscan>>>')\n if inputtext=='reset':\n self.adb.adb_shell().shell_wm_overscan('reset')\n return\n if inputtext=='':\n self.adb.adb_shell().shell_wm_overscan()\n return\n self.adb.adb_shell().shell_wm_overscan(inputtext)\n return\n if inputtext=='size':\n inputtext=input('...size>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='reset':\n self.adb.adb_shell().shell_wm_size(func='reset')\n return\n if inputtext=='':\n self.adb.adb_shell().shell_wm_size()\n return\n self.adb.adb_shell().shell_wm_size(func=inputtext)\n return\n if inputtext=='density':\n inputtext=input('...density>>>')\n inputtext=inputtext.replace(\" \", \"\")\n if inputtext=='reset':\n self.adb.adb_shell().shell_wm_density(func='reset')\n return\n if inputtext=='':\n self.adb.adb_shell().shell_wm_density()\n return\n self.adb.adb_shell().shell_wm_density(func=inputtext)\n return\n def applist(self):\n args_=input('附加的参数>>>')\n args_=args_.replace(\" \", \"\")\n if args_=='':\n self.adb.adb_shell().shell_pm_list_package()\n return\n self.adb.adb_shell().shell_pm_list_package(args_)\n def clear(self):\n Package=input('欲清除数据的程序包名>>>')\n Package=Package.replace(\" \", \"\")\n if Package=='':\n errexit(4)\n return\n self.adb.adb_shell().shell_pm_clear(Package)\n def enable(self):\n Package=input('欲启用的程序包名>>>')\n Package=Package.replace(\" \", \"\")\n if Package=='':\n errexit(4)\n return\n self.adb.adb_shell().shell_pm_enable(Package)\n def disable(self):\n Package=input('欲禁用的程序包名(使用applist查看)>>>')\n Package=Package.replace(\" \", \"\")\n if Package=='':\n errexit(4)\n return\n self.adb.adb_shell().shell_pm_disable_user(Package)\n def compile(self):\n a=input('Compile:请选择compile功能模式: 1).传统 2).新版 :')\n if a=='1':\n mode=input('编译模式[默认-m speed]>>>')\n func=input('编译参数[默认 为空]>>>')\n pkg=input(\"编译对象[默认-a]>>>\")\n func, pkg = func, pkg . 
replace(\" \", \"\")\n if mode=='':\n mode='-m speed'\n if pkg=='':\n pkg='-a'\n print('执行该操作将消耗一定时间,请坐和放宽')\n start=datetime.datetime.now()\n print('当前时间: '+str(start))\n self.adb.adb_shell().shell_cmd_compile(method=mode,func=func,pkg=pkg)\n end=datetime.datetime.now()\n print('结束时间: '+str(end))\n print('执行用时: %s Seconds'%(end-start))\n if a=='2':\n print('''Compile :\n Compile New\n 通过对AndroidN+的应用进行dexopt编译以提升性能\n 注意:如果你使用的是Android Q 或更高版本,请谨慎对系统应用进行编译,特别是高危组件:\n com.android.systemui\n 已知问题: \n Android Q\n 1.MIUI Android Q编译系统应用会导致严重掉帧\n 2.Samsung OneUI 2.0+会出现开机无法进入桌面,系统全局黑屏的问题.\n 3.LG UX 9 会在锁屏时死机重启\n 4.com.android.systemui不支持通过手动安装还原!!!\n Android N / O\n 1.编译不显示进度\n 如果你是三星用户:推荐使用Galaxy Labs 的 App Booster,其原理为编译原理,且无安全风险\n 性能:everything>speed>[默认]speed-profile>quicken>[不编译]\n 编译耗时:everything>speed\n 空间占用:everything>speed>[默认]speed-profile>quicken>spacesave>[不编译]\n **********************************Compile*****************************************\n * (00).Back [Enter] *\n * (01).使用everything模式编译所有应用[系统,用户] (强制) *\n * (02).使用everything模式编译所有应用[系统,用户] *\n * (03).使用speed模式编译所有应用[系统,用户] (强制) *\n * (04).使用speed模式编译所有应用[系统,用户] *\n * 第一次编译优化,建议选择带有(强制)选项的方法 *\n * 对于小内存设备,低存储剩余的设备,emmc设备,推荐使用speed方法以减轻IO压力 *\n * Android N O P 推荐以上选项(01-04),Android Q推荐以下选项(05-08) *\n * (05).使用everything模式编译所有应用[用户] (强制) *\n * (06).使用everything模式编译所有应用[用户] *\n * (07).使用speed模式编译所有应用[用户] (强制) *\n * (08).使用speed模式编译所有应用[用户] *\n * 急救功能 *\n * (09).还原systemUI编译(quicken默认) ←推荐 *\n * (10).还原systemUI编译(speed默认) *\n * (11).还原systemUI编译(清除编译) *\n * (12).清除第三方应用编译 *\n * (13).清除系统应用编译 *\n * (14).清除第三方应用编译[quicken] *\n * (15).清除系统应用编译[quicken] * \n * (16).清除所有编译[quicken] *\n * (17).清除所有编译 *\n * Thanks: CoolApk User:后知 *\n **********************************Compile*****************************************\n !:输入01 与 1 效果是一致的.\n ''')\n try:a=int(input('您的选择>>>'))\n except:a=0\n print('执行该操作将消耗一定时间,请坐和放宽')\n start=datetime.datetime.now()\n print('当前时间: '+str(start))\n if a==1:self.adb.adb_shell().shell_cmd_compile('-m everything','-f','-a')\n if a==2:self.adb.adb_shell().shell_cmd_compile('-m everything','','-a')\n if a==3:self.adb.adb_shell().shell_cmd_compile('-m speed','-f','-a')\n if a==4:self.adb.adb_shell().shell_cmd_compile('-m speed','','-a') \n if a==9:self.adb.adb_shell().shell_cmd_compile('-m quicken','-f','com.android.systemui')\n if a==10:self.adb.adb_shell().shell_cmd_compile('-m speed','-f','com.android.systemui')\n if a==11:self.adb.shell('cmd package compile --reset com.android.systemui')\n if a==16:self.adb.adb_shell().shell_cmd_compile('-m quicken','-f','-a')\n if a==17:self.adb.shell('cmd package compile --reset -a')\n if a==5:\n self.adb.push('libshfile/compile-5.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==6:\n self.adb.push('libshfile/compile-6.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==7:\n self.adb.push('libshfile/compile-7.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==8:\n self.adb.push('libshfile/compile-8.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==12:\n self.adb.push('libshfile/compile-12.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==13:\n self.adb.push('libshfile/compile-13.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n 
self.adb.shell('rm /sdcard/temp.sh')\n if a==14:\n self.adb.push('libshfile/compile-14.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==15:\n self.adb.push('libshfile/compile-15.sh','/sdcard/temp.sh')\n self.adb.shell('su -c sh /sdcard/temp.sh')\n self.adb.shell('rm /sdcard/temp.sh')\n if a==0:return\n end=datetime.datetime.now()\n print('结束时间: '+str(end))\n print('执行用时: %s Seconds'%(end-start))\n def uninstall(self):\n apkfile=input('欲移除的程序包名(使用applist查看)>>>')\n args_=input('欲附加的参数>>>')\n if apkfile=='':\n errexit(4)\n return\n elif args_=='':\n self.adb.uninstall(apkfile)\n return\n self.adb.uninstall(apkfile,args_)\n def install(self):\n apkfile=input('欲安装的apk文件>>>')\n args_=input('欲附加的参数>>>')\n if apkfile=='':\n errexit(4)\n return\n elif args_=='':\n self.adb.install(apkfile=apkfile)\n return\n self.adb.install(apkfile,args_)\n def download(self):self.adb.reboot(5)\n def sideload(self):self.adb.reboot(4)\n def edl(self):self.adb.reboot(6)\n def rec(self):self.adb.reboot(3)\n def bl(self):self.adb.reboot(2)\n def shutdown(self):self.adb.reboot(1)\n def reboot(self):self.adb.reboot()\n def usb(self):self.adb.usb()\n def tcpipconnect(self):self.adb.tcpip()\n def devices(self):self.adb.devices()\n def kill_server(self):self.adb.kill_server()\n def start_server(self):self.adb.start_server()\n def root(self):self.adb.root()\n def shell(self):self.adb.shell()\n\nf=func_()\ndef parseinput(a=1):#1二级目录(adbmode) 2二级目录(othermode)\n global nowdevice,f,shellex\n adb=adbcommand(nowdevice)\n inputtext=input('>>>')\n #inputtext=inputtext.replace(\" \", \"\")\n global changes,github,version,builddate\n p=adbshellpyinformation.p\n adbfile=adbshellpyinformation.adbfile\n try:from adbshell_alpha import conf\n except:from adbshell import conf\n #通用指令\n if inputtext=='home':\n home()\n parseinput()\n return\n if inputtext == 'cls':\n clear()\n parseinput(a)\n return\n if inputtext == 'set':\n print('''\n **********************************Setmode*****************************************\n *setting(default,Enter) 设置参数 cls 清屏 back 回到上一菜单 exit 退出 *\n *您也可以通过手动编辑adbshell.ini来修改设置 *\n **********************************Setmode*****************************************\n ''')\n parseinput(2)\n return\n if inputtext =='exit':\n adb.kill_server()\n errexit(2)\n sys.exit(0)\n if inputtext =='environment':\n print('Version:'+version+' BuildDate:'+builddate+' Platform:'+p+' UpdateAddress:'+github+' AdbBin:'+adbfile)\n parseinput(a)\n return \n if a==1:#2级目录(adbmode)\n if inputtext == '':\n parseinput(1)\n return\n if inputtext=='kfmark':\n f.kfmark()\n parseinput(1)\n return \n if inputtext == 'icebox':\n f.icebox()\n parseinput(1)\n return\n if inputtext == 'relatedapk':\n f.relatedapk()\n parseinput(1)\n return \n if inputtext=='who':\n b=adb.s\n c=who()\n nowdevice=c\n adb=adbcommand(c)\n print('您当前的设备:'+b+'切换后的设备:'+c)\n parseinput(1)\n return\n if inputtext == 'back':\n print('E:您已处于主菜单!')\n parseinput(1)\n return\n if inputtext == 're-install':\n #重新安装\n install(p,2)\n parseinput(1)\n return\n if inputtext =='update':\n f.update()\n parseinput(0)\n return\n if inputtext =='changes':\n f.changes_()\n parseinput(1)\n return\n if inputtext=='piebridge':\n f.piebridge()\n parseinput(1)\n return\n if inputtext=='shizuku':\n f.shizuku()\n parseinput(1)\n return\n if inputtext=='push':\n f.push()\n parseinput(1)\n return\n if inputtext=='pull':\n f.pull()\n parseinput(1)\n return\n if inputtext=='screencap':\n f.screencap()\n parseinput(1)\n return\n if 
inputtext=='dumpsys':\n f.dumpsys()\n parseinput(1)\n return\n if inputtext=='settings':\n f.settings()\n parseinput(1)\n return\n if inputtext=='input':\n f.input()\n parseinput(1)\n return\n if inputtext=='windowmode':\n f.windowmode()\n parseinput(1)\n return\n if inputtext=='':\n parseinput(1)\n return\n if inputtext=='applist':\n f.applist()\n parseinput(1)\n return\n if inputtext=='clear':\n f.clear()\n parseinput(1)\n return\n if inputtext=='enable':\n f.enable()\n parseinput(1)\n return\n if inputtext=='disable':\n f.disable()\n parseinput(1)\n return\n if inputtext=='compile':\n f.compile()\n parseinput(1)\n return\n if inputtext=='uninstall':\n f.uninstall()\n parseinput(1)\n return\n if inputtext=='install':\n f.install()\n parseinput(1)\n return\n if inputtext=='download':\n adb.reboot(mode=5)\n parseinput(1)\n return\n if inputtext=='sideload':\n adb.reboot(mode=4)\n parseinput(1)\n return\n if inputtext=='bl':\n adb.reboot(mode=2)\n parseinput(1)\n return\n if inputtext=='rec':\n adb.reboot(mode=3)\n parseinput(1)\n return\n if inputtext=='shutdown':\n adb.reboot(mode=1)\n parseinput(1)\n return\n if inputtext=='reboot':#0 不带参数 1.-p 2.fastboot(bl) 3.recovery 4.sideload 5.挖煤\n adb.reboot()\n parseinput(1)\n return\n if inputtext=='usb':\n adbcommand.usb()\n parseinput(1)\n return\n if inputtext=='tcpipconnect':\n adbcommand.tcpip()\n parseinput(1)\n return\n if inputtext=='devices':\n adb.devices()\n parseinput(1)\n return\n if inputtext=='kill_server':\n adb.kill_server()\n parseinput(1)\n return\n if inputtext=='start_server':\n adb.start_server()\n parseinput(1)\n return\n if inputtext=='root':\n adb.root()\n parseinput(1)\n return\n if inputtext=='shell':\n adb.shell()\n parseinput(1)\n return\n if inputtext.lower()=='fixgithub':\n update().fixgithub()\n parseinput(1)\n return \n if inputtext=='back':\n parseinput(1)\n return\n if inputtext == 'help':\n adbshellpy_libhelper.helper().usage()\n adbshellpy_libhelper.main()\n parseinput(1)\n return\n if inputtext=='clean-data':\n print('清除adbshellpy的数据,以恢复原始安装.输入yes继续.')\n if input('>>>')=='yes':\n adb.kill_server()\n os.rmdir('adb')\n os.rmdir('__pycache__')\n os.rmdir('build-tools')\n os.remove('adbshell.ini')\n print('操作执行完成,请重新运行实例以初始化')\n input()\n sys.exit()\n if shellex=='enable':\n adb.shell(inputtext)\n parseinput(1)\n return \n if a==2:#2级目录(othermode)\n if inputtext =='back':\n parseinput(1)\n return\n if inputtext == 'setting' or inputtext == '':\n print('adbbin uselinuxpkgmanagertoinstalladb=enable [other]')\n print('[other]:'+str(conf.options('adbshell')))\n inputtext=input('欲设置的选项:[回车退出设置]>>>')\n if inputtext=='adbbin':\n inputtext=input('ADBFile:>>>')\n if os.path.exists(inputtext)== False:\n print('E:指定的ADB File不存在,请检查Path!')\n errexit(4)\n parseinput(2)\n return\n h=input('您确定设置此项吗?Y/N>>><默认为Y>')\n if h=='Y' or h=='y' or h=='':\n conf.set(\"adbshell\", \"adbfile\", inputtext)\n conf.write(open(\"adbshell.ini\", \"w\"))\n parseinput(2)\n return\n print('W:放弃设置')\n return\n if inputtext=='uselinuxpkgmanagertoinstalladb':\n if p == \"Windows\":\n print('E:该项仅对Linux生效')\n errexit(4)\n parseinput(2)\n return\n if p == \"Linux\":\n inputtext=input('Set:>>>[默认enable]')\n h=input('您确定设置此项吗?Y/N>>><默认为Y>')\n if h=='Y' or h=='y' or h=='':\n conf.set(\"adbshell\", \"uselinuxpkgmanagertoinstalladb\", inputtext)\n conf.write(open(\"adbshell.ini\", \"w\"))\n parseinput(2)\n return\n print('W:放弃设置')\n return\n if inputtext=='':\n parseinput(2)\n return\n #otherSet\n inputtext_=input('SET:>>>')\n 
h=input('您确定设置此项吗?Y/N>>><默认为Y>')\n if h=='Y' or h=='y' or h=='':\n conf.set(\"adbshell\", inputtext, inputtext_)\n conf.write(open(\"adbshell.ini\", \"w\"))\n parseinput(2)\n return\n parseinput(1)\n return\n errexit(2)\n print('W :未知指令')\n parseinput(a)\n return\n ","sub_path":"adbshellpy_home.py","file_name":"adbshellpy_home.py","file_ext":"py","file_size_in_byte":28628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"615829375","text":"#\n# Tests for the module: uttriage.sfjenkins\n#\nimport logging\nimport mock\nimport os.path\n\nfrom six import StringIO\n\n# From the PyPi package(s): pytest pynumparser python-jenkin\nimport pytest\njenkins = pytest.importorskip(\"jenkins\")\npynumparser = pytest.importorskip(\"pynumparser\")\n\n# Test utilities\nfrom .tstutils import UrlFileShim\nfrom .tstutils import TempDirTestCase\nfrom .tstutils import OPENER_PATH\nfrom .tstutils import parser_error\nfrom .tstutils import ParserError\nfrom .tstutils import golden_file\n\n# Code under test\nfrom uttriage import triage_master\nfrom uttriage import utils\nfrom uttriage import bugreport\n\n\n@mock.patch('uttriage.bugreport.FB_DELAY', 0.1)\n@mock.patch('uttriage.bugreport.FB_RETRIES', 3)\n@mock.patch('uttriage.bugreport.FB_FALLBACK', 1.0)\nclass TestTriageMaster(TempDirTestCase):\n\n FJOB = 'fluorine_unittests'\n OJOB = 'oxygen-patch4_unittests'\n FARGS1 = ('1231415', '1232040-1232045', '1232171', '1232476', '1232620', '1232693')\n OARGS1 = ('94187', '94193', '94587', '94605', '95236', '95432', '95726', '96164')\n\n @classmethod\n def _expand(cls, *args):\n return list(pynumparser.NumberSequence().parse(\",\".join(args)))\n\n def copy_sample_config(self):\n # Test that the config sample is okay.\n src = os.path.join(os.path.dirname(utils.__file__), 'triage_master.cfg.sample')\n trg = os.path.join(self.temp_dir, 'triage_master.cfg')\n with open(src, 'rb') as infile:\n with open(trg, 'wb') as outfile:\n outfile.write(infile.read())\n\n @mock.patch('argparse.ArgumentParser.error', parser_error)\n def test_parser_errors(self):\n logging.info('Config Directory: \"%s\"', utils.CONFIG_DIRECTORY)\n with self.assertRaises(ParserError) as raised:\n parser, opts = triage_master.parse_args(['progname'])\n assert ('too few arguments', ) == raised.exception.args\n\n def test_build_list(self):\n self.copy_sample_config()\n arguments = (\"triage_master.py -PUN -BD\".split() + [self.FJOB] + list(self.FARGS1) +\n [self.OJOB] + list(self.OARGS1))\n parser, opts = triage_master.parse_args(arguments)\n builds = triage_master.determine_builds(opts, parser)\n logging.info(\"Builds: %s\", builds)\n expected = {self.FJOB: self._expand(*self.FARGS1), self.OJOB: self._expand(*self.OARGS1)}\n assert expected == dict(builds)\n\n @mock.patch('jenkins.Jenkins.jenkins_open', UrlFileShim('triage', 2))\n @mock.patch('argparse.ArgumentParser.error', parser_error)\n @mock.patch(OPENER_PATH, UrlFileShim('triage', 3, check_requests=True))\n def test_dryrun1(self):\n self.copy_sample_config()\n arguments = (\"triage_master.py -PUBD\".split() + [self.FJOB] + list(self.FARGS1) +\n [self.OJOB] + list(self.OARGS1))\n parser, opts = triage_master.parse_args(arguments)\n builds = triage_master.determine_builds(opts, parser)\n capture = StringIO()\n report = utils.Reporter(capture)\n dt = bugreport.DefectTracker()\n actions = triage_master.analyze_job_outputs(dt, opts, parser, builds, report)\n\n expect = ['BasicHttpServerCase.JsonFile/fluorine',\n 
'CloningIntegrationFastFailCase.MigrateDestPrimary/fluorine',\n 'ConnectionMonitorCase.MultipleFailure/oxygen',\n 'ConnectionMonitorCase.MultipleTimeout/oxygen',\n 'ConnectionMonitorCase.SuccessAndFailure/oxygen']\n skeys = sorted(actions.keys())\n assert expect == skeys\n assert [1, 1, 1, 1, 2] == [len(actions[k]) for k in skeys]\n\n triage_master.summarize_failures(opts, actions, report)\n bugdata = triage_master.query_existing_bugs(dt, opts, parser, builds, actions, report)\n triage_master.update_or_create_bugs(dt, opts, parser, builds, bugdata, report)\n triage_master.send_email_and_report(opts, report, builds)\n\n email = report.get_email_summary()\n expected = golden_file('triage/dryrun1-email.txt')\n assert expected == email\n\n output = capture.getvalue()\n expected = golden_file('triage/dryrun1-report.txt')\n print(\"EXP = <<<<<{}\\n>>>>>\".format(expected))\n print(\"OUT = <<<<<{}\\n>>>>>\".format(output))\n assert expected == output\n\n @mock.patch('jenkins.Jenkins.jenkins_open', UrlFileShim('triage', 2))\n @mock.patch('argparse.ArgumentParser.error', parser_error)\n @mock.patch(OPENER_PATH, UrlFileShim('triage', 3, 200, check_requests=True))\n def test_dryrun2(self):\n self.copy_sample_config()\n arguments = \"triage_master.py --doit --keep -PBDt oxygen_unittests 837216 837218\".split()\n\n parser, opts = triage_master.parse_args(arguments)\n builds = triage_master.determine_builds(opts, parser)\n capture = StringIO()\n report = utils.Reporter(capture)\n dt = bugreport.DefectTracker()\n actions = triage_master.analyze_job_outputs(dt, opts, parser, builds, report)\n results = sorted(actions.keys())\n print(\"RESULTS: \" + str(results))\n expected = golden_file('triage/dryrun2-actions.keys', asJSON=True)\n assert expected == results\n\n triage_master.summarize_failures(opts, actions, report)\n bugdata = triage_master.query_existing_bugs(dt, opts, parser, builds, actions, report)\n\n triage_master.update_or_create_bugs(dt, opts, parser, builds, bugdata, report)\n triage_master.send_email_and_report(opts, report, builds)\n\n email = report.get_email_summary()\n expected = golden_file('triage/dryrun2-email.txt')\n assert expected == email\n\n output = capture.getvalue()\n print(\"OUTPUT = @@@<<{}>>@@@\".format(output))\n expected = golden_file('triage/dryrun2-report.txt')\n assert expected == output\n","sub_path":"matilda_scripts/unittests/test_uttriage/test_triage_master.py","file_name":"test_triage_master.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"594184430","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_restful import Api\nfrom flask_cors import CORS\nimport os\n\napp = Flask(__name__)\napi = Api(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + os.getenv(\"MYSQL_DATABASE_USER\") + ':' + os.getenv(\n \"MYSQL_DATABASE_PASSWORD\") + '@' + os.getenv(\"MYSQL_DATABASE_HOST\") + '/advertisement_manager'\ndb = SQLAlchemy(app)\ndb.Model.metadata.reflect(bind=db.engine,schema=\"advertisement_manager\")\n\nCORS(app)\ncors = CORS(app, resources={\n r\"/*\": {\n \"origins\": \"*\"\n }\n})\n\nclass AdvertisementsModel(db.Model):\n __table__ = db.Model.metadata.tables['advertisement_manager' + '.advertisements']\n\nfrom advertisements import Advertisements\n\napi.add_resource(Advertisements, 
'/advertisements')","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"109080445","text":"# -*- coding: utf8 -*-\n\nimport webapp2\n\nfrom workers.mm.atm_worker import AtmTaskWorker\nfrom workers.mm.bank_worker import BankTaskWorker\n\n__all__ = (\n 'application',\n)\n\n\nBASE_URL = '/workers/mm'\n\napplication = webapp2.WSGIApplication([\n (BASE_URL + '/atm/', AtmTaskWorker),\n (BASE_URL + '/bank/', BankTaskWorker),\n], debug=False)\n","sub_path":"workers/mm/mm.py","file_name":"mm.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"83749703","text":"numbers = input(\"Podaj Liczby, odzielajac je przecinkiem: \")\na = numbers.split(\",\")\nresult = []\nfor i in a:\n i = int(i)\n k = 1\n for j in range(1, i + 1):\n k = k * j\n result.append(str(k))\n\nresult = \",\".join(result)\nprint(result)\n","sub_path":"exercises/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"540769645","text":"import threading\r\nimport time\r\nimport gspread\r\nimport httplib2\r\nimport os\r\nimport time\r\nimport concurrent.futures\r\nimport traceback\r\n\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nfrom apiclient import discovery\r\nfrom oauth2client import client\r\nfrom oauth2client import tools\r\nfrom oauth2client.file import Storage\r\n\r\nauthorization_thread_lock = threading.Lock()\r\ndata_thread_lock = threading.Lock()\r\n\r\nsave_file = open(\"failedCells.txt\", \"w+\")\r\nx = 0\r\n\r\nfailed_cells = []\r\n\r\ndef authenticate_with_sheets():\r\n scope = ['https://spreadsheets.google.com/feeds',\r\n 'https://www.googleapis.com/auth/drive']\r\n creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\r\n client = gspread.authorize(creds)\r\n\r\n http = creds.authorize(httplib2.Http())\r\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\r\n 'version=v4')\r\n service = discovery.build('sheets', 'v4', http=http,\r\n discoveryServiceUrl=discoveryUrl)\r\n\r\n spread_sheet = client.open('CME Dairy Futures History')\r\n\r\n authenticate_with_sheets.organized_sheet = spread_sheet.worksheet('DRY WHEY ORGANIZED')\r\n authenticate_with_sheets.dry_whey_sheet = spread_sheet.worksheet('DRY WHEY NO APO')\r\n\r\ndef get_date(cell_col, cell_row, sheet):\r\n cell_val = sheet.cell(cell_row, cell_col).value\r\n\r\n year = sheet.cell(3, cell_col).value\r\n\r\n if cell_col <= 12:\r\n day_month = sheet.cell(cell_row, 1).value\r\n elif cell_col >= 15 and cell_col <= 25:\r\n day_month = sheet.cell(cell_row, 14).value\r\n elif cell_col >= 28 and cell_col <= 38:\r\n day_month = sheet.cell(cell_row, 27).value\r\n elif cell_col >= 41 and cell_col <= 52:\r\n day_month = sheet.cell(cell_row, 40).value\r\n elif cell_col >= 55 and cell_col <= 66:\r\n day_month = sheet.cell(cell_row, 54).value\r\n elif cell_col >= 69 and cell_col <= 80:\r\n day_month = sheet.cell(cell_row, 68).value\r\n elif cell_col >= 83 and cell_col <= 94:\r\n day_month = sheet.cell(cell_row, 82).value\r\n elif cell_col >= 97 and cell_col <= 108:\r\n day_month = sheet.cell(cell_row, 96).value\r\n elif cell_col >= 111 and cell_col <= 122:\r\n day_month = sheet.cell(cell_row, 110).value\r\n elif cell_col >= 125 and cell_col <= 136:\r\n day_month 
= sheet.cell(cell_row, 124).value\r\n elif cell_col >= 139 and cell_col <= 150:\r\n day_month = sheet.cell(cell_row, 138).value\r\n elif cell_col >= 153 and cell_col <= 163:\r\n day_month = sheet.cell(cell_row, 152).value\r\n\r\n date = str(day_month) + \"-\" + str(year)\r\n return_val_with_date = date + \", \" + cell_val\r\n\r\n return date\r\n\r\ndef get_contract(cell_col, cell_row, sheet):\r\n\r\n contract = \"N/A\"\r\n column = None\r\n\r\n if cell_col <= 12:\r\n contract = \"jan\"\r\n column = 2\r\n elif cell_col >= 15 and cell_col <= 25:\r\n contract = \"feb\"\r\n column = 3\r\n elif cell_col >= 28 and cell_col <= 38:\r\n contract = \"mar\"\r\n column = 4\r\n elif cell_col >= 41 and cell_col <= 52:\r\n contract = \"apr\"\r\n column = 5\r\n elif cell_col >= 55 and cell_col <= 66:\r\n contract = \"may\"\r\n column = 6\r\n elif cell_col >= 69 and cell_col <= 80:\r\n contract = \"jun\"\r\n column = 7\r\n elif cell_col >= 83 and cell_col <= 94:\r\n contract = \"jul\"\r\n column = 8\r\n elif cell_col >= 97 and cell_col <= 108:\r\n contract = \"aug\"\r\n column = 9\r\n elif cell_col >= 111 and cell_col <= 122:\r\n contract = \"sep\"\r\n column = 10\r\n elif cell_col >= 125 and cell_col <= 136:\r\n contract = \"oct\"\r\n column = 11\r\n elif cell_col >= 139 and cell_col <= 150:\r\n contract = \"nov\"\r\n column = 12\r\n elif cell_col >= 153 and cell_col <= 163:\r\n contract = \"dec\"\r\n column = 13\r\n\r\n return column\r\n\r\ndef align_data(i):\r\n\r\n try:\r\n #with data_thread_lock:\r\n contract = get_contract(i.col, i.row, authenticate_with_sheets.dry_whey_sheet)\r\n if not i.value == \"\" and not contract == 0:\r\n new_col = contract\r\n new_date = get_date(i.col, i.row, authenticate_with_sheets.dry_whey_sheet)\r\n\r\n else:\r\n print(\"\\nWorker: \\t{}\\nEmpty cell skipped: \".format(threading.current_thread().name) + str(i.col) + \" \" + str(i.row) + \"\\n\")\r\n return\r\n\r\n except Exception:\r\n print (\"Got an error.\")\r\n traceback.print_exc()\r\n save_file.write(str(i) + \"\\n\")\r\n failed_cells.append(str(i))\r\n\r\n else:\r\n try:\r\n corresponding_cell = authenticate_with_sheets.organized_sheet.find(new_date)\r\n\r\n except Exception:\r\n print(\"\\n\\nWorker: \\t{}\\nError searching sheet. Gaining new authentication.\\n\\n\\n\".format(threading.current_thread().name))\r\n #with authorization_thread_lock:\r\n authenticate_with_sheets()\r\n save_file.write(str(i) + \"\\n\")\r\n failed_cells.append(str(i))\r\n\r\n else:\r\n new_row = corresponding_cell.row\r\n\r\n print(\"Worker: \\t{}\\nColumn: \\t{}\\nRow: \\t\\t{}\\nNew Date: \\t{}\\nNew Contract: \\t{}\\nNew Value: \\t{}\\n\".format(threading.current_thread().name, str(new_col), str(new_row), str(new_date), str(contract), str(i.value)))\r\n\r\n try:\r\n authenticate_with_sheets.organized_sheet.update_cell(new_row, new_col, i.value)\r\n\r\n except Exception:\r\n print(\"\\n\\nWorker: \\t{}\\nError updating sheet. 
Gaining new authentication.\\n\\n\\n\".format(threading.current_thread().name))\r\n #with authorization_thread_lock:\r\n authenticate_with_sheets()\r\n save_file.write(str(i) + \"\\n\")\r\n failed_cells.append(str(i))\r\n\r\ndef execute():\r\n with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\r\n result = executor.map(align_data, authenticate_with_sheets.dry_whey_sheet.range('B121:FG366'))\r\n","sub_path":"VaultAligner.py","file_name":"VaultAligner.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"95860048","text":"\nimport cherrypy\n\nfrom confighelper import ConfigHelper\n\ndef render_standard_response(template_name):\n template = cherrypy.request.app.jinjaEnv.get_template(template_name)\n \n # Assemble the template context\n args = {}\n \n if ConfigHelper.use_minified_scripts():\n args['use_minified_js'] = True\n \n return template.render(**args)\n","sub_path":"src/www/lib/pagehelper.py","file_name":"pagehelper.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"536106138","text":"import ROOT\n\nimport os\n\nfrom FinalStateAnalysis.PlotTools.megautil import MetaTree\nfrom FinalStateAnalysis.PlotTools.MegaBase import MegaBase\n\nmeta = MetaTree()\n\nbase_selections = [\n meta.muon1Pt > 20,\n meta.muon2Pt > 10,\n\n meta.tauDecayFinding > 0.5,\n meta.tauPt > 20,\n\n #meta.mu17ele8 > 0.5,\n meta.tauAbsEta < 2.3,\n meta.muon1AbsEta < 2.1,\n meta.muon2AbsEta < 2.1,\n\n meta.muGlbIsoVetoPt10 < 1,\n meta.eVetoCicTightIso < 1,\n meta.bjetVeto < 1,\n meta.tauVetoPt20 < 1,\n\n meta.muon1PixHits > 0,\n meta.muon2PixHits > 0,\n meta.muon1JetBtag < 3.3,\n meta.muon2JetBtag < 3.3,\n\n meta.muon2DZ < 0.2,\n meta.muon1DZ < 0.2,\n meta.tauDZ < 0.2,\n\n meta.tauJetBtag < 3.3,\n meta.tauAntiElectronMVA > 0.5,\n meta.tauAntiElectronMedium > 0.5,\n meta.tauElecOverlap < 0.5,\n meta.tauAntiMuonTight > 0.5,\n meta.tauMuOverlap < 0.5,\n]\n\n\nhadronic_tau_id = [\n meta.tauLooseIso > 0.5,\n]\n\nmuon2_id = [\n meta.muon2RelPFIsoDB < 0.3,\n meta.muon2WWID > 0.5,\n]\n\nmuon1_id = [\n meta.muon1RelPFIsoDB < 0.3,\n meta.muon1WWID > 0.5,\n]\n\nhistograms = [\n (lambda x: x.muon1Pt, 'muon1Pt', 'muon1 pt', 100, 0, 100),\n (lambda x: x.muon1AbsEta, 'muon1AbsEta', 'muon1 |#eta|', 100, 0, 100),\n]\n\ndef muon1_fake_weight(x):\n return 1\ndef muon2_fake_weight(x):\n return 1\n\nclass AnalyzeMMT(MegaBase):\n\n def __init__(self, tree, output, **kwargs):\n super(AnalyzeMMT, self).__init__(tree, output, **kwargs)\n for histogram in histograms:\n self.book('muon1_fakes', *histogram[1:])\n self.book('muon2_fakes', *histogram[1:])\n self.book('double_fakes', *histogram[1:])\n self.book('triple_fakes', *histogram[1:])\n # Histograms w/o weights\n self.book('muon1_fakes_nowt', *histogram[1:])\n self.book('muon2_fakes_nowt', *histogram[1:])\n self.book('double_fakes_nowt', *histogram[1:])\n self.book('triple_fakes_nowt', *histogram[1:])\n self.book('final', *histogram[1:])\n self.disable_branch('*')\n for b in meta.active_branches():\n self.enable_branch(b)\n\n def process(self, entry):\n tree = self.tree\n read = tree.GetEntry(entry)\n\n # Check if we pass the base selection\n if not all(select(tree) for select in base_selections):\n return True\n\n # figure out which objects pass\n passes_tau_id = all(select(tree) for select in hadronic_tau_id)\n passes_muon1_id = all(select(tree) for select in muon1_id)\n 
passes_muon2_id = all(select(tree) for select in muon2_id)\n\n category = (passes_muon1_id, passes_muon2_id, passes_tau_id)\n\n if category == (True, True, True):\n for histo in histograms:\n value = histo[0](tree)\n self.histograms[os.path.join('final', histo[1])].Fill(value)\n elif category == (False, True, True):\n for histo in histograms:\n value = histo[0](tree)\n self.histograms[\n os.path.join('muon1_fakes_nowt', histo[1])].Fill(value)\n weight = muon1_fake_weight(tree.muon1Pt)\n self.histograms[\n os.path.join('muon1_fakes', histo[1])].Fill(value, weight)\n elif category == (True, False, True):\n for histo in histograms:\n value = histo[0](tree)\n self.histograms[\n os.path.join('muon2_fakes_nowt', histo[1])].Fill(value)\n weight = muon2_fake_weight(tree.muon2Pt)\n self.histograms[\n os.path.join('muon2_fakes', histo[1])].Fill(value, weight)\n elif category == (False, False, True):\n for histo in histograms:\n value = histo[0](tree)\n self.histograms[\n os.path.join('double_fakes_nowt', histo[1])].Fill(value)\n weight = muon2_fake_weight(tree.muon2Pt)*muon1_fake_weight(tree.muon1Pt)\n self.histograms[\n os.path.join('double_fakes', histo[1])].Fill(value, weight)\n\n return True\n\n def finish(self):\n self.write_histos()\n","sub_path":"Higgs/test/zh/AnalyzeMMT.py","file_name":"AnalyzeMMT.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"351062007","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport gym\nfrom policy_gradient import PolicyGradient\nimport matplotlib.pyplot as plt\n\nDISPLAY_REWARD_THRESHOLD = 400 # renders environment if total episode reward is greater then this threshold\nRENDER = False # rendering wastes time\n\n\nenv = gym.make('CartPole-v0')\nenv.seed(1) # reproducible, general Policy gradient has high variance\nenv = env.unwrapped\n\nprint(env.action_space)\nprint(env.observation_space)\nprint(env.observation_space.high)\nprint(env.observation_space.low)\n\t\t\t\t # s_dim,\n\t\t\t\t # a_dim,\n\t\t\t\t # learning_rate = 0.01,\n\t\t\t\t # reward_decay = 0.95,\n\t\t\t\t # output_graph = False\n\nRL = PolicyGradient(\n\t\ts_dim = env.observation_space.shape[0],\n\t\ta_dim = env.action_space.n,\n\t\tlearning_rate = 0.02,\n\t\treward_decay = 0.99,\n\t\t#output_graph = True\n\t)\n\nfor i_epsiode in range(3000):\n\n\ts = env.reset()\n\twhile True:\n\t\tif RENDER: env.render()\n\n\t\ta = RL.choose_action(s)\n\t\ts_,r,done,info = env.step(a)\n\n\t\tRL.store_transition(s,a,r)\n\n\t\tif done:\n\t\t\tep_rs_sum = sum(RL.ep_rs)\n\n\t\t\tif 'running_reward' not in globals():\n\t\t\t\trunning_reward = ep_rs_sum\n\t\t\telse:\n\t\t\t\trunning_reward = running_reward * 0.99 + ep_rs_sum * 0.01\n\n\t\t\tif running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True\n\t\t\tprint('episode:',i_epsiode,\"reward:\",int(running_reward))\n\n\t\t\tvt = RL.learn()\n\n\t\t\tif i_epsiode == 0:\n\t\t\t\tplt.plot(vt)\n\t\t\t\tplt.xlabel('episode steps')\n\t\t\t\tplt.ylabel('normalized state-action value')\n\t\t\t\tplt.show()\n\t\t\tbreak\n\n\t\ts = s_\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Policy Gradient/run_CartPole.py","file_name":"run_CartPole.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"133847698","text":"import pandas as pd\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nimport plotly.graph_objs as 
go\r\n\r\nopen_files_df = pd.read_pickle('catsweb.pickle')\r\nstepID = open_files_df['Current Step'].unique().tolist()\r\n\r\ndef Header(app):\r\n return html.Div([get_header(app), html.Br([]),])\r\n\r\n\r\ndef get_header(app):\r\n header = html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.Img(\r\n src=app.get_asset_url(\"tcs-logo.png\"),\r\n className=\"logo\",\r\n ),\r\n html.Img(\r\n src=app.get_asset_url(\"jnj-logo.png\"),\r\n className=\"logo\",\r\n style={'height': '30px',\r\n 'width': 'auto',\r\n 'margin': '25px 25px',\r\n 'align': \"right\",\r\n }\r\n ),\r\n ],\r\n className=\"row\",\r\n ),\r\n html.Div(\r\n [\r\n html.Div(\r\n [html.H4(\"TATA Complaints Handling Dashboard\")],\r\n className=\"seven columns main-title\",\r\n ),\r\n ],\r\n className=\"twelve columns\",\r\n style={\"padding-left\": \"0\"},\r\n ),\r\n ],\r\n className=\"row\",\r\n )\r\n return header\r\n\r\n\r\n\r\ndef make_dash_table(dataframe):\r\n return [html.Tr([html.Th(col) for col in dataframe.columns])] + [html.Tr([html.Td(dataframe.iloc[i][col]) for col in dataframe.columns]) for i in range(len(dataframe))]\r\n\r\n\r\ndef stepCount(product, step_count_table):\r\n step_data = dict()\r\n stepID.sort()\r\n\r\n total = 0\r\n\r\n for each_step in stepID:\r\n step_count = step_count_table[step_count_table['Current Step'] == each_step]\r\n step_data.update({'Step '+str(each_step) : step_count['Current Step'].count()})\r\n\r\n for key, value in step_data.items():\r\n total += value\r\n\r\n step_data.update({'Total': total})\r\n step_data['Product'] = product\r\n #step_data.update({'Product':product})\r\n\r\n\r\n return pd.DataFrame(step_data, index=[0])\r\n\r\ndef createOpenTable(each_open_files_df):\r\n product_list = each_open_files_df['Product Line'].unique().tolist()\r\n\r\n product_tables = dict()\r\n for product in product_list:\r\n product_tables.update({product : each_open_files_df[each_open_files_df['Product Line'] == product]})\r\n\r\n\r\n open_files_dict = dict()\r\n for product, prod_df in product_tables.items():\r\n open_files_dict.update({product : stepCount(product, prod_df)})\r\n\r\n\r\n open_files_table = pd.DataFrame()\r\n for product, product_dict in open_files_dict.items():\r\n open_files_table = open_files_table.append(product_dict, ignore_index=True)\r\n \r\n\r\n #open_files_table.set_index('Product', inplace=True)\r\n cols = open_files_table.columns.tolist()\r\n cols = cols[-1:] + cols[:-1]\r\n open_files_table = open_files_table[cols]\r\n\r\n rows_list = []\r\n\r\n for each_col in cols:\r\n rows_list.append(open_files_table[each_col].sum())\r\n\r\n rows_list[0] = 'Total'\r\n\r\n open_files_table.loc[-1] = rows_list\r\n open_files_table.index = open_files_table.index+1\r\n #open_files_table = open_files_table.sort_index()\r\n\r\n return make_dash_table(open_files_table)\r\n\r\n\r\n\r\ndef RegionCount(product, product_dataframe):\r\n region_data = dict()\r\n regions = pd.read_pickle('direct_closed.pickle')['Region'].unique().tolist()\r\n\r\n total = 0\r\n\r\n for region in regions:\r\n region_count = product_dataframe[product_dataframe['Region'] == region]\r\n region_data.update({region : int(region_count['Region'].count())})\r\n\r\n for key, value in region_data.items():\r\n total += value\r\n\r\n region_data.update({'Total': total})\r\n region_data['Product'] = product\r\n\r\n return pd.DataFrame(region_data, index=[0])\r\n\r\n\r\n\r\ndef WeekCount(product, product_dataframe):\r\n index = 0\r\n total = 0\r\n week_data = dict()\r\n product_dataframe['Date Complaint Closed'] = 
pd.to_datetime(product_dataframe['Date Complaint Closed'])\r\n gr = product_dataframe.groupby(pd.Grouper(key='Date Complaint Closed',freq='W'))\r\n \r\n for name, group in gr:\r\n index+=1\r\n week_data.update({'Week '+str(index) : group['Complaint Number'].count()})\r\n\r\n for key, value in week_data.items():\r\n total += value\r\n\r\n week_data.update({'Total': total})\r\n week_data['Product'] = product\r\n\r\n return pd.DataFrame(week_data, index=[0])\r\n\r\n\r\ndef createClosedTable(dataframe, xaxis_type):\r\n product_list = dataframe['Product Line'].unique().tolist()\r\n\r\n product_tables = dict()\r\n for product in product_list:\r\n product_tables.update({product : dataframe[dataframe['Product Line'] == product]})\r\n\r\n cols = []\r\n\r\n closed_files_dict = dict()\r\n if xaxis_type == 'Week':\r\n for product, prod_df in product_tables.items():\r\n week_count_df = WeekCount(product, prod_df)\r\n\r\n if len(week_count_df.columns.tolist()) >= len(cols): \r\n cols = week_count_df.columns.tolist()\r\n\r\n elif len(week_count_df.columns.tolist()) < len(cols):\r\n a = cols\r\n b = set(week_count_df.columns.tolist())\r\n indexes = [i for i, item in enumerate(a) if item not in b]\r\n for i in indexes:\r\n week_count_df.insert(len(week_count_df.columns.tolist())-2, a[i], 0)\r\n\r\n\r\n closed_files_dict.update({product : week_count_df})\r\n\r\n\r\n elif xaxis_type == 'Region':\r\n for product, prod_df in product_tables.items():\r\n closed_files_dict.update({product : RegionCount(product, prod_df)})\r\n\r\n\r\n closed_files_table = pd.DataFrame()\r\n for product, product_dict in closed_files_dict.items():\r\n closed_files_table = closed_files_table.append(product_dict, ignore_index=True)\r\n\r\n cols = closed_files_table.columns.tolist()\r\n cols = cols[-1:] + cols[:-1]\r\n closed_files_table = closed_files_table[cols]\r\n\r\n rows_list = []\r\n\r\n for each_col in cols:\r\n rows_list.append(closed_files_table[each_col].sum())\r\n\r\n rows_list[0] = 'Total'\r\n\r\n closed_files_table.loc[-1] = rows_list\r\n closed_files_table.index = closed_files_table.index+1\r\n\r\n return make_dash_table(closed_files_table)\r\n\r\n\r\n\r\ndef getAgedData(dataframe):\r\n dataframe['Received date'] = pd.to_datetime(dataframe['Received date']).dt.date\r\n dataframe['Age'] = pd.datetime.now().date() - dataframe['Received date'] \r\n\r\n US_df = dataframe[dataframe['Region'] == 'USA']\r\n OUS_df = dataframe[dataframe['Region'] != 'USA']\r\n\r\n US_AGED_df = US_df[US_df['Age'] > pd.Timedelta(60,'D')]\r\n US_NON_AGED_df = US_df[US_df['Age'] <= pd.Timedelta(60,'D')]\r\n\r\n OUS_AGED_df = OUS_df[OUS_df['Age'] > pd.Timedelta(90,'D')]\r\n OUS_NON_AGED_df = OUS_df[OUS_df['Age'] <= pd.Timedelta(90,'D')]\r\n\r\n return (US_AGED_df, US_NON_AGED_df, OUS_AGED_df, OUS_NON_AGED_df)\r\n\r\ndef agedFilesPieChart(dataframe):\r\n (US_AGED_df, US_NON_AGED_df, OUS_AGED_df, OUS_NON_AGED_df) = getAgedData(dataframe)\r\n\r\n types = ['Aged', 'Non-Aged'] \r\n US_values = [US_AGED_df['Age'].count(), US_NON_AGED_df['Age'].count()]\r\n OUS_values = [OUS_AGED_df['Age'].count(), OUS_NON_AGED_df['Age'].count()]\r\n\r\n trace1 = go.Pie(\r\n labels=types,\r\n values=US_values,\r\n marker={\"colors\": [\"#264e86\", \"#0074e4\",]},\r\n )\r\n\r\n trace2 = go.Pie(\r\n labels=types,\r\n values=OUS_values,\r\n marker={\"colors\": [\"#264e86\", \"#0074e4\",]},\r\n )\r\n\r\n layout1 = dict(margin=dict(l=10, r=0, t=40, b=0,), legend=dict(orientation=\"h\"), title='US Files', height=250)\r\n layout2 = dict(margin=dict(l=10, r=0, t=40, b=0), 
legend=dict(orientation=\"h\"), title='OUS Files', height=250)\r\n\r\n return dict({\"data\": [trace1], \"layout\": layout1}), dict({\"data\": [trace2], \"layout\": layout2})\r\n\r\n\r\n\r\ndef openFilesGraph(dataframe, xaxis_type):\r\n x_axis_list = dataframe[xaxis_type].unique().tolist()\r\n count = []\r\n\r\n for item in x_axis_list:\r\n dff = dataframe[dataframe[xaxis_type] == item]\r\n count.append(dff[xaxis_type].count())\r\n\r\n\r\n data = [\r\n go.Bar(\r\n x=x_axis_list,\r\n y=count,\r\n marker={\r\n \"color\": \"#97151c\",\r\n \"line\": {\r\n \"color\": \"rgb(255, 255, 255)\",\r\n \"width\": 2,\r\n },\r\n },\r\n name=\"Open Files\",\r\n ),\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n bargap=0.35,\r\n font={\"family\": \"Raleway\", \"size\": 10},\r\n height=200,\r\n hovermode=\"closest\",\r\n legend={\r\n \"x\": -0.0228945952895,\r\n \"y\": -0.189563896463,\r\n \"orientation\": \"h\",\r\n \"yanchor\": \"top\",\r\n },\r\n margin={\r\n \"r\": 0,\r\n \"t\": 20,\r\n \"b\": 10,\r\n \"l\": 10,\r\n },\r\n showlegend=True,\r\n title=\"\",\r\n width=330,\r\n xaxis={\r\n \"autorange\": True,\r\n \"range\": [-0.5, 4.5],\r\n \"showline\": True,\r\n \"title\": \"\",\r\n \"type\": \"category\",\r\n },\r\n yaxis={\r\n \"autorange\": True,\r\n \"range\": [0, 22.9789473684],\r\n \"showgrid\": True,\r\n \"showline\": True,\r\n \"title\": \"\",\r\n \"type\": \"linear\",\r\n \"zeroline\": False,\r\n },\r\n )\r\n return dict({\"data\" : data,\"layout\" : layout})\r\n\r\n\r\n\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"58619773","text":"import os\nimport sys\n\ncurrent_directory = sys.path[0]\nabove_directory, current_folder_name = os.path.split(current_directory)\nsys.path.insert(1, above_directory)\n\nimport unittest\nfrom unittest.mock import Mock\nfrom unittest import TestCase, mock\n\nfrom BreakHisOrganizer import BreakHisOrganizer\nfrom ReinhardNormalizer import ReinhardNormalizer\n\n\n\n@mock.patch(\"ReinhardNormalizer.ReinhardNormalizer\")\ndef mock_ReinhardNormalizer(mock_class):\n\n print(mock_class.return_value)\n \nclass TestBreakHisOrganizer(TestCase):\n def test_if_constructible(self):\n break_his_organizer = BreakHisOrganizer('.', None, None, None, None, None)\n self.assertIsNotNone(break_his_organizer)\n \n def test_how_dataframe_is_loaded(self):\n dataset_root = 'D:/Datasets/masf_organized_breakhis_dataset/Best_after_normalization/'\n reinhard_normalizer = None\n normalization_ref_image = None\n path_extension = '*/*/*'\n image_extension = '.png'\n \n break_his_organizer_new_dataset = BreakHisOrganizer(dataset_root,\n reinhard_normalizer,\n normalization_ref_image,\n path_extension,\n image_extension)\n \n break_his_organizer_new_dataset.build_organized_dataframe()\n \n # def test_if_imwrite_is_called(self):\n # with mock.patch('BreakHisOrganizer.cv2') as mocked_cv2:\n # break_his_organizer = BreakHisOrganizer('.', None, None, None, None, None)\n # break_his_organizer.save_dataset()\n # mocked_cv2.imwrite.assert_called_once()\n \n # def test_if_Reinhard_is_injected_correctly(self):\n # with mock.patch('ReinhardNormalizer.ReinhardNormalizer') as mocked_ReinhardNormalizer:\n # break_his_organizer = BreakHisOrganizer('.', mocked_ReinhardNormalizer, None, None, None, None)\n # break_his_organizer.save_dataset()\n # mocked_ReinhardNormalizer.fit.assert_called_once()\n \n \n \n\nif __name__=='__main__':\n 
unittest.main()","sub_path":"Malignancy_binary_classification/breakhis_data_preparator/BreakHisOrganizer/BreakHisOrganizer_test.py","file_name":"BreakHisOrganizer_test.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6200452","text":"import os\nimport numpy as np\n\nenv_MEDIA_DIR = None\nMEDIA_DIR = \"#ERROR#\"\n\ntry:\n env_MEDIA_DIR = os.getenv(\"MEDIA_DIR\")\nexcept NameError:\n try:\n env_MEDIA_DIR = os.environ['MEDIA_DIR']\n except KeyError:\n pass\n\nif not (env_MEDIA_DIR is None):\n MEDIA_DIR = env_MEDIA_DIR\nelif os.path.exists(\"media_dir.txt\"):\n with open(\"media_dir.txt\", 'rU') as media_file:\n MEDIA_DIR = media_file.readline().strip()\nelse:\n MEDIA_DIR = os.path.join(\n os.path.expanduser('~'),\n \"animation/manim37/output\"\n )\n\nif not os.path.exists(MEDIA_DIR):\n raise Exception(\"\"\"\n Redefine MEDIA_DIR by changing the MEDIA_DIR\n environment constant or by changing\n media_dir.txt to point to a valid directory\n where movies and images will be written\n \"\"\")\n\nwith open(\"media_dir.txt\", 'w') as media_file:\n media_file.write(MEDIA_DIR)\n#\n\nLOW_QUALITY_FRAME_DURATION = 1. / 15\nMEDIUM_QUALITY_FRAME_DURATION = 1. / 30\nPRODUCTION_QUALITY_FRAME_DURATION = 1. / 60\n\n# There might be other configuration than pixel shape later...\nPRODUCTION_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1440,\n \"pixel_width\": 2560,\n}\n\nHIGH_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1080,\n \"pixel_width\": 1920,\n}\n\nMEDIUM_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 720,\n \"pixel_width\": 1280,\n}\n\nLOW_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 480,\n \"pixel_width\": 854,\n}\n\nDEFAULT_PIXEL_HEIGHT = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_height\"]\nDEFAULT_PIXEL_WIDTH = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_width\"]\n\nDEFAULT_POINT_DENSITY_2D = 25\nDEFAULT_POINT_DENSITY_1D = 250\n\nDEFAULT_STROKE_WIDTH = 4\n\nFRAME_HEIGHT = 8.0\nFRAME_WIDTH = FRAME_HEIGHT * DEFAULT_PIXEL_WIDTH / DEFAULT_PIXEL_HEIGHT\nFRAME_Y_RADIUS = FRAME_HEIGHT / 2\nFRAME_X_RADIUS = FRAME_WIDTH / 2\n\nSMALL_BUFF = 0.1\nMED_SMALL_BUFF = 0.25\nMED_LARGE_BUFF = 0.5\nLARGE_BUFF = 1\n\nDEFAULT_MOBJECT_TO_EDGE_BUFFER = MED_LARGE_BUFF\nDEFAULT_MOBJECT_TO_MOBJECT_BUFFER = MED_SMALL_BUFF\n\n\n# All in seconds\nDEFAULT_ANIMATION_RUN_TIME = 1.0\nDEFAULT_POINTWISE_FUNCTION_RUN_TIME = 3.0\nDEFAULT_WAIT_TIME = 1.0\n\n\nORIGIN = np.array((0., 0., 0.))\nUP = np.array((0., 1., 0.))\nDOWN = np.array((0., -1., 0.))\nRIGHT = np.array((1., 0., 0.))\nLEFT = np.array((-1., 0., 0.))\nIN = np.array((0., 0., -1.))\nOUT = np.array((0., 0., 1.))\nX_AXIS = np.array((1., 0., 0.))\nY_AXIS = np.array((0., 1., 0.))\nZ_AXIS = np.array((0., 0., 1.))\n\n# Useful abbreviations for diagonals\nUL = UP + LEFT\nUR = UP + RIGHT\nDL = DOWN + LEFT\nDR = DOWN + RIGHT\n\nTOP = FRAME_Y_RADIUS * UP\nBOTTOM = FRAME_Y_RADIUS * DOWN\nLEFT_SIDE = FRAME_X_RADIUS * LEFT\nRIGHT_SIDE = FRAME_X_RADIUS * RIGHT\n\nPI = np.pi\nTAU = 2 * PI\nDEGREES = TAU / 360\n\nANIMATIONS_DIR = os.path.join(MEDIA_DIR, \"animations\")\nRASTER_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"raster_images\")\nSVG_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"svg_images\")\n# TODO, staged scenes should really go into a subdirectory of a given scenes directory\nSTAGED_SCENES_DIR = os.path.join(ANIMATIONS_DIR, \"staged_scenes\")\n###\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nFILE_DIR = os.path.join(THIS_DIR, \"files\")\nTEX_DIR = 
os.path.join(FILE_DIR, \"Tex\")\nTEX_IMAGE_DIR = TEX_DIR # TODO, What is this doing?\n# These two may be depricated now.\nMOBJECT_DIR = os.path.join(FILE_DIR, \"mobjects\")\nIMAGE_MOBJECT_DIR = os.path.join(MOBJECT_DIR, \"image\")\n\nfor folder in [FILE_DIR, RASTER_IMAGE_DIR, SVG_IMAGE_DIR, ANIMATIONS_DIR, TEX_DIR,\n TEX_IMAGE_DIR, MOBJECT_DIR, IMAGE_MOBJECT_DIR,\n STAGED_SCENES_DIR]:\n if not os.path.exists(folder):\n os.makedirs(folder)\n\nTEX_TEXT_TO_REPLACE = \"YourTextHere\"\nTEMPLATE_TEX_FILE = os.path.join(THIS_DIR, \"tex_template.tex\")\nwith open(TEMPLATE_TEX_FILE, \"r\") as infile:\n TEMPLATE_TEXT_FILE_BODY = infile.read()\n TEMPLATE_TEX_FILE_BODY = TEMPLATE_TEXT_FILE_BODY.replace(\n TEX_TEXT_TO_REPLACE,\n \"\\\\begin{align*}\" + TEX_TEXT_TO_REPLACE + \"\\\\end{align*}\",\n )\n\nFFMPEG_BIN = \"ffmpeg\"\n\n\n# Colors\n\nCOLOR_MAP = {\n \"DARK_BLUE\": \"#236B8E\",\n \"DARK_BROWN\": \"#8B4513\",\n \"LIGHT_BROWN\": \"#CD853F\",\n \"BLUE_E\": \"#1C758A\",\n \"BLUE_D\": \"#29ABCA\",\n \"BLUE_C\": \"#58C4DD\",\n \"BLUE_B\": \"#9CDCEB\",\n \"BLUE_A\": \"#C7E9F1\",\n \"TEAL_E\": \"#49A88F\",\n \"TEAL_D\": \"#55C1A7\",\n \"TEAL_C\": \"#5CD0B3\",\n \"TEAL_B\": \"#76DDC0\",\n \"TEAL_A\": \"#ACEAD7\",\n \"GREEN_E\": \"#699C52\",\n \"GREEN_D\": \"#77B05D\",\n \"GREEN_C\": \"#83C167\",\n \"GREEN_B\": \"#A6CF8C\",\n \"GREEN_A\": \"#C9E2AE\",\n \"YELLOW_E\": \"#E8C11C\",\n \"YELLOW_D\": \"#F4D345\",\n \"YELLOW_C\": \"#FFFF00\",\n \"YELLOW_B\": \"#FFEA94\",\n \"YELLOW_A\": \"#FFF1B6\",\n \"GOLD_E\": \"#C78D46\",\n \"GOLD_D\": \"#E1A158\",\n \"GOLD_C\": \"#F0AC5F\",\n \"GOLD_B\": \"#F9B775\",\n \"GOLD_A\": \"#F7C797\",\n \"RED_E\": \"#CF5044\",\n \"RED_D\": \"#E65A4C\",\n \"RED_C\": \"#FC6255\",\n \"RED_B\": \"#FF8080\",\n \"RED_A\": \"#F7A1A3\",\n \"MAROON_E\": \"#94424F\",\n \"MAROON_D\": \"#A24D61\",\n \"MAROON_C\": \"#C55F73\",\n \"MAROON_B\": \"#EC92AB\",\n \"MAROON_A\": \"#ECABC1\",\n \"PURPLE_E\": \"#644172\",\n \"PURPLE_D\": \"#715582\",\n \"PURPLE_C\": \"#9A72AC\",\n \"PURPLE_B\": \"#B189C6\",\n \"PURPLE_A\": \"#CAA3E8\",\n \"WHITE\": \"#FFFFFF\",\n \"BLACK\": \"#000000\",\n \"LIGHT_GRAY\": \"#BBBBBB\",\n \"LIGHT_GREY\": \"#BBBBBB\",\n \"GRAY\": \"#888888\",\n \"GREY\": \"#888888\",\n \"DARK_GREY\": \"#444444\",\n \"DARK_GRAY\": \"#444444\",\n \"GREY_BROWN\": \"#736357\",\n \"PINK\": \"#D147BD\",\n \"GREEN_SCREEN\": \"#00FF00\",\n \"ORANGE\": \"#FF862F\",\n\n \"POPPING_TEAL\": \"#00FFFF\",\n\n \"SKINCOLOR_A\": \"#FAEBD7\",\n \"SKINCOLOR_B\": \"#FFE4C4\",\n \"SKINCOLOR_C\": \"#FFEBCD\",\n \"SKINCOLOR_D\": \"#DEB887\",\n \"SKINCOLOR_E\": \"#CD853F\",\n\n \"RED_A\": \"#FF0000\",\n \"RED_B\": \"#FF9999\",\n \"RED_C\": \"#FF0066\",\n\n \"PINK_A\": \"#DC143C\",\n \"PINK_B\": \"#F08080\",\n \"PINK_C\": \"#FFB6C1\",\n \"PINK_D\": \"#FFE4E1\",\n \"PINK_E\": \"#FFCCCC\",\n \"PINK_F\": \"#FFC0CB\",\n \"PINK_G\": \"#DB7093\",\n\n \"ORANGE_FALL\": \"#FF8000\",\n \"ORANGE_A\": \"#FF9900\",\n \"ORANGE_B\": \"#FFCC80\",\n \"ORANGE_C\": \"#FFEBCC\",\n\n \"GREEN_LIME_A\": \"#AAFF00\",\n \"GREEN_LIME_B\": \"#CCFF33\",\n \"GREEN_SHERBET\": \"#DDFF99\",\n \"GREEN_PASTEL\": \"#8CD9B3\",\n\n \"BLUE_A\": \"#0080FF\",\n \"BLUE_B\": \"#66B3FF\",\n \"BLUE_C\": \"#CCE6FF\",\n \"BLUE_NAVY\": \"#4080BF\",\n \"BLUE_SKY\": \"#66CCFF\",\n \"BLUE_CLOUD\": \"#9FBFDF\",\n \"BLUE_CLOUD_LIGHT\": \"#D9E6F2\",\n \"BLUE_NEON\": \"#80FFFF\",\n\n \"TEAL_A\": \"#33CCCC\",\n \"TEAL_B\": \"#ADEBEB\",\n\n \"PURPLE\": \"#993366\",\n \"PURPLE_LIGHT_A\": \"#BF4080\",\n \"PURPLE_LIGHT_B\": \"#E6B3CC\",\n \"PURPLE_LAVENDER\": \"#CC99FF\",\n 
\"MEDIUM_ORCHID\": \"#BA55D3\",\n\n \"GRAY_DARK\": \"#808080\",\n \"GRAY_LIGHT\": \"#A9A9A9\",\n \"GRAY_LIGHTER\": \"#DCDCDC\",\n \"GRAY_DARK_CLOUD\": \"#708090\",\n\n \"BROWN_MAHAGONY\": \"#6C463D\",\n \"BROWN_DARK_MAHAGONY\": \"#50322B\",\n\n \"HOT_PINK\": \"#FF69B4\",\n \"DEEP_PINK\": \"\t#FF1493\",\n \"DARK_MAGENTA\": \"#8B008B\",\n \"DARK_ORANGE\": \"#FF8C00\",\n \"GOLD\": \"#FFD700\",\n \"YELLOW_GREEN\": \"#9ACD32\",\n \"LIGHT_SEA_GREEN\": \"#20B2AA\",\n \"STEEL_BLUE\": \"#4682B4\",\n \"LIGHT_SKY_BLUE\": \"#87CEFA\",\n \"CORN_FLOWER_BLUE\": \"#6495ED\",\n \"DEEP_SKY_BLUE\": \"#00BFFF\",\n \"DODGER_BLUE\": \"#1E90FF\",\n \"CORNSILK\": \"#FFF8DC\",\n \"ANTIQUE_WHITE\": \"#FAEBD7\",\n \"YELLOW\": \"#FFFF00\",\n \"KHAKI\": \"#F0E68C\",\n \"LEMON_CHIFFON\": \"#FFFACD\",\n \"LIGHT_YELLOW\": \"#FFFFE0\",\n\n\n\n \"Living_Room_Floor_1\": \"#F0F0F0\",\n\n\n \"Living_Room_Wall_2\": \"#F5F5F5\",\n \"Living_Room_Wall_1\": \"#E8E8E8\",\n\n \"Rug_1\": \"#c0c0c0\",\n \"Rug_2\": \"#808080\",\n\n \"Table_1\": \"#86592d\",\n \"Table_2\": \"#392714\",\n \"TV_Table_1\": \"#86592d\",\n \"TV_Table_2\": \"#392714\",\n\n \"DOOR1\": \"#AFEEEE\",\n \"DOOR2\": \"#48D1CC\"\n\n}\nPALETTE = list(COLOR_MAP.values())\nlocals().update(COLOR_MAP)\nfor name in [s for s in list(COLOR_MAP.keys()) if s.endswith(\"_C\")]:\n locals()[name.replace(\"_C\", \"\")] = locals()[name]\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"432096936","text":"from opengever.base.schemadump.config import SCHEMA_DUMPS_DIR\nfrom opengever.base.schemadump.schema import build_all_bundle_schemas\nfrom opengever.base.schemadump.schema import build_all_gever_schemas\nfrom opengever.testing import IntegrationTestCase\nfrom os.path import join as pjoin\nfrom os.path import normpath\nfrom pkg_resources import resource_filename\nimport json\n\n\nclass TestCheckedInSchemaDumpsAreUpToDate(IntegrationTestCase):\n\n maxDiff = None\n\n @property\n def og_core_package_path(self):\n return normpath(pjoin(resource_filename('opengever.core', ''), '..', '..')) # noqa\n\n @property\n def schema_dumps_dir(self):\n return pjoin(self.og_core_package_path, SCHEMA_DUMPS_DIR)\n\n @property\n def oggbundle_schema_dumps_dir(self):\n return pjoin(resource_filename('opengever.bundle', 'schemas/'))\n\n def test_schema_dumps_for_api(self):\n for filename, current_schema in build_all_gever_schemas():\n dump_path = pjoin(self.schema_dumps_dir, filename)\n\n with open(dump_path) as dump_file:\n existing_schema = json.load(dump_file)\n\n # Shove schema through dump / load in order to get rid of\n # OrderedDicts and get better diffability\n current_schema = json.loads(json.dumps(current_schema.serialize()))\n\n self.assertDictEqual(\n existing_schema,\n current_schema,\n '\\n\\nError: JSON schema dumps for %s have changed '\n '(see diff above), please run bin/instance dump_schemas and '\n 'commit the modified schema files together with '\n 'your changes.' 
% dump_path)\n\n    def test_schema_dumps_for_oggbundles(self):\n        for filename, current_schema in build_all_bundle_schemas():\n            dump_path = pjoin(self.oggbundle_schema_dumps_dir, filename)\n\n            with open(dump_path) as dump_file:\n                existing_schema = json.load(dump_file)\n\n            self.assertDictEqual(\n                existing_schema,\n                current_schema.serialize(),\n                '\\n\\nError: JSON schema dumps for %s have changed '\n                '(see diff above), please run bin/instance dump_schemas and '\n                'commit the modified schema files together with '\n                'your changes.' % dump_path)\n","sub_path":"opengever/base/schemadump/tests/test_schemadumps.py","file_name":"test_schemadumps.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"126648721","text":"import os\nfrom obspy import read, UTCDateTime\nimport subprocess\nos.putenv(\"SAC_DISPLAY_COPYRIGHT\", '0')\n\ndef cut(fpath, b, e, outpath, fillz=False):\n    p = subprocess.Popen(['sac'], stdin=subprocess.PIPE)\n    s = \"wild echo off \\n\"\n    if fillz: s += \"cuterr fillz \\n\"\n    s += \"cut %s %s \\n\" %(b, e)\n    s += \"r %s \\n\" %(fpath)\n    s += \"ch allt (0-&1,b&) iztype IB \\n\"\n    s += \"w %s \\n\" %(outpath)\n    s += \"q \\n\"\n    p.communicate(s.encode())\n\n\ndef obspy_trim(stream, t0, t1, zfill=False):\n    if not zfill: st = stream.copy().trim(t0, t1)\n    if zfill: st = stream.copy().trim(t0, t1, pad=True, fill_value=0)\n    for tr in st:\n        tr.stats.sac.nzyear = t0.year\n        tr.stats.sac.nzjday = t0.julday\n        tr.stats.sac.nzhour = t0.hour\n        tr.stats.sac.nzmin = t0.minute\n        tr.stats.sac.nzsec = t0.second\n        tr.stats.sac.nzmsec = t0.microsecond / 1e3\n    return st\n\n\ndef obspy_slice(stream, t0, t1):\n    st = stream.slice(t0, t1)\n    for tr in st:\n        tr.stats.sac.nzyear = t0.year\n        tr.stats.sac.nzjday = t0.julday\n        tr.stats.sac.nzhour = t0.hour\n        tr.stats.sac.nzmin = t0.minute\n        tr.stats.sac.nzsec = t0.second\n        tr.stats.sac.nzmsec = t0.microsecond / 1e3\n    return st\n\n\ndef merge(fpaths, out_path):\n    num_files = len(fpaths)\n    if num_files==0: return\n    if num_files==1: os.rename(fpaths[0], out_path); return\n    p = subprocess.Popen(['sac'], stdin=subprocess.PIPE)\n    s = \"wild echo off \\n\"\n    print('merge sac files to {}'.format(out_path))\n    if num_files<1000:\n        for i,fpath in enumerate(fpaths):\n            if i==0: s += \"r %s \\n\" %(fpath)\n            else: s += \"r more %s \\n\" %(fpath)\n        s += \"merge g z o a \\n\"\n        s += \"w %s \\n\" %(out_path)\n    else:\n        os.rename(fpaths[0], 'tmp.sac')\n        num_batch = 1 + (num_files-2)//999\n        for idx in range(num_batch):\n            # read one batch (1000 files)\n            s += \"r tmp.sac \\n\"\n            for fpath in fpaths[1+idx*999:1+(idx+1)*999]:\n                s += \"r more %s \\n\" %(fpath)\n            # merge batch to tmp sac\n            s += \"merge g z o a \\n\"\n            s += \"w tmp.sac \\n\"\n        os.rename('tmp.sac', out_path)\n    s += \"q \\n\"\n    p.communicate(s.encode())\n\n\ndef ch_sta(fpath, knetwk=None, kstnm=None, kcmpnm=None, stlo=0, stla=0, stel=0):\n    p = subprocess.Popen(['sac'], stdin=subprocess.PIPE)\n    s = \"wild echo off \\n\"\n    print('change station header for {}: {},{},{},{},{},{}'\\\n        .format(fpath, knetwk, kstnm, kcmpnm, stlo, stla, stel))\n    s += \"rh %s \\n\" %(fpath)\n    s += \"ch stlo %s stla %s \\n\" %(stlo, stla)\n    s += \"ch stel %s \\n\" %(stel)\n    if knetwk: s += \"ch knetwk %s \\n\" %(knetwk)\n    if kstnm: s += \"ch kstnm %s \\n\" %(kstnm)\n    if kcmpnm: s += \"ch kcmpnm %s \\n\" %(kcmpnm)\n    s += \"wh \\n\"\n    s += \"q \\n\"\n    p.communicate(s.encode())\n\n\ndef ch_event(fpath, evla, evlo, evdp, mag, tn=[]):\n    p = subprocess.Popen(['sac'], 
stdin=subprocess.PIPE)\n s = \"wild echo off \\n\"\n s += \"rh %s \\n\" %(fpath)\n s += \"ch evlo %s evla %s \\n\" %(evlo, evla)\n s += \"ch evdp %s \\n\" %(evdp)\n s += \"ch mag %s \\n\" %(mag)\n for i,ti in enumerate(tn):\n s += \"ch t%s %s \\n\" %(i,ti)\n s += \"wh \\n\"\n s += \"q \\n\"\n p.communicate(s.encode())\n\n\ndef seed2sac(fpath, out_dir=None):\n if out_dir: subprocess.call(['rdseed', '-dfq', fpath, out_dir])\n else: subprocess.call(['rdseed', '-df', fpath])\n\n\ndef mseed2sac(fpath):\n subprocess.call(['mseed2sac', '-O', fpath])\n","sub_path":"sac.py","file_name":"sac.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216218635","text":"# Module to create standard periods from years or other period terms: it requires four inputs:\n# the name of the HER, and the lists of periods, start and end dates. These should be python\n# list objects.\n\ndef lookup(her_name, period_list, start_list, end_list):\n\t\n\toutlist = []\n\t\n\t# First, we need to define our break points (for the specific HER if it uses 700 BC for the BA to IA transition).\n\t# There actually seem to be rather a lot of them...\n\t\n\ther_set = set(['Devon','Greater Manchester','Central Bedfordshire and Luton','Birmingham','Coventry','Lancashire','NorthLincolnshire','NorthYorkshire','Sandwell','Winchester','Southampton','WestBerkshire','Isle of Wight'])\n\tif her_name in her_set:\n\t\tBA_to_IA = -700\n\telse:\n\t\tBA_to_IA = -800\n\tBA_end = -700\n\tIA_start = -800\n\tIA_to_RO = 43\n\tRO_to_EM = 410\n\tEM_end = 1066\n\t\t\t\n\t# Next, we define the sets of period terms that might relate to our six periods (lower case).\n\t# The ones with closing but no opening brackets aren't a mistake, the brackets don't seem to be opened in the HER data (Wiltshire & Swindon).\n\tPR_terms = set(['prehistoric (unknown)','prehistoric','later prehistoric','late prehistoric'])\n\tBA_terms = set(['prehistoric (bronze age)','prehistoric (late bronze age)','prehistoric (middle bronze age)','bronze age','early bronze age','middle bronze age','late bronze age','earlier bronze age','later bronze age'])\n\tIA_terms = set(['prehistoric (early iron age)','prehistoric (iron age)','prehistoric (late iron age)','iron age','lpria','early iron age','middle iron age','late iron age','earlier iron age','later iron age','ia','mia'])\n\tRO_terms = set(['roman (antonine)','roman (aurelian)','roman (claudian)','roman (flavian)','roman (gordian)','roman (hadrianic)','roman (marcus aurelius)','roman (nero)','roman (romano british)','roman (severan)','roman (tiberius)','roman (trajanic)','roman (unknown)','roman (valens)','roman','c1','c2','c3','c4','1st century','2nd century','3rd century','4th century','early roman','late roman','earlier roman','later roman','ro'])\n\tEM_terms = set(['early medieval (saxon/anglian)','early medieval (scandinavian)','early medieval (unknown)','early medieval','saxon','anglian','viking','post-roman','sub-roman','anglo-saxon','dark age','dark ages','earlier medieval','c5','c6','c7','c8','c9','c10','c11','ls','ems'])\n\tUN_terms = set(['unknown (unknown)','uncertain','unknown','unknown date'])\n\t\n\t# if there is data in the period list we will use that (if start list is empty)_:\n\tif len(period_list) > 0 and len(start_list) == 0:\n\t\tfor x in period_list:\n\t\t\tcurrent = x.lower()\n\t\t\tif current in PR_terms:\n\t\t\t\toutlist.append('Prehistoric')\n\t\t\tif current in BA_terms:\n\t\t\t\toutlist.append('Bronze 
Age')\n\t\t\tif current in IA_terms:\n\t\t\t\toutlist.append('Iron Age')\n\t\t\tif current in RO_terms:\n\t\t\t\toutlist.append('Roman')\n\t\t\tif current in EM_terms:\n\t\t\t\toutlist.append('Early medieval')\n\t\t\tif current in UN_terms:\n\t\t\t\toutlist.append('Uncertain')\n\t\tif len(outlist) == 0: # If the list is still empty, it must be a bad date...\n\t\t\toutlist.append('BAD DATE!')\n\t# if there is no data in the period list, we will use the start and end dates\n\t# this assumes that they are ordered in order of association:\n\telse:\n\t\txnum = 0\n\t\tfor x in start_list:\n\t\t\tstart = x\n\t\t\tend = end_list[xnum]\n\t\t\tif start == None or start == 'None' or '\\n' in start or start == '' or end == '' or end == None or end == 'None':\n\t\t\t\toutlist.append('Uncertain')\n\t\t\telif start[-1:].isdigit() == True and end[-1:].isdigit() == True: # Otherwise turn it into an integer (testing it is a digit first)\n\t\t\t\tstart = int(start)\n\t\t\t\tend = int(end)\n\t\t\t\tif end < start: # If the end date is lower than the start date, they must be a bad date...\n\t\t\t\t\toutlist.append('BAD DATE!')\n\t\t\t\telse:\n\t\t\t\t\tif start == 0 and end == 0:\n\t\t\t\t\t\toutlist.append('Uncertain')\n\t\t\t\t\telif start < -3000 and end > EM_end:\n\t\t\t\t\t\toutlist.append('Uncertain')\n\t\t\t\t\telif end <= BA_end and start >= -3000: # 3000 BC is actually before the Bronze Age, but I just want to separate out all of those entries that go back to -500000 or something\n\t\t\t\t\t\toutlist.append('Bronze Age')\n\t\t\t\t\telif start >= EM_end or (start == 0 and end >= EM_end):\n\t\t\t\t\t\toutlist.append('BAD DATE!')\n\t\t\t\t\telif end <= BA_to_IA and start < -3000:\n\t\t\t\t\t\toutlist.append('Prehistoric')\n\t\t\t\t\telif start < BA_to_IA and end <= IA_to_RO:\n\t\t\t\t\t\toutlist.append('Prehistoric')\n\t\t\t\t\telif start >= IA_start and end <= IA_to_RO:\n\t\t\t\t\t\toutlist.append('Iron Age')\n\t\t\t\t\telif start >= IA_to_RO and end <= RO_to_EM:\n\t\t\t\t\t\toutlist.append('Roman')\n\t\t\t\t\telif start >= RO_to_EM:\n\t\t\t\t\t\toutlist.append('Early medieval')\n\t\t\t\t\telif start < BA_to_IA and end <= RO_to_EM:\n\t\t\t\t\t\toutlist.append('Prehistoric; Roman')\n\t\t\t\t\telif start < IA_to_RO and end <= RO_to_EM:\n\t\t\t\t\t\toutlist.append('Iron Age; Roman')\n\t\t\t\t\telif start >= IA_to_RO and end > RO_to_EM:\n\t\t\t\t\t\toutlist.append('Roman; Early medieval')\n\t\t\t\t\telif start < BA_to_IA and end > RO_to_EM:\n\t\t\t\t\t\toutlist.append('Prehistoric; Roman; Early medieval')\n\t\t\t\t\telif start < IA_to_RO and end > RO_to_EM:\n\t\t\t\t\t\toutlist.append('Iron Age; Roman; Early medieval')\n\t\t\t\t\telse:\n\t\t\t\t\t\toutlist.append('Uncertain')\n\t\t\telse: # Otherwise assume it is a bad date...\n\t\t\t\toutlist.append('BAD DATE!')\n\t\t\txnum += 1\n\t\n\treturn outlist","sub_path":"period_lookup.py","file_name":"period_lookup.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"407799420","text":"import requests\r\nimport json\r\nimport math\r\n\r\n# reference : https://github.com/Rodantny/Rate-My-Professor-Scraper-and-Search/blob/master/RMPClass.py#L24\r\n\r\ndef getProfList(uni_id):\r\n # get number of professors in the university\r\n l = []\r\n page = requests.get(\r\n \"http://www.ratemyprofessors.com/filter/professor/?&page=1&filter=teacherlastname_sort_s+asc&query=*%3A*&queryoption=TEACHER&queryBy=schoolId&sid=\" + str(\r\n uni_id)) # get request for page\r\n temp_jsonpage 
= json.loads(page.content)\r\n    num_of_prof = temp_jsonpage['remaining'] + 20 # get the number of professors \r\n    \r\n    \r\n    num_of_pages = math.ceil(num_of_prof / 20)\r\n    i = 1\r\n    while (i <= num_of_pages):# the loop inserts all professors into the list\r\n        page = requests.get(\"http://www.ratemyprofessors.com/filter/professor/?&page=\" + str(\r\n            i) + \"&filter=teacherlastname_sort_s+asc&query=*%3A*&queryoption=TEACHER&queryBy=schoolId&sid=\" + str(\r\n            uni_id))\r\n        temp_jsonpage = json.loads(page.content)\r\n        temp_list = temp_jsonpage['professors']\r\n        l.extend(temp_list)\r\n        i += 1\r\n    return l\r\n\r\nmy_list = getProfList(1407)\r\n# write professor list to json file\r\nwith open('data.json', 'w') as outfile:\r\n    json.dump(my_list, outfile)\r\n    \r\n# get list of departments \r\ndep_req = requests.get('https://www.ratemyprofessors.com/teacher/getDepartmentListFromSchool?sid=1407')\r\njson_dep = json.loads(dep_req.content)\r\n    \r\n# calculate average score for each department \r\ndep_Dict = {} \r\nfor dep_name in json_dep['departments']:\r\n    total = 0\r\n    count = 0\r\n    for i in my_list:\r\n        if i['tDept'] == dep_name['name']: \r\n            if i['overall_rating'] != 'N/A': \r\n                total += float(i['overall_rating'])\r\n                count += 1 \r\n    if count == 0:\r\n        continue\r\n    dep_avg_score = total / count\r\n    dep_Dict[dep_name['name']] = dep_avg_score\r\n    \r\n# simple visualization in matplotlib \r\nimport matplotlib.pyplot as plt\r\nD = dep_Dict\r\nplt.bar(range(len(D)), list(D.values()), align='center')\r\nplt.xticks(range(len(D)), list(D.keys()), rotation=80)","sub_path":"getProfList.py","file_name":"getProfList.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"37550986","text":"north = '⍓'\nsouth = '⍌'\neast = '⍄'\nwest = '⍃'\n\ndef render_car(dir):\n    if(dir == 'N'):\n        return north\n    elif(dir == 'S'):\n        return south\n    elif(dir == 'E'):\n        return east\n    elif(dir == 'W'):\n        return west\n    else:\n        return north\n\n\ndef render_grid(car):\n    result = ''\n    (c_x, c_y, c_d) = car\n    til = range(0,10)\n    for y in til:\n        row = str(y)\n        for x in til:\n            if(c_x == x and c_y == y):\n                cell = render_car(c_d)\n            else:\n                cell = '_'\n            row = row + ' ' + cell \n        if(y == 0):\n            result = result + '  '+' '.join(map(str,list(til))) \n        result = result +'\\n'+row\n        row = ''\n    return result\n","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"321656942","text":"import os\nimport cv2\nimport json\n\ndef odgt(img_path):\n    seg_path = img_path.replace('images','annotations')\n    seg_path = seg_path.replace('.jpg','.png')\n    \n    if os.path.exists(seg_path):\n        img = cv2.imread(img_path)\n        h, w, _ = img.shape\n\n        odgt_dic = {}\n        odgt_dic[\"fpath_img\"] = img_path\n        odgt_dic[\"fpath_segm\"] = seg_path\n        odgt_dic[\"width\"] = w\n        odgt_dic[\"height\"] = h\n        return odgt_dic\n    else:\n        # print('the corresponding annotation does not exist')\n        # print(img_path)\n        return None\n\n\nif __name__ == \"__main__\":\n    modes = ['train','val']\n    saves = ['metal_training.odgt', 'metal_validation.odgt']\n\n    for i, mode in enumerate(modes):\n        save = saves[i]\n        dir_path = f\"/home/rico-li/Job/豐興鋼鐵/data/clean_data_20frames/U100/images/{mode}\"\n        img_list = os.listdir(dir_path)\n        img_list.sort()\n        img_list = [os.path.join(dir_path, img) for img in img_list]\n\n        with open(f'/home/rico-li/Job/豐興鋼鐵/semantic-segmentation-pytorch/data/{save}', 
mode='wt', encoding='utf-8') as myodgt:\n for i, img in enumerate(img_list):\n a_odgt = odgt(img)\n if a_odgt is not None:\n myodgt.write(f'{json.dumps(a_odgt)}\\n')\n ","sub_path":"odgt_maker.py","file_name":"odgt_maker.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"612990762","text":"import os\nimport pickle\nimport re\nfrom collections import Counter\nimport numpy as np\n\ndef load_data(path, encoding='utf-8'):\n\n input_file = os.path.join(path)\n with open(input_file, \"r\", encoding=encoding) as f:\n data = f.read()\n return data\n\ndef clean_str(string):\n\n string = re.sub(r\"[^가-힣A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\ndef create_lookup_tables(text):\n\n vocab = set(text)\n vocab_to_int = {c : i for i, c in enumerate(vocab)}\n int_to_vocab = dict(enumerate(vocab))\n return vocab_to_int, int_to_vocab\n\ndef preprocess_and_save_data(dataset_path, pickle_path, create_lookup_tables, encoding='utf-8'):\n\n text = load_data(dataset_path, encoding)\n #text = clean_str(text)\n vocab_to_int, int_to_vocab = create_lookup_tables(text)\n int_text = np.array([vocab_to_int[c] for c in text], dtype=np.int32)\n print(len(int_text))\n with open(pickle_path, 'wb') as f:\n pickle.dump((int_text, vocab_to_int, int_to_vocab), f)\n\n\ndef load_preprocess(pickle_path):\n\n with open(pickle_path, mode='rb') as f:\n return pickle.load(f)\n\ndef explore_data(text):\n\n view_sentence_range = (0, 50)\n print('Dataset Stats\\n')\n print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\n scenes = text.split('\\n\\n\\n')\n print('Number of scenes: {}'.format(len(scenes)))\n sentence_count_scene = [scene.count('\\n') for scene in scenes]\n print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\n sentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\n print('Number of lines: {}'.format(len(sentences)))\n word_count_sentence = [len(sentence.split()) for sentence in sentences]\n print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n print()\n print('The sentences {} to {}:'.format(*view_sentence_range))\n print('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))\n\ndef next_batch(arr, batch_size, seq_length):\n\n no_batches = len(arr) // (batch_size * seq_length)\n arr = arr[:no_batches * batch_size * seq_length]\n arr = arr.reshape((batch_size,-1))\n\n for i in range(0, arr.shape[1], seq_length):\n x = arr[:, i:i+seq_length]\n y = np.zeros_like(x)\n y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]\n yield x, y\n\n\nif __name__ == '__main__':\n\n encoding = \"ISO-8859-1\"#'utf-8'\n script = input('Enter Name of script to preprocess: ')\n path = './data/' + script + '.txt'\n text = load_data(path, encoding)\n explore_data(text)\n pickle_path = './model/' + 
script + '/' + script + '_preprocess.p'\n preprocess_and_save_data(path, pickle_path, create_lookup_tables, encoding)","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"82691846","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=unused-argument\n# pylint: disable=unused-variable\n\nimport logging\nimport subprocess\nfrom pprint import pformat\nfrom typing import Any, Dict, List\n\nimport pytest\nfrom docker import DockerClient\nfrom docker.models.services import Service\nfrom tenacity import Retrying, before_log, stop_after_attempt, wait_fixed\n\npytest_plugins = [\n \"pytest_simcore.docker_compose\",\n \"pytest_simcore.docker_registry\",\n \"pytest_simcore.docker_swarm\",\n \"pytest_simcore.minio_service\",\n \"pytest_simcore.postgres_service\",\n \"pytest_simcore.rabbit_service\",\n \"pytest_simcore.repository_paths\",\n \"pytest_simcore.simcore_webserver_service\",\n \"pytest_simcore.tmp_path_extra\",\n \"pytest_simcore.traefik_service\",\n]\nlog = logging.getLogger(__name__)\n\n\n# CORE stack\n\n\n@pytest.fixture(scope=\"module\")\ndef core_services_selection(simcore_docker_compose: Dict) -> List[str]:\n ## OVERRIDES packages/pytest-simcore/src/pytest_simcore/docker_compose.py::core_services_selection\n # select ALL services for these tests\n return list(simcore_docker_compose[\"services\"].keys())\n\n\n@pytest.fixture(scope=\"module\")\ndef core_stack_name(docker_stack: Dict) -> str:\n return docker_stack[\"stacks\"][\"core\"][\"name\"]\n\n\n@pytest.fixture(scope=\"module\")\ndef core_stack_compose(\n docker_stack: Dict, simcore_docker_compose: Dict\n) -> Dict[str, Any]:\n # verifies core_services_selection\n assert set(docker_stack[\"stacks\"][\"core\"][\"compose\"][\"services\"]) == set(\n simcore_docker_compose[\"services\"]\n )\n return docker_stack[\"stacks\"][\"core\"][\"compose\"]\n\n\n# OPS stack\n\n\n@pytest.fixture(scope=\"module\")\ndef ops_services_selection(ops_docker_compose: Dict) -> List[str]:\n # select ALL services for these tests\n return list(ops_docker_compose[\"services\"].keys())\n\n\n@pytest.fixture(scope=\"module\")\ndef ops_stack_name(docker_stack: Dict) -> str:\n return docker_stack[\"stacks\"][\"ops\"][\"name\"]\n\n\n@pytest.fixture(scope=\"module\")\ndef ops_stack_compose(docker_stack: Dict, ops_docker_compose: Dict):\n # verifies ops_services_selection\n assert set(docker_stack[\"stacks\"][\"ops\"][\"compose\"][\"services\"]) == set(\n ops_docker_compose[\"services\"]\n )\n return docker_stack[\"stacks\"][\"core\"][\"compose\"]\n\n\n# time measured from command 'up' finished until *all* tasks are running\nMAX_TIME_TO_DEPLOY_SECS = 60\n\n\n@pytest.fixture(scope=\"module\")\ndef deployed_simcore_stack(\n core_stack_name: str, core_stack_compose: Dict, docker_client: DockerClient\n) -> List[Service]:\n\n # NOTE: the goal here is NOT to test time-to-deplopy but\n # rather guaranteing that the framework is fully deployed before starting\n # tests. 
Obviously in a critical state in which the frameworks has a problem\n # the fixture will fail\n\n try:\n for attempt in Retrying(\n wait=wait_fixed(MAX_TIME_TO_DEPLOY_SECS),\n stop=stop_after_attempt(5),\n before=before_log(log, logging.WARNING),\n ):\n with attempt:\n for service in docker_client.services.list():\n for task in service.tasks():\n # NOTE: Could have been restarted from latest test parameter, accept as well complete\n assert task[\"Status\"][\"State\"] in (\n task[\"DesiredState\"],\n \"complete\",\n ), (\n f\"{service.name} still not ready or complete. Expected \"\n f\"desired_state[{task['DesiredState']}] but got \"\n f\"status_state[{task['Status']['State']}]). Details:\"\n f\"\\n{pformat(task)}\"\n )\n\n finally:\n subprocess.run(f\"docker stack ps {core_stack_name}\", shell=True, check=False)\n # logs table like\n # ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR\n # xbrhmaygtb76 simcore_sidecar.1 itisfoundation/sidecar:latest crespo-wkstn Running Running 53 seconds ago\n # zde7p8qdwk4j simcore_rabbit.1 itisfoundation/rabbitmq:3.8.0-management crespo-wkstn Running Running 59 seconds ago\n # f2gxmhwq7hhk simcore_postgres.1 postgres:10.10 crespo-wkstn Running Running about a minute ago\n # 1lh2hulxmc4q simcore_director.1 itisfoundation/director:latest crespo-wkstn Running Running 34 seconds ago\n # ...\n\n # TODO: find a more reliable way to list services in a stack\n core_stack_services: List[Service] = [\n service\n for service in docker_client.services.list()\n if service.name.startswith(f\"{core_stack_name}_\")\n ] # type: ignore\n\n assert (\n core_stack_services\n ), f\"Expected some services in core stack '{core_stack_name}'\"\n\n assert len(core_stack_compose[\"services\"].keys()) == len(core_stack_services)\n\n return core_stack_services\n","sub_path":"tests/swarm-deploy/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"314214830","text":"\"\"\"\nResursively find the max left and right length, then update the max length\n\"\"\"\nclass Solution(object):\n def diameterOfBinaryTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return 0\n self.maxLen = 0\n self.dfs(root)\n\n return self.maxLen\n \n def dfs(self, root):\n left = right = 0\n if root.left:\n left = self.dfs(root.left) + 1\n if root.right:\n right = self.dfs(root.right) + 1\n self.maxLen = max(left + right, self.maxLen)\n return max(left, right)","sub_path":"solution/python/543.py","file_name":"543.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"121917559","text":"import httplib2\nimport json, requests\nimport sys\nimport codecs\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\nsys.stderr = codecs.getwriter('utf8')(sys.stderr)\n\ndef getGeocodeLocation(inputString):\n google_api_key = \"AIzaSyDhbfNoX5i7euOcHdpjZ9EMJYdbR1GWlxo\"\n locationString = inputString.replace(\" \", \"+\")\n url = ('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(locationString, google_api_key))\n h = httplib2.Http()\n response, content = h.request(url, 'GET')\n result = json.loads(content)\n latitude = result['results'][0]['geometry']['location']['lat']\n longitude = result['results'][0]['geometry']['location']['lng']\n return(latitude, longitude)\n\ndef findARestaurant(mealType, locationString):\n location = 
getGeocodeLocation(locationString)\n url = 'https://api.foursquare.com/v2/venues/explore'\n params = dict(\n client_id='GZO5GCT3V1PK0WMYN35JMQXTL4Q0BPVGPJWERUN50MF5IS0K',\n client_secret='I23U3EVRZIBSPD0KS2LAAENVVPVXLZZQ1I4OSL5CZEE04W1A',\n v='20180323',\n ll='{0},{1}'.format(location[0], location[1]),\n query=mealType,\n limit=1\n )\n resp = requests.get(url=url, params=params)\n data = json.loads(resp.text)\n\n venue = data['response']['groups'][0]['items'][0]['venue']\n venueAddress = venue['location']['formattedAddress']\n\n if venue:\n photoParams = dict(\n client_id='GZO5GCT3V1PK0WMYN35JMQXTL4Q0BPVGPJWERUN50MF5IS0K',\n client_secret='I23U3EVRZIBSPD0KS2LAAENVVPVXLZZQ1I4OSL5CZEE04W1A',\n v='20180323'\n )\n photosUrl = 'https://api.foursquare.com/v2/venues/{0}/photos'.format(venue['id'])\n photoResp = requests.get(photosUrl, params=photoParams)\n photoData = json.loads(photoResp.text)\n if photoData['response']['photos']['count'] != 0:\n photo = photoData['response']['photos']['items'][0]\n photoUrl = '{0}300x300{1}'.format(photo['prefix'], photo['suffix'])\n else:\n photoUrl = 'https://igx.4sqi.net/img/general/300x300/default.jpg'\n\n sys.stdout.write(venue['name'] + '\\n')\n for line in venueAddress:\n sys.stdout.write(line + '\\n')\n\n results = dict(\n name=venue['name'],\n address=venueAddress,\n image=photoUrl\n )\n\n return results\n\n else:\n print(\"No results found\")\n return None","sub_path":"puppies/geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"118811556","text":"#!/usr/bin/env python\n\nimport wx_sdk\nimport json\nimport os\nimport time\nimport datetime\n\npath = '/home/dmr/.cache/weather.json'\n\n\ndef isExit():\n if not os.path.exists(path):\n return True\n filetime = time.strftime(\n '%Y-%m-%d-%H', time.localtime(os.stat(path).st_mtime))\n today = datetime.datetime.now().strftime('%Y-%m-%d-%H')\n if today.__eq__(filetime):\n return False\n return True\n\n\ndef getWeather():\n if isExit():\n url = 'https://way.jd.com/jisuapi/weather'\n params = {\n 'city': '杭州',\n 'cityid': '',\n 'citycode': '',\n 'appkey': '7ec3e4137c89df841fc4d5cf996b03c8'\n }\n r = wx_sdk.wx_post_req(url, params)\n try:\n f = open(path, 'w')\n f.write(r.text)\n f.close()\n except:\n print('写缓存错误')\n try:\n fp = open(path, 'r')\n text = fp.read()\n fp.close()\n all = json.loads(text)\n caseFormat(all)\n except:\n print('读缓存错误')\n\n\ndef caseFormat(all):\n try:\n days = all['result']['result']['daily']\n night = days[0]['night']\n day = days[0]['day']\n\n templow = night['templow']\n temphigh = day['temphigh']\n dayWeather = day['weather']\n # nightWeather = night['weather']\n # WindPower = day['windpower']\n # w = {'晴': '', '多云': '', '阴': '', '阵雨': '', '雷阵雨': '',\n # '雷阵雨伴有冰雹': '', '雨夹雪': '', '小雨': '', '中雨': '',\n # '大雨': '', '暴雨': '', '大暴雨': '', '特大暴雨': '',\n # '阵雪': '', '小雪': '', '中雪': '', '大雪': '',\n # '暴雪': '', '雾': '', '冻雨': '', '沙尘暴': '',\n # '小雨-中雨': '', '中雨-大雨': '', '大雨-暴雨': '',\n # '暴雨-大暴雨': '', '大暴雨-特大暴雨': '', '小雪-中雪': '',\n # '中雪-大雪': '<++>', '大雪-暴雪': '<++>', '浮尘': '<++>',\n # '扬沙': '<++>', '强沙尘暴': '<++>', '浓雾': '<++>', '强浓雾': '<++>',\n # '霾': '<++>', '中毒霾': '<++>', '重度霾': '<++>', '严重霾': '<++>',\n # '大雾': '<++>', '特强浓雾': '<++>', '无': '', '雨': '', '雪': '<++>'}\n Temp = str(templow) + '~' + str(temphigh) + '糖 ' + dayWeather\n print(Temp)\n except:\n 
print(all['msg'])\n\n\ngetWeather()\n","sub_path":"polybar/polybar-scripts/weather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"145580797","text":"from django.shortcuts import render\nfrom home.models import Product\nfrom math import ceil\nfrom django.shortcuts import render,HttpResponse,redirect\nfrom datetime import datetime\nfrom home.models import Contact,Orders,OrderUpdate,Blogpost\nfrom django.contrib import messages\nimport json\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\n# from home.models import Product\n# from math import ceil\n\n# Create your views here.\ndef index(request):\n return render(request,'index.html')\n\ndef aboutus(request):\n return render(request,'aboutus.html')\n\ndef guide(request):\n myposts= Blogpost.objects.all()\n print(myposts)\n return render(request,'guide.html',{'myposts': myposts})\n\n\ndef blockpost(request,id):\n post = Blogpost.objects.filter(post_id = id)[0]\n print(post)\n return render(request,'blockpost.html',{'post':post})\n\ndef contact(request):\n thank=False\n if request.method ==\"POST\":\n name=request.POST.get('name')\n phone=request.POST.get('phone')\n email=request.POST.get('email')\n desc=request.POST.get('desc')\n contact=Contact(name=name, email=email,phone=phone,desc=desc,date=datetime.today())\n contact.save()\n thank=True\n return render(request,'contact.html',{'thank':thank})\n \n\ndef napal(request):\n products= Product.objects.all()\n allProds=[]\n catprods= Product.objects.values('category', 'id')\n cats= {item[\"category\"] for item in catprods}\n for cat in cats:\n prod=Product.objects.filter(category=cat)\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4))\n allProds.append([prod, range(1, nSlides), nSlides])\n\n params={'allProds':allProds } \n return render(request,'napal.html',params)\n\n\ndef productView(request, myid):\n product = Product.objects.filter(id=myid)\n return render(request, 'productView.html', {'product':product[0]})\n\n\n\n\ndef checkout(request):\n if request.method ==\"POST\":\n items_json=request.POST.get('itemsJson')\n amount=request.POST.get('amount')\n name=request.POST.get('name')\n email=request.POST.get('email')\n city=request.POST.get('city')\n state=request.POST.get('state')\n zip_code=request.POST.get('zip_code')\n phone=request.POST.get('phone')\n date=request.POST.get('date')\n order=Orders(items_json=items_json,name=name, email=email,city=city,state=state,zip_code=zip_code,phone=phone,amount=amount)\n order.save()\n update= OrderUpdate(order_id= order.order_id, update_desc=\"Your Guide will update the message\")\n update.save()\n thank = True\n id = order.order_id\n return render(request, 'checkout.html', {'thank':thank, 'id': id})\n return render(request, 'checkout.html')\n\n\n\ndef tracker(request):\n if request.method==\"POST\":\n orderId = request.POST.get('orderId', '')\n email = request.POST.get('email', '')\n try:\n order = Orders.objects.filter(order_id=orderId, email=email)\n if len(order)>0:\n update = OrderUpdate.objects.filter(order_id=orderId)\n updates = []\n for item in update:\n updates.append({'text': item.update_desc, 'time': item.timestamp})\n response = json.dumps({\"status\":\"success\", \"updates\": updates, \"itemsJson\": order[0].items_json}, default=str)\n return HttpResponse(response)\n else:\n return HttpResponse('{\"status\":\"noitem\"}')\n except Exception 
as e:\n return HttpResponse('{\"status\":\"error\"}')\n return render(request, 'tracker.html')\n\n\n\ndef searchMatch(query, item):\n '''return true only if query matches the item'''\n if query in item.desc.lower() or query in item.product_name.lower() or query in item.category.lower():\n return True\n else:\n return False\n\ndef search(request):\n query = request.GET.get('search')\n allProds = []\n catprods = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catprods}\n for cat in cats:\n prodtemp = Product.objects.filter(category=cat)\n prod = [item for item in prodtemp if searchMatch(query.lower(), item)]\n\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4))\n if len(prod) != 0:\n allProds.append([prod, range(1, nSlides), nSlides])\n params = {'allProds': allProds, \"msg\": \"\"}\n if len(allProds) == 0 or len(query)<4:\n params = {'msg': \"Please make sure to enter relevant search query\"}\n return render(request, 'search.html', params)\n\n\n\n\ndef handleSignUp(request):\n if request.method==\"POST\":\n # Get the post parameters\n username=request.POST['username']\n email=request.POST['email1']\n fname=request.POST['fname']\n lname=request.POST['lname']\n pass1=request.POST['pass1']\n pass2=request.POST['pass2']\n\n # check for errorneous input\n if len(username)<10:\n thank3=True\n return render(request,'index.html',{'thank3':thank3})\n # messages.error(request, \" Your user name must be under 10 characters\")\n # return redirect('/')\n\n if not username.isalnum():\n thank4=True\n return render(request,'index.html',{'thank4':thank4})\n # messages.error(request, \" User name should only contain letters and numbers\")\n # return redirect('/')\n if (pass1!= pass2):\n thank5=True\n return render(request,'index.html',{'thank5':thank5})\n # messages.error(request, \" Passwords do not match\")\n # return redirect('/')\n \n # Create the user\n myuser = User.objects.create_user(username, email, pass1)\n myuser.first_name= fname\n myuser.last_name= lname\n myuser.save()\n thank6=True\n return render(request,'index.html',{'thank6':thank6})\n # messages.success(request, \" Your iCoder has been successfully created\")\n # return redirect('/')\n\n else:\n return HttpResponse(\"page not found\")\n\n\n\n\ndef handeLogin(request):\n if request.method==\"POST\":\n # Get the post parameters\n loginusername=request.POST['loginusername']\n loginpassword=request.POST['loginpassword']\n\n user=authenticate(username= loginusername, password= loginpassword)\n if user is not None:\n login(request, user)\n # messages.success(request, \"Successfully Logged In\")\n thank2=True\n return render(request,'index.html',{'thank2':thank2})\n \n else:\n # messages.error(request, \"Invalid credentials! 
Please try again\")\n thank1=True\n return render(request,'index.html',{'thank1':thank1})\n\n return HttpResponse(\"page not found\")\n\n\n\ndef handelLogout(request):\n logout(request)\n thank7=True\n return render(request,'index.html',{'thank7':thank7})\n # messages.success(request, \"Successfully logged out\")\n # return redirect('/')","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"353519691","text":"import PyPDF2,re,cv2\nimport itertools,pytesseract\nimport sys\nimport os\n\n\n\n# pdfFileObj = open('D:\\History\\Practices\\dataextraction\\ALTMP_PRD_1.pdf', 'rb')\n# pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\ndef convertPDFToText(path):\n import os\n count=0\n increase=0\n print(\"enter\",path)\n a=[]\n from PyPDF2 import PdfFileReader\n from pdf2image import convert_from_path\n out=[]\n paths=cwd=os.getcwd()+\"/\"+\"resumes\"\n pdftoppm_path = r\"D:\\poppler-0.512\\bin\\pdftoppm.exe\"\n for filename in os.listdir(paths):\n \n \n files=paths+\"/\"+filename\n from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n from pdfminer.converter import TextConverter\n from pdfminer.layout import LAParams\n from pdfminer.pdfpage import PDFPage\n from io import StringIO\n print (files)\n \n rsrcmgr = PDFResourceManager()\n print(rsrcmgr,\"rsrcmgr\")\n retstr = StringIO()\n print(retstr,\"retstr\")\n codec = 'utf-8'\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n fp = open(files, 'rb')\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n password = \"\"\n maxpages = 0\n caching = True\n pagenos=set()\n\n for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):\n interpreter.process_page(page)\n\n text = retstr.getvalue()\n print(text)\n fp.close()\n device.close()\n retstr.close()\n\n print(text,\"string\")\n print('inside') \n return(text)\n\n\n\n \n","sub_path":"Cv_parser/convertPDFToText.py","file_name":"convertPDFToText.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"574539419","text":"import cPickle as pickle\nimport pymc3 as pm\nimport autograd.numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport theano\nimport theano.tensor as tt\nimport sys\nimport os\nfrom theano.compile.ops import as_op\n\nfrom Emulator import *\nfrom PipeLine import *\nfrom Convergency_check import PlotMarginalLikelihood\nfrom Utilities import PlotTrace\n\nif len(sys.argv) != 5:\n print('Use this script by entering: python %s Prior Training_file ExpData Output_name' % (sys.argv[0]))\n sys.exit()\n\n\n#trainning_x = np.arange(1, 4, 0.3).reshape(-1,1)\n\n\"\"\"\nLoading prior of each variables\nwill be used to load the parameter names\nsuch that when model data is read\nit can tell which one is input parameter and which one is output \n\"\"\"\n# load the prior\nprior = pd.read_csv(sys.argv[1])\n# load the name of the variables in the prior\npar_name = list(prior)\n\n\"\"\"\nLoading experiment output data\n\"\"\"\n# rad the experiment result\ndf = pd.read_csv(sys.argv[3])\n# load the experimental error\nerror = df[list(df.filter(regex='_Error'))].as_matrix().flatten()\nexp_result = df[df.columns.drop(list(df.filter(regex='_Error')))].as_matrix().flatten()\ncov = np.square(np.diag(error))\n\n\"\"\"\nUse trained 
emulator\n\"\"\"\nwith open(sys.argv[2], 'rb') as buff:\n data = pickle.load(buff)\n\npipe2 = data['input_pipe']\npipe = data['output_pipe']\nsim_data = data['input_data']\nsim_para = data['input_para']\nscales = data['scales']\nnuggets = data['nuggets']\n\n\"\"\"\nwe need to normalized the observed points for better emulation\nWe need to normalize both the output and input space\nfor output space, PCA is also performed for dimension reduction\n\"\"\"\npipe.Fit(sim_data)\npipe2.Fit(sim_para)\n\nemulator = EmulatorMultiOutput(pipe2.Transform(sim_para), pipe.Transform(sim_data))\nemulator.SetCovariance(squared_exponential)\nemulator.SetScales(scales)\nemulator.SetNuggets(nuggets)\nemulator.StartUp()\n\n\nmodel = pm.Model()\n\nwith model:\n\n \"\"\"\n Interface for theano to talk to our emulator\n pymc3 uses theano for calculation\n this interface is necessary\n \"\"\"\n class EmulatorLogLikelihood(pm.Continuous):\n def __init__(self, x, *args, **kwargs):\n super(EmulatorLogLikelihood, self).__init__(*args, **kwargs)\n self.x = x\n \n def logp(self, value):\n x = tt.stack(self.x)\n return my_logp(x, value)\n \n @as_op(itypes=[tt.dvector, tt.dvector], otypes=[tt.dscalar])\n def my_logp(x, value):\n mean, var = emulator.Emulate(pipe2.Transform(np.array(x)).reshape(1, -1))\n mean = pipe.TransformInv(mean.flatten())\n var = pipe.TransformCovInv(np.diag(var)) + cov\n return np.array(mvn.logpdf(value, mean, var))\n\n parameters = []\n # form random variables according to prior \n for column in prior:\n parameters.append(pm.Uniform(column, prior[column][0], prior[column][1]))\n \n emulator_result = EmulatorLogLikelihood('emulator', x=parameters, observed=theano.shared(exp_result))\n step = pm.Metropolis()\n trace = pm.sample(2000, step=step, njobs=20)\n\n pm.traceplot(trace)\n\n # plot the result in a nice matrix of histograms\n num_par = len(par_name)\n graph_num = 1\n fig, axes2d = plt.subplots(num_par, num_par) \n \n PlotTrace(trace, par_name, prior)\n df = pm.backends.tracetab.trace_to_dataframe(trace)\n df.to_csv('%s.csv' % sys.argv[4], sep='\\t')\n\n\nwith open('%s.pkl' % sys.argv[4], 'wb') as buff:\n pickle.dump({'model': emulator, 'trace': df, \\\n 'input_pipe': pipe2, 'output_pipe': pipe, \\\n 'exp_data': exp_result, 'exp_err': error, \\\n 'prior': prior}, buff)\n \n","sub_path":"Theano_Developing/StatMetroplis.py","file_name":"StatMetroplis.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520139055","text":"#PF-Prac-20\n\"\"\"A Ducci sequence is a sequence of lists of integers. 
Given a starting list of integers, the next list in the sequence is formed by taking the absolute differences of neighboring integers in the previous list.\nStart List: [0,653,1854,4063]\n\nDucci Sequence:[653,1201,2209,4063], [548,1008,1854,3410], ...........,[0,0,0,0]\n\nAssumption: The Ducci sequence ends with a list containing 0s and the starting list contains four elements.\n\nWrite a Python function that takes a starting list of integers and a number ‘n’ as input, and returns the nth element of the Ducci sequence.\n\nSample Input\tExpected Output\ntest_list=[0,653,1854,4063]\nn = 1\n\"\"\"\n#import copy\ndef ducci_sequence(test_list,n):\n #start writing your code here\n temp_list = test_list.copy()\n diff_list=[]\n final_list=[]\n end = len(test_list)-1\n \n for i in range(n+1):\n for j in range(end):\n diff = abs(temp_list[j]-temp_list[j+1])\n diff_list.append(diff)\n # wrap-around difference between the last and first elements\n diff_list.append(abs(temp_list[end]-temp_list[0]))\n final_list.append(diff_list)\n temp_list=diff_list.copy()\n diff_list=[]\n\n return final_list[n]\n\nducci_element=ducci_sequence([0, 653, 1854, 4063] , 2)\nprint(ducci_element)","sub_path":"ducci_sequence.py","file_name":"ducci_sequence.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"352453906","text":"import parselmouth\nfrom parselmouth.praat import call\nimport os\nimport pandas as pd\n\n\n\ndef extract_pitch(input_dir,output_dir):\n\n if os.path.isdir(input_dir):\n # print(input_dir)\n for root, dirs, files in os.walk(input_dir):\n break\n for file in files:\n if file.endswith('.wav'):\n snd = parselmouth.Sound(input_dir+file)\n dur = snd.get_total_duration()\n manipulation = call(snd, \"To Manipulation\", 0.01, 75, 600)\n pitch_tier = call(manipulation, \"Extract pitch tier\")\n # print(pitch_tier)\n\n # # create a file with two columns:\n # first col is time (0.01s step by default), second col is pitch (hz)\n resultfile_padding = 'result'\n output_name = '%s%s_%s.txt' % (output_dir, file[:-4],resultfile_padding)\n call(pitch_tier, \"Write to headerless spreadsheet file\", output_name)\n print(output_name)\n # rounding time info to 2 digits\n with open('%s'%output_name, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n # rounded_time = []\n # all_pitch = []\n new_lines = []\n for line in lines:\n time = line.split('\\t')[0]\n rounded = round(float(time),2)\n # rounded_time.append(rounded)\n pitch = line.split('\\t')[1]\n # all_pitch.append(pitch)\n new_line = str(rounded) + '\\t' + pitch\n new_lines.append(new_line)\n\n final = ''.join(new_lines)\n # print(final)\n\n with open('%s'%output_name, 'w', encoding='utf-8') as f:\n f.write(final)\n\n\nif __name__ == '__main__':\n dir = (\"D:\\Rokid\\pycharm\\music/align_lyrics/\")\n out = dir\n extract_pitch(dir, out)","sub_path":"AudioProcessing/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"291880588","text":"#!/usr/bin/env python3\n\nimport colorsys\nimport hashlib\nimport os\nimport re\nimport sys\n\n\ndef parse_color(color):\n if ',' in color:\n return [int(component) for component in color.split(',')][:3]\n if color.startswith('#'):\n return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16))\n raise ValueError('unable to parse color %s; try r,g,b or #FFFFFF' % color)\n\n\ndef set_bg_color(rgb):\n if not os.isatty(1): # if stdout is not a 
tty, don't even attempt to change colors\n return\n if os.environ.get('TERM_PROGRAM') == 'iTerm.app':\n r, g, b = rgb\n os.write(1, b'\\033]Ph%02x%02x%02x\\033\\\\' % (r, g, b))\n\n\ndef find_hostmap_match(user_host):\n if not os.path.isfile(hostmap_file):\n return\n host = user_host.group(2)\n user_host = user_host.group(0)\n with open(hostmap_file) as hostmap_fp:\n for line in hostmap_fp:\n if line.startswith('#') or '=' not in line:\n continue\n m_host, color = [p.strip() for p in line.rsplit('=', 1)]\n if any((m == m_host or re.match(m_host, m)) for m in (host, user_host)):\n return parse_color(color)\n\ndefault_color = parse_color(os.environ.get('SSHW_DEFAULT_BG') or '25,25,25')\nhostmap_file = os.path.expanduser(os.path.expandvars(os.environ.get('SSHW_HOSTMAP') or '~/.sshw_hosts'))\n\n\ndef main(argv):\n changed = False\n user_host = None\n\n # perform rudimentary parsing of the command line to find the user/host parameter\n for arg in argv[1:]:\n if arg == '--': # after the double dash everything's the command; we've missed our shot\n break\n user_host = re.match(r'([a-z0-9]+@)?([0-9a-z.]+)', arg, re.I)\n if user_host:\n break\n\n if user_host:\n rgb = find_hostmap_match(user_host)\n if not rgb:\n user, host = user_host.groups()\n host_hash = hashlib.sha1(host.encode('ascii')).digest()\n hue = host_hash[0] / 255.\n rgb = [int(c * 255) for c in colorsys.hsv_to_rgb(hue, 0.7, 0.2)]\n set_bg_color(rgb)\n changed = True\n\n if changed:\n try:\n return os.spawnvp(os.P_WAIT, 'ssh', argv)\n except KeyboardInterrupt:\n pass\n finally:\n set_bg_color(default_color)\n else:\n # if the colors weren't changed at all, we can just exec --\n # no need to attempt to restore the color -- and save some\n # memory and process table space. (as if that's at a premium.)\n return os.execvp('ssh', argv)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"sshw.py","file_name":"sshw.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"103771904","text":"from __future__ import print_function\nimport os\nfrom flask_socketio import SocketIO\nfrom flask import Flask, render_template\nimport logging\n\nproject_root = os.path.dirname(__file__)\ntemplate_path = os.path.join(project_root, 'templates')\napp = Flask(__name__, template_folder=template_path, static_folder='templates')\nsocketio = SocketIO(app)\n\nlogging.getLogger('socketio').setLevel(logging.WARNING)\nlogging.getLogger('engineio').setLevel(logging.WARNING)\n\n\nclass eular_angles(object):\n def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n\n\nsp_gyro = eular_angles()\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@socketio.on('params')\ndef test_message(message):\n # print(message)\n sp_gyro.x = message[\"x\"]\n sp_gyro.y = message[\"y\"]\n sp_gyro.z = message[\"z\"]\n\n\nif __name__ == '__main__':\n socketio.run(app, host=\"0.0.0.0\")\n","sub_path":"server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"605111375","text":"\"\"\" AI chatbot \"\"\"\n\n# AI Chat Bot Module for @theUserge.\n# Lydia AI Powered by CoffeeHouse from Intellivoid (Telegram: t.me/Intellivoid)\n# Author: Phyco-Ninja (https://github.com/Phyco-Ninja) (@PhycoNinja13b)\n# Thanks to @Intellivoid For Creating CoffeeHouse API\n\n\nimport os\nimport random\nimport asyncio\n# from time import 
time\n\nfrom coffeehouse.lydia import LydiaAI\nfrom coffeehouse.api import API\nfrom coffeehouse.exception import CoffeeHouseError\n\nfrom userge import userge, get_collection, Message, Filters, Config\n\n\nLYDIA_CHATS = get_collection(\"LYDIA_CHATS\")\nCH_LYDIA_API = os.environ.get(\"CH_LYDIA_API\", None)\nCUSTOM_REPLY_CHANNEL = int(os.environ.get(\"CUSTOM_REPLY_CHANNEL\", 0))\nif CH_LYDIA_API is not None:\n LYDIA = LydiaAI(API(CH_LYDIA_API))\n\nACTIVE_CHATS = {}\nCUSTOM_REPLIES = []\n\nLYDIA_API_INFO = \"\"\"This module uses Lydia AI\nPowered by CoffeeHouse API created by @Intellivoid.\n\nLydia is a Active Machine Learning Chat Bot.\nWhich can adapt to current user and chat with user\non any given topic. \"\"\"\n\n\nasync def _init():\n async for chat in LYDIA_CHATS.find({'active': True}):\n ACTIVE_CHATS[chat['_id']] = (chat['session_id'], chat['session_exp'])\n if CUSTOM_REPLY_CHANNEL:\n async for message in userge.iter_history(chat_id=CUSTOM_REPLY_CHANNEL, limit=300):\n CUSTOM_REPLIES.append(message)\n\n\n# A workaround for replies of Media as per now Lydia can't process Media input,\n# And it's logical though. So this func will call custom message input by user\n# saved in a channel and reply it to message.\n# Idea arised from here (https://t.me/usergeot/157629) thnx 👍\nasync def custom_media_reply(message: Message):\n global CUSTOM_REPLIES\n if CUSTOM_REPLIES:\n cus_msg = random.choice(CUSTOM_REPLIES)\n replied = message.message_id\n if cus_msg.media:\n if cus_msg.sticker:\n await message.reply_sticker(cus_msg.sticker.file_id)\n if (cus_msg.photo or cus_msg.video or cus_msg.animation):\n dls = await userge.download_media(message=cus_msg, file_name=Config.DOWN_PATH)\n if cus_msg.photo:\n await message.reply_photo(dls)\n if cus_msg.video:\n await message.reply_video(dls)\n if cus_msg.animation:\n await userge.send_animation(\n chat_id=message.chat.id,\n animation=dls,\n unsave=True,\n reply_to_message_id=replied\n )\n os.remove(dls)\n if cus_msg.text:\n await message.reply(cus_msg.text)\n\n\n@userge.on_cmd(\"lydia\", about={\n 'header': \"Lydia AI Chat Bot\",\n 'description': \"An AI Powered Chat Bot Module\"\n \" that uses Lydia AI from CoffeeHouse.\\n\"\n \"For more info use {tr}lydia -info\",\n 'flags': {'-on': \"Enable AI on replied user\",\n '-off': \"Disable AI on replied user\",\n '-list': \"List All users\",\n '-info': \"Get Info about Lydia\"},\n 'usage': \"{tr}lydia [flag] [reply to user]\"})\nasync def lydia_session(message: Message):\n if CH_LYDIA_API is None:\n await message.edit(\n \"Please Configure `CH_LYDIA_API` & `CUSTOM_REPLY_CHANNEL`\"\n \"\\n\\nAll Instructions are available\"\n \" in @UnofficialPluginsHelp\")\n return\n\n replied = message.reply_to_message\n if '-on' in message.flags and replied:\n user_id = replied.from_user.id\n if user_id in ACTIVE_CHATS:\n await message.edit(\"AI is already Enabled on Replied User\")\n return\n data = await LYDIA_CHATS.find_one({'_id': user_id})\n if not data:\n await message.edit(\"`creating new session...`\")\n ses = LYDIA.create_session(\"en\")\n await LYDIA_CHATS.insert_one(\n {'_id': user_id, 'session_id': ses.id, 'session_exp': ses.expires, 'active': True})\n ACTIVE_CHATS[user_id] = (ses.id, ses.expires)\n else:\n await message.edit(\"`activating session...`\")\n await LYDIA_CHATS.update_one({'_id': user_id}, {\"$set\": {'active': True}})\n ACTIVE_CHATS[user_id] = (data['session_id'], data['session_exp'])\n await message.edit(\"`AI Enabled for Replied User`\", del_in=2)\n\n elif '-off' in message.flags and replied:\n 
user_id = replied.from_user.id\n if user_id not in ACTIVE_CHATS:\n await message.edit(\"How to delete a thing that doesn't exist?\", del_in=5)\n return\n await message.edit(\"`deactivating session...`\")\n await LYDIA_CHATS.update_one({'_id': user_id}, {\"$set\": {'active': False}})\n del ACTIVE_CHATS[user_id]\n await message.edit(\"`AI Disabled for Replied User`\", del_in=5)\n\n # Group Features Won't be displayed in Help Info For Now 😉\n elif '-enagrp' in message.flags:\n chat_id = message.chat.id\n if chat_id in ACTIVE_CHATS:\n await message.edit(\"AI is already Enabled on this chat\")\n return\n data = await LYDIA_CHATS.find_one({'_id': chat_id})\n if not data:\n await message.edit(\"`creating new session...`\")\n ses = LYDIA.create_session(\"en\")\n await LYDIA_CHATS.insert_one(\n {'_id': chat_id, 'session_id': ses.id, 'session_exp': ses.expires, 'active': True})\n ACTIVE_CHATS[chat_id] = (ses.id, ses.expires)\n else:\n await message.edit(\"`activating session...`\")\n await LYDIA_CHATS.update_one({'_id': chat_id}, {\"$set\": {'active': True}})\n ACTIVE_CHATS[chat_id] = (data['session_id'], data['session_exp'])\n await message.edit(\"`AI Enabled in Current Chat :D`\")\n\n elif '-disgrp' in message.flags:\n chat_id = message.chat.id\n if chat_id not in ACTIVE_CHATS:\n await message.edit(\"AI wasn't enabled in current chat. >:(\", del_in=5)\n return\n await message.edit(\"`deactivating session...`\")\n await LYDIA_CHATS.update_one({'_id': chat_id}, {\"$set\": {'active': False}})\n del ACTIVE_CHATS[chat_id]\n await message.edit(\"`AI Disabled in Current Chat`\", del_in=5)\n\n elif '-grps' in message.flags:\n msg = \"**AI Enabled Chats**\\n\\n\"\n for chat_id in ACTIVE_CHATS:\n if not str(chat_id).startswith(\"-100\"):\n continue\n chat_ = await userge.get_chat(chat_id)\n title = chat_.title\n msg += f\"{title} {chat_id}\\n\"\n await message.edit_or_send_as_file(msg)\n\n elif '-list' in message.flags:\n msg = \"**AI Enabled User List**\\n\\n\"\n for user_id in ACTIVE_CHATS:\n if str(user_id).startswith(\"-100\"):\n continue\n u_info = await userge.get_user_dict(user_id)\n u_men = u_info['mention']\n msg += f\"{u_men}\\n\"\n await message.edit_or_send_as_file(msg)\n\n elif '-info' in message.flags:\n await message.reply_photo(photo=\"resources/lydia.jpg\", caption=LYDIA_API_INFO)\n else:\n await message.reply_sticker(\"CAADAQAEAQAC0rXRRju3sbCT07jIFgQ\")\n\n\n@userge.on_filters(~Filters.me & (Filters.mentioned | Filters.private))\nasync def lydia_ai_chat(message: Message):\n \"\"\" incoming message handler \"\"\"\n if CH_LYDIA_API is None:\n return\n data = ACTIVE_CHATS.get(message.from_user.id, None) or ACTIVE_CHATS.get(message.chat.id, None)\n if data:\n if message.media:\n await custom_media_reply(message)\n else:\n ses = LYDIA.get_session(data[0])\n mess_text = message.text\n # if int(ses_exp) < time():\n # ses = lydia.create_session(\"en\")\n # ses_id = ses.id\n # ses_exp = ses.expires\n # await LYDIA_SESSION.find_one_and_update(\n # {'uid': \"LYDIA_SES\"},\n # {\"$set\": {'session_id': ses_id, 'session_exp': ses_exp}})\n try:\n output_ = LYDIA.think_thought(ses.id, mess_text)\n await message.reply_chat_action(\"typing\")\n await asyncio.sleep(7)\n await message.reply_chat_action(\"typing\")\n await asyncio.sleep(2)\n await message.reply_chat_action(\"cancel\")\n await message.reply(output_)\n except CoffeeHouseError:\n pass\n 
message.continue_propagation()\n","sub_path":"plugins/lydia.py","file_name":"lydia.py","file_ext":"py","file_size_in_byte":8450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"535567864","text":"from unittest import mock\n\nimport pymediainfo\nfrom fffw.wrapper.helpers import ensure_text\n\nfrom video_transcoding import transcoding\nfrom video_transcoding.tests.base import BaseTestCase\n\n\nclass TranscodingTestCase(BaseTestCase):\n \"\"\" Video file transcoding tests.\"\"\"\n\n # Minimal mediainfo output template to mock MediaInfo.parse result\n media_info_xml = \"\"\"\n\n\n1\n1\n\n\n\n{video_duration:.3f}\n{video_bitrate}\n{width}\n{height}\n{par:.3f}\n{aspect:.3f}\n{video_frame_rate:.3f}\n{video_frames}\n\n\n\n{audio_duration:.3f}\n{audio_bitrate}\n{audio_sampling_rate}\n{audio_samples}\n\n\n\n\"\"\"\n\n # Default video file metadata\n metadata = {\n 'width': 1920,\n 'height': 1080,\n 'aspect': 1.778,\n 'par': 1.0,\n transcoding.VIDEO_DURATION: 3600.22,\n 'video_bitrate': 5000000,\n 'video_frame_rate': 24.97,\n 'audio_bitrate': 192000,\n 'audio_sampling_rate': 48000,\n transcoding.AUDIO_DURATION: 3600.22,\n }\n\n def setUp(self):\n self.source = 'http://ya.ru/source.mp4'\n self.dest = '/tmp/result.mp4'\n self.media_info = {\n self.source: self.prepare_metadata(),\n self.dest: self.prepare_metadata()\n }\n\n self.transcoder = transcoding.Transcoder(self.source, self.dest)\n\n self.media_info_patcher = mock.patch.object(\n pymediainfo.MediaInfo, 'parse', side_effect=self.get_media_info)\n self.media_info_mock = self.media_info_patcher.start()\n\n self.runner_mock = mock.MagicMock(\n return_value=(0, '', '')\n )\n\n self.runner_patcher = mock.patch(\n 'fffw.encoding.ffmpeg.FFMPEG.runner_class',\n return_value=self.runner_mock)\n self.ffmpeg_mock = self.runner_patcher.start()\n\n def tearDown(self):\n self.media_info_patcher.stop()\n self.runner_patcher.stop()\n\n def prepare_metadata(self, **kwargs):\n \"\"\"\n Modifies metadata template with new values.\n \"\"\"\n media_info = self.metadata.copy()\n media_info.update(kwargs)\n return media_info\n\n def get_media_info(self, filename) -> pymediainfo.MediaInfo:\n \"\"\" Prepares mediainfo result for file.\"\"\"\n metadata = self.media_info[filename]\n rate = metadata['audio_sampling_rate']\n audio_duration = metadata['audio_duration']\n fps = metadata['video_frame_rate']\n video_duration = metadata['video_duration']\n xml = self.media_info_xml.format(\n filename=filename,\n audio_samples=int(rate * audio_duration),\n video_frames=int(fps * video_duration),\n **metadata)\n return pymediainfo.MediaInfo(xml)\n\n def test_smoke(self):\n \"\"\"\n ffmpeg arguments test.\n \"\"\"\n self.transcoder.transcode()\n ffmpeg_args = [\n 'ffmpeg',\n '-loglevel', 'repeat+level+info',\n '-y',\n '-i', self.source,\n '-filter_complex', '[0:v]scale=w=1920:h=1080[vout0]',\n '-map', '[vout0]',\n '-c:v', 'libx264',\n '-force_key_frames',\n 'expr:if(isnan(prev_forced_t),1,gte(t,prev_forced_t+4))',\n '-crf', '23',\n '-preset', 'slow',\n '-maxrate', '5000000',\n '-bufsize', '10000000',\n '-profile:v', 'high',\n '-g', '49',\n '-r', '24.97',\n '-map', '0:a',\n '-c:a', 'aac',\n '-b:a', '192000',\n '-ar', '48000',\n '-ac', '2',\n '-f', 'mp4', self.dest\n ]\n args, kwargs = self.ffmpeg_mock.call_args\n self.assertEqual(ensure_text(args), 
tuple(ffmpeg_args))\n","sub_path":"src/video_transcoding/tests/test_transcoding.py","file_name":"test_transcoding.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"482091616","text":"import tensorflow as tf\nimport cv2\nimport datetime\nimport time\nimport argparse\nimport os\n\nimport posenet\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--scale_factor', type=float, default=1.0)\nparser.add_argument('--notxt', action='store_true')\nparser.add_argument('--video', type=str, required=True)\nargs = parser.parse_args()\n\n\ndef main():\n\n with tf.Session() as sess:\n model_cfg, model_outputs = posenet.load_model(args.model, sess)\n output_stride = model_cfg['output_stride']\n\n video = cv2.VideoCapture(args.video)\n formatted_date = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\")\n path = '/opt/cv/result/pose-results/posenet/' + formatted_date + '-' + str(args.model)\n i = 0\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n start = time.time()\n\n while True:\n input_image, draw_image, output_scale = posenet.read_cap(\n video, scale_factor=args.scale_factor, output_stride=output_stride)\n\n if input_image is None:\n break\n\n heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(\n model_outputs,\n feed_dict={'image:0': input_image}\n )\n\n pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(\n heatmaps_result.squeeze(axis=0),\n offsets_result.squeeze(axis=0),\n displacement_fwd_result.squeeze(axis=0),\n displacement_bwd_result.squeeze(axis=0),\n output_stride=output_stride,\n max_pose_detections=10,\n min_pose_score=0.25)\n\n keypoint_coords *= output_scale\n\n draw_image = posenet.draw_skel_and_kp(\n draw_image, pose_scores, keypoint_scores, keypoint_coords,\n min_pose_score=0.25, min_part_score=0.25)\n\n cv2.imwrite(path + '/' + str(i) + '.jpg', draw_image)\n\n if not args.notxt:\n print()\n print(\"Results for image: %i\" % i)\n for pi in range(len(pose_scores)):\n if pose_scores[pi] == 0.:\n break\n print('Pose #%d, score = %f' % (pi, pose_scores[pi]))\n for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):\n print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))\n\n i += 1\n\n print('Average FPS:', i / (time.time() - start))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"610195834","text":"# import the required packages\n\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='0'\nimport zipfile\nimport random\nimport json\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph import Linear, Conv2D, BatchNorm\nimport matplotlib.pyplot as plt\nfrom paddle.fluid.param_attr import ParamAttr\n\ntrain_parameters = {\n \"input_size\": [1, 20, 20], # shape of the input images\n \"class_dim\": -1, # number of classes\n \"src_path\":\"data/data23617/characterData.zip\", # path of the raw dataset\n \"target_path\":\"./data/data23617\", # path to extract to \n \"train_list_path\": \"./train_data.txt\", # path of train_data.txt\n \"eval_list_path\": \"./val_data.txt\", # path of eval_data.txt\n \"label_dict\":{}, # label dictionary\n \"readme_path\": \"/home/aistudio/data/readme.json\", # path of readme.json\n \"num_epochs\": 1, # number of training epochs\n \"train_batch_size\": 32, 
# batch size\n \"learning_strategy\": { # optimizer-related configuration\n \"lr\": 0.001 # learning rate hyperparameter\n } \n}\n\ndef unzip_data(src_path,target_path):\n '''\n Unzip the raw dataset: extract the zip archive under src_path into the data/dataset directory\n '''\n if(not os.path.isdir(target_path)): \n z = zipfile.ZipFile(src_path, 'r')\n z.extractall(path=target_path)\n z.close()\n else:\n print(\"Files already extracted\")\n \n\n\ndef get_data_list(target_path,train_list_path,eval_list_path):\n '''\n Generate the data lists\n '''\n # holds the info of every class\n class_detail = []\n # get the names of the folders in which each class is stored\n data_list_path=target_path\n class_dirs = os.listdir(data_list_path)\n if '__MACOSX' in class_dirs:\n class_dirs.remove('__MACOSX')\n # # total number of images\n all_class_images = 0\n # # holds the class label\n class_label=0\n # # holds the number of classes\n class_dim = 0\n # # holds the lines to be written into eval.txt and train.txt\n trainer_list=[]\n eval_list=[]\n # read each class\n for class_dir in class_dirs:\n if class_dir != \".DS_Store\":\n class_dim += 1\n # info of this class\n class_detail_list = {}\n eval_sum = 0\n trainer_sum = 0\n # count how many images this class has\n class_sum = 0\n # get the class path \n path = os.path.join(data_list_path,class_dir)\n # print(path)\n # get all images\n img_paths = os.listdir(path)\n for img_path in img_paths: # iterate over every image in the folder\n if img_path =='.DS_Store':\n continue\n name_path = os.path.join(path,img_path) # path of each image\n if class_sum % 10 == 0: # every 10th image is used as validation data\n eval_sum += 1 # eval_sum is the number of eval samples\n eval_list.append(name_path + \"\\t%d\" % class_label + \"\\n\")\n else:\n trainer_sum += 1 \n trainer_list.append(name_path + \"\\t%d\" % class_label + \"\\n\")# trainer_sum is the number of training samples\n class_sum += 1 # number of images per class\n all_class_images += 1 # number of images over all classes\n \n # class_detail data for the descriptive json file\n class_detail_list['class_name'] = class_dir # class name\n class_detail_list['class_label'] = class_label # class label\n class_detail_list['class_eval_images'] = eval_sum # number of eval images of this class\n class_detail_list['class_trainer_images'] = trainer_sum # number of training images of this class\n class_detail.append(class_detail_list) \n # initialize the label dictionary\n train_parameters['label_dict'][str(class_label)] = class_dir\n class_label += 1\n \n # initialize the number of classes\n train_parameters['class_dim'] = class_dim\n print(train_parameters)\n # shuffle \n random.shuffle(eval_list)\n with open(eval_list_path, 'a', encoding='utf-8') as f:\n for eval_image in eval_list:\n f.write(eval_image) \n # shuffle \n random.shuffle(trainer_list) \n with open(train_list_path, 'a', encoding='utf-8') as f2:\n for train_image in trainer_list:\n f2.write(train_image) \n\n # info for the descriptive json file\n readjson = {}\n readjson['all_class_name'] = data_list_path # parent directory of the files\n readjson['all_class_images'] = all_class_images\n readjson['class_detail'] = class_detail\n jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))\n with open(train_parameters['readme_path'],'w') as f:\n f.write(jsons)\n print ('Finished generating the data lists!')\n\n\ndef data_reader(file_list):\n '''\n Custom data_reader\n '''\n def reader():\n with open(file_list, 'r', encoding='utf-8') as f:\n lines = [line.strip() for line in f]\n for line in lines:\n img_path, lab = line.strip().split('\\t')\n print(img_path)\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.array(img).astype('float32')\n img = img/255.0\n yield img, int(lab) \n return reader\n\n\n'''\nParameter initialization\n'''\nsrc_path=train_parameters['src_path']\ntarget_path=train_parameters['target_path']\ntrain_list_path=train_parameters['train_list_path']\neval_list_path=train_parameters['eval_list_path']\nbatch_size=train_parameters['train_batch_size']\n'''\nUnzip the raw data to the target path\n'''\nunzip_data(src_path,target_path)\n\n# before generating the data lists, first clear train.txt and eval.txt\nwith open(train_list_path, 'w') as f: \n f.seek(0)\n f.truncate() \nwith open(eval_list_path, 'w') as f: 
\n f.seek(0)\n f.truncate() \n \n# generate the data lists \nget_data_list(target_path,train_list_path,eval_list_path)\n\n'''\nBuild the data readers\n'''\ntrain_reader = paddle.batch(data_reader(train_list_path),\n batch_size=batch_size,\n drop_last=True)\neval_reader = paddle.batch(data_reader(eval_list_path),\n batch_size=batch_size,\n drop_last=True)\n\n\nBatch=0\nBatchs=[]\nall_train_accs=[]\n\n\nfor batch_id,data in enumerate(train_reader()):\n images=np.array([x[0].reshape(1,20,20) for x in data],np.float32)\n labels = np.array([x[1] for x in data]).astype('int64')\n labels = labels[:, np.newaxis]\n print(labels.shape)\n print(images.shape)","sub_path":"refactor/testReader.py","file_name":"testReader.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"411243516","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 3 09:51:06 2017\n\n@author: DKjack\n\"\"\"\n\n##Scientific Computing with Python3 main in Spyder\n#Since 11.2016\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport os\n\n\n#modules and Scripts\n\ndef f(x):\n return 2**x +1\n\nz = []\nfor x in range(10):\n if f(x) >4:\n z.append(x)\n else:\n z.append(-1)\n \nprint(z)\nprint(os.getcwd())\n#Using modules and nameSpaces\nfrom smartfunctions import *\nimport smartfunctions as sf\nprint(sf.f(2))\nfrom smartfunctions import g\nprint(g(2))\n\n##Chapter2 Variables and Basic Types\nN = 10\n#following vector contains the Nth roots of unity:\nunity_roots = np.array([np.exp(1j*2*np.pi*k/N) for k in range(N)])\n#access all the real or imaginary parts with real or imag:axes(aspect = 'equal')\n#plt.plot(unity_roots.real, unity_roots.imag, 'o')\nnp.allclose(unity_roots**N, 1) #True\n#You need to add the modules used inside your functions\n##Imports inside one's functions are still a problem for us\n\n##Chapter 3 Container Types:\n#lists\nL = ['a',20.0,5]\nM = [3,['a',-3.0, 5]]\nl=list(range(4))\nprint (l)\n\n#Slicing\nL2 = ['C','l','o','u','d','s']\n \na = [1,2,3]\nfor iteration in range(4):\n print(sum(a[0:iteration-1]))\n \n#Strides: slicing with a step\nL3 =list(range(100))\nL3[:10:2]\nL3[10:20:3]\n#use strides to reverse the order\nL4 = list(range(5))\nR = L4[::-1]\nprint(R)\n# Some List methods: \n#list.append(x);\n#list.extend(L);\n#list.insert(i,x);\n#list.remove(x);\n#list.count(x)\n#list.sort()\n#list.reverse()\n#list.pop()\n\n#In-place operations\nprint(L4)\nL4.reverse()\nprint(L4)\nL5 = [6,5,7,5,2,1,3,6,7]\nprint(L5)\nL5.sort()# this sort happens in place; it has its own usage pattern and keyword arguments\nprint(L5)\n#Zip operation\nindex = [0,1,2,3,4]\nname = ['zhou','chen','john','li']\n#print(list(zip(name,index)))\n\n#Arrays :\nM = np.array([[1,2,3],[3,4,5]])\nv = np.array([[4,5],[6,7]])\n#these two matrices\n\n#Tuples\nmy_tuple = (1,2,3)\n\n\n#Dictionaries \n#Creating and altering dictionaries:\ntruck_wheel = {'name' : 'wheel',\n 'mass' : 5.70,\n 'Ix' :20.0,\n 'Iy' :1.,\n 'Iz' :17.0,\n 'center of mass' : [0.,0.,0.]}\n#for key in truck_wheel.values():\n #print (key)\n \n#for item in truck_wheel.items():\n #print(item)\n \n#Sets\nA = {1,2,3,5,7,100}\nB= {2,4,6,8}\nC = A.union(B)\nD = A.intersection(C)\nE = C.difference(A)\nprint(C)\nprint(D)\nprint(E)\n\n\n\n","sub_path":"example/S_C_Chapter1.py","file_name":"S_C_Chapter1.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"199283922","text":"from os.path import dirname, join\nfrom unittest import TestCase\n\nfrom elex3.lib.parser import 
parse_and_clean\n\n\nclass TestParser(TestCase):\n\n def test_name_parsing(self):\n \"Parser should split full candidate name into first and last names\"\n path = join(dirname(__file__), 'sample_results.csv')\n results = parse_and_clean(path)\n race_key = 'President'\n cand_key = 'GOP-Smith, Joe'\n # Get one county result\n smith = results[race_key][cand_key][0]\n self.assertEqual(smith['first_name'], 'Joe')\n self.assertEqual(smith['last_name'], 'Smith')\n","sub_path":"elex3/tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"22066447","text":"__author__ = 'Sam Davies'\nfrom planning.strategies.strategy import Strategy\n\n\nclass FetchBall(Strategy):\n\n def __init__(self, world, robot_tag, actual_robot, config=None):\n super(FetchBall, self).__init__(world, robot_tag, actual_robot, config)\n self.m.add_state(\"Start\", self.start_trans)\n self.m.add_state(\"Grabber is Open\", self.grabber_is_open_trans)\n\n # End States / Actions\n self.m.add_final_state_and_action(\"Open Grabber\", self.other.raise_cage)\n self.m.add_final_state_and_action(\"Move to Ball\", self.move.move_robot_to_ball)\n self.m.add_final_state_and_action(\"Turn to Ball\", self.turn.turn_robot_to_ball)\n\n # set start state\n self.m.set_start(\"Start\")\n\n def act(self):\n self.fetch_world_state()\n\n action_state = self.m.run()\n return self.m.do_action(action_state)\n\n # ------------------------------------ Transitions ------------------------------------\n\n def start_trans(self):\n if self.world.do_refresh_kick:\n new_state = \"Open Grabber\"\n else:\n new_state = \"Grabber is Open\"\n return new_state\n\n def grabber_is_open_trans(self):\n if self.is_robot_facing_ball():\n new_state = \"Move to Ball\"\n else:\n new_state = \"Turn to Ball\"\n return new_state\n","sub_path":"planning/strategies/strategy_fetch_ball.py","file_name":"strategy_fetch_ball.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"514341839","text":"# Create a matplotlib chart with a green background.\n#\nimport pyvista\nchart = pyvista.ChartMPL()\nplots = chart.figure.axes[0].plot([0, 1, 2], [2, 1, 3])\nchart.background_color = (0.5, 0.9, 0.5)\nchart.show(interactive=False)\n#\n# Set the active background color to blue and activate the chart.\n#\nchart.active_background_color = 'b'\nchart.show(interactive=True)\n","sub_path":"version/0.39/api/plotting/charts/_autosummary/pyvista-ChartMPL-active_background_color-1.py","file_name":"pyvista-ChartMPL-active_background_color-1.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"252126681","text":"from os.path import join\n\n# ARMCC\nARM_PATH = \"C:/Work/toolchains/ARMCompiler_5.03_117_Windows\"\nARM_BIN = join(ARM_PATH, \"bin\")\nARM_INC = join(ARM_PATH, \"include\")\nARM_LIB = join(ARM_PATH, \"lib\")\n\nARM_CPPLIB = join(ARM_LIB, \"cpplib\")\nMY_ARM_CLIB = join(ARM_PATH, \"lib\", \"microlib\")\n\n# GCC ARM\nGCC_ARM_PATH = \"C:/Program Files (x86)/GNU Tools ARM Embedded/6.2 2016q4/bin\"\n\n# GCC CodeRed\nGCC_CR_PATH = \"C:/Work/toolchains/LPCXpresso_6.1.4_194/lpcxpresso/tools/bin\"\n\n# IAR\nIAR_PATH = \"C:/Work/toolchains/iar_6_5/arm\"\n\nSERVER_ADDRESS = \"127.0.0.1\"\nLOCALHOST = \"127.0.0.1\"\n\n# This is moved to separate JSON configuration file used by 
singletest.py\nMUTs = {\n}","sub_path":"mbed_settings.py","file_name":"mbed_settings.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"151317355","text":"\r\ndef kadane(a,n):\r\n l_max = a[0]\r\n k_max = a[0]\r\n\r\n for i in range(1,n):\r\n l_max = max(a[i],l_max+a[i])\r\n k_max = max(k_max,l_max)\r\n\r\n return k_max\r\n\r\ndef fn():\r\n n = int(input().strip())\r\n a = list(map(int,input().strip().split()))\r\n neg = 0\r\n\r\n k_max = kadane(a,n)\r\n t_sum = 0\r\n\r\n for i in range(n):\r\n if(a[i] < 0):\r\n neg += 1\r\n t_sum += a[i]\r\n a[i] = -a[i]\r\n\r\n if(neg == n):\r\n return k_max\r\n\r\n c_max = t_sum + kadane(a,n)\r\n\r\n return(max(c_max,k_max))\r\n \r\n \r\nfor _ in range(int(input().strip())):\r\n print(fn())\r\n","sub_path":"python/Max_Circular_Subarray_Sum_O(n)_solution.py","file_name":"Max_Circular_Subarray_Sum_O(n)_solution.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"373626336","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport cv2\nimport numpy as np\nimport pkg_resources\nimport tensorflow as tf\nfrom tensorflow.python.saved_model import tag_constants\nimport os\n\nFACES_PATH = '../data/face_detection/faces/'\n\n\n# In[5]:\n\n\n# Load the TensorBoard notebook extension\n#get_ipython().run_line_magic('load_ext', 'tensorboard')\nimport datetime\n\n# Clear any logs from previous runs\n#get_ipython().system('rm -rf ./logs/ ')\n\n\n# In[6]:\n\n\nclass PNet(tf.keras.Model):\n def __init__(self):\n super(PNet, self).__init__(name=\"PNet\")\n # Define layers here.\n self.conv1 = tf.keras.layers.Conv2D(10, (3, 3), name=\"conv1\")\n self.prelu1 = tf.keras.layers.PReLU(tf.constant_initializer(0.25), shared_axes=[1, 2], name=\"prelu1\")\n self.pool1 = tf.keras.layers.MaxPooling2D((2, 2), name=\"pool1\")\n self.conv2 = tf.keras.layers.Conv2D(16, (3, 3), name=\"conv2\")\n self.prelu2 = tf.keras.layers.PReLU(tf.constant_initializer(0.25), shared_axes=[1, 2], name=\"prelu2\")\n self.conv3 = tf.keras.layers.Conv2D(32, (3, 3), name=\"conv3\")\n self.prelu3 = tf.keras.layers.PReLU(tf.constant_initializer(0.25), shared_axes=[1, 2], name=\"prelu3\")\n self.cls_output = tf.keras.layers.Conv2D(2, (1, 1), activation=\"softmax\", name=\"conv4-1\")\n self.bbox_pred = tf.keras.layers.Conv2D(4, (1, 1), name=\"conv4-2\")\n #self.landmark_pred = keras.layers.Conv2D(10, (1, 1), name=\"conv4_3\")\n\n def call(self, inputs):\n # Define your forward pass here,\n # using layers you previously defined (in `__init__`).\n scores = None\n\n x = self.conv1(inputs)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n scores = [self.cls_output(x), self.bbox_pred(x)]#, self.landmark_pred(x)]\n \n return scores\n\n\n# ## Dataset iterator\n\nclass Dataset(object):\n def __init__(self, X, y, batch_size, shuffle=False):\n \"\"\"\n Construct a Dataset object to iterate over data X and labels y\n \n Inputs:\n - X: Numpy array of data, of any shape\n - y: Numpy array of labels, of any shape but with y.shape[0] == X.shape[0]\n - batch_size: Integer giving number of elements per minibatch\n - shuffle: (optional) Boolean, whether to shuffle the data on each epoch\n \"\"\"\n assert X.shape[0] == y.shape[0], 'Got different numbers of data and labels'\n self.X, self.y = X, y\n self.batch_size, self.shuffle = batch_size, shuffle\n\n 
def __iter__(self):\n N, B = self.X.shape[0], self.batch_size\n idxs = np.arange(N)\n if self.shuffle:\n np.random.shuffle(idxs)\n return iter((self.X[i:i+B], self.y[i:i+B]) for i in range(0, N, B))\n\n\n\n# Set up some global variables\nUSE_GPU = False\n\nif USE_GPU:\n device = '/device:GPU:0'\nelse:\n device = '/cpu:0'\nprint('Using device: ', device)\n\n\n# Test the PNet to ensure that the implementation does not crash and produces outputs of the expected shape.\n# PNet outputs are:\n# 1. Face classification, size (batch,1,1,2) for 2-class classification, \"Face\", and \"Not face\"\n# 2. Bounding box (batch,1,1,4) for 4 bounding box coordinates (x,y,w,h)\n\ndef test_PNet(batch=64):\n model = PNet()\n with tf.device(device):\n x = tf.zeros((batch, 12, 12, 3))\n classification_scores, bbox_score = model(x)\n print(model.summary())\n print('\\nP-Net output size testing: \\nclassification score output', classification_scores.shape,\n '\\nbounding box score output', bbox_score.shape)\n\nbatch_test = 32\ntest_PNet(batch_test)\n\n\n\n# Read Dataset\n\ntraining_size = 5000\n\ndef read_pos_images():\n #Read positive images:\n path, __, filenames = next(os.walk(FACES_PATH+'pos_train/'))\n file_count = training_size #len(filenames)\n images = np.empty([0,12,3])\n for i in range(file_count):\n j=i+1\n img=cv2.imread(f\"{path}{j}.bmp\")\n images=np.append(images,img,axis=0)\n #Create list of probabilities:\n prob=[]\n for i in range(file_count):\n prob.append([[[0.0,1.0]]])\n #Create list of coordinates:\n coordinates=[]\n file = open(FACES_PATH+'coordinates.txt','r')\n lines = file.readlines()\n lines = [line[:-1] for line in lines]\n idx=[1,0,3,2]\n for line in lines:\n line = line.split(\" \")\n line = line[1]\n line=line[1:-1]\n line = line.split(\",\")\n #Transpose coordinates\n x=0\n nline=[]\n for i in idx:\n nline.append(line[i])\n x=x+1\n line=[[[float(c) for c in nline]]]\n coordinates.append(line)\n #Return images, probs, and coordinates\n return images, prob, coordinates\n\ndef read_neg_images():\n #Read negative images:\n path, __, filenames = next(os.walk(FACES_PATH+'neg_train/'))\n file_count = training_size #len(filenames)\n images = np.empty([0,12,3])\n for i in range(file_count):\n j=i+1\n img=cv2.imread(f\"{path}{j}.bmp\")\n images=np.append(images,img,axis=0)\n #Create list of probabilities:\n prob=[]\n for i in range(file_count):\n prob.append([[[1.0,0.0]]])\n #Create list of coordinates:\n coordinates=[]\n for i in range(file_count):\n coordinates.append([[[0.0,0.0,0.0,0.0]]])\n #Return images, prob, coordinates\n return images, prob, coordinates\n\n#Read in all images, probabilities, and coordinates\npimages, pprob, pcoordinates = read_pos_images()\nnimages, nprob, ncoordinates = read_neg_images()\no_images=np.append(pimages,nimages,axis=0)\no_images=np.reshape(o_images,(-1,12,12,3))\no_prob=pprob+nprob\no_coordinates=pcoordinates+ncoordinates\n\n#Shuffle them up using an index\nidx=np.arange(len(o_prob))\nnp.random.shuffle(idx)\nimages=np.empty_like(o_images)\nc=0\nfor i in idx:\n images[c]=o_images[i]\n c=c+1\n#images=(np.float32)(images-127.5)/128.0\nimages=(np.float32)(images)/255\n\n#images = np.transpose(images, (0, 2, 1, 3)) #Transpose images\nprob=[]\nfor i in idx:\n prob.append(o_prob[i])\ncoordinates=[]\nfor i in idx:\n coordinates.append(o_coordinates[i])\n\nprint('X_train , Image batch shape ', images.shape)\nprint('y_train , Classification ground truth batch shape ' ,np.array(prob).shape)\nprint('y_train , Coordinates ground truth batch shape ', 
np.array(coordinates).shape)\n\n\nX_data = images\ndel(images)\ny_data = np.concatenate((np.array(prob), np.array(coordinates)), axis=3)\n\n# ## Divide dataset to \"train', \"val\" and \"test\"\ndef load_data(X, y, training_prec = 0.7, val_prec = 0.1, test_prec = 0.2):\n data_length = len(X)\n num_training = np.int(data_length * training_prec)\n num_validation = np.int(data_length * val_prec)\n \n mask = range(num_training)\n X_train = X[mask]\n y_train = y[mask]\n mask = range(num_training, num_training + num_validation)\n X_val = X[mask]\n y_val = y[mask]\n mask = range(num_training + num_validation, data_length)\n X_test = X[mask]\n y_test = y[mask]\n \n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\nX_train, y_train, X_val, y_val, X_test, y_test = load_data(X_data, y_data)\nprint('Train data shape: ', X_train.shape)\nprint('Train labels shape: ', y_train.shape, y_train.dtype)\nprint('Validation data shape: ', X_val.shape)\nprint('Validation labels shape: ', y_val.shape)\nprint('Test data shape: ', X_test.shape)\nprint('Test labels shape: ', y_test.shape)\n\n\ntrain_dset = Dataset(X_train, y_train, batch_size=64, shuffle=True)\nval_dset = Dataset(X_val, y_val, batch_size=64, shuffle=False)\ntest_dset = Dataset(X_test, y_test, batch_size=64)\n\n\n\ndef train(model_init_fn, optimizer_init_fn, num_epochs=1, is_training=False):\n \"\"\"\n Simple training loop for use with models defined using tf.keras. It trains\n a model for one epoch on training set and periodically checks\n accuracy on the validation set.\n \n Inputs:\n - model_init_fn: A function that takes no parameters; when called it\n constructs the model we want to train: model = model_init_fn()\n - optimizer_init_fn: A function which takes no parameters; when called it\n constructs the Optimizer object we will use to optimize the model:\n optimizer = optimizer_init_fn()\n - num_epochs: The number of epochs to train for\n \n Returns: Nothing, but prints progress during trainingn\n \"\"\" \n with tf.device(device):\n \n #Set up summary writers to write the summaries to disk in a different logs directory:\n current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n train_log_dir = 'logs/gradient_tape/' + current_time + '/train'\n test_log_dir = 'logs/gradient_tape/' + current_time + '/test'\n train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n test_summary_writer = tf.summary.create_file_writer(test_log_dir)\n \n #compute the loss function over the classification and ovr bounding box \n classification_loss = tf.keras.losses.BinaryCrossentropy()\n bbox_loss = tf.keras.losses.MeanSquaredError() \n \n model = model_init_fn()\n optimizer = optimizer_init_fn()\n \n train_loss = tf.keras.metrics.BinaryCrossentropy(name='train_classification_loss')\n train_bbox_loss = tf.keras.metrics.MeanSquaredError(name='train_bbox_loss')\n \n train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')\n \n #val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_loss = tf.keras.metrics.BinaryCrossentropy(name='val_classification_loss')\n val_bbox_loss = tf.keras.metrics.MeanSquaredError(name='val_bbox_loss')\n\n val_accuracy = tf.keras.metrics.BinaryAccuracy(name='val_accuracy')\n \n t = 0\n for epoch in range(num_epochs):\n \n # Reset the metrics - https://www.tensorflow.org/alpha/guide/migration_guide#new-style_metrics\n train_loss.reset_states()\n train_bbox_loss.reset_states()\n \n train_accuracy.reset_states()\n \n for x_np, y_np in train_dset:\n with tf.GradientTape() as tape:\n \n # Use the 
model function to build the forward pass.\n classification_scores, bbox_scores = model(x_np, training=True)\n prediction_loss = classification_loss(y_np[:,:,:,:2], classification_scores)\n coordinate_loss = bbox_loss(y_np[:,:,:,2:], bbox_scores)\n loss = prediction_loss + 0.5 * coordinate_loss * y_np[:,:,:,1]\n # Print loss \n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n \n # Update the metrics\n train_loss.update_state(y_np[:,:,:,:2], classification_scores)\n train_bbox_loss.update_state(y_np[:,:,:,2:], bbox_scores*y_np[:,:,:,1] )\n train_accuracy.update_state(y_np[:,:,:,:2], classification_scores)\n \n with train_summary_writer.as_default():\n tf.summary.scalar('loss', train_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n\n if t % print_every == 0:\n val_loss.reset_states()\n val_bbox_loss.reset_states()\n val_accuracy.reset_states()\n for test_x, test_y in val_dset:\n # During validation at end of epoch, training set to False\n classification_scores, bbox_scores = model(test_x, training=False)\n t_prediction_loss = classification_loss(test_y[:,:,:,:2], classification_scores)\n t_coordinate_loss = bbox_loss(test_y[:,:,:,2:], bbox_scores)\n t_loss = t_prediction_loss + 0.5 * t_coordinate_loss * test_y[:,:,:,1]\n\n val_loss.update_state(test_y[:,:,:,:2], classification_scores)\n val_bbox_loss.update_state(test_y[:,:,:,2:], bbox_scores*test_y[:,:,:,1])\n val_accuracy.update_state(test_y[:,:,:,:2], classification_scores)\n \n with test_summary_writer.as_default():\n tf.summary.scalar('loss', val_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n\n \n template = 'Iteration {}, Epoch {}, \\nLoss: {}, Bbox loss: {}, Accuracy: {},\\nVal Loss: {}, Val Bbox Loss: {}, Val Accuracy: {}'\n print (template.format(t, epoch+1,\n train_loss.result(),\n train_bbox_loss.result(),\n train_accuracy.result()*100,\n val_loss.result(),\n val_bbox_loss.result(), \n val_accuracy.result()*100))\n t += 1\n return model\n\n\nprint_every = 10\nnum_epochs = 150\n\ndef model_init_fn():\n return PNet()\n\ndef optimizer_init_fn():\n learning_rate = 1e-3\n return tf.keras.optimizers.Adam(learning_rate) \n #return tf.keras.optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)\n \nmodel = train(model_init_fn, optimizer_init_fn, num_epochs=num_epochs, is_training=True)\n\n# Test data\npredictions = model.predict(X_test)\n\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nscore = predictions[0]\nbbox = predictions[1]\n\nscore = np.squeeze(score)\nbbox = np.squeeze(score)\n\ny_test_score = np.squeeze(y_test[:,:,:,:2])\ny_test_bbox = np.squeeze(y_test[:,:,:,2:])\n\nfrom sklearn.metrics import confusion_matrix, classification_report\n\n\nprint(classification_report(y_test_score, np.round(score)))\nprint(confusion_matrix(y_test_score[:,1:2], np.round(score[:,1:2])))\n\n\n\n\n\n","sub_path":"PNet_Training.py","file_name":"PNet_Training.py","file_ext":"py","file_size_in_byte":14298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"543469158","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n size = len(s)\n if size == 0:\n return 0\n if size == 1:\n return 1\n\n prev = \"\"\n cur = \"\"\n\n for i in range(size):\n if s[i] not in cur:\n cur += s[i]\n else:\n if len(prev) < len(cur):\n prev = cur\n \"\"\"\n get first occurance of repeating char in cur 
string by finding\n its index and then splicing string from next index till end of\n string to get new cur string and append repeating char and proceed.\n \"\"\"\n index = cur.find(s[i])\n cur = cur[(index+1):len(cur)]\n cur += s[i]\n prev = cur if len(cur) > len(prev) else prev\n return len(prev)\n","sub_path":"lengthOfLongestSubstring.py","file_name":"lengthOfLongestSubstring.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"386042587","text":"import cv2\nimport numpy as np\n\nfrom psd_tools import PSDImage\n\n\n# psd to png\npsd1 = PSDImage.open('200×800.ai.psd')\npsd1.composite().save('psd_image_to_detect1.png')\n\npsd2 = PSDImage.open('800×200.ai.psd')\npsd2.composite().save(\"psd_image_to_detect2.png\")\n\n# 以灰度形式读入图片\n\npsd_img_1 = cv2.imread('psd_image_to_detect1.png', cv2.IMREAD_GRAYSCALE)\npsd_img_2 = cv2.imread('psd_image_to_detect2.png', cv2.IMREAD_GRAYSCALE)\n\n# SIFT特征计算\n\nsift = cv2.xfeatures2d.SIFT_create()\n\npsd_kp1, psd_des1 = sift.detectAndCompute(psd_img_1, None)\npsd_kp2, psd_des2 = sift.detectAndCompute(psd_img_2, None)\n\n# Flann 特征匹配\n\nFLANN_INDEX_KDTREE = 1\nindex_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\nsearch_params = dict(checks=50)\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\nmatches = flann.knnMatch(psd_des1, psd_des2, k=2)\ngoodMatch = []\n\nfor m, n in matches:\n # goodMatch是经过筛选的优质配对,如果2个配对中第一匹配的距离小于第二匹配的距离的1/2,基本可以说明这个第一配对是两幅图像中独特的,不重复的特征点,可以保留。\n if m.distance < 0.50*n.distance:\n goodMatch.append(m)\n# 增加一个维度\ngoodMatch = np.expand_dims(goodMatch, 1)\nprint(goodMatch[:20])\n\nimg_out = cv2.drawMatchesKnn(\n psd_img_1, psd_kp1, psd_img_2, psd_kp2, goodMatch[:15], None, flags=2)\n\ncv2.imshow('image', img_out) # 展示图片\ncv2.waitKey(0) # 等待按键按下\ncv2.destroyAllWindows() # 清除所有窗口\n\n","sub_path":"SIFT-match.py","file_name":"SIFT-match.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"46106674","text":"#!/usr/bin/env python\n\"\"\" Access Control for Flask Wrapper. \"\"\"\n\nimport urllib\nfrom copy import deepcopy\nfrom datetime import datetime\nimport flask\nfrom flask import abort, current_app, request, session\nfrom pdm.utils.X509 import X509Utils\n\n\ndef set_session_state(logged_in=False):\n \"\"\" A helper function for changing the flask session state.\n Must be called from a Flask request context.\n Returns None.\n \"\"\"\n session['logged_in'] = logged_in\n\n\nclass ACLManager(object):\n \"\"\" Access Control List manager for Flask Wrapper.\n Keeps a list of users who are allowed to access resources\n and allows or rejects request based on the presented credentials.\n \"\"\"\n\n AUTH_MODE_NONE = 0\n AUTH_MODE_X509 = 1\n AUTH_MODE_TOKEN = 2\n AUTH_MODE_SESSION = 3\n AUTH_MODE_ALLOW_ALL = 4\n\n def __init__(self, logger):\n \"\"\" Create an empty instance of ACLManager (no predfined groups or\n rules. 
Test mode is disabled by default.\n \"\"\"\n self.__log = logger\n self.__test_mode = ACLManager.AUTH_MODE_NONE\n self.__test_data = None\n self.__groups = {}\n self.__rules = {}\n\n def __check_entry(self, entry, allow_group):\n \"\"\" Checks an entry is in a valid format and expands groups.\n If groups are allowed, then inputting a group entry will\n result in the expanded group entries on the output.\n entry - The entry string to check.\n allow_group - Boolean, on whether to allow group names.\n Raises a ValueError if it isn't valid.\n Returns a list of entries.\n Each returned entry is a tuple of (auth_mode, auth_data).\n \"\"\"\n if entry == \"TOKEN\":\n return [(ACLManager.AUTH_MODE_TOKEN, None)]\n elif entry == \"CERT\":\n return [(ACLManager.AUTH_MODE_X509, None)]\n elif entry == \"SESSION\":\n return [(ACLManager.AUTH_MODE_SESSION, None)]\n elif entry == \"ALL\":\n return [(ACLManager.AUTH_MODE_ALLOW_ALL, None)]\n elif entry.startswith(\"CERT:\"):\n raw_dn = entry.split(':', 1)[1]\n if not \"=\" in raw_dn:\n raise ValueError(\"Bad CERT DN in ACL rule: '%s'\" % entry)\n return [(ACLManager.AUTH_MODE_X509,\n X509Utils.normalise_dn(raw_dn))]\n elif allow_group and entry.startswith(\"@\"):\n group_name = entry[1:]\n if not group_name in self.__groups:\n raise ValueError(\"Unrecognised group used in ACL rule: %s\" % \\\n group_name)\n return deepcopy(self.__groups[group_name])\n raise ValueError(\"Invalid auth entry '%s'.\" % entry)\n\n def add_group_entry(self, group_name, entry):\n \"\"\" Adds an entry to a group, if the group doesn't exist it will be\n created, otherwise it will be appeneded. Note that groups can't\n currently be nested.\n \"\"\"\n entries = self.__check_entry(entry, False)\n if not group_name in self.__groups:\n self.__groups[group_name] = entries\n else:\n self.__groups[group_name].extend(entries)\n\n def add_rule(self, res_path, entry):\n \"\"\" Adds a rule for a specific resource path. The entry can either\n be an existing group or a normal entry value.\n \"\"\"\n if not '%' in res_path:\n res_path = \"%s%%GET\" % res_path\n if res_path in self.__rules:\n raise ValueError(\"Duplicate auth rule for path '%s'.\" % res_path)\n self.__rules[res_path] = self.__check_entry(entry, True)\n\n def test_mode(self, auth_mode, auth_data=None):\n \"\"\" Enabled test mode, where the authentication info is pre-set for\n all requests. 
This should not be used in production.\n \"\"\"\n self.__test_mode = auth_mode\n self.__test_data = auth_data\n\n @staticmethod\n def __get_real_request_auth():\n \"\"\" Fills the details of the presented credentials into the\n request object.\n \"\"\"\n # Cert auth\n if 'Ssl-Client-Verify' in request.headers \\\n and 'Ssl-Client-S-Dn' in request.headers:\n # Request has client cert\n if request.headers['Ssl-Client-Verify'] == 'SUCCESS':\n raw_dn = request.headers['Ssl-Client-S-Dn']\n request.dn = X509Utils.normalise_dn(raw_dn)\n # Token Auth\n if 'X-Token' in request.headers:\n raw_token = request.headers['X-Token']\n try:\n token_value = current_app.token_svc.check(raw_token)\n # Check if this looks like a standard token with an expiry value\n if isinstance(token_value, dict):\n if 'expiry' in token_value:\n exp_str = token_value['expiry']\n exp_value = datetime.strptime(exp_str, '%Y-%m-%dT%H:%M:%S.%f')\n if exp_value < datetime.utcnow():\n # Token has already expired\n current_app.log.info(\"Request %s token has expired (at %s)\",\n request.uuid, exp_str)\n return \"403 Expired Token\", 403\n request.token = token_value\n request.raw_token = raw_token\n request.token_ok = True\n except ValueError:\n # Token decoding failed, it is probably corrupt or has been\n # tampered with.\n current_app.log.info(\"Request %s token validation failed.\",\n request.uuid)\n return \"403 Invalid Token\", 403\n if 'logged_in' in session:\n if session['logged_in']:\n request.session_ok = True\n\n def __get_fake_request_auth(self):\n \"\"\" Fills the request object with te test (fake) authentication\n details.\n \"\"\"\n if self.__test_mode == ACLManager.AUTH_MODE_X509:\n request.dn = X509Utils.normalise_dn(self.__test_data)\n elif self.__test_mode == ACLManager.AUTH_MODE_TOKEN:\n request.token = self.__test_data\n request.raw_token = self.__test_data\n request.token_ok = True\n elif self.__test_mode == ACLManager.AUTH_MODE_SESSION:\n request.session_ok = True\n\n @staticmethod\n def __matches_rules(rules):\n \"\"\" Checks the current request against a list of expanded rules.\n If the request matches any of the rules, True is returned.\n False is returned if no rules match the current request creds.\n \"\"\"\n for rule in rules:\n rule_mode, rule_data = rule\n if rule_mode == ACLManager.AUTH_MODE_TOKEN:\n if request.token_ok:\n return True\n elif rule_mode == ACLManager.AUTH_MODE_X509:\n if rule_data is not None:\n if rule_data == request.dn:\n return True\n else:\n if request.dn:\n return True\n elif rule_mode == ACLManager.AUTH_MODE_SESSION:\n if request.session_ok:\n return True\n elif rule_mode == ACLManager.AUTH_MODE_ALLOW_ALL:\n return True\n return False\n\n @staticmethod\n def __match_path(req_detail, rule_detail):\n \"\"\" Checks whether a request path matches a rule path.\n The rule path can contiain wildcards * or ?.\n Although the * wildcard can only be in the last position.\n Returns True if the rule_detail pattern matches req_detail.\n \"\"\"\n req_path, req_method = req_detail.split('%')\n rule_path, rule_method = rule_detail.split('%')\n if req_method != rule_method:\n return False # Wrong method\n req_parts = req_path.split('/')\n rule_parts = rule_path.split('/')\n # If the request path is shorter than the rule, then it\n # can't possibly match\n if len(req_parts) < len(rule_parts):\n return False\n # If the rule ends in a wildcard, ignore all bits of the\n # request path that match the wildcard\n if rule_parts[-1] == '*':\n rule_parts = rule_parts[0:-1]\n req_parts = 
 req_parts[0:len(rule_parts)]\n        # If the request is longer than the rule (considering *),\n        # the request can't match\n        if len(req_parts) > len(rule_parts):\n            return False\n        # Now check each segment of the path to match either\n        # directly or by wildcard\n        for part_num in xrange(0, len(rule_parts)):\n            req_part = req_parts[part_num]\n            rule_part = rule_parts[part_num]\n            if rule_part == '?':\n                continue\n            if req_part != rule_part:\n                return False\n        # Everything matched, so the rule matches the request\n        return True\n\n    @staticmethod\n    def __do_abort():\n        \"\"\" Aborts the current request because access was denied.\n            If the export provided a redir value for access denied,\n            the client will be redirected, otherwise a 403 will be\n            returned.\n        \"\"\"\n        # Check whether this endpoint has a special redirect\n        if request.endpoint:\n            if request.endpoint in current_app.view_functions:\n                ep_func = current_app.view_functions[request.endpoint]\n                if ep_func:\n                    redir_url = getattr(ep_func, 'export_redir', None)\n                    if redir_url:\n                        orig_path = urllib.quote(request.path, safe='')\n                        real_redir = redir_url % {'return_to': orig_path}\n                        abort(flask.redirect(real_redir))\n        abort(403)\n\n    def __check_acl(self):\n        \"\"\" Checks the request object authentication details against the ACL\n            list for the requested resource.\n            Raises a Flask 403 abort if access should be denied.\n        \"\"\"\n        # Work out the request URI\n        real_path = request.path\n        # Strip a trailing slash, as long as it isn't the only char\n        if real_path.endswith('/') and len(real_path) > 1:\n            real_path = real_path[:-1]\n        real_path = \"%s%%%s\" % (real_path, request.method)\n        # Now check the auth rules for this path\n        if real_path in self.__rules:\n            if self.__matches_rules(self.__rules[real_path]):\n                # The request matches => Access allowed\n                return\n            self.__log.info(\"Request %s denied (Failed to match specific rule).\",\n                            request.uuid)\n            self.__do_abort()\n        # No specific rule for this path, try generic rules\n        did_match = False\n        for rule_path in self.__rules.iterkeys():\n            if self.__match_path(real_path, rule_path):\n                did_match = True\n                if self.__matches_rules(self.__rules[rule_path]):\n                    # Access allowed via a generic rule\n                    return\n        reason = \"no matching auth rule\"\n        if did_match:\n            reason = \"all wildcard rules denied access\"\n        # No rule matches => request denied\n        self.__log.info(\"Request %s denied (%s).\",\n                        request.uuid, reason)\n        self.__do_abort()\n\n    def check_request(self):\n        \"\"\" Gets the current flask request object and checks it against the\n            configured rule set.\n        \"\"\"\n        # We use '%' to separate out the method from the rest of the request.\n        # We simply don't support requests that contain a % in the URI from\n        # the client.\n        if '%' in request.path:\n            abort(404)\n        request.dn = None\n        request.token = None\n        request.raw_token = None\n        request.token_ok = False\n        request.session_ok = False\n        if self.__test_mode == ACLManager.AUTH_MODE_NONE:\n            self.__get_real_request_auth()\n            self.__check_acl()\n        else:\n            self.__get_fake_request_auth()\n            # ACLs aren't actually checked in test mode\n","sub_path":"src/pdm/framework/ACLManager.py","file_name":"ACLManager.py","file_ext":"py","file_size_in_byte":12203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"248065830","text":"import sys\r\nimport numpy as np\r\nimport random\r\nfrom scipy.special import softmax\r\nfrom numpy import inf\r\n\r\n\r\ndef create_x_data_set(train_path):\r\n    '''\r\n    get path to a file and create a data set from it.\r\n    :param train_path:
 path to file.\r\n    :return: data set.\r\n    '''\r\n    file = open(train_path)\r\n    content = [line.rstrip('\\n') for line in file]\r\n    dataset = [line.split(\" \") for line in content]\r\n    dataset = [list(map(int, vector)) for vector in dataset]\r\n    file.close()\r\n    return dataset\r\n\r\n\r\ndef create_y_data_set(train_path):\r\n    '''\r\n    create y data set of ints from path.\r\n    :param train_path: path\r\n    :return: data set y of ints.\r\n    '''\r\n    file = open(train_path)\r\n    content = [int(value) for value in file]\r\n    file.close()\r\n    return content\r\n\r\n\r\ndef shuffle_data(x, y):\r\n    '''\r\n    shuffle the data x and y accordingly.\r\n    :param x: train set.\r\n    :param y: cluster classification.\r\n    :return: x and y shuffled.\r\n    '''\r\n    zip_x_y = list(zip(x, y))\r\n    random.shuffle(zip_x_y)\r\n    new_x, new_y = zip(*zip_x_y)\r\n    return new_x, new_y\r\n\r\n\r\ndef init_weights(rows, cols=1):\r\n    # if cols == 1:\r\n    #     return np.random.rand(rows)\r\n    # else:\r\n    return np.random.randn(rows, cols)\r\n    # return np.random.uniform(-0.08, 0.08, rows * cols).reshape(rows, cols)\r\n\r\n\r\ndef load_files():\r\n    # train constants\r\n    train_x_path = sys.argv[1]\r\n    train_y_path = sys.argv[2]\r\n    test_x_path = sys.argv[3]\r\n    train_X = np.loadtxt(train_x_path)\r\n    train_Y = np.loadtxt(train_y_path)\r\n    test_x = np.loadtxt(test_x_path)\r\n    return train_X, train_Y, test_x\r\n\r\n\r\ndef train(train_x, train_y, lr, epochs, weights):\r\n    '''\r\n    run SGD: for each epoch, shuffle the data and update the weights after every sample.\r\n    :param train_x: training samples.\r\n    :param train_y: training labels.\r\n    :param lr: learning rate.\r\n    :param epochs: number of passes over the data.\r\n    :param weights: dict with the weight matrices w1, w2 and bias vectors b1, b2.\r\n    :return: the trained weights.\r\n    '''\r\n    # final_acc = 0.0\r\n    # iteration = 0\r\n    for i in range(epochs):\r\n        sum_loss = 0.0\r\n        train_x, train_y = shuffle_data(train_x, train_y)\r\n        for x, y in zip(train_x, train_y):\r\n            fprop_cache = forward_prop(weights, x)\r\n            loss = get_negative_log_loss(fprop_cache[\"y_hat\"], y)\r\n            sum_loss += loss\r\n            gradients = back_prop(fprop_cache, y)\r\n            weights = update_weights(weights, gradients, lr)\r\n        # acc, validation_loss = validation_check(weights, validation_x, validation_y)\r\n        # if final_acc < acc:\r\n        #     final_acc = acc\r\n        #     iteration = i\r\n        # return final_acc, iteration\r\n        # print(i, sum_loss / np.size(train_y), validation_loss, \"{}%\".format(acc * 100))\r\n    return weights\r\n\r\n\r\ndef validation_check(weights, validation_x, validation_y):\r\n    '''\r\n    test the model each epoch on the validation set.\r\n    :param weights: weights matrices.\r\n    :param validation_x: validation data x.\r\n    :param validation_y: classification data y.\r\n    :return: accuracy and average loss.\r\n    '''\r\n    sigma_loss = 0.0\r\n    correct = 0.0\r\n    num_of_examples = np.size(validation_x, 0)\r\n    for x, y in zip(validation_x, validation_y):\r\n        fprop_cache = forward_prop(weights, x)\r\n        y_hat = fprop_cache[\"y_hat\"]\r\n        loss = get_negative_log_loss(y_hat, y)\r\n        sigma_loss += loss\r\n        if y_hat.argmax() == y:\r\n            correct += 1\r\n    accuracy = correct / num_of_examples\r\n    avg_loss = sigma_loss / num_of_examples\r\n    return accuracy, avg_loss\r\n\r\n\r\ndef predict(weights, test_x):\r\n    '''\r\n    predict the classifications of the data set x and write them to a file.\r\n    :param weights: weights matrices.\r\n    :param test_x: test data x.\r\n    :return: nothing.\r\n    '''\r\n    file = open(\"test_y\", \"w\")\r\n    for x in test_x:\r\n        fprop_cache = forward_prop(weights, x)\r\n        y_hat = fprop_cache[\"y_hat\"]\r\n        classification = y_hat.argmax()\r\n        file.write(str(classification) + \"\\n\")\r\n    file.close()\r\n\r\n\r\ndef relu(x):\r\n    '''\r\n    ReLU activation: returns the max of 0 and x.\r\n    :param
 x: sample data.\r\n    :return: max(0, x).\r\n    '''\r\n    return max(0, x)\r\n\r\n\r\ndef get_z2(weights, x):\r\n    '''\r\n    function computes part of the neural net.\r\n    :param weights: list of weights: w1,w2 - matrices, b1,b2 - vectors.\r\n    :param x: one data sample.\r\n    :return: w2*h+b2.\r\n    '''\r\n    w2 = weights[1]\r\n    b2 = weights[3]\r\n    h = get_h(weights, x)\r\n    w2h = np.dot(w2, h)\r\n    z2 = w2h + b2\r\n    return z2\r\n\r\n\r\ndef get_h(weights, x):\r\n    '''\r\n    apply the activation function {ReLU,Sigmoid,TanH} to break linearity.\r\n    :return: result of g on z_1.\r\n    :param weights: list of weights: w1,w2 - matrices, b1,b2 - vectors.\r\n    :param x: one data sample.\r\n    '''\r\n    g = np.vectorize(relu)\r\n    z1 = get_z1(weights, x)\r\n    h = g(z1)\r\n    return h\r\n\r\n\r\ndef get_z1(weights, x):\r\n    '''\r\n    function computes part of the neural net.\r\n    :param weights: list of weights: w1,w2 - matrices, b1,b2 - vectors.\r\n    :param x: one data sample.\r\n    :return: w1*x+b1.\r\n    '''\r\n    w1 = weights[0]\r\n    b1 = weights[2]\r\n    z1 = np.dot(w1, x)\r\n    z1 = np.add(z1, b1)\r\n    return z1\r\n\r\n\r\ndef softMax(x):\r\n    \"\"\"Compute softmax values for each set of scores in x.\r\n    :param x: vector of features\r\n    \"\"\"\r\n    e_x = np.exp(x - np.max(x))\r\n    return e_x / e_x.sum(axis=0)\r\n\r\n\r\ndef forward_prop(weights, x):\r\n    '''\r\n    calculate the neural net functions.\r\n    :param weights: list of weights: w1,w2 - matrices, b1,b2 - vectors.\r\n    :param x: one data sample.\r\n    :return: y_hat = soft_max(z_2).\r\n    '''\r\n    w1, w2, b1, b2 = [weights[key] for key in ('w1', 'w2', 'b1', 'b2')]\r\n    new_x = np.reshape(x, (-1, 1))\r\n    z1 = np.dot(w1, new_x) + b1\r\n    g = np.vectorize(relu)\r\n    h = g(z1)\r\n    maxi = h.max() or 1\r\n    h = h / maxi\r\n    z2 = np.dot(w2, h) + b2\r\n    y_hat = softmax(z2)\r\n    ret = {'x': new_x, 'z1': z1, 'h': h, 'z2': z2, 'y_hat': y_hat}\r\n    for key in weights.keys():\r\n        ret[key] = weights[key]\r\n    return ret\r\n\r\n\r\ndef update_weights(weights, gradients, eta):\r\n    '''\r\n    the function updates all the weights by the gradients and the eta\r\n    :param gradients: gradients of the weights w1, w2, b1, b2.\r\n    :param weights: matrix weights of the neural network.\r\n    :param eta: learning rate of the neural network.\r\n    :return: weights after updating.\r\n    '''\r\n    w1, w2, b1, b2 = [weights[key] for key in ('w1', 'w2', 'b1', 'b2')]\r\n    dw1, dw2, db1, db2 = [eta * gradients[key] for key in ('dw1', 'dw2', 'db1', 'db2')]\r\n    w1, w2, b1, b2 = w1 - dw1, w2 - dw2, b1 - db1, b2 - db2\r\n    return {'w1': w1, 'w2': w2, 'b1': b1, 'b2': b2}\r\n\r\n\r\ndef relu_derivative(x):\r\n    '''\r\n    calculate the relu derivative.\r\n    :param x: parameter.\r\n    :return: relu derivative, 1 or 0.\r\n    '''\r\n    if x > 0:\r\n        return 1\r\n    return 0\r\n\r\n\r\ndef back_prop(fprop_cache, y):\r\n    x, z1, h, z2, w2, y_hat = [fprop_cache[key] for key in ('x', 'z1', 'h', 'z2', 'w2', 'y_hat')]\r\n    '''\r\n    calculate w2:\r\n    '''\r\n    y_vec = np.zeros((y_hat.size, 1))\r\n    y_vec[int(y)] = 1\r\n    dl_dz2 = np.subtract(y_hat, y_vec)  # dL/dy_hat * dy_hat/dz2\r\n    dz2_dw2 = h.T  # dz2/dw2\r\n    dw2 = np.dot(dl_dz2, dz2_dw2)  # dLw2 = dL/dy_hat * dy_hat/dz2 * dz2/dw2\r\n\r\n    '''\r\n    calculate w1:\r\n    '''\r\n    dz2_dh = w2  # dz2/dh\r\n    dl_dh = np.dot(dl_dz2.T, dz2_dh)  # dl/dh\r\n    g = np.vectorize(relu_derivative)\r\n    dh_dz1 = g(z1)  # dh1_dz1\r\n    dz1_dw1 = x  # dz1/dw1\r\n    # dw1 = np.dot(dl_dh.T, np.dot(dh_dz1.T, dz1_dw1))  # dLw1 = dL/dy_hat * dy_hat/dz2 * dz2/dh * dh1/dz1 * dz1/dw1\r\n
    dw1 = np.dot((dl_dh.T * dh_dz1), dz1_dw1.T)\r\n    '''\r\n    calculate b2:\r\n    '''\r\n    db2 = dl_dz2  # dz2/db2 = 1\r\n\r\n    '''\r\n    calculate b1:\r\n    '''\r\n    db1 = np.dot(dl_dh, dh_dz1)  # dz1/db1 = 1\r\n    return {'dw1': dw1, 'db1': db1, 'dw2': dw2, 'db2': db2}\r\n\r\n\r\ndef get_negative_log_loss(y_hat, y):\r\n    '''\r\n    calculate the negative log loss, given the cluster probability vector y_hat and the true classification y.\r\n    :param y_hat: vector of cluster probabilities.\r\n    :param y: real classification.\r\n    :return: loss value.\r\n    '''\r\n    y_vec = np.zeros(y_hat.size)\r\n    y_vec[int(y)] = 1\r\n    # need to calculate sum of(y_i*log(y_hat_i))\r\n    # first do log on y_hat:\r\n    # TODO: check what to do with log on 0 (equal -inf). possible solution:\r\n    # https://stackoverflow.com/questions/49602205/python-numpy-negative-log-likelihood-calculation-when-some-predicted-probabiliti\r\n    y_hat = np.log2(y_hat)\r\n    # change all -inf to zeros.\r\n    y_hat[y_hat == -inf] = 0\r\n    # return scalar of - y*log(y_hat)\r\n    return -np.dot(y_vec, y_hat)\r\n\r\n\r\ndef normalize_data(x):\r\n    '''\r\n    the values are between 0-255; normalize them to lie in [0,1]\r\n    :param x: values to normalize.\r\n    :return: x after normalization.\r\n    '''\r\n    x = np.divide(x, 255)\r\n    return x\r\n\r\n\r\ndef check_hyper_parameters(lr, epochs, hidden_size, train_X, train_Y, clusters_num, validation_X, validation_Y):\r\n    w1, w2 = init_weights(hidden_size, np.ma.size(train_X, 1)), init_weights(clusters_num, hidden_size)\r\n    b1, b2 = init_weights(hidden_size), init_weights(clusters_num)\r\n    weights = {'w1': w1, 'w2': w2, 'b1': b1, 'b2': b2}\r\n    return train(train_X, train_Y, lr, epochs, weights)\r\n\r\n\r\ndef main():\r\n    '''\r\n    Hyper parameters:\r\n    '''\r\n\r\n    # number of learning iterations:\r\n    epochs = 25\r\n    # size of hidden layer:\r\n    hidden_size = 90\r\n    # number of clusters:\r\n    clusters_num = 10\r\n    # part size of validation from test:\r\n    validation_percent = 0.2\r\n    # learning rate: 0.1 or 0.01 or 0.001.\r\n    lr = 0.1\r\n\r\n    train_X, train_Y, test_X = load_files()\r\n    train_X = normalize_data(train_X)\r\n    # size of division to train and validation.\r\n    # cross_validation_size = int(len(train_X) * validation_percent)\r\n\r\n    # validation_X, validation_Y = train_X[cross_validation_size:], train_Y[cross_validation_size:]\r\n    # train_X, train_Y = train_X[:cross_validation_size], train_Y[:cross_validation_size]\r\n\r\n    '''\r\n    init all the parameters, weight matrices and vectors between layers with hidden layer size h.\r\n    '''\r\n    w1, w2 = init_weights(hidden_size, np.ma.size(train_X, 1)), init_weights(clusters_num, hidden_size)\r\n    b1, b2 = init_weights(hidden_size), init_weights(clusters_num)\r\n    weights = {'w1': w1, 'w2': w2, 'b1': b1, 'b2': b2}\r\n    weights = train(train_X, train_Y, lr, epochs, weights)\r\n    predict(weights, test_X)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":10517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"513005161","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import interpolate\nimport pandas as pd\n\ndef get_readouts(df):\n    \n    d = {}\n    time = df.time\n    cells = df.value\n    \n    d['peak'] = get_peak(time, cells)\n    d['tau'] = get_peaktime(time, cells)\n    d['area'] = get_area(time, cells)\n    d['decay'] = get_decay(time, cells)\n    series = pd.Series(d, index=['peak', 'tau', 'area', 'decay'])\n    \n    return series\n\ndef
 get_peaktime(time, cells):\n\n    cells = cells.array\n\n    # only look at the array where there are no nans\n    cellmax = np.amax(cells)\n    cellmin = cells[-1]\n    # check whether cells are still at their maximum at the end of the simulation\n    crit1 = np.abs(cellmax-cellmin) > 1e-3\n    crit2 = cellmax > cellmin\n    crit3 = np.std(cells) > 0.001\n    # test if cells are not constant\n    if crit1 and crit2 and crit3:\n\n        peak_idx = np.argmax(cells)\n        # get max value\n        peak = cells[peak_idx]\n        peak_half = peak / 2.\n        #print(peak)\n        cells = cells[:(peak_idx+1)]\n        time = time[:(peak_idx+1)]\n        # assert that peak is not at beginning\n        if peak_idx <= 3:\n            tau = np.nan\n        # assert that peak half is in new cell array\n        elif np.all(peak_half < cells):\n            tau = np.nan\n        else:\n            f = interpolate.interp1d(cells, time)\n            tau = f(peak_half)\n    else:\n        tau = np.nan\n\n    return tau\n\ndef get_decay(time, cells):\n\n    cells = cells.array\n    cellmax = np.amax(cells)\n    cellmin = cells[-1]\n    # check whether cells are still at their maximum at the end of the simulation\n    crit1 = np.abs(cellmax-cellmin) > 1e-3\n    crit2 = cellmax > cellmin\n    crit3 = np.std(cells) > 0.001\n    \n    # test that there is a global max before end of array\n    if crit1 and crit2 and crit3:\n        peak_id = np.argmax(cells)\n        cells = cells[peak_id:]\n        time = time[peak_id:]\n        \n        # make sure there are at least two values in the array\n        assert len(cells) > 1\n        \n        # interpolate to get the time at which half of the diff between max and array end is reached\n        celldiff = (cellmax - cellmin) / 2\n        celldiff = cellmax - celldiff\n        f = interpolate.interp1d(cells, time)\n        #print(cellmax, cellmin, celldiff)\n        tau = f(celldiff)\n    else:\n        tau = np.nan\n    \n    return tau\n\ndef get_area(time, cells):\n    \n    cells = cells.array \n    cellmax = np.amax(cells)\n    cellmin = cells[-1]\n    # check whether cells are still at their maximum at the end of the simulation\n    crit1 = np.abs(cellmax-cellmin) > 1e-3\n    crit2 = cellmax > cellmin\n    crit3 = np.std(cells) > 0.001\n    \n    if crit1 and crit2 and crit3:\n        area = np.trapz(cells, time)\n    else: \n        area = np.nan\n    \n    return area\n\ndef get_peak(time, cells):\n    \n    cells = cells.array \n    cellmax = np.amax(cells)\n    cellmin = cells[-1]\n    # check whether cells are still at their maximum at the end of the simulation\n    crit1 = np.abs(cellmax-cellmin) > 1e-3\n    crit2 = cellmax > cellmin\n    crit3 = np.std(cells) > 0.001\n    \n    if crit1 and crit2 and crit3:\n        peak = cellmax\n    else: \n        peak = np.nan\n    \n    return peak","sub_path":"code/module_readouts.py","file_name":"module_readouts.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"376841888","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport time\nimport shutil\n\n#--\ntarget_folder = \"/home/sarthak11/Desktop/TPF\"\nexcluded = [\"HP_TOOLS\"]\n#--\n# use lsusb and lsblk for the excluded directory\ndef get_mountedlist():\n    return [(item.split()[0].replace(\"├─\", \"\").replace(\"└─\", \"\"),\n             item[item.find(\"/\"):]) for item in subprocess.check_output(\n        [\"/bin/bash\", \"-c\", \"lsblk\"]).decode(\"utf-8\").split(\"\\n\") if \"/\" in item]\n\ndef identify(disk):\n    command = \"find /dev/disk -ls | grep /\"+disk\n    output = subprocess.check_output([\"/bin/bash\", \"-c\", command]).decode(\"utf-8\")\n    if \"usb\" in output:\n        return True\n    else:\n        return False\n\ndone = []\nwhile True:\n    mounted = get_mountedlist()\n    new_paths = [dev for dev in get_mountedlist() if not dev in done and not dev[1] == \"/\"]\n    valid = [dev for dev in new_paths if (identify(dev[0]), dev[1].split(\"/\")[-1] in excluded) == (True, False)]\n    for item in valid:\n        target = target_folder+\"/\"+item[1].split(\"/\")[-1]\n        try:\n            shutil.rmtree(target)\n        except FileNotFoundError:\n            pass\n        shutil.copytree(item[1], target)\n    done = mounted\n\n# Alternative C++ approach\n\"\"\"\n#include <iostream>\n#include <fstream>\n#include <string>\n\nusing namespace std;\n\nint main () \n{\n  string line;\n
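  // read the (hard-coded) source file from the mounted drive and write a copy to a local file\n  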
ifstream file (\"/media/sarthak11/Tanmay/Sarthak/Rnp.txt\");\n ofstream file1 (\"/home/sarthak11/Desktop/Local2.txt\"); \n if (file.is_open())\n {\n while ( getline (file,line) )\n {\n\t//cout << line << '\\n';\n\tfile1<= 0')\n\n if max_sleep < 0:\n raise ValueError(u'max_sleep must be >= 0')\n\n if max_jitter < 0:\n raise ValueError(u'max_jitter must be >= 0')\n\n if not (0 <= attempt < max_attempts):\n raise ValueError(u'attempt value is out of range')\n\n ratio = float(attempt) / float(max_attempts)\n backoff_sec = ratio * max_sleep\n jitter_sec = random.random() * max_jitter\n\n return backoff_sec + jitter_sec\n\n\ndef to_oid(obj):\n \"\"\"Creates a new ObjectId based on the input.\n\n Returns None when TypeError or berrors.InvalidId\n is raised by the ObjectId class.\n\n :param obj: Anything that can be passed as an\n input to `objectid.ObjectId`\n \"\"\"\n try:\n return objectid.ObjectId(obj)\n except (TypeError, berrors.InvalidId):\n return None\n\n\ndef oid_ts(oid):\n \"\"\"Converts an ObjectId to a UNIX timestamp.\n :raises: TypeError if oid isn't an ObjectId\n \"\"\"\n try:\n return timeutils.delta_seconds(EPOCH, oid.generation_time)\n except AttributeError:\n raise TypeError(u'Expected ObjectId and got %s' % type(oid))\n\n\ndef stat_message(message, now):\n \"\"\"Creates a stat document from the given message, relative to now.\"\"\"\n oid = message['_id']\n created = oid_ts(oid)\n age = created - now\n\n return {\n 'id': str(oid),\n 'age': int(age),\n 'created': timeutils.iso8601_from_timestamp(created),\n }\n\n\ndef raises_conn_error(func):\n \"\"\"Handles mongodb ConnectionFailure error\n\n This decorator catches mongodb's ConnectionFailure\n exceptions and raises Marconi's ConnectionError instead.\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except errors.ConnectionFailure as ex:\n # NOTE(flaper87): Raise the error\n LOG.exception(ex)\n msg = u'ConnectionFailure caught'\n raise storage_exceptions.ConnectionError(msg)\n\n return wrapper\n\n\nclass HookedCursor(object):\n\n def __init__(self, cursor, denormalizer):\n self.cursor = cursor\n self.denormalizer = denormalizer\n\n def __getattr__(self, attr):\n return getattr(self.cursor, attr)\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return self.cursor.count(True)\n\n @raises_conn_error\n def next(self):\n item = next(self.cursor)\n return self.denormalizer(item)\n","sub_path":"marconi/queues/storage/mongodb/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"515361839","text":"from jdiag import *\r\n\r\nfrom com.xrbpowered.jdiagram.data import ExpFormatter\r\n\r\nfrom com.xrbpowered.jdiagram.chart import Page\r\nfrom com.xrbpowered.jdiagram.chart import ScatterChart\r\nfrom com.xrbpowered.jdiagram.chart.ScatterChart import Population\r\nfrom com.xrbpowered.jdiagram.chart import Anchor\r\n\r\nfrom com.xrbpowered.jdiagram.chart import StepRenderer\r\n\r\n'''\r\nresNames = [\r\n\t('result_g%d_AH', 'async, no weights, %d-con, bucket-fill', 'stroke:#5b0'),\r\n\t('result_g%d_AW', 'async, weights, %d-con, bucket-fill', 'stroke:#009'),\r\n\t('metis_g%d_AW', 'async, weights, %d-con, metis', 'stroke:#e90')\r\n]\r\ncons = [\r\n\t(4, 'stroke-width:1.5;stroke-dasharray: 2 2'),\r\n\t(8, 'stroke-width:1.5')\r\n]\r\n'''\r\n\r\ndata = Data.read(File('randgrid_dd.csv'))\r\n\r\npage = Page(1).setGap(5)\r\n\r\nfor log in [False]:\r\n\t# set 
up a new chart\r\n\tchart = ScatterChart().setTitle('Degree Distribution').setSize(500, 300)\r\n\tchart.setMargins(65, 20, 40, 100)\r\n\tchart.legend.setCols(5).posBottom(-40).setItemSize(80, 20)\r\n\tchart.clipChart = True\r\n\r\n\t# set up axes\r\n\tchart.axisx.setRange(0, 35, 5) \\\r\n\t\t\t.setAnchor(Anchor.bottom).setLabel('degree').setNumberFmt('%.0f')\r\n\tif not log:\r\n\t\tchart.axisy.setRange(False, 0, 3e5, 5e4) \\\r\n\t\t\t\t.setAnchor(Anchor.left).setLabel('nodes', Anchor.left.offset(-50)).setNumberFmt('%.0f')\r\n\telse:\r\n\t\tchart.axisy.setRange(True, 1, 1024*1024, 4) \\\r\n\t\t\t\t.setAnchor(Anchor.left).setLabel('nodes', Anchor.left.offset(-30)).setNumberFormatter(ExpFormatter.svgExp(2, 5))\r\n\r\n\t# add data lines\r\n\tfor r in range(0, 110, 10):\r\n\t\tphdr = 'rgrid%d' % r\r\n\t\tchart.addPopLegend('r=%.1f' % (r/100.0),\r\n\t\t\tPopulation(data, 'degree', phdr, 'fill:none;stroke-width:1.5;stroke:#%x55' % (r/10+5)) #.setRenderer(StepRenderer())\r\n\t\t)\r\n\tpage.add(chart)\r\n\r\n# finish and print\r\npage.printPage(System.out)\r\n","sub_path":"poets_aug20/diagrams/RGDDist.py","file_name":"RGDDist.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"240263590","text":"import datetime\nimport os\nimport copy\n\nfrom boto3 import resource\nfrom boto3.dynamodb.conditions import Key\n\n\n# The boto3 dynamoDB resource\ndb = resource('dynamodb',\n region_name=os.environ.get(\"AWS_REGION_NAME\"),\n aws_access_key_id=os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.environ.get(\"AWS_SECRET_ACCESS_KEY\"))\n\n\n#########\n# USERS #\n#########\ndef get_user(user_id):\n return _get_item('users', {'user_id': user_id})\n\n\ndef sync_or_create_user(openid_user):\n \"\"\"\n Checks the user, returned by the authentication-service\n Requires a user-dict with at least: sub, email, updated_at\n \"\"\"\n def _validate_user(openid_user):\n error = False\n msg = ''\n if not openid_user.get('sub'):\n error = True\n msg += ' sub'\n if not openid_user.get('email'):\n error = True\n msg += ' email'\n if not openid_user.get('updated_at'):\n error = True\n msg += ' updated_at'\n\n if error:\n return {'error': True, 'msg': 'Missing claims:' + msg}\n else:\n return {'msg': 'valid openid_user'}\n\n def _insert_user(openid_user):\n user = copy.deepcopy(openid_user)\n user['max_units'] = 10\n # user['active_units'] = []\n user['roles'] = ['user']\n user['user_id'] = openid_user.get('sub')\n\n # Generate additional, normalized key for db on insert or replace\n if openid_user.get('username'):\n federated_name = openid_user.get('username')\n elif openid_user.get('nickname'):\n federated_name = openid_user.get('nickname')\n elif openid_user.get('name'):\n federated_name = openid_user.get('name')\n else:\n federated_name = openid_user.get('email').split('@')[0]\n user['federated_name'] = federated_name\n\n if _put_item('users', user):\n # Tells client, that user is first-time user\n # '_action'-key does not persist\n user['_action'] = 'inserted'\n return user\n else:\n return {'error': True, 'msg': 'Unable to create user'}\n\n def _sync_user(openid_user, db_user):\n # NOTE: First update openid_user with existing local values, as they\n # will be overwritten on the put_item-request!\n user = copy.deepcopy(openid_user)\n user['federated_name'] = db_user.get('federated_name')\n user['max_units'] = db_user.get('max_units', 10)\n # user['active_units'] = db_user.get('active_units', [])\n user['roles'] 
= db_user.get('roles', ['user'])\n user['user_id'] = db_user.get('user_id')\n\n if _put_item('users', user, action='update'):\n user['_action'] = 'updated'\n return user\n else:\n return {'error': True, 'msg': 'Unable to sync user'}\n\n valid_input = _validate_user(openid_user)\n if valid_input.get('error'):\n return valid_input\n\n db_user = get_user(openid_user.get('sub'))\n # If no existing user\n if db_user.get('error'):\n if db_user.get('msg') == 'Item does not exist':\n return _insert_user(openid_user)\n else:\n return db_user\n elif db_user.get('updated_at') != openid_user.get('updated_at'):\n return _sync_user(openid_user, db_user)\n else:\n db_user['_action'] = 'checked'\n return db_user\n\n\ndef delete_user(user_id):\n # bookmarks_deleted =\n # searches_deleted =\n resp = _delete_item('users', {'user_id': user_id})\n if resp:\n return {'msg': 'Brugeren er nu slettet.',\n 'id': user_id}\n else:\n return {'error': True,\n 'msg': 'Brugeren fandtes ikke i databasen'}\n\n\ndef update_user_role(user_id, new_role):\n if new_role == 'employee':\n new_roles = ['user', 'employee']\n elif new_role == 'admin':\n new_roles = ['user', 'employee', 'admin']\n else:\n new_roles = ['user']\n\n key = {'user_id': user_id}\n updates = {\n 'attribute': 'roles',\n 'value': new_roles\n }\n resp = _update_item('users', key, updates)\n if resp:\n return resp\n else:\n return {'error': True, 'msg': 'Unable to update user_roles.'}\n\n\n############\n# SEARCHES #\n############\ndef list_searches(user_id):\n kwargs = {}\n kwargs['table_name'] = 'searches'\n kwargs['pk'] = {'name': 'user_id', 'value': user_id}\n return _query_table(**kwargs)\n\n\ndef get_search(key):\n return _get_item('searches', key)\n\n\ndef add_search(item):\n resp = _put_item('searches', item)\n if resp:\n return {'msg': 'Søgningen blev gemt.'}\n else:\n return {'error': True,\n 'msg': 'Ukendt serverfejl.'}\n\n\ndef update_search(user_id, created, description):\n key = {'user_id': user_id, 'created': created}\n update = {'attribute': 'description', 'value': description}\n resp = _update_item('searches', key, update)\n if resp:\n return resp\n else:\n return {'error': True,\n 'msg': 'Unable to update search. 
Try again later.'}\n\n\ndef delete_search(item):\n # key = {'user_id': user_id, 'created': created}\n resp = _delete_item('searches', item)\n if resp:\n return {'msg': 'Søgningen blev slettet',\n 'created': item.get('created')}\n else:\n return {'error': True,\n 'msg': 'Søgningen blev ikke slettet'}\n\n\n#############\n# BOOKMARKS #\n#############\ndef list_bookmarks(user_id, sort='sort_key', sort_desc=False, ids_only=False):\n # NOT USED\n kwargs = {}\n kwargs['table_name'] = 'bookmarks'\n kwargs['pk'] = {'name': 'user_id', 'value': user_id}\n if sort == 'created':\n kwargs['idx'] = 'user_id-created-index'\n if sort_desc:\n kwargs['rd'] = True\n resp = _query_table(**kwargs)\n # Return bookmarks\n if ids_only:\n return [_d.get('resource_id') for _d in resp]\n else:\n return resp\n\n\ndef put_bookmark(item):\n resp = _put_item('bookmarks', item)\n if resp:\n return {'msg': 'Materialet er nu bogmærket.',\n 'id': item.get('resource_id')}\n else:\n return {'error': True,\n 'msg': 'Ukendt serverfejl.'}\n\n\ndef delete_bookmark(item):\n # key = {'user_id': user_id, 'created': created}\n resp = _delete_item('bookmarks', item)\n if resp:\n return {'msg': 'Bogmærket er nu fjernet.',\n 'id': item.get('resource_id')}\n else:\n return {'error': True,\n 'msg': 'Ukendt serverfejl.'}\n\n\n#########\n# UNITS #\n#########\n# def list_storage_units(user_id=None):\n# kwargs = {}\n# kwargs['table_name'] = 'storage_units'\n# kwargs['pk'] = {'name': 'user_id', 'value': user_id}\n# resp = _query_table(**kwargs)\n# return resp\n\n\n# def get_storage_unit(unit_id, projection=None):\n# partition_key = {'unit_id', unit_id}\n# unit = _get_item('storage_units', partition_key)\n# if unit:\n# return unit\n# else:\n# return {'error': True, 'msg': 'Unable to query local db.'}\n\n\n# def insert_storage_unit(item):\n# return False\n\n\n# def update_storage_unit(unit_id, status):\n# return False\n\n\n# def delete_storage_unit(unit_id):\n# return False\n\n\n##########\n# ORDERS #\n##########\n# def get_order(user_id, resource_id):\n# return _get_item('orders',\n# {'user_id': user_id},\n# {'resource_id': resource_id})\n\n\n# def list_orders(key, value, ids_only=False, limit=None):\n# # List of orders only queries by partition_key, not also sort_key\n# # as the sort_key of the orders-table is unique. 
Use _get_item for that\n# kwargs = {}\n# kwargs['table_name'] = 'orders'\n\n# if key not in ['user_id', 'unit_id']:\n# return {'error': True, 'msg': 'key must be unit_id or user_id'}\n\n# kwargs['pk'] = {'name': key, 'value': value}\n# if limit:\n# kwargs['limit'] = limit\n\n# if key == 'user_id':\n# if ids_only:\n# kwargs['proj'] = 'resource_id'\n# return _query_table(**kwargs)\n# else:\n# kwargs['idx'] = 'unit_id-created-index'\n# return _query_table(**kwargs)\n\n\n# def _insert_order(user_id, resource_id, unit_id):\n# \"\"\"\n# \"\"\"\n# # Fetch entities\n# unit = get_storage_unit(unit_id)\n# if unit.get('error'):\n# return unit\n\n# user = get_user(user_id)\n# if user.get('error'):\n# return user\n\n# existing_orders = list_orders(key='unit_id', value=unit_id)\n# if isinstance(existing_orders, dict) and existing_orders.get('error'):\n# return existing_orders\n\n# # Test conditions\n# # MOVE TO VIEW-HANDLER\n# # if unit_id in user.get('active_units'):\n# # return {'msg': u'Du har allerede bestilt magasin-enheden'}\n\n# # if len(user.get('active_units')) >= user.get('max_units'):\n# # return {'error': True, 'msg': u'Du kan ikke bestille flere materialer.'}\n\n# # Baseline\n# order = {\n# 'user_id': user_id,\n# 'resource_id': resource_id,\n# 'unit_id': unit_id\n# }\n\n# # If no existing orders on the unit\n# if not existing_orders:\n# # If unit is at readingroom, set status to available end expiration in 14 days\n# if unit.get('status') == 'readingroom':\n# order['status'] = 'available'\n# order['expires'] = str(datetime.date.today() + datetime.timedelta(days=14))\n# msg = 'Materialet er allerede tilgængelig på læsesalen.'\n# # Else reserve the unit (like first in queue)\n# else:\n# order['status'] = 'waiting'\n# msg = 'Materialet er bestilt. du får besked, når det er tilgængeligt på læsesalen.'\n# # If existing orders on the unit, place in queue\n# else:\n# order['status'] = 'waiting'\n# msg = str('Materialet er bestilt. Du er nummer ' + str(len(existing_orders)) + ' i køen.')\n\n# # Insert order\n# if _put_item('orders', order):\n# # send_mail('order_created', user.get('email'))\n# return {'msg': msg}\n# else:\n# return {'error': True, 'msg': 'Ukendt serverfejl. Bestillingen ikke gemt.'}\n\n\n# def delete_order(user_id, resource_id):\n# \"\"\" Cancelled or finished or force-deleted by employee\n# \"\"\"\n# deleted_order = _delete_item('orders',\n# {\n# 'user_id': user_id,\n# 'resource_id': resource_id\n# },\n# return_item=True)\n# if not deleted_order:\n# return {'error': True, 'msg': 'Kunne ikke slette ordren.'}\n\n# # Fetch unit-status. 
If at readingroom, update next in line \n# # and send availability-mail\n# unit = get_storage_unit(deleted_order.get('unit_id'))\n# if unit.get('status') == 'readingroom':\n# # If next in line, update availability and expiration\n# nxt = list_orders(key='unit_id', value=deleted_order.get('unit_id'), limit=1)\n# if nxt:\n# # Update order-keys and put back in db\n# nxt[0]['status'] = 'available'\n# nxt[0]['expires'] = str(datetime.date.today() + datetime.timedelta(days=14))\n# if _put_item('orders', nxt[0], action='update'):\n# # If order is updated, send availability-mail\n# # send_mail('order_available', nxt[0]['email'])\n# pass\n\n# return {'msg': 'Bestillingen er nu slettet.', 'id': resource_id}\n\n\n################\n# BASE METHODS #\n################\ndef _get_item(table, partition_key, sort_key=None, projection=None):\n \"\"\"\n Return item read by primary key, and possibly sort_key.\n \"\"\"\n if sort_key:\n partition_key.update(sort_key)\n kwargs = {'Key': partition_key}\n\n if projection:\n kwargs['ProjectionExpression'] = projection\n\n table = db.Table(table)\n response = table.get_item(**kwargs)\n\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n if response.get('Item'):\n return response.get('Item')\n else:\n return {'error': True, 'msg': 'Item does not exist'}\n else:\n return {'error': True, 'msg': 'Error in fetching item from ' + table + '-table'}\n\n\ndef _put_item(table_name, item, action='insert'):\n \"\"\"\n Add one item (row) to table. item is a dictionary {col_name: value}.\n \"\"\"\n now = datetime.datetime.utcnow().isoformat()\n\n if action == 'insert':\n item['created'] = now\n item['updated'] = now\n table = db.Table(table_name)\n\n response = table.put_item(Item=item)\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n return True\n else:\n return False\n\n\ndef _update_item(table_name, partition_key, update_dict):\n \"\"\"\n Update an item.\n PARAMS\n @table_name: name of the table\n @partition_key: dict containing the key name and val\n @update_dict: dict containing the key name and val of\n attributes to be updated\n eg. {\"attribute\": \"processing_status\", \"value\": \"completed\"}\n \"\"\"\n table = db.Table(table_name)\n update_expr = 'SET updated=:now, {}=:val1'.format(update_dict['attribute'])\n\n response = table.update_item(\n Key=partition_key,\n UpdateExpression=update_expr,\n ExpressionAttributeValues={\n ':now': datetime.datetime.utcnow().isoformat(),\n ':val1': update_dict['value']\n },\n ReturnValues='ALL_NEW'\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n return response.get('Attributes')\n else:\n return False\n\n\ndef _delete_item(table_name, key, return_item=False):\n \"\"\"\n Delete an item (row) in table from its primary key. 
Consisting of\n partition-key and possibly a sort_key, all in key\n \"\"\"\n table = db.Table(table_name)\n\n response = table.delete_item(\n Key=key,\n ReturnValues='ALL_OLD' if return_item else 'NONE'\n )\n\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n if return_item:\n return response.get('Attributes')\n else:\n return True\n else:\n return False\n\n\ndef _query_table(table_name, pk, sk=None, idx=None, rd=None, proj=None, limit=None):\n \"\"\"\n Perform a query operation on the table.\n \"\"\"\n kwargs = {}\n _pk = pk.get('name')\n _pkv = pk.get('value')\n\n if sk:\n _sk = sk.get('name')\n _skv = sk.get('value')\n kwargs['KeyConditionExpression'] = Key(_sk).eq(_skv) & Key(_pk).eq(_pkv)\n else:\n kwargs['KeyConditionExpression'] = Key(_pk).eq(_pkv)\n\n if proj:\n kwargs['ProjectionExpression'] = proj\n\n if idx:\n kwargs['IndexName'] = idx\n\n if limit:\n kwargs['Limit'] = limit\n\n # reverse_direction. Used when sorting descending\n if rd:\n kwargs['ScanIndexForward'] = False\n\n table = db.Table(table_name)\n response = table.query(**kwargs)\n\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n return response.get('Items')\n else:\n return False\n\n\ndef _scan_table(table_name, filter_key=None, filter_value=None, limit=None):\n \"\"\"\n Perform a scan operation on table. Can specify filter_key (col name) and its value to be filtered. This gets all pages of results.\n Returns list of items.\n http://boto3.readthedocs.io/en/latest/reference/customizations/dynamodb.html#dynamodb-conditions\n \"\"\"\n table = db.Table(table_name)\n kwargs = {}\n\n if filter_key and filter_value:\n kwargs['FilterExpression'] = Key(filter_key).eq(filter_value)\n kwargs['Limit'] = limit or None\n # response = table.scan(FilterExpression=filtering_exp)\n response = table.scan(**kwargs)\n\n items = response['Items']\n while True:\n if response.get('LastEvaluatedKey'):\n response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n items += response['Items']\n else:\n break\n\n return items\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":15983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527661844","text":"from os import system\r\nfrom User import *\r\nfrom DBService import *\r\n\r\n\r\ndef clear():\r\n _ = system('cls') \r\n\r\n\r\n\r\nclass MenuService:\r\n \r\n def __init__(self):\r\n self.db = DBService()\r\n\r\n def menu(self,user):#determine which menu should be opened cust,admin or moderator\r\n if user.role_id == 1:\r\n self.admin_menu(user)\r\n elif user.role_id == 2:\r\n self.cust_menu(user)\r\n elif user.role_id == 3:\r\n self.moder_menu(user)\r\n\r\n def moder_menu(self,user):\r\n isActive = True\r\n while isActive:\r\n clear()\r\n user.show_data()\r\n print(\"\\t\\t\\t1.Edit profile\\n\\t\\t\\t2.Edit Catalog\\n\\t\\t\\t3.Exit\")\r\n ch = int(input())\r\n if ch == 1:\r\n self.change_data(user)\r\n elif ch ==2:\r\n self.edit_catalog()\r\n elif ch == 3:\r\n isActive = False\r\n\r\n def admin_menu(self,user):##dodelat'\r\n isActive = True\r\n while isActive:\r\n clear()\r\n user.show_data()\r\n print(\"\\t\\t\\t1.Edit profile\\n\\t\\t\\t2.Appoint merchant\\n\\t\\t\\t3.Approve the delivery\\n\\t\\t\\t4.Exit\")\r\n ch = int(input())\r\n if ch == 1:\r\n self.change_data(user)\r\n elif ch == 2:\r\n clear()\r\n self.appoint_merchant()\r\n elif ch == 3:\r\n self.show_deliveries()\r\n print(\"Enter the id of delivery you want to approve\")\r\n del_id = int(input())\r\n 
                self.approve_del(del_id)\r\n            elif ch == 4:\r\n                isActive = False\r\n\r\n    def approve_del(self, del_id):\r\n        self.db.approve_del(del_id)\r\n\r\n    def show_deliveries(self):\r\n        for i in self.db.show_deliveries():\r\n            print(i)\r\n\r\n    def appoint_merchant(self):\r\n        print(\"List of users\")\r\n        for i in self.db.get_all_users():\r\n            print(i)\r\n        print(\"Enter the id of the user to change the role\")\r\n        a = int(input())  # id\r\n        clear()\r\n        print(\"\\n\\t\\t\\t1.Change to Customer\\n\\t\\t\\t2.Change to Merchant\")\r\n        ch = int(input())\r\n        for i in self.db.get_all_users():\r\n            if i['ID'] == a:\r\n                if ch == 2 and i['ROLE_ID'] == 3:\r\n                    print(\"This user is already a merchant!\\nEnter any key to continue\")\r\n                    k = input()\r\n                elif ch == 1 and i[\"ROLE_ID\"] == 2:\r\n                    print(\"This user is already a customer!\\nEnter any key to continue\")\r\n                    k = input()\r\n                elif i[\"ROLE_ID\"] == 1:\r\n                    print(\"You cannot change the role of this user!\\nEnter any key to continue\")\r\n                    k = input()\r\n                elif ch == 1:\r\n                    self.db.change_role(a, 2)\r\n                elif ch == 2:\r\n                    self.db.change_role(a, 3)\r\n\r\n    def edit_catalog(self):\r\n        isActive = True\r\n        while isActive:\r\n            clear()\r\n            self.show_items()\r\n            print(\"\\t\\t\\t1.Add new Item\\n\\t\\t\\t2.Edit Item\\n\\t\\t\\t3.Delete Item\\n\\t\\t\\t4.Exit\")\r\n            ch = int(input())\r\n            if ch == 1:\r\n                clear()\r\n                print(\"Enter the name of the Item\")\r\n                name = input()\r\n                print(\"Enter the price of the Item\")\r\n                price = int(input())\r\n                self.db.add_item(name, price)\r\n            elif ch == 2:\r\n                clear()\r\n                self.item_edit()\r\n            elif ch == 3:\r\n                clear()\r\n                print(\"Enter the id of the Item you would like to delete\")\r\n                Id = int(input())\r\n                self.db.delete_item(Id)\r\n            elif ch == 4:\r\n                isActive = False\r\n\r\n    def item_edit(self):\r\n        isActive = True\r\n        while isActive:\r\n            print(\"Enter the id of the Item you would like to change\")\r\n            item_id = int(input())\r\n            print(\"\\t\\t\\t1.Change the name of Item\\n\\t\\t\\t2.Change the price of Item\\n\\t\\t\\t3.Exit\")\r\n            ch = int(input())\r\n            if ch == 1:\r\n                param = \"NAME\"\r\n                print(\"Enter the new name for the Item\")\r\n                name = input()\r\n                name = \"\\'{}\\'\".format(name)\r\n                self.db.edit_item(param, item_id, name)\r\n            elif ch == 2:\r\n                param = \"PRICE\"\r\n                print(\"Enter the new price for the Item\")\r\n                price = input()\r\n                self.db.edit_item(param, item_id, price)\r\n            elif ch == 3:\r\n                isActive = False\r\n\r\n    def change_data(self, user):\r\n        isActive = True\r\n        while isActive:\r\n            clear()\r\n            user.show_data()\r\n            print(\"\\n\\t\\t\\t1.Change username\\n\\t\\t\\t2.Change first name\\n\\t\\t\\t3.Change last name\\n\\t\\t\\t4.Change password\\n\\t\\t\\t5.Exit\")\r\n            ch = int(input())\r\n            change = \"\"\r\n            if ch == 1:\r\n                change = \"\\'USERNAME\\'\"\r\n                print(\"Enter new username\")\r\n                user.username = input()\r\n                self.db.change_data(user, change, user.username)\r\n            elif ch == 2:\r\n                change = \"\\'FIRST_NAME\\'\"\r\n                print(\"Enter new first name\")\r\n                user.first_name = input()\r\n                self.db.change_data(user, change, user.first_name)\r\n            elif ch == 3:\r\n                change = \"\\'LAST_NAME\\'\"\r\n                print(\"Enter new last name\")\r\n                user.last_name = input()\r\n                self.db.change_data(user, change, user.last_name)\r\n            elif ch == 4:\r\n                change = \"\\'PASSWORD\\'\"\r\n                print(\"Enter new password\")\r\n                user.password = input()\r\n                self.db.change_data(user, change, user.password)\r\n            elif ch == 5:\r\n                isActive = False\r\n\r\n    def edit_cart(self, user):\r\n        isActive = True\r\n        while isActive:\r\n            clear()\r\n            self.check_cart(user)\r\n            if self.db.cart_filled(user):\r\n
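                # cart_filled is assumed to be truthy when the user's cart holds at least one row\r\n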
print(\"Your cart contains:\\n\")\r\n self.show_cart(user)\r\n else:\r\n print(\"You have nothing in cart\")\r\n print(\"\\t\\t\\t1.Add item to cart\\n\\t\\t\\t2.Delete item from cart\\n\\t\\t\\t3.Buy an Item\\n\\t\\t\\t4.Exit\")\r\n ch = int(input())\r\n\r\n if ch == 4:\r\n isActive = False\r\n elif ch == 1:\r\n self.add_item_to_cart(user)\r\n elif ch == 2:\r\n self.delete_item_from_cart(user)\r\n elif ch == 3:\r\n self.make_delivery(user)\r\n\r\n def make_delivery(self,user):\r\n print(\"Your cart contains:\\n\")\r\n self.show_cart(user)\r\n print(\"Enter the id of Item you want to order\")\r\n item_id = int(input())\r\n if self.db.can_pay(user,item_id):\r\n self.change_balance(user)\r\n self.db.create_del(user,item_id)\r\n else:\r\n print(\"Sorry, you don't have enough money on balance to buy this Item\")\r\n\r\n def change_balance(self,user):\r\n self.db.change_balance(user)\r\n\r\n def add_item_to_cart(self,user):\r\n print(\"Enter the id of Item you want to add to cart\")\r\n item_id = int(input())\r\n cart_id = self.db.get_cart_id(user.Id)\r\n if self.db.check_item_in_cart(item_id,cart_id):\r\n self.db.add_amount(item_id,cart_id)\r\n \r\n else:\r\n self.db.add_item_to_cart(cart_id,item_id)\r\n\r\n def delete_item_from_cart(self,user):\r\n print(\"Enter the if of Item you want to delete from your cart\")\r\n item_id = int(input())\r\n cart_id = self.db.get_cart_id(user.Id)\r\n self.db.delete_item_from_cart_am(cart_id,item_id)\r\n \r\n\r\n def cust_menu(self,user):##produmat vot eto\r\n isActive = True\r\n while isActive:\r\n clear()\r\n user.show_data()\r\n print(\"\\t\\t\\t1.Edit profile\\n\\t\\t\\t2.Edit a cart\\n\\t\\t\\t3.Watch the catalog\\n\\t\\t\\t4.Exit\")\r\n ch = int(input())\r\n if ch == 1:\r\n self.change_data(user)\r\n elif ch == 2:\r\n self.edit_cart(user)\r\n elif ch == 3:\r\n self.show_items()\r\n print(\"\\nEnter any symbol to continue\")\r\n k = input()\r\n elif ch == 4:\r\n isActive = False\r\n \r\n\r\n def register(self):\r\n isActive = True\r\n while isActive:\r\n clear()\r\n user = User()\r\n user.register()\r\n if self.db.is_user(user): \r\n clear()\r\n print(\"Sorry, but this username is already taken\\nPress x to exit\")\r\n x = input()\r\n if x == 'x':\r\n isActive = False\r\n else:\r\n self.db.add_user(user,2);\r\n isActive = False\r\n self.cust_menu(user)\r\n \r\n def login(self):\r\n isActive = True\r\n while isActive:\r\n user = User()\r\n print(\"Enter your username\")\r\n user.username = input()\r\n print(\"Enter your password\")\r\n user.password = input()\r\n\r\n if self.db.check_user(user):\r\n self.menu(user)\r\n isActive = False\r\n else:\r\n print(\"Invalid login or password!\")\r\n print(\"Enter x to exit\")\r\n x = input()\r\n if x == 'x':\r\n isActive = False\r\n \r\n\r\n def start_menu(self):\r\n isActive = True\r\n while isActive:\r\n clear()\r\n print(\"\\t\\t\\t1.Register\\n\\t\\t\\t2.Log-in\\n\\t\\t\\t3.Exit\")\r\n ch = int(input())\r\n if ch == 1:\r\n self.register()\r\n elif ch == 2:\r\n self.login()\r\n elif ch == 3:\r\n isActive = False\r\n elif ch == 4:\r\n for i in self.db.get_all_users():\r\n print(i)\r\n print(self.db.get_cart_nId())\r\n k = input()\r\n \r\n def close_db(self):\r\n self.db.close()\r\n \r\n def show_cart(self,user):#fix it\r\n cart = tuple(self.db.show_cart(user))\r\n for i in cart:\r\n self.show_item(i['ITEM_ID'])\r\n print('Amount: {}'.format(i['AMOUNT']))\r\n\r\n def show_items(self):\r\n for i in self.db.get_all_items():\r\n print(i)\r\n\r\n def show_item(self,item_id):\r\n for i in 
 self.db.show_item(item_id):\r\n            print(i)\r\n\r\n    def check_cart(self, user):\r\n        for i in self.db.show_cart(user):\r\n            if i['AMOUNT'] < 1:\r\n                self.db.delete_item_from_cart(i['CART_ID'], i['ITEM_ID'])","sub_path":"MenuService.py","file_name":"MenuService.py","file_ext":"py","file_size_in_byte":10809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"568314582","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass Client():\n\n    def __init__(self, account_id, start_month, monthly_payment, max_months):\n        self.account_id = account_id\n        self.start_month = start_month\n        self.monthly_payment = monthly_payment\n        self.max_months = max_months\n        self.payments = self.create_payments(self.account_id,\n                                             self.monthly_payment,\n                                             self.start_month,\n                                             self.max_months)\n\n    @staticmethod\n    def create_payments(account_id, monthly_payment, start_month, max_months):\n\n        client_lifetime = np.random.poisson(max_months)\n        cnt = 0\n        acc_payments = []\n\n        for month_num in range(client_lifetime):\n            month = start_month + relativedelta(months=cnt)\n            if month <= datetime(2019, 5, 1):\n                cnt += 1\n                if cnt < client_lifetime:\n                    churn = 0\n                else:\n                    churn = 1\n                acc_payments.append([account_id, month, cnt, monthly_payment, churn])\n\n        return acc_payments\n\n\ndef create_payments(number, start_date, contract_value, max_months):\n\n    payments = []\n    for i in range(number):\n        client = Client('acc_{}'.format(i),\n                        start_date + relativedelta(months=np.random.randint(max_months)),\n                        contract_value,\n                        max_months)\n        payments.extend(client.payments)\n\n    payments = pd.DataFrame(payments)\n    payments.columns = ['account_id', 'date', 'client_mon', 'payment', 'churn']\n\n    return payments\n\n","sub_path":"havelock/mock_data.py","file_name":"mock_data.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"455629773","text":"class Solution(object):\n    def minimumTotal(self, triangle):\n        if not triangle:\n            return\n        if len(triangle) == 1:\n            return triangle[0][0]\n        n = len(triangle)\n        dp = [[0 for j in range(n)] for i in range(n)]\n        for i in range(n):\n            dp[n - 1][i] = triangle[n - 1][i]\n        for i in range(n - 2, -1, -1):\n            for j in range(i + 1):\n                dp[i][j] = min(dp[i + 1][j], dp[i + 1][j + 1]) + triangle[i][j]\n        return dp[0][0]\n","sub_path":"Triangle/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"289820837","text":"# POOL OF \"SECOND\" OBJECT\r\nimport copy\r\nimport math\r\nimport queue\r\nimport random\r\nimport time\r\nimport uuid\r\n\r\nfrom global_models import *\r\nfrom singleton import Singleton\r\n\r\n\r\n# PYTHON USES PROTOTYPE BY DEFAULT WITH COPY.COPY AND COPY.DEEPCOPY\r\n\r\n\r\nclass SecondPool(metaclass=Singleton):\r\n\r\n    def __init__(self, size):\r\n        self._default_size = size\r\n        self._counter_in_use = 0\r\n        self._q = queue.Queue()\r\n        self._proto = Second()\r\n        [self._q.put(copy.copy(self._proto)) for _ in range(self._default_size)]\r\n        # deepcopy not needed here\r\n        # [self._q.put(Second()) for _ in range(self._size)]\r\n        # line above is for the version w/o copy\r\n\r\n    def reset_counter(self):\r\n        self._counter_in_use = 0\r\n\r\n    def acquire(self):\r\n        result = None\r\n        while result is None:\r\n            try:\r\n                result = self._q.get(timeout=0.001)\r\n                self._counter_in_use += 1\r\n
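            # get() timed out with queue.Empty: the handler below grows the pool by one prototype clone\r\n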
            except queue.Empty:\r\n                self._q.maxsize += 1\r\n                self._q.put(copy.copy(self._proto))\r\n        append_to_size_list(self._counter_in_use)\r\n        return result\r\n\r\n    def release(self, second):\r\n        result = None\r\n        while result is None:\r\n            try:\r\n                self._q.put(second, timeout=0.001)\r\n                result = \"Done!\"\r\n                self._q.maxsize -= 1\r\n                self._counter_in_use -= 1\r\n            except queue.Full:\r\n                self._q.maxsize += 1\r\n                result = None\r\n                while result is None:\r\n                    try:\r\n                        self._q.get(timeout=0.001)\r\n                        self._q.maxsize -= 1\r\n                    except queue.Empty:\r\n                        self._q.maxsize += 1\r\n                        self._q.put(copy.copy(self._proto))\r\n                    result = \"Done!\"\r\n        append_to_size_list(self._counter_in_use)\r\n\r\n\r\nclass Second:\r\n    _num = []\r\n    # called x y z for common names ?\r\n    _what_numbers = 'random'\r\n    # options(weights) :\r\n    _amount = 1\r\n    _maximum = 1\r\n    _weight_x = 1\r\n    _weight_y = 1\r\n    _weight_z = 1\r\n\r\n    def __init__(self):\r\n        self._id = str(uuid.uuid4())[:8]\r\n\r\n    def gen_num(self):\r\n        self._num = []\r\n        if self._what_numbers == 'random':\r\n            for i in range(self._amount):\r\n                self._num.append(random.randrange(self._maximum) + 1)\r\n        else:\r\n            for i in range(self._amount):\r\n                self._num.append(i + 1)\r\n\r\n    def calculate(self):\r\n        self.gen_num()\r\n        # ((n)^1/(x*y*z))^n\r\n        result = 0\r\n        for i in self._num:\r\n            temp = math.pow(math.pow(i, 1 / (self._weight_x * self._weight_y * self._weight_z)), i)\r\n            time.sleep(0.01 / i / self._amount)\r\n            if i % 2 == 1:\r\n                result -= temp\r\n            else:\r\n                result += temp\r\n        return result\r\n\r\n    def set_what_numbers(self, what_numbers):\r\n        self._what_numbers = what_numbers\r\n\r\n    def set_amount(self, amount):\r\n        self._amount = amount\r\n\r\n    def set_maximum(self, maximum):\r\n        self._maximum = maximum\r\n\r\n    def set_weight_x(self, x):\r\n        self._weight_x = x\r\n\r\n    def set_weight_y(self, y):\r\n        self._weight_y = y\r\n\r\n    def set_weight_z(self, z):\r\n        self._weight_z = z\r\n\r\n    def get_id(self):\r\n        print(self._id)\r\n\r\n    def get_num(self):\r\n        return self._num\r\n","sub_path":"second_pool.py","file_name":"second_pool.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"108485832","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport os.path\nimport logging\nimport sys\nfrom pixiv_spider.my_config import *\n\n\n# folder name\nsave_dir = get_save_dir()\n\nif not os.path.exists(save_dir):\n    os.mkdir(save_dir)\n\nclass PixivSpiderPipeline(object):\n    def open_spider(self, spider):\n        self.f = open(\"{}/{}.txt\".format(save_dir, save_dir), \"a+\")\n        self.f.seek(0)\n        self.url_list = self.f.readlines()\n    def process_item(self, item, spider):\n        filename = item[\"url\"][item[\"url\"].rfind(\"/\")+1:]\n        # single-image post\n        if int(item[\"count\"]) == 1:\n            filepath = \"{}/{}\".format(save_dir, filename)\n        # multi-image post\n        else:\n            filedir = filename[:filename.index(\"_\")]\n            if not os.path.exists(save_dir + \"/\" + filedir):\n                os.mkdir(save_dir + \"/\" + filedir)\n            filepath = \"{}/{}/{}\".format(save_dir, filedir, filename)\n        # save the URL if it has not been recorded yet\n        if not item[\"url\"] + \"\\n\" in self.url_list:\n            self.f.write(item[\"url\"] + \"\\n\")\n            self.url_list.append(item[\"url\"] + \"\\n\")\n            # save the image\n            with open(filepath, \"wb\") as ff:\n                ff.write(item[\"img\"])\n            logging.info(filename + \"\\tprocessed successfully\")\n        return item\n    def close_spider(self, spider):\n
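        # flush and release the URL-log file handle when the spider shuts down\n        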
self.f.close()","sub_path":"pixiv_spider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136980869","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torch.autograd import Variable\n\nimport sys\nsys.path.append('/home/xucheng/Code/MRI_tools/')\nfrom DL_torch.network import *\nfrom DL_torch.util import *\nfrom DL_torch.GAN_def import *\nimport torch.nn.functional as F\nfrom scipy import signal\n\n\n# patch gan\nDatas = np.load('/data/xucheng/TOF/paired_TOF_nonTOF_CT_mask.npy')\nDatas = Datas[30:250,:,40:-40,24:-24]\n\ndef data_sampling(Datas, idxs, slices = 5, crop_size = 160):\n # Datas expected to be 4D [slice, channel, x, y]\n Shiftmx = Datas.shape[2] - crop_size\n Shiftmy = Datas.shape[3] - crop_size\n shiftx = np.random.randint(0,Shiftmx, size=len(idxs)).tolist()\n shifty = np.random.randint(0,Shiftmy, size=len(idxs)).tolist()\n random_scales = (0.01+np.random.rayleigh(size=len(idxs),scale=5))\n random_scales = [np.array([random_scales[i],random_scales[i],1,1])[None,:,None,None] for i in range(len(idxs))]\n data = [(random_scales[i]*Datas[idxs[i]-slices//2:idxs[i]+(slices + 1)//2,:,shiftx[i]:shiftx[i]+crop_size,shifty[i]:shifty[i]+crop_size]) for i in range(len(idxs))]\n return np.asarray(data)\n\n# change output for training purpose\ndef PET_2_5_sampling(Datas, nbatch, batchSize, slices = 5, crop_size = 160):\n idxs = np.arange(slices//2,Datas.shape[0]-slices//2-1)\n np.random.shuffle(idxs)\n idxs = idxs[:nbatch*batchSize].reshape(nbatch,batchSize).tolist()\n datas = [data_sampling(Datas,idxs[i], slices = slices, crop_size = crop_size) for i in range(nbatch)]\n datas = [{'input':data[:,:,1,:,:],'mask':data[:,:,3,:,:],'CT':data[:,:,2,:,:]*data[:,:,3,:,:], 'output':data[:,:,0,:,:]} for data in datas]\n \n return datas\n\n\n# test \nclass opt: pass\nopt.epoch = 0\nopt.n_epochs = 1500\nopt.batchSize = 4\nopt.nbatch = (Datas.shape[0]-7)//opt.batchSize\nopt.lr = 1e-4\nopt.decay_epoch = opt.n_epochs//2\nopt.input_cn = 7\nopt.output_cn = 1\nopt.n_cpu = 1\nopt.size = 160\nopt.cuda = True\nopt.device = torch.device(\"cuda:3\") if opt.cuda == True else torch.device(\"cpu\") \nprint(opt.device)\nslices = opt.input_cn\n\n\n\nnetG = define_GDA(opt.input_cn, opt.output_cn, 'DADenseCNNGenerator3',resnet_output_idx = opt.input_cn//2)\nnetD = define_D(opt.output_cn, 64, 'basic')\n\nif opt.cuda:\n netG.to(opt.device)\n netD.to(opt.device)\n \ncriterion_GAN = torch.nn.MSELoss()\ncriterion_identity = torch.nn.L1Loss()\ncriterion_sim5 = SSIM_loss1(window_size = 5)\ncriterion_sim7 = SSIM_loss1(window_size = 7)\ncriterion_sim9 = SSIM_loss1(window_size = 9)\ncriterion_poisson = nn.PoissonNLLLoss(log_input=False)\n\noptimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.8, 0.99))\noptimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.9, 0.99))\n\nlr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)\nlr_scheduler_D = torch.optim.lr_scheduler.LambdaLR(optimizer_D, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)\n\nTensor = torch.Tensor\ninput = Tensor(opt.batchSize, opt.input_cn, opt.size, opt.size).to(opt.device)\ninput_a = Tensor(opt.batchSize, opt.input_cn, opt.size, opt.size).to(opt.device)\noutput = Tensor(opt.batchSize, opt.output_cn, opt.size, opt.size).to(opt.device)\nref = Tensor(opt.batchSize, 
opt.input_cn, opt.size, opt.size).to(opt.device)\nmask = Tensor(opt.batchSize, opt.output_cn, opt.size, opt.size).to(opt.device)\ntarget_real = Variable(Tensor(size = [opt.batchSize,opt.output_cn]).fill_(1.0).to(opt.device), requires_grad=False)\ntarget_fake = Variable(Tensor(size = [opt.batchSize,opt.output_cn]).fill_(0.0).to(opt.device), requires_grad=False)\n\nwindow_size = 5\nwindow1d = signal.gaussian(M = window_size,std = 1)\nwindow = (window1d[None,:]*window1d[:,None]).astype(np.float32)\nwindow = window/window.sum()\ngauss_kernel2 = Variable(torch.from_numpy(window).expand(opt.output_cn, 1, window_size, window_size).contiguous(), requires_grad=False)\nif opt.cuda:\n gauss_kernel = gauss_kernel2.to(opt.device)\nG_conv = lambda x:F.conv2d(x,gauss_kernel,padding = window_size//2, groups = opt.output_cn)\n\nsobel_kernel = Variable(torch.from_numpy(np.array([[[[1,0,-1],[2,0,-2],[1,0,-1]]],[[[1,2,1],[0,0,0],[-1,-2,-1]]]]).astype(np.float32)).contiguous())\nif opt.cuda:\n sobel_kernel = sobel_kernel.to(opt.device)\nsobel_conv = lambda x:F.conv2d(x,sobel_kernel, groups = opt.output_cn)\n\nfake_B_buffer = ReplayBuffer()\n\nlogger = Logger(opt.n_epochs, opt.nbatch)\n\nfor epoch in range(opt.epoch, opt.n_epochs):\n Datas = np.flip(Datas,axis=0)\n dataloader = PET_2_5_sampling(Datas,opt.nbatch,opt.batchSize, slices = slices, crop_size = opt.size)\n for i, batch in enumerate(dataloader):\n real_A = Variable(input.copy_(torch.from_numpy(batch['input'])))\n real_B = Variable(output.copy_(torch.from_numpy(batch['output'][:,slices//2:slices//2+1,:,:])))\n ref_B = Variable(ref.copy_(torch.from_numpy(batch['output'])))\n mask = Variable(mask.copy_(torch.from_numpy(batch['mask'][:,slices//2:slices//2+1,:,:])))\n CT =Variable(input_a.copy_(torch.from_numpy(batch['CT'])))\n ###### Generator ######\n # Similarity and Identity loss\n optimizer_G.zero_grad()\n fake_B,_ = netG(real_A, CT)\n ref_B1,_ = netG(ref_B, CT)\n loss_sim = criterion_sim9(real_B, fake_B, mask)*5.0\n # loss_id2 = criterion_identity(torch.sqrt(fake_B+3/8),torch.sqrt(real_B+3/8))/5\n loss_id2 = criterion_identity(torch.sqrt(fake_B+1e-6),torch.sqrt(real_B+1e-6))\n loss_id = criterion_identity(torch.sqrt(ref_B1+1e-6),torch.sqrt(real_B+1e-6))\n #loss_id3 = criterion_identity(ref_B1,real_B)/real_B.mean()\n pred_fake = netD(fake_B)\n loss_GAN = criterion_GAN(pred_fake, target_real)\n \n loss_G = loss_sim + loss_id + loss_id2 + loss_GAN \n loss_G.backward()\n \n optimizer_G.step()\n ###################################\n \n ###### Discriminator A ######\n optimizer_D.zero_grad()\n fake_B1 = fake_B.detach()\n fake_B = fake_B_buffer.push_and_pop(fake_B)\n pred_fake = netD(fake_B.detach())\n loss_D_fake = criterion_GAN(pred_fake, target_fake)\n pred_real = netD(real_B)\n loss_D_real = criterion_GAN(pred_real, target_real)\n\n # Total loss\n loss_D = (loss_D_real + loss_D_fake)*0.5\n loss_D.backward()\n\n optimizer_D.step()\n ################################### \n logger.log({'loss_G': loss_G.cpu(), 'loss_sim': loss_sim.cpu(), 'loss_D': loss_D.cpu()}, \n images={'real_A': real_A[:,slices//2:slices//2+1,...].cpu(), 'real_B': real_B.cpu(), 'fake_B': fake_B1.cpu()})\n\n # Update learning rates\n lr_scheduler_G.step()\n lr_scheduler_D.step()\n # Save models checkpoints\n torch.save(netG.state_dict(), './netG.pth')\n torch.save(netD.state_dict(), './netD.pth')\n torch.save(optimizer_G.state_dict(), './optimizer_G.pth')\n torch.save(optimizer_D.state_dict(), 
'./optimizer_D.pth')\n","sub_path":"models/DAGAN6/DAGAN.py","file_name":"DAGAN.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524371247","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/suntime/suntime.py\n# Compiled at: 2019-08-26 08:45:56\n# Size of source mod 2**32: 7127 bytes\nimport calendar, math, datetime\nfrom dateutil import tz\n\nclass SunTimeException(Exception):\n\n def __init__(self, message):\n super(SunTimeException, self).__init__(message)\n\n\nclass Sun:\n __doc__ = '\\n Approximated calculation of sunrise and sunset datetimes. Adapted from:\\n https://stackoverflow.com/questions/19615350/calculate-sunrise-and-sunset-times-for-a-given-gps-coordinate-within-postgresql\\n '\n\n def __init__(self, lat, lon):\n self._lat = lat\n self._lon = lon\n\n def get_sunrise_time(self, date=None):\n \"\"\"\n Calculate the sunrise time for given date.\n :param lat: Latitude\n :param lon: Longitude\n :param date: Reference date. Today if not provided.\n :return: UTC sunrise datetime\n :raises: SunTimeException when there is no sunrise and sunset on given location and date\n \"\"\"\n date = datetime.date.today() if date is None else date\n sr = self._calc_sun_time(date, True)\n if sr is None:\n raise SunTimeException('The sun never rises on this location (on the specified date)')\n else:\n return sr\n\n def get_local_sunrise_time(self, date=None, local_time_zone=tz.tzlocal()):\n \"\"\"\n Get sunrise time for local or custom time zone.\n :param date: Reference date. Today if not provided.\n :param local_time_zone: Local or custom time zone.\n :return: Local time zone sunrise datetime\n \"\"\"\n date = datetime.date.today() if date is None else date\n sr = self._calc_sun_time(date, True)\n if sr is None:\n raise SunTimeException('The sun never rises on this location (on the specified date)')\n else:\n return sr.astimezone(local_time_zone)\n\n def get_sunset_time(self, date=None):\n \"\"\"\n Calculate the sunset time for given date.\n :param lat: Latitude\n :param lon: Longitude\n :param date: Reference date. 
Today if not provided.\n :return: UTC sunset datetime\n :raises: SunTimeException when there is no sunrise and sunset on given location and date.\n \"\"\"\n date = datetime.date.today() if date is None else date\n ss = self._calc_sun_time(date, False)\n if ss is None:\n raise SunTimeException('The sun never sets on this location (on the specified date)')\n else:\n return ss\n\n def get_local_sunset_time(self, date=None, local_time_zone=tz.tzlocal()):\n \"\"\"\n Get sunset time for local or custom time zone.\n :param date: Reference date\n :param local_time_zone: Local or custom time zone.\n :return: Local time zone sunset datetime\n \"\"\"\n date = datetime.date.today() if date is None else date\n ss = self._calc_sun_time(date, False)\n if ss is None:\n raise SunTimeException('The sun never sets on this location (on the specified date)')\n else:\n return ss.astimezone(local_time_zone)\n\n def _calc_sun_time(self, date, isRiseTime=True, zenith=90.8):\n \"\"\"\n Calculate sunrise or sunset date.\n :param date: Reference date\n :param isRiseTime: True if you want to calculate sunrise time.\n :param zenith: Sun reference zenith\n :return: UTC sunset or sunrise datetime\n :raises: SunTimeException when there is no sunrise and sunset on given location and date\n \"\"\"\n day = date.day\n month = date.month\n year = date.year\n TO_RAD = math.pi / 180.0\n N1 = math.floor(275 * month / 9)\n N2 = math.floor((month + 9) / 12)\n N3 = 1 + math.floor((year - 4 * math.floor(year / 4) + 2) / 3)\n N = N1 - N2 * N3 + day - 30\n lngHour = self._lon / 15\n if isRiseTime:\n t = N + (6 - lngHour) / 24\n else:\n t = N + (18 - lngHour) / 24\n M = 0.9856 * t - 3.289\n L = M + 1.916 * math.sin(TO_RAD * M) + 0.02 * math.sin(TO_RAD * 2 * M) + 282.634\n L = self._force_range(L, 360)\n RA = 1 / TO_RAD * math.atan(0.91764 * math.tan(TO_RAD * L))\n RA = self._force_range(RA, 360)\n Lquadrant = math.floor(L / 90) * 90\n RAquadrant = math.floor(RA / 90) * 90\n RA = RA + (Lquadrant - RAquadrant)\n RA = RA / 15\n sinDec = 0.39782 * math.sin(TO_RAD * L)\n cosDec = math.cos(math.asin(sinDec))\n cosH = (math.cos(TO_RAD * zenith) - sinDec * math.sin(TO_RAD * self._lat)) / (cosDec * math.cos(TO_RAD * self._lat))\n if cosH > 1:\n return\n elif cosH < -1:\n return\n if isRiseTime:\n H = 360 - 1 / TO_RAD * math.acos(cosH)\n else:\n H = 1 / TO_RAD * math.acos(cosH)\n H = H / 15\n T = H + RA - 0.06571 * t - 6.622\n UT = T - lngHour\n UT = self._force_range(UT, 24)\n hr = self._force_range(int(UT), 24)\n min = round((UT - int(UT)) * 60, 0)\n if min == 60:\n hr += 1\n min = 0\n if hr == 24:\n hr = 0\n day += 1\n if day > calendar.monthrange(year, month)[1]:\n day = 1\n month += 1\n if month > 12:\n month = 1\n year += 1\n return datetime.datetime(year, month, day, hr, (int(min)), tzinfo=(tz.tzutc()))\n\n @staticmethod\n def _force_range(v, max):\n if v < 0:\n return v + max\n if v >= max:\n return v - max\n return v\n\n\nif __name__ == '__main__':\n sun = Sun(85.0, 21.0)\n try:\n print(sun.get_local_sunrise_time())\n print(sun.get_local_sunset_time())\n abd = datetime.date(2014, 1, 3)\n abd_sr = sun.get_local_sunrise_time(abd)\n abd_ss = sun.get_local_sunset_time(abd)\n print(abd_sr)\n print(abd_ss)\n except SunTimeException as e:\n try:\n print('Error: {0}'.format(e))\n finally:\n e = None\n del e","sub_path":"pycfiles/suntime-1.2.5-py3.7/suntime.cpython-37.py","file_name":"suntime.cpython-37.py","file_ext":"py","file_size_in_byte":6286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"495089452","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom .config import EOS, BOS, LANGUAGE_TOKENS\r\nfrom .beam_search import SequenceGenerator\r\nfrom torch.nn.functional import adaptive_avg_pool2d\r\n\r\n\r\nclass Translator(object):\r\n\r\n def __init__(self, model, src_tok, target_tok,\r\n beam_size=5,\r\n length_normalization_factor=0,\r\n max_sequence_length=50,\r\n batch_first=False,\r\n return_all=False,\r\n get_attention=False,\r\n cuda=False):\r\n self.model = model\r\n self.src_tok = src_tok\r\n self.target_tok = target_tok\r\n self.insert_target_start = [BOS]\r\n self.insert_src_start = [BOS]\r\n self.insert_src_end = [EOS]\r\n self.batch_first = batch_first\r\n self.return_all = return_all\r\n self.get_attention = get_attention\r\n self.cuda = cuda\r\n if self.cuda:\r\n model.cuda()\r\n else:\r\n model.cpu()\r\n model.eval()\r\n self.generator = SequenceGenerator(\r\n model=self.model.generate,\r\n beam_size=beam_size,\r\n batch_first=batch_first,\r\n max_sequence_length=max_sequence_length,\r\n get_attention=get_attention,\r\n length_normalization_factor=length_normalization_factor)\r\n\r\n def set_src_language(self, language):\r\n lang = self.src_tok.special_tokens.index(LANGUAGE_TOKENS[language])\r\n self.insert_src_start = [BOS, lang]\r\n\r\n def set_target_language(self, language):\r\n lang = self.target_tok.special_tokens.index(LANGUAGE_TOKENS[language])\r\n self.insert_target_start = [BOS, lang]\r\n\r\n def translate(self, input_sentence, target_priming=None):\r\n target_priming = target_priming or ''\r\n src_tok = self.src_tok.tokenize(input_sentence,\r\n insert_start=self.insert_src_start,\r\n insert_end=self.insert_src_end)\r\n bos = self.target_tok.tokenize(\r\n target_priming, insert_start=self.insert_target_start)\r\n shape = (1, -1) if self.batch_first else (-1, 1)\r\n src = Variable(src_tok.view(*shape), volatile=True)\r\n bos = Variable(bos.view(*shape), volatile=True)\r\n if self.cuda:\r\n src = src.cuda()\r\n bos = bos.cuda()\r\n\r\n self.model.clear_state()\r\n context = self.model.encode(src)\r\n if hasattr(self.model, 'bridge'):\r\n context = self.model.bridge(context)\r\n preds, logprobs, attentions = self.generator.beam_search(bos, context)\r\n num_return = len(preds) if self.return_all else 1\r\n preds = preds[:num_return]\r\n logprobs = logprobs[:num_return]\r\n output = [self.target_tok.detokenize(p[:-1]) for p in preds]\r\n if len(target_priming) > 0:\r\n output = [' '.join([target_priming, o]) for o in output]\r\n\r\n output = output[0] if len(output) == 1 else output\r\n logprobs = logprobs[0] if len(logprobs) == 1 else logprobs\r\n if self.get_attention:\r\n attentions = attentions[:num_return]\r\n attentions = [torch.stack(att, 1) for att in attentions]\r\n attentions = attentions[0] if len(attentions) == 1 else attentions\r\n preds = [[self.target_tok.idx2word(\r\n idx) for idx in p] for p in preds]\r\n preds = preds[0] if len(preds) == 1 else preds\r\n src = [self.src_tok.idx2word(idx) for idx in list(src_tok)]\r\n return output, (attentions, src, preds)\r\n else:\r\n return output\r\n\r\n\r\nclass CaptionGenerator(Translator):\r\n\r\n def __init__(self, model, img_transform, target_tok,\r\n beam_size=5,\r\n length_normalization_factor=0,\r\n max_sequence_length=50,\r\n batch_first=False,\r\n return_all=False,\r\n get_attention=False,\r\n cuda=False):\r\n self.img_transform = img_transform\r\n super(CaptionGenerator, self).__init__(model,\r\n None,\r\n 
target_tok,\r\n                                               beam_size,\r\n                                               length_normalization_factor,\r\n                                               max_sequence_length,\r\n                                               batch_first,\r\n                                               return_all,\r\n                                               get_attention,\r\n                                               cuda)\r\n\r\n    def set_src_language(self, language):\r\n        pass\r\n\r\n    def describe(self, input_img, target_priming=None):\r\n        target_priming = target_priming or ''\r\n        src_img = self.img_transform(input_img)\r\n\r\n        bos = self.target_tok.tokenize(\r\n            target_priming, insert_start=self.insert_target_start)\r\n        shape = (1, -1) if self.batch_first else (-1, 1)\r\n        src = Variable(src_img.unsqueeze(0).unsqueeze(0), volatile=True)\r\n        bos = Variable(bos.view(*shape), volatile=True)\r\n        if self.cuda:\r\n            src = src.cuda()\r\n            bos = bos.cuda()\r\n\r\n        self.model.clear_state()\r\n        context = self.model.encode(src)\r\n        _, c, h, w = list(context[0].size())\r\n        if hasattr(self.model, 'bridge'):\r\n            context = self.model.bridge(context)\r\n\r\n        preds, logprobs, attentions = self.generator.beam_search(bos, context)\r\n        num_return = len(preds) if self.return_all else 1\r\n        preds = preds[:num_return]\r\n        logprobs = logprobs[:num_return]\r\n        output = [self.target_tok.detokenize(p[:-1]) for p in preds]\r\n        if len(target_priming) > 0:\r\n            output = [' '.join([target_priming, o]) for o in output]\r\n        output = output[0] if len(output) == 1 else output\r\n        logprobs = logprobs[0] if len(logprobs) == 1 else logprobs\r\n        if attentions is not None:\r\n            attentions = attentions[:num_return]\r\n            attentions = [torch.stack([a.view(h, w) for a in attns], 0)\r\n                          for attns in attentions]\r\n            attentions = attentions[0] if len(attentions) == 1 else attentions\r\n            preds = [[self.target_tok.idx2word(\r\n                idx) for idx in p] for p in preds]\r\n            preds = preds[0] if len(preds) == 1 else preds\r\n        return output, (attentions, preds)\r\n        # for s,p in zip(sentences,logprob):\r\n        #    print(target_tok.detokenize(s)[::-1],' p=%s' % math.exp(p))\r\n","sub_path":"seq2seq/tools/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"61317434","text":"from collections import deque\nclass Solution:\n    \"\"\"\n    @param grid: The grid\n    @return: Return the steps you need at least\n    \"\"\"\n    def getBestRoad(self, grid):\n        rsz, csz = len(grid), len(grid[0])\n        step = [[[-1, -1] for j in range(csz)] for i in range(rsz)]\n        q = deque([(0, 0, grid[0][0])])\n        step[0][0][grid[0][0]] = 0\n        mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]\n\n        while len(q) > 0:\n            cur = q.popleft()\n            cs = step[cur[0]][cur[1]][cur[2]]+1\n            for m in mov:\n                x, y = cur[0] + m[0], cur[1] + m[1]\n                if x<0 or x>=rsz or y<0 or y>=csz: continue\n                if grid[x][y] == 1:\n                    if cur[2] == 1: continue\n                    if step[x][y][1]==-1 or cs= 0: ans = step[rsz-1][csz-1][0]\n        if step[rsz-1][csz-1][1] >= 0 and (ans==-1 or step[rsz-1][csz-1][1]//',views.content_detail, name='content_detail'),\n    path('email_list',views.email_list,name='email_list'),\n\n\n\n\n]\n","sub_path":"website/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"6324232","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\n\nX = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]])\ny = np.array([sum(x) for x in X])\n\n# multiple linear regression analysis, where X and Y are the independent- and dependent-variable datasets\ndef muti_linear_regre(X, Y):\n    X_train, X_test, y_train, y_test = 
train_test_split(X, Y, test_size=2, random_state=1)\n    reg = LinearRegression(copy_X=True)\n    reg.fit(X_train, y_train)\n    print(reg.predict(X_test))  # use the trained model for prediction/validation on the held-out test set\n\n\nif __name__ == \"__main__\":\n    muti_linear_regre(X, y)\n","sub_path":"book/manyLinear.py","file_name":"manyLinear.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"315819910","text":"import os\nimport shutil\nfrom glob import iglob\nfrom pathlib import Path\n\n\nrecursive_indexes_path = '**/index.html'\ndef get_files_from_recursive_path(path):\n    for f in iglob(path, recursive=True) :\n        if os.path.isfile(f):\n            yield str(Path(f).resolve())\n\ndef make_fresh_dir(dir):\n    if not os.path.exists(dir):\n        os.makedirs(dir)\n    else:\n        shutil.rmtree(dir) # removes all the subdirectories!\n        os.makedirs(dir)\n\ndef get_filename_from_path(path):\n    return os.path.basename(path)\n","sub_path":"helpers/os_tools.py","file_name":"os_tools.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"618123659","text":"class Solution:\n    @staticmethod\n    def no_reps_1_to_9(collection: list) -> bool:\n        return len(set(collection)) == len(collection)\n\n    def isValidSudoku(self, board: list) -> bool:\n        m = len(board)\n        n = len(board[0])\n\n        for i in range(m):\n            row_nums = list(filter(lambda x: x != '.', board[i]))\n            if not Solution.no_reps_1_to_9(row_nums):\n                return False\n\n        for j in range(n):\n            col = [row[j] for row in board]\n            col_nums = list(filter(lambda x: x != '.', col))\n            if not Solution.no_reps_1_to_9(col_nums):\n                return False\n\n        for box_i in range(0, 9, 3):\n            for box_j in range(0, 9, 3):\n                box = [board[i][j] for i in range(box_i, box_i + 3) for j in range(box_j, box_j + 3)]\n                box_nums = list(filter(lambda x: x != '.', box))\n                if not Solution.no_reps_1_to_9(box_nums):\n                    return False\n\n        return True\n\n\nif __name__ == \"__main__\":\n    solu = Solution()\n    board = [\n        [\"5\",\"3\",\".\",\".\",\"7\",\".\",\".\",\".\",\".\"],\n        [\"6\",\".\",\".\",\"1\",\"9\",\"5\",\".\",\".\",\".\"],\n        [\".\",\"9\",\"8\",\".\",\".\",\".\",\".\",\"6\",\".\"],\n        [\"8\",\".\",\".\",\".\",\"6\",\".\",\".\",\".\",\"3\"],\n        [\"4\",\".\",\".\",\"8\",\".\",\"3\",\".\",\".\",\"1\"],\n        [\"7\",\".\",\".\",\".\",\"2\",\".\",\".\",\".\",\"6\"],\n        [\".\",\"6\",\".\",\".\",\".\",\".\",\"2\",\"8\",\".\"],\n        [\".\",\".\",\".\",\"4\",\"1\",\"9\",\".\",\".\",\"5\"],\n        [\".\",\".\",\".\",\".\",\"8\",\".\",\".\",\"7\",\"9\"]\n    ]\n    print(solu.isValidSudoku(board))","sub_path":"codes/MartinMa28/python3/0036_valid_sudoku.py","file_name":"0036_valid_sudoku.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"527411251","text":"import json\n\ntotal = 0\n\nwith open(\"input.json\") as f:\n    j = json.loads(f.readline())\n\ndef handle_list(l):\n    sub_total = 0\n\n    for i in l:\n        if isinstance(i, int):\n            sub_total += i\n        elif isinstance(i, list):\n            sub_total += handle_list(i)\n        elif isinstance(i, dict):\n            sub_total += handle_dict(i)\n\n    return sub_total\n\ndef handle_dict(d):\n    sub_total = 0\n\n    for k in d.keys():\n        if isinstance(k, int):\n            sub_total += k\n\n        v = d[k]\n        if isinstance(v, int):\n            sub_total += v\n        elif isinstance(v, list):\n            sub_total += handle_list(v)\n        elif isinstance(v, dict):\n            sub_total += handle_dict(v)\n\n    return sub_total\n\nif isinstance(j, int):\n    total += j\nelif 
isinstance(j, list):\n total += handle_list(j)\nelif isinstance(j, dict):\n total += handle_dict(j)\n\nprint(\"Total:\", total)","sub_path":"Day 12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"586640366","text":"\"\"\"\nLocal control program to be executed on remote nodes.\n\"\"\"\n__author__ = \"Domenico Garlisi\"\n__copyright__ = \"Copyright (c) 2016, CNIT\"\n__version__ = \"0.1.0\"\n__email__ = \"domenico.garlisi@cnit.it\"\n\n\nimport time\nimport datetime\nimport sys\nfrom sys import stdout\nfrom ctypes import *\nimport os\nimport csv\nimport signal\nimport threading\nimport math\nimport zmq\nimport netifaces as ni\n\n\n\nlibc = CDLL('libc.so.6')\nusleep = lambda x: time.sleep(x/1000000.0)\n\nsys.path.append('../../../')\nsys.path.append(\"../../../agent_modules/wifi_ath\")\nsys.path.append(\"../../../agent_modules/wifi_wmp\")\nsys.path.append(\"../../../agent_modules/wifi\")\nsys.path.append('../../../upis')\nsys.path.append('../../../framework')\nsys.path.append('../../../agent')\n# from agent_modules.wifi_wmp.wmp_structure import UPI_R\nfrom agent_modules.wifi_wmp.adaptation_module.libb43 import *\n\n# @controller.set_default_callback()\n# def default_callback(cmd, data):\n# \tprint((\"DEFAULT CALLBACK : Cmd: {}, Returns: {}\".format(cmd, data)))\n\n# manager = Manager()\n# story_channel = manager.list()\n\n\nstory_file = None\nreading_thread = None\n\ndef signal_handler(signal, frame):\n\tstory_file.close()\n\treading_thread.do_run = False\n\treading_thread.join()\n\ttime.sleep(2)\n\tsys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\n# Definition of Local Control Program\n#def my_local_control_program(controller):\ndef my_local_control_program():\n\n\t#Flags\n\tFLAG_USE_BUSY = 0\n\tFLAG_READONLY = 0\n\tFLAG_VERBOSE = 1\n\tread_interval = 7000 #12000 #(us)\n\n\tPACKET_TO_TRANSMIT\t=0x00F0\n\tMY_TRANSMISSION\t\t=0x00F2\n\tSUCCES_TRANSMISSION\t=0x00F4\n\tOTHER_TRANSMISSION\t=0x00F6\n\tBAD_RECEPTION\t\t=0x00FA\n\tBUSY_SLOT =0x00FC\n\n\tNEAR_SLOT\t=\t43\n\tCOUNT_SLOT\t=\t43\n\tSINC_SLOT_2\t=\t40\n\tSINC_SLOT_1\t=\t41\n\tSINC_SLOT_0\t=\t42\n\n\n\tclass tdma_param(ctypes.Structure):\n\t\t_fields_= [\n\t\t\t('frame_offset', ctypes.c_int),\n\t\t\t('frame_length', ctypes.c_int),\n\t\t\t('slot_assignment', ctypes.c_int)\n\t\t]\n\n\tclass fsm_param(ctypes.Structure):\n\t\t_fields_= [\n\t\t\t#Parameter number.\n\t\t\t('num', ctypes.c_int),\n\t\t\t#Parameter value.\n\t\t\t('value', ctypes.c_int)\n\t\t\t#Linked list.\n\t\t\t#struct fsm_param *next;\n\t\t]\n\n\tclass protocol(ctypes.Structure) :\n\t\t_fields_ = [\n\t\t\t#Unique identifier\n\t\t\t('id', ctypes.c_int),\n\t\t\t#Readable name, such as \"TDMA (slot 1)\"\n\t\t\t('name', c_char_p),\n\t\t\t#Path to the compiled (.txt) FSM implementation\n\t\t\t('fsm_path', ctypes.c_char_p),\n\t\t\t#Parameters for the FSM\n\t\t\t('fsm_params', fsm_param * 2),\n\t\t\t#Protocol emulator for determining decisions of protocol locally\n\t\t\t#protocol_emulator emulator;\n\t\t\t#Parameter for protocol emulator\n\t\t\t#void *parameter;\n\t\t\t('parameter', tdma_param)\n\t\t]\n\n\t# class metamac_slot(ctypes.Structure):\n\t# \t_fields_ = [\n\t# \t\t('slot_num', ctypes.c_ulong),\n\t# \t\t('read_num', ctypes.c_ulong),\n\t# \t\t('host_time', ctypes.c_uint64),\n\t# \t\t('host_time', ctypes.c_uint64),\n\t# \t\t('tsf_time', ctypes.c_uint64 ),\n\t# \t\t('slot_index', ctypes.c_int),\n\t# \t\t('slots_passed', ctypes.c_int),\n 
#\n\t# \t\t#Indicates if this slot was filled in because of a delay in\treading from the board.\n\t# \t\t('filler', ctypes.c_char), #\tuchar filler : 1;\n\t# \t\t#Indicates that a packet was waiting to be transmitted in this slot.\n\t# \t\t('packet_queued', ctypes.c_char), #uchar packet_queued : 1;\n\t# \t\t#Indicates that a transmission was attempted in this slot.\n\t# \t\t('transmitted', ctypes.c_char), #uchar transmitted : 1;\n\t# \t\t#Indicates that a transmission was successful in this slot.\n\t# \t\t('transmit_success', ctypes.c_char), #uchar transmit_success : 1;\n\t# \t\t#Various measures for whether another node attempted to transmit.\n\t# \t\t('transmit_other', ctypes.c_char), #uchar transmit_other : 1;\n\t# \t\t('bad_reception', ctypes.c_char), #uchar bad_reception : 1;\n\t# \t\t('busy_slot', ctypes.c_char), #uchar busy_slot : 1;\n\t# \t\t#Indicates that either a transmission attempt was unsuccessful\n\t# \t\t#in this slot or another node attempted a transmission.\n\t# \t\t('channel_busy', ctypes.c_char) #uchar channel_busy : 1;\n\t# \t]\n\n\tclass metamac_slot(ctypes.Structure):\n\t\t_fields_ = [\n\t\t\t('slot_num', ctypes.c_ulong),\n\t\t\t('read_num', ctypes.c_ulong),\n\t\t\t('host_time', ctypes.c_uint64),\n\t\t\t('tsf_time', ctypes.c_uint64 ),\n\t\t\t('slot_index', ctypes.c_int),\n\t\t\t('slots_passed', ctypes.c_int),\n\n\t\t\t#Indicates if this slot was filled in because of a delay in\treading from the board.\n\t\t\t('filler', ctypes.c_ubyte), #\tuchar filler : 1;\n\t\t\t#Indicates that a packet was waiting to be transmitted in this slot.\n\t\t\t('packet_queued', ctypes.c_ubyte), #uchar packet_queued : 1;\n\t\t\t#Indicates that a transmission was attempted in this slot.\n\t\t\t('transmitted', ctypes.c_ubyte), #uchar transmitted : 1;\n\t\t\t#Indicates that a transmission was successful in this slot.\n\t\t\t('transmit_success', ctypes.c_ubyte), #uchar transmit_success : 1;\n\t\t\t#Various measures for whether another node attempted to transmit.\n\t\t\t('transmit_other', ctypes.c_ubyte), #uchar transmit_other : 1;\n\t\t\t('bad_reception', ctypes.c_ubyte), #uchar bad_reception : 1;\n\t\t\t('busy_slot', ctypes.c_ubyte), #uchar busy_slot : 1;\n\t\t\t#Indicates that either a transmission attempt was unsuccessful\n\t\t\t#in this slot or another node attempted a transmission.\n\t\t\t('channel_busy', ctypes.c_ubyte) #uchar channel_busy : 1;\n\t\t]\n\n\tclass protocol_suite(ctypes.Structure) :\n\t\t_fields_ = [\n\t\t\t#Total number of protocols\n\t\t\t('num_protocols', ctypes.c_int),\n\t\t\t#Index of best protocol. Initially -1.\n\t\t\t('active_protocol', ctypes.c_int),\n\t\t\t#Index of protocols in slots. -1 indicates invalid\n\t\t\t('slots', ctypes.c_int * 2),\n\t\t\t#Which slot is active. 
0 indicates neither are active.\n\t\t\t('active_slot', ctypes.c_int),\n\t\t\t#Offset of slots numbering from read loop to slot numbering for TDMA.\n\t\t\t('slot_offset', ctypes.c_int),\n\t\t\t#Array of all protocols.\n\t\t\t('protocols', protocol * 4),\n\t\t\t#Array of weights corresponding to protocols.\n\t\t\t('weights', ctypes.c_double * 4),\t#double *weights; !!!WARNING for *\n\t\t\t#Factor used in computing weights.\n\t\t\t('eta', ctypes.c_double),\n\t\t\t#Slot information for last to be emulated.\n\t\t\t('last_slot', metamac_slot),\n\t\t\t#Time of last protocol update.\n\t\t\t#('last_update', timespec), # struct ;!!!WARNING for *\n\t\t\t#Indicates whether protocols should be cycled.\n\t\t\t('cycle', ctypes.c_int),\n\t\t]\n\n\tdef set_parameter(b43, slot, num, value):\n\n\t\tparam_addr = 0\n\t\tif num==10:\n\t\t\tparam_addr = 0x16*2\n\t\telif num==11:\n\t\t\tparam_addr = 0x21*2\n\t\telif num==12:\n\t\t\tparam_addr = 0x1F*2\n\t\telif num==13:\n\t\t\tparam_addr = 0x20*2\n\t\telif num==14:\n\t\t\tparam_addr = 0x11*2\n\t\telif num==15:\n\t\t\tparam_addr = 0x12*2\n\t\telif num==16:\n\t\t\tparam_addr = 0x13*2\n\t\telif num==17:\n\t\t\tparam_addr = 0x14*2\n\t\telse:\n\t\t\treturn\n\n\t\tif slot == 0 :\n\t\t\tparam_addr += b43.PARAMETER_ADDR_BYTECODE_1\n\t\telse :\n\t\t\tparam_addr += b43.PARAMETER_ADDR_BYTECODE_2\n\n\t\tb43.shmWrite16(b43.B43_SHM_SHARED, param_addr, value & 0xffff);\n\n\n\n\tdef tdma_emulate(param, slot_num, offset):\n\t\tslot_num += offset\n\t\ttdma_params = param\n\t\tif ((slot_num - tdma_params.frame_offset) % tdma_params.frame_length) == tdma_params.slot_assignment :\n\t\t\tresult = 1.0\n\t\telse :\n\t\t\tresult = 0.0\n\n\t\treturn result\n\n\tdef configure_params(b43, slot, param):\n\n\t\t#while (param != N) {\n\t\tfor i in range(2):\n\t\t\tset_parameter(b43, slot, param[i].num, param[i].value)\n\n\n\n\tdef load_protocol(b43, suite, protocol):\n\n\t\t\"\"\"\n\t\tUpdate this function --> replace the shared memory write function with UPI for change parameter and load radio program\n\t\t set_parameters(param_key_values_dict):\n\t\t activate_radio_program(name)\n\t\t\"\"\"\n\n\n\t\t#struct options opt;\n\t\tactive = suite.active_slot # Always 0 or 1 since metamac_init will already have run.\n\t\tinactive = 1 - active\n\n\t\tif (protocol == suite.slots[active]) :\n\t\t\t#This protocol is already running.\n\t\t\tpass\n\t\telse :\n\t\t\t#Protocol in active slot shares same FSM, but is not the same protocol\n\t\t\t#(already checked). Write the parameters for this protocol.\n\t\t\tconfigure_params(b43, active, suite.protocols[protocol].fsm_params)\n\t\t\tsuite.slots[active] = protocol\n\n\t\t# elif (protocol == suite.slots[inactive]):\n\t\t# \t#Switch to other slot.\n\t\t# \topt.active = (inactive == 0) ? \"1\" : \"2\"\n\t\t# \twriteAddressBytecode(df, &opt)\n\t\t# \tsuite.active_slot = inactive\n #\n\t\t# elif (suite.slots[active] >= 0 ) : #and strcmp(suite->protocols[protocol].fsm_path, suite->protocols[suite->slots[active]].fsm_path) == 0) :\n\t\t# \t#Protocol in active slot shares same FSM, but is not the same protocol\n\t\t# \t#(already checked). 
Write the parameters for this protocol.\n\t\t# \tconfigure_params(b43, active, suite.protocols[protocol].fsm_params)\n\t\t# \tsuite.slots[active] = protocol\n #\n\t\t# elif (suite.slots[inactive] >= 0): #and strcmp(suite->protocols[protocol].fsm_path, suite->protocols[suite->slots[inactive]].fsm_path) == 0) :\n\t\t# \t#Protocol in inactive slot shares same FSM, but is not the same protocol,\n\t\t# \t#so write the parameters for this protocol and activate it.\n #\n\t\t# \t# configure_params(df, inactive, suite->protocols[protocol].fsm_params);\n\t\t# \t# opt.active = (inactive == 0) ? \"1\" : \"2\";\n\t\t# \t# writeAddressBytecode(df, &opt);\n #\n\t\t# \tsuite.slots[inactive] = protocol\n\t\t# \tsuite.active_slot = inactive\n #\n\t\t# else:\n\t\t# \t#Load into inactive slot.\n #\n\t\t# \t# opt.load = (inactive == 0) ? \"1\" : \"2\";\n\t\t# \t# opt.name_file = suite->protocols[protocol].fsm_path;\n\t\t# \t# bytecodeSharedWrite(df, &opt);\n\t\t# \t# configure_params(df, inactive, suite->protocols[protocol].fsm_params);\n\t\t# \t# opt.active = opt.load;\n\t\t# \t# writeAddressBytecode(df, &opt);\n #\n\t\t# \tsuite.slots[inactive] = protocol\n\t\t# \tsuite.active_slot = inactive\n\n\t\tsuite.active_protocol = protocol\n\t\tsuite.last_update = monotonic_time()\n\n\n\tdef metamac_evaluate(b43, suite):\n\n\t\t#Identify the best protocol.\n\t\tbest = 0\n\t\tfor i in range(suite.num_protocols):\n\t\t\tif (suite.weights[i] > suite.weights[best]) :\n\t\t\t\tbest = i\n\n\t\tif (suite.cycle) :\n\t\t\t# struct timespec current_time;\n\t\t\t# clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time);\n\t\t\t# uint64_t timediff = (current_time.tv_sec - suite->last_update.tv_sec) * 1000000L +\n\t\t\t# \t(current_time.tv_nsec - suite->last_update.tv_nsec) / 1000L;\n #\n\t\t\t# if (timediff > 1000000L) {\n\t\t\t# \tload_protocol(df, suite, (suite->active_protocol + 1) % suite->num_protocols);\n\t\t\t# }\n\t\t\tpass\n\t\telif (best != suite.active_protocol):\n\t\t\tload_protocol(b43, suite, best)\n\n\t#static void metamac_display(unsigned long loop, struct protocol_suite *suite)\n\tdef metamac_display(loop, suite):\n\t\t# if (loop > 0):\n\t\t# \t#Reset cursor upwards by the number of protocols we will be printing.\n\t\t# \tprint(\"\\x1b[%dF\", suite.num_protocols)\n\n\t\tfor i in range(0, suite.num_protocols):\n\t\t\tactive_string = ' '\n\t\t\tif suite.active_protocol == i:\n\t\t\t\tactive_string = '*'\n\n\t\t\t#print(\"%c %5.3f %s\\n\" % (active_string, suite.weights[i], suite.protocols[i].name))\n\t\t\tif i == 0 :\n\t\t\t\tstdout.write(\"\\r%c %5.3f %s -- \" % (active_string, suite.weights[i], suite.protocols[i].name))\n\t\t\telse:\n\t\t\t\tstdout.write(\"%c %5.3f %s -- \" % (active_string, suite.weights[i], suite.protocols[i].name))\n\t\t\tstdout.flush()\n\n\t\tglobal socket_visualizer\n\t\t#ip_address = controller.net.get_iface_ip_addr(interface)\n\t\tiface = 'wlan0'\n\t\tip_address = [inetaddr['addr'] for inetaddr in ni.ifaddresses(iface)[ni.AF_INET]]\n\t\tstock_data = {\n 'node_ip_address': ip_address,\n 'active': suite.active_protocol\n }\n\n\t\t#send information to visualizer outside the laboratory\n\t\t#socket_visualizer.send(b'client message to server1')\n\t\tsocket_visualizer.send_json(stock_data)\n\t\tmessage = socket_visualizer.recv()\n\t\t#print(\"Received reply [\" + str(message) + \"]\")\n\n\t#void queue_multipush(struct metamac_queue *queue, struct metamac_slot *slots, size_t count)\n\tdef queue_multipush(story_channel, story_file, story_channel_len, story_channel_len_diff):\n\n\t\tai = story_channel_len - 
story_channel_len_diff\n\t\twhile ai < story_channel_len :\n\n\t\t\t# print(\"%d - %d : %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\" %\n\t\t\t# ( ai, story_channel_len_diff, int(story_channel[ai].slot_num), int(story_channel[ai].read_num), int(story_channel[ai].host_time),\n\t\t\t# story_channel[ai].tsf_time, story_channel[ai].slot_index, story_channel[ai].slots_passed,\n\t\t\t# (story_channel[ai].filler), (story_channel[ai].packet_queued), (story_channel[ai].transmitted),\n\t\t\t# (story_channel[ai].transmit_success), (story_channel[ai].transmit_other),\n\t\t\t# (story_channel[ai].bad_reception), (story_channel[ai].busy_slot), (story_channel[ai].channel_busy) ))\n\n\t\t\tstory_file.write(\"%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\\n\" %\n\t\t\t( int(story_channel[ai].slot_num), int(story_channel[ai].read_num), int(story_channel[ai].host_time),\n\t\t\tstory_channel[ai].tsf_time, story_channel[ai].slot_index, story_channel[ai].slots_passed,\n\t\t\t(story_channel[ai].filler), (story_channel[ai].packet_queued), (story_channel[ai].transmitted),\n\t\t\t(story_channel[ai].transmit_success), (story_channel[ai].transmit_other),\n\t\t\t(story_channel[ai].bad_reception), (story_channel[ai].busy_slot), (story_channel[ai].channel_busy) ))\n\n\t\t\tai += 1\n\n\t\t# if (logfile != NULL) {\n\t\t# \t\t\tfprintf(logfile, \"%llu,%d,%llu,%llu,%llu,%d,%d,%01x,%01x,%01x,%01x,%01x,%01x,%01x,%01x,%s\",\n\t\t# \t\t\t\t(unsigned long long) slots[i].slot_num,\n\t\t# \t\t\t\tsuite->slot_offset,\n\t\t# \t\t\t\t(unsigned long long) slots[i].read_num,\n\t\t# \t\t\t\t(unsigned long long) slots[i].host_time,\n\t\t# \t\t\t\t(unsigned long long) slots[i].tsf_time,\n\t\t# \t\t\t\tslots[i].slot_index,\n\t\t# \t\t\t\tslots[i].slots_passed,\n\t\t# \t\t\t\tslots[i].filler,\n\t\t# \t\t\t\tslots[i].packet_queued,\n\t\t# \t\t\t\tslots[i].transmitted,\n\t\t# \t\t\t\tslots[i].transmit_success,\n\t\t# \t\t\t\tslots[i].transmit_other,\n\t\t# \t\t\t\tslots[i].bad_reception,\n\t\t# \t\t\t\tslots[i].busy_slot,\n\t\t# \t\t\t\tslots[i].channel_busy,\n\t\t# \t\t\t\tsuite->protocols[suite->active_protocol].name);\n #\n\t\t# \t\t\tfor (int i = 0; i < suite->num_protocols; i++) {\n\t\t# \t\t\t\tfprintf(logfile, \",%e\", suite->weights[i]);\n\t\t# \t\t\t}\n #\n\t\t# \t\t\tfprintf(logfile, \"\\n\");\n\t\t# \t\t}\n\n\n\n\n\tdef acquire_slots_channel(story_channel):\n\t\treading_thread = threading.currentThread()\n\t\tb43_phy = None\n\t\tb43 = B43(b43_phy)\n\t\tslot_time = 2200 #(us)\n\n\t\tslot_num = 0\n\t\tread_num = 0\n\t\tslot_index = 0 #(int)\n\t\tlast_slot_index = 0 #(int)\n\t\ttsf = 0 #(uint64_t)\n\t\tlast_tsf = 0 #(uint64_t)\n\t\tinitial_tsf = 0 #(uint64_t)\n\n\t\tstart_time = monotonic_time() #(timespec)\n\t\tloop_end = 0 #(uint64_t)\n\n\t\tinitial_tsf = b43.getTSFRegs()\n\t\ttsf = initial_tsf\n\t\tslot_index = b43.shmRead16(b43.B43_SHM_REGS, COUNT_SLOT) & 0x7\n\t\tslot_num = (slot_index + 1) % 8\n\n\t\t# metamac control loop\n\t\t# while not controller.is_stopped() :\n\t\t# \tmsg = controller.recv(timeout=1)\n\t\twhile getattr(reading_thread, \"do_run\", True):\n\n\t\t\tcurrent_time = monotonic_time() #(timespec)\n\t\t\tloop_start = int((current_time.tv_sec - start_time.tv_sec) * 1000000 + (current_time.tv_nsec - start_time.tv_nsec) / 1000) #(uint64_t )\n\t\t\tlast_tsf = tsf\n\t\t\ttsf = b43.getTSFRegs()\n\t\t\tlast_slot_index = slot_index\n\n\n\t\t\t''' replaced the following part with UPI get_measurements_periodic\n\t\t\tSTART\n\t\t\t'''\n\n\t\t\tslot_index = b43.shmRead16(b43.B43_SHM_REGS, COUNT_SLOT) & 
0x7\n\n\t\t\tpacket_queued = b43.shmRead16(b43.B43_SHM_SHARED, PACKET_TO_TRANSMIT) #(uint)\n\t\t\ttransmitted = b43.shmRead16(b43.B43_SHM_SHARED, MY_TRANSMISSION) #(uint)\n\t\t\ttransmit_success = b43.shmRead16(b43.B43_SHM_SHARED, SUCCES_TRANSMISSION) #(uint)\n\t\t\ttransmit_other = b43.shmRead16(b43.B43_SHM_SHARED, OTHER_TRANSMISSION) #(uint)\n\t\t\tbad_reception = b43.shmRead16(b43.B43_SHM_SHARED, BAD_RECEPTION) #(uint)\n\t\t\tbusy_slot = b43.shmRead16(b43.B43_SHM_SHARED, BUSY_SLOT) #(uint)\n\n\t\t\tend_slot_index = b43.shmRead16(b43.B43_SHM_REGS, COUNT_SLOT) & 0x7 #(int)\n\n\t\t\t'''\n\t\t\tSTOP\n\t\t\t'''\n\t\t\tchannel_busy = 0 #(uint)\n\t\t\tif (FLAG_USE_BUSY) :\n\t\t\t\tchannel_busy = (transmitted & ~transmit_success) |((transmit_other | bad_reception | busy_slot) & ~(transmitted & transmit_success))\n\t\t\telse:\n\t\t\t\tchannel_busy = (transmitted & ~transmit_success) |((transmit_other | bad_reception) & ~(transmitted & transmit_success))\n\n\t\t\tslots_passed = slot_index - last_slot_index #(int)\n\t\t\tif slots_passed < 0:\n\t\t\t\tslots_passed = slots_passed + 8\n\n\t\t\tactual = tsf - last_tsf #int64_t actual = ((int64_t)tsf) - ((int64_t)last_tsf);\n\n\t\t\t#print(\" read %d - last_tsf %d - tsf %d - diff %d: %x, %x, %x, %x, %x, %x, %x\" % (read_num, last_tsf, tsf, actual, packet_queued, transmitted, transmit_success, transmit_other, bad_reception, busy_slot, end_slot_index))\n\n\t\t\t# if (actual < 0 or actual > 200000) :\n\t\t\t# \tprint(\"Received TSF difference of %lld between consecutive reads.\\n\", actual)\n\t\t\t# \t# Unresolved bug with hardware/firmware/kernel driver causes occasional large jumps\n\t\t\t# \t#in the TSF counter value. In this situation use time from the OS timer instead.\n\t\t\t# \t#actual = ((int64_t)loop_start) - ((int64_t)loop_end)\n\t\t\t# \tactual = loop_start - loop_end\n\t\t\t# min_diff = abs(actual - slots_passed * slot_time) #(int64_t )\n\t\t\t# #Suppose last_slot_index is 7 and slot_index is 5. Then, since the slot\n\t\t\t# #is a value mod 8 we know the actual number of slots which have passed is\n\t\t\t# #>= 6 and congruent to 6 mod 8. Using the TSF counter from the network card,\n\t\t\t# #we find the most likely number of slots which have passed. */\n\t\t\t# diff = abs(actual - (slots_passed + 8) * slot_time) #(int64_t )\n\t\t\t# while (diff < min_diff) :\n\t\t\t# \tslots_passed += 8\n\t\t\t# \tmin_diff = diff\n\t\t\t# \tdiff = abs(actual - (slots_passed + 8) * slot_time)\n\t\t\t# #Because the reads are not atomic, the values for the slot\n\t\t\t# #indicated by slot_index are effectively unstable and could change between\n\t\t\t# #the reads for the different feedback variables. Thus, only the last 7 slots\n\t\t\t# #can be considered valid. If more than 7 slots have passed, we have to inject\n\t\t\t# #empty slots to maintain the synchronization. Note that the 7th most recent\n\t\t\t# #slot is at an offset of -6 relative to the current slot, hence the -1. */\n\t\t\tslot_offset = slots_passed #(int)\n\t\t\t# #int max_read_offset = (slot_index <= end_slot_index) ? slot_index - end_slot_index + 7 : slot_index - end_slot_index - 1;\n\t\t\t# if (slot_index <= end_slot_index) :\n\t\t\t# \tmax_read_offset = slot_index - end_slot_index + 7\n\t\t\t# else:\n\t\t\t# \tmax_read_offset = slot_index - end_slot_index - 1\n\t\t\t#\n\t\t\t# while slot_offset > max_read_offset:\n\t\t\t# \tslot_offset-= 1\n\t\t\t# \t#Empty filler slot\n\t\t\t# \tslot_num+=1\n\n\t\t\tslots = [ metamac_slot() for i in range(8)] #(struct metamac_slot) |!!! 
warning for memory leak\n\t\t\tai = 0\n\t\t\twhile slot_offset > 0 :\n\t\t\t\tslot_offset-=1\n\t\t\t\tsi = slot_index - slot_offset #(int)\n\t\t\t\tif si < 0 :\n\t\t\t\t\tsi = si + 8\n\n\t\t\t\tslot_num+=1\n\t\t\t\tslots[ai].slot_num = slot_num\n\t\t\t\tslots[ai].read_num = read_num\n\t\t\t\tslots[ai].host_time = loop_start\n\t\t\t\tslots[ai].tsf_time = tsf\n\t\t\t\tslots[ai].slot_index = slot_index\n\t\t\t\tslots[ai].slots_passed = slots_passed\n\t\t\t\tslots[ai].filler = 0\n\t\t\t\tslots[ai].packet_queued = (packet_queued >> si) & 1\n\t\t\t\tslots[ai].transmitted = (transmitted >> si) & 1\n\t\t\t\tslots[ai].transmit_success = (transmit_success >> si) & 1\n\t\t\t\tslots[ai].transmit_other = (transmit_other >> si) & 1\n\t\t\t\tslots[ai].bad_reception = (bad_reception >> si) & 1\n\t\t\t\tslots[ai].busy_slot = (busy_slot >> si) & 1\n\t\t\t\tslots[ai].channel_busy = (channel_busy >> si) & 1\n\t\t\t\tai+=1\n\n\t\t\tfor i in range(ai):\n\t\t\t# \t#save in dynamic array\n\t\t\t\tstory_channel.append(slots[i])\n\n\t\t\t# for i in range(ai):\n\t\t\t# \tprint(\"%d, %d, %d, \" % ( int(story_channel[i].slot_num), int(story_channel[i].read_num), int(story_channel[i].host_time) ))\n\n\t\t\tcurrent_time = monotonic_time() #(timespec)\n\t\t\tloop_end = int((current_time.tv_sec - start_time.tv_sec) * 1000000 + (current_time.tv_nsec - start_time.tv_nsec) / 1000)\n\t\t\tdelay = (loop_start + read_interval - loop_end) #(int64_t) #delay = ((int64_t)loop_start) + read_interval - ((int64_t)loop_end) #(int64_t)\n\t\t\t#print(\"ai %d - loop_start %d - loop_end %d - diff %d - delay %d - story_channel_len %d\" % (ai, loop_start, loop_end, loop_end-loop_start, delay, len(story_channel) ))\n\t\t\t# we can't make the delay shorter than ~7 ms\n\t\t\tif (delay > 0):\n\t\t\t\t#usleep(delay)\n\t\t\t\tlibc.usleep(int(delay))\n\n\t\t\t# current_time_delay = monotonic_time() #(timespec)\n\t\t\t# loop_end_delay = int((current_time_delay.tv_sec - current_time.tv_sec) * 1000000 + (current_time_delay.tv_nsec - current_time.tv_nsec) / 1000)\n\t\t\t# print(\" delay %d\" % (loop_end_delay))\n\n\t\t\tread_num+=1\n\n\n\t#Performs the computation for emulating the suite of protocols\n\t#for a single slot, and adjusting the weights.\n\t#void update_weights(struct protocol_suite* suite, struct metamac_slot current_slot)\n\tdef update_weights(suite, current_slot, ai):\n\t\t#Accounting for the fact that the slots that TDMA variants transmit on are\n\t\t# not necessarily aligned to the slot indices provided by the board. For instance,\n\t\t# one would expect that TDMA-4 slot 1 would transmit on slot indexes 1 and 5, but\n\t\t# this is not necessarily true. 
Offset between transmissions will be 4, but not\n\t\t# necessarily aligned to the slot indexes.\n\n\t\t# print(\"%d - %d : %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\" %\n\t\t# \t( ai, story_channel_len_diff, int(story_channel[ai].slot_num), int(story_channel[ai].read_num), int(story_channel[ai].host_time),\n\t\t# \tstory_channel[ai].tsf_time, story_channel[ai].slot_index, story_channel[ai].slots_passed,\n\t\t# \t(story_channel[ai].filler), (story_channel[ai].packet_queued), (story_channel[ai].transmitted),\n\t\t# \t(story_channel[ai].transmit_success), (story_channel[ai].transmit_other),\n\t\t# \t(story_channel[ai].bad_reception), (story_channel[ai].busy_slot), (story_channel[ai].channel_busy) ))\n\n\n\t\t#if (suite.protocols[suite.active_protocol].emulator == tdma_emulate && current_slot.transmitted):\n\t\tif (current_slot.transmitted):\n\t\t\t#Update slot_offset\n\t\t\tparams = suite.protocols[suite.active_protocol].parameter\n\t\t\tneg_offset = (current_slot.slot_num - params.frame_offset - params.slot_assignment) % params.frame_length\n\t\t\tsuite.slot_offset = (params.frame_length - neg_offset) % params.frame_length\n\n\t\t\t#If there is no packet queued for this slot, consider all protocols to be correct\n\t\t\t#and thus the weights will not change\n\t\tif (current_slot.packet_queued) :\n\t\t\t#z represents the correct decision for this slot - transmit if the channel\n\t\t\t#is idle (1.0) or defer if it is busy (0.0)\n\n\t\t\tz=0.0\n\t\t\tif (not current_slot.channel_busy):\n\t\t\t\tz = 1.0\n\n\n\t\t\tfor p in range(suite.num_protocols) :\n\t\t\t\t# d is the decision of this component protocol - between 0 and 1\n#\t\t\t\td = suite.protocols[p].emulator(suite->protocols[p].parameter,\n#\t\t\t\t\tcurrent_slot.slot_num, suite->slot_offset, suite->last_slot);\n\n\t\t\t\td = tdma_emulate(suite.protocols[p].parameter, current_slot.slot_num, suite.slot_offset)\n\n#\t\t\t\tstdout.write(\"[%d] d=%e, z=%e \\n\" % (p, d, z,))\n\n\t\t\t\texponent = suite.eta * math.fabs(d - z)\n\t\t\t\tsuite.weights[p] *= math.exp(-exponent)\n\n\t\t\t\tif suite.weights[p]<0.01:\n\t\t\t\t\tsuite.weights[p]=0.01\n\n\n\n\t\t\t#Normalize the weights\n\t\t\ts = 0\n\t\t\tfor p in range(suite.num_protocols):\n\t\t\t\ts += suite.weights[p]\n\t\t\tfor p in range(suite.num_protocols):\n\t\t\t\tsuite.weights[p] /= s\n\n\n#\t\t\tfor p in range(suite.num_protocols):\n#\t\t\t\tstdout.write(\"%5.3f\\n\" % (suite.weights[p]))\n\n\t\tsuite.last_slot = current_slot\n\n\tsocket_visualizer = None\n\n\tdef socket_visualizer():\n\t\tglobal socket_visualizer\n\t\tport = \"8300\"\n\n\t\tprint('start socket visualizer')\n\n\t\tcontext = zmq.Context()\n\t\tsocket_visualizer = context.socket(zmq.REQ)\n\t\t#socket_visualizer.connect(\"tcp://localhost:%s\" % port)\n\t\tsocket_visualizer.connect(\"tcp://10.8.8.6:%s\" % port)\n\n\n\t''' Main program '''\n\n\t# # control loop\n # print(\"Local ctrl program started: {}\".format(controller.name))\n # while not controller.is_stopped():\n # msg = controller.recv(timeout=1)\n # if msg:\n # ch = msg[\"new_channel\"]\n # print(\"Schedule get monitor to {} in 5s:\".format(ch))\n # UPI_myargs = {'interface' : 'wlan0', 'measurements' : [UPI_R.REGISTER_1, UPI_R.REGISTER_2, UPI_R.NUM_TX_DATA_FRAME, UPI_R.NUM_RX_ACK, UPI_R.NUM_RX_ACK_RAMATCH, UPI_R.BUSY_TYME , UPI_R.TSF, UPI_R.NUM_RX_MATCH] }\n # result = controller.delay(5).radio.get_monitor(UPI_myargs)\n # controller.send_upstream({\"myResult\": result})\n\n\tif len(sys.argv) < 2:\n\t\tsys.exit('Usage: %s eta_value' % sys.argv[0])\n\n\tsuite 
= protocol_suite()\n\tnum_protocols = 4\n\t#eta = 0.5\n\teta = float(sys.argv[1])\n\tprint('eta = %f' % eta)\n\n\tsocket_visualizer()\n\n\tprotocols = [protocol() for i in range(num_protocols)]\n\n\t#setting protocol structure 0\n\tprotocols[0].id = 1\n\tprotocols[0].name = b'TDMA (slot 0)'\n\tprotocols[0].fsm_path = b'tdma-4.txt'\n\t#protocols[0].fsm_params = '/params'\n\tprotocols[0].fsm_params[0].num = 12\n\tprotocols[0].fsm_params[0].value = 4\n\tprotocols[0].fsm_params[1].num = 11\n\tprotocols[0].fsm_params[1].value = 0\n\tprotocols[0].emulator = b'tdma'\n\t#protocols[0].parameter = 'params';\n\tprotocols[0].parameter.frame_offset = 0\n\tprotocols[0].parameter.frame_length = 4\n\tprotocols[0].parameter.slot_assignment = 0\n\n\tprotocols[1].id = 2\n\tprotocols[1].name = b'TDMA (slot 1)'\n\tprotocols[1].fsm_path = b'tdma-4.txt'\n\t#protocols[0].fsm_params = '/params'\n\tprotocols[1].fsm_params[0].num = 12\n\tprotocols[1].fsm_params[0].value = 4\n\tprotocols[1].fsm_params[1].num = 11\n\tprotocols[1].fsm_params[1].value = 1\n\tprotocols[1].emulator = b'tdma'\n\t#protocols[0].parameter = 'params';\n\tprotocols[1].parameter.frame_offset = 0\n\tprotocols[1].parameter.frame_length = 4\n\tprotocols[1].parameter.slot_assignment = 1\n\n\tprotocols[2].id = 3\n\tprotocols[2].name = b'TDMA (slot 2)'\n\tprotocols[2].fsm_path = b'tdma-4.txt'\n\t#protocols[0].fsm_params = '/params'\n\tprotocols[2].fsm_params[0].num = 12\n\tprotocols[2].fsm_params[0].value = 4\n\tprotocols[2].fsm_params[1].num = 11\n\tprotocols[2].fsm_params[1].value = 2\n\tprotocols[2].emulator = b'tdma'\n\t#protocols[0].parameter = 'params';\n\tprotocols[2].parameter.frame_offset = 0\n\tprotocols[2].parameter.frame_length = 4\n\tprotocols[2].parameter.slot_assignment = 2\n\n\tprotocols[3].id = 4\n\tprotocols[3].name = b'TDMA (slot 3)'\n\tprotocols[3].fsm_path = b'tdma-4.txt'\n\t#protocols[0].fsm_params = '/params'\n\tprotocols[3].fsm_params[0].num = 12\n\tprotocols[3].fsm_params[0].value = 4\n\tprotocols[3].fsm_params[1].num = 11\n\tprotocols[3].fsm_params[1].value = 3\n\tprotocols[3].emulator = b'tdma'\n\t#protocols[0].parameter = 'params';\n\tprotocols[3].parameter.frame_offset = 0\n\tprotocols[3].parameter.frame_length = 4\n\tprotocols[3].parameter.slot_assignment = 3\n\n\t#protocols suite INIT VALUES\n\tsuite.num_protocols = num_protocols\n\n\t#number of current active protocol from protocol structure\n\tsuite.active_protocol = 1\n\n\t#number of protocol present in the specified slot\n\tsuite.slots[0] = 1\n\tsuite.slots[1] = 1\n\n\t#number of current active slot\n\tsuite.active_slot = 1\n\n\tsuite.slot_offset = 0\n\n\tsuite.protocols[0] = protocols[0]\n\tsuite.protocols[1] = protocols[1]\n\tsuite.protocols[2] = protocols[2]\n\tsuite.protocols[3] = protocols[3]\n\tfor p in range(4) :\n\t\tsuite.weights[p] = 1.0 / num_protocols\n\n\tsuite.eta = eta\n\tsuite.last_slot.slot_num = -1\n\tsuite.last_slot.packet_queued = 0\n\tsuite.last_slot.transmitted = 0\n\tsuite.last_slot.channel_busy = 0\n\tsuite.cycle = 0\n\n\tstory_channel = [] # [ metamac_slot() ]\n\tglobal story_file\n\tglobal reading_thread\n\t# slots = [ metamac_slot() for i in range(8)]\n\t# story_channel.append(slots[0])\n\t# share_queue = Queue()\n\n\treading_thread = threading.Thread(target=acquire_slots_channel, args=(story_channel,))\n\treading_thread.start()\n\t# p = Process(target=acquire_slots_channel, args=(share_queue,))\n\t# p.start()\n\t#p.join()\n\n\ttime.sleep(2)\n\n\n\tb43_phy = None\n\tb43 = B43(b43_phy)\n\n\tstory_file = open(\"story.csv\", 
\"w\")\n\tstory_file.write(\"slot_num, read_num, host_time, tsf_time, slot_index, slots_passed, \\\n\t filler, packet_queued, transmitted, transmit_success, transmit_other, \\\n\t bad_reception, busy_slot, channel_busy \\n\")\n\n\tstory_channel_len = 0\n\t# metamac control loop\n\t# while not controller.is_stopped() :\n\t# \tmsg = controller.recv(timeout=1)\n\n\t#metamac_loop_break = 0\n\tlast_update_time = monotonic_time()\n\tloop = 0\n\n\twhile True: #(metamac_loop_break == 0)\n\t\t#print(\"Main thread\")\n\t\ttime.sleep(0.1)\n\n\t\tif( (len(story_channel) - story_channel_len) > 60):\n\t\t\tstory_channel_len_old = len(story_channel)-60\n\t\telse:\n\t\t\tstory_channel_len_old = story_channel_len\n\n\t\tstory_channel_len = len(story_channel)\n\t\tstory_channel_len_diff = story_channel_len - story_channel_len_old\n\t\t#print('\\n\\nstory_channel len %d - diff %d - last slot num %d' % (story_channel_len, story_channel_len_diff, story_channel[story_channel_len-1].slot_num))\n\n\t\t#store channel evolution on file\n\t\tif story_channel_len_diff > 0 :\n\t\t#\n # \t# struct metamac_slot slots[16];\n # \t# size_t count = queue_multipop(queue, slots, ARRAY_SIZE(slots));\n #\n\t\t\tqueue_multipush(story_channel, story_file, story_channel_len, story_channel_len_diff)\n\n\t\t\tfor i in range((story_channel_len - story_channel_len_diff), story_channel_len ):\n\t\t\t\t#print('\\n\\n i %d -story_channel len %d - diff %d - last slot num %d' % (i, story_channel_len, story_channel_len_diff, story_channel[story_channel_len-1].slot_num))\n\t\t\t\tupdate_weights(suite, story_channel[i], i)\n\n\n\t\t#Update running protocol\n\t\t#if (!(flags & FLAG_READONLY)) :\n\t\tif (not FLAG_READONLY):\n\t\t\tmetamac_evaluate(b43, suite)\n\n\t\tif FLAG_VERBOSE :\n\t\t\tcurrent_time = monotonic_time() #(timespec)\n\t\t\ttimediff = (current_time.tv_sec - last_update_time.tv_sec) * 1000000 + (current_time.tv_nsec - last_update_time.tv_nsec) / 1000\n\t\t\t# Update display every 1 second\n\t\t\tif (timediff > 1000000) :\n\t\t\t\tmetamac_display(loop, suite)\n\t\t\t\tloop+=1\n\t\t\t\tlast_update_time = current_time\n\n\nif __name__ == \"__main__\":\n\tmy_local_control_program()\n","sub_path":"Get_Started_Examples/Advanced-WMP-Example/separated_metamac_logic.py","file_name":"separated_metamac_logic.py","file_ext":"py","file_size_in_byte":29368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163894941","text":"from .factories import TaskFactory, SubTaskFactory, AccrualFactory, PccFactory\nfrom ..models import Task, SubTask, Accrual, Pcc\n\nimport pytest\n\n\n@pytest.mark.django_db\nclass TestContractModel:\n def test_model_instance(self):\n model = TaskFactory.create()\n assert isinstance(model, Task) == True\n\n\n@pytest.mark.django_db\nclass TestContractChangeModel:\n def test_model_instance(self):\n model = SubTaskFactory.create()\n assert isinstance(model, SubTask) == True\n\n\n@pytest.mark.django_db\nclass TestContractorModel:\n def test_model_instance(self):\n model = AccrualFactory.create()\n assert isinstance(model, Accrual) == True\n\n\n@pytest.mark.django_db\nclass TestContractorContactModel:\n def test_model_instance(self):\n model = PccFactory.create()\n assert isinstance(model, Pcc) == True\n\n","sub_path":"tbpc/team_mgt/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544350813","text":"import pandas as pd\nimport 
re\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv(\"testhoreca.csv\",usecols=[6])\n\ndef hasNumbers(inputString):\n return bool(re.search(r'\\d', inputString))\n\nlong = []\nlat = []\nnumber = \"\"\nspatie = False\nfor x in data[\"LOCATIE_WKT\"]:\n x = x.replace(\"POINT (\", \"\").replace(\")\", \" \")\n spatie = False\n for numb in x:\n if numb == \" \":\n if spatie == False:\n long.append(float(number))\n if spatie == True:\n lat.append(float(number))\n spatie = True\n number = \"\"\n if numb != \" \":\n number += numb\n\n\nd = {'long': long, 'lat': lat}\ndf = pd.DataFrame(data=d)\n\ndf.to_csv(\"horecacoor.csv\",index=False)\n\n","sub_path":"project code and data/horecacoor.py","file_name":"horecacoor.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"329149836","text":"\nimport numpy as np\nimport pandas as pd\nfrom flask import Flask, render_template, request\n# libraries for making count matrix and similarity matrix\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n#Import TfIdfVectorizer from scikit-learn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n# Import linear_kernel\nimport sys\nfrom sklearn.metrics.pairwise import linear_kernel\n\ndef cosine_simi():\n df = pd.read_csv('data.csv')\n tfidf = TfidfVectorizer(stop_words='english')\n #Replace NaN with an empty string\n df['overview'] = df['overview'].fillna('')\n #Construct the required TF-IDF matrix by fitting and transforming the data\n tfidf_matrix = tfidf.fit_transform(df['overview'])\n # Compute the cosine similarity matrix\n cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n return cosine_sim\n\ndef get_recommendations(title, cosine_sim=cosine_simi()):\n df = pd.read_csv('data.csv')\n\n if title not in df['title'].unique():\n return('This movie is not in our database.\\nPlease check if you spelled it correct.')\n else:\n\t indices = pd.Series(df.index, index=df['title']).drop_duplicates()\n\t # Get the index of the movie that matches the title\n\t idx = indices[title]\n\n\t # Get the pairwsie similarity scores of all movies with that movie\n\t sim_scores = list(enumerate(cosine_sim[idx]))\n\n\t # Sort the movies based on the similarity scores\n\t sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\n\t # Get the scores of the 10 most similar movies\n\t sim_scores = sim_scores[1:11]\n\n\t # Get the movie indices\n\t movie_indices = [i[0] for i in sim_scores]\n\n\t # Return the top 10 most similar movies\n\t #return df['title'].iloc[movie_indices]\n\n#comment\n\t # making an empty list that will containg all 10 movie recommendations\n\t l = []\n\t for i in range(len(sim_scores)):\n\t a = sim_scores[i][0]\n\t l.append(df['title'][a])\n\t return l\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route(\"/recommend\")\ndef recommend():\n title = request.args.get('title')\n recommendation = get_recommendations(title)\n #print(recommendation, file=sys.stderr)\n if type(recommendation)==type('string'):\n return render_template('recommend.html',movie=title,r=recommendation,t='s')\n else:\n return render_template('recommend.html',movie=title,r =recommendation,t='l')\n \n \n\nif __name__ == '__main__':\n app.run(debug=True, 
threaded=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"508777215","text":"import os\n\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\n\ndef data_loader(root, batch_size=256, workers=1, pin_memory=True):\n    traindir = os.path.join(root, 'bias')\n    valdir = os.path.join(root, 'test')\n    normalize = transforms.Normalize(mean=[0.45, 0.45, 0.45],\n                                     std=[0.225, 0.225, 0.225])\n\n    train_dataset = datasets.ImageFolder(\n        traindir,\n        transforms.Compose([\n            transforms.RandomResizedCrop(224),\n            transforms.ToTensor(),\n            normalize\n        ])\n    )\n    train_dataset2 = datasets.ImageFolder(\n        traindir,\n        transforms.Compose([\n            transforms.Resize(256),\n            transforms.CenterCrop(224),\n            transforms.ToTensor(),\n            normalize\n        ])\n    )\n    val_dataset = datasets.ImageFolder(\n        valdir,\n        transforms.Compose([\n            transforms.Resize(256),\n            transforms.CenterCrop(224),\n            transforms.ToTensor(),\n            normalize\n        ])\n    )\n    train_loader = torch.utils.data.DataLoader(\n        train_dataset,\n        batch_size=batch_size,\n        shuffle=True,\n        num_workers=workers,\n        pin_memory=pin_memory\n    )\n    train_loader2 = torch.utils.data.DataLoader(\n        train_dataset2,\n        batch_size=256,\n        shuffle=True,\n        num_workers=workers,\n        pin_memory=pin_memory\n    )\n    val_loader = torch.utils.data.DataLoader(\n        val_dataset,\n        batch_size=256,\n        shuffle=True,\n        num_workers=workers,\n        pin_memory=pin_memory\n    )\n    return train_loader, val_loader, train_loader2","sub_path":"classification/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"57038810","text":"from flask import Flask\nfrom api_wrapper import api_call\nfrom flasgger import Swagger, swag_from\nimport settings\n\n\napp = Flask(__name__)\nswagger = Swagger(app)\n\n# Assume not running via wsgi\nif __name__ == '__main__':\n    from prometheus_flask_exporter import PrometheusMetrics\n    metrics = PrometheusMetrics(app, group_by='url_rule', defaults_prefix=settings.METRICS_PREFIX)\n# Assume running via wsgi\nelse:\n    from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics\n    metrics = GunicornPrometheusMetrics(app, group_by='url_rule', defaults_prefix=settings.METRICS_PREFIX)\n\nmetrics.info('app_info', settings.APP_NAME)\n\n\n@swag_from('./swagger/test.yaml')\n@app.route('/test/<t>')\n@api_call()\ndef test(t):\n    return t\n\n\n@app.route('/health')\n@api_call()\n@metrics.do_not_track()\ndef health():\n    return True\n\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=80, debug=False)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"427927727","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\nfrom AppTest.Common import *\ntitle = \"new_fl\"\n\n\nclass MyTestCase(unittest.TestCase):\n    @classmethod\n    def setUp(self):\n        self.case_name = os.path.basename(__file__)\n        self.driver = deviceDriver.mydriver(self)\n        BaseOperate.installApp(self, Content.app_name)\n\n    @classmethod\n    def tearDown(self):\n        BaseOperate.report_screen_shot(self, self.case_name)\n        BaseOperate.uninstallApp(self, PhoneControl.package_name)\n        BaseOperate.quit(self)\n\n    def test_step(self):\n        u\"\"\"Search for a friend on the contacts screen\"\"\"\n        logger.info(\"Open the app\")\n        BaseOperate.startActivity(self, 
PhoneControl.package_name, PhoneControl.activity_name)\n\n        logger.info(\"Tap to log in\")\n        BaseOperate.app_login(self, Content.register_count, Content.login_password)\n\n        logger.info(\"Open the My Profile screen\")\n        BaseOperate.touchById(self, PhoneControl.id_me_icon)\n\n        logger.info(\"Open my avatar\")\n        BaseOperate.touchById(self, PhoneControl.id_profile_layout)\n\n        logger.info(\"Tap the 'Choose Image' option\")\n        BaseOperate.touch_text_by_class_name(self, PhoneControl.class_name_TextView, \"选择图片\")  # on-screen label 'Choose Image', kept in Chinese to match the UI\n\n        logger.info(\"Verify that the image picker screen opened\")\n        text = BaseOperate.get_text_by_id(self, PhoneControl.id_head_actionmode_title)\n        self.assertEqual(text, \"选择图片\")  # expects the Chinese on-screen title 'Choose Image'\n\n        logger.info(\"Tap the back button\")\n        BaseOperate.touchById(self, PhoneControl.id_head_select_left)\n\n        logger.info(\"Verify that navigating back succeeded\")\n        text = BaseOperate.get_text_by_id(self, PhoneControl.id_toolbar_title_tv)\n        self.assertEqual(\"个人资料\", text)  # expects the Chinese on-screen title 'Personal Profile'\n\n\n","sub_path":"AppTest/testCase/Sprint6/case_Sprint6_android_icon_0011.py","file_name":"case_Sprint6_android_icon_0011.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"125287845","text":"import os\nimport shutil\nimport argparse\n\nfont_file_name = 'Arial.ttf'\n\ndef getArgumentsCMD():\n    parser = argparse.ArgumentParser(description='Post build event')\n    parser.add_argument('-a', required=True, help='architecture target platform')\n    parser.add_argument('-p', required=True, help='project directory')\n    parser.add_argument('-b', required=True, help='binaries directory')\n    parser.add_argument('-o', required=True, help='output directory')\n    return parser.parse_args()\n\ndef main():\n    global font_file_name\n    args = getArgumentsCMD()\n    print('arguments received', args)\n    \n    print('Copy binaries... ', end='')\n    try:\n        files = os.listdir(args.b)\n        for file_name in files:\n            full_file_name = os.path.join(args.b, file_name)\n            if os.path.isfile(full_file_name):\n                shutil.copy(full_file_name, args.o)\n        print('SUCCESS!')\n    except Exception:\n        print('LOSE :(')\n    print('Copy font... 
', end='')\n try:\n full_file_name = os.path.join(args.p, font_file_name)\n shutil.copy(full_file_name, args.o)\n print('SUCCESS!')\n except Exception:\n print('LOSE :(')\n \nif __name__ == '__main__':\n main()\n","sub_path":"SumulationBilliardBalls/PostBuild/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"387884466","text":"# import modules\nfrom myvar import *\n\n# calculates sample standard deviation\ndef get_sd(list_values):\n if(len(list_values) <= 1):\n return -1\n return np.std(list_values,dtype=np.float64,ddof=1)\n\n# finds percentage value\ndef to_percent(list_values, list_total):\n percent_list = []\n for x in range(len(list_values)):\n percent = list_values[x]/list_total[x]\n percent_list.append(percent)\n return percent_list\n\n# calculates stats for experiments\ndef calc_stats(test1, test2, test3, test4, total):\n print('avg= t1: {}, t2: {}, t3: {}, t4: {}, total: {}'.format(np.average(test1), np.average(test2), np.average(test3),np.average(test4), np.average(total)))\n t1_percent = to_percent(test1,total)\n t2_percent = to_percent(test2,total)\n t3_percent = to_percent(test3,total)\n\n other_percent = []\n for x in range(len(total)):\n other_percent.append(1-t1_percent[x]-t2_percent[x]-t3_percent[x])\n\n print('sd(%)= t1: {}, t2: {}, t3: {}, other: {} total: {}'.format(get_sd(t1_percent), get_sd(t2_percent), get_sd(t3_percent), get_sd(other_percent),get_sd(total)))\n print('len: {}'.format(len(total)))\n\n# copies a number of docs from collection to another\ndef copy_doc_to_new(source_collection, destination_collection, num_docs):\n counter = 0\n cursor = source_collection.find()\n for doc in cursor:\n destination_collection.insert_one(doc)\n counter += 1\n if(counter == num_docs):\n break\n\n# calculates execution time for function\ndef time_consumed(func, args, loop = 1):\n print('received args: {}'.format(*args))\n results = []\n start_time = time.time()\n for x in range(loop):\n results.append(func(*args))\n time_consumed = time.time()-start_time\n if(loop == 1):\n results = results[0]\n return [results, time_consumed]\n","sub_path":"data processing/archived/testing_functions.py","file_name":"testing_functions.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"80571942","text":"r\"\"\"\nkmltools module: $CLAW/geoclaw/src/python/geoclaw/kmltools.py\n\nTools to make kml files to overlay on Google Earth.\nNote that color is in KML format, BGR with 2 hex digits for each, e.g.\n\n FF0000 is blue, 00FF00 is green, 0000FF is red, 00FF00 is yellow.\n\nActually it's an 8 hex digit number, where the first two digits are\ntransparency, but in this module these default to 'FF' (but you can specify\nthe full 8 digits if you want it transparent).\n\n:Functions:\n - deg2dms - convert decimal degrees to (degrees, minutes, seconds)\n - regions2kml - create a kml outline for each regions specified in setrun\n - box2kml - create a kml outline from a rectangular box\n - quad2kml - create a kml outline for an arbitrary quadrilateral\n - poly2kml - create a kml outline for an arbitrary polygon\n - line2kml - create a kml line connecting 2 points\n - gauges2kml - create a kml marker for each gauge specified in setrun\n - kml_header - used internally\n - kml_footer - used internally\n - kml_region - used internally\n - kml_gauge - used internally\n\n - 
strip_archive_extensions - strip off things like .tar or .gz\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom six.moves import range\n\ndef f2s(x, num_digits=6):\n r\"\"\"\n Convert float to string in fixed point notation with at most\n *num_digits* digits of precision and trailing zeros removed, \n for printing nicely in kml description boxes.\n \"\"\"\n format = '%' + '.%sf' % num_digits\n s = (format % x).rstrip('0')\n return s\n \ndef deg2dms(dy):\n r\"\"\"\n Convert decimal degrees to tuple (degrees, minutes, seconds)\n \"\"\"\n\n from numpy import floor\n dy_deg = floor(dy)\n dy_min = floor((dy-dy_deg)*60.)\n dy_sec = (dy-dy_deg-dy_min/60.)*3600.\n return dy_deg,dy_min,dy_sec\n\n\ndef regions2kml(rundata=None,fname='regions.kml',verbose=True,combined=True):\n\n \"\"\"\n Create a KML box for each AMR region specified for a GeoClaw run.\n\n :Inputs:\n\n - *rundata* - an object of class *ClawRunData* or None\n\n If *rundata==None*, try to create based on executing function *setrun*\n from the `setrun.py` file in the current directory.\n\n - *fname* (str) - resulting kml file.\n\n - *verbose* (bool) - If *True*, print out info about each region found\n\n - *combined* (bool) - If *True*, combine into single kml file with\n name given by *fname*. This is the default. \n If False, *fname* is ignored and individual files are created for\n each region with names are Domain.kml, Region00.kml, etc.\n These will show up separately in GoogleEarth so they can be turned\n on or off individually.\n\n First create a box for the entire domain (in red) and then a box\n for each region (in white).\n\n :Example:\n\n >>> from clawpack.geoclaw import kmltools\n >>> kmltools.regions2kml()\n\n is equivalent to:\n\n >>> from clawpack.geoclaw import kmltools\n >>> from setrun import setrun\n >>> rundata = setrun()\n >>> kmltools.regions2kml(rundata)\n\n By default this creates a file named *regions.kml* that can be opened in\n Google Earth.\n\n \"\"\"\n\n from numpy import cos,pi,floor\n\n if rundata is None:\n try:\n import setrun\n reload(setrun)\n rundata = setrun.setrun()\n except:\n raise IOError(\"*** cannot execute setrun file\")\n\n clawdata = rundata.clawdata\n x1,y1 = clawdata.lower[0:]\n x2,y2 = clawdata.upper[0:]\n description = \" x1 = %s, x2 = %s\\n\" % (f2s(x1),f2s(x2)) \\\n + \" y1 = %s, y2 = %s\\n\" % (f2s(y1),f2s(y2))\n\n mx,my = clawdata.num_cells[0:]\n dx = (x2-x1)/float(mx)\n dx_meters = dx*111e3*cos(pi*0.5*(y1+y2)/180.)\n dy = (y2-y1)/float(my)\n dy_meters = dy*111e3\n if verbose:\n print(\"Domain: %10.6f %10.6f %10.6f %10.6f\" % (x1,x2,y1,y2))\n dx_deg,dx_min,dx_sec = deg2dms(dx)\n dy_deg,dy_min,dy_sec = deg2dms(dy)\n #print \"Level 1 resolution: dx = %g deg, %g min, %g sec = %g meters\" \\\n # % (dx_deg,dx_min,dx_sec,dx_meters)\n levtext = \"Level 1 resolution: dy = %g deg, %g min, %g sec = %g meters\\n\" \\\n % (dy_deg,dy_min,dy_sec,dy_meters)\n if verbose:\n print(levtext)\n description = description + levtext\n\n amr_levels_max = rundata.amrdata.amr_levels_max\n refinement_ratios_y = rundata.amrdata.refinement_ratios_y\n num_ref_ratios = len(refinement_ratios_y)\n if amr_levels_max > num_ref_ratios+1:\n raise IOError(\"*** Too few refinement ratios specified for \" \\\n + \"amr_levels_max = %i\" % amr_levels_max)\n dy_levels = (num_ref_ratios+1) * [dy]\n for k,r in enumerate(refinement_ratios_y):\n level = k+2\n dy = dy_levels[k] / r\n dy_levels[k+1] = dy\n dy_meters = dy*111e3\n dy_deg,dy_min,dy_sec = deg2dms(dy)\n levtext = \"Level %s 
resolution: dy = %g deg, %g min, %g sec = %g meters (refined by %i)\\n\" \\\n % (level,dy_deg,dy_min,dy_sec,dy_meters,r)\n if verbose:\n print(levtext)\n description = description + levtext\n\n if verbose:\n print(\"Allowing maximum of %i levels\" % amr_levels_max)\n\n elev = 0.\n if not combined:\n fname = 'Domain.kml'\n\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['elev'] = elev\n mapping['name'] = 'Computational Domain'\n mapping['desc'] = description\n mapping['color'] = \"0000FF\" # red\n mapping['width'] = 2\n\n region_text = kml_region(mapping)\n kml_text = kml_text + region_text\n\n if not combined:\n kml_text = kml_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n \n\n regions = rundata.regiondata.regions\n if len(regions)==0 and verbose:\n print(\"No regions found in setrun.py\")\n\n\n for rnum,region in enumerate(regions):\n if not combined:\n fname = 'Region_%s.kml' % str(rnum).zfill(2)\n kml_text = kml_header(fname)\n\n minlevel,maxlevel = region[0:2]\n t1,t2 = region[2:4]\n x1,x2,y1,y2 = region[4:]\n\n if verbose:\n print(\"Region %i: %10.6f %10.6f %10.6f %10.6f\" \\\n % (rnum,x1,x2,y1,y2))\n print(\" minlevel = %i, maxlevel = %i\" \\\n % (minlevel,maxlevel) \\\n + \" t1 = %s, t2 = %s\" % (f2s(t1),f2s(t2)))\n mapping = {}\n mapping['minlevel'] = minlevel\n mapping['maxlevel'] = maxlevel\n mapping['t1'] = t1\n mapping['t2'] = t2\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['elev'] = elev\n mapping['name'] = 'Region %i' % rnum\n description = \"minlevel = %i, maxlevel = %i\\n\" % (minlevel,maxlevel) \\\n + \" t1 = %s, t2 = %s\\n\" % (f2s(t1),f2s(t2)) \\\n + \" x1 = %s, x2 = %s\\n\" % (f2s(x1),f2s(x2)) \\\n + \" y1 = %s, y2 = %s\\n\\n\" % (f2s(y1),f2s(y2))\n if len(dy_levels) >= minlevel:\n dy = dy_levels[minlevel-1]\n dy_deg,dy_min,dy_sec = deg2dms(dy)\n dy_meters = dy*111e3\n levtext = \"Level %s resolution: \\ndy = %g deg, %g min, %g sec \\n= %g meters\\n\" \\\n % (minlevel,dy_deg,dy_min,dy_sec,dy_meters)\n description = description + levtext\n if (maxlevel > minlevel) and (len(dy_levels) >= maxlevel):\n dy = dy_levels[maxlevel-1]\n dy_deg,dy_min,dy_sec = deg2dms(dy)\n dy_meters = dy*111e3\n levtext = \"\\nLevel %s resolution: \\ndy = %g deg, %g min, %g sec \\n= %g meters\\n\" \\\n % (maxlevel,dy_deg,dy_min,dy_sec,dy_meters)\n description = description + levtext\n mapping['desc'] = description\n mapping['color'] = \"FFFFFF\" # white\n mapping['width'] = 3\n\n region_text = kml_region(mapping)\n kml_text = kml_text + region_text\n if not combined:\n kml_text = kml_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n if combined:\n kml_text = kml_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\ndef line2kml(xy,fname='line.kml',name='line',color='00FFFF',width=3,\n verbose=True):\n \"\"\"\n Make a KML line with default color yellow.\n\n :Inputs:\n\n - *xy* a tuple ((x1,x2),(y1,y2)) (preferred) \n or (x1,x2,y1,y2) (for backward compatibility)\n - *fname* (str) name of resulting kml file\n - *name* (str) name to appear on line on Google Earth\n - *color* (str) Color in format aabbggrr\n - *width* (str) line width\n - *verbose* (bool) - If *True*, print out info\n\n 
\"\"\"\n \n if type(xy[0]) is tuple:\n x1,x2 = xy[0]\n y1,y2 = xy[1]\n else:\n x1,x2,y1,y2 = xy[0:]\n\n if verbose:\n print(\"Line: %10.6f %10.6f %10.6f %10.6f\" % (x1,x2,y1,y2))\n\n elev = 0.\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['elev'] = elev\n mapping['name'] = name\n mapping['desc'] = \" x1 = %s, x2 = %s\\n\" % (f2s(x1),f2s(x2)) \\\n + \" y1 = %s, y2 = %s\" % (f2s(y1),f2s(y2))\n mapping['color'] = color\n mapping['width'] = width\n\n region_text = kml_line(mapping)\n\n kml_text = kml_text + region_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\ndef box2kml(xy,fname=None,name='box',color='FF0000',width=3,verbose=True):\n \"\"\"\n Make a KML box with default color blue.\n\n :Inputs:\n\n - *xy* a tuple ((x1,x2),(y1,y2)) (preferred) \n or (x1,x2,y1,y2) (for backward compatibility)\n - *fname* (str) name of resulting kml file\n - *name* (str) name to appear in box on Google Earth\n - *color* (str) Color in format aabbggrr\n - *width* (str) line width\n - *verbose* (bool) - If *True*, print out info\n\n \"\"\"\n\n if fname is None:\n fname = name + '.kml'\n\n if type(xy[0]) is tuple:\n x1,x2 = xy[0]\n y1,y2 = xy[1]\n else:\n x1,x2,y1,y2 = xy[0:]\n\n if verbose:\n print(\"Box: %10.6f %10.6f %10.6f %10.6f\" % (x1,x2,y1,y2))\n\n elev = 0.\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['elev'] = elev\n mapping['name'] = name\n mapping['desc'] = \" x1 = %s, x2 = %s\\n\" % (f2s(x1),f2s(x2)) \\\n + \" y1 = %s, y2 = %s\" % (f2s(y1),f2s(y2))\n mapping['color'] = color\n mapping['width'] = width\n\n region_text = kml_region(mapping)\n\n kml_text = kml_text + region_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\ndef quad2kml(xy,fname=None,name='quad',color='FF0000',width=3,verbose=True):\n \"\"\"\n Make a KML quadrilateral with default color blue.\n\n :Inputs:\n\n - *xy* a tuple ((x1,x2,x3,x4),(y1,y2,y3,y4)) (preferred) \n or (x1,x2,y1,y2,x3,y3,x4,y4) (for backward compatibility)\n - *fname* (str) name of resulting kml file\n - *name* (str) name to appear in box on Google Earth\n - *color* (str) Color in format aabbggrr\n - *width* (str) line width\n - *verbose* (bool) - If *True*, print out info\n\n \"\"\"\n\n if fname is None:\n fname = name + '.kml'\n\n if type(xy[0]) is tuple:\n x1,x2,x3,x4 = xy[0]\n y1,y2,y3,y4 = xy[1]\n else:\n x1,y1,x2,y2,x3,y3,x4,y4 = xy[0:]\n\n if verbose:\n print(\"Quadrilateral: %10.6f %10.6f\" % (x1,y1))\n print(\" %10.6f %10.6f\" % (x2,y2))\n print(\" %10.6f %10.6f\" % (x3,y3))\n print(\" %10.6f %10.6f\" % (x4,y4))\n\n elev = 0.\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x1'] = x1\n mapping['x2'] = x2\n mapping['x3'] = x3\n mapping['x4'] = x4\n mapping['y1'] = y1\n mapping['y2'] = y2\n mapping['y3'] = y3\n mapping['y4'] = y4\n mapping['elev'] = elev\n mapping['name'] = name\n mapping['desc'] = \" x1 = %s, y1 = %s\\n\" % (f2s(x1),f2s(y1)) \\\n + \" x2 = %s, y2 = %s\" % (f2s(x2),f2s(y2)) \\\n + \" x3 = %s, y3 = %s\" % (f2s(x3),f2s(y3)) \\\n + \" x4 = %s, y4 = %s\" % (f2s(x4),f2s(y4))\n mapping['color'] = color\n mapping['width'] = 3\n\n region_text = kml_region(mapping)\n\n kml_text = kml_text + region_text + kml_footer()\n kml_file = open(fname,'w')\n 
kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\ndef poly2kml(xy,fname=None,name='poly',color='00FF00', width=3,\n verbose=True):\n \"\"\"\n Make a KML polygon with default color blue.\n\n :Inputs:\n\n - *xy* a tuple (x,y) where x and y are lists of vertices\n - *fname* (str) name of resulting kml file\n - *name* (str) name to appear in box on Google Earth\n - *color* (str) Color in format aabbggrr\n - *width* (str) line width\n - *verbose* (bool) - If *True*, print out info\n\n \"\"\"\n\n if fname is None:\n fname = name + '.kml'\n\n x,y = xy\n\n if verbose:\n print(\"Polygon: %10.6f %10.6f\" % (x[0],y[0]))\n for j in range(1,len(x)):\n print(\" %10.6f %10.6f\" % (x[j],y[j]))\n\n elev = 0.\n kml_text = kml_header(fname)\n\n mapping = {}\n mapping['x'] = x\n mapping['y'] = y\n mapping['elev'] = elev\n mapping['name'] = name\n d = \" x[0] = %s, y[0] = %s\\n\" % (x[0],y[0]) \n for j in range(1,len(x)):\n d = d + \" x[%i] = %s, y[%i] = %s\" % (j,f2s(x[j]),j,f2s(y[j]))\n mapping['desc'] = d\n mapping['color'] = color\n mapping['width'] = width\n\n v = \"\\n\"\n for j in range(len(x)):\n v = v + \"%s,%s,%s\\n\" % (f2s(x[j]),f2s(y[j]),f2s(elev))\n v = v + \"%s,%s,%s\\n\" % (f2s(x[0]),f2s(y[0]),f2s(elev))\n v.replace(' ','')\n \n region_text = kml_region(mapping, v)\n for j in range(1,len(x)):\n d = d + \" x[%i] = %s, y[%i] = %s\" % (j,f2s(x[j]),j,f2s(y[j]))\n\n kml_text = kml_text + region_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\ndef gauges2kml(rundata=None, fname='gauges.kml', verbose=True):\n\n \"\"\"\n\n Create a KML marker for each gauge specified for a GeoClaw run.\n\n :Inputs:\n\n - *rundata* - an object of class *ClawRunData* or None\n\n If *rundata==None*, try to create based on executing function *setrun*\n from the `setrun.py` file in the current directory.\n\n - *fname* (str) - resulting kml file.\n\n - *verbose* (bool) - If *True*, print out info about each region found\n\n\n :Example:\n\n >>> from clawpack.geoclaw import kmltools\n >>> kmltools.gauges2kml()\n\n is equivalent to:\n\n >>> from clawpack.geoclaw import kmltools\n >>> from setrun import setrun\n >>> rundata = setrun()\n >>> kmltools.gauges2kml(rundata)\n\n By default this creates a file named *gauges.kml* that can be opened in\n Google Earth.\n\n \"\"\"\n\n\n if rundata is None:\n try:\n import setrun\n reload(setrun)\n rundata = setrun.setrun()\n except:\n raise IOError(\"*** cannot execute setrun file\")\n\n elev = 0.\n kml_text = kml_header(fname)\n\n\n gauges = rundata.gaugedata.gauges\n if len(gauges)==0 and verbose:\n print(\"No gauges found in setrun.py\")\n\n\n for rnum,gauge in enumerate(gauges):\n t1,t2 = gauge[3:5]\n x1,y1 = gauge[1:3]\n gaugeno = gauge[0]\n if verbose:\n print(\"Gauge %i: %s, %s \\n\" % (gaugeno,f2s(x1),f2s(y1)) \\\n + \" t1 = %s, t2 = %s\" % (f2s(t1),f2s(t2)))\n mapping = {}\n mapping['gaugeno'] = gaugeno\n mapping['t1'] = t1\n mapping['t2'] = t2\n mapping['x1'] = x1\n mapping['y1'] = y1\n mapping['elev'] = elev\n mapping['name'] = 'Gauge %i' % rnum\n description = \" t1 = %s, t2 = %s\\n\" % (f2s(t1),f2s(t2)) \\\n + \" x1 = %s, y1 = %s\\n\" % (f2s(x1),f2s(y1))\n mapping['desc'] = description\n\n gauge_text = kml_gauge(mapping)\n kml_text = kml_text + gauge_text\n kml_text = kml_text + kml_footer()\n kml_file = open(fname,'w')\n kml_file.write(kml_text)\n kml_file.close()\n if verbose:\n print(\"Created \",fname)\n\n\n\ndef kml_header(name='GeoClaw kml 
file'):\n header = \"\"\"\n\n%s\n\"\"\" % name\n return header\n\ndef kml_footer():\n footer = \"\"\"\n\n\n\"\"\"\n return footer\n\n\ndef kml_region(mapping, vertex_text=None):\n\n if vertex_text is None:\n if 'x3' in mapping:\n # quadrilateral with 4 corners specified\n vertex_text = \"\"\"\n{x1:.9f},{y1:.9f},{elev:.9f}\n{x2:.9f},{y2:.9f},{elev:.9f}\n{x3:.9f},{y3:.9f},{elev:.9f}\n{x4:.9f},{y4:.9f},{elev:.9f}\n{x1:.9f},{y1:.9f},{elev:.9f}\n\"\"\".format(**mapping).replace(' ','')\n\n else:\n # rectangle with 2 corners specified\n vertex_text = \"\"\"\n{x1:.9f},{y1:.9f},{elev:.9f}\n{x2:.9f},{y1:.9f},{elev:.9f}\n{x2:.9f},{y2:.9f},{elev:.9f}\n{x1:.9f},{y2:.9f},{elev:.9f}\n{x1:.9f},{y1:.9f},{elev:.9f}\n\"\"\".format(**mapping).replace(' ','')\n\n mapping['vertices'] = vertex_text\n if len(mapping['color'])==6:\n mapping['color'] = 'FF' + mapping['color']\n\n kml_text = \"\"\"\n\n{name:s}\n{desc:s}\n#Path\n\n1\nclampToGround\n\n{vertices:s}\n\n\n\n\"\"\".format(**mapping)\n\n return kml_text\n\ndef kml_line(mapping):\n\n if len(mapping['color'])==6:\n mapping['color'] = 'FF' + mapping['color']\n\n line_text = \"\"\"\n{x1:.9f},{y1:.9f},{elev:.9f}\n{x2:.9f},{y2:.9f},{elev:.9f}\n\"\"\".format(**mapping).replace(' ','')\n\n mapping['line'] = line_text\n kml_text = \"\"\"\n\n{name:s}\n{desc:s}\n#Path\n\n1\nclampToGround\n\n{line:s}\n\n\n\n\"\"\".format(**mapping)\n\n return kml_text\n\ndef kml_gauge(mapping):\n gauge_text = \"{x1:.9f},{y1:.9f},{elev:.9f}\".format(**mapping).replace(' ','')\n\n mapping['gauge'] = gauge_text\n\n kml_text = \"\"\"\nGauge {gaugeno:d}\n{desc:s}\n#markerstyle\n\n\n{gauge:s}\n\n\n\n\"\"\".format(**mapping)\n\n return kml_text\n\n\n\ndef kml_timespan(t1,t2,event_time=None,tz=None,tscale=1):\n\n r\"\"\"\n Create time strings necessary for sliders in Google Earth. The time\n span will cover time [t1,t2], with the start of the event given by\n event_time.\n\n [t1,t2] : time span,\n\n event_time : Start of event in UTC : [Y,M,D,H,M,S], e.g. [2010,2,27,3,34,0]\n tz : time zone offset to UTC. e.g. +3 for Chile; -9 for Japan.\n\n Time span element looks like ::\n\n \n 2010-02-27T06:34:00+03:00\n 2010-02-27T07:04:00+03:00\n \n\n As for how well this handles Daylight Savings time, here is what the documentation\n on the Python 'time' module has to say :\n\n \"DST is Daylight Saving Time, an adjustment of the timezone by (usually) one hour\n during part of the year. DST rules are magic (determined by local law) and can\n change from year to year. The C library has a table containing the local rules\n (often it is read from a system file for flexibility) and is the only source of\n True Wisdom in this respect.\"\n\n \"\"\"\n\n t1 = t1*tscale # Time converted to seconds\n t2 = t2*tscale\n\n import time\n # to adjust time from UTC to time in event locale.\n if event_time == None:\n # Use local time.\n starttime = time.mktime(time.localtime()) # seconds UTC\n tz_offset = time.timezone/3600.0 # in seconds\n else:\n ev = tuple(event_time) + (0,0,0) # Extend to 9 tuple; no DST\n # mktime returns time in seconds + timezone offset, i.e. seconds UTC\n # Subtract out the timezone offset here, since it will get added back\n # in when we do gmtime(starttime + ...) below.\n starttime = time.mktime(ev) - time.timezone\n if tz is None:\n print(\"===> Time zone offset not defined; assuming zero offset. 
\" \\\n \"Set plotdata.kml_tz_offset to define an offset (in hours) from \"\\\n \"UTC (positive west of UTC; negative east of UTC)\")\n tz = 0\n\n tz_offset = tz\n\n if (tz_offset == None):\n tzstr = \"Z\" # no offset; could also just set to \"+00:00\"\n else:\n # Google Earth will show time slider time in local time, where\n # local + offset = UTC.\n tz_offset = tz_offset*3600. # Offset in seconds\n tz = time.gmtime(abs(tz_offset))\n if (tz_offset > 0):\n tzstr = time.strftime(\"+%H:%M\",tz) # Time to UTC\n else:\n tzstr = time.strftime(\"-%H:%M\",tz)\n\n # Get time strings for start and end of time span\n gbegin = time.gmtime(starttime + t1)\n timestrbegin = \"%s%s\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", gbegin),tzstr)\n\n gend = time.gmtime(starttime + t2)\n timestrend = \"%s%s\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", gend),tzstr)\n\n return timestrbegin,timestrend\n\ndef topo2kml(topo_file_name, topo_type, color='00FF00'): \n \"\"\"\n Create a kml file putting a box around the region covered by a topofile.\n Color is green by default.\n \"\"\"\n\n import os\n from clawpack.geoclaw import topotools\n topo = topotools.Topography(topo_file_name, topo_type=topo_type)\n topo.read_header()\n xy = topo.extent\n name = os.path.splitext(os.path.split(topo_file_name)[-1])[0]\n file_name = '%s.kml' % name\n box2kml(xy, file_name, name, color)\n\ndef dtopo2kml(dtopo_file_name, dtopo_type, color='8888FF'): \n \"\"\"\n Create a kml file putting a box around the region covered by a dtopofile.\n Color is pink by default.\n \"\"\"\n\n import os\n from clawpack.geoclaw import dtopotools\n dtopo = dtopotools.DTopography()\n dtopo.read(dtopo_file_name, dtopo_type)\n x1 = dtopo.x.min()\n x2 = dtopo.x.max()\n y1 = dtopo.y.min()\n y2 = dtopo.y.max()\n xy = (x1,x2,y1,y2)\n name = os.path.splitext(os.path.split(dtopo_file_name)[-1])[0]\n file_name = '%s.kml' % name\n box2kml(xy, file_name, name, color)\n \n\ndef make_input_data_kmls(rundata):\n \"\"\"\n Produce kml files for the computational domain, all gauges and regions \n specified, and all topo and dtopo files specified in rundata.\n This can be used, e.g. by adding the lines \n\n from clawpack.geoclaw import kmltools\n kmltools.make_input_data_kmls(rundata)\n\n to the end of a `setrun.py` file so that `make data` will generate all\n kml files in addition to the `*.data` files.\n \"\"\"\n \n import os\n from . 
import topotools, dtopotools\n\n regions2kml(rundata, combined=False)\n gauges2kml(rundata)\n\n topofiles = rundata.topo_data.topofiles\n for f in topofiles:\n topo_file_name = f[-1]\n topo_type = f[0]\n topo2kml(topo_file_name, topo_type)\n \n dtopofiles = rundata.dtopo_data.dtopofiles\n for f in dtopofiles:\n dtopo_file_name = f[-1]\n dtopo_type = f[0]\n dtopo2kml(dtopo_file_name, dtopo_type)\n \n \n","sub_path":"src/python/geoclaw/kmltools.py","file_name":"kmltools.py","file_ext":"py","file_size_in_byte":24411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"600603073","text":"from os import listdir, makedirs\nfrom os.path import basename, dirname, isdir, join\nfrom re import findall\nimport numpy as np\nimport pandas as pd\n\n\nclass XGD:\n \"\"\"Conversion methods for TerraSurveyor XGD file\"\"\"\n def __init__(self, path_to_xgd):\n #self.rdir = dirname(dirname(path_to_xgd))\n self.path_to_xgd = path_to_xgd\n self.lines = self._to_lines()\n self.meta = self._get_metadata()\n self.dummy = 2047.5\n #a = self.to_array()\n\n def _to_lines(self):\n \"\"\"Return xgd content as a list of lines\"\"\"\n with open(self.path_to_xgd, 'r') as f:\n lines = f.readlines()\n return(lines)\n \n def _get_metadata(self):\n \"\"\"Return xgd metadata to a dictionary\"\"\"\n l = [i for i in self.lines if not i.startswith('','').replace('\"','')\n grid_info = l[3].split()\n grid = lambda x: float(grid_info[x].split('=')[1].replace('\"',''))\n t_len, i_len, t_sep, i_sep = [grid(i) for i in [1,2,3,4]]\n d = {\n 'traverse_len': t_len,\n 'interval_len': i_len,\n 'traverse_sep': t_sep,\n 'interval_sep': i_sep,\n 'mode': mode,\n }\n return(d)\n\n def _line_to_xyz(self, line):\n \"\"\"\"\"\"\n a = line.split()[1:4]\n b = [findall(r'\\d+', i) for i in a]\n c = [float('.'.join(i)) for i in b]\n return(c)\n\n def _dummy_array(self):\n \"\"\"Return an array of dummy values\"\"\"\n m = self.meta\n x, y = self._array_shape()\n lst = [[i, j, self.dummy] for i in range(x) for j in range(y)]\n a = np.array(lst)[:,[1,0,2]]\n return(a)\n\n def _array_shape(self):\n \"\"\"Return the shape of the grid array\"\"\"\n m = self.meta\n x = int(m['interval_len'] / m['interval_sep'])\n y = int(m['traverse_len'] / m['traverse_sep'])\n return(x, y)\n \n def _pad_missing_dummies(self, lst):\n \"\"\"A situation occurred where dummy values were not recorded but the locations of missing values were evident from associated missing x and y values. 
This method acts to fill the missing value\"\"\"\n a = self._dummy_array()\n array = np.array(lst)\n interval_len = self._array_shape()[0]\n for i in array:\n x,y,v = i\n idx = int(x+y*interval_len)\n a[idx] = [x,y,v]\n return(a)\n\n def to_list(self):\n \"\"\"\"\"\"\n lst = [self._line_to_xyz(i) for i in self.lines if i.startswith('a\")\n # 用XPath的话, \"//\"表示任意级, \"/\"表示一级\n # 也��以用这个函数, 效果没差: .find_element_by_css_selector(\"//span[@class='txt']/a\")\n\n link_song = song.get_attribute(\"href\")\n title_song = records[i].find_element_by_css_selector(\"span.txt>a>b\").get_attribute(\"title\")\n\n title_song = title_song.replace(' ', ' ') # 去除奸奇空格, 替换为普通空格\n author = records[i].find_element_by_css_selector(\"div.text\").get_attribute(\"title\")\n\n print(i, title_song, link_song, author)\n # print(i)\n\n list.append((title_song, link_song, author))\n # return np.array(list)\n return list\n\n\n\ndef main():\n # 网址\n # url = \"https://music.163.com/playlist?id=3077285212\"\n\n url = \"https://music.163.com/#/playlist?id=2476409230\"\n\n # 选项\n options = Options() # 调用sele库进行设置\n options.add_argument(\"--headless\")\n # options.headless = True\n\n # 传入设置, 创建一个driver\n driver = webdriver.Firefox(options=options,\n executable_path=r\"D:\\Files\\Library_development\\geckodriver-v0.26.0-win64\\geckodriver.exe\")\n print(\" driver created\")\n\n # 存储歌单的csv文件(追加模式,'\\n'换行避免空行)\n file_csv = open(\"test.csv\", \"w\", encoding='utf-8')\n writer = csv.writer(file_csv, lineterminator='\\n')\n\n # writer.writerow([\"标题\", \"\"])\n list = []\n try:\n list = get_song_records(url, driver)\n except WebDriverException as wde:\n print(\" \", wde.args)\n print(\"=====\")\n print(traceback.format_exc()) # 打印详细信息\n\n print(list)\n\n # 关闭驱动, 这点很重要, 不然火狐的进程是不会死的\n driver.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"NetEaseCloudMusicCrawler_TEST/CRAWLER/crawler_songRecord.py","file_name":"crawler_songRecord.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"156794771","text":"import math, random\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport time, json\nfrom scipy.stats import multivariate_normal\nfrom Config import *\nfrom scipy.ndimage.filters import gaussian_filter\nimport scipy, socket, sys, os\nfrom pycocotools.coco import COCO\n\nconfig = Config()\nSHOW = False\n\nclass directed_graph(object):\n\tdef __init__(self, downsample = 8):\n\t\tself.v = []\n\t\tself.v_org = []\n\t\tself.e = []\n\t\tself.nb = []\n\t\treturn\n\n\tdef add_v(self, v):\n\t\tself.v.append(v)\n\t\tself.v_org.append((v[0] * 8 + 4, v[1] * 8 + 4))\n\t\tself.nb.append([])\n\t\treturn\n\n\tdef add_e(self, v1, v2, w = None):\n\t\tassert(v1 in range(len(self.v)))\n\t\tassert(v2 in range(len(self.v)))\n\t\tif w is None:\n\t\t\tw = self.dist(self.v[v1], self.v[v2])\n\t\tself.e.append((v1, v2, w))\n\t\tself.nb[v1].append((v2, w))\n\t\treturn\n\n\tdef dist(self, v1, v2):\n\t\tdiff = np.array(v1) - np.array(v2)\n\t\treturn np.sqrt(np.dot(diff, diff))\n\n\tdef spfa(self, source):\n\t\tdist = [1e9 for i in range(len(self.v))]\n\t\tprev = [None for i in range(len(self.v))]\n\t\tin_q = [False for i in range(len(self.v))]\n\t\tdist[source] = 0\n\t\tq = [source]\n\t\tin_q[source] = True\n\t\twhile len(q) > 0:\n\t\t\tu = q.pop(0)\n\t\t\tin_q[u] = False\n\t\t\tfor v, w in self.nb[u]:\n\t\t\t\talt = dist[u] + w\n\t\t\t\tif alt < dist[v]:\n\t\t\t\t\tdist[v] = alt\n\t\t\t\t\tprev[v] = u\n\t\t\t\t\tif not 
in_q[v]:\n\t\t\t\t\t\tin_q[v] = True\n\t\t\t\t\t\tq.append(v)\n\t\tdist = np.array(dist)\n\t\tdist[dist > 1e8] = -1e9\n\t\treturn dist, prev\n\n\tdef shortest_path_all(self):\n\t\tself.sp = []\n\t\tfor i in range(len(self.v)):\n\t\t\tself.sp.append(self.spfa(i))\n\t\tself.sp_max_idx = [np.argmax(dist) for dist, _ in self.sp]\n\t\tself.sp_idx_t = []\n\t\tfor dist, _ in self.sp:\n\t\t\tself.sp_idx_t.append([idx for idx, d in enumerate(list(dist)) if d > 0.5])\n\t\tself.sp_idx_s = [idx for idx, item in enumerate(self.sp_idx_t) if len(item) > 0]\n\t\treturn\n\ndef make_ellipse(p, pad = 10):\n\treturn [(p[0] - pad, p[1] - pad), (p[0] + pad, p[1] + pad)]\n\ndef rotate1(w, h, x, y):\n\treturn h, w, y, w - 1 - x\n\ndef rotateN(n, w, h, x, y):\n\tfor _ in range(n):\n\t\tw, h, x, y = rotate1(w, h, x, y)\n\treturn w, h, x, y\n\nclass VertexPool(object):\n\tdef __init__(self, v_out_res):\n\t\tself.v_out_res = v_out_res\n\t\tself.blank = np.zeros(self.v_out_res, dtype = np.uint8)\n\t\tself.vertex_pool = [[] for i in range(self.v_out_res[1])]\n\t\tfor i in range(self.v_out_res[1]):\n\t\t\tfor j in range(self.v_out_res[0]):\n\t\t\t\tself.vertex_pool[i].append(np.copy(self.blank))\n\t\t\t\tself.vertex_pool[i][j][i, j] = 255\n\t\t\t\tself.vertex_pool[i][j] = Image.fromarray(self.vertex_pool[i][j])\n\t\treturn\n\nvp = VertexPool(config.V_OUT_RES)\n\nclass DataGenerator(object):\n\tdef __init__(self, city_name, img_size, v_out_res, max_seq_len, mode = 'train'):\n\t\tassert(mode in ['train', 'val', 'test'])\n\t\tself.mode = mode\n\t\tself.city_name = city_name\n\t\tself.img_size = img_size\n\t\tself.v_out_res = v_out_res\n\t\tself.max_seq_len = max_seq_len\n\n\t\tself.TRAIN_ANNOTATIONS_PATH = config.PATH[city_name]['ann-train']\n\t\tself.VAL_ANNOTATIONS_PATH = config.PATH[city_name]['ann-val']\n\t\tself.TEST_ANNOTATIONS_PATH = config.PATH[city_name]['ann-test']\n\t\tself.TRAIN_IMAGES_DIRECTORY = config.PATH[city_name]['img-train']\n\t\tself.VAL_IMAGES_DIRECTORY = config.PATH[city_name]['img-val']\n\t\tself.TEST_IMAGES_PATH = config.PATH[city_name]['img-test']\n\n\t\tself.TEST_CURRENT = 0\n\t\tself.TEST_FLAG = True\n\t\tself.TEST_RESULT = []\n\n\t\tif self.mode == 'test':\n\t\t\tself.coco_test = COCO(self.TEST_ANNOTATIONS_PATH)\n\t\t\tself.TEST_IMAGES_DIRECTORY = config.PATH[city_name]['img-test']\n\t\t\tself.TEST_IMAGE_IDS = list(self.coco_test.getImgIds(catIds = self.coco_test.getCatIds()))\n\t\tif self.mode == 'val':\n\t\t\tself.coco_valid = COCO(self.VAL_ANNOTATIONS_PATH)\n\t\t\tself.TEST_IMAGES_DIRECTORY = config.PATH[city_name]['img-val']\n\t\t\tself.TEST_IMAGE_IDS = list(self.coco_valid.getImgIds(catIds = self.coco_valid.getCatIds()))\n\t\tif mode == 'train':\n\t\t\tself.coco_train = COCO(self.TRAIN_ANNOTATIONS_PATH)\n\t\t\tself.coco_valid = COCO(self.VAL_ANNOTATIONS_PATH)\n\t\t\tself.train_img_ids = self.coco_train.getImgIds(catIds = self.coco_train.getCatIds())\n\t\t\tself.train_ann_ids = self.coco_train.getAnnIds(catIds = self.coco_train.getCatIds())\n\t\t\tself.valid_img_ids = self.coco_valid.getImgIds(catIds = self.coco_valid.getCatIds())\n\t\t\tself.valid_ann_ids = self.coco_valid.getAnnIds(catIds = self.coco_valid.getCatIds())\n\n\t\t\ttrain_anns = self.coco_train.loadAnns(self.train_ann_ids)\n\t\t\tvalid_anns = self.coco_valid.loadAnns(self.valid_ann_ids)\n\n\t\t\tprint('Totally %d patches for train.' % len(self.train_ann_ids))\n\t\t\tprint('Totally %d patches for valid.' 
% len(self.valid_ann_ids))\n\n\t\t# \n\t\tself.blank = np.zeros(self.v_out_res, dtype = np.uint8)\n\t\tself.vertex_pool = [[] for i in range(self.v_out_res[1])]\n\t\tfor i in range(self.v_out_res[1]):\n\t\t\tfor j in range(self.v_out_res[0]):\n\t\t\t\tself.vertex_pool[i].append(np.copy(self.blank))\n\t\t\t\tself.vertex_pool[i][j][i, j] = 255\n\t\t\t\tself.vertex_pool[i][j] = Image.fromarray(self.vertex_pool[i][j])\n\t\treturn\n\n\tdef getSingleArea(self, mode, img_id, seq_id, rotate):\n\t\tif self.mode == 'train':\n\t\t\tassert(mode in ['train', 'val'])\n\t\telse:\n\t\t\tassert(mode == self.mode)\n\n\t\t# Rotate, anticlockwise\n\t\tif self.mode == 'train':\n\t\t\trotate_deg = rotate * 90\n\t\t\tif mode == 'train':\n\t\t\t\timg_info = self.coco_train.loadImgs([img_id])[0]\n\t\t\t\timage_path = os.path.join(self.TRAIN_IMAGES_DIRECTORY, img_info['file_name'])\n\t\t\t\tannotations = self.coco_train.loadAnns(self.coco_train.getAnnIds(imgIds = img_info['id']))\n\t\t\tif mode == 'val':\n\t\t\t\timg_info = self.coco_valid.loadImgs([img_id])[0]\n\t\t\t\timage_path = os.path.join(self.VAL_IMAGES_DIRECTORY, img_info['file_name'])\n\t\t\t\tannotations = self.coco_valid.loadAnns(self.coco_valid.getAnnIds(imgIds = img_info['id']))\n\t\telse:\n\t\t\tif mode == 'val':\n\t\t\t\timg_info = self.coco_valid.loadImgs([img_id])[0]\n\t\t\tif mode == 'test':\n\t\t\t\timg_info = self.coco_test.loadImgs([img_id])[0]\n\t\t\timage_path = os.path.join(self.TEST_IMAGES_DIRECTORY, img_info['file_name'])\n\n\t\timg = Image.open(image_path)\n\t\torg_w, org_h = img.size\n\t\tret_img = img.rotate(rotate_deg).resize(self.img_size)\n\n\t\tif SHOW:\n\t\t\tret_img.save('%d.png' % img_id)\n\n\t\tret_img = np.array(ret_img, np.float32)[..., 0: 3]\n\t\tif self.mode != 'train':\n\t\t\treturn ret_img\n\n\t\tassert(len(annotations) == 1)\n\t\tw8, h8 = self.v_out_res\n\t\tannotation = annotations[0]\n\n\t\tv_set = set()\n\t\tfor (x1, y1), (x2, y2) in annotation['segmentation']:\n\t\t\tv_set.add((x1, y1))\n\t\t\tv_set.add((x2, y2))\n\t\tv_li = list(v_set)\n\t\tv_li.sort()\n\t\tv_li_8 = [(round(x / (org_w - 1) * (w8 - 1)), round(y / (org_h - 1) * (h8 - 1))) for x, y in v_li]\n\t\tv_li_8_unique = list(set(v_li_8))\n\t\tv_li_8_unique.sort()\n\t\tv_li_8_d = {v: k for k, v in enumerate(v_li_8_unique)}\n\t\td = {v: v_li_8_d[v8] for v, v8 in zip(v_li, v_li_8)}\n\n\t\tedges = [(d[tuple(v1)], d[tuple(v2)]) for v1, v2 in annotation['segmentation']]\n\t\tpolygons = [[d[tuple(v)] for v in polygon] for polygon in annotation['polygons']]\n\n\t\tif len(v_li_8_unique) == 1:\n\t\t\tv_li_8_unique = []\n\t\t\tedges = []\n\t\t\tpolygons = []\n\n\t\tg = directed_graph()\n\t\tfor v in v_li_8_unique:\n\t\t\tg.add_v(rotateN(rotate, w8, h8, v[0], v[1])[2: 4])\n\t\tfor s, t in edges:\n\t\t\tif s != t:\n\t\t\t\tg.add_e(s, t)\n\t\tg.shortest_path_all()\n\n\t\tw8, h8 = rotateN(rotate, w8, h8, 0, 0)[0: 2]\n\n\t\t# Draw boundary and vertices\n\t\tboundary = Image.new('P', (w8, h8), color = 0)\n\t\tdraw = ImageDraw.Draw(boundary)\n\t\tfor e in g.e:\n\t\t\tdraw.line(list(g.v[e[0]]) + list(g.v[e[1]]), fill = 255, width = 1)\n\t\tif SHOW:\n\t\t\tboundary.resize(self.img_size).save('%d_b.png' % img_id)\n\t\tboundary = np.array(boundary) / 255.0\n\n\t\tvertices = Image.new('P', (w8, h8), color = 0)\n\t\tdraw = ImageDraw.Draw(vertices)\n\t\tfor i in range(len(g.v)):\n\t\t\tdraw.ellipse(make_ellipse(g.v[i], pad = 0), fill = 255, outline = 255)\n\t\tif SHOW:\n\t\t\tvertices.resize(self.img_size).save('%d_v.png' % img_id)\n\t\tvertices = np.array(vertices) / 255.0\n\n\t\t# 
RNN in and out\n\t\tvertex_terminals = []\n\t\tvertex_inputs = []\n\t\tvertex_outputs = []\n\t\tends = []\n\t\tseq_lens = []\n\t\tfor s in range(len(g.v)):\n\t\t\tif len(g.v) == 1:\n\t\t\t\tbreak\n\t\t\tt = int(np.random.choice(len(g.v), 1)[0])\n\t\t\twhile t == s:\n\t\t\t\tt = int(np.random.choice(len(g.v), 1)[0])\n\t\t\tdist, prev = g.sp[s]\n\t\t\tif dist[t] > 0:\n\t\t\t\tpath = []\n\t\t\t\tp = t\n\t\t\t\twhile p != s:\n\t\t\t\t\tpath.append(p)\n\t\t\t\t\tp = prev[p]\n\t\t\t\tpath.append(p)\n\t\t\t\tpath.reverse()\n\t\t\telse:\n\t\t\t\tpath = [s]\n\t\t\tpath_v = [g.v[idx] for idx in path]\n\t\t\tseq_len = len(path_v)\n\n\t\t\tvertex_input = [self.vertex_pool[r][c] for c, r in path_v]\n\t\t\tvertex_output = vertex_input[1:]\n\t\t\tvertex_terminal = [vertex_input[0], self.vertex_pool[g.v[t][1]][g.v[t][0]]]\n\n\t\t\twhile len(vertex_input) < self.max_seq_len:\n\t\t\t\tvertex_input.append(self.blank)\n\t\t\twhile len(vertex_output) < self.max_seq_len:\n\t\t\t\tvertex_output.append(self.blank)\n\t\t\tvertex_input = vertex_input[: self.max_seq_len]\n\t\t\tvertex_output = vertex_output[: self.max_seq_len]\n\n\t\t\tend = np.zeros([self.max_seq_len])\n\t\t\tif seq_len <= self.max_seq_len:\n\t\t\t\tend[seq_len - 1] = 1\n\n\t\t\tif SHOW:\n\t\t\t\tcolor = [0] + [1, 2] * 30\n\t\t\t\tfor seq, vvv in enumerate([vertex_input, vertex_output, vertex_terminal]):\n\t\t\t\t\tvisualize = np.zeros((self.v_out_res[1], self.v_out_res[0], 3), np.uint8)\n\t\t\t\t\tfor i, item in enumerate(vvv):\n\t\t\t\t\t\tvisualize[..., color[i]] = np.maximum(visualize[..., color[i]], np.array(item, np.uint8))\n\t\t\t\t\tImage.fromarray(visualize).resize(self.img_size).save('%d_%d.png' % (img_id, seq))\n\t\t\t\t# print(end)\n\t\t\t\t# print(len(path_v))\n\n\t\t\tvertex_input = [np.array(item) / 255.0 for item in vertex_input]\n\t\t\tvertex_output = [np.array(item) / 255.0 for item in vertex_output]\n\t\t\tvertex_terminal = [np.array(item) / 255.0 for item in vertex_terminal]\n\t\t\tvertex_inputs.append(vertex_input)\n\t\t\tvertex_outputs.append(vertex_output)\n\t\t\tvertex_terminals.append(vertex_terminal)\n\t\t\tends.append(end)\n\t\t\tseq_lens.append(min(seq_len, self.max_seq_len))\n\n\t\tseq_idx = seq_id * np.ones([len(vertex_terminals)], np.int32)\n\t\tvertex_inputs = np.array(vertex_inputs)\n\t\tvertex_outputs = np.array(vertex_outputs)\n\t\tvertex_terminals = np.array(vertex_terminals)\n\t\tends = np.array(ends)\n\t\tseq_lens = np.array(seq_lens)\n\n\t\t# print(ret_img.shape)\n\t\t# print(boundary.shape)\n\t\t# print(vertices.shape)\n\t\t# print(vertex_inputs.shape)\n\t\t# print(vertex_outputs.shape)\n\t\t# print(vertex_terminals.shape)\n\t\t# print(ends.shape)\n\t\t# print(seq_lens.shape)\n\n\t\t# if vertex_outputs.shape[0] > 0:\n\t\t# \tprint(np.reshape(vertex_inputs, [-1, self.max_seq_len, 28 * 28]).sum(axis = -1))\n\t\t# \tprint(np.reshape(vertex_terminals, [-1, 2, 28 * 28]).sum(axis = -1))\n\t\t# \tt1 = np.reshape(vertex_outputs, [-1, self.max_seq_len, 28 * 28])\n\t\t# \tt2 = ends[..., np.newaxis]\n\t\t# \ttt = np.concatenate([t1, t2], axis = -1)\n\t\t# \tttt = tt.sum(axis = -1)\n\t\t# \tprint(ttt)\n\t\t# \tprint(seq_lens)\n\t\t# \tinput()\n\n\t\treturn ret_img, boundary, vertices, vertex_inputs, vertex_outputs, vertex_terminals, ends, seq_lens, seq_idx\n\n\tdef getAreasBatch(self, batch_size, mode):\n\t\tres = []\n\t\trotate = random.choice([0, 1, 2, 3])\n\t\tif self.mode == 'train':\n\t\t\tassert(mode in ['train', 'val'])\n\t\t\twhile True:\n\t\t\t\tids = np.random.choice(self.train_img_ids, batch_size, replace = 
False)\n\t\t\t\tprint(ids, rotate)\n\t\t\t\tfor i in range(batch_size):\n\t\t\t\t\tres.append(self.getSingleArea('train', ids[i], i, rotate))\n\t\t\t\tnew_res = [np.array([item[i] for item in res]) for i in range(3)]\n\t\t\t\tfor i in range(3, 9):\n\t\t\t\t\tli = [item[i] for item in res if item[i].shape[0] > 0]\n\t\t\t\t\tif li:\n\t\t\t\t\t\tnew_res.append(np.concatenate(li, axis = 0))\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\t\t\tif len(new_res) != 9:\n\t\t\t\t\tprint('No paths in the images, re-generate ...')\n\t\t\t\t\tres = []\n\t\t\t\t\tcontinue\n\t\t\t\tassert(new_res[-1].shape[0] > 0)\n\t\t\t\tchoose = np.random.choice(new_res[-1].shape[0], config.TRAIN_NUM_PATH, replace = (new_res[-1].shape[0] < config.TRAIN_NUM_PATH))\n\t\t\t\tfor i in range(3, 9):\n\t\t\t\t\tnew_res[i] = new_res[i][choose]\n\t\t\t\tbreak\n\t\t\treturn new_res\n\n\n\ndef findPeaks(heatmap, sigma = 0, min_val = 0.5):\n\tth = 0\n\thmap = gaussian_filter(heatmap, sigma)\n\tmap_left = np.zeros(hmap.shape)\n\tmap_left[1:,:] = hmap[:-1,:]\n\tmap_right = np.zeros(hmap.shape)\n\tmap_right[:-1,:] = hmap[1:,:]\n\tmap_up = np.zeros(hmap.shape)\n\tmap_up[:,1:] = hmap[:,:-1]\n\tmap_down = np.zeros(hmap.shape)\n\tmap_down[:,:-1] = hmap[:,1:]\n\tmap_ul = np.zeros(hmap.shape)\n\tmap_ul[1:,1:] = hmap[:-1,:-1]\n\tmap_ur = np.zeros(hmap.shape)\n\tmap_ur[:-1,1:] = hmap[1:,:-1]\n\tmap_dl = np.zeros(hmap.shape)\n\tmap_dl[1:,:-1] = hmap[:-1,1:]\n\tmap_dr = np.zeros(hmap.shape)\n\tmap_dr[:-1,:-1] = hmap[1:,1:]\n\tsummary = np.zeros(hmap.shape)\n\tsummary += hmap>=map_left+th\n\tsummary += hmap>=map_right+th\n\tsummary += hmap>=map_up+th\n\tsummary += hmap>=map_down+th\n\tsummary += hmap>=map_dl+th\n\tsummary += hmap>=map_dr+th\n\tsummary += hmap>=map_ul+th\n\tsummary += hmap>=map_ur+th\n\tpeaks_binary = np.logical_and.reduce((summary >= 8, hmap >= min_val))\n\tpeaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse\n\tpeaks_with_score = [x + (heatmap[x[1],x[0]],) for x in peaks]\n\treturn peaks_with_score\n\ndef getAllTerminal(hmb, hmv):\n\tassert(hmb.shape == hmv.shape)\n\th, w = hmb.shape[0: 2]\n\tpeaks_with_score = findPeaks(hmv, min_val = 0.9)\n\tpeaks_with_score = [(x, y, s) for x, y, s in peaks_with_score if True or hmb[y, x] > 0.9]\n\tallTerminal = []\n\tindices = []\n\tpeaks_map = np.zeros([w, h], np.float32)\n\tedges_map = Image.new('P', (w, h), color = 0)\n\tdraw = ImageDraw.Draw(edges_map)\n\tfor i in range(len(peaks_with_score)):\n\t\tx1, y1, s1 = peaks_with_score[i]\n\t\tpeaks_map[y1, x1] = 1\n\t\tfor j in range(i + 1, len(peaks_with_score)):\n\t\t\tx2, y2, _ = peaks_with_score[j]\n\t\t\tallTerminal.append((\n\t\t\t\tnp.array([np.array(vp.vertex_pool[y1][x1]), np.array(vp.vertex_pool[y2][x2])]),\n\t\t\t\tnp.array([np.array(vp.vertex_pool[y2][x2]), np.array(vp.vertex_pool[y1][x1])])\n\t\t\t))\n\t\t\tindices.append((i, j))\n\n\t\t\ttemp = Image.new('P', (w, h), color = 0)\n\t\t\ttmp_draw = ImageDraw.Draw(temp)\n\t\t\ttmp_draw.line([x1, y1, x2, y2], fill = 255, width = 1)\n\t\t\ttemp = np.array(temp, np.float32) / 255.0\n\t\t\tif np.mean(hmb[temp > 0.5]) > 0.7:\n\t\t\t\tdraw.line([x1, y1, x2, y2], fill = 255, width = 1)\n\tedges_map = np.array(edges_map, np.float32) / 255.0\n\treturn edges_map, peaks_map, allTerminal, indices\n\ndef recoverMultiPath(img_size, paths):\n\tpathImgs = []\n\tres = np.zeros(img_size)\n\tfor i in range(len(paths)):\n\t\tpath = []\n\t\tfor j in range(paths[i].shape[0]):\n\t\t\thmap = paths[i][j]\n\t\t\tend = 1 - hmap.sum()\n\t\t\tind = 
np.unravel_index(np.argmax(hmap), hmap.shape)\n\t\t\tif hmap[ind] >= end:\n\t\t\t\tpath.append((ind[1] * 8 + 4, ind[0] * 8 + 4))\n\t\t\telse:\n\t\t\t\tbreak\n\t\tpathImg = Image.new('P', img_size, color = 0)\n\t\tdraw = ImageDraw.Draw(pathImg)\n\t\tdraw.line(path, fill = 1, width = 5)\n\t\tres += np.array(pathImg, np.float32)\n\t\tpathImgs.append(np.array(pathImg, np.float32))\n\tres = np.array((res - res.min()) * 255.0 / (res.max() - res.min() + 1e-9), np.uint8)\n\treturn res, pathImgs\n\n\nif __name__ == '__main__':\n\tdg = DataGenerator(sys.argv[1], config.AREA_SIZE, config.V_OUT_RES, config.MAX_NUM_VERTICES)\n\tfor i in range(10000):\n\t\tprint(i)\n\t\timg, boundary, vertices, vertex_inputs, vertex_outputs, vertex_terminals, ends, seq_lens, _ = dg.getAreasBatch(4, 'train')\n\n\n","sub_path":"road_path/DataGeneratorOld.py","file_name":"DataGeneratorOld.py","file_ext":"py","file_size_in_byte":14896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"516494901","text":"import chianti\nimport lasagne\nimport cv2\nimport dltools\nimport theano\nimport theano.tensor as T\nimport sys\nimport numpy as np\n\nsys.setrecursionlimit(10000)\n\nconfig = {\n \"num_classes\": 19,\n \"sample_factor\": 4,\n \"model_filename\": \"models/frrn_a.npz\",\n \"base_channels\": 48,\n \"fr_channels\": 32,\n \"cityscapes_folder\": \"/\"\n}\n\n########################################################################################################################\n# Ask for the cityscapes path\n########################################################################################################################\n\n\nconfig[\"cityscapes_folder\"] = dltools.utility.get_interactive_input(\n \"Enter path to CityScapes folder\",\n \"cache/cityscapes_folder.txt\",\n config[\"cityscapes_folder\"])\n\nconfig[\"model_filename\"] = dltools.utility.get_interactive_input(\n \"Enter model filename\",\n \"cache/model_frrn_a_filename.txt\",\n config[\"model_filename\"])\n\n########################################################################################################################\n# DEFINE THE NETWORK\n########################################################################################################################\n\nwith dltools.utility.VerboseTimer(\"Define network\"):\n # Define the theano variables\n input_var = T.ftensor4()\n\n builder = dltools.architectures.FRRNABuilder(\n base_channels=config[\"base_channels\"],\n lanes=config[\"fr_channels\"],\n multiplier=2,\n num_classes=config[\"num_classes\"]\n )\n network = builder.build(\n input_var=input_var,\n input_shape=(None, 3, 1024 // config[\"sample_factor\"], 2048 // config[\"sample_factor\"]))\n\n#######################################################################################################################\n# LOAD MODEL\n########################################################################################################################\n\nwith dltools.utility.VerboseTimer(\"Load model\"):\n network.load_model(config[\"model_filename\"])\n\n########################################################################################################################\n# COMPILE THEANO VAL FUNCTIONS\n########################################################################################################################\n\nwith dltools.utility.VerboseTimer(\"Compile validation function\"):\n test_predictions = lasagne.layers.get_output(network.output_layers, deterministic=True)[0]\n val_fn = 
theano.function(\n inputs=[input_var],\n outputs=test_predictions\n )\n\n########################################################################################################################\n# Visualize the data\n########################################################################################################################\n\nvalidation_provider = chianti.DataProvider(\n iterator=chianti.sequential_iterator(dltools.utility.get_image_label_pairs(config[\"cityscapes_folder\"], \"val\")),\n batchsize=1,\n augmentors=[\n chianti.cityscapes_label_transformation_augmentor(),\n chianti.subsample_augmentor(config[\"sample_factor\"]),\n ]\n)\n\nwhile True:\n batch = validation_provider.next()\n x = batch.imgs\n t = batch.targets\n\n # Process the image\n network_output = val_fn(x)\n # Obtain a prediction\n predicted_labels = np.argmax(network_output[0], axis=0)\n\n prediction_visualization = dltools.utility.create_color_label_image(predicted_labels)\n ground_truth_visualization = dltools.utility.create_color_label_image(t[0])\n image = dltools.utility.tensor2opencv(x[0])\n\n cv2.imshow(\"Image\", image)\n cv2.imshow(\"Ground Truth\", ground_truth_visualization)\n cv2.imshow(\"Prediction\", prediction_visualization)\n cv2.waitKey()\n","sub_path":"predict_frrn_a.py","file_name":"predict_frrn_a.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"477852223","text":"import copy\nimport math\nimport random\nimport platform\nimport os\nfrom pathlib import Path\n\nfrom solid import *\nfrom solid.utils import *\nfrom tqdm import tqdm\nfrom plumbum import local\nimport pyclipper\nfrom scipy.interpolate import splprep, splev\nfrom scipy.optimize import fmin\nfrom scipy.spatial.distance import euclidean\nimport numpy as np\n\nfrom dan.lib.helper import *\nfrom dan.lib import polytri\nfrom dan.project.slicestack.differential_line import DiffLine, NodeData\n\ndf = DiffLine()\ndf.init_circle()\n\nlayers = []\npoints_per_layer = 300\nheight = 200\nflip = False\n\nprint(\"Running Simulation\")\nfor i in tqdm(range(height)):\n df.update()\n\n if i % 20 == 0:\n layer = []\n for n in df.nodes:\n nd = NodeData(n)\n if flip:\n nd.pos.z = height - i\n else:\n nd.pos.z = i\n layer.append(nd)\n layers.append(layer)\n\nparts = []\n\ndef convert_points_to_spline(points):\n arr = np.array([p.to_list() for p in points])\n tck, u = splprep(arr.T, s=0.0, per=1, quiet=2)\n return tck, u\n\ndef normalize_points_on_layer(layer, num_points=points_per_layer):\n z = layer[0].z\n arr = np.array([(p.x, p.y) for p in layer])\n tck, u = splprep(arr.T, u=None, s=0.0, per=1, quiet=2)\n u_new = np.linspace(u.min(), u.max(), points_per_layer)\n x_new, y_new = splev(u_new, tck, der=0)\n return [\n Vec3(x, y_new[i], z) for i, x in enumerate(x_new)\n ]\n\n\ndef shrink_layer(layer, offset):\n z = layer[0].z\n\n pco = pyclipper.PyclipperOffset()\n pco.AddPath(\n [(p.x, p.y) for p in layer],\n pyclipper.JT_SQUARE,\n pyclipper.ET_CLOSEDPOLYGON\n )\n\n solution = pco.Execute(-offset)\n\n if len(solution) > 1:\n print( \"warning, solution len: {}\".format(len(solution)))\n\n # Only use the longest solution, if there's more than one we're probably fucked anyhow\n path = max(solution, key=lambda d: len(d))\n\n return [Vec3(p[0], p[1], z) for p in path]\n\n\ndef sample_closest_points(layers, num_points):\n ret_layers = []\n\n # sample first layer\n ret_layers.append(normalize_points_on_layer(layers[0], num_points))\n\n for layer_index, layer in 
tqdm(enumerate(layers), total=len(layers)):\n if layer_index == 0:\n continue\n\n previous_layer = ret_layers[layer_index-1]\n\n # Rotate layer\n layer = deque(layer)\n closest_index, closest_point = min(enumerate(layer), key=lambda ip: ip[1].distance(previous_layer[0]))\n layer.rotate(-closest_index)\n layer = list(layer)\n \n new_layer = []\n\n index_ratio = len(layer) / len(previous_layer)\n\n p_index = 0\n for bp in previous_layer:\n try:\n #closest_index, closest_point = min(enumerate(layer[p_index:p_index+int(index_ratio*1)]), key=lambda ip: ip[1].distance2(bp))\n closest_index, closest_point = min(enumerate(layer), key=lambda ip: Vec3(ip[1].x, ip[1].y).distance2(Vec3(bp.x, bp.y)))\n except ValueError:\n new_layer.append(layer[0])\n else:\n # we cut off the beginning of the layer array so have to add this value back\n closest_index += p_index\n p_index = closest_index+1\n\n new_layer.append(closest_point)\n\n ret_layers.append(new_layer)\n\n \n return ret_layers\n\ndef normalize_stack(layers, num_points):\n ret_layers = []\n\n ret_layers.append(normalize_points_on_layer(layers[0], num_points))\n\n def dist_to_point_function(p, tck):\n def dist_to_p(u):\n s = splev(u, tck)\n return euclidean(p, s)\n\n for layer in enumerate(layers[1:]):\n pass\n\ndef insert_points_in_long_sections(layer, max_length=5.0):\n out = []\n for i, p in enumerate(layer):\n out.append(p)\n next_point = layer[(i+1)%len(layer)]\n vec = next_point - p\n dist = vec.length()\n usable_dist = dist - 1\n if usable_dist > max_length:\n # Normalize\n vec /= dist\n vec *= max_length\n for _ in range(int(usable_dist / max_length)):\n out.append(out[-1] + vec)\n return out\n\nshrink_amount = 3\n\n# Convert to just pos\nlayers = [[nd.pos for nd in layer] for layer in layers]\n\nprint(\"Smoothing\")\n# layers = [normalize_points_on_layer(layer, len(layer)*2) for layer in layers]\n\nprint(\"Resizing\")\nog_layers = layers\nlayers = [shrink_layer(layer, 0) for layer in og_layers]\nshrunk_layers = [shrink_layer(layer, 5) for layer in og_layers]\n\nprint(\"Filling long spans\")\nlayers = [insert_points_in_long_sections(layer) for layer in layers]\nshrunk_layers = [insert_points_in_long_sections(layer) for layer in shrunk_layers]\n\n# for layer in layers:\n# for p in layer:\n# parts.append(translate(p.to_list())(cube([1,1,1])))\n\nprint(\"Normalizing\")\nlayers = [normalize_points_on_layer(layer) for layer in layers]\nshrunk_layers = [normalize_points_on_layer(layer) for layer in shrunk_layers]\n\n#layers = sample_closest_points(layers, 300)\n#shrunk_layers = sample_closest_points(shrunk_layers, 300)\n\nprint( \"Triangulating\" )\n#outer = rings_to_polyhedron(layers[:-1], progress_stdout=True)\n#inner = rings_to_polyhedron(shrunk_layers[1:], progress_stdout=True)\nouter = similar_rings_to_polyhedron(layers[:-1], progress_stdout=True)\ninner = similar_rings_to_polyhedron(shrunk_layers[1:], progress_stdout=True)\n\n#parts.append( outer )\n#parts.append( inner )\nparts.append(outer - inner)\n\nprint(\"Saving File\")\nwith open(__file__ + \".scad\", \"w\") as f:\n f.write(scad_render(union()(parts)))\n","sub_path":"dan/project/slicestack/slicestack.py","file_name":"slicestack.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"84358224","text":"import datetime\nfrom datetime import timedelta\nimport csv\n\n\nwith open('catalogue_data.csv', 'r') as cat_data:\n data_reader = csv.DictReader(cat_data)\n data = []\n for line in data_reader:\n 
data.append({'starid': line['starid'], 'name': line['name'], 'country': line['country'], 'magnitude': line['magnitude'], 'constellation': line['constellation'], 'date': datetime.datetime.strptime(line['date'], '%Y-%m-%d').date(), })\n\nnew_data = [x for x in data if x['date'] <= (datetime.date.today() - timedelta(days=1))]\n\n\nwith open('reviews.csv', 'r') as rev:\n rev_reader = csv.DictReader(rev)\n reviews = []\n for line in rev_reader:\n reviews.append({'review': line['review'], 'name': line['name'], 'date': datetime.datetime.strptime(line['date'], '%Y-%m-%d').date(), 'image': line['image']})\n\nnew_reviews = [x for x in reviews if x['date'] <= (datetime.date.today() - timedelta(days=1))]","sub_path":"promotions/tabledata.py","file_name":"tabledata.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"23269019","text":"from django.shortcuts import render\nfrom . import forms, models\n\n\n# Create your views here.\ndef author_all(request):\n form = models.Author_model.objects.order_by('author_name')\n dic = {'title':'All authors', 'form': form}\n return render(request, 'author_app/author_all.html', context=dic)\n\n\ndef author_add(request):\n form = forms.Author_form()\n\n if request.method=='POST':\n form = forms.Author_form(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return author_all(request)\n else:\n return author_add(request)\n\n return render(request, 'author_app/author_add.html', {'title':'Add author', 'form': form})\n","sub_path":"blog_app2/author_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"69346242","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('selection', '0006_auto_20160401_1433'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='classromm',\n field=models.CharField(max_length=10, default='0-000'),\n ),\n ]\n","sub_path":"selection/migrations/0007_course_classromm.py","file_name":"0007_course_classromm.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"350401071","text":"# encoding:utf-8\r\n# 用来测试英文论文的性能\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom keras import regularizers\r\nfrom keras.callbacks import EarlyStopping\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom pandas import DataFrame\r\nfrom pandas import Series\r\nfrom pandas import concat\r\nfrom pandas import read_csv\r\nfrom pandas import datetime\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom keras import losses\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\nfrom math import sqrt\r\nimport matplotlib\r\nfrom sklearn.model_selection import train_test_split\r\nimport xlrd\r\nfrom math import sqrt\r\n# from numpy import concatenate\r\nfrom matplotlib import pyplot\r\nfrom pandas import read_csv\r\nfrom sklearn.metrics import explained_variance_score\r\nfrom pandas import DataFrame\r\nfrom pandas import concat\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom 
sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\nfrom keras.layers import Dropout\r\nfrom sklearn.metrics import roc_auc_score\r\nimport keras\r\nimport tensorflow as tf\r\nimport keras.layers.recurrent\r\nfrom keras import backend as K\r\nfrom sklearn import metrics\r\nimport numpy\r\nfrom keras.callbacks import Callback\r\nfrom sklearn.metrics import f1_score, precision_score, recall_score\r\nfrom keras.callbacks import TensorBoard\r\nimport time\r\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score ###计算roc和auc\r\nfrom sklearn.metrics import accuracy_score\r\nfrom math import sqrt\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.metrics import r2_score\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.models import save_model\r\nfrom keras.models import load_model\r\nfrom keras import optimizers\r\nfrom keras.layers import LSTM\r\n# from sklearn. import decision\r\n#from sklearn.preprocessing import Imputer\r\n\r\n\r\ndef picky(data, num):\r\n k = []\r\n for i in range(len(data)):\r\n if (i % num == 0):\r\n k.append(data[i])\r\n return k\r\n\r\n\r\n#best_weights_filepath = '/root/xulun/selflstm20200427/best_weights.hdf5'\r\n#best_weights_filepath = '/root/xulun/standardlstm0427/best_weights.hdf5'\r\n# timestep = [5,10,15,20]\r\n# istep = [5,10,15,20]\r\ntimestep = [1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200]\r\n#istep = [7]\r\n# timestep = [30][5,10,15,20,25,30]\r\n# istep = [6][5]\r\n# 5 没训练\r\n# 2520\r\n########读取训练数据\r\n\r\nrow_data = pd.read_excel('/root/xulun/316/20180514/data/trainvalue.xlsx')\r\n#row_data = pd.read_excel('F:/FUCK/trainvalue.xlsx')\r\ncolumn = ['18S04Ijmb', '18S04Ts', '18S04Ftm', '18S04Fjm',\r\n '18S04F2k', '18S04F4k', '18S04F8k',\r\n '18S04UAm', '18S04UBm', '18S04UCm', '18S04UAa',\r\n '18S04UBa', '18S04UCa', '18S04U2k', '18S04U4k', '18S04U8k',\r\n '18S04V5p', '18S04V12p', '18S04V12n', '18S04Vsy15p',\r\n '18S04Vsy15n', '18S04Vc15p', '18S04Vc15n',\r\n '18S04Itma', '18S04Itmb', '18S04Itmc', '18S04Ijma', '18S04Ijmc',\r\n # '18S04Zkz','18S04Ffz',\r\n '18S04Ncrc', '18S04Tt', '18S04Txt', '18S04Tyt', '18S04Tzt',\r\n '18S04Txj', '18S04Tyj', '18S04Iwk', '18S04Wxt', '18S04Wyt', '18S04Wzt', '18S04Wxj', '18S04Wyj',\r\n '18S04Ix', '18S04Iy','18S04Iz',\r\n '18S04Iy', '18S04Nw', '18S04Nwc', '18S04Ntc', '18S04Tb', '18S04Ny', '18S04N422', '18S04Zkzb0', '18S04Zkzb1',\r\n '18S04Zkzb2', '18S04Zkzb3', '18S04Zkzb4', '18S04Zkzb5', '18S04Zkzb6',\r\n '18S04Zkzb7', '18S04Zkzb8', '18S04Zkzb9', '18S04Zkzb10', '18S04Zkzb11_13',\r\n '18S04Zkzb14', '18S04Ffzb0', '18S04Ffzb1', '18S04Ffzb2', '18S04Ffzb3',\r\n '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ncrcb0',\r\n '18S04Ncrcb1', '18S04Ncrcb2', '18S04Ncrcb3', '18S04Ncrcb4', '18S04Ncrcb5',\r\n '18S04Ncrcb6', '18S04Ncrcb7',\r\n '18S02Fztz', '18S02Fjd', '18S02Nsa2', '18S02Fc', '18S02Fztz0', '18S02Fztz1', '18S02Fztz2',\r\n '18S02Fztz3', '18S02Fztz4', '18S02Fztz5', '18S02Fztz6', '18S02Fztz7', '18S02Fztz8', '18S02Fztz9',\r\n '18S02Fztz10', '18S02Fztz11', '18S02Fztz12', '18S02Fztz13', '18S02Fztz14', '18S02Fztz15', '18S02Fc0',\r\n '18S02Fc1', '18S02Fc2', '18S02Fc3', '18S02Fc4', '18S02Fc5', '18S02Fc6', '18S02Fc7', '18S02Fc8', '18S02Fc9',\r\n '18S02Fc10', '18S02Fc11', '18S02Fc12', '18S02Fc13', '18S02Fc14',\r\n 'signal', '17S03Time', '17S03NDA', '17S03SJ', '17S03NP', '17S03Mxsyp', '17S03MZLsyXp',\r\n '17S03Mxsyn', '17S03MZLsyXn', 
'17S03Mysyp', '17S03MZLsyYp', '17S03Mysyn',\r\n '17S03MZLsyYn', '17S03Mzsyp', '17S03MZLsyZp', '17S03Mzsyn', '17S03MZLsyZn',\r\n '17S03Dypj', '17S03Dypc', '17S03Dxj', '17S03Dxc', '17S03Dyj', '17S03Dyc',\r\n '17S03Dzj', '17S03Dzc', '17S03TDY1', '17S03TDX', '17S03TDZ', '17S03TDY',\r\n '17S03Mxtjp', '17S03MZLtjXp', '17S03Mxtjn', '17S03MZLtjXn', '17S03Mytjp',\r\n '17S03MZLtjYp', '17S03Mytjn', '17S03MZLtjYn', '17S03Mx/ytjp', '17S03Ijjx', '17S03Ijjy'\r\n , '17S03Ijjz', '17S03MZLsyX', '17S03MZLsyY', '17S03MZLsyZ', '17S03MZLtjX', '17S03MZLtjY'\r\n , '17S03MZLsyX(1s)', '17S03MZLsyY(1s)', '17S03MZLsyZ(1s)', '17S03MZLtjX(1s)', '17S03MZLtjY(1s)'\r\n ]\r\nvalue1 = row_data[column]\r\nprint('valus1', value1.shape)\r\nrow_data = pd.read_excel('/root/xulun/316/20180514/data/traindf.xlsx')\r\ncolumn = ['18S04Ijmb', '18S04Ts', '18S04Ftm', '18S04Fjm',\r\n '18S04F2k', '18S04F4k', '18S04F8k',\r\n '18S04UAm', '18S04UBm', '18S04UCm', '18S04UAa',\r\n '18S04UBa', '18S04UCa', '18S04U2k', '18S04U4k', '18S04U8k',\r\n '18S04V5p', '18S04V12p', '18S04V12n', '18S04Vsy15p',\r\n '18S04Vsy15n', '18S04Vc15p', '18S04Vc15n',\r\n '18S04Itma', '18S04Itmb', '18S04Itmc', '18S04Ijma', '18S04Ijmc',\r\n # '18S04Zkz','18S04Ffz',\r\n '18S04Ncrc', '18S04Tt', '18S04Txt', '18S04Tyt', '18S04Tzt',\r\n '18S04Txj', '18S04Tyj', '18S04Iwk', '18S04Wxt', '18S04Wyt', '18S04Wzt', '18S04Wxj', '18S04Wyj',\r\n '18S04Ix', '18S04Iy','18S04Iz' ,\r\n '18S04Iy', '18S04Nw', '18S04Nwc', '18S04Ntc', '18S04Tb', '18S04Ny', '18S04N422', '18S04Zkzb0', '18S04Zkzb1',\r\n '18S04Zkzb2', '18S04Zkzb3', '18S04Zkzb4', '18S04Zkzb5', '18S04Zkzb6',\r\n '18S04Zkzb7', '18S04Zkzb8', '18S04Zkzb9', '18S04Zkzb10', '18S04Zkzb11_13',\r\n '18S04Zkzb14', '18S04Ffzb0', '18S04Ffzb1', '18S04Ffzb2', '18S04Ffzb3',\r\n '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ncrcb0',\r\n '18S04Ncrcb1', '18S04Ncrcb2', '18S04Ncrcb3', '18S04Ncrcb4', '18S04Ncrcb5',\r\n '18S04Ncrcb6', '18S04Ncrcb7',\r\n '18S02Fztz', '18S02Fjd', '18S02Nsa2', '18S02Fc', '18S02Fztz0', '18S02Fztz1', '18S02Fztz2',\r\n '18S02Fztz3', '18S02Fztz4', '18S02Fztz5', '18S02Fztz6', '18S02Fztz7', '18S02Fztz8', '18S02Fztz9',\r\n '18S02Fztz10', '18S02Fztz11', '18S02Fztz12', '18S02Fztz13', '18S02Fztz14', '18S02Fztz15', '18S02Fc0',\r\n '18S02Fc1', '18S02Fc2', '18S02Fc3', '18S02Fc4', '18S02Fc5', '18S02Fc6', '18S02Fc7', '18S02Fc8', '18S02Fc9',\r\n '18S02Fc10', '18S02Fc11', '18S02Fc12', '18S02Fc13', '18S02Fc14',\r\n 'signal', '17S03Time', '17S03NDA', '17S03SJ', '17S03NP', '17S03Mxsyp', '17S03MZLsyXp',\r\n '17S03Mxsyn', '17S03MZLsyXn', '17S03Mysyp', '17S03MZLsyYp', '17S03Mysyn',\r\n '17S03MZLsyYn', '17S03Mzsyp', '17S03MZLsyZp', '17S03Mzsyn', '17S03MZLsyZn',\r\n '17S03Dypj', '17S03Dypc', '17S03Dxj', '17S03Dxc', '17S03Dyj', '17S03Dyc',\r\n '17S03Dzj', '17S03Dzc', '17S03TDY1', '17S03TDX', '17S03TDZ', '17S03TDY',\r\n '17S03Mxtjp', '17S03MZLtjXp', '17S03Mxtjn', '17S03MZLtjXn', '17S03Mytjp',\r\n '17S03MZLtjYp', '17S03Mytjn', '17S03MZLtjYn', '17S03Mx/ytjp', '17S03Ijjx', '17S03Ijjy'\r\n , '17S03Ijjz', '17S03MZLsyX', '17S03MZLsyY', '17S03MZLsyZ', '17S03MZLtjX', '17S03MZLtjY'\r\n , '17S03MZLsyX(1s)', '17S03MZLsyY(1s)', '17S03MZLsyZ(1s)', '17S03MZLtjX(1s)', '17S03MZLtjY(1s)'\r\n ]\r\nvalue2 = row_data[column]\r\nprint('valus2', value2.shape)\r\n# value2是故障 value4是正常 训练集是故障加正常\r\n# v2: 数据改为正常数据加故障数据 标签连接 value6=value2 value5=value4+value6 df1==values4\r\n# 少数量样本,观察误差 valuesforward=values4 总数据为 values4+values2+values4 测试数据贯穿故障,画出roc曲线\r\n# values6=values2[0:40000]\r\n# trainvalue = 
value1.append(value2.append(value1.append(value2.append(value1[:len(value1 ) - 1118]))))\r\n# 24W[ValueError: cannot reshape array of size 2653510 into shape (1065,15,166)]\r\n# trainvalue = value1.append(value2.append(value1.append(value1.append(value1[:len(value1 ) - 1118 - 4444]))))\r\n# 18w ------20200110 10:39\r\ntrainvalue = value1.append(value2.append(value1.append(value2.append(value1.append(value1[:10876])))))\r\n# 判断\r\nprint(trainvalue.shape)\r\nrow_data = pd.read_excel('/root/xulun/316/20180514/data/testdf.xlsx')\r\ncolumn = ['18S04Ijmb', '18S04Ts', '18S04Ftm', '18S04Fjm',\r\n '18S04F2k', '18S04F4k', '18S04F8k',\r\n '18S04UAm', '18S04UBm', '18S04UCm', '18S04UAa',\r\n '18S04UBa', '18S04UCa', '18S04U2k', '18S04U4k', '18S04U8k',\r\n '18S04V5p', '18S04V12p', '18S04V12n', '18S04Vsy15p',\r\n '18S04Vsy15n', '18S04Vc15p', '18S04Vc15n',\r\n '18S04Itma', '18S04Itmb', '18S04Itmc', '18S04Ijma', '18S04Ijmc',\r\n # '18S04Zkz','18S04Ffz',\r\n '18S04Ncrc', '18S04Tt', '18S04Txt', '18S04Tyt', '18S04Tzt',\r\n '18S04Txj', '18S04Tyj', '18S04Iwk', '18S04Wxt', '18S04Wyt', '18S04Wzt', '18S04Wxj', '18S04Wyj',\r\n '18S04Ix', '18S04Iy','18S04Iz',\r\n '18S04Iy', '18S04Nw', '18S04Nwc', '18S04Ntc', '18S04Tb', '18S04Ny', '18S04N422', '18S04Zkzb0', '18S04Zkzb1',\r\n '18S04Zkzb2', '18S04Zkzb3', '18S04Zkzb4', '18S04Zkzb5', '18S04Zkzb6',\r\n '18S04Zkzb7', '18S04Zkzb8', '18S04Zkzb9', '18S04Zkzb10', '18S04Zkzb11_13',\r\n '18S04Zkzb14', '18S04Ffzb0', '18S04Ffzb1', '18S04Ffzb2', '18S04Ffzb3',\r\n '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ncrcb0',\r\n '18S04Ncrcb1', '18S04Ncrcb2', '18S04Ncrcb3', '18S04Ncrcb4', '18S04Ncrcb5',\r\n '18S04Ncrcb6', '18S04Ncrcb7',\r\n '18S02Fztz', '18S02Fjd', '18S02Nsa2', '18S02Fc', '18S02Fztz0', '18S02Fztz1', '18S02Fztz2',\r\n '18S02Fztz3', '18S02Fztz4', '18S02Fztz5', '18S02Fztz6', '18S02Fztz7', '18S02Fztz8', '18S02Fztz9',\r\n '18S02Fztz10', '18S02Fztz11', '18S02Fztz12', '18S02Fztz13', '18S02Fztz14', '18S02Fztz15', '18S02Fc0',\r\n '18S02Fc1', '18S02Fc2', '18S02Fc3', '18S02Fc4', '18S02Fc5', '18S02Fc6', '18S02Fc7', '18S02Fc8', '18S02Fc9',\r\n '18S02Fc10', '18S02Fc11', '18S02Fc12', '18S02Fc13', '18S02Fc14',\r\n 'signal', '17S03Time', '17S03NDA', '17S03SJ', '17S03NP', '17S03Mxsyp', '17S03MZLsyXp',\r\n '17S03Mxsyn', '17S03MZLsyXn', '17S03Mysyp', '17S03MZLsyYp', '17S03Mysyn',\r\n '17S03MZLsyYn', '17S03Mzsyp', '17S03MZLsyZp', '17S03Mzsyn', '17S03MZLsyZn',\r\n '17S03Dypj', '17S03Dypc', '17S03Dxj', '17S03Dxc', '17S03Dyj', '17S03Dyc',\r\n '17S03Dzj', '17S03Dzc', '17S03TDY1', '17S03TDX', '17S03TDZ', '17S03TDY',\r\n '17S03Mxtjp', '17S03MZLtjXp', '17S03Mxtjn', '17S03MZLtjXn', '17S03Mytjp',\r\n '17S03MZLtjYp', '17S03Mytjn', '17S03MZLtjYn', '17S03Mx/ytjp', '17S03Ijjx', '17S03Ijjy'\r\n , '17S03Ijjz', '17S03MZLsyX', '17S03MZLsyY', '17S03MZLsyZ', '17S03MZLtjX', '17S03MZLtjY'\r\n , '17S03MZLsyX(1s)', '17S03MZLsyY(1s)', '17S03MZLsyZ(1s)', '17S03MZLtjX(1s)', '17S03MZLtjY(1s)'\r\n ]\r\ntesterror = row_data[column]\r\nprint('testerro', testerror.shape)\r\nrow_data = pd.read_excel('/root/xulun/316/20180514/data/test+value.xlsx')\r\ncolumn = ['18S04Ijmb', '18S04Ts', '18S04Ftm', '18S04Fjm',\r\n '18S04F2k', '18S04F4k', '18S04F8k',\r\n '18S04UAm', '18S04UBm', '18S04UCm', '18S04UAa',\r\n '18S04UBa', '18S04UCa', '18S04U2k', '18S04U4k', '18S04U8k',\r\n '18S04V5p', '18S04V12p', '18S04V12n', '18S04Vsy15p',\r\n '18S04Vsy15n', '18S04Vc15p', '18S04Vc15n',\r\n '18S04Itma', '18S04Itmb', '18S04Itmc', '18S04Ijma', '18S04Ijmc',\r\n # '18S04Zkz','18S04Ffz',\r\n '18S04Ncrc', '18S04Tt', '18S04Txt', '18S04Tyt', 
'18S04Tzt',\r\n '18S04Txj', '18S04Tyj', '18S04Iwk', '18S04Wxt', '18S04Wyt', '18S04Wzt', '18S04Wxj', '18S04Wyj',\r\n '18S04Ix', '18S04Iy','18S04Iz',\r\n '18S04Iy', '18S04Nw', '18S04Nwc', '18S04Ntc', '18S04Tb', '18S04Ny', '18S04N422', '18S04Zkzb0', '18S04Zkzb1',\r\n '18S04Zkzb2', '18S04Zkzb3', '18S04Zkzb4', '18S04Zkzb5', '18S04Zkzb6',\r\n '18S04Zkzb7', '18S04Zkzb8', '18S04Zkzb9', '18S04Zkzb10', '18S04Zkzb11_13',\r\n '18S04Zkzb14', '18S04Ffzb0', '18S04Ffzb1', '18S04Ffzb2', '18S04Ffzb3',\r\n '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ncrcb0',\r\n '18S04Ncrcb1', '18S04Ncrcb2', '18S04Ncrcb3', '18S04Ncrcb4', '18S04Ncrcb5',\r\n '18S04Ncrcb6', '18S04Ncrcb7',\r\n '18S02Fztz', '18S02Fjd', '18S02Nsa2', '18S02Fc', '18S02Fztz0', '18S02Fztz1', '18S02Fztz2',\r\n '18S02Fztz3', '18S02Fztz4', '18S02Fztz5', '18S02Fztz6', '18S02Fztz7', '18S02Fztz8', '18S02Fztz9',\r\n '18S02Fztz10', '18S02Fztz11', '18S02Fztz12', '18S02Fztz13', '18S02Fztz14', '18S02Fztz15', '18S02Fc0',\r\n '18S02Fc1', '18S02Fc2', '18S02Fc3', '18S02Fc4', '18S02Fc5', '18S02Fc6', '18S02Fc7', '18S02Fc8', '18S02Fc9',\r\n '18S02Fc10', '18S02Fc11', '18S02Fc12', '18S02Fc13', '18S02Fc14',\r\n 'signal', '17S03Time', '17S03NDA', '17S03SJ', '17S03NP', '17S03Mxsyp', '17S03MZLsyXp',\r\n '17S03Mxsyn', '17S03MZLsyXn', '17S03Mysyp', '17S03MZLsyYp', '17S03Mysyn',\r\n '17S03MZLsyYn', '17S03Mzsyp', '17S03MZLsyZp', '17S03Mzsyn', '17S03MZLsyZn',\r\n '17S03Dypj', '17S03Dypc', '17S03Dxj', '17S03Dxc', '17S03Dyj', '17S03Dyc',\r\n '17S03Dzj', '17S03Dzc', '17S03TDY1', '17S03TDX', '17S03TDZ', '17S03TDY',\r\n '17S03Mxtjp', '17S03MZLtjXp', '17S03Mxtjn', '17S03MZLtjXn', '17S03Mytjp',\r\n '17S03MZLtjYp', '17S03Mytjn', '17S03MZLtjYn', '17S03Mx/ytjp', '17S03Ijjx', '17S03Ijjy'\r\n , '17S03Ijjz', '17S03MZLsyX', '17S03MZLsyY', '17S03MZLsyZ', '17S03MZLtjX', '17S03MZLtjY'\r\n , '17S03MZLsyX(1s)', '17S03MZLsyY(1s)', '17S03MZLsyZ(1s)', '17S03MZLtjX(1s)', '17S03MZLtjY(1s)'\r\n ]\r\ntestcommon = row_data[column]\r\nprint('testcommon', testcommon.shape)\r\na = testcommon[44000:]\r\nb = testcommon[:6000]\r\n# testvalue = a.append(testerror.append(b.append(testerror.append(a[:len(a) - 886]))))\r\ntestvalue = a[:4000].append(testerror.append(b[:4000].append(testerror.append(a[:5808]))))\r\nprint('testvalue', testvalue.shape)\r\nnocheck = testvalue\r\nrow_data = pd.read_excel('/root/xulun/316/20180514/data/validdf.xlsx')\r\ncolumn = [ '18S04Ijmb', '18S04Ts', '18S04Ftm', '18S04Fjm',\r\n '18S04F2k', '18S04F4k', '18S04F8k',\r\n '18S04UAm', '18S04UBm', '18S04UCm', '18S04UAa',\r\n '18S04UBa', '18S04UCa', '18S04U2k', '18S04U4k', '18S04U8k',\r\n '18S04V5p', '18S04V12p', '18S04V12n', '18S04Vsy15p',\r\n '18S04Vsy15n', '18S04Vc15p', '18S04Vc15n',\r\n '18S04Itma', '18S04Itmb', '18S04Itmc', '18S04Ijma', '18S04Ijmc',\r\n # '18S04Zkz','18S04Ffz',\r\n '18S04Ncrc', '18S04Tt', '18S04Txt', '18S04Tyt', '18S04Tzt',\r\n '18S04Txj', '18S04Tyj', '18S04Iwk', '18S04Wxt', '18S04Wyt', '18S04Wzt', '18S04Wxj', '18S04Wyj',\r\n '18S04Ix', '18S04Iy','18S04Iz',\r\n '18S04Iy', '18S04Nw', '18S04Nwc', '18S04Ntc', '18S04Tb', '18S04Ny', '18S04N422', '18S04Zkzb0', '18S04Zkzb1',\r\n '18S04Zkzb2', '18S04Zkzb3', '18S04Zkzb4', '18S04Zkzb5', '18S04Zkzb6',\r\n '18S04Zkzb7', '18S04Zkzb8', '18S04Zkzb9', '18S04Zkzb10', '18S04Zkzb11_13',\r\n '18S04Zkzb14', '18S04Ffzb0', '18S04Ffzb1', '18S04Ffzb2', '18S04Ffzb3',\r\n '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ffzb3', '18S04Ncrcb0',\r\n '18S04Ncrcb1', '18S04Ncrcb2', '18S04Ncrcb3', '18S04Ncrcb4', '18S04Ncrcb5',\r\n '18S04Ncrcb6', '18S04Ncrcb7',\r\n '18S02Fztz', 
'18S02Fjd', '18S02Nsa2', '18S02Fc', '18S02Fztz0', '18S02Fztz1', '18S02Fztz2',\r\n '18S02Fztz3', '18S02Fztz4', '18S02Fztz5', '18S02Fztz6', '18S02Fztz7', '18S02Fztz8', '18S02Fztz9',\r\n '18S02Fztz10', '18S02Fztz11', '18S02Fztz12', '18S02Fztz13', '18S02Fztz14', '18S02Fztz15', '18S02Fc0',\r\n '18S02Fc1', '18S02Fc2', '18S02Fc3', '18S02Fc4', '18S02Fc5', '18S02Fc6', '18S02Fc7', '18S02Fc8', '18S02Fc9',\r\n '18S02Fc10', '18S02Fc11', '18S02Fc12', '18S02Fc13', '18S02Fc14',\r\n 'signal', '17S03Time', '17S03NDA', '17S03SJ', '17S03NP', '17S03Mxsyp', '17S03MZLsyXp',\r\n '17S03Mxsyn', '17S03MZLsyXn', '17S03Mysyp', '17S03MZLsyYp', '17S03Mysyn',\r\n '17S03MZLsyYn', '17S03Mzsyp', '17S03MZLsyZp', '17S03Mzsyn', '17S03MZLsyZn',\r\n '17S03Dypj', '17S03Dypc', '17S03Dxj', '17S03Dxc', '17S03Dyj', '17S03Dyc',\r\n '17S03Dzj', '17S03Dzc', '17S03TDY1', '17S03TDX', '17S03TDZ', '17S03TDY',\r\n '17S03Mxtjp', '17S03MZLtjXp', '17S03Mxtjn', '17S03MZLtjXn', '17S03Mytjp',\r\n '17S03MZLtjYp', '17S03Mytjn', '17S03MZLtjYn', '17S03Mx/ytjp', '17S03Ijjx', '17S03Ijjy'\r\n , '17S03Ijjz', '17S03MZLsyX', '17S03MZLsyY', '17S03MZLsyZ', '17S03MZLtjX', '17S03MZLtjY'\r\n , '17S03MZLsyX(1s)', '17S03MZLsyY(1s)', '17S03MZLsyZ(1s)', '17S03MZLtjX(1s)', '17S03MZLtjY(1s)'\r\n ]\r\nvaliderror = row_data[column]\r\n\r\n# validvalue = b.append(validerror.append(a.append(validerror.append(b[:len(b) - 884]))))\r\nvalidvalue = b[:4000].append(testerror.append(a[:4000].append(testerror.append(b[:5808]))))\r\nprint('这是规模', 'values6=values2:', validvalue.shape, 'values4', testvalue.shape, trainvalue.shape)\r\n# 80%故障加10w正常 10%故障+2000正常测试\r\n###df1训练 df2测试\r\n\r\naverage = trainvalue['18S04Ijmb'].mean()\r\nprint(\"average = \", average)\r\n\r\n#############################################################################\r\nnew_trainvalue = pd.DataFrame()\r\nnew_testvalue = pd.DataFrame()\r\nnew_validvalue = pd.DataFrame()\r\nfor t in timestep:\r\n #t = i * T\r\n print( 't=', t)\r\n best_weights_filepath = '/root/xulun/316/20180514/data/2best_weights%0.2f.hdf5' % (t)\r\n\r\n # 取整\r\n # if ((len(trainvalue) % t) != 0):\r\n # new_trainvalue = trainvalue[:int(len(trainvalue) / t) * t]\r\n # else:\r\n # new_trainvalue = trainvalue\r\n # if ((len(testvalue) % t) != 0):\r\n # new_testvalue = testvalue[:int(len(testvalue) / t) * t]\r\n # else:\r\n # new_testvalue = testvalue\r\n # if ((len(validvalue) % t) != 0):\r\n # new_valid = validvalue[:int(len(validvalue) / t) * t]\r\n # else:\r\n # new_valid = validvalue\r\n\r\n new_trainvalue = trainvalue\r\n new_testvalue = testvalue\r\n new_validvalue = validvalue\r\n print('shape check')\r\n print('train shape', new_trainvalue.shape)\r\n print('test shape', new_testvalue.shape)\r\n print('valid shape', new_validvalue.shape)\r\n print(new_trainvalue['18S04Ijmb'].min(), new_trainvalue['18S04Ijmb'].max())\r\n ymin = new_testvalue['18S04Ijmb'].min()\r\n ymax = new_testvalue['18S04Ijmb'].max()\r\n copytestvalue = testvalue\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n scaled1 = scaler.fit_transform(new_trainvalue)\r\n # reframed = series_to_supervised(scaled, 1, 1)\r\n new_trainvalue = scaled1\r\n new_testvalue = scaler.fit_transform(new_testvalue)\r\n new_valid = scaler.fit_transform(new_validvalue)\r\n\r\n\r\n train_data =[]\r\n test_data = []\r\n valid_data = []\r\n for i in range(len(new_trainvalue)-t):\r\n train_data.append(new_trainvalue[i:i+t,:160])\r\n train_X, train_y = train_data, new_trainvalue[t:,0]\r\n for i in range(len(new_testvalue)-t):\r\n test_data.append(new_testvalue[i:i+t,:160])\r\n test_X1, test_y1 
= test_data, new_testvalue[t:,0]\r\n    for i in range(len(new_valid)-t):\r\n        valid_data.append(new_valid[i:i+t,:160])\r\n    valid_X, valid_y = valid_data, new_valid[t:,0]\r\n    #train_X, train_y = new_trainvalue[:len(new_trainvalue) - t, :160], new_trainvalue[t:, 0]\r\n    print('this is train_x')\r\n    print(len(train_X))\r\n    print('this is train_y')\r\n    print(len(train_y))\r\n    # test_X1, test_y1 = new_testvalue[:len(new_testvalue) - t, :160], new_testvalue[t:, 0]\r\n    # valid_X, valid_y = new_valid[:len(new_valid) - t, :160], new_valid[t:, 0]\r\n    #print('x', test_X1.shape, valid_X.shape, train_X.shape, 'y', test_y1.shape, valid_y.shape, train_y.shape)\r\n    train_X = np.array(train_X)\r\n    train_y = np.array(train_y)\r\n    test_X1 = np.array(test_X1)\r\n    test_y1= np.array(test_y1)\r\n    valid_X = np.array(valid_X)\r\n    valid_y = np.array(valid_y)\r\n    print('trainx shape',train_X.shape,train_y.shape)\r\n    # test_X2, test_y2 = values5[68000:68700, 6:], values5[68000 + t : 68700 + t, 0]\r\n    # no_X1,no_y1 = nocheck[:len(nocheck) - t, :],nocheck[t:,0]\r\n    # reshape input to be 3D [samples, timesteps, features]\r\n    #train_X = train_X.reshape((int(train_X.shape[0] / t), t, 160))\r\n    train_X = train_X.reshape(train_X.shape[0], t, 160)\r\n    #train_y = train_y.reshape(train_y.shape[0],train_y.shape[1], 1)\r\n    valid_X = valid_X.reshape(valid_X.shape[0], t, 160)\r\n    #test_y1 = test_y1.reshape( test_y1.shape[0], test_y1.shape[1], 1)\r\n    test_X1 = test_X1.reshape(test_X1.shape[0] , t, 160)\r\n    #valid_y = valid_y.reshape(valid_y.shape[0], valid_y.shape[1], 1)\r\n    #train_y = picky(train_y, t)\r\n    #test_y1 = picky(test_y1, t)\r\n    #valid_y = picky(valid_y, t)\r\n    # train_y = train_y.reshape((int(train_y.shape[0] / t), t, 1))\r\n    # valid_y = valid_y.reshape((int(valid_y.shape[0] / t), t, 1))\r\n    # test_y1 = test_y1.reshape((int(test_y1.shape[0] / t), t, 1))\r\n    #print('x', test_X1.shape, valid_X.shape, train_X.shape ,'y',test_y1.shape , valid_y.shape, train_y.shape)\r\n\r\n\r\n    ################################################################### emit an AUC (and timing) every epoch\r\n\r\n    class RocAucMetricCallback(keras.callbacks.Callback):\r\n        i=0\r\n        def __init__(self, validation_data):\r\n            self.x_val, self.y_val = validation_data\r\n            self.i = 0\r\n\r\n\r\n        def on_epoch_begin(self, epoch, logs={}):\r\n            # register the roc_auc_val metric\r\n            self.starttime = time.time()\r\n\r\n            if not ('roc_auc_val' in self.params['metrics']):\r\n                self.params['metrics'].append('roc_auc_val')\r\n            if not ('costtime' in self.params['metrics']):\r\n                self.params['metrics'].append('costtime')\r\n            if not ('accuracy_score_val' in self.params['metrics']):\r\n                self.params['metrics'].append('accuracy_score_val')\r\n            return self.starttime\r\n\r\n        def on_epoch_end(self, epoch, logs={}):\r\n            #starttime = self.on_epoch_begin(epoch)\r\n            self.i = self.i +1\r\n            self.nowtime = time.time()\r\n            self.costtime = self.nowtime - self.starttime\r\n\r\n\r\n            y_pre = model.predict(self.x_val)\r\n            l = y_pre - self.y_val\r\n            logs['accuracy_score_val'] = float('-inf')\r\n            logs['roc_auc_val'] = float('-inf')\r\n            # do not clobber the real training loss with a placeholder:\r\n            # logs['loss'] = float('-inf')\r\n\r\n            if (self.validation_data):\r\n                m = []\r\n                bb = []\r\n                tt = []\r\n                '''\r\n                for k in self.y_val:\r\n                    if ((265.5 - 93) * k + 93) >= 160:\r\n                        bb.append([1])\r\n                    else:\r\n                        bb.append([0])\r\n                m = np.array(bb)\r\n                logs['roc_auc_val'] = roc_auc_score(m, y_pre.flatten())\r\n                '''\r\n\r\n\r\n                #file.write(str(self.i)+' '+ str(self.costtime) + '\\r\\n')\r\n                #costtime.to_excel('/root/xulun/second/costtime%0.2f.xlsx' % (t+i/100), index_label=['1'], index=True)\r\n                logs['costtime'] = 
self.costtime\r\n                m.append(self.costtime)\r\n                print('time:{costtime}'.format(costtime=logs.get('costtime')))\r\n\r\n\r\n    my = RocAucMetricCallback(validation_data=(valid_X, valid_y))\r\n\r\n    earlyStopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=20, verbose=1, mode='min')\r\n    saveBestModel = ModelCheckpoint(best_weights_filepath, monitor='val_loss', verbose=0, save_best_only=True,\r\n                                    mode='min', period=1)\r\n    callbacks_list = [my,earlyStopping, saveBestModel]\r\n    ##### set up the model\r\n    print('Setting up model')\r\n    model = Sequential()\r\n    model.add(LSTM(256, input_shape=(t, train_X.shape[2]), return_sequences=False,dropout=0.5))  # number of LSTM units (tunable)\r\n    #model.add(LSTM(128, input_shape=(t, train_X.shape[2])))\r\n    model.add(Dense(1))\r\n\r\n    #sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n    model.compile(loss='mse', optimizer='adam')\r\n    print('Starting training')\r\n    # fit network\r\n    begintime = time.time()\r\n    #file = open('/root/xulun/316/20180514/data/costtime%0.2f.txt' % (t ), 'w')\r\n    start_time = time.time()\r\n    history = model.fit(train_X, train_y, epochs=1,batch_size=512, validation_data=(valid_X, valid_y), verbose=2,\r\n                        shuffle=False, callbacks=callbacks_list)  # epochs and batch size\r\n    #file.close()\r\n    end_time = time.time()\r\n    print('Training time:',end_time - start_time)\r\n    file = open('/root/xulun/316/20180514/data/2loss%0.2f.txt' % (t ), 'w')\r\n    file.write('loss:' + str(history.history['loss']) + ' ' + 'valloss:' + str(history.history['val_loss']))\r\n    file.close()\r\n    endtime = time.time()\r\n    time1 = []\r\n    time1.append(endtime - begintime)\r\n    for i in model.layers:\r\n        print('Layer parameters:',i.name,i.count_params())\r\n    alltime = pd.DataFrame()\r\n    alltime = alltime.append(time1)\r\n    alltime.to_excel('/root/xulun/316/20180514/data/2traintime%0.2f.xlsx' % (t), index_label=['1'], index=True)\r\n    ######################################## fault prediction\r\n    # print(\"lstm\", lstm)\r\n    print(\"over\")\r\n    #model.load_weights(best_weights_filepath)\r\n    test_start_time = time.time()\r\n    predict_y1 = model.predict(test_X1)\r\n    # predict_y2 = model.predict(no_X1)\r\n    test_end_time = time.time()\r\n    test_time = test_end_time - test_start_time\r\n    print('Test time:',test_time )\r\n    test_time = pd.DataFrame([test_time])\r\n    test_time.to_excel('/root/xulun/316/20180514/data/2testtime%0.2f.xlsx' % (t), index_label=['1'], index=True)\r\n    # print(predict_y1.shape)\r\n    # y =np.array(predict_y1)\r\n    # #y= y.reshape(len(y),t)\r\n    # print(y.shape)\r\n    # y = pd.DataFrame(y)\r\n    # # predict_y2 = pd.DataFrame(predict_y2)\r\n    # #y.columns = ['y','0']\r\n    # y.to_excel('/root/xulun/316/20180514/data/predict_y%0.2f.xlsx' % (t ), index_label=['1'], index=True)\r\n    # # predict_y2.columns = ['y']\r\n    # # predict_y2.to_excel('/root/xulun/predict_y2 %0.2f.xlsx' % (t + i / 100), index_label=['1'], index=True)\r\n    # print(\"Saved successfully!\")\r\n\r\n    c1 = []\r\n    c2 = []\r\n    d1 = []\r\n    d2 = []\r\n    # 103.5x+162\r\n    # 172.5x+93\r\n    np.set_printoptions(threshold=np.inf)\r\n    #test_y1 = np.array(test_y1)\r\n    #predict_y1 = np.array(predict_y1)\r\n\r\n    # d2 = (265.5 - 93) * predict_y2 + 93\r\n    #print('this is fault prediction 1', predict_y1[0])\r\n    # y = (265.5 - 93) * trainvalue['18S04Ijmb'] + 93\r\n    predict_y1 = np.array(predict_y1)\r\n    #predict_y1 = predict_y1.reshape(len(predict_y1), t)\r\n    print('predict_y1.shape',predict_y1.shape)\r\n    # predict_y1 = pd.DataFrame(predict_y1)\r\n    # predict_y1 = np.array(predict_y1)\r\n    test_y1 = np.array(test_y1)\r\n    #test_y1 = test_y1.reshape(len(test_y1), t)\r\n    test_y1 = pd.DataFrame(test_y1)\r\n    c1 = (ymax - ymin) * test_y1 + 
ymin\r\n    # c2 = (265.5 - 93) * no_y1 + 93\r\n    # print('this is actual fault 1', test_y1)\r\n    d1 = (ymax - ymin) * predict_y1 + ymin\r\n\r\n    train_y = np.array(train_y)\r\n    #train_y = train_y.reshape(len(train_y),t)\r\n    #train_y = pd.DataFrame(train_y)\r\n\r\n    # test_y1.to_excel('/root/xulun/316/20180514/data/test_y%0.2f.xlsx' % (t), index_label=['1'], index=True)\r\n    test_y1 = np.array(test_y1)\r\n    # train_y = np.array(train_y)\r\n\r\n    error1 = []\r\n    maerror1 = []\r\n\r\n    for m in range(len(test_y1) - 1):\r\n        error1.append(test_y1[m] - predict_y1[m])\r\n        maerror1.append(abs(test_y1[m] - predict_y1[m]))\r\n    squaredError1 = []\r\n    for val in error1:\r\n        squaredError1.append(val * val)  # squared target-prediction differences\r\n    rmse = sqrt(sum(squaredError1) / len(squaredError1))\r\n    mae = (sum(maerror1) / len(maerror1))\r\n    print(\"RMSE1 = \", rmse)  # root mean square error (RMSE)\r\n    print(\"MAE1=\", mae)\r\n\r\n    # file = open('/root/xulun/316/20180514/data/mae and rmse%0.2f.txt' % (t), 'w')\r\n    # file.write('rmse:' +str(rmse) + ' ' +'mae:'+str(mae))\r\n    # file.close()\r\n    # # print(\"Round %f finished\" % (t + i / 100))\r\n    # ######################################### normal-data prediction\r\n    # # print('emergence data predict end')\r\n    # plt.plot(train_y[:], label='train label')\r\n    # plt.legend()\r\n    # plt.savefig('/root/xulun/316/20180514/data/data%0.2f.png' % (t ))\r\n    # plt.show()\r\n    # plt.plot(d1[:], label='predict')\r\n    # plt.plot(c1[:], label='actual')\r\n    # plt.legend(loc=\"lower left\")\r\n    # plt.legend()\r\n    # plt.savefig('/root/xulun/316/20180514/data/predictbig%0.2f.png' % (t ))\r\n    # plt.show()\r\n    # plt.plot(predict_y1, label='predict')\r\n    # plt.plot(test_y1, label='label')\r\n    # plt.legend(loc=\"lower left\")\r\n    # plt.legend()\r\n    # plt.savefig('/root/xulun/316/20180514/data/predictsmall%0.2f.png' % (t ))\r\n    # plt.show()\r\n    #\r\n    # plt.plot(history.history['loss'], '--m', label='train', )\r\n    # plt.plot(history.history['val_loss'], ':b', label='test')\r\n    # plt.legend()\r\n    # plt.savefig('/root/xulun/316/20180514/data/loss%0.2f.png' % (t ))\r\n    # plt.show()\r\n    # print('all data predict end')\r\n    '''\r\n    b2 = []\r\n    for k in c1:\r\n        if k >= 160:\r\n            b2.append([1])\r\n        else:\r\n            b2.append([0])\r\n    t1 = np.array(b2)\r\n    auc1 = roc_auc_score(t1, predict_y1)\r\n    print(\"Prediction accuracy (AUC) of this model:\", auc1)\r\n    fpr, tpr, _ = metrics.roc_curve(t1, predict_y1)\r\n    plt.subplots(figsize=(7, 5.5))\r\n    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc1)\r\n    plt.plot([0, 1], [0, 1], color='navy', lw=2)\r\n    plt.xlim([0.0, 1.0])\r\n    plt.ylim([0.0, 1.05])\r\n    plt.xlabel('False Positive Rate')\r\n    plt.ylabel('True Positive Rate')\r\n    plt.title('ROC Curve')\r\n    plt.legend(loc=\"lower right\")\r\n    plt.legend()\r\n    plt.savefig('/root/xulun/roc %0.2f.png' % (t + i / 100))\r\n    plt.show()\r\n    auc1 = roc_auc_score(t1, predict_y1)\r\n    print(\"Prediction accuracy (AUC) of this model:\", auc1)\r\n\r\n    '''\r\n    print(\"Round %f finished\" % (t ))\r\n\r\n############### ROC curve\r\n\r\n############0\r\n### all features; only dropout and L2 regularisation used against overfitting; earlier code is incomplete\r\n############8\r\n\r\n","sub_path":"fucku2.py","file_name":"fucku2.py","file_ext":"py","file_size_in_byte":31006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"192037843","text":"from .forms import ContactForm\nfrom django.views.generic.edit import FormView\nfrom django.urls import reverse_lazy\nfrom django.core.mail import EmailMessage, send_mail\n\n\nclass ContactView(FormView):\n    template_name = 'contact/contact.html'\n    form_class = ContactForm\n\n    def form_valid(self, form):\n        # This method is called when valid 
form data has been POSTed.\n        # It should return an HttpResponse.\n\n        message = '{name} / {email} said: \\n{subject}\\n\\n{content}'.format(\n            name=form.cleaned_data.get('name'),\n            email=form.cleaned_data.get('email'),\n            subject=form.cleaned_data.get('subject'),\n            content=form.cleaned_data.get('content') \n        )\n\n        email = EmailMessage(\n            form.cleaned_data.get('subject'),\n            message,\n            'WebFormContactSantMarys@gmail.com',\n            ['reddevil_cero@hotmail.com'],\n            headers={'Reply-To': form.cleaned_data.get('email')}\n        )\n        email.send()\n        return super(ContactView, self).form_valid(form)\n\n    def get_success_url(self):\n        return reverse_lazy('contact')+'?ok'\n","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"499684170","text":"\n############################################################\n# Parameters file for the main script\n# @bayronportilla-2016\n\n\n############################################################\n# Bulk properties of the system\n\nname      = 'Gliese581d'   # Name of the planet\nM_s       = 0.31     # Stellar mass, in solar masses \nM_p       = 7.10     # Planetary mass, in earth masses \nR         = 1.70     # Planetary radius, in Earth radii\nBmAC      = 5.0e-5   # Triaxiality, dimensionless\nrigidity  = 8.0e10   # Unrelaxed rigidity, in Pascals\ntau_M     = 50.0     # Maxwell time, in years\nalpha     = 0.2      # Andrade's exponent, dimensionless \ne         = 0.27     # Orbital eccentricity, dimensionless\na         = 0.218    # Semimajor axis, in astronomical units\nP         = 67.0     # Orbital period, in days\nE0        = 0.0      # Initial eccentric anomaly, in degrees\n\n\n\n############################################################\n# Integration parameters\n\nt_ini = 0.0    # Starting time for simulation, in years\nt_end = 5.0    # Ending time of the simulation, in years\nN     = 100.0  # (Default) Number of lines to write in the output file \n\n\n\n############################################################\n# Initial conditions\n\ntheta_ini = 100.0  # Initial sidereal angle, in degrees\np         = 2.51   # Initial resonance order, p = Omega_ini/n \na_ini     = a      # Initial semimajor axis, in astronomical units\ne_ini     = e      # Initial orbital eccentricity, dimensionless\n","sub_path":"fast/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"53857115","text":"#\n# Rare Decay Lines\n#\n# DiMuon stream\n\nfrom GaudiKernel.SystemOfUnits import *\n\nBd2KstarMuMu = {\n    'BUILDERTYPE' : 'StrippingBdToKstarMuMuConf',\n    'CONFIG' : { \n    'UseNoPIDsHadrons'          : True,\n    'Prescale_BdToKstarMuMu'    : 1.0,\n    'Postscale_BdToKstarMuMu'   : 1.0,\n    'Prescale_BdToKstarMuMuSS'  : 1.0,\n    'Postscale_BdToKstarMuMuSS' : 1.0,\n    'Prescale_BuToKMuMu'        : 1.0,\n    'Postscale_BuToKMuMu'       : 1.0,\n    'Prescale_BuToKMuMuSS'      : 1.0,\n    'Postscale_BuToKMuMuSS'     : 1.0,\n    'B_Comb_MassLow'      : 4600.0,\n    'B_Comb_MassHigh'     : 6000.0,\n    'B_MassLow'           : 4850.0,\n    'B_MassHigh'          : 5780.0,\n    'B_VertexCHI2'        : 6.0,\n    'B_IPCHI2'            : 16.0,\n    'B_DIRA'              : 0.9999,\n    'B_FlightCHI2'        : 121.0,\n    'B_Dau_MaxIPCHI2'     : 9.0, \n    'Dau_VertexCHI2'      : 12.0,\n    'Dau_DIRA'            : -0.9,\n    'Kstar_Comb_MassLow'  : 550.0,\n    'Kstar_Comb_MassHigh' : 2200.0,\n    'Kstar_MassLow'       : 600.0,\n    'Kstar_MassHigh'      : 2000.0,\n    'Kstar_MinIPCHI2'     : 0.0,\n    'Kstar_FlightChi2'    : 9.0, \n    'Kstar_Dau_MaxIPCHI2' : 9.0, \n    'Dimu_FlightChi2'     : 9.0, \n    'Dimu_Dau_MaxIPCHI2'  : 9.0, \n    'Track_CHI2nDOF'      : 5.0,\n    'Hadron_MinIPCHI2'    : 9.0, \n    'Muon_MinIPCHI2'      : 9.0,\n    
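# note (added, presumed intent): muon ID (IsMuon) is deliberately not\n    # required for this line; no-PID muon candidates are instead held to\n    # the MuonNoPIDs_PIDmu floor below\n    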
'Muon_IsMuon' : False,\n 'MuonNoPIDs_PIDmu' : 0.0\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\nB2XMuMu = {\n 'BUILDERTYPE' : 'B2XMuMuConf',\n 'CONFIG' : {\n 'BVXCHI2NDOF' : 8 # dimensionless\n , 'BIPCHI2' : 9.0 # dimensionless\n , 'BDIRA' : 0.999968 # dimensionless\n , 'BFDCHI2' : 100.0 # dimensionless\n , 'KpiMINIPCHI2' : 9.0 # dimensionless\n , 'KpiTRACKCHI2' : 4.0 # dimensionless \n , 'KpiVXCHI2NDOF' : 9.0 # dimensionless\n , 'MuonMINIPCHI2' : 16.0 # dimensionless\n , 'MuonTRACKCHI2' : 4.0 # dimensionless\n , 'MuonPID' : 0.0 # dimensionless\n , 'DimuonVXCHI2NDOF' : 9.0 # dimensionless\n , 'DimuonUPPERMASS' : 5050.0 # MeV\n , 'Pi0MINPT' : 800.0 # MeV\n , 'DplusLOWERMASS' : 1600.0 # MeV\n , 'DplusUPPERMASS' : 2300.0 # MeV \n , 'KstarplusWINDOW' : 300.0 # MeV \n , 'KsWINDOW' : 30.0 # MeV \n , 'LambdaWINDOW' : 30.0 # MeV \n , 'LongLivedPT' : 250.0 # MeV \n , 'LongLivedTau' : 2 # ps \n , 'K1_Comb_MassLow' : 720.0\n , 'K1_Comb_MassHigh' : 2450.0\n , 'K1_MassLow' : 750.0\n , 'K1_MassHigh' : 2400.0\n , 'K1_MinIPCHI2' : 4.0\n , 'K1_FlightChi2' : 25.0\n , 'K1_Dau_MaxIPCHI2' : 9.0,\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Bs2MuMuPhi\n# P. Schaak\nBs2MuMuPhi = {\n 'BUILDERTYPE' : 'Bs2MuMuPhiConf' ,\n 'CONFIG' : {\n 'BsIPCHI2' : 9.0 # dimensionless\n , 'BsLT' : 0.0002 # ns\n , 'BsVertexCHI2' : 40.0 # dimensionless\n , 'KaonPIDK' : 0 # dimensionless\n , 'KaonMINIPCHI2' : 9.0 # dimensionless\n , 'MuonMINIPCHI2' : 9.0 # dimensionless\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Same-sign searches\n# S. Redford & Wenbin\n\nB2XMuMuSS = {\n 'BUILDERTYPE' : 'B2XMuMuSSConf',\n 'CONFIG' : {\n 'MuonP' : 3000. , #MeV\n 'MuonPT' : 500. , #MeV\n 'MuonMINIPCHI2' : 5 , #adminensional\n 'PionP' : 2000. , #MeV\n 'PionPT' : 500. , #MeV\n 'PionMINIPCHI2' : 5 , #adminensional\n 'KaonP' : 2000. , #MeV\n 'KaonPT' : 500. , #MeV\n 'KaonMINIPCHI2' : 5 , #adminensional\n 'DimuonMass' : 0. , #MeV\n 'BVCHI2DOF' : 7 , #adminensional \n 'BDIRA' : 0.9998 , #adimensional\n 'BIPCHI2' : 30 , #adimensional\n 'BMassWin' : 400. 
, #MeV, mass window\n 'B2PiMuMuOSLinePrescale' : 1 ,\n 'B2PiMuMuOSLinePostscale' : 1 ,\n 'B2PiMuMuSSLinePrescale' : 1 ,\n 'B2PiMuMuSSLinePostscale' : 1 ,\n 'B2KMuMuOSLinePrescale' : 1 ,\n 'B2KMuMuOSLinePostscale' : 1 ,\n 'B2KMuMuSSLinePrescale' : 1 ,\n 'B2KMuMuSSLinePostscale' : 1\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# HyperCP lines\n# Vanya + Andrei\n\nHyperCP = {\n 'BUILDERTYPE' : 'StrippingHyperCPXConf',\n 'CONFIG' : {\n 'ProtonCuts' : ' ( TRCHI2DOF < 5 ) & ( 0 < PIDp - PIDpi ) & ( BPVIPCHI2() > 12 ) ' ,\n 'MuonCuts' : ' ( TRCHI2DOF < 5 ) & ISMUON & ( BPVIPCHI2() > 12 ) ' , \n 'PionCuts' : ' ( TRCHI2DOF < 5 ) & ( BPVIPCHI2() > 12 ) ' ,\n 'MuonCuts_forTau23Mu' : ' ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ISMUON & ( BPVIPCHI2() > 9 ) ' , \n 'PionCuts_forTau23Mu' : ' ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ( BPVIPCHI2() > 9 ) ' ,\n #\n 'SigmaCTau' : 5 * mm ,\n 'SigmaMass' : 250 * MeV ,\n #\n 'DsCTau' : 200 * micrometer ,\n 'Ds23PiMass' : 80 * MeV ,\n 'Ds2PhiPiMass' : 250 * MeV,\n #\n 'DplusCTau' : 200 * micrometer ,\n 'DplusMass' : 250 * MeV ,\n #\n # ``Global Event Cuts''\n #\n 'PrimaryVertices' : True ,\n #\n # Technicalities:\n #\n 'Preambulo' : [\n # shortcut for chi2 of vertex fit\n 'chi2vx = VFASPF(VCHI2) ' ,\n # shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" ,\n ## use the embedded cut for chi2(LifetimeFit)<9\n \"ctau = BPVLTIME ( 9 ) * c_light \" ,\n \"ctau_forDs = BPVLTIME ( 225 ) * c_light \" ,\n ## phi(1020) mass-window\n \"phi = in_range ( 920 * MeV , AM23 , 1120 * MeV )\"\n ] ,\n #\n # Prescales\n #\n 'SigmaPrescale' : 1.0 ,\n 'DplusPrescale' : 1.0 ,\n 'DsPrescale' : 1.0 ,\n 'Ds3PiPrescale' : 0.2\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Same-sign searches\n# Wenbin\n\nB2SameChargeMuon = {\n 'BUILDERTYPE' : 'StrippingB2SameChargeMuonConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1.\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Bu2LLK ( B+ -> ee K and mu mu K )\n# P. Koppenburg\n\nBu2LLK = {\n 'BUILDERTYPE' : 'Bu2LLKConf',\n 'CONFIG' : {\n 'BFlightCHI2' : 100 # adimentional\n , 'BDIRA' : 0.9995 # adimentional TIGHTENED\n , 'BIPCHI2' : 25 # adimentional \n , 'BVertexCHI2' : 16 # adimentional\n , 'DiLeptonPT' : 0 # MeV (not used)\n , 'DiLeptonFDCHI2' : 16 # adimentional\n , 'DiLeptonIPCHI2' : 9 # adimentional\n , 'LeptonIPCHI2' : 16 # adimentional TIGHTENED\n , 'LeptonPT' : 800 # MeV \n , 'KaonIPCHI2' : 16 # adimentional TIGHTENED\n , 'KaonPT' : 800 # MeV LOOSENED\n , 'UpperMass' : 5500 # MeV (Higher bound of signal box)\n , 'Bu2eeKLinePrescale' : 1\n , 'Bu2eeKLinePostscale' : 1\n , 'Bu2mmKLinePrescale' : 1\n , 'Bu2mmKLinePostscale' : 1\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : ['Dimuon']\n #{ 'Dimuon' : [ 'StrippingBu2LLK_mmLine' ] , 'Dielectron' : [ 'StrippingBu2LLK_eeLine' ] }\n }\n\n# B2MuMuMuMuLines\n# J. 
Albrecht\n\nB2MuMuMuMuLines = {\n 'BUILDERTYPE' : 'B2MuMuMuMuLinesConf',\n 'CONFIG' : {\n 'B2MuMuMuMuLinePrescale' : 1,\n 'B2MuMuMuMuLinePostscale' : 1,\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n# Stripping TriMuons\n# Vanya\n\nTriMuon = {\n 'BUILDERTYPE' : 'StrippingTriMuonsConf',\n 'CONFIG' : {\n 'GoodMuons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 5 ) & ( BPVIPCHI2 () > 6 ) \" ,\n 'GoodMuonsForBc' : \" ( BPVIPCHI2 () > 9 ) \" ,\n 'TightMuons' : \" ( PT > 1.9 * GeV ) & ( BPVIPCHI2 () > 25 ) \" ,\n #\n # Trigger\n #\n 'HLT' : None , \n #\n # Prescale\n #\n '3mu-Prescale' : 1.00 ,\n 'Bc-Prescale' : 1.00 ,\n 'Tau-Prescale' : 1.00 ,\n #\n # Technicalities:\n #\n 'Preambulo' : [\n ## shortcut for chi2 of vertex fit\n 'chi2vx = VFASPF(VCHI2) ' ,\n ## shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" ,\n \"ctau = BPVLTIME ( ) * c_light \" ,\n \"ctauBc = PDGM('B_c+') / M * BPVLTIME ( ) * c_light \" \n ]\n },\n 'WGs' : [ 'RD' ] ,\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n\n# Lines for phi mu mu / f0 mu mu\n# Liming Zhang\n\nBs2PhiMuMu = {\n 'BUILDERTYPE' : 'Bs2PhiMuMuLinesConf',\n 'WGs' : ['RD'],\n 'STREAMS' : ['Dimuon'],\n 'CONFIG' : {\n \"MINIPCHI2\" : 4.00 # adimensiional\n ,\"TRCHI2\" : 10.0 # adimensiional\n ,\"KaonPIDK\" : 1e-10 # adimensiional\n ,\"PhiPT\" : 100 # MeV\n ,\"MuonMINIPCHI2\" : 2.25 # adimensiional\n ,\"MuonPIDmu\" : -5.0 # adimensiional\n ,\"MuonTRCHI2\" : 10.0 # adimensiional\n ,\"BsMassWin\" : 250.0 # MeV\n ,\"BsVCHI2DOF\" : 8.0 # adimensiional\n ,\"BsDIRA\" : 0.9993 # adimensiional\n ,\"BsFDCHI2\" : 25.0 # adimensiional\n ,\"PionPIDK\" : 10.0 # adimensiional\n ,\"f0MassWin\" : 200.0 # MeV\n ,\"VCHI2\" : 10.0 # adimensiional\n ,\"BsIPCHI2\" : 36.0 # adimensiional\n ,\"DocaChi2Max\" : 20 #mm\n }\n }\n\n\nBs2MuMuLines = {\n 'BUILDERTYPE' : 'Bs2MuMuLinesConf',\n 'CONFIG' : {\n 'DefaultLinePrescale' : 1,\n 'DefaultLinePostscale' : 1,\n 'Bs2mmWideLinePrescale' : 1,\n 'Bs2mmWideLinePostscale' : 1,\n 'LooseLinePrescale' : 0.02,\n 'LooseLinePostscale' : 1,\n 'BuPrescale' : 1,\n 'BuPostscale' : 1,\n 'BsPrescale' : 1,\n 'BsPostscale' : 1,\n 'BdPrescale' : 1,\n 'BdPostscale' : 1,\n 'JPsiLinePrescale' : 1,\n 'JPsiLinePostscale' : 1,\n 'JPsiLooseLinePrescale' : 0.1,\n 'JPsiLooseLinePostscale' : 1,\n 'JPsiPromptLinePrescale' : 0.005,\n 'JPsiPromptLinePostscale': 1,\n 'MuIPChi2_loose' : 9,\n 'MuTrChi2_loose' : 10,\n 'BIPChi2_loose' : 64,\n 'BFDChi2_loose' : 100\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n# Diego + Xabier\n\nKS02MuMu = {\n 'BUILDERTYPE' : 'K0s2MuMuLinesConf',\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ],\n 'CONFIG' : {\n 'NoMuIDLinePrescale' : 1e-03,\n 'NoMuIDLinePostscale' : 1,\n 'K0s2mmLinePrescale' : 1,\n 'K0s2mmLinePostscale' : 1\n }\n}\n\nTau2PMuMu = {\n 'BUILDERTYPE' : 'StrippingTau2PMuMuConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : [ 'RD' ] ,\n 'CONFIG' : {\n #\n # Selection of basic muons and protons\n #\n 'GoodMuons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 ) & ( PIDmu > -5 ) & ( (PIDmu - PIDK) > 0 )\" ,\n 'GoodProtons' : \" ( PT > 300 * MeV ) & ( TRCHI2DOF < 3 )\" ,\n #\n # Prescale\n #\n 'pmumu-Prescale' : 1.00 ,\n #\n # Technicalities:\n #\n 'Preambulo' : [\n ## shortcut for chi2 of vertex fit\n 'chi2vx = VFASPF(VCHI2) ' ,\n ## shortcut for the c*tau\n \"from GaudiKernel.PhysicalConstants import c_light\" ,\n \"ctau = BPVLTIME ( ) * c_light \" \n ]\n #\n }\n }\n\nTau23Mu = {\n 'BUILDERTYPE' : 'Tau23MuLinesConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : ['RD'],\n 'CONFIG' : {\n 'TauPrescale' :1,\n 'TauPostscale' 
:1,\n 'Ds23PiTISPrescale' :0.01,\n 'Ds23PiTISPostscale' :1,\n 'Ds23PiPrescale' :0.005,\n 'Ds23PiPostscale' :1,\n 'Ds2PhiPiPrescale' :1,\n 'Ds2PhiPiPostscale' :1, \n 'Tau25Prescale' :1,\n 'Tau25Postscale' :1 \n }\n }\n\n\nLFVLines = {\n 'BUILDERTYPE' : 'LFVLinesConf' ,\n 'STREAMS' : [ 'Dimuon' ],\n 'WGs' : [ 'RD' ],\n 'CONFIG' : {\n 'Postscale' : 1,\n 'TauPrescale' : 1,\n 'Tau2MuMuePrescale' : 1,\n 'B2eMuPrescale' : 1,\n 'B2eePrescale' : 1,\n 'B2heMuPrescale' : 1 \n }\n }\n\n# J. Albrecht\n# Searches for highly displaced dimuons\nVeryDetachedJpsi = {\n 'BUILDERTYPE' : 'VDetJPsiLinesConf',\n 'STREAMS' : ['Dimuon'],\n 'WGs' : ['RD'],\n 'CONFIG' : {\n 'VDetJPsiLinePrescale' : 1,\n 'VDetJPsiLinePostscale' : 1,\n }\n }\n\n\nInflaton2MuMu = {\n 'BUILDERTYPE' : 'StrippingInflaton2MuMuConf' ,\n 'STREAMS' : [ 'Dimuon' ],\n 'WGs' : [ 'RD' ] ,\n 'CONFIG' : {\n 'Inflaton2MuMuLongPrescale' : 1,\n 'Inflaton2MuMuDownstreamPrescale' : 1,\n 'Inflaton2MuMuLongPostscale' : 1,\n 'Inflaton2MuMuDownstreamPostscale' : 1,\n 'Bu2InflatonKPrescale' : 1,\n 'Bu2InflatonKPostscale' : 1,\n 'Bs2InflatonPhiPrescale' : 1,\n 'Bs2InflatonPhiPostscale' : 1,\n 'Bd2InflatonKstPrescale' : 1,\n 'Bd2InflatonKstPostscale' : 1\n }\n }\n\n\nBuToK1MuMu = {\n 'BUILDERTYPE' : 'StrippingBuToK1MuMuConf' ,\n 'STREAMS' : [ 'Dimuon' ],\n 'WGs' : [ 'RD' ],\n 'CONFIG' : {\n 'UseNoPIDsHadrons' : True,\n 'Prescale_BuToK1MuMu' : 1.0,\n 'Postscale_BuToK1MuMu' : 1.0,\n 'Prescale_BuToK1MuMuSS' : 1.0,\n 'Postscale_BuToK1MuMuSS' : 1.0,\n 'B_Comb_MassLow' : 4600.0,\n 'B_Comb_MassHigh' : 6000.0,\n 'B_MassLow' : 4850.0,\n 'B_MassHigh' : 5780.0,\n 'B_VertexCHI2' : 6.0,\n 'B_IPCHI2' : 16.0,\n 'B_DIRA' : 0.014,\n 'B_FlightCHI2' : 121.0,\n 'B_Dau_MaxIPCHI2' : 9.0,\n 'Dau_VertexCHI2' : 12.0,\n 'Dau_DIRA' : -0.9,\n 'K1_Comb_MassLow' : 720.0,\n 'K1_Comb_MassHigh' : 2450.0,\n 'K1_MassLow' : 750.0,\n 'K1_MassHigh' : 2400.0,\n 'K1_MinIPCHI2' : 4.0,\n 'K1_FlightChi2' : 25.0,\n 'K1_Dau_MaxIPCHI2' : 9.0,\n 'Dimu_FlightChi2' : 81.0,\n 'Dimu_Dau_MaxIPCHI2' : 9.0,\n 'Track_CHI2nDOF' : 5.0,\n 'Hadron_MinIPCHI2' : 9.0,\n 'Muon_MinIPCHI2' : 9.0,\n 'Muon_IsMuon' : False,\n 'MuonNoPIDs_PIDmu' : 0.0\n }\n }\n\n\nZ02TauTauProng = {\n 'BUILDERTYPE' : 'Z02TauTauProngConf',\n 'CONFIG' : {\n 'Z2TauTau_Prong_LinePrescale' : 1.0,\n 'Z2TauTau_Prong_LinePostscale' : 1.0,\n 'TAU_MASS_LOW' : '600.0', # MeV/c2\n 'TAU_MASS_HIGH' : '1600.0', # MeV/c2\n 'Z_MASS_LOW' : '45000.0', # MeV/c2\n 'PT_HAD_MIN' : '1000', # MeV/c\n 'TRACKCHI2_HAD_MAX' : '10', # dl\n 'PT_TAU_MIN' : '8000', # MeV/c\n 'VCHI2_TAU_MAX' : '20' # dl\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'EW' ]\n }\n\n# includes Bs -> mu mu gamma\n\nB2MuMuX = {\n 'BUILDERTYPE' : 'B2MuMuXConf' ,\n 'STREAMS' : [ 'Dimuon' ],\n 'WGs' : [ 'RD' ] ,\n 'CONFIG' : {\n 'MuonsLoose': {\n 'ISMUON' : True,\n 'TRCHI2DOF_MAX' : 5,\n 'MIPCHI2DV_MIN' : 4,\n 'InAccMuon' : True\n },\n 'MuonsNoPid': {\n 'ISMUON' : False,\n 'TRCHI2DOF_MAX' : 5,\n 'MIPCHI2DV_MIN' : 4,\n 'InAccMuon' : True\n },\n 'DiMuons': {\n 'AMAXDOCA_MAX' : '0.5*mm',\n 'ASUMPT_MIN' : '1000*MeV',\n 'VCHI2DOF_MAX' : 16,\n 'BPVVDCHI2_MIN' : 16,\n 'BPVDIRA_MIN' : 0.0\n },\n 'DiMuonsLowM': {\n 'AMAXDOCA_MAX' : '0.5*mm',\n 'ASUMPT_MIN' : '1000*MeV',\n 'VCHI2DOF_MAX' : 16,\n 'BPVVDCHI2_MIN' : 16,\n 'BPVDIRA_MIN' : 0.0\n },\n 'DiMuonsHighM': {\n 'AMAXDOCA_MAX' : '0.5*mm',\n 'ASUMPT_MIN' : '1000*MeV',\n 'VCHI2DOF_MAX' : 16,\n 'BPVVDCHI2_MIN' : 0,\n 'BPVDIRA_MIN' : 0\n },\n 'DiMuonsCorrM': {\n 'AMAXDOCA_MAX' : '0.3*mm',\n 'ASUMPT_MIN' : '2000*MeV',\n 'VCHI2DOF_MAX' : 8,\n 'BPVVDCHI2_MIN' : 225,\n 
'BPVIPCHI2_MAX' : 17,\n 'BPVDIRA_MIN' : 0.4,\n 'CORRM_MIN' : '4800*MeV',\n 'CORRM_MAX' : '6500*MeV',\n 'PTMU' : '900*MeV'\n },\n 'Photons': {\n 'PT_MIN' : '1300*MeV'\n },\n \"V0s\": { # Cuts for rho, K*, phi\n 'MASS_MIN' : {'KST':'700*MeV','RHO':'600*MeV','PHI':'900*MeV'},\n 'MASS_MAX' : {'KST':'1100*MeV','RHO':'1000*MeV','PHI':'1100*MeV'},\n 'DAUGHTERS' : {'PT_MIN':'100*MeV','P_MIN':'2000*MeV',\n 'MIPCHI2DV_MIN' : 4, 'TRCHI2DOF_MAX' : 4},\n 'AMAXDOCA_MAX' : '0.5*mm',\n 'VCHI2DOF_MAX' : 16,\n 'BPVVDCHI2_MIN' : 16,\n 'BPVDIRA_MIN' : 0,\n 'ASUMPT_MIN' : '1000*MeV'\n },\n \"B2X3BODY\" : {\n 'SUMPT_MIN' : '5000*MeV',\n 'VCHI2DOF_MAX' : 10,\n 'BPVIPCHI2_MAX' : 20,\n 'BPVVDCHI2_MIN' : 25,\n 'BPVDIRA_MIN' : 0.4, #0.0\n 'MASS_MIN' : {'B':'4300*MeV'},\n 'MASS_MAX' : {'B':'6400*MeV'},\n },\n \"B2X3BODYLOWM\" : {\n 'SUMPT_MIN' : '3900*MeV',\n 'VCHI2DOF_MAX' : 10,\n 'BPVIPCHI2_MAX' : 25,\n 'BPVVDCHI2_MIN' : 25,\n 'BPVDIRA_MIN' : 0.4, #0.0\n 'MASS_MIN' : {'J':'2600*MeV'},\n 'MASS_MAX' : {'J':'3600*MeV'},\n },\n \"B2X3BODYHIGHM\" : {\n 'SUMPT_MIN' : '5000*MeV',\n 'VCHI2DOF_MAX' : 10,\n 'BPVDIRA_MIN' : 0.0,\n 'MASS_MIN' : {'Y':'9000*MeV'},\n 'MASS_MAX' : {'Y':'10000*MeV'},\n },\n \"B2X4BODY\" : {\n 'SUMPT_MIN' : '5000*MeV',\n 'VCHI2DOF_MAX' : 10,\n 'BPVIPCHI2_MAX' : 25,\n 'BPVVDCHI2_MIN' : 25,\n 'BPVDIRA_MIN' : 0.0,\n 'MASS_MIN' : {'B':'4300*MeV'},\n 'MASS_MAX' : {'B':'6300*MeV'}\n },\n \"Prescales\" : {\n 'OS' : 1.0,\n 'SS' : 0.5\n },\n 'GECNTrkMax' : 500}\n }\n\n \n\n\n\nBd2MuMuKstarBDT = {\n 'BUILDERTYPE' : 'Bd2MuMuKstarBDTConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 200. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 1. ,\n 'ElectronPIDepi' : -5. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 20. , # MeV \n 'eeMaxMass' : 5200. , # MeV\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 250. , # MeV\n 'PionP' : 2000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK>-10, i.e., PIDK<10 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 150. , # MeV\n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 16. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BDIRA' : 0.999,\n 'BDTCutValue' : -0.98 ,\n 'BDTWeightsFile' : '$TMVAWEIGHTSROOT/data/Bd2eeKstar_BDTG_v1r0.xml'\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\nBd2MuMuKPiBDT = {\n 'BUILDERTYPE' : 'Bd2MuMuKstarBDTConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 200. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 1. ,\n 'ElectronPIDepi' : -5. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 20. , # MeV \n 'eeMaxMass' : 5200. , # MeV\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 250. , # MeV\n 'PionP' : 2000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK>-10, i.e., PIDK<10 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 1400. , # MeV\n #\n 'BComMassW' : 550. , # MeV\n 'BVertexCHI2' : 16. , # /ndf\n 'BMassW' : 500. , # MeV \n 'BDIRA' : 0.999,\n 'BDTCutValue' : -0.97 ,\n 'BDTWeightsFile' : '$TMVAWEIGHTSROOT/data/Bd2eeKstar_BDTG_v1r0.xml'\n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Dimuon' ]\n }\n\n\n#\n# Rare Decay Lines\n# \n# Hadronic lines\n\n# N. 
Serra, Vava\n\nB2XTau = {\n 'BUILDERTYPE' : 'B2XTauConf',\n 'STREAMS' : ['Bhadron'],\n 'WGs' : ['RD'] ,\n 'CONFIG' : {\n 'PT_HAD_ALL_FINAL_STATE' : '200', # MeV\n 'P_HAD_ALL_FINAL_STATE' : '2000', # MeV\n 'IPCHI2_HAD_ALL_FINAL_STATE' : '9', # dimensionless\n 'TRACKCHI2_HAD_ALL_FINAL_STATE' : '4', # dimensionless\n #\n 'PT_MU' : '800', # MeV\n 'P_MU' : '6000', # MeV \n 'IPCHI2_MU' : '16', # MeV \n #\n 'PT_B_TT' : '5000', # MeV\n 'PT_B_TT_HIGH' : '10000', # MeV \n 'PT_B_TM' : '2000', # MeV\n 'PT_B_TM_HIGH' : '7500', # MeV \n 'VCHI2_B' : '100', # dimensionless\n 'FDCHI2_B' : '144', # dimensionless\n 'DIRA_B' : '0.99', # dimensionless\n 'MASS_LOW_B' : '2000', # MeV \n 'MASS_HIGH_B' : '5750', # MeV\n 'MCOR_LOW_B' : '4000', # MeV\n 'MCOR_HIGH_B' : '7000', # MeV\n 'MIPCHI2_B' : '150', # dimensionless \n 'MIPCHI2_B_HIGH' : '36', # dimensionless \n #\n 'PT_TAU' : '1500', # MeV\n 'VCHI2_TAU' : '20', # dimensionless\n 'IPCHI2_TAU' : '9', # dimensionless\n 'FDCHI2_TAU' : '144', # dimensionless\n 'MASS_LOW_TAU' : '700', # MeV\n 'MASS_HIGH_TAU' : '1800', # MeV\n #\n 'PT_B_CHILD_BEST' : '1800', # MeV\n 'P_B_CHILD_BEST' : '10000',# MeV\n 'IPCHI2_B_CHILD_BEST' : '16', # dimensionless\n 'PT_B_TAU_CHILD_BEST' : '3000', # MeV\n 'IPCHI2_B_TAU_CHILD_BEST' : '16', # dimensionless\n #\n 'MASS_LOW_D' : '1800', # MeV\n 'MASS_HIGH_D' : '2030', # MeV \n #\n 'B2TauTau_TOSLinePrescale' : 1,\n 'B2TauTau_TOSLinePostscale' : 1,\n 'B2DD_TOSLinePrescale' : 1,\n 'B2DD_TOSLinePostscale' : 1,\n 'B2TauMu_TOSLinePrescale' : 1,\n 'B2TauMu_TOSLinePostscale' : 1,\n 'B2DMu_TOSLinePrescale' : 0.2,\n 'B2DMu_TOSLinePostscale' : 1,\n 'B2TauTau_TISLinePrescale' : 1,\n 'B2TauTau_TISLinePostscale' : 1,\n 'B2DD_TISLinePrescale' : 1,\n 'B2DD_TISLinePostscale' : 1,\n 'B2TauMu_TISLinePrescale' : 1,\n 'B2TauMu_TISLinePostscale' : 1,\n 'B2DMu_TISLinePrescale' : 0.2,\n 'B2DMu_TISLinePostscale' : 1.\n }\n }\n\n# \n# Rare Decay Lines \n#\n# Radiative stream\n\n#\n# b -> X gamma \n\nBeauty2XGamma = {\n 'BUILDERTYPE' : 'Beauty2XGamma',\n 'CONFIG' : {\n # Cuts made on all charged input particles in all lines\n \"ALL\" : { 'TRCHI2DOF_MAX' : 4,\n 'PT_MIN' : '250*MeV',\n 'P_MIN' : '1000*MeV',\n 'MIPCHI2DV_MIN' : 4 },\n # Cuts made on the photon\n \"GAMMA\" : { 'PT_MIN' : '2500*MeV',\n 'CL_MIN' : 0.25\n },\n # Cuts made on all K shorts\n \"KS0\" : { 'PT_MIN' : '250*MeV',\n 'BPVVDCHI2_MIN' : 36,\n 'MM_MIN' : '467.*MeV',\n 'MM_MAX' : '527.*MeV' },\n # Cuts made on all pi0's\n \"Pi0\" : { 'PT_MIN' : '1200*MeV',\n 'P_MIN' : '10000*MeV',\n 'CHILDCL1_MIN' : 0.25,\n 'CHILDCL2_MIN' : 0.25 },\n # Cuts made on all B's and Lb's used in all lines\n \"B2X\" : { 'SUMPT_MIN' : '4000*MeV',\n 'VCHI2DOF_MAX' : 15,\n 'BPVIPCHI2_MAX' : 15,\n 'BPVLTIME_MIN' : '0.2*ps',\n 'BPVDIRA_MIN' : 0.9998,\n 'AM_MIN' : '4000*MeV',\n 'AM_MAX' : '7000*MeV',\n 'B2CBBDT_MIN' : 0.00\n },\n # Cuts for rho, K*, phi, omega\n \"HH\": { 'MASS_WINDOW' : {'KST':'150*MeV',\n 'RHO':'250*MeV',\n 'PHI':'15*MeV',\n 'OMEGA':'30*MeV'},\n 'DAUGHTERS' : {'PT_MIN':'100*MeV','P_MIN':'2000*MeV'},\n 'LAMBDADAUGHTERS' : {'PT_MIN':'300*MeV','P_MIN':'2500*MeV'},\n 'AMAXDOCA_MAX' : '0.5*mm',\n 'VCHI2DOF_MAX' : 16,\n 'BPVVDCHI2_MIN' : 16,\n 'BPVDIRA_MIN' : 0.0,\n 'ASUMPT_MIN' : '1000*MeV',\n 'pP_MIN' : '10000*MeV' # for pH only (obviously)\n },\n # Cuts for PiPiPi0 for omega decay\n # \"HHH\": { 'MASS_WINDOW' : {'OMEGA': '30*MeV'},\n # 'KDAUGHTERS' : {'PT_MIN':'100*MeV',\n # 'P_MIN':'2000*MeV',\n # 'PIDK_MIN':'-5'},\n # 'PiDAUGHTERS' : {'PT_MIN':'100*MeV',\n # 'P_MIN':'2000*MeV',\n # 'PIDK_MAX':'10'},\n # 
'pDAUGHTERS' : {'PT_MIN':'100*MeV',\n # 'P_MIN':'2000*MeV',\n # 'PIDp_MIN':'-5'},\n # 'AMAXDOCA_MAX' : '0.40*mm',\n # 'VCHI2DOF_MAX' : 8,\n # 'BPVVDCHI2_MIN' : 16, \n # 'BPVDIRA_MIN' : 0.98,\n # 'ASUMPT_MIN' : '1250*MeV',\n # 'MIPCHI2DV_MIN' : 0.0,\n # 'BPVVDRHO_MIN' : '0.1*mm',\n # 'BPVVDZ_MIN' : '2.0*mm',\n # 'PTMIN1' : '300*MeV'},\n # PID cuts\n \"PID\" : { 'P' : {'PIDp_MIN' : 0},\n 'PI' : {'PIDK_MAX' : 20},\n 'K' : {'PIDK_MIN' : -10} },\n \"Prescales\" : {},\n \"GECNTrkMax\" : 500}, \n 'WGs' : ['RD'],\n 'STREAMS' : [ 'Radiative' ] \n }\n\n\n#\n# Dielectron\n\nBd2JpsieeKstarBDT = {\n 'BUILDERTYPE' : 'Bd2eeKstarBDTConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 200. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 1. ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 2200. , # MeV \n 'eeMaxMass' : 4200. , # MeV\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 250. , # MeV\n 'PionP' : 2000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 150. , # MeV\n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 16. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BDIRA' : 0.999,\n 'BDTCutValue' : -0.98 ,\n 'BDTWeightsFile' : '$TMVAWEIGHTSROOT/data/Bd2eeKstar_BDTG_v1r0.xml' \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ]\n }\n\n\n\nBd2JpsieeKstar = {\n 'BUILDERTYPE' : 'Bd2eeKstarConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 300. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 2.25 ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 2200. , # MeV \n 'eeMaxMass' : 4200. , # MeV\n 'eeFD' : 1. , # mm\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 300. , # MeV\n 'PionP' : 3000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 130. , # MeV\n 'KstarIPCHI2' : 1. , \n 'KstarFDCHI2' : 1. , \n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 9. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BIPCHI2' : 64. , # pointing\n 'BFDCHI2' : 9. , \n 'BDIRA' : 0.999, \n 'SumIPSCut' : \" & (SUMTREE(((ABSID=='K+') | (ABSID=='pi-') | (ID=='e+') | (ID=='e-')),sqrt(BPVIPCHI2()))>15)\" \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ]\n }\n\n\nBd2eeKstarBDT = {\n 'BUILDERTYPE' : 'Bd2eeKstarBDTConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 200. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 1. ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 20. , # MeV \n 'eeMaxMass' : 1500. , # MeV\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 250. , # MeV\n 'PionP' : 2000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 150. , # MeV\n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 16. , # /ndf\n 'BMassW' : 1000. 
, # MeV \n 'BDIRA' : 0.999,\n 'BDTCutValue' : -0.98 ,\n 'BDTWeightsFile' : '$TMVAWEIGHTSROOT/data/Bd2eeKstar_BDTG_v1r0.xml' \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ]\n }\n\nBd2eeKstar = {\n 'BUILDERTYPE' : 'Bd2eeKstarConf',\n 'CONFIG' : {\n 'LinePrescale' : 1. ,\n 'LinePostscale' : 1. ,\n #\n 'ElectronPT' : 300. , # MeV\n 'ElectronTrackCHI2pNDOF' : 5. ,\n 'ElectronIPCHI2' : 2.25 ,\n 'ElectronPIDepi' : -2. , \n #\n 'eeVertexCHI2' : 16. , \n 'eeMinMass' : 20. , # MeV \n 'eeMaxMass' : 1500. , # MeV\n 'eeFD' : 1. , # mm\n #\n 'KaonPT' : 400. , # MeV \n 'KaonP' : 3000. , # MeV \n 'KaonTrackCHI2pNDOF' : 5. , \n 'KaonIPCHI2' : 4. , \n 'KaonPIDKpi' : -5. , \n #\n 'PionPT' : 300. , # MeV\n 'PionP' : 3000. , # MeV \n 'PionTrackCHI2pNDOF' : 5. , \n 'PionIPCHI2' : 4. , \n 'PionPIDpiK' : 10. , # PIDpi-PIDK > -5, i.e., PIDK<5 \n #\n 'KstarVertexCHI2' : 16. , \n 'KstarMassW' : 130. , # MeV\n 'KstarIPCHI2' : 1. , \n 'KstarFDCHI2' : 1. , \n #\n 'BComMassW' : 1200. , # MeV\n 'BVertexCHI2' : 9. , # /ndf\n 'BMassW' : 1000. , # MeV \n 'BIPCHI2' : 64. , # pointing\n 'BFDCHI2' : 9. , \n 'BDIRA' : 0.999, \n 'SumIPSCut' : \" & (SUMTREE(((ABSID=='K+') | (ABSID=='pi-') | (ID=='e+') | (ID=='e-')),sqrt(BPVIPCHI2()))>15)\" \n },\n 'WGs' : [ 'RD' ],\n 'STREAMS' : [ 'Radiative' ]\n }\n","sub_path":"Stripping/Phys/StrippingSettings/python/StrippingSettings/PreStripping18/LineConfigDictionaries_RD.py","file_name":"LineConfigDictionaries_RD.py","file_ext":"py","file_size_in_byte":35278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"604800930","text":"import os\n\nimport pandas as pd\nimport numpy as np\n\n# import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\n\n\n#################################################\n# Database Setup\n#################################################\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///db/femadata.sqlite\"\ndb = SQLAlchemy(app)\n# engine = create_engine(\"sqlite:///data/femadata.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\n# Base.prepare(db.engine, reflect=True)\nBase.prepare(db.engine, reflect=True)\n\n# Save references to each table\n# Samples_Metadata = Base.classes.sample_metadata\n# Samples = Base.classes.samples\nAll_Events = Base.classes.all_events\nEvent_Deaths = Base.classes.events_deaths\nNmbr_Events = Base.classes.nmbr_events\nState_Abbrv = Base.classes.state_abbrv\n# print(Nmbr_Events)\n# print(db.session.query(Nmbr_Events.STATE).all())\n\n@app.route(\"/\")\ndef index():\n print(\"This should print in the console\")\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n@app.route(\"/api/disasters/\", methods=['GET'])\ndef disasters():\n # x = Base.classes.nmbr_events\n luState = request.args.get('state')\n\n print(db.session.query(Nmbr_Events.STATE).all())\n sel = [Nmbr_Events.STATE, Nmbr_Events.NMBR_EVENTS]\n # session = Session(engine)\n if not luState: # - an empty param luState will evaluate to True\n results = db.session.query(*sel).all() #.order_by(Nmbr_Events.STATE.desc()).all()\n else:\n # The must be a state to match on.\n results = db.session.query(*sel).filter(Nmbr_Events.STATE == luState).all()\n\n\n # session.close()\n\n print(results)\n\n all_results = []\n for s, e in 
results:\n results_dict = {}\n results_dict[\"state\"] = s\n results_dict[\"number_events\"] = e\n all_results.append(results_dict)\n \n return jsonify(all_results)\n\n\n \n\n@app.route(\"/pieinfo\" , methods=['GET'])\ndef pieinfo():\n #SELECT EVENT_TYPE, count(EVENT_TYPE) as NBR_EVENT FROM all_events\n #GROUP BY EVENT_TYPE;\n\n # Get the passed in state\n luState = request.args.get(\"state\")\n\n sel = [\n All_Events.EVENT_TYPE\n ]\n \n if not luState: # evaluates to true if luState is empty\n print('pieinfo: Is Empty')\n ttleventcounts = db.session.query(*sel, func.count(All_Events.EVENT_TYPE)).group_by(All_Events.EVENT_TYPE).all()\n else:\n print('pieinfo: NOT Empty')\n ttleventcounts = db.session.query(*sel, func.count(All_Events.EVENT_TYPE)).filter(All_Events.STATE == luState).group_by(All_Events.EVENT_TYPE).all()\n\n\n\n print(ttleventcounts)\n pieinfo = []\n for e, c in ttleventcounts:\n pie_dict = {}\n pie_dict[\"EVENT_TYPE\"] = e\n pie_dict[\"NBR_EVENT\"] = c\n pieinfo.append(pie_dict)\n \n return jsonify(pieinfo)\n\n@app.route(\"/lineinfo\", methods=['GET'])\ndef lineinfo():\n #SELECT YEAR, EVENT_TYPE, (SUM(DEATHS_DIRECT) + sum(DEATHS_INDIRECT)) as DEATHS FROM all_events\n #GROUP BY YEAR, EVENT_TYPE;\n\n \n luState = request.args.get('state')\n\n\n sel = [\n All_Events.YEAR,\n All_Events.EVENT_TYPE\n ]\n\n if not luState: # - an empty param luState will evaluate to True\n deathinfo = db.session.query(*sel, (func.sum(All_Events.DEATHS_DIRECT) + func.sum(All_Events.DEATHS_INDIRECT)).label(\"DEATHS\")).group_by(All_Events.YEAR, All_Events.EVENT_TYPE)\n else:\n deathinfo = db.session.query(*sel, (func.sum(All_Events.DEATHS_DIRECT) + func.sum(All_Events.DEATHS_INDIRECT)).label(\"DEATHS\")).filter(All_Events.STATE == luState).group_by(All_Events.YEAR, All_Events.EVENT_TYPE)\n\n print(deathinfo)\n\n lineinfo = []\n for year, t, d in deathinfo:\n deaths_dict = {}\n deaths_dict[\"YEAR\"] = year\n deaths_dict[\"EVENT_TYPE\"] = t\n deaths_dict[\"DEATHS\"] = d\n lineinfo.append(deaths_dict)\n\n # return jsonify(lineinfo)\n return jsonify(lineinfo)\n \n@app.route(\"/jnb1\")\ndef jnb1():\n print(\"This should return NOAA_Data.html\")\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"NOAA_Data.html\")\n \n@app.route(\"/jnb2\")\ndef jnb2():\n print(\"This should return noaa_limited_load.html\")\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"noaa_limited_load.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"NewProjectFiles/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"362362432","text":"def KiemTra(string):\n for i in range(0, len(string)):\n for j in range(i+1,len(string)):\n if int(string[j]) < int(string[i]):\n return 0\n return -1\n#=======================================\ndef main():\n string = input('Nhập vào một dãy số: ')\n if KiemTra(string) == 0:\n print('Dãy số không tăng dần')\n else:\n print('Dãy số tăng dần')\n#=======================================\nif __name__ == '__main__':\n main()\n\n","sub_path":"Bai60.py","file_name":"Bai60.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"325439885","text":"import numpy as np\nimport random\n\n\n# Find homography which map @x2 to @x1.\n#\n# Inputs:\n# @x1: point set 1. Point format: [y, x, 1]\n# @x2: point set 2. 
Point format: [y, x, 1]\n#\n# Return: Fundamental matrix from @x2 to @x1.\ndef GetFundamental(x1, x2):\n # (x1,x2 : n*3 arrays) using the 8 point algorithm\n n = x1.shape[0]\n if x2.shape[0] != n:\n raise ValueError(\"Number of points don't match.\")\n \n # build matrix for equations\n A = np.zeros((n,9))\n for i in range(n):\n A[i] = [x1[i,0]*x2[i,0], x1[i,0]*x2[i,1], x1[i,0]*x2[i,2],\n x1[i,1]*x2[i,0], x1[i,1]*x2[i,1], x1[i,1]*x2[i,2],\n x1[i,2]*x2[i,0], x1[i,2]*x2[i,1], x1[i,2]*x2[i,2] ]\n \n # compute linear least square solution\n U, S, Vt = np.linalg.svd(A)\n F = Vt[-1].reshape(3,3)\n \n # constrain F\n # make rank 2 by zeroing out last singular value\n U, S, Vt = np.linalg.svd(F)\n S[2] = 0\n F = np.dot(U, np.dot(np.diag(S), Vt))\n \n return F / F[2,2]\n\n# Implement RANSAC.\n#\n# Inputs:\n# @p1: point set 1.\n# @p2: point set 2.\n#\n# Return: Best fit fundamental matrix from @p2 to @p1.\ndef RANSAC(p1, p2):\n assert isinstance(p1, np.ndarray) and isinstance(p2, np.ndarray)\n assert p1.shape[0] == p2.shape[0]\n\n # Sample @n_samples pairs in each iteration.\n n_samples = int(p1.shape[0] * 0.1)\n # Total @n_iters iterations.\n outlier_ratio = 0.05\n n_iters = int(np.log(1 - 0.99) / np.log(1 - (1-outlier_ratio)**n_samples))\n inlier_threshold = 1e-3\n best_Fundamental = None\n best_inlier_ratio = 0.0\n best_inlier_idx = None\n for _ in range(n_iters):\n # Get sample pairs.\n rand_idx = random.sample(range(0, p1.shape[0]), n_samples)\n tmp_p1, tmp_p2 = p1[rand_idx], p2[rand_idx]\n\n # Get Fundamental\n F = GetFundamental(tmp_p1, tmp_p2)\n\n # Compute |x^T F x|^2 for all correspondences \n error = (np.diag(p1 @ F @ p2.T)) ** 2\n # Use square error.\n # error = np.sqrt(np.sum((p1 - map_p2) ** 2, axis=1))\n # Calculate inlier ratio according to the threshold.\n inlier_idx = np.where(error < inlier_threshold)\n inlier_num = len(inlier_idx[0])\n inlier_ratio = inlier_num / p1.shape[0]\n if inlier_ratio >= best_inlier_ratio:\n best_inlier_ratio = inlier_ratio\n best_Fundamental = F\n best_inlier_idx = inlier_idx\n return best_Fundamental, best_inlier_idx","sub_path":"HW04/ransac_F.py","file_name":"ransac_F.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"209538731","text":"# -*- encoding: utf-8 -*-\nimport pytest\nfrom django.urls.base import reverse\nimport xml.etree.ElementTree as ET\n\nfrom bpp.models import Rekord\n\n\n@pytest.mark.django_db\ndef test_proper_content_type(client):\n url = reverse(\"bpp:oai\")\n url += \"/oai-pmh-repository.xml?verb=Identify\"\n res = client.get(url)\n assert \"xml\" in res[\"Content-type\"]\n\n\ndef test_identify(wydawnictwo_ciagle, client):\n identify = reverse(\"bpp:oai\") + \"?verb=Identify\"\n res = client.get(identify)\n assert res.status_code == 200\n\n\n@pytest.fixture\ndef ksiazka(wydawnictwo_zwarte, ksiazka_polska) -> \"Wydawnictwo_Zwarte\":\n wydawnictwo_zwarte.charakter_formalny = ksiazka_polska\n wydawnictwo_zwarte.save()\n return wydawnictwo_zwarte\n\n\n@pytest.fixture\ndef artykul(wydawnictwo_ciagle, artykul_w_czasopismie):\n wydawnictwo_ciagle.charakter_formalny = artykul_w_czasopismie\n wydawnictwo_ciagle.save()\n return wydawnictwo_ciagle\n\n\ndef toXML(response):\n return ET.fromstring(response.content.decode(\"utf-8\"))\n\n\ndef test_listRecords(ksiazka, client):\n listRecords = reverse(\"bpp:oai\") + \"?verb=ListRecords&metadataPrefix=oai_dc\"\n res = client.get(listRecords)\n\n responseXml = 
ET.fromstring(res.content.decode(\"utf-8\"))\n assert \"Tytul Wydawnictwo\" in toXML(res)[2][0][1][0][1].text\n\n\ndef test_listRecords_status_korekty(\n ksiazka, client, uczelnia, przed_korekta, po_korekcie\n):\n uczelnia.ukryj_status_korekty_set.create(status_korekty=przed_korekta)\n\n ksiazka.status_korekty = przed_korekta\n ksiazka.save()\n\n listRecords = reverse(\"bpp:oai\") + \"?verb=ListRecords&metadataPrefix=oai_dc\"\n res = toXML(client.get(listRecords))\n\n with pytest.raises(IndexError):\n assert \"Tytul Wydawnictwo\" in res[2][0][1][0][1].text\n\n ksiazka.status_korekty = po_korekcie\n ksiazka.save()\n listRecords = reverse(\"bpp:oai\") + \"?verb=ListRecords&metadataPrefix=oai_dc\"\n res = toXML(client.get(listRecords))\n\n assert \"Tytul Wydawnictwo\" in res[2][0][1][0][1].text\n\n\ndef test_listRecords_no_queries_zwarte(ksiazka, client, django_assert_max_num_queries):\n listRecords = reverse(\"bpp:oai\") + \"?verb=ListRecords&metadataPrefix=oai_dc\"\n with django_assert_max_num_queries(5):\n res = client.get(listRecords)\n assert \"Tytul Wydawnictwo\" in toXML(res)[2][0][1][0][1].text\n\n\ndef test_listRecords_no_queries_ciagle(artykul, client, django_assert_max_num_queries):\n listRecords = reverse(\"bpp:oai\") + \"?verb=ListRecords&metadataPrefix=oai_dc\"\n with django_assert_max_num_queries(5):\n res = client.get(listRecords)\n assert \"Tytul Wydawnictwo\" in toXML(res)[2][0][1][0][1].text\n","sub_path":"src/bpp/tests/test_views/test_oai.py","file_name":"test_oai.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"356879647","text":"#!/usr/bin/python\n# description :A worker process map/reduce word from txt files.\n# author :Yuemin Li\n# python_version :2.7.6\n\nimport socket\nimport operator\nimport argparse\nimport threading\nimport pickle\nimport logging\n\nwordcount = {}\nlock = threading.Lock()\n\nclass master(threading.Thread):\n '''master thread updating a global wordcount dict.'''\n\n def __init__(self, server_ip, server_port):\n super(master, self).__init__()\n self.serversocket = None\n self.server_ip = server_ip\n self.server_port = server_port\n global wordcount\n \n def run(self):\n \n self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.serversocket.bind((self.server_ip, self.server_port))\n self.serversocket.listen(5)\n logging.info(\"server is listening on %s:%i...\" % (self.server_ip, \n self.server_port))\n print(\"server is listening on %s:%i...\" % (self.server_ip, \n self.server_port))\n while True:\n connection, client_addr = self.serversocket.accept()\n buff = connection.recv(64)\n word_t = pickle.loads(buff)\n logging.debug(\"received word count tuple: \" + str(word_t))\n\n if word_t == \"###clientclose###\":\n print(\"thread is terminating...\")\n break\n \n with lock:\n if word_t[0] in wordcount:\n wordcount[word_t[0]] += word_t[1]\n else:\n wordcount[word_t[0]] = word_t[1]\n \n self.serversocket.close()\n logging.info(\"Master server socket closing...\")\n print(\"Master server socket closing...\")\n \ndef sorting(wordcount, top_num = 10):\n '''return a sorted tuple of word count dictionary by its value.'''\n sorted_tuple = sorted(wordcount.items(), key = operator.itemgetter(1))\n top_words = sorted_tuple[-1: -(top_num+1): -1]\n return top_words\n \n\ndef main():\n logging.basicConfig(filename='master.log',level=logging.INFO)\n parser = argparse.ArgumentParser(description='This is the master process.')\n 
parser.add_argument('server_ip', action = 'store')\n parser.add_argument('server_port', action = 'store', type = int)\n parser.add_argument('worker_num', action = 'store', type = int)\n arg = parser.parse_args()\n \n threads = []\n for i in range(arg.worker_num):\n thread = master(arg.server_ip, arg.server_port+i) \n thread.start()\n threads.append(thread)\n print(\"a thread is starting...\")\n logging.info(\"a thread is starting...\")\n\n for thread in threads:\n thread.join()\n \n result = sorting(wordcount)\n\n with open(\"output.txt\", 'a') as f:\n for t in result:\n f.write(\"Word [%s] appears [%i] times.\\n\" % t)\n \n logging.info(\"Master is terminating...\")\n print(\"Master is terminating...\")\n return \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"DistributedFileIndexer/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"21022143","text":"numero1= int(input())\nnumero2= int(input())\nnumero3= int(input())\nif numero1 < numero2 < numero3:\n pri_numero = numero1\n seg_numero = numero2\n ter_numero = numero3\nelif numero1 < numero3 < numero2:\n pri_numero = numero1\n seg_numero = numero3\n ter_numero = numero2\nelif numero2 < numero1 < numero3:\n pri_numero = numero2\n seg_numero = numero1\n ter_numero = numero3\nelif numero2 < numero3 < numero1:\n pri_numero = numero2\n seg_numero = numero3\n ter_numero = numero1\nelif numero3 < numero1 < numero2:\n pri_numero = numero3\n seg_numero = numero1\n ter_numero = numero2\nelif numero3 < numero2 < numero1:\n pri_numero = numero3\n seg_numero = numero2\n ter_numero = numero1\nprint(ter_numero)\nprint(seg_numero)\nprint(pri_numero)\n\n","sub_path":"listas/lista-de-exercicio-03/questao-09.py","file_name":"questao-09.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"313710574","text":"# Copyright 2018 Andreas Traber\n# Licensed under MIT (https://github.com/atraber/escapemgmt/LICENSE)\nfrom quart import abort, Blueprint, request, jsonify\n\nfrom app import db\nfrom logger import logger\nfrom models import Device, Preset\n\npresets = Blueprint('presets', __name__)\n\n\n@presets.route('/presets', methods=['GET'])\nasync def apiPresets():\n presets = db.session.query(Preset).order_by(Preset.name).all()\n return jsonify([s.serialize() for s in presets])\n\n\n@presets.route('/preset', methods=['POST'])\nasync def apiPresetAdd():\n if request.headers['Content-Type'] == 'application/json':\n preset = Preset(name=(await request.json)['name'])\n db.session.add(preset)\n db.session.commit()\n return jsonify(preset.serialize())\n abort(400)\n\n\n@presets.route('/presets/', methods=['POST', 'DELETE'])\nasync def apiPresetUpdate(presetid: int):\n if request.method == 'POST':\n if request.headers['Content-Type'] == 'application/json':\n db_preset = db.session.query(Preset).filter_by(id=presetid).first()\n db_preset.name = (await request.json)['name']\n db.session.commit()\n return jsonify(db_preset.serialize())\n elif request.method == 'DELETE':\n db.session.query(Preset).filter_by(id=presetid).delete()\n db.session.commit()\n return jsonify('ok')\n abort(400)\n\n\n@presets.route('/preset/activate/', methods=['POST'])\nasync def apiPresetActivate(presetid: int):\n if request.headers['Content-Type'] == 'application/json':\n preset_old = db.session.query(Preset).filter_by(active=True).first()\n if preset_old:\n 
preset_old.active = False\n else:\n logger.error('No active preset found')\n\n preset_new = db.session.query(Preset).filter_by(id=presetid).first()\n preset_new.active = True\n\n # Activate all screens\n devices = db.session.query(Device).all()\n\n for device in devices:\n device.screen_enable = True\n\n db.session.commit()\n return jsonify('ok')\n abort(400)\n","sub_path":"backend/app/presets.py","file_name":"presets.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"262307951","text":"\"\"\"\nModule is responsible for hardware interrupt\nhandling dedicated to micrOS framework.\n- Setting up interrupt memory buffer from config\n- Configure time based and external interrupts\n\n- Time based IRQ:\n - Simple with fix period callback\n - Advanced - time stump ! LM function;\n - 0-6:0-24:0-59:0-59!system heartbeat; etc.\n\nDesigned by Marcell Ban aka BxNxM\n\"\"\"\n#################################################################\n# IMPORTS #\n#################################################################\nfrom ConfigHandler import cfgget, console_write\nfrom InterpreterCore import execute_LM_function_Core\nfrom LogicalPins import get_pin_on_platform_by_key\nif cfgget('cron'):\n # Only import when enabled - memory usage optimization\n from Scheduler import scheduler\n\n\n# TIMER IRQ AND CRON VALUES PERSISTENT CACHE\n# timirqcbf (simple), crontasks, timirqseq\nCFG_TIMER_IRQ = ['n/a', 3]\n\n# EVENT IRQ VALUE PERSISTENT CACHE\nCFG_EVIRQCBF = 'n/a'\n\n#################################################################\n# CONFIGURE INTERRUPT MEMORY BUFFER #\n#################################################################\n\n\ndef set_emergency_buffer():\n emergency_buff_kb = cfgget('irqmembuf')\n if cfgget('extirq') or cfgget(\"timirq\"):\n from micropython import alloc_emergency_exception_buf\n console_write(\"[IRQ] Interrupts was enabled, alloc_emergency_exception_buf={}\".format(emergency_buff_kb))\n alloc_emergency_exception_buf(emergency_buff_kb)\n else:\n console_write(\"[IRQ] Interrupts disabled, skip alloc_emergency_exception_buf configuration.\")\n\n#################################################################\n# TIMER INTERRUPT(S) #\n#################################################################\n\n#############################################\n# [TIMER] TIMIRQ CBFs - LM executor #\n#############################################\n\n\ndef secureInterruptHandlerSimple(timer=None):\n try:\n # Execute CBF from cached config\n state = execute_LM_function_Core(CFG_TIMER_IRQ[0].split(' '))\n if not state:\n console_write(\"[IRQ] TIMIRQ execute_LM_function_Core error: {}\".format(CFG_TIMER_IRQ[0]))\n except Exception as e:\n console_write(\"[IRQ] TIMIRQ callback: {} error: {}\".format(CFG_TIMER_IRQ[0], e))\n\n\ndef secureInterruptHandlerScheduler(timer=None):\n try:\n # Execute CBF LIST from local cached config with timirqseq in sec\n scheduler(CFG_TIMER_IRQ[0], CFG_TIMER_IRQ[1])\n except Exception as e:\n console_write(\"[IRQ] TIMIRQ (cron) callback: {} error: {}\".format(CFG_TIMER_IRQ[0], e))\n\n\n#############################################\n# [TIMER] INIT TIMIRQ SET CBF #\n#############################################\n\n\ndef enableInterrupt():\n \"\"\"\n TIMER INTERRUPT CALLBACK FUNCTION CONFIG. 
WRAPPER\n - FIRST PRIORITY: SCHEDULER\n - SECOND PRIORITY: SIMPLE PERIODIC CALLBACK\n \"\"\"\n console_write(\"[IRQ] TIMIRQ SETUP - TIMIRQ: {} SEQ: {}\".format(cfgget(\"timirq\"), cfgget(\"timirqseq\")))\n console_write(\"|- [IRQ] CRON:{} CBF:{}\".format(cfgget('cron'), cfgget('crontasks')))\n console_write(\"|- [IRQ] SIMPLE CBF:{}\".format(cfgget('timirqcbf')))\n if cfgget(\"timirq\"):\n # Configure advanced scheduler OR simple repeater\n if cfgget('cron') and cfgget('crontasks').lower() != 'n/a':\n console_write(\"|-- TIMER IRQ MODE: SCHEDULER\")\n # ENABLE ADVANCED SCHEDULER (BASED ON SIMPLE TIMIRQ)\n __enableInterruptScheduler()\n return\n # ENABLE SIMPLE PERIODIC INTERRUPT\n console_write(\"|-- TIMER IRQ MODE: SIMPLE\")\n __enableInterruptSimple()\n\n\ndef __enableInterruptScheduler():\n \"\"\"\n SMART TIMER INTERRUPT CONFIGURATION\n # MUST BE CHECK BEFORE CALL: cfgget(\"timirq\") and cfgget('cron') and cfgget('crontasks')\n \"\"\"\n # CACHE TASKS FOR CBF\n CFG_TIMER_IRQ[0] = cfgget('crontasks')\n CFG_TIMER_IRQ[1] = int(cfgget(\"timirqseq\") / 1000)\n from machine import Timer\n # INIT TIMER IRQ with callback function wrapper\n timer = Timer(0)\n timer.init(period=int(cfgget(\"timirqseq\")), mode=Timer.PERIODIC, callback=secureInterruptHandlerScheduler)\n\n\ndef __enableInterruptSimple():\n \"\"\"\n SIMPLE TIMER INTERRUPT CONFIGURATION\n \"\"\"\n # LOAD DATA FOR TIMER IRQ: cfgget(\"timirq\")\n # CACHE TASK FOR CBF\n CFG_TIMER_IRQ[0] = cfgget('timirqcbf')\n if CFG_TIMER_IRQ[0].lower() != 'n/a':\n from machine import Timer\n # INIT TIMER IRQ with callback function wrapper\n timer = Timer(0)\n timer.init(period=int(cfgget(\"timirqseq\")), mode=Timer.PERIODIC, callback=secureInterruptHandlerSimple)\n else:\n console_write(\"[IRQ] TIMIRQ: isenable: {} callback: {}\".format(cfgget(\"timirq\"), cfgget('timirqcbf')))\n\n\n#################################################################\n# EVENT/EXTERNAL INTERRUPT(S) #\n#################################################################\n# trigger=Pin.IRQ_FALLING signal HIGH to LOW\n# trigger=Pin.IRQ_RISING signal LOW to HIGH\n# trigger=3 both\n#################################################################\n\n\ndef secureEventInterruptHandler(pin=None):\n \"\"\"\n EVENT INTERRUPT CALLBACK FUNCTION WRAPPER\n \"\"\"\n try:\n state = execute_LM_function_Core(CFG_EVIRQCBF.split(' '))\n if not state:\n console_write(\"[IRQ] EXTIRQ execute_LM_function_Core error: {}\".format(CFG_EVIRQCBF))\n except Exception as e:\n console_write(\"[IRQ] EVENTIRQ callback: {} error: {}\".format(CFG_EVIRQCBF, e))\n\n\ndef init_eventPIN():\n \"\"\"\n EVENT INTERRUPT CONFIGURATION\n \"\"\"\n global CFG_EVIRQCBF\n if cfgget('extirq') and cfgget('extirqcbf').lower() != 'n/a':\n CFG_EVIRQCBF = cfgget('extirqcbf')\n pin = get_pin_on_platform_by_key('pwm_4')\n console_write(\"[IRQ] EVENTIRQ ENABLED PIN: {} CBF: {}\".format(pin, CFG_EVIRQCBF))\n # Init event irq with callback function wrapper\n from machine import Pin\n pin_obj = Pin(pin, Pin.IN, Pin.PULL_UP)\n pin_obj.irq(trigger=Pin.IRQ_RISING, handler=secureEventInterruptHandler)\n else:\n console_write(\"[IRQ] EVENTIRQ: isenable: {} callback: {}\".format(cfgget('extirq'), CFG_EVIRQCBF))\n\n#################################################################\n# INIT MODULE 
#\n#################################################################\n\n\nset_emergency_buffer()\n","sub_path":"micrOS/InterruptHandler.py","file_name":"InterruptHandler.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"4405711","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2019 Synersys Consulting Inc.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU Affero General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details. \n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\nfrom odoo import models, api, fields, _\nfrom datetime import datetime\nimport json\n\nclass AccountAlphalistMap(models.Model):\n _name = 'account.alphalist.map'\n _description = 'Monthly Alphalist Payees'\n _inherit = 'account.report'\n \n filter_date = {'date_from': '', 'date_to': '', 'filter': 'this_year'}\n filter_unfold_all = True\n filter_partner = True\n\n def _get_company_id(self):\n \"\"\"\n @summary: This will default Company Id on the form by\n getting the attached company_id of the current User. 
\n \"\"\"\n user_id = self.env.uid\n company_id = self.env['res.users'].browse(user_id).company_id.id\n return company_id\n \n company_id = fields.Many2one('res.company', string='Company', default=_get_company_id, track_visibility='onchange')\n\n @api.model\n def get_title(self):\n return _('Quarterly Alphalist Payees')\n\n @api.model\n def _get_report_name(self):\n return _('Quarterly Alphalist Payees')\n\n @api.model\n def get_report_type(self):\n return self.env.ref('tf_ph_bir.account_report_type_alphalist_map')\n \n def _get_templates(self):\n templates = super(AccountAlphalistMap, self)._get_templates()\n templates['line_template'] = 'tf_ph_bir.line_template_alphalist_map_report'\n return templates\n \n def _get_columns_name(self, options):\n columns = [\n {},\n {'name': _('Date'), 'class': 'date'},\n {'name': _('Return Period')},\n {'name': _('Journal')},\n {'name': _('Nature of Income Payment')},\n {'name': _('Tax Rate')},\n {'name': _('Tax Base'), 'class': 'number'},\n {'name': _('Tax Withheld'), 'class': 'number'}]\n\n return columns\n\n @api.model\n def get_company_data(self):\n user_id = self.env.uid\n company = self.env['res.users'].browse(user_id).company_id;\n company_data ={\n \"company_tin\": company.vat.replace(\"-\", \"\")[0:9],\n \"company_name\": company.name,\n \"date\": f'{datetime.now().month}{datetime.now().year}'\n }\n\n return company_data\n\n @api.model\n def get_csv(self, options):\n alpha_type = 'MAP'\n ftype_code = '1601E'\n file_content = ''\n user_id = self.env.uid\n user_data = self.env['res.users'].browse(user_id)\n company = user_data.company_id\n company_name = company.name.upper() or ''\n company_vat = company.vat.replace('-', '')\n company_tin = company_vat[0:9] or ''\n company_branch_code = company_vat[9:13] or ''\n company_rdo_code = company.rdo_code\n company_data = list(filter(lambda data: data['level'] == 2, options['line_details_header']))\n return_period = datetime.now().strftime('%m/%Y')\n header_details = f\"H{alpha_type},\" \\\n f\"H{ftype_code},\" \\\n f\"{company_tin},\" \\\n f\"{company_branch_code},\" \\\n f'\"{company_name}\",' \\\n f\"{return_period},\" \\\n f\"{company_rdo_code}\\n\"\n file_content += header_details\n total_amount_withheld = 0.0\n total_tax_base = 0.0\n seq = 0\n for detail in company_data:\n seq += 1\n detail_tin = '000-000-000'\n if detail['vat']:\n detail_tin = str(detail['vat']).replace('-', '')[0:9]\n detail_rdo = str(detail['vat']).replace('-', '')[9:13]\n\n atc = detail['company_details'][2]['name'].replace(' ', '')[0:5]\n tax_rate = format(float(detail['company_details'][3]['name']), '.2f')\n tax_base = detail['columns'][5]['name'].replace(',', '').replace(' Php', '').replace('$ ', '')\n actual_amt_wthld = detail['columns'][6]['name'].replace(',', '').replace(' Php', '').replace('$ ', '')\n\n first_name = detail['first_name'].upper() if detail['first_name'] else ''\n middle_name = detail['middle_name'].upper() if detail['middle_name'] else ''\n last_name = detail['last_name'].upper() if detail['last_name'] else ''\n\n detail_content = f'D{alpha_type},' \\\n f'D{ftype_code},' \\\n f'{seq},' \\\n f'{detail_tin},' \\\n f'{detail_rdo},' \\\n f'\"{str(detail[\"name\"]).upper()}\",'\\\n f\"{first_name},\"\\\n f\"{middle_name},\" \\\n f\"{last_name},\" \\\n f\"{return_period},\" \\\n f\"{atc},\" \\\n f\"{tax_rate},\" \\\n f\"{tax_base},\" \\\n f\"{actual_amt_wthld}\\n\"\n total_amount_withheld += float(actual_amt_wthld)\n total_tax_base += float(tax_base)\n file_content += detail_content\n\n\n control_details = 
f\"C{alpha_type},\" \\\n f\"C{ftype_code},\" \\\n f\"{company_tin},\" \\\n f\"{company_branch_code},\"\\\n f\"{return_period},\" \\\n f\"{format(total_tax_base, '.2f')},\"\\\n f\"{format(total_amount_withheld, '.2f')}\\n\"\n\n file_content += control_details\n\n return file_content\n\n def get_report_filename(self, options):\n \"\"\"The name that will be used for the file when downloading pdf,xlsx,...\"\"\"\n if 'export_csv' in options and options['export_csv']:\n options.pop('export_csv')\n company_data = self.get_company_data()\n return f\"{company_data['company_tin']}{company_data['date']}1601E.dat\"\n\n return self._get_report_name().lower().replace(' ', '_')\n\n def _get_reports_buttons(self):\n return [\n {'name': _('Print Preview'), 'sequence': 1, 'action': 'print_pdf', 'file_export_type': _('PDF')},\n {'name': _('Export (XLSX)'), 'sequence': 2, 'action': 'print_xlsx', 'file_export_type': _('XLSX')},\n {'name': _('Export (DAT.CSV)'), 'sequence': 3, 'action': 'tf_ph_bir_export_csv', 'file_export_type': _('CBS')},\n {'name': _('Save'), 'sequence': 10, 'action': 'open_report_export_wizard'},\n ]\n\n def tf_ph_bir_export_csv(self, options):\n options['export_csv'] = True\n action = {'type': 'ir_actions_account_report_download',\n 'name': 'testing.dat',\n 'data': {'model': self.env.context.get('model'),\n 'options': json.dumps(options),\n 'output_format': 'csv',\n 'financial_id': self.env.context.get('id'),\n }\n }\n return action\n\n\n @api.model\n def _get_taxwithheld(self, invoice_line, tax):\n return invoice_line.price_subtotal * -(tax.amount)\n\n @api.model\n def _get_lines(self, options, line_id=None):\n AccountMoveLine = self.env['account.move.line']\n ResPartner = self.env['res.partner']\n context = self.env.context\n date_from = context.get('date_from')\n date_to = context.get('date_to')\n unfold_all = context.get('print_mode') and not options.get('unfolded_lines')\n company_ids = self.env['res.company']\n partner_ids = partner_ids2 = []\n lines = []\n csv_details_data = []\n #Get selected company\n if context.get('company_ids', False):\n comp_ids = context.get('company_ids')\n for comp_id in comp_ids:\n comp_id = self.env['res.company'].browse(comp_id)\n company_ids += comp_id\n child_ids = self.env['res.company'].search([('parent_id','=',comp_id.id)])\n for child_id in child_ids: \n if child_id not in company_ids: company_ids += child_id\n\n domain = [('date', '<=', date_to),\n ('date', '>=', date_from),\n ('tax_line_id', '!=', False),\n ('move_id.type', 'in', ('in_invoice', 'in_refund', 'in_receipt')),\n ('move_id.state', '!=', 'draft')\n ]\n \n move_line_ids = AccountMoveLine.search(domain)\n\n if line_id:\n line_id = int(line_id.split('_')[1]) or None\n if line_id: partner_ids = ResPartner.browse(line_id)\n \n elif options.get('partner_ids'):\n # If a default partner is set, we only want to load the line referring to it.\n partner_ids2 = options['partner_ids']\n for p_id in partner_ids2:\n p_id = ResPartner.browse(p_id)\n line_id = p_id.id\n if p_id not in partner_ids: partner_ids += p_id\n if line_id:\n if 'partner_' + str(line_id) not in options.get('unfolded_lines', []):\n options.get('unfolded_lines', []).append('partner_' + str(line_id))\n\n options.update({'partner_ids': list(dict.fromkeys(options['partner_ids']))})\n else:\n #Create partner list\n partner_ids = move_line_ids.mapped('partner_id')\n\n overall_tax_base = overall_tax_withheld = 0\n for partner_id in partner_ids:\n map_lines = []\n csv_data = []\n withholding_2306_ids = self.env['account.tax']\n 
withholding_2307_ids = self.env['account.tax']\n withholding_2307_ids = self.env['account.tax']\n\n for comp_id in company_ids:\n withholding_2306_ids += comp_id.withholding_2306_ids\n withholding_2307_ids += comp_id.withholding_2307_ids\n withholding_taxes = (withholding_2306_ids + withholding_2307_ids).filtered(lambda tax: tax.type_tax_use == 'purchase')\n \n partner_move_line_ids = move_line_ids.filtered(lambda m: m.partner_id == partner_id and m.tax_line_id in withholding_taxes)\n base_total = withheld_total = 0.0\n\n if partner_move_line_ids:\n taxes = []\n \n for tax_id in partner_move_line_ids.filtered(lambda v:v.tax_line_id in withholding_taxes).mapped('tax_line_id'):\n if tax_id not in withholding_taxes:\n continue\n \n tax_base_total = tax_withheld_total = 0.0\n \n for tax_move_line_id in partner_move_line_ids.filtered(lambda l: l.tax_line_id == tax_id):\n if tax_move_line_id.company_id.id in company_ids.ids:\n #For invoice line tax withheld\n tax_withheld = (tax_move_line_id.debit * -1.0) or tax_move_line_id.credit\n tax_id = tax_move_line_id.tax_line_id\n\n #Look for the Tax Base\n tax_base = 0.0\n base_aml_ids = tax_move_line_id.move_id.line_ids.filtered(lambda l: l.tax_ids & tax_id and l.move_id)\n if base_aml_ids:\n for base_aml_id in base_aml_ids:\n tax_base += base_aml_id.debit or -(base_aml_id.credit) \n else:\n tax_base = tax_withheld / -(tax_id.amount / 100)\n \n #Total per Tax\n tax_base_total += tax_base\n tax_withheld_total += tax_withheld\n \n move_id = tax_move_line_id.move_id\n return_period = datetime.strptime(str(move_id.date), '%Y-%m-%d').strftime('%m/%Y')\n \n if (tax_base + tax_withheld):\n # Lines\n columns = [{'name': v} for v in [\n tax_move_line_id.move_id.date,\n return_period,\n tax_move_line_id.move_id.journal_id.name,\n tax_id.name,\n str(-(tax_id.amount)) + '%',\n self.format_value(tax_base),\n self.format_value(tax_withheld)\n ]]\n \n caret_type = 'account.move'\n partner_data = {\n 'id': tax_move_line_id.id,\n 'type': 'move_line_id',\n 'caret_options': caret_type,\n 'class': 'top-vertical-align',\n 'move_id': tax_move_line_id.move_id.id,\n 'parent_id': 'partner_' + str(partner_id.id),\n 'name': tax_move_line_id.move_id.name,\n 'vat_payee': tax_move_line_id.move_id.partner_id.vat,\n 'registered_payee_name': tax_move_line_id.move_id.partner_id.name,\n 'columns': columns,\n 'level': 3,\n }\n csv_data.append(partner_data)\n if 'partner_' + str(partner_id.id) in options.get('unfolded_lines') or unfold_all:\n if tax_id.name not in taxes or not taxes:\n map_lines.append({\n 'id': 'initial_%s' % (partner_id.id),\n 'class': 'o_account_reports_initial_balance',\n 'name': \"ATC - %s\" % tax_id.description,\n 'parent_id': 'partner_%s' % (partner_id.id,),\n 'columns': [{'name': v} for v in ['', '', '', '', '', '', '']],\n 'level': 3,\n })\n taxes.append(tax_id.name)\n map_lines.append(partner_data)\n\n #Total per Partner\n base_total += tax_base_total\n withheld_total += tax_withheld_total\n if 'partner_' + str(partner_id.id) in options.get('unfolded_lines') or unfold_all: \n if (tax_base_total + tax_withheld_total) or map_lines:\n map_lines.append({\n 'id': 'total_' + str(tax_move_line_id.account_id.id),\n 'type': 'o_account_reports_domain_total',\n 'class': 'total',\n 'name': _('Total') + ': ' + tax_id.name,\n 'parent_id': 'partner_' + str(partner_id.id),\n 'columns': [{'name': v} for v in ['','', '', '', '', self.format_value(tax_base_total), self.format_value(tax_withheld_total)]],\n 'level': 4,\n })\n overall_tax_base += tax_base_total\n 
overall_tax_withheld += tax_withheld_total\n \n if (base_total + withheld_total) or map_lines:\n #Partner Line\n lines.append({\n 'id': 'partner_' + str(partner_id.id),\n 'name': partner_id.name,\n 'first_name': partner_id.first_name,\n 'middle_name': partner_id.middle_name,\n 'last_name': partner_id.last_name,\n 'vat': partner_id.vat,\n 'rdo': partner_id.rdo_code,\n 'columns': [{'name': v} for v in [\n '',\n '',\n '',\n '',\n '',\n self.format_value(base_total),\n self.format_value(withheld_total)]],\n 'level': 2,\n 'return_period': return_period,\n 'unfoldable': True,\n 'unfolded': 'partner_' + str(partner_id.id) in options.get('unfolded_lines') or unfold_all,\n 'colspan': 1,\n 'company_details': [{'name': v} for v in [\n tax_move_line_id.move_id.date,\n return_period,\n tax_id.name,\n str(-(tax_id.amount)),\n ]]\n })\n\n lines += map_lines\n csv_details_data += csv_data\n\n if not line_id or partner_ids2:\n total_line = {\n 'id': 'overall_map_partners_total',\n 'name': _('Total'),\n 'class': 'o_account_reports_domain_total',\n 'level': 0,\n 'columns': [{'name': ''}] * 5 +\n [{'name': self.format_value(v)} for v in\n [overall_tax_base,\n overall_tax_withheld, ]],\n }\n lines.append(total_line)\n options['line_details_header'] = lines\n options['line_details_data'] = csv_details_data\n return lines\n","sub_path":"tf_ph_bir/models/tf_ph_bir_alphalist_map.py","file_name":"tf_ph_bir_alphalist_map.py","file_ext":"py","file_size_in_byte":19063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"410331097","text":"import pytest\nfrom requests import get, post, delete, put\n\n# Удаление работы\ndef test_delete1():\n resp = delete('http://localhost:5000/api/jobs/999') \n assert resp.status_code == 200\n resp_body = resp.json()\n assert resp_body['error'] == 'Id <999> not found'\n \n\ndef test_delete2():\n resp = delete('http://localhost:5000/api/jobs/text') \n assert resp.status_code == 404\n resp_body = resp.json()\n assert resp_body['error'] == 'Not found'\n\n# test_delete1()\n# test_delete2()","sub_path":"Lyceum/Project_WEB_API/test_delete.py","file_name":"test_delete.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"183521510","text":"# Copyright 2015 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the Guide module.\"\"\"\n\n__author__ = 'Pavel Simakov (psimakov@google.com)'\n\n\nfrom controllers import sites\nfrom models import courses\nfrom tests.functional import actions\nfrom tools import verify\n\n\nclass GuideTests(actions.TestBase):\n\n ALL_COURSES = [\n ('Alpha', courses.COURSE_AVAILABILITY_PUBLIC),\n ('Bravo', courses.COURSE_AVAILABILITY_REGISTRATION_OPTIONAL),\n ('Charlie', courses.COURSE_AVAILABILITY_REGISTRATION_REQUIRED),\n ('Delta', courses.COURSE_AVAILABILITY_PRIVATE)]\n\n GUIDE_DISABLED = {'modules': {'guide': {'enabled': False}}}\n\n GUIDE_ENABLED_COURSE = {'modules': {'guide': {\n 'enabled': True, 'availability': courses.AVAILABILITY_COURSE}}}\n\n GUIDE_ENABLED_PRIVATE = {'modules': {'guide': {\n 'enabled': True, 'availability': courses.AVAILABILITY_UNAVAILABLE}}}\n\n def _import_sample_course(self, ns='guide', availability=None):\n dst_app_context = actions.simple_add_course(\n ns, '%s_tests@google.com' % ns,\n 'Power Searching with Google [%s]' % ns)\n dst_course = courses.Course(None, dst_app_context)\n all_courses = sites.get_all_courses('course:/:/:')\n src_app_context = all_courses[len(all_courses) - 1]\n errors = []\n dst_course.import_from(src_app_context, errors)\n dst_course.save()\n dst_course.set_course_availability(availability)\n self.assertEquals(0, len(errors))\n\n def setUp(self):\n super(GuideTests, self).setUp()\n entries = []\n for name, availability in self.ALL_COURSES:\n self._import_sample_course(ns=name, availability=availability)\n entries.append('course:/%s::ns_%s\\n' % (name, name))\n sites.setup_courses(''.join(entries))\n\n def assert_guide_not_accesssible(self, name, is_guides_accessible=False):\n response = self.get('/modules/guides', expect_errors=True)\n if is_guides_accessible:\n self.assertEquals(200, response.status_int)\n else:\n self.assertEquals(404, response.status_int)\n\n app_ctx = sites.get_course_for_path('/%s' % name)\n course = courses.Course(None, app_context=app_ctx)\n for unit in course.get_units():\n if unit.type != verify.UNIT_TYPE_UNIT:\n continue\n response = self.get(\n '/%s/guide?unit_id=%s' % (name, unit.unit_id),\n expect_errors=True)\n self.assertEquals(404, response.status_int)\n\n def assert_guide_accesssible(self, name):\n response = self.get('/modules/guides')\n self.assertEquals(200, response.status_int)\n self.assertIn(\n 'category=\"Power Searching with Google [%s]' % name,\n response.body)\n\n app_ctx = sites.get_course_for_path('/%s' % name)\n course = courses.Course(None, app_context=app_ctx)\n for unit in course.get_units():\n if unit.type != verify.UNIT_TYPE_UNIT:\n continue\n response = self.get('/%s/guide?unit_id=%s' % (name, unit.unit_id))\n self.assertIn(unit.title, response.body.decode('utf-8'))\n\n def register(self, name):\n self.base = '/%s' % name\n actions.register(self, 'Test User %s' % name)\n self.base = ''\n\n def test_polymer_components_zip_handler(self):\n response = self.get(\n 
'/modules/guide/resources/polymer/bower_components/bower.json')\n self.assertEquals(200, response.status_int)\n\n def test_guide_disabled(self):\n with actions.OverriddenEnvironment(self.GUIDE_DISABLED):\n for name in ['Alpha', 'Bravo', 'Charlie', 'Delta']:\n actions.logout()\n self.assert_guide_not_accesssible(name)\n\n actions.login('guest@sample.com')\n self.assert_guide_not_accesssible(name)\n\n if name == 'Bravo' or name == 'Charlie':\n self.register(name)\n self.assert_guide_not_accesssible(name)\n\n actions.login('admin@sample.com', is_admin=True)\n self.assert_guide_not_accesssible(name)\n\n def test_guide_enabled_private(self):\n with actions.OverriddenEnvironment(self.GUIDE_ENABLED_PRIVATE):\n for name in ['Alpha', 'Bravo', 'Charlie', 'Delta']:\n actions.logout()\n self.assert_guide_not_accesssible(name)\n\n actions.login('guest@sample.com')\n self.assert_guide_not_accesssible(name)\n\n if name == 'Bravo' or name == 'Charlie':\n self.register(name)\n self.assert_guide_not_accesssible(name)\n\n actions.login('admin@sample.com', is_admin=True)\n self.assert_guide_accesssible(name)\n\n # check course labels as admin sees them\n response = self.get('/modules/guides')\n self.assertEquals(200, response.status_int)\n self.assertIn(\n 'category=\"Power Searching with Google [Alpha] '\n '(Private)', response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Bravo] '\n '(Private)', response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Charlie] '\n '(Private)', response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Delta] '\n '(Private)', response.body)\n\n def test_guide_enabled_course(self):\n with actions.OverriddenEnvironment(self.GUIDE_ENABLED_COURSE):\n actions.logout()\n self.assert_guide_accesssible('Alpha')\n self.assert_guide_accesssible('Bravo')\n self.assert_guide_not_accesssible(\n 'Charlie', is_guides_accessible=True)\n self.assert_guide_not_accesssible(\n 'Delta', is_guides_accessible=True)\n\n actions.login('guest@sample.com')\n self.assert_guide_accesssible('Alpha')\n self.assert_guide_accesssible('Bravo')\n self.assert_guide_not_accesssible(\n 'Charlie', is_guides_accessible=True)\n self.assert_guide_not_accesssible(\n 'Delta', is_guides_accessible=True)\n\n self.register('Charlie')\n self.assert_guide_accesssible('Alpha')\n self.assert_guide_accesssible('Bravo')\n self.assert_guide_accesssible('Charlie')\n self.assert_guide_not_accesssible(\n 'Delta', is_guides_accessible=True)\n\n actions.login('admin@sample.com', is_admin=True)\n for name in ['Alpha', 'Bravo', 'Charlie', 'Delta']:\n self.assert_guide_accesssible(name)\n\n # check course labels as admin sees them\n response = self.get('/modules/guides')\n self.assertEquals(200, response.status_int)\n self.assertIn(\n 'category=\"Power Searching with Google [Alpha]',\n response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Bravo]',\n response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Charlie] '\n '(Registration required)', response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Delta] (Private)',\n response.body)\n\n def test_guide_shows_all_unit_lessons(self):\n with actions.OverriddenEnvironment(self.GUIDE_ENABLED_PRIVATE):\n actions.login('test@example.com', is_admin=True)\n\n # check guides page\n response = self.get('/modules/guides')\n self.assertIn('', response.body)\n self.assertIn(\n 'category=\"Power Searching with Google [Alpha]',\n response.body)\n 
self.assertIn('gcbTagYoutubeEnqueueVideo(\"', response.body)\n self.assertIn(\n '\n \n \"\"\"\n html = html + tag_row\n return html\n\n def to_html(self):\n return self.render_node(self.document.content_node.children[0])\n\n def to_mimetype(self):\n return self.render_node_mimetype(self.document.content_node.children[0])\n\n def render_node_mimetype(self, node):\n tag_colors = {}\n for index, tag in enumerate(node.get_all_tags()):\n tag_colors[tag] = self.color_array[index]\n\n render_data = {'node_data': node.to_json(), 'tag_colors': tag_colors, }\n bundle = {}\n bundle['application/vnd.kodexa.spatial+json'] = render_data\n return bundle\n","sub_path":"kodexa/mixins/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":10580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"584491229","text":"import time\nfrom collections import namedtuple\nfrom sitting_time_sensor.sensor import Sensor\nimport RPi.GPIO as GPIO\n\nclass PIRSensor(Sensor):\n Status = namedtuple('Status', ['changed', 'unchanged'])\n status = Status(changed=1, unchanged=0) # 暫定\n sensor_port = -1\n\n def setup(self, port):\n super().setup(ports=[port], port_settings=[super().INPUT])\n PIRSensor.sensor_port = port\n\n def read(self):\n read_val = super().read(PIRSensor.sensor_port)\n if (read_val == PIRSensor.status.changed):\n return 1\n return 0\n\n def main(self):\n while True:\n time.sleep(1)\n #when motion detected turn on LED\n input = self.read()\n print(\"PIR sensor input:\", input)\n if input == 1:\n print(\"detected\")\n else:\n print(\"not detected\")\n\n def destroy(self):\n GPIO.cleanup()\n\n def __init__(self, port):\n self.setup(port)\n\nif __name__ == '__main__':\n pir_sensor = PIRSensor(port=26)\n try:\n pir_sensor.main()\n except KeyboardInterrupt:\n pir_sensor.destroy() \n","sub_path":"src/sitting_time_sensor/pir_sensor.py","file_name":"pir_sensor.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"560914554","text":"import datetime\r\n\r\nname = str(input(\"Enter your name:\"))\r\nyear = int(input(\"Enter your year you were born:\"))\r\nnow = datetime.datetime.now()\r\nthisyear = now.year\r\nage = thisyear - year\r\n\r\nif age >= 1 and age <= 9:\r\n print(\"%s is %d years old. You are generation \\\"Alpha\\\"\" % (name, age))\r\nif age >= 10 and age <= 24:\r\n print(\"%s is %d years old. You are generation \\\"Z\\\"\" % (name, age))\r\nif age >= 25 and age <= 39:\r\n print(\"%s is %d years old. You are generation \\\"Y\\\"\" % (name, age))\r\nif age >= 40 and age <= 54:\r\n print(\"%s is %d years old. You are generation \\\"X\\\"\" % (name, age))\r\nif age >= 55 and age <= 72:\r\n print(\"%s is %d years old. You are generation \\\"Baby Boomer\\\"\" % (name, age))\r\nif age >= 73:\r\n print(\"%s is %d years old. 
You are generation \\\"Builder\\\"\" % (name, age))","sub_path":"sirapong-6230401987-lab1/find_gen.py","file_name":"find_gen.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"212252920","text":"\"\"\"BOJ Q2178 - 미로 탐색 (https://www.acmicpc.net/problem/2178)\n\nSimple BFS problem\n\"\"\"\n\nn, m = [int(x) for x in input().split()]\nmaze = [list(input()) for x in range(n)]\n\nqueue = [(0, 0, 1)]\nmaze[0][0] = '0'\nwhile True:\n x, y, dist = queue.pop(0)\n if (x, y) == (m - 1, n - 1):\n print(dist)\n break\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx < m and 0 <= ny < n and maze[ny][nx] == '1':\n maze[ny][nx] = '0'\n queue.append((nx, ny, dist + 1))\n","sub_path":"q2178.py","file_name":"q2178.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"354303520","text":"\"\"\"\nWSGI config for noot project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport sys\nimport site\n\nALLDIRS = ['/home/theo/acaciadata.com/django/lib/python2.7/site-packages']\n\n# Remember original sys.path.\nprev_sys_path = list(sys.path) \n\n# Add each new site-packages directory.\nfor directory in ALLDIRS:\n site.addsitedir(directory)\n\n# Reorder sys.path so new directories at the front.\nnew_sys_path = [] \nfor item in list(sys.path): \n if item not in prev_sys_path: \n new_sys_path.append(item) \n sys.path.remove(item) \nsys.path[:0] = new_sys_path\n\n# Add the app's directory to the PYTHONPATH\nsys.path.append('/home/theo/acaciadata.com/acacia')\nsys.path.append('/home/theo/acaciadata.com/acacia/acacia')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'acacia.settings'\n\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n\n","sub_path":"acacia/acacia/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444127979","text":"# coding:utf-8\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport time\n\n# 小说主地址\nreq_url_base = 'https://m.lingyu.org/'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) '\n 'Chrome/17.0.963.56 Safari/535.11'}\n\n\ndef has_title(tag):\n return tag.get('property').equals('og:title')\n\n\ndef has_next_page(tag):\n return tag.string.equals('下一页')\n\n\n# 小说下载函数\n\n\n# txt_id:小说编号\n# txt字典项介绍\n# id:小说编号\n# title:小说题目\n# first_page:第一章页面\n# txt_section:章节地址\n# section_name:章节名称\n# section_text:章节正文\n# section_ct:章节页数\ndef get_txt(txt_id, page_size):\n txt = {'title': '', 'id': txt_id}\n try:\n # 根据小说编号获取小说URL\n req_url = req_url_base + txt['id'] + '/'\n print(\"小说编号:\" + txt['id'])\n # 获取小说目录界面\n req = urllib.request.Request(req_url, headers=headers)\n res = urllib.request.urlopen(req).read()\n # soup转化\n soups = BeautifulSoup(res, \"html.parser\")\n\n # 获取小说题目\n title = soups.select('.currency_head h1 a')\n txt['title'] = title[0].string\n\n # 打开小说文件写入小说相关信息\n fo = open('{1}.txt'.format(txt['id'], txt['title']), \"ab+\")\n fo.write((txt['title'] + \"\\r\\n\").encode('UTF-8'))\n fo.write(\"******************\\r\\n\".encode('UTF-8'))\n\n print(\"编号:\" + 
'{0:0>8} '.format(txt['id']) + \"小说名:《\" + txt['title'] + \"》 开始下载。\")\n print(\"正在寻找第一章页面。。。\")\n # 进入循环,写入每章内容\n chapter_list = soups.select('.chapters li a')\n\n for i in range(1, 50):\n try:\n # 获取小说目录界面\n req = urllib.request.Request(req_url_base + txt['id'] + '_' + str(i) + '/', headers=headers)\n res = urllib.request.urlopen(req).read()\n # soup转化\n soups = BeautifulSoup(res, \"html.parser\")\n chapter_list.extend(soups.select('.chapters li a'))\n except Exception as e:\n print(e)\n print(\"编号:\" + '{0:0>8} '.format(txt['id']) + \"小说名:《\" + txt['title'] + \"》 章节下载失败,正在重新下载。\")\n\n for chapter in chapter_list:\n try:\n href = chapter['href']\n req_chapter = urllib.request.Request(str(href), headers=headers)\n res_chapter = urllib.request.urlopen(req_chapter).read()\n soup = BeautifulSoup(res_chapter, \"html.parser\")\n\n # 获取章节名称\n section_name = soup.select('div .title h1')[0]\n section_text = soup.select('div #content')[0]\n for ss in section_text.select(\"script\"): # 删除无用项\n ss.decompose()\n # 获取章节文本\n section_text = re.sub('\\s+', '\\r\\n\\t', section_text.text)\n\n # 以二进制写入章节题目\n fo.write(('\\r\\n\\n\\n' + section_name.text + '\\r\\n\\n').encode('UTF-8'))\n # 以二进制写入章节内容\n fo.write(section_text.encode('UTF-8'))\n print(txt['title'] + ' 章节:' + section_name.text + ' 已下载')\n except Exception as e:\n print(e)\n print(\"编号:\" + '{0:0>8} '.format(txt['id']) + \"小说名:《\" + txt['title'] + \"》 章节下载失败,正在重新下载。\")\n\n except Exception as e:\n print(e)\n # 出现错误会将错误信息写入download.log文件,同时答应出来\n fo_err = open('download.log', \"ab+\")\n try:\n fo_err.write(('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + \"]:编号:\" + '{0:0>8} '.format(\n txt['id']) + \"小说名:《\" + txt['title'] + \"》 下载失败。\\r\\n\").encode('UTF-8'))\n print('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + \"]:编号:\" + '{0:0>8} '.format(\n txt['id']) + \"小说名:《\" + txt['title'] + \"》 下载失败。\")\n os.rename('{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.download',\n '{0:0>8}'.format(txt['id']) + '-' + txt['title'] + '.txt.error')\n except Exception as e:\n print(e)\n fo_err.write(('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + \"]:编号:\" + '{0:0>8} '.format(\n txt['id']) + \"下载失败。\\r\\n\").encode('UTF-8'))\n print('[' + time.strftime('%Y-%m-%d %X', time.localtime()) + \"]:编号:\" + '{0:0>8} '.format(\n txt['id']) + \"下载失败。\")\n finally: # 关闭文件\n fo_err.close()\n\n\n# 此处为需要下载小说的编号,编号获取方法在上文中已经讲过。\nget_txt(\"49/49178\", 64)\n","sub_path":"m_ling_yu_org.py","file_name":"m_ling_yu_org.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"192132164","text":"import os\nimport sys\n\nreload(sys).setdefaultencoding(\"UTF-8\")\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*pathnames):\n fh = open(os.path.join(os.path.dirname(__file__), *pathnames))\n return fh.read().decode('utf-8')\n\nversion = '1.2.3'\n\nsetup(\n name='collective.taxonomy',\n version=version,\n description=\"Create, edit and use hierarchical taxonomies in Plone!\",\n long_description='\\n'.join([\n read('README.rst'),\n read('CHANGES.rst'),\n ]),\n classifiers=[\n \"Framework :: Plone\",\n \"Framework :: Zope2\",\n \"Programming Language :: Python\",\n ],\n keywords='plone taxonomy dexterity',\n author='Bo Simonsen and Malthe Borch',\n author_email='bo@headnet.dk',\n license=\"GPLv2+\",\n packages=find_packages('src'),\n package_dir={'': 'src'},\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n 
install_requires=[\n        'setuptools',\n        'plone.supermodel',\n        'plone.api',\n        'plone.app.registry',\n        'plone.app.dexterity[grok]',\n        'elementtree',\n        'simplejson',\n        'collective.js.jqueryui',\n    ],\n    extras_require={\n        'test': [\n            'plone.testing',\n            'plone.app.testing',\n            'unittest2',\n        ]\n    },\n    entry_points=\"\"\"\n    [z3c.autoinclude.plugin]\n    target = plone\n    \"\"\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"158913301","text":"from PyQt4 import QtCore, QtGui\n\nfrom collections import namedtuple\nfrom PyPDF2 import PdfFileReader\n\nous_tree_field_names = [\n    'page_one_source_file_name',\n    'page_one_source_page_number',\n    'page_two_source_file_name',\n    'page_two_source_page_number',\n    'form_template',\n    'psr_number',\n    'is_one_page',\n    'output_file_path'\n]\nOUSTree = namedtuple('OUSTree', ous_tree_field_names)\n\nclass OUSTreesListView(QtGui.QListView):\n    def __init__(self, parent = None):\n        super(OUSTreesListView, self).__init__(parent)\n\n        self.initialize_user_interface()\n\n        self.initialize_model()\n\n    def initialize_user_interface(self):\n        self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n\n    def initialize_model(self):\n        from widget.ous_tree_processor.obj.ous_trees_list_view_model import OUSTreesListViewModel\n\n        self.model = OUSTreesListViewModel(self)\n        self.setModel(self.model)\n\n    def add_from_source_pdfs(self, source_pdfs_file_names):\n        self.model.beginResetModel()\n\n        # Validate that source PDFs have an even total number of pages\n        source_pdfs_page_counts = [PdfFileReader(file_name).getNumPages() for file_name in source_pdfs_file_names]\n        num_pages = sum(source_pdfs_page_counts)\n        if num_pages % 2 != 0:\n            raise Exception('Source PDFs have an odd number of total pages ({}), which is not allowed.'.format(num_pages))\n\n        # Generate a list of lists of page numbers.\n        # e.g. 
if source_pdfs_page_counts = [3, 5] --> exploded_source_pdfs_page_counts = [[0,1,2], [3,4,5,6,7]]\n exploded_source_pdfs_page_counts = []\n for index, page_count in enumerate(source_pdfs_page_counts):\n start = 0 if index == 0 else start + source_pdfs_page_counts[index-1]\n exploded_source_pdfs_page_counts.append(range(start, start + page_count))\n\n self.model.ous_trees = []\n\n # Generate each OUS tree object\n for current_page in xrange(0, num_pages, 2):\n page_one_source_file_index = [exploded_source_pdfs_page_counts.index(i) for i in exploded_source_pdfs_page_counts if current_page in i][0]\n page_one_source_file_name = source_pdfs_file_names[page_one_source_file_index]\n page_one_source_page_number = exploded_source_pdfs_page_counts[page_one_source_file_index].index(current_page)\n\n page_two_source_file_index = [exploded_source_pdfs_page_counts.index(i) for i in exploded_source_pdfs_page_counts if current_page+1 in i][0]\n page_two_source_file_name = source_pdfs_file_names[page_two_source_file_index]\n page_two_source_page_number = exploded_source_pdfs_page_counts[page_two_source_file_index].index(current_page+1)\n\n form_template = None\n psr_number = None\n is_one_page = False\n output_file_path = None\n\n new_ous_tree = OUSTree(\n page_one_source_file_name,\n page_one_source_page_number,\n page_two_source_file_name,\n page_two_source_page_number,\n form_template,\n psr_number,\n is_one_page,\n output_file_path\n )\n\n self.model.ous_trees.append(new_ous_tree)\n\n self.model.endResetModel()\n","sub_path":"widget/ous_tree_processor/obj/ous_trees_list_view.py","file_name":"ous_trees_list_view.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"197682165","text":"class Solution:\n \"\"\"\n @param s: the maximum length of s is 1000\n @return: the longest palindromic subsequence's length\n \"\"\"\n def longestPalindromeSubseq(self, s):\n # write your code here\n n = len(s)\n if n == 0:\n return 0 \n \n if n == 1:\n return 1 \n \n dp = [[0 for i in range(n + 1)] for i in range(n + 1)] \n \n for i in range(n + 1):\n dp[i][i] = 1 \n \n for i in range(n - 2, -1, -1):\n for j in range(i + 1, n):\n if s[i] == s[j]:\n dp[i][j] = dp[i + 1][j - 1] + 2 \n else:\n dp[i][j] = max(dp[i +1][j], dp[i][j - 1])\n\n return dp[0][n - 1]\n \n ","sub_path":"667 longest palindrome subsequence.py","file_name":"667 longest palindrome subsequence.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"564608679","text":"import numpy as np\nfrom flask import Flask, request,render_template\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('C:/Users/HP-PC/PycharmProjects/application/static/models/model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n@app.route('/predict',methods=['POST'])\ndef predict():\n '''\n For rendering results on HTML GUI\n '''\n int_features = [x for x in request.form.values()]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n if prediction==1:\n output=\"Nominal interest rate\"\n elif prediction==2:\n output=\"effective rate\"\n else :\n output=\"real interest rate\"\n\n return render_template('index.html', prediction_text='Interest category is {}'.format(output))\nif __name__ == \"__main__\":\n 
app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136512844","text":"\n\nimport os\nimport typing\n\nfrom .impl.IDCounter import IDCounter\n\nfrom .ExceptionInChildContextException import ExceptionInChildContextException\nfrom .EnumLogLevel import EnumLogLevel\nfrom .impl.LogStats import LogStats\nfrom .AbstractLogger import AbstractLogger\nfrom .impl.Converter import Converter\nfrom .impl.JSONDict import JSONDict\nfrom .BufferLogger import BufferLogger\nfrom .InvalidExtraArgumentsException import InvalidExtraArgumentsException\n\n\n\n\n\n\n\nclass WithholdingLogger(BufferLogger):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\tdef __init__(self,\n\t\t\tidCounter:IDCounter = None,\n\t\t\tparentID:int = None,\n\t\t\tindentLevel:int = 0,\n\t\t\tlogItemList = None,\n\t\t\tlogStats:LogStats = None,\n\t\t\textraProperties:JSONDict = None,\n\t\t\tmainLogger:AbstractLogger = None,\n\t\t\tbVerbose:bool = False,\n\t\t):\n\n\t\tsuper().__init__(idCounter, parentID, indentLevel, logItemList, logStats, extraProperties)\n\n\t\tself.__mainLogger = mainLogger\n\t\tself.__bVerbose = bVerbose\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\tdef _descend(self, logEntryStruct:list) -> AbstractLogger:\n\t\tself._logStats.increment(logEntryStruct[5])\n\n\t\tnextID = logEntryStruct[1]\n\t\tnewList = logEntryStruct[7]\n\n\t\treturn WithholdingLogger(\n\t\t\tidCounter=self._idCounter,\n\t\t\tparentID=nextID,\n\t\t\tindentLevel=self._indentationLevel + 1,\n\t\t\tlogItemList=newList,\n\t\t\tlogStats=self._logStats,\n\t\t\textraProperties=self._extraProperties,\n\t\t\tmainLogger=None,\n\t\t\tbVerbose=self.__bVerbose,\n\t\t)\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef __str__(self):\n\t\treturn \"<\" + self.__class__.__name__ + \"(\" + hex(id(self)) + \", indent=\" + str(self._indentationLevel) + \",parentID=\" + str(self._parentLogEntryID) + \")>\"\n\t#\n\n\tdef __repr__(self):\n\t\treturn \"<\" + self.__class__.__name__ + \"(\" + hex(id(self)) + \", indent=\" + str(self._indentationLevel) + \",parentID=\" + str(self._parentLogEntryID) + \")>\"\n\t#\n\n\tdef __exit__(self, ex_type:type, ex_value:Exception, ex_traceback):\n\t\tif ex_type != None:\n\t\t\tif isinstance(ex_value, ExceptionInChildContextException):\n\t\t\t\tif self.__mainLogger:\n\t\t\t\t\tself.forwardTo(self.__mainLogger)\n\t\t\t\treturn 
False\n\t\t\tif isinstance(ex_value, GeneratorExit):\n\t\t\t\tif self.__mainLogger:\n\t\t\t\t\tself.forwardTo(self.__mainLogger)\n\t\t\t\treturn False\n\t\t\t#e = ex_type(value)\n\t\t\t#self.exception(e)\n\t\t\tself.exception(ex_value)\n\t\t\tif self.__mainLogger:\n\t\t\t\tself.forwardTo(self.__mainLogger)\n\t\t\traise ExceptionInChildContextException(ex_value)\n\n\t\tif self.__mainLogger:\n\t\t\tif self.stats.hasAtLeastWarning or self.__bVerbose:\n\t\t\t\tself.forwardTo(self.__mainLogger)\n\n\t\treturn False\n\t#\n\n\t################################################################################################################################\n\t## Public Static Methods\n\t################################################################################################################################\n\n\t@staticmethod\n\tdef create(\n\t\t\tmainLogger:AbstractLogger = None,\n\t\t\t*args,\n\t\t\tjsonData:typing.Union[dict,list] = None,\n\t\t\tbVerbose:bool = False,\n\t\t):\n\n\t\tif args:\n\t\t\traise InvalidExtraArgumentsException()\n\n\t\t# ----\n\n\t\tappendData, extraProperties = BufferLogger._convertJSONToInternal(jsonData)\n\n\t\t# ----\n\n\t\tlogger = WithholdingLogger(extraProperties=extraProperties, mainLogger=mainLogger, bVerbose=bVerbose)\n\n\t\tif appendData is not None:\n\t\t\tlogger._logiAll(appendData, True)\n\n\t\treturn logger\n\t#\n\n#\n\n\n\n\n\n\n\n","sub_path":"src/jk_logging/WithholdingLogger.py","file_name":"WithholdingLogger.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"247477969","text":"import datetime\n\nfrom admin_actions.admin import ActionsModelAdmin\nfrom django.conf import settings\nfrom django.contrib import admin, messages\nfrom django.contrib.admin import TabularInline\nfrom django.contrib.admin.options import IncorrectLookupParameters\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Sum, Max, Case, When, DateTimeField, Value\nfrom django.shortcuts import redirect\nfrom django.template.defaultfilters import date\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import localtime\nfrom django.utils.translation import gettext_lazy as _\n\nfrom whoweb.core.admin import EventTabularInline\nfrom whoweb.search.events import ENQUEUED_FROM_ADMIN\nfrom whoweb.search.models import SearchExport, ScrollSearch, FilterValueList\nfrom whoweb.search.models.export import SearchExportPage\n\nepoch = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.get_default_timezone())\n\n\nclass SearchExportPageInline(TabularInline):\n model = SearchExportPage\n fields = (\n \"export_link\",\n \"status\",\n \"pending_count\",\n \"progress_counter\",\n \"final_count\",\n \"created\",\n \"modified\",\n )\n readonly_fields = fields\n extra = 0\n\n def get_queryset(self, request):\n return super().get_queryset(request).defer(\"data\")\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def final_count(self, obj):\n return obj.count\n\n @mark_safe\n def export_link(self, obj: SearchExportPage):\n link = reverse(\"admin:search_searchexportpage_change\", args=[obj.pk])\n return 'Page %s' % (link, obj.page_num)\n\n export_link.short_description = \"Page num\"\n\n\n@admin.register(SearchExportPage)\nclass SearchExportPageAdmin(ActionsModelAdmin):\n list_display = (\n 
\"pk\",\n \"export\",\n \"page_num\",\n \"created\",\n \"modified\",\n \"status\",\n \"count\",\n )\n list_display_links = (\"export\",)\n list_filter = (\"status\", \"created\")\n search_fields = (\"export__uuid\", \"export__pk\")\n fields = (\n \"export_link\",\n \"page_num\",\n \"created\",\n \"modified\",\n \"status\",\n \"count\",\n \"pending_count\",\n \"progress_counter\",\n )\n readonly_fields = fields\n\n def get_queryset(self, request):\n return super().get_queryset(request).defer(\"data\")\n\n @mark_safe\n def export_link(self, obj: SearchExportPage):\n link = reverse(\"admin:search_searchexport_change\", args=[obj.export_id])\n return '%s' % (link, obj.export)\n\n export_link.short_description = \"Export\"\n\n\nclass LatestPageModificationFilter(admin.SimpleListFilter):\n title = \"Latest Page Update Time\"\n parameter_name = \"_latest_page_modified\"\n\n def __init__(self, request, params, model, model_admin):\n self.field_generic = \"%s__\" % self.parameter_name\n\n self.date_params = {\n k: v for k, v in params.items() if k.startswith(self.field_generic)\n }\n\n now = timezone.now()\n # When time zone support is enabled, convert \"now\" to the user's time\n # zone so Django's definition of \"Today\" matches what the user expects.\n if timezone.is_aware(now):\n now = timezone.localtime(now)\n today = now.replace(hour=0, minute=0, second=0, microsecond=0)\n\n self.lookup_kwarg_since = \"%s__gte\" % self.parameter_name\n self.lookup_kwarg_until = \"%s__lt\" % self.parameter_name\n self.links = (\n (_(\"Any date\"), {}),\n (\n _(\"Today\"),\n {\n self.lookup_kwarg_since: str(today),\n self.lookup_kwarg_until: str(today + datetime.timedelta(days=1)),\n },\n ),\n (\n _(\"Yesterday\"),\n {\n self.lookup_kwarg_since: str(today - datetime.timedelta(days=1)),\n self.lookup_kwarg_until: str(today),\n },\n ),\n (\n _(\"1-2 days ago\"),\n {\n self.lookup_kwarg_since: str(today - datetime.timedelta(days=2)),\n self.lookup_kwarg_until: str(today - datetime.timedelta(days=1)),\n },\n ),\n (\n _(\"2-3 days ago\"),\n {\n self.lookup_kwarg_since: str(today - datetime.timedelta(days=3)),\n self.lookup_kwarg_until: str(today - datetime.timedelta(days=2)),\n },\n ),\n (\n _(\"More than 7 days ago\"),\n {self.lookup_kwarg_until: str(today - datetime.timedelta(days=7)),},\n ),\n )\n super().__init__(request, params, model, model_admin)\n\n def lookups(self, request, model_admin):\n return self.links\n\n def choices(self, changelist):\n for title, param_dict in self.links:\n yield {\n \"selected\": self.date_params == param_dict,\n \"query_string\": changelist.get_query_string(\n param_dict, [self.field_generic]\n ),\n \"display\": title,\n }\n\n def expected_parameters(self):\n params = [self.lookup_kwarg_since, self.lookup_kwarg_until]\n return params\n\n def queryset(self, request, queryset):\n try:\n return queryset.filter(**self.used_parameters)\n except (ValueError, ValidationError) as e:\n # Fields may raise a ValueError or ValidationError when converting\n # the parameters to the correct type.\n raise IncorrectLookupParameters(e)\n\n\n@admin.register(SearchExport)\nclass ExportAdmin(ActionsModelAdmin):\n list_display = (\n \"pk\",\n \"uuid\",\n \"billing_seat\",\n \"status\",\n \"rows_enqueued\",\n \"latest_page_modified\",\n \"progress_counter\",\n \"target\",\n \"rows_uploaded\",\n \"should_derive_email\",\n )\n list_display_links = (\n \"pk\",\n \"uuid\",\n )\n list_per_page = 10\n list_filter = (\n \"status\",\n \"charge\",\n \"created\",\n \"modified\",\n LatestPageModificationFilter,\n 
)\n search_fields = (\n \"seat__user__email\",\n \"seat__user__username\",\n \"billing_seat__user__email\",\n \"billing_seat__user__username\",\n \"pk\",\n \"uuid\",\n )\n fieldsets = (\n (None, {\"fields\": (\"uuid\", \"billing_seat\", \"query\", \"scroller\",)}),\n (\n \"StatusOptions Fields\",\n {\n \"classes\": (),\n \"fields\": (\n (\"status\", \"status_changed\",),\n (\"progress_counter\", \"target\", \"rows_uploaded\"),\n \"queue_priority\",\n \"rows_enqueued\",\n \"working_count\",\n \"latest_page_modified\",\n (\"sent\", \"sent_at\",),\n \"validation_list_id\",\n ),\n },\n ),\n (\n \"Behavior Fields\",\n {\n \"classes\": (\"collapse\",),\n \"fields\": ((\"charge\", \"notify\", \"on_trial\"), \"column_names\",),\n },\n ),\n )\n readonly_fields = (\n \"sent\",\n \"sent_at\",\n \"working_count\",\n \"rows_enqueued\",\n \"rows_uploaded\",\n \"queue_priority\",\n \"latest_page_modified\",\n \"status_changed\",\n \"scroller\",\n \"column_names\",\n )\n inlines = [EventTabularInline, SearchExportPageInline]\n actions_row = (\"download\", \"download_json\")\n actions_detail = (\"run_publication_tasks\", \"download\", \"download_json\")\n actions = (\"store_validation_results\", \"compute_rows_uploaded\")\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .annotate(\n _working_count=Sum(\"pages__progress_counter\"),\n _rows_enqueued=Sum(\"pages__pending_count\"),\n _page_modified=Max(\"pages__modified\"),\n )\n .annotate(\n _latest_page_modified=Case(\n When(_page_modified__isnull=False, then=\"_page_modified\"),\n default=Value(epoch), # sorts null to bottom\n output_field=DateTimeField(),\n )\n )\n )\n\n def working_count(self, obj):\n return obj._working_count\n\n working_count.short_description = \"Working Rows\"\n\n def rows_enqueued(self, obj):\n return obj._rows_enqueued\n\n rows_enqueued.admin_order_field = \"_rows_enqueued\"\n\n def latest_page_modified(self, obj):\n return (\n \"n/a\"\n if obj._latest_page_modified == epoch\n else \"{} ({})\".format(\n date(localtime(obj._latest_page_modified), settings.DATETIME_FORMAT),\n naturaltime(obj._latest_page_modified),\n )\n )\n\n latest_page_modified.admin_order_field = \"_latest_page_modified\"\n\n def column_names(self, obj):\n return \", \".join(obj.get_column_names())\n\n column_names.short_description = \"columns\"\n\n @mark_safe\n def scroller(self, obj):\n if obj.scroll:\n link = reverse(\n \"admin:search_scrollsearch_change\", args=[obj.scroll.pk]\n ) # model name has to be lowercase\n return '%s' % (link, obj.scroll.scroll_key)\n return \"None\"\n\n def download(self, request, pk):\n export = SearchExport.objects.get(pk=pk)\n return redirect(export.get_absolute_url())\n\n download.short_description = \"💾.csv\"\n\n def download_json(self, request, pk):\n export = SearchExport.objects.get(pk=pk)\n return redirect(export.get_absolute_url(\"json\"))\n\n download_json.short_description = \"💾.json\"\n\n def run_publication_tasks(self, request, pk):\n export = SearchExport.available_objects.get(pk=pk)\n sigs = export.processing_signatures()\n res = sigs.apply_async()\n self.message_user(\n request,\n f\"{export} successfully published. 
(Did not reset credits, flags, status, or counters first).\",\n level=messages.SUCCESS,\n )\n self.message_user(request, f\"Tasks run: {sigs}\", level=messages.INFO)\n self.message_user(request, f\"Result ID: {res}\", level=messages.INFO)\n\n export.log_event(\n evt=ENQUEUED_FROM_ADMIN, signatures=str(sigs), async_result=str(res)\n )\n return redirect(reverse(\"admin:search_searchexport_change\", args=[pk]))\n\n run_publication_tasks.short_description = \"Rerun\"\n\n def store_validation_results(self, request, queryset):\n for export in queryset:\n results = export.get_validation_results(only_valid=True)\n self.message_user(\n request, f\"Downloaded validation for {export}.\", level=messages.SUCCESS\n )\n export.apply_validation_to_profiles_in_pages(validation=results)\n self.message_user(\n request,\n f\"Updated profiles in page data of {export} with validation results.\",\n level=messages.SUCCESS,\n )\n\n def compute_rows_uploaded(self, request, queryset):\n for export in queryset:\n if export.status != SearchExport.ExportStatusOptions.COMPLETE:\n self.message_user(\n request,\n f\"Could not set row count of {export}, export is not complete.\",\n level=messages.WARNING,\n )\n continue\n if export.rows_uploaded > 0:\n self.message_user(\n request, f\"{export} row count already set.\", level=messages.INFO,\n )\n continue\n row_count = 0\n for _ in export.generate_csv_rows():\n row_count += 1\n export.rows_uploaded = row_count\n export.save()\n self.message_user(\n request, f\"Set row count of {export}.\", level=messages.SUCCESS,\n )\n\n\n@admin.register(ScrollSearch)\nclass ScrollSearchAdmin(ActionsModelAdmin):\n fields = (\n \"scroll_key\",\n \"scroll_key_modified\",\n \"page_size\",\n \"query_hash\",\n \"total\",\n \"query_serialized\",\n )\n readonly_fields = fields\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def query_serialized(self, obj):\n return obj.query.serialize()\n\n query_serialized.short_description = \"query\"\n\n\n@admin.register(FilterValueList)\nclass FilterValueListAdmin(ActionsModelAdmin):\n fields = (\n \"name\",\n \"description\",\n \"type\",\n \"tags\",\n \"values\",\n \"billing_seat\",\n )\n","sub_path":"whoweb/search/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":12985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"613141633","text":"from __future__ import division\nimport numpy as np\nfrom scipy.odr import Model, RealData, ODR\nimport scipy.stats as stats\n\ndef R_Rq(path, tolerancia):\n \"\"\"\n Calcula la resistencia de quenching y la temperatura (escrita en funcion de la\n resistencia que mide T) de un conjunto de mediciones, haciendo estadistica sobre los\n datos. \n Es decir, si mido muchas curvas IV para una dada T, guardo las mediciones en una carpeta\n que contenga: una carpeta '/iv/' que contenga las curvas '/i (iv).txt' y otra carpeta\n '/res/' que contenga las resistencias '/i (res).txt'.\n Esta funcion va a calcular la Rq y la T para cada par (iv, res), y luego va a realizar\n un promedio pesado sobre las Rq y las T, devolviendo dichos parametros con sus errores.\n La funcion se va a encargar de filtrar aquellas mediciones en que la temperatura\n fluctuo mas que la tolerancia deseada, utilizando al funcion pulidor(). 
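    A hedged usage sketch, assuming the folder layout described above
    ('mediciones/serie_77K' is a placeholder path):

    >>> R, R_err, Rq, Rq_err, chi2, usados = R_Rq('mediciones/serie_77K', 0.025)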
La tolerancia\n tipica es de 0.025 para mediciones estacionarias en T.\n El path de la funcion debe ser la carpeta donde se encuentren las carpetas iv y res.\n\n La funcion asume que la temperatura se midio con una RTD, sourceando corriente y\n midiendo voltaje.\n \n Input: (path, tolerancia) [string, float] \n \n Returns: (R, R_err, Rq, Rq_err, chi2_out, array) [float, float, float, float, float, list]\n .\n .\n \"\"\"\n array = pulidor(tolerancia, path)\n celdas = 18980\n R = []\n R_err = []\n Rq = []\n Rq_err = []\n chi2_out = []\n for i in array:\n data1 = np.loadtxt(path+'/res/%s (res).txt' % i, skiprows=1)\n Res = data1[:, 1] \n I = data1[:, 2]\n V = I*Res\n V_err = error_V(V, source = False)\n I_err = error_I(I, source = True)\n Res_err_estadistico = dispersion(Res) \n Res_err_sistematico = [np.sqrt((1/I[i]**2) * V_err[i]**2 + ((V[i]/(I[i]**2))**2)*I_err[i]**2) for i in range(len(I))]\n Res_err = [np.sqrt(Res_err_estadistico**2 + Res_err_sistematico[i]**2) for i in range(len(Res_err_sistematico))]\n R.append(weightedMean(Res, Res_err))\n R_err.append(weightedError(Res, Res_err))\n data2 = np.loadtxt(path+'/iv/%s (iv).txt' % i)\n V = data2[:, 0]\n I = data2[:, 1]\n dI = error_I(I)\n dV = error_V(V)\n\n chi2 = []\n Rq_err_temp = []\n m = []\n for j in range(0, len(V) - 2):\n V_temp = V[j:]\n I_temp = I[j:]\n dI_temp = dI[j:]\n dV_temp = dV[j:]\n\n linear_model = Model(Linear)\n data = RealData(V_temp, I_temp, sx=dV_temp, sy=dI_temp)\n odr = ODR(data, linear_model, beta0=[0., 1.])\n out = odr.run()\n \n m_temp = out.beta[0]\n #b_temp = out.beta[1]\n m_err_temp = out.sd_beta[0]\n \n m.append(m_temp)\n chi2.append(out.res_var)\n Rq_err_temp.append((celdas/m_temp**2) * m_err_temp)\n index = ClosestToOne(chi2)\n Rq.append(celdas/m[index])\n chi2_out.append(chi2[index])\n Rq_err.append(Rq_err_temp[index])\n return R, R_err, Rq, Rq_err, chi2_out, array\n\n\ndef error_I(y, SMU, source = False):\n \"\"\"\n Esta funcion esta diseniada para crear un array con los errores de la corriente \n medida o sourceada por un Kiethley 2611B, 2612B, 2614B.\n La funcion toma una lista que tiene la corriente, y un boolean que indica si la \n corriente fue medida o sourceada.\n \n Input: (I, source = False)\n \n Si no se especifica el source, entonc\n I_led_temp_1 = I_led[1:int(len(I_led)/2)]es la corriente fue medida. Si source = True,\n entonces se sourceo con corriente.\n \n Returns: I_err (list)\n .\n .\n \"\"\"\n if SMU == '2612':\n I_temp= y\n temp = []\n percentage = 0\n offset = 0\n if source == True:\n for i in range(0, len(I_temp)):\n if I_temp[i] <= 100*pow(10, -9):\n percentage = 0.0006\n offset = 100*pow(10, -12)\n elif 100*pow(10, -9) < I_temp[i] and I_temp[i] <= 1*pow(10, -6):\n percentage = 0.0003\n offset = 800*pow(10, -12) \n elif 1*pow(10, -6) 0.03\n \n Input: (tolerancia, path)\n \n Returns: list\n .\n .\n \"\"\"\n filtro = []\n i = 0\n while True:\n try:\n i = i+1\n data = np.loadtxt(path + '/res/%s (res).txt' % i, skiprows=1)\n Res = data[:, 1]\n if dispersion(Res) <= tolerancia:\n filtro.append(i)\n except IOError:\n break\n return filtro\n \ndef ClosestToOne(v):\n \"\"\"\n Esta funcion toma una lista, y devuelve el indice del elemento mas cercano a 1 de \n la lista.\n \n Input: list\n \n Returns: int (index)\n .\n .\n \"\"\"\n compliance = []\n for j in range(0, len(v)):\n compliance.append(abs(v[j] - 1))\n return compliance.index(np.min(compliance))\n\ndef promediar_puntos(eje_x, eje_y, p):\n \"\"\"\n Promediador de puntos para un grafico. 
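    A hedged sketch; note that the implementation below returns the averaged
    Y axis first and the averaged X axis second:

    >>> Y_prom, X_prom = promediar_puntos(X, Y, 5)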
Toma un eje X y un eje Y, y promedia cada p\n puntos. Sirve para smoothear mediciones muy ruidosas y densas.\n \n Input: (X, Y, p) [list, list, int]\n\n Returns: (X_promediado, Y_promediado) [list, list]\n .\n .\n \"\"\"\n array_promediado = []\n array_tiempo_promediado = []\n array1 = eje_y\n array2 = eje_x\n for j in range(int(len(array1)/p)):\n total = 0 \n total_tiempo = 0\n for i in range(p):\n total += array1[j * p + i] / float(p)\n total_tiempo += array2[j * p + i] / float(p)\n array_promediado.append(total)\n array_tiempo_promediado.append(total_tiempo)\n return array_promediado, array_tiempo_promediado\n\n\ndef DerivateData(V, V_err, I, I_err):\n \"\"\"\n Derivador numerico de datos.\n \n Input: (X, X_err, Y, Y_err) lists\n \n Returns: (dX, dX_err, dY, dY_err) lists\n \"\"\"\n V_temp = []\n I_temp = []\n V_err_temp = []\n I_err_temp = []\n step = V[2] - V[1]\n for i in range(len(I)-1):\n I_temp.append((I[i + 1] - I[i - 1])/(2*step))\n V_temp.append(V[i])\n I_err_temp.append(((I_err[i + 1] - I_err[i - 1])/(2*step))**2)\n V_err_temp.append(V_err[i])\n return V_temp, V_err_temp, I_temp, I_err_temp\n\n\ndef Smooth(V, V_err, I, I_err, degree):\n V_temp = []\n I_temp = []\n V_err_temp = []\n I_err_temp = []\n threshold = 0\n for i in range(len(I)-1):\n if abs(I[i+1]-I[i])> threshold:\n threshold = I[i] \n for i in range(len(I)-1):\n if not abs(I[i + 1] - I[i]) > (threshold/2) or abs(I[i] - I[i - 1]) > (threshold / 2):\n V_temp.append(V[i])\n I_temp.append(I[i])\n V_err_temp.append(V_err[i])\n I_err_temp.append(I_err[i])\n return V, V_err, I, I_err\n\ndef Linear(M, x):\n \"\"\"\n Funcion lineal para ajustar con el ODR:\n \n >>> linear_model = Model(Linear)\n >>> data = RealData(X, Y, sx=X_err, sy=Y_err)\n >>> odr = ODR(data, linear_model, beta0=[0., 1.])\n >>> out = odr.run()\n \n >>> m = out.beta[0]\n >>> b = out.beta[1]\n >>> m_err = out.sd_beta[0]\n >>> b_err = out.sd_beta[1] \n >>> chi2 = out.res_var\n .\n .\n \"\"\"\n m, b = M\n return m*x + b\n\ndef weightedMean(measurements, weights):\n \"\"\"\n Devuelve el promedio pesado de una muestra con sus respectivos errores.\n \n Input: (X, X_err) lists\n \n Returns: float\n .\n .\n \"\"\"\n wTotal = np.mean([1/i**2 for i in weights])\n mwTotal = 0\n mean = 0 \n# for i in range(0, len(weights)):\n# wTotal += (1 / weights[i]**2)\n for i in range(0, len(measurements)):\n mwTotal += measurements[i]*(1/weights[i]**2)\n mean = mwTotal / wTotal \n return mean\n\ndef weightedError(measurements, weights):\n \"\"\"\n A chequear\n \"\"\"\n wTotal = 0\n weights = np.asarray(weights)\n for i in range(0, len(weights)):\n wTotal += 1 / weights[i]**2\n return (1/wTotal)\n\ndef weightedErrorR(measurements, weights):\n \"\"\"\n A chequear\n \"\"\"\n wTotal = 0\n for i in range(0, len(weights)):\n wTotal += 1 / weights[i]**2\n Rmean = weightedMean(measurements, weights)\n I = 0.0001\n V = I*Rmean\n V_err = V*0.00015 + 0.000225\n I_err = I*0.0003 + 0.00000006\n return np.sqrt(1/wTotal**2 + ((1/I)**2 * V_err**2 + (V/(I**2))**2 * I_err**2))\n\n\ndef ks_iterative(x, y, x_err, y_err, Foward = True):\n \"\"\"\n Esta funcion agarra un eje X (k), un eje Y (k_nn) y busca los parametros para\n ajustar la mejor recta, buscando el regimen lineal de la curva. Esto lo hace\n sacando puntos de la curva, ajustando la curva resultante, y luego comparando \n los parametros de los distintos ajustes con el metodo de Kolmogorov Smirnoff.\n \n Si Foward=True entonces la funcion va a ir sacando puntos del final para\n encontrar kmax. 
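    A hedged call sketch (x, y, x_err, y_err are arrays prepared by the caller);
    note that the implementation actually returns six values:

    >>> m, b, m_err, b_err, ks_stat, index = ks_iterative(x, y, x_err, y_err, Foward=True)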
Si Foward=False, la funcion va a sacar puntos del principio para\n calcular kmin. El punto va a estar dado por k[index].\n \n Returns: m, b, ks_stat, index\n \n m: pendiente de la recta resultante\n b: ordenada de la recta resultante\n ks_stat: estadistico de KS de la recta resultante\n index: indice del elemento donde empieza/termina el regimen lineal.\n .\n .\n \"\"\"\n KS_list = []\n pvalue_list = []\n m_list = []\n b_list = []\n m_err_list = []\n b_err_list = []\n if Foward==True:\n for j in range(0, len(x)-3):\n y_temp = y[:len(y)-j]\n x_temp = x[:len(x)-j]\n x_err_temp = x_err[:len(x_err)-j]\n y_err_temp = y_err[:len(y_err)-j]\n linear_model = Model(Linear)\n data = RealData(x_temp, y_temp, sx=x_err_temp, sy=y_err_temp)\n odr = ODR(data, linear_model, beta0=[0., 1.])\n out = odr.run()\n modelo = [j*out.beta[0]+out.beta[1] for j in x_temp]\n KS_list.append(stats.ks_2samp(y_temp, modelo)[0])\n pvalue_list.append(stats.ks_2samp(y_temp, modelo)[1])\n m_list.append(out.beta[0])\n b_list.append(out.beta[1])\n m_err_list.append(out.sd_beta[0])\n b_err_list.append(out.sd_beta[1])\n else:\n for j in range(0, len(x)-3):\n y_temp = y[:len(y)-j]\n x_temp = x[:len(x)-j]\n x_err_temp = x_err[:len(x_err)-j]\n y_err_temp = y_err[:len(y_err)-j]\n linear_model = Model(Linear)\n data = RealData(x_temp, y_temp, sx=x_err_temp, sy=y_err_temp)\n odr = ODR(data, linear_model, beta0=[0., 1.])\n out = odr.run()\n modelo = [j*out.beta[0]+out.beta[1] for j in x_temp]\n KS_list.append(stats.ks_2samp(y_temp, modelo)[0])\n pvalue_list.append(stats.ks_2samp(y_temp, modelo)[1])\n m_list.append(out.beta[0])\n b_list.append(out.beta[1])\n m_err_list.append(out.sd_beta[0])\n b_err_list.append(out.sd_beta[1])\n index = KS_list.index(min(KS_list))\n m = m_list[index]\n b = b_list[index]\n m_err = m_err_list[index]\n b_err = b_err_list[index]\n ks_stat = KS_list[index]\n \n return m, b, m_err, b_err, ks_stat, index\n\ndef promediador_grupos(path, tolerancia, punto_a_punto = True):\n \"\"\"\n Esta funcion toma una carpeta con mediciones de curvas IV a una dada temperatura,\n y promedia punto a punto las curvas.\n Por otro lado, esta la opcion de promediar punto a punto las resistencias medidas\n o promediar la resistencia promedio de todas las curvas.\n La funcion se va a encargar de filtrar aquellas mediciones en que la temperatura\n fluctuo mas que la tolerancia deseada, utilizando al funcion pulidor(). 
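    A hedged usage sketch ('mediciones/serie_77K' is a placeholder path):

    >>> V_prom, I_prom, R_prom, R_err_prom = promediador_grupos('mediciones/serie_77K', 0.025)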
La tolerancia\n tipica es de 0.025 para mediciones estacionarias en T.\n El path de la funcion debe ser la carpeta donde se encuentren las carpetas iv y res.\n\n La funcion asume que la temperatura se midio con una RTD, sourceando corriente y\n midiendo voltaje.\n \n Input: (path, tolerancia) [string, float] \n \n Returns: (V_promedio, I_promedio, R_promedio, R_err_promedio) lists\n .\n .\n \"\"\"\n array = pulidor(tolerancia, path)\n R = []\n R_err = []\n I = []\n V = []\n if punto_a_punto == True:\n for h in array:\n data = np.loadtxt(path+'/res/%s (res).txt' % h, skiprows=1) \n Res = data[:, 1] \n I = data[:, 2]\n V = I*Res\n V_err = error_V(V, source = False)\n I_err = error_I(I, source = True)\n Res_err_estadistico = dispersion(Res) \n Res_err_sistematico = np.sqrt((1/I**2) * V_err**2 + ((V/(I**2))**2)*I_err**2)\n Res_err = np.sqrt(Res_err_estadistico**2 + Res_err_sistematico**2) \n R.append(list(Res))\n R_err.append(list(Res_err))\n R_promedio = []\n for i in range(len(R[1])):\n c = 0\n for j in range(len(R)):\n c += R[j][i] \n R_promedio.append(c/len(R))\n R_err_promedio = []\n for i in range(len(R_err[1])):\n c = 0\n for j in range(len(R_err)):\n c += R_err[j][i] \n R_err_promedio.append(c/len(R_err))\n else:\n for h in array:\n data = np.loadtxt(path+'/res/%s (res).txt' % h, skiprows=1)\n Res = data[:, 1] \n I = data[:, 2]\n V = I*Res\n V_err = error_V(V, source = False)\n I_err = error_I(I, source = True)\n Res_err_estadistico = dispersion(Res) \n Res_err_sistematico = np.sqrt((1/I**2) * V_err**2 + ((V/(I**2))**2)*I_err**2)\n Res_err = np.sqrt(Res_err_estadistico**2 + Res_err_sistematico**2)\n R.append(weightedMean(Res, Res_err))\n R_err.append(weightedError(Res, Res_err))\n data2 = np.loadtxt(path+'/iv/%s (iv).txt' % h, skiprows=1)\n V.append(list(data2[:, 0]))\n I.append(list(data2[:, 1]))\n R_promedio = weightedMean(R, R_err)\n R_err_promedio = weightedError(R, R_err)\n \n I_promedio = []\n for i in range(len(I[1])):\n c = 0\n for j in range(len(I)):\n c += I[j][i] \n I_promedio.append(c/len(I))\n \n V_promedio = [] \n for i in range(len(V[1])):\n c = 0\n for j in range(len(V)):\n c += V[j][i]\n V_promedio.append(c/len(V))\n \n return V_promedio, I_promedio, R_promedio, R_err_promedio\n\n\ndef error_R(R, I_source):\n V = [i*I_source for i in R]\n V_err = error_V(V, '2612', source = False)\n I_err = I_source * 0.0003 + 60*pow(10, -9)\n dr = [np.sqrt((1 / I_source)**2 * V_err[i]**2 + \n (V[i] / I_source**2)**2 * I_err**2) \n for i in range(len(R))]\n return dr","sub_path":"Codigos/Funciones.py","file_name":"Funciones.py","file_ext":"py","file_size_in_byte":25007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421957815","text":"#-*-coding:utf-8-*-\n#@作者:haiyu.ma\n#@创建日期:2020-07-05 23:26 \nimport yaml\n\n# 1.打开yaml文件\nfs = open(\"demo.yaml\")\n# 2.转换成python对象\n# yaml.load(fs)\nres = yaml.load(fs, Loader=yaml.FullLoader)\nprint(res)","sub_path":"AppAutomation/AppFramework/Desired_Caps/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"51906397","text":"from main import db\n\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\ncoauthors = db.Table('coauthor',\n db.Column('author_id', db.Integer,\n db.ForeignKey('author.id')),\n db.Column('coauthor_id', db.Integer,\n db.ForeignKey('author.id'))\n)\n\nauthor_publications = db.Table('author_publication',\n 
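    # Association table backing the Author<->Publication many-to-many
    # relationship; same pattern as the self-referential coauthors table above.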
db.Column('author_id', db.Integer,\n db.ForeignKey('author.id')),\n db.Column('publication_id', db.Integer,\n db.ForeignKey('publication.id'))\n)\n\nclass Author(db.Model):\n \"\"\"\n A class that represents authors.\n \"\"\"\n\n __tablename__ = 'author'\n \"\"\"\n The name of the table where authors are stored.\n \"\"\"\n\n id = db.Column(db.Integer, primary_key = True, autoincrement = True)\n \"\"\"\n The ID of the author.\n \"\"\"\n\n name = db.Column(db.String(256), nullable = False)\n \"\"\"\n The name of the author.\n \"\"\"\n\n title = db.Column(db.String(256), nullable = True)\n \"\"\"\n The title (e.g. associate professor) of the author.\n \"\"\"\n\n organization_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable = True)\n \"\"\"\n The ID of the organization where the author belongs.\n \"\"\"\n\n organization = db.relation('Organization', foreign_keys=[organization_id], backref = \"authors\")\n \"\"\"\n The organization where the author belongs.\n \"\"\"\n\n year_of_phd = db.Column(db.Integer, nullable = True)\n \"\"\"\n The year when the author received his/her Ph.D.\n \"\"\"\n\n tenured = db.Column(db.Boolean, nullable = True)\n \"\"\"\n Whether the author is tenured.\n \"\"\"\n\n scholar_id = db.Column(db.String(64), nullable = True, unique = True)\n \"\"\"\n The ID of the author in Google Scholar.\n \"\"\"\n\n website_url = db.Column(db.String(256), nullable = True)\n \"\"\"\n The URL of the website of the author.\n \"\"\"\n\n email_domain = db.Column(db.String(256), nullable = True)\n \"\"\"\n The domain of the email of the author.\n \"\"\"\n\n total_citations = db.Column(db.Integer, nullable = True)\n \"\"\"\n The total citations for the author.\n \"\"\"\n\n h_index = db.Column(db.Numeric(precision = 10, scale = 2), nullable = True)\n \"\"\"\n The value of the h-index metric for the author.\n \"\"\"\n\n i10_index = db.Column(db.Numeric(precision = 10, scale = 2), nullable = True)\n \"\"\"\n The value of the i10-index metric for the author.\n \"\"\"\n\n retrieved_at = db.Column(db.DateTime, nullable = True)\n \"\"\"\n The date and time when information about the author was last retrieved \n from Google Scholar.\n \"\"\"\n\n coauthors = db.relationship(\"Author\", secondary = coauthors,\n primaryjoin = id == coauthors.c.author_id,\n secondaryjoin = id == coauthors.c.coauthor_id)\n \"\"\"\n The co-authors of the author.\n \"\"\"\n\n publications = db.relationship(\"Publication\",\n secondary = author_publications,\n backref = \"authors\")\n \"\"\"\n The publications of the author.\n \"\"\"\n\n citations_per_year = db.relationship(\"AuthorCitationsPerYear\",\n cascade=\"all, delete-orphan\")\n \"\"\"\n The citations per year for the author.\n \"\"\"\n\n def organization_tree(self):\n \"\"\"\n Gets the names of the organization, where the author belongs, and all\n its ancestors (starting from the root of the family tree) separated with\n ' :: '.\n \"\"\"\n if not self.organization:\n return ''\n organizations = [ self.organization ]\n organizations.extend(self.organization.ancestors())\n return ' :: '.join([ a.name for a in reversed(organizations) ])\n\n def organization_ids(self):\n \"\"\"\n Gets the ID's of the organization, where the author belongs, and all\n its ancestors (starting from the root of the family tree).\n \"\"\"\n if not self.organization:\n return []\n organizations = [ self.organization ]\n organizations.extend(self.organization.ancestors())\n return [ a.id for a in reversed(organizations) ]\n\nclass AuthorCitationsPerYear(db.Model):\n 
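    # Composite primary key: one row per (author_id, year) pair, declared by
    # marking both columns primary_key=True below.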
\"\"\"\n A class that represents the citations for authors per year.\n \"\"\"\n\n __tablename__ = 'author_citations_per_year'\n \"\"\"\n The name of the table where citations per year are stored.\n \"\"\"\n\n author_id = db.Column(db.Integer, db.ForeignKey('author.id'), primary_key = True)\n \"\"\"\n The ID of the author.\n \"\"\"\n\n author = db.relation('Author')\n \"\"\"\n The author.\n \"\"\"\n\n year = db.Column(db.Integer, primary_key = True)\n \"\"\"\n The year.\n \"\"\"\n\n citations = db.Column(db.Integer, nullable = False)\n \"\"\"\n The citations for the author in the year.\n \"\"\"\n\nclass Organization(db.Model):\n \"\"\"\n A class that represents organizations (e.g. universities, schools,\n departments).\n \"\"\"\n\n __tablename__ = 'organization'\n \"\"\"\n The name of the table where organizations are stored.\n \"\"\"\n\n id = db.Column(db.Integer, primary_key = True, autoincrement = True)\n \"\"\"\n The ID of the organization.\n \"\"\"\n\n name = db.Column(db.String(256), nullable = False)\n \"\"\"\n The name of the organization.\n \"\"\"\n\n parent_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable = True)\n \"\"\"\n The ID of the parent organization.\n \"\"\"\n\n parent = db.relation('Organization', remote_side = [id], backref = \"children\")\n \"\"\"\n The parent organization.\n \"\"\"\n\n location = db.Column(db.String(256), nullable = True)\n \"\"\"\n The location of the organization.\n \"\"\"\n\n website_url = db.Column(db.String(256), nullable = True)\n \"\"\"\n The URL of the website of the organization.\n \"\"\"\n\n children_source_url = db.Column(db.String(256), nullable = True)\n \"\"\"\n The URL where the children of the organization can be retrieved from.\n \"\"\"\n\n def children_ids(self):\n \"\"\"\n Gets the ID's of the children of the organization.\n \"\"\"\n return [ c.id for c in self.children ]\n\n def ancestors(self):\n \"\"\"\n Gets the ancestors of the organization (starting from its parent and\n ending at the root of the family tree).\n \"\"\"\n if self.parent is None:\n return []\n l = [ self.parent ]\n l.extend(self.parent.ancestors())\n return l\n\n def ancestor_ids(self):\n \"\"\"\n Gets the ID's of the ancestors of the organization (starting from its\n parent and ending at the root of the family tree).\n \"\"\"\n return [ a.id for a in self.ancestors() ]\n\n def ancestor_tree(self):\n \"\"\"\n Gets the names of the ancestors of the organization (starting from the\n root of the family tree) separated with ' :: '.\n \"\"\"\n ancestors = self.ancestors()\n if not ancestors:\n return None\n return ' :: '.join([ a.name for a in reversed(ancestors) ])\n\n def descendants(self):\n \"\"\"\n Gets the descendants of the organization (starting from its children and\n ending at the leaves of the family tree).\n \"\"\"\n if not self.children:\n return []\n l = []\n for c in self.children:\n l.append(c)\n l.extend(c.descendants())\n return l\n\n def descendant_tree(self):\n \"\"\"\n Gets the descendants of the organization as a tree (starting from the\n children).\n \"\"\"\n descendants = []\n for c in self.children:\n descendants.append({ 'id': c.id, 'name': c.name, 'children': c.descendant_tree(), 'number_of_authors': c.number_of_authors() })\n return descendants\n\n def descendant_ids(self):\n \"\"\"\n Gets the ID's of the descendants of the organization (starting from its children and\n ending at the leaves of the family tree).\n \"\"\"\n return [ d.id for d in self.descendants() ]\n\n def number_of_authors(self):\n \"\"\"\n Gets the 
number of the authors that belong to the organization.\n \"\"\"\n return len(self.authors);\n\nclass Publication(db.Model):\n \"\"\"\n A class that represents publications.\n \"\"\"\n\n __tablename__ = 'publication'\n \"\"\"\n The name of the table where publications are stored.\n \"\"\"\n\n id = db.Column(db.Integer, primary_key = True, autoincrement = True)\n \"\"\"\n The ID of the publication.\n \"\"\"\n\n type = db.Column(db.String(16), nullable = True)\n \"\"\"\n The type of the publication.\n \"\"\"\n\n title = db.Column(db.String(512), nullable = True)\n \"\"\"\n The title of the publication.\n \"\"\"\n\n author_names = db.Column('authors', db.String(512), nullable = True)\n \"\"\"\n The names of the authors of the publication separated with commas.\n \"\"\"\n\n scholar_id = db.Column(db.String(64), nullable = True, unique = True)\n \"\"\"\n The ID of the publication in Google Scholar.\n \"\"\"\n\n year_of_publication = db.Column(db.Integer, nullable = True)\n \"\"\"\n The year when the publication was published.\n \"\"\"\n\n total_citations = db.Column(db.Integer, nullable = True)\n \"\"\"\n The total citations for the publication.\n \"\"\"\n\n retrieved_at = db.Column(db.DateTime, nullable = True)\n \"\"\"\n The date and time when information about the publication was last retrieved\n from Google Scholar.\n \"\"\"\n\n citations_per_year = db.relationship(\"PublicationCitationsPerYear\",\n cascade=\"all, delete-orphan\")\n \"\"\"\n The citations per year for the publication.\n \"\"\"\n\nclass PublicationCitationsPerYear(db.Model):\n \"\"\"\n A class that represents the citations for publications per year.\n \"\"\"\n\n __tablename__ = 'publication_citations_per_year'\n \"\"\"\n The name of the table where citations per year are stored.\n \"\"\"\n\n publication_id = db.Column(db.Integer, db.ForeignKey('publication.id'), primary_key = True)\n \"\"\"\n The ID of the publication.\n \"\"\"\n\n publication = db.relation('Publication')\n \"\"\"\n The publication.\n \"\"\"\n\n year = db.Column(db.Integer, primary_key = True)\n \"\"\"\n The year.\n \"\"\"\n\n citations = db.Column(db.Integer, nullable = False)\n \"\"\"\n The citations for the publication in the year.\n \"\"\"\n\nfrom main import app\nfrom flask.ext.login import UserMixin\nclass User(UserMixin):\n \"\"\"\n A class that represents application users.\n \"\"\"\n\n def __init__(self, username, password):\n \"\"\"\n Constructs a user with the given username and password.\n \"\"\"\n self.id = username\n self.password = password\n\n @classmethod\n def get(cls):\n \"\"\"\n Gets the only valid application user.\n \"\"\"\n return User(app.config['BASIC_AUTH_USERNAME'], app.config['BASIC_AUTH_PASSWORD'])","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"173752928","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación, Universidad de Los Andes\n * \n * Contribución de:\n *\n * Cristian Camilo Castellanos\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied 
warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see .\n \"\"\"\n\n\"\"\"\n Este módulo es una aplicación básica con un menú de opciones para cargar datos, contar elementos, y hacer búsquedas sobre una lista.\n\"\"\"\n\nimport config as cf\nimport sys\nimport csv\nfrom time import process_time \n\ndef loadCSVFile (file, lst, sep=\";\"):\n \"\"\"\n Carga un archivo csv a una lista\n Args:\n file \n Archivo de texto del cual se cargaran los datos requeridos.\n lst :: []\n Lista a la cual quedaran cargados los elementos despues de la lectura del archivo.\n sep :: str\n Separador escodigo para diferenciar a los distintos elementos dentro del archivo.\n Try:\n Intenta cargar el archivo CSV a la lista que se le pasa por parametro, si encuentra algun error\n Borra la lista e informa al usuario\n Returns: None \n \"\"\"\n del lst[:]\n print(\"Cargando archivo ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lst.append(row)\n except:\n del lst[:]\n print(\"Se presento un error en la carga del archivo\")\n \n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n\ndef printMenu():\n \"\"\"\n Imprime el menu de opciones\n \"\"\"\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos Movies casting\")\n print(\"2- Cargar Datos Movies details\")\n print(\"3- Saber cuantas buenas peliculas existen de un director\")\n print(\"4- Conocer las peliculas mas/menos votadas y las mejores/peores votadas\")\n print(\"0- Salir\")\n\ndef countElementsFilteredByColumn(criteria, column, lst):\n \"\"\"\n Retorna cuantos elementos coinciden con un criterio para una columna dada \n Args:\n criteria:: str\n Critero sobre el cual se va a contar la cantidad de apariciones\n column\n Columna del arreglo sobre la cual se debe realizar el conteo\n list\n Lista en la cual se realizará el conteo, debe estar inicializada\n Return:\n counter :: int\n la cantidad de veces ue aparece un elemento con el criterio definido\n \"\"\"\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n t1_start = process_time() #tiempo inicial\n counter=0 #Cantidad de repeticiones\n for element in lst:\n if criteria.lower() in element[column].lower(): #filtrar por palabra clave \n counter+=1\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return counter\n\ndef encontrarbuenaspeliculas(director,listacasting,listadetails):\n listaid=[]\n totalcalificacion=0\n totalpeliculas=0\n \n for i in range(1,len(listacasting)):\n if listacasting[i][\"director_name\"]==director:\n listaid.append(listacasting[i][\"id\"])\n \n for i in range(1,len(listadetails)):\n if listadetails[i][\"id\"] in listaid:\n if float(listadetails[i][\"vote_average\"])>=6.0:\n totalcalificacion=totalcalificacion+float(listadetails[i][\"vote_average\"])\n totalpeliculas=totalpeliculas+1\n \n promediocalificacion=round((totalcalificacion/totalpeliculas),2)\n \n texto=\"Su numero de peliculas buenas son: \"+str(totalpeliculas)+\". 
Su promedio de calificacion es: \"+str(promediocalificacion)\n \n return texto\n\ndef rankingpeliculas(listadetails,masvotadas,menosvotadas,mejoresvotadas,peoresvotadas):\n listaaverage=[]\n listacount=[]\n retorno={}\n \n for i in range(1,len(listadetails)):\n listaaverage.append(listadetails[i][\"vote_average\"])\n listacount.append(listadetails[i][\"vote_count\"])\n \n if masvotadas==1:\n nombremaxcount=[]\n listacount=sorted(listacount)\n listacount=listacount[::-1]\n listamaxcount=(listacount[0:10])\n for i in range(0,len(listamaxcount)):\n for j in range(1,len(listadetails)):\n if int(listamaxcount[i])==int(listadetails[j][\"vote_count\"]) and listadetails[j][\"title\"] not in nombremaxcount and len(nombremaxcount)<10:\n nombremaxcount.append(listadetails[j][\"title\"])\n retorno[\"mas_votadas\"]=nombremaxcount\n \n if menosvotadas==1:\n nombremincount=[]\n listacount=sorted(listacount)\n listamincount=(listacount[0:10])\n for i in range(0,len(listamaxcount)):\n for j in range(1,len(listadetails)):\n if int(listamincount[i])==int(listadetails[j][\"vote_count\"]) and listadetails[j][\"title\"] not in nombremincount and len(nombremincount)<10:\n nombremincount.append(listadetails[j][\"title\"])\n retorno[\"menos_votadas\"]=nombremincount\n \n if mejoresvotadas==1:\n nombremaxaverage=[]\n listaaverage=sorted(listaaverage)\n listaaverage=listaaverage[::-1]\n listamaxaverage=(listaaverage[0:10])\n for i in range(0,len(listamaxaverage)):\n for j in range(1,len(listadetails)):\n if float(listamaxaverage[i])==float(listadetails[j][\"vote_count\"]) and listadetails[j][\"title\"] not in nombremaxaverage and len(nombremaxaverage)<10:\n nombremaxaverage.append(listadetails[j][\"title\"])\n retorno[\"mejores_votadas\"]=nombremaxaverage\n \n if peoresvotadas==1:\n nombreminaverage=[]\n listaaverage=sorted(listaaverage)\n listaminaverage=(listaaverage[0:10])\n for i in range(0,len(listaminaverage)):\n for j in range(1,len(listadetails)):\n if float(listaminaverage[i])==float(listadetails[j][\"vote_count\"]) and listadetails[j][\"title\"] not in nombreminaverage and len(nombreminaverage)<10:\n nombreminaverage.append(listadetails[j][\"title\"])\n retorno[\"peores_votadas\"]=nombreminaverage\n \n return retorno\n\ndef main():\n \"\"\"\n Método principal del programa, se encarga de manejar todos los metodos adicionales creados\n\n Instancia una lista vacia en la cual se guardarán los datos cargados desde el archivo\n Args: None\n Return: None \n \"\"\"\n listacasting = [] #instanciar una lista vacia\n listadetails = []\n while True:\n printMenu() #imprimir el menu de opciones en consola\n inputs =input('Seleccione una opción para continuar\\n') #leer opción ingresada\n if len(inputs)>0:\n if int(inputs[0])==1: #opcion 1\n loadCSVFile(\"Data/MoviesCastingRaw-small.csv\", listacasting) #llamar funcion cargar datos\n print(\"Datos de casting cargados, \"+str(len(listacasting))+\" elementos cargados\")\n elif int(inputs[0])==2: #opcion 2\n loadCSVFile(\"Data/SmallMoviesDetailsCleaned.csv\", listadetails) #llamar funcion cargar datos\n print(\"Datos de details cargados, \"+str(len(listadetails))+\" elementos cargados\")\n elif int(inputs[0])==3: #opcion 3\n director=str(input(\"Escriba el nombre del director: \"))\n print(encontrarbuenaspeliculas(director,listacasting,listadetails))\n elif int(inputs[0])==4: #opcion 4\n masvotadas=int(input(\"Desea conocer las 10 peliculas mas votadas? 1:Si, 0:no: \"))\n menosvotadas=int(input(\"Desea conocer las 10 peliculas menos votadas? 
1:Si, 0:no: \"))\n mejoresvotadas=int(input(\"Desea conocer las 10 peliculas mejores votadas? 1:Si, 0:no: \"))\n peoresvotadas=int(input(\"Desea conocer las 10 peliculas peores votadas? 1:Si, 0:no: \"))\n print(rankingpeliculas(listadetails,masvotadas,menosvotadas,mejoresvotadas,peoresvotadas))\n elif int(inputs[0])==0: #opcion 0, salir\n sys.exit(0)\n\nif __name__ == \"__main__\":\n main()","sub_path":"App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"523223842","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom leetcode.Util import TreeNode\n\nfrom leetcode import Util\n\n\nclass Solution:\n def rob1(self, root: TreeNode) -> int:\n dp = {}\n\n def helper(node):\n if node is None:\n return 0\n if node in dp:\n return dp[node]\n x1, x2, x3, x4 = 0, 0, 0, 0\n if node.left:\n x1, x2 = helper(node.left.left), helper(node.left.right)\n if node.right:\n x3, x4 = helper(node.right.left), helper(node.right.right)\n dp[node] = max(helper(node.left) + helper(node.right), x1 + x2 + x3 + x4 + node.val)\n return dp[node]\n\n return helper(root)\n\n def rob(self, root: TreeNode) -> int:\n def helper(node):\n # result is arr, arr[0] = choose node, arr[1]: not choose node\n if node is None:\n return [0, 0]\n left = helper(node.left)\n right = helper(node.right)\n return [node.val + left[1] + right[1], max(left[0], left[1]) + max(right[0], right[1])]\n\n return max(helper(root))\n\n\ns = Solution()\nprint(s.rob1(Util.createTree([3, 2, 6, None, 3, None, 1])),\n s.rob(Util.createTree([3, 2, 6, None, 3, None, 1])))\n","sub_path":"leetcode/2020/House_Robber_III.py","file_name":"House_Robber_III.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162617098","text":"# d3, flask\n# doesn't seem to be getting videos on first page - \n\n\nimport bs4\nimport urllib2\nimport requests\nimport json\nimport math\nimport pprint\nimport pymongo\n \n\ndef custom_sigmoid(x):\n return 2.0 / (1 + math.e**(-0.08 * x)) - 1\n\n\nconn = pymongo.MongoClient()\ndb = conn.db_query\nYT_sents = db.YT_sents\n \n# What do we want to query from youtube?\nquery = \"hawaii\".replace(\" \", \"+\")\nfinal_results = { }\n\nif YT_sents.find({ '_id' : query }).count() == 0:\n\n # Retrieve the HTML search results\n search_url = \"https://www.youtube.com/results?search_sort=video_view_count&search_query=%s\" % query\n data = urllib2.urlopen(search_url)\n soup = bs4.BeautifulSoup(data)\n titles = soup.findAll('h3', attrs={'class': 'yt-lockup-title '})\n \n # Get all video IDs for these results from youtube\n video_ids = []\n for title in titles:\n link = title.findAll('a')\n if link:\n video_ids.append(link[0].attrs['href'].split(\"=\")[-1])\n \n # This will store video_id => final score\n final_results = {}\n \n # For every video result\n for video_id in video_ids:\n print(\"Retrieving html for video %s\" % video_id)\n \n # Grab the page source for video\n url = r'http://www.youtube.com/all_comments?v=%s' % video_id\n data = urllib2.urlopen(url) #example XhFtHW4YB7M\n \n # Pull out comments from html\n soup = bs4.BeautifulSoup(data)\n cmnts = soup.findAll(attrs={'class': 'comment-text-content'})\n \n print(\"Getting video comments\")\n \n # Create list of only comment text\n comments_text = 
[cmnt.text for cmnt in cmnts if cmnt.text]\n \n print(\"Sending over to MonkeyLearn\")\n \n if comments_text:\n # Create a request to MonkeyLearn sentiment API\n response = requests.post(\n \"https://api.monkeylearn.com/v2/classifiers/cl_qkjxv9Ly/classify/?\",\n data = json.dumps({'text_list': comments_text}),\n headers={'Authorization': 'Token 9112d82b0565d664965ecb5ab2b3b1c1ac98aa4f',\n 'Content-Type': 'application/json'})\n \n # Grab their results in json \n results = json.loads(response.text)# convert json response to python\n \n print(\"Results found, calculating score\")\n \n # What are the sentiment results?\n idx = 0\n neutral_count = 0.0\n positive_count = 0.0\n negative_count = 0.0\n for idx in range(len(results['result'])):\n label = results['result'][idx][0]['label']\n if label == 'neutral': neutral_count+=1\n elif label == 'positive': positive_count+=1\n elif label == 'negative': negative_count+=1\n #print comments_text[idx], '-----', label\n \n # Get final score for this video \n # Formula = ((pos-neg)/total) * f(total)\n total = positive_count + negative_count\n if total != 0: \n score = ((positive_count - negative_count) / total) * custom_sigmoid(total)\n else:\n score = 0.0\n else:\n score = 0.0 \n \n # Store results in dict\n final_results[video_id] = score\n \n print(\"Done\")\n\n #save data to mongo\n final_results[\"_id\"] = query\n YT_sents.insert_one(final_results)\n\nelse:\n final_results = YT_sents.find_one({ '_id' : query })\n\n\nimport operator\nsorted_final_results = sorted(final_results.items(), reverse=True,key=operator.itemgetter(1))\n\n# Print results\npprint.pprint(sorted_final_results)\n","sub_path":"youtube_sentiment_v2.py","file_name":"youtube_sentiment_v2.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283891043","text":"# pylint: disable=invalid-name\n\"\"\"Script to import CSV data into mongodb\"\"\"\nimport time\nimport csv\nfrom pymongo import MongoClient\n\n\nclass MongoDBConnection():\n \"\"\"MongoDB Connection\"\"\"\n\n def __init__(self, host=\"127.0.0.1\", port=27017):\n self.host = host\n self.port = port\n self.connection = None\n\n def __enter__(self):\n self.connection = MongoClient(self.host, self.port)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connection.close()\n\n\ndef import_data(dir_name, customer_file, product_file):\n \"\"\"Import data into the database\"\"\"\n custout = import_file(f\"{dir_name}/{customer_file}\",\n \"customers\",\n \"customer_id\")\n prodout = import_file(f\"{dir_name}/{product_file}\",\n \"products\",\n \"product_id\")\n return [custout, prodout]\n\n\ndef import_file(filepath, tablename, key):\n \"\"\"Import data from a file into a table\"\"\"\n outdata = [0, 0, 0, 0.0]\n start_time = time.perf_counter()\n mongo = MongoDBConnection()\n with mongo:\n db = mongo.connection.test_database\n table = db[tablename]\n\n # get number of records in db before importing\n outdata[1] = table.count_documents({})\n\n # actually import data\n with open(filepath) as f:\n reader = csv.DictReader(f)\n for row in reader:\n table.update_one(\n {key: row[key]},\n {'$set': row},\n upsert=True\n )\n outdata[0] += 1\n\n # get number of records in db after importing\n outdata[2] = table.count_documents({})\n\n # get finish time and parse\n end_time = time.perf_counter()\n outdata[3] = end_time - start_time\n\n return tuple(outdata)\n\n\nif __name__ == \"__main__\":\n print(import_data(\"data\", 
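    # import_data returns one (records_processed, count_before, count_after,
    # elapsed_seconds) tuple per file; e.g. a shape like
    # [(1000, 0, 1000, 2.3), (500, 0, 500, 1.1)] (illustrative numbers only).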
\"customers.csv\", \"products.csv\"))\n","sub_path":"students/choltzman/lesson07/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"645323050","text":"import json\nimport requests\n\n\nclass Trader:\n api_key = '4f7c537ce4b923b340a285c8c8b3431275934937'\n venue = ''\n account = ''\n api_url = 'https://api.stockfighter.io/ob/api/'\n\n auth = {'X-Starfighter-Authorization': api_key}\n\n def __init__(self, venue, account):\n self.venue = venue\n self.account = account\n print('Created Trader at ' + venue + ' with account ' + account)\n\n # {\n # 'ok': true,\n # 'symbol': 'FAC',\n # 'venue': 'OGEX',\n # 'bid': 5100, // best price currently bid for the stock\n # 'ask': 5125, // best price currently offered for the stock\n # 'bidSize': 392, // aggregate size of all orders at the best bid\n # 'askSize': 711, // aggregate size of all orders at the best ask\n # 'bidDepth': 2748, // aggregate size of *all bids*\n # 'askDepth': 2237, // aggregate size of *all asks*\n # 'last': 5125, // price of last trade\n # 'lastSize': 52, // quantity of last trade\n # 'lastTrade': '2015-07-13T05:38:17.33640392Z', // timestamp of last trade\n # 'quoteTime': '2015-07-13T05:38:17.33640392Z' // ts we last updated quote at (server-side)\n # }\n #\n # https://api.stockfighter.io/ob/api/venues/:venue/stocks/:stock/quote\n def get_quote(self, stock):\n r = requests.get(self.api_url + 'venues/' + self.venue + '/stocks/' + stock + '/quote')\n return r.json()\n\n # Set up the order\n # order = {\n # 'account' => account,\n # 'venue' => venue,\n # 'symbol' => stock,\n # 'price' => 25000, #$250.00 -- probably ludicrously high\n # 'qty' => 100,\n # 'direction' => 'buy',\n # 'orderType' => 'limit' # See the order docs for what a limit order is\n # }\n def order(self, symbol, price, qty, direction, order_type):\n order = {\n 'account': self.account,\n 'venue': self.venue,\n 'symbol': symbol,\n 'price': price, # $250.00 -- probably ludicrously high\n 'qty': qty,\n 'direction': direction,\n 'orderType': order_type # See the order docs for what a limit order is\n }\n # print(json.dumps(self.auth))\n # payload = {'headers':json.dumps(self.auth), 'body':json.dumps(order)}\n r = requests.post(self.api_url + 'venues/' + self.venue + '/stocks/' + symbol + '/orders', headers=self.auth,\n data=json.dumps(order))\n return r.json()\n","sub_path":"Trader.py","file_name":"Trader.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"239775226","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render\nfrom base.models import RbacRole, RbacRoleInfo\nfrom django.http import HttpResponse\nimport json\nfrom .froms import *\n\n\ndef index(request):\n roles = RbacRole.objects.values('role_id','role_name','status').order_by('status')\n return render(request,'rbac/role/index.html',locals())\n\ndef create(request):\n if request.method == 'GET':\n id = request.GET.get('id')\n role = RbacRole.objects.values('role_id','role_name','status').filter(role_id=id).first()\n\n if request.method == 'POST':\n id = request.POST.get('id')\n name = request.POST.get('name')\n status = request.POST.get('status')\n res = {}\n try:\n if id:\n role = RbacRole.objects.filter(role_id=id).update(role_name=name,status=status)\n else:\n role = RbacRole()\n role.role_name = name\n role.status = status\n role.save()\n res['msg'] = 0\n except 
Exception as e:\n print(e)\n res['msg'] = 1\n return render(request,'rbac/role/create.html',locals())\n\ndef delete(request):\n pass\n\ndef info(request):\n roleID = request.GET.get('id')\n #获取业态相关分类信息\n departs = getDeparts()\n #获取部类相关分类信息\n classes = getClasses()\n # 获取功能模块相关分类信息\n modules = getModules()\n roleID = request.GET.get('id')\n role = RbacRoleInfo.objects.values('depart', 'category', 'module').filter(role_id=roleID).first()\n data = {}\n\n if role:\n departs = role['depart'].replace('},', '}$')\n departList = departs.split('$')\n for depart in departList:\n depart = json.loads(depart)\n if depart['p_id'] == '11':\n data['chaoShi'] = getFormData(depart['sub'])\n elif depart['p_id'] == '12':\n data['baiHuo'] = getFormData(depart['sub'])\n elif depart['p_id'] == '13':\n data['bianLiDian'] = getFormData(depart['sub'])\n\n categories = role['category'].replace('},', '}$')\n categoryList = categories.split('$')\n for category in categoryList:\n category = json.loads(category)\n if category['p_id'] == '1':\n data['shengXian'] = getFormData(category['sub'])\n elif category['p_id'] == '2':\n data['shiPin'] = getFormData(category['sub'])\n elif category['p_id'] == '3':\n data['feiShi'] = getFormData(category['sub'])\n elif category['p_id'] == '4':\n data['jiaDian'] = getFormData(category['sub'])\n elif category['p_id'] == '6':\n data['yunYing'] = getFormData(category['sub'])\n\n modules = role['module'].replace('},', '}$')\n moduleList = modules.split('$')\n for module in moduleList:\n module = json.loads(module)\n if module['p_id'] == '1':\n data['dailyCHSH'] = getFormData(module['sub'])\n elif module['p_id'] == '2':\n data['dailyBH'] = getFormData(module['sub'])\n elif module['p_id'] == '3':\n data['dailyBLD'] = getFormData(module['sub'])\n elif module['p_id'] == '4':\n data['dailyErr'] = getFormData(module['sub'])\n\n form = roleInfoForm(data)\n return render(request, 'rbac/role/roleInfo.html', locals())\n\ndef getFormData(subStr):\n subStr = subStr[0:len(subStr)-1]\n subList = subStr.split(',')\n data = set()\n for sub in subList:\n data.add(sub)\n return data\n\ndef infoSave(request):\n role_id = request.POST.get('id')\n departs = request.POST.get('departs')\n categories = request.POST.get('categories')\n modules = request.POST.get('modules')\n res = {}\n try:\n role = RbacRoleInfo()\n role.depart = departs\n role.category = categories\n role.module = modules\n role.role_id = role_id\n role.save()\n res['msg'] = 0\n except Exception as e:\n print(e)\n res['msg'] = 1\n\n return HttpResponse(json.dumps(res))\n\ndef getDeparts():\n companys = RbacShop.objects.values('shoptype').distinct()\n departs = RbacShop.objects.values('shopcode','shopnm','shoptype').filter(shoptype__in=(11,12,13),enable=1)\n data = []\n for company in companys:\n item = {}\n c_tpye = company['shoptype'].strip()\n item['p_item'] = {'c_id' : c_tpye}\n item['sub'] = []\n for depart in departs :\n if depart['shoptype'].strip() == c_tpye :\n item['sub'].append({'depart_id':depart['shopcode'],'depart_name':depart['shopnm']})\n data.append(item)\n return data\n\ndef getClasses():\n catrgories = BasOrg.objects.values('orgcode','orgname','parentcode').filter(tier=1,orgcode__in=(1,2,3,4,6))\n classes = BasOrg.objects.values('orgcode','orgname','parentcode').filter(tier=2)\n data = []\n for catrgory in catrgories:\n item = {}\n c_id = catrgory['orgcode']\n item['p_item'] = {'c_id':c_id,'c_name':catrgory['orgname']}\n item['sub'] = []\n for obj in classes:\n if obj['parentcode'] == c_id:\n 
item['sub'].append({'class_id':obj['orgcode'],'class_name':obj['orgname']})\n        data.append(item)\n\n    return data\n\ndef getModules():\n    modules = RbacMoudle.objects.values('m_name','m_id').filter(status=1,p_id=0)\n    modulesChild = RbacMoudle.objects.values('m_name','m_id','p_id').filter(status=1).exclude(p_id='')\n    data = []\n    for m in modules:\n        item = {}\n        p_id = m['m_id']\n        item['p_item'] = {'m_id':p_id,'m_name':m['m_name']}\n        item['sub'] = []\n        for c in modulesChild:\n            if c['p_id'] == p_id:\n                item['sub'].append({'m_id':c['m_id'],'m_name':c['m_name']})\n        data.append(item)\n    return data","sub_path":"base/rbac/role/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"93972639","text":"import requests\n\n#\n# WAN9839\n# ePwd: 93qst%85qKp%116UZP%122iUE%117Vlf%116pih%55Uph%56xTK%57ypz%52DHE\n# JsLoaded: true\nwith requests.Session() as c:\n    url = 'http://erwin.nml.com/MM/Login.do'\n    username = 'WAN9838'\n    pw = '93qst%85qKp%116UZP%122iUE%117Vlf%116pih%55Uph%56xTK%57ypz%52DH'\n\n    c.get(url)\n    login_data = dict(__username__=username, ePwd=pw,\n                      JsLoaded='true')\n    c.post(url, data=login_data, headers={\"Referer\": \"http://erwin.nml.com/MM/\"})\n    page = c.get('http://erwin.nml.com/MM')\n\n    print(page.content)\n\n    # logged in\n\n    # r = requests.get(url=URL, params=PARAMS)\n    URL = \"http://erwin.nml.com/MM/css/fonts/diagram.svg?_dc=1533588153332\"\n    r = requests.get(url=URL)\n    print('svg')\n    print(r.text)\n\n","sub_path":"erwin web portal/on hold/ErwinDirectRequestWoff.py","file_name":"ErwinDirectRequestWoff.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"514509807","text":"from django.shortcuts import render\nfrom .models import Product\n\nfrom django.shortcuts import render, redirect\nfrom .forms import NewUserForm\nfrom django.contrib.auth import login\nfrom django.contrib import messages \nfrom django.contrib.auth import login, authenticate ,logout #add this\nfrom django.contrib.auth.forms import AuthenticationForm #add this\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nimport json\nimport datetime\nfrom .models import * \nfrom .utils import cookieCart, cartData, guestOrder\n# Create your views here.\n\n\n\n\ndef index(request):\n\tdest1= Product.objects.all\n\tdata = cartData(request)\n\tcartItems = data['cartItems']\n\treturn render(request ,'index.html', {'dest1': dest1,\"cartItems\": cartItems});\n\n\n\ndef register_request(request):\n\tif request.method == \"POST\":\n\t\tform = NewUserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tlogin(request, user)\n\t\t\tmessages.success(request, \"Registration successful.\" )\n\t\t\treturn redirect(\"/\")\n\t\tmessages.error(request, \"Unsuccessful registration. Invalid information.\")\n\tform = NewUserForm\n\treturn render (request=request, template_name=\"register.html\", context={\"register_form\":form})\n\n\ndef login_request(request):\n\tif request.method == \"POST\":\n\t\tform = AuthenticationForm(request, data=request.POST)\n\t\tif form.is_valid():\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tpassword = form.cleaned_data.get('password')\n\t\t\tuser = authenticate(username=username, password=password)\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\tmessages.info(request, f\"You are now logged in as {username}.\")\n\t\t\t\treturn redirect(\"/\")\n\t\t\telse:\n\t\t\t\tmessages.error(request,\"Invalid username or password.\")\n\t\telse:\n\t\t\tmessages.error(request,\"Invalid username or password.\")\n\tform = AuthenticationForm()\n\treturn render(request=request, template_name=\"login.html\", context={\"login_form\":form})\n\ndef store(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tproducts = Product.objects.all()\n\tcontext = {'products':products, 'cartItems':cartItems}\n\treturn render(request, 'store/store.html', context)\n\n\n\n\n\ndef logout_request(request):\n\tlogout(request)\n\tmessages.info(request, \"You have successfully logged out.\") \n\treturn redirect(\"/\")\n\n\ndef cart(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'cart.html', context)\n\ndef checkout(request):\n\tdata = cartData(request)\n\t\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'checkout.html', context)\n\ndef updateItem(request):\n\tdata = json.loads(request.body)\n\tproductId = data['productId']\n\taction = data['action']\n\tprint('Action:', action)\n\tprint('Product:', productId)\n\n\tcustomer = request.user.customer\n\tproduct = Product.objects.get(id=productId)\n\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n\torderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n\tif action == 'add':\n\t\torderItem.quantity = (orderItem.quantity + 1)\n\telif action == 'remove':\n\t\torderItem.quantity = (orderItem.quantity - 1)\n\n\torderItem.save()\n\n\tif orderItem.quantity <= 0:\n\t\torderItem.delete()\n\n\treturn JsonResponse('Item was added', safe=False)\n\ndef processOrder(request):\n\ttransaction_id = datetime.datetime.now().timestamp()\n\tdata = json.loads(request.body)\n\n\tif request.user.is_authenticated:\n\t\tcustomer = request.user.customer\n\t\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\telse:\n\t\tcustomer, order = guestOrder(request, data)\n\n\ttotal = float(data['form']['total'])\n\torder.transaction_id = transaction_id\n\n\tif total == order.get_cart_total:\n\t\torder.complete = True\n\torder.save()\n\n\tif order.shipping == True:\n\t\tShippingAddress.objects.create(\n\t\tcustomer=customer,\n\t\torder=order,\n\t\taddress=data['shipping']['address'],\n\t\tcity=data['shipping']['city'],\n\t\tstate=data['shipping']['state'],\n\t\tzipcode=data['shipping']['zipcode'],\n\t\t)\n\n\treturn JsonResponse('Payment submitted..', 
safe=False)","sub_path":"food_delivery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"25810849","text":"from PIL import ImageGrab\nimport cv2\nimport numpy as np\nfrom datetime import datetime\nimport time\n\n\ndef screen_capture():\n screen = np.array(ImageGrab.grab(bbox=(0, 0, 800, 600)))\n cv2.imshow('Python Window', screen)\n count = 0\n timeout = 1\n import tkinter as tk\n\n root = tk.Tk()\n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n print(screen_height, screen_width)\n\n while True:\n count += 1\n start_time = datetime.now()\n # do your work here\n # print('Duration: {}'.format(end_time - start_time))\n screen = np.array(ImageGrab.grab(bbox=(0, 0, screen_width, screen_height)))\n # cv2.resizeWindow('jpg', screen_width, screen_height)\n cv2.imshow('window', screen)\n # cv2.destroyAllWindows()\n end_time = datetime.now()\n print(\"count in one second\", count)\n # print('Duration: {}'.format(end_time - start_time))\n second = time.localtime().tm_sec\n print(second)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n\n","sub_path":"screen_capture.py","file_name":"screen_capture.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"235933330","text":"import math,numpy\n\ndef main(c1,c2,c3,c4):\n\n v1 = c2-c1\n v2 = c3-c2\n v3 = c4-c3\n\n angle = math.atan2(\n numpy.dot(\n math.sqrt(sum(v2*v2))*v1,\n cross(v2,v3),\n ),\n numpy.dot(\n cross(v1,v2),\n cross(v2,v3),\n ),\n )\n angle *= 180./math.pi\n\n return angle\n\n\ndef cross(v1,v2):\n\n n = numpy.array([\n v1[1]*v2[2]-v1[2]*v2[1],\n v1[2]*v2[0]-v1[0]*v2[2],\n v1[0]*v2[1]-v1[1]*v2[0],\n ])\n\n return n\n","sub_path":"quakes/quakes_dihedral.py","file_name":"quakes_dihedral.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"373079826","text":"#%% INIT\nimport json\nimport random\nimport os\nimport numpy as np\nimport re\nimport time\nimport os\nfrom zipfile import ZipFile\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM, BertModel\n\n#%% SAMPLING\n\"\"\"\nsample_idx = random.sample(np.arange(0, len(df)).tolist(), 3000)\ndf = [df[idx] for idx in sample_idx]\npositive_label = [positive_label[idx] for idx in sample_idx]\n\nwith open(\"output/labels.json\", 'w') as f:\n\tf.write(json.dumps(positive_label))\n\nprint('-'*10)\nprint(\"Sample: \")\nprint(f'number of comments + postname: {len(df)}')\nprint(f'positive labels: {len(positive_label)}')\nprint('-'*10)\n\"\"\"\n\n\n#%% TESTING\n\"\"\"\ntokens_tensor = tokenizer(df[0:16], return_tensors=\"pt\", padding=True) # pt for pytorch\nwith torch.no_grad():\n\toutput = model(**tokens_tensor)\noutput.pooler_output.shape\n# output.hidden_states[0][:,0,:].shape\n\"\"\"\n#%%\ndef generate_class_vector(df, model_name=\"bert-base-chinese\", batch_size=None, output_name='output/rename.pt', full=False, full_interval=2):\n \"\"\"Generate class vectors with BERT and save the tensor in output.\"\"\"\n # zip full output\n if os.path.exists(\"tmp\"):\n os.system('cmd /k \"rm -R tmp\"')\n os.mkdir('tmp')\n zipObj = ZipFile(output_name.replace('.pt', f'_full.zip'), 'w')\n\n if torch.cuda.is_available():\n device = torch.device('cuda:0')\n if batch_size == None:\n batch_size = 128\n else:\n device = 
torch.device('cpu')\n if batch_size == None:\n batch_size = 16\n print(\"-\"*50 + \"\\n|\")\n print(f'| Generating tensors\\n|\\tmodel = {model_name}\\n|\\tbatch_size = {batch_size}\\n|\\toutput = {output_name}\\n|\\tdevice = {device}')\n print(\"|\\n\" + \"-\"*50)\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n model = BertModel.from_pretrained(model_name)\n\n model.to(device)\n\n concat_output = None\n hidden = None\n data_length = len(df)\n max_length = len(max(df, key=len))\n batches = np.arange(0, data_length, batch_size)\n log_interval = int(len(batches)/20) if int(len(batches)/20) != 0 else len(batches)//2\n # print(f'max length sequence: {max_length}')\n print(f\"number of batches: {len(batches)}\")\n\n start = time.time()\n count = 0\n for i, batch in enumerate(batches):\n if batch_size + batch_size >= data_length:\n tokens_tensor = tokenizer(df[batch:], return_tensors=\"pt\", padding='max_length', max_length=max_length) # pt for pytorch\n else:\n tokens_tensor = tokenizer(df[batch:batch + batch_size], return_tensors=\"pt\", padding='max_length', max_length=max_length) # pt for pytorch\n tokens_tensor.to(device)\n\n with torch.no_grad():\n output = model(**tokens_tensor)\n\n if concat_output == None:\n concat_output = output.pooler_output\n else:\n concat_output = torch.cat([concat_output, output.pooler_output], 0)\n\n if hidden == None:\n hidden = output.last_hidden_state\n else:\n hidden = torch.cat([hidden, output.last_hidden_state], 0)\n\n if i % (log_interval * full_interval) == 0 and i != 0:\n tmp_name = output_name.replace('.pt', f'_full_{count}.pt')\n tmp_name = tmp_name.replace('output/', 'tmp/')\n torch.save(concat_output, tmp_name)\n zipObj.write(tmp_name)\n os.remove(tmp_name)\n print(f\"hidden: {hidden.shape}, saved at {tmp_name}\")\n count += 1\n del hidden\n hidden = None\n\n if i % log_interval == 0:\n print(f'Batch: {i+1} [{(i+1)*batch_size}/{data_length}] {round(time.time() - start, 2)}s {(i+1)*batch_size*1.0/data_length * 100}%')\n print(f\"Pooler Output: {concat_output.shape}\")\n torch.save(concat_output, output_name)\n if hidden != None:\n tmp_name = output_name.replace('.pt', f'_full_{count}.pt')\n torch.save(concat_output, tmp_name)\n zipObj.write(tmp_name)\n os.remove(tmp_name)\n print(f\"hidden: {hidden.shape}, saved at {tmp_name}\")\n\n os.rmdir(\"tmp\")\n zipObj.close()\n\n#%% split text function\ndef split_text(text, max_length):\n \"\"\"Split text with multiple characters according to max length.\n buffer is accumulated with different segments and it will be splitted if it exceed the max length.\n Return a list of string.\n \"\"\"\n breaks = [i for i in re.finditer(' |\\n|\\:|\\:|\\,|\\,|\\﹐|\\。|\\ㄧ|\\?|\\?|\\!|\\!|\\;|\\;', text)]\n segments = []\n start_offset = 0\n for k, p in enumerate(breaks):\n if p.end() - start_offset > max_length:\n start = start_offset\n end = breaks[k-1].end()\n segment = text[start:end]\n start_offset = breaks[k-1].end()\n segments.append(segment)\n\n if segments == []:\n mid = len(breaks)//2\n segments = [text[:breaks[mid-1].end()], text[breaks[mid-1].end():]]\n\n if segments == []:\n raise Exception(f'something is wrong \\n{max_length}\\n{text}')\n\n for segment in segments:\n if len(segment) > max_length:\n raise Exception(f'splitted segment is larger than {max_length}\\n{segment}\\n{text}')\n return segments\n\ndef analyze_tokenization(sequence, tokenizer):\n ## this is to examine the tokenization process, one line of code can do all of these\n print(sequence)\n\n # tokenize (break sentence into words)\n 
tokens = tokenizer.tokenize(sequence)\n print(tokens)\n\n # words to id\n tokens_ids = tokenizer.convert_tokens_to_ids(tokens)\n print(tokens_ids)\n\n # add special tokens [CLS] [SEP] 101 102\n tokens_ids = tokenizer.build_inputs_with_special_tokens(tokens_ids)\n print(tokens_ids)\n\n print(tokenizer.convert_ids_to_tokens(tokens_ids))\n\n# %%\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381134030","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: andrea cecchi\n\"\"\"\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom rer.sitesearch.custom_fields import TabsValueField, IndexesValueField\nfrom rer.sitesearch.interfaces import IRERSiteSearchSettings\nfrom zope.component import queryUtility\nfrom zope.schema.interfaces import IVocabularyFactory\n\n\nDEFAULT_HIDDEN_INDEXES = [('start', 'Event start'),\n ('end', 'Event end'),\n ('Creator', 'Author')]\n\nDEFAULT_INDEXES = [('Subject', 'Subject')]\n\nDEFAULT_TABS = [('Document', 'Documents'),\n ('News Item', 'News'),\n ('Event', 'Events'),\n ('File', 'File'),\n ('Link', 'Links')]\n\n\ndef Handlers(context):\n if context.readDataFile('sitesearch_various.txt') is None:\n return\n portal = context.getSite()\n insertProperties(portal)\n\n\ndef insertProperties(context):\n \"\"\"\n insert some properties\n \"\"\"\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IRERSiteSearchSettings, check=False)\n #set search indexes\n indexes = setRegistyIndexes(context, DEFAULT_INDEXES)\n settings.available_indexes = indexes\n #set hidden indexes\n hidden_indexes = setRegistyIndexes(context, DEFAULT_HIDDEN_INDEXES)\n settings.hidden_indexes = hidden_indexes\n #set tabs\n tabs = setRegistryTabs(context)\n if tabs:\n settings.tabs_mapping = tabs\n tabs_order_dict = queryUtility(IVocabularyFactory, name=\"rer.sitesearch.vocabularies.SearchTabsVocabulary\")\n tabs_order = tabs_order_dict(context).by_token.keys()\n settings.tabs_order = tuple(tabs_order)\n\n\ndef setRegistyIndexes(context, indexes_list):\n \"\"\"\n \"\"\"\n pc = getToolByName(context, 'portal_catalog')\n catalog_indexes = pc.indexes()\n new_items = []\n for index in indexes_list:\n index_id = index[0]\n index_title = index[1]\n if index_id in catalog_indexes:\n new_value = IndexesValueField()\n new_value.index = index_id\n new_value.index_title = index_title\n new_items.append(new_value)\n return tuple(new_items)\n\n\ndef setRegistryTabs(context):\n \"\"\"\n \"\"\"\n types_tool = getToolByName(context, 'portal_types')\n portal_types = types_tool.listContentTypes()\n new_tabs = []\n for tab in DEFAULT_TABS:\n tab_ptype = tab[0]\n tab_title = tab[1]\n if tab_ptype in portal_types:\n new_value = TabsValueField()\n new_value.tab_title = tab_title\n new_value.portal_types = (tab_ptype,)\n new_tabs.append(new_value)\n return tuple(new_tabs)\n","sub_path":"rer/sitesearch/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"266248957","text":"from flask import (\n Blueprint,\n abort,\n flash,\n redirect,\n render_template,\n request,\n url_for,\n jsonify,\n send_from_directory,\n)\nfrom flask_login import current_user, login_required\nfrom flask_rq import get_queue\nfrom sqlalchemy.orm import with_polymorphic\n\nfrom app import db\nfrom 
app.answer.forms import *\nfrom app.decorators import admin_required, respondent_required\nfrom app.email import send_email\nfrom app.models import *\nfrom sqlalchemy import func\n\n\nanswer = Blueprint(\"answer\", __name__)\n\n\n@answer.route(\n \"////ca/add/\", methods=[\"GET\", \"POST\"]\n)\n@login_required\ndef add_custom_answer(project_id, question_id, question):\n custom_answer_poly = with_polymorphic(Answer, UAnswer)\n project = db.session.query(Project).filter_by(id=project_id).first()\n custom_question = UQuestion.query.filter_by(id=question_id).first()\n\n form = AddUAnswerForm()\n\n if request.method == \"POST\" and form.validate_on_submit():\n custom_answer = (\n db.session.query(custom_answer_poly)\n .filter(custom_answer_poly.UAnswer.u_questions_id == question_id)\n .filter(custom_answer_poly.UAnswer.user_no == form.user_no.data)\n .first()\n )\n if not custom_answer:\n uanswer = UAnswer(\n u_questions_id=custom_question.id,\n user_id=current_user.id,\n project_id=project_id,\n user_no=form.user_no.data,\n answer_option=form.option_one.data,\n )\n db.session.add(uanswer)\n db.session.commit()\n\n flash(\"Answer submitted.\", \"success\")\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n else:\n flash(\n \"Sorry, the questionaire number provided for this question already has an answer!\",\n \"error\",\n )\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n return render_template(\n \"answer/add_custom_answer.html\",\n u_question=custom_question,\n form=form,\n project_id=project_id,\n )\n\n\n@answer.route(\"///add/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_screener_answer(project_id, question_id):\n screener_answer_poly = with_polymorphic(Answer, ScreenerAnswer)\n screener_question = (\n db.session.query(ScreenerQuestion).filter_by(id=question_id).first()\n )\n\n form = AddScreenerAnswerForm()\n project = db.session.query(Project).filter_by(id=project_id).first()\n\n if request.method == \"POST\" and form.validate_on_submit():\n screener_answer = (\n db.session.query(screener_answer_poly)\n .filter(\n screener_answer_poly.ScreenerAnswer.screener_questions_id == question_id\n )\n .filter(screener_answer_poly.ScreenerAnswer.user_no == form.user_no.data)\n .first()\n )\n\n if not screener_answer:\n appt = ScreenerAnswer(\n answer_option_one=form.answer_option_one.data,\n screener_questions_id=screener_question.id,\n project_id=project_id,\n user_no=form.user_no.data,\n user_id=current_user.id,\n location_city=form.city.data,\n location_state=form.state.data,\n )\n db.session.add(appt)\n db.session.commit()\n flash(\"Answer submitted.\", \"success\")\n answer = (\n db.session.query(ScreenerAnswer)\n .filter_by(user_id=current_user.id)\n .filter(ScreenerAnswer.id == appt.id)\n .first()\n )\n else:\n flash(\n \"Sorry, the questionaire number provided for this question already has an answer!\",\n \"error\",\n )\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n\n if answer.answer_option_one == screener_question.required_answer:\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n else:\n flash(\n \"Sorry, you cannot proceed with answers project on this project. 
Choose another project\",\n \"success\",\n )\n return redirect(url_for(\"question.index\"))\n\n return render_template(\n \"answer/add_screener_answer.html\",\n question=screener_question,\n form=form,\n project_id=project_id,\n )\n\n\n@answer.route(\n \"////scl/add/\", methods=[\"GET\", \"POST\"]\n)\n@login_required\ndef add_scale_answer(project_id, question_id, question):\n scale_answer_poly = with_polymorphic(Answer, ScaleAnswer)\n\n project = db.session.query(Project).filter_by(id=project_id).first()\n\n scale_question = ScaleQuestion.query.filter_by(id=question_id).first()\n select_answer_form = ScaleQuestion.query.filter_by(id=question_id).first()\n\n if select_answer_form and (select_answer_form.options == \"5 Point Likert Scale\"):\n form = AddScaleAnswerForm()\n else:\n form = AddSemanticAnswerForm()\n\n if request.method == \"POST\" and form.validate_on_submit():\n scale_answer = (\n db.session.query(scale_answer_poly)\n .filter(scale_answer_poly.ScaleAnswer.scale_question_id == question_id)\n .filter(scale_answer_poly.ScaleAnswer.user_no == form.user_no.data)\n .first()\n )\n if not scale_answer:\n appt = ScaleAnswer(\n scale_question_id=scale_question.id,\n user_id=current_user.id,\n project_id=project_id,\n user_no=form.user_no.data,\n option=form.options.data,\n )\n db.session.add(appt)\n db.session.commit()\n\n flash(\"Answer submitted.\", \"success\")\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n else:\n flash(\n \"Sorry, the questionaire number provided for this question already has an answer!\",\n \"error\",\n )\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n return render_template(\n \"answer/add_scale_answer.html\",\n scale_question=scale_question,\n form=form,\n project_id=project_id,\n )\n\n\n@answer.route(\n \"////mcl/add/\", methods=[\"Get\", \"POST\"]\n)\n@login_required\ndef add_multiple_choice_answer(project_id, question_id, question):\n mcq_answer_poly = with_polymorphic(Answer, MultipleChoiceAnswer)\n project = db.session.query(Project).filter_by(id=project_id).first()\n question = LineItem.query.filter_by(project_id=project_id).all()\n\n multiple_choice_question = MultipleChoiceQuestion.query.filter_by(\n project_id=project_id\n ).first()\n\n form = AddMultipleChoiceAnswerForm()\n if request.method == \"POST\" and form.validate_on_submit():\n mcq_answer = (\n db.session.query(mcq_answer_poly)\n .filter(\n mcq_answer_poly.MultipleChoiceAnswer.multiple_choice_question_id\n == question_id\n )\n .filter(mcq_answer_poly.MultipleChoiceAnswer.user_no == form.user_no.data)\n .first()\n )\n if not mcq_answer:\n answer_options = request.form.getlist(\"mcq_answer\")\n answer_one = (\n multiple_choice_question.multiple_choice_option_one\n if multiple_choice_question.multiple_choice_option_one in answer_options\n else None\n )\n answer_two = (\n multiple_choice_question.multiple_choice_option_two\n if multiple_choice_question.multiple_choice_option_two in answer_options\n else None\n )\n answer_three = (\n multiple_choice_question.multiple_choice_option_three\n if multiple_choice_question.multiple_choice_option_three\n in answer_options\n else None\n )\n answer_four = (\n multiple_choice_question.multiple_choice_option_four\n if multiple_choice_question.multiple_choice_option_four\n in answer_options\n else None\n )\n\n answer_five = (\n multiple_choice_question.multiple_choice_option_five\n if 
multiple_choice_question.multiple_choice_option_five\n in answer_options\n else None\n )\n\n mcq = MultipleChoiceAnswer(\n multiple_choice_question_id=multiple_choice_question.id,\n multiple_choice_answer_one=answer_one,\n multiple_choice_answer_two=answer_two,\n multiple_choice_answer_three=answer_three,\n multiple_choice_answer_four=answer_four,\n multiple_choice_answer_five=answer_five,\n user_no=form.user_no.data,\n user_id=current_user.id,\n project_id=project.id,\n )\n db.session.add(mcq)\n db.session.commit()\n flash(\"Answer submitted.\", \"success\")\n return redirect(\n url_for(\n \"question.question_details\",\n project_id=project.id,\n name=project.name,\n )\n )\n else:\n flash(\n \"Sorry, the questionaire number provided for this question already has an answer!\",\n \"error\",\n )\n return redirect(\n url_for(\n \"project.project_questions\",\n project_id=project.id,\n name=project.name,\n )\n )\n return render_template(\n \"answer/add_multiple_choice_answer.html\",\n multiple_choice_question=multiple_choice_question,\n project_id=project_id,\n form=form,\n )\n","sub_path":"app/answer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"421426026","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport json\nimport xml.etree.ElementTree as ET\n\n\nclass Artery(object):\n\n def __init__(self, edges):\n self.edges = edges\n \n def is_traversed(self, route):\n for edge in self.edges:\n if edge in route:\n return True\n return False\n\n\ndef get_arterial_vehicles(routefile, artery):\n tree = ET.parse(routefile)\n root = tree.getroot()\n\n arterial_vehicles = []\n\n for vehicle in root:\n if vehicle.tag != 'vehicle':\n continue\n [route] = list(vehicle)\n if artery.is_traversed(route.get('edges')):\n arterial_vehicles.append(vehicle.get('id'))\n\n return arterial_vehicles\n\n\ndef main():\n artery = Artery(['left0to0/0', '0/0to1/0', '1/0to2/0', '2/0to3/0', '3/0to4/0'])\n\n with open('files.json') as fp:\n routefiles = json.load(fp)\n\n for label, routefile in routefiles.items():\n vehicles = get_arterial_vehicles(routefile, artery)\n\n with open('{}-arterial.json'.format(label), 'w') as fp:\n json.dump(vehicles, fp, indent=4, sort_keys=True)\n\nif __name__ == '__main__':\n main()\n","sub_path":"utils/results/arterial.py","file_name":"arterial.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"381938497","text":"x =''\ndef get_next_target(x):\n\ty = x.find('a href=')\n\tgauche = x.find('\"', y)\n\tdroite = x.find('\"', gauche+1)\n\turl = x[gauche+1:droite]\n\treturn url,droite\ndef all_links(x):\n\twhile (True):\n\t\turl,endpos = get_next_target(x)\n\t\tif url:\n\t\t\tprint(url)\n\t\t\tx=x[endpos:]\n\t\telse:\n\t\t\tx=x[endpos:]\n\t\t\tbreak\n\treturn url\n\nf= open(\"links.txt\", \"a\")\nh= all_links(x)\nfor e in h:\n\tf.write(e + '\\n')\nf.close()\n\t\ninp= input(\"Saisissez votre lien:\")\t\t\ndef s_input(inp,y):\n\tfor e in y:\n\t\tif inp in e:\n\t\t\treturn e \nprint(s_input(inp,h))\n","sub_path":"rawinput.py","file_name":"rawinput.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169246961","text":"import asyncio\nimport argparse\nfrom proxybroker import Broker\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nparser 
= argparse.ArgumentParser(description='Find Proxy Server')\nparser.add_argument('-n', '--n_results', type=int, default=100,\n                    help='number of proxy server')\n\nargs = parser.parse_args()\nnumber = args.n_results\n\nfilename = 'source/' + str(number) + '_socks4_proxies.txt'\n\n\nasync def save(proxies, filename):\n    count = 0\n    with open(filename, 'w') as f:\n        while True:\n            proxy = await proxies.get()\n            if proxy is None:\n                break\n            # proto = 'socks4' if 'SOCKS4' in proxy.types\n            row = '%s:%d \\n' % (proxy.host, proxy.port)\n            f.write(row)\n            count = count + 1\n            print(count, row)\n\n\ndef main():\n    proxies = asyncio.Queue()\n    broker = Broker(proxies)\n    tasks = asyncio.gather(broker.find(types=['SOCKS4'], limit=number),\n                           save(proxies, filename=filename))\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(tasks)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"get_socks4.py","file_name":"get_socks4.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"73795879","text":"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"https://www.kaggle.com/c/diabetic-retinopathy-detection/data.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\n\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n\n_CITATION = \"\"\"\\\n@ONLINE {kaggle-diabetic-retinopathy,\n    author = \"Kaggle and EyePacs\",\n    title  = \"Kaggle Diabetic Retinopathy Detection\",\n    month  = \"jul\",\n    year   = \"2015\",\n    url    = \"https://www.kaggle.com/c/diabetic-retinopathy-detection/data\"\n}\n\"\"\"\n\n\nclass DiabeticRetinopathyDetection(tfds.core.GeneratorBasedBuilder):\n  \"\"\"Diabetic retinopathy detection.\"\"\"\n\n  VERSION = tfds.core.Version(\"1.0.0\")\n\n  def _info(self):\n    return tfds.core.DatasetInfo(\n        builder=self,\n        description=\"A large set of high-resolution retina images taken under \"\n        \"a variety of imaging conditions.\",\n        features=tfds.features.FeaturesDict({\n            \"name\": tfds.features.Text(),  # patient ID + eye. eg: \"4_left\".\n            \"image\": tfds.features.Image(),\n            # From 0 (no DR - saine) to 4 (Proliferative DR). -1 means no label.\n            \"label\": tfds.features.ClassLabel(num_classes=5),\n        }),\n        urls=[\"https://www.kaggle.com/c/diabetic-retinopathy-detection/data\"],\n        citation=_CITATION,\n    )\n\n  def _split_generators(self, dl_manager):\n    # TODO(pierrot): implement download using kaggle API.\n    # TODO(pierrot): implement extraction of multiple files archives.\n    path = dl_manager.manual_dir\n    return [\n        tfds.core.SplitGenerator(\n            name=\"sample\",  # 10 images, to do quicktests using dataset.\n            num_shards=1,\n            gen_kwargs={\n                \"images_dir_path\": os.path.join(path, \"sample\"),\n            },\n        ),\n        tfds.core.SplitGenerator(\n            name=\"train\",\n            num_shards=10,\n            gen_kwargs={\n                \"images_dir_path\": os.path.join(path, \"train\"),\n                \"csv_path\": os.path.join(path, \"trainLabels.csv\"),\n            },\n        ),\n        tfds.core.SplitGenerator(\n            name=\"test\",\n            num_shards=10,\n            gen_kwargs={\n                \"images_dir_path\": os.path.join(path, \"test\"),\n            },\n        ),\n    ]\n\n  def _generate_examples(self, images_dir_path, csv_path=None):\n    \"\"\"Yields Example instances from given CSV.\n\n    Args:\n      images_dir_path: path to dir in which images are stored.\n      csv_path: optional, path to csv file with two columns: name of image and\n        label. If not provided, just scan image directory, don't set labels.\n    \"\"\"\n    if csv_path:\n      with tf.io.gfile.GFile(csv_path) as csv_f:\n        reader = csv.DictReader(csv_f)\n        data = [(row[\"image\"], int(row[\"level\"])) for row in reader]\n    else:\n      data = [(fname[:-5], -1)\n              for fname in tf.io.gfile.listdir(images_dir_path)\n              if fname.endswith(\".jpeg\")]\n    for name, label in data:\n      yield {\n          \"name\": name,\n          \"image\": \"%s/%s.jpeg\" % (images_dir_path, name),\n          \"label\": label,\n      }\n","sub_path":"tensorflow_datasets/image/diabetic_retinopathy_detection.py","file_name":"diabetic_retinopathy_detection.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"383533366","text":"from openff import ForceField\nforce_field = ForceField('openff-1.0.0.offxml')\n\n# Extract the smiles of all unique components in our data set.\nfrom openff.topology import Molecule, Topology\n\nall_smiles = set(\n    component.smiles\n    for substance in data_set.substances\n    for component in substance.components\n)\n\nfor smiles in all_smiles:\n\n    # Find those VdW parameters which would be applied to those components.\n    molecule = Molecule.from_smiles(smiles)\n    topology = Topology.from_molecules([molecule])\n\n    labels = force_field.label_molecules(topology)[0]\n\n    # Tag the exercised parameters as to be optimized.\n    for parameter in labels[\"vdW\"].values():\n        parameter.add_cosmetic_attribute(\"parameterize\", \"epsilon, rmin_half\")\n\n# Save the annotated force field file.\nforce_field.to_file('forcefield/openff-1.0.0-tagged.offxml')\n","sub_path":"tutorials/tutorial4.2.py","file_name":"tutorial4.2.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"22234556","text":"from setuptools import setup, find_packages\nfrom io import open\nfrom os import path\nimport pathlib\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n# The text of the README file\nwith open(path.join(HERE, \"README.md\")) as f:\n    README = f.read()\n\n# automatically captured required modules for install_requires in requirements.txt and as well as configure dependency links\nwith open(path.join(HERE, \"requirements.txt\"), encoding=\"utf-8\") as f:\n    all_reqs = 
f.read().split(\"\\n\")\ninstall_requires = [\n x.strip()\n for x in all_reqs\n if (\"git+\" not in x) and (not x.startswith(\"#\")) and (not x.startswith(\"-\"))\n]\ndependency_links = [x.strip().replace(\"git+\", \"\") for x in all_reqs if \"git+\" not in x]\n\nsetup(\n name=\"warp\",\n description=\"Transpile EVM-Compatible Languages To Cairo\",\n version=\"0.1.0\",\n package_dir={\"\": \"warp\"},\n packages=[\n \"cairo-src\",\n \"cairo-src.evm\",\n \"cli\",\n \"cli.compilation\",\n \"transpiler\",\n \"transpiler.Operations\",\n \"yul\",\n ], # list of all packages\n include_package_data=True,\n package_data={\"\": [\"*.json\", \"*.cairo\"]},\n install_requires=install_requires,\n python_requires=\">=3.7\", # any python greater than 3.7\n entry_points=\"\"\"\n [console_scripts]\n warp=cli.warp_cli:main\n \"\"\",\n author=\"Nethermind\",\n keyword=\"Ethereum, Layer2, ETH, StarkNet, Nethermind, StarkWare, transpilation, warp, transpiler, cairo\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n url=\"https://github.com/NethermindEth/warp\",\n download_url=\"\",\n dependency_links=dependency_links,\n author_email=\"hello@nethermind.io\",\n classifiers=[\n \"License :: OSI Approved :: Apache 2.0 License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"207096281","text":"import zipfile, re # khai bao thu vien\n\nf = zipfile.ZipFile(\"channel.zip\")\nso = 90052 # la file dau tien khi doc readme\ncomments = [] # list chua cac comment\ndunglai = 0\nwhile dunglai == 0:\n\n\tfile = f.read(\"%s.txt\"%so) # doc file\n\tcomments.append(f.getinfo(\"%s.txt\"%so).comment) # lay comment trong file qua vi tri cua file getinfo\n\tsotiep = re.findall('[0-9]+', file) # tim file tiep theo phai mo\n\t\n\tif sotiep:\n\t\tso = sotiep[-1]\n\telse:\n\t\tdunglai += 1\nprint(\"\".join(comments)) #in ra list comment\n\n","sub_path":"task10/giaipython6.py","file_name":"giaipython6.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"259876725","text":"# -*- coding: utf-8 -*-\n\"\"\"General Matrix functions.\n\nMatrices are laid out in row-major format and can be loaded directly\ninto OpenGL.\nTo convert to column-major format, transpose the array using the\nnumpy.array.T method.\n\"\"\"\nimport numpy\n\n\ndef apply_direction_scale( vectors, direction, scale ):\n \"\"\"Applies a directional scaling to a set of vectors.\n\n An example usage for this is to flatten a mesh against a\n single plane.\n\n Direction MUST be normalised prior to this call.\n\n :param numpy.array vectors: a 2d numpy array of vectors:\n ::\n\n numpy.array([\n [x,y,z],\n [x,y,z]\n ])\n\n :param numpy.array direction: a 1d numpy array of the direction to scale:\n ::\n \n numpy.array([ x,y,z ])\n\n :param numpy.array scale: a float value for the scaling. A scale of 0.0\n will flatten the vertices. 
\n :rtype: numpy.array\n :return: The vectors flattend in the specified direction.\n The array will be in the shape of the input parameter vectors.\n \"\"\"\n \"\"\"\n scaling is defined as:\n \n [p'][1 + (k - 1)n.x^2, (k - 1)n.x n.y^2, (k - 1)n.x n.z ]\n S(n,k) = [q'][(k - 1)n.x n.y, 1 + (k - 1)n.y, (k - 1)n.y n.z ]\n [r'][(k - 1)n.x n.z, (k - 1)n.y n.z, 1 + (k - 1)n.z^2 ]\n \n where:\n v' is the resulting vector after scaling\n v is the vector to scale\n n is the direction of the scaling\n n.x is the x component of n\n n.y is the y component of n\n n.z is the z component of n\n k is the scaling factor\n \"\"\"\n scaleMinus1 = scale - 1\n matrix = numpy.array(\n [\n # m1\n [\n # m11 = 1 + (k - 1)n.x^2\n 1 + scaleMinus1 * (direction[ 0 ]**2),\n # m12 = (k - 1)n.x n.y^2\n scaleMinus1 * direction[ 0 ] * direction[ 1 ]**2,\n # m13 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ]\n ],\n # m2\n [\n # m21 = (k - 1)n.x n.y\n scaleMinus1 * direction[ 0 ] * direction[ 1 ],\n # m22 = 1 + (k - 1)n.y\n 1 + scaleMinus1 * direction[ 1 ],\n # m23 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ]\n ],\n # m3\n [\n # m31 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ],\n # m32 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ],\n # m33 = 1 + (k - 1)n.z^2\n 1 + scaleMinus1 * direction[ 2 ]**2\n ]\n ],\n dtype = numpy.float\n )\n \n return numpy.dot( vectors, matrix )\n\ndef apply_scale( vectors, scale ):\n \"\"\"Applies a 3 dimensional scale to a set of vectors.\n\n :param numpy.array vectors: A 2D numpy array of vectors:\n ::\n\n numpy.array([\n [x,y,z],\n [x,y,z]\n ])\n\n :param numpy.array scale: The scale vector to apply.\n Can be a 1x3 array, list or tuple\n :rtype: numpy.array\n :return: The vectors scaled by the scaling vector.\n \"\"\"\n # create a scaling matrix\n matrix = numpy.array([\n [ scale[ 0 ], 0.0, 0.0 ],\n [ 0.0, scale[ 1 ], 0.0 ],\n [ 0.0, 0.0, scale[ 2 ] ]\n ])\n return numpy.dot( vectors, matrix )\n\n","sub_path":"pyrr/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"401767141","text":"# 2048 Game written using the Pygame module\n# \n# Lewis Deane\n# 23/12/2014\n\nimport pygame, sys, time\nfrom pygame.locals import *\nfrom colours import *\nfrom random import *\n\n\nScore = \"D+\"\nDEFAULT_SCORE = 2\n\npygame.init()\n\nSURFACE = pygame.display.set_mode((500, 500), 0, 32)\npygame.display.set_caption(\"2048\")\n\nmyfont = pygame.font.SysFont(\"comicsans\", 30)\nscorefont = pygame.font.SysFont(\"comicsans\", 50)\n\nbackground = [[-1]*4 for i in range(4)]\nundoMat = []\n\n\ndef main(fromLoaded = False):\n if not fromLoaded:\n new_block()\n new_block()\n\n printground()\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if checkIfCanGo() == True:\n if event.type == KEYDOWN:\n combine_block(event.key)\n\n new_block()\n new_block()\n printground()\n else:\n printGameOver()\n\n if event.type == KEYDOWN:\n\n if event.key == pygame.K_r:\n reset()\n\n if event.key == pygame.K_s:\n saveGameState()\n elif event.key == pygame.K_l:\n loadGameState()\n elif event.key == pygame.K_u:\n undo()\n\n pygame.display.update()\n\n\nclass Block:\n def __init__(self):\n self.x = 0\n self.y = 0\n\n def move(self):\n SURFACE.blit(pygame.transform.scale(self.image, (87, 87)), (self.x, self.y))\n\nclass D_plus(Block):\n def __init__(self):\n self.image = 
pygame.image.load(\"D+.PNG\")\n\nclass C_plus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"C+.PNG\")\n\nclass C_zero(Block):\n def __init__(self):\n self.image = pygame.image.load(\"C0.PNG\")\n\nclass C_minus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"C-.PNG\")\n\nclass B_plus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"B+.PNG\")\n\nclass B_zero(Block):\n def __init__(self):\n self.image = pygame.image.load(\"B0.PNG\")\n\nclass B_minus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"B-.PNG\")\n\nclass A_plus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"A+.PNG\")\n\nclass A_zero(Block):\n def __init__(self):\n self.image = pygame.image.load(\"A0.PNG\")\n\nclass A_minus(Block):\n def __init__(self):\n self.image = pygame.image.load(\"A-.PNG\")\n\ndef new_block(): #두 번씩 실행해야 함.\n global background\n\n random_x1 = randint(0, 3)\n random_y1 = randint(0, 3)\n if background[random_x1][random_y1] == -1:\n print(random_x1, random_y1)\n background[random_x1][random_y1] = 0\n\n\ndef combine_block(key):\n global background\n\n blank = []\n logic = []\n if key == pygame.K_UP:\n for i in range(0, 4):\n for j in range(0, 4):\n logic.append(background[i][j])\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(0, len(logic)-1):\n if logic[j] == logic[j+1]:\n logic[j] += 1\n logic[j+1] = -1\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(0, len(logic)):\n background[i][j] = logic[j]\n for j in range(len(logic), 4):\n background[i][j] = -1\n\n logic = []\n\n elif key == pygame.K_RIGHT:\n flag = 0\n for i in range(0, 4):\n for j in range(3, -1, -1):\n logic.append(background[j][i])\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(0, len(logic)-1):\n if logic[j] == logic[j+1]:\n logic[j] += 1\n logic[j+1] = -1\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(3, 3-len(logic), -1):\n background[j][i] = logic[flag]\n flag += 1\n for j in range(3-len(logic), -1, -1):\n background[j][i] = -1\n flag = 0\n\n logic = []\n\n elif key == pygame.K_DOWN:\n flag = 0\n for i in range(0, 4):\n for j in range(3, -1, -1):\n logic.append(background[i][j])\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(0, len(logic)-1):\n if logic[j] == logic[j+1]:\n logic[j] += 1\n logic[j+1] = -1\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(3, 3-len(logic), -1):\n background[i][j] = logic[flag]\n flag += 1\n for j in range(3-len(logic), -1, -1):\n background[i][j] = -1\n flag = 0\n\n logic = []\n\n elif key == pygame.K_LEFT:\n for i in range(0, 4):\n for j in range(0, 4):\n logic.append(background[j][i])\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n for j in range(0, len(logic)-1):\n if logic[j] == logic[j+1]:\n logic[j] += 1\n logic[j+1] = -1\n\n while -1 in logic:\n del logic[logic.index(-1)]\n\n print(len(logic))\n\n for j in range(0, len(logic)):\n background[j][i] = logic[j]\n for j in range(len(logic), 4):\n background[j][i] = -1\n logic = []\n\n\ndef printground():\n SURFACE.fill((240, 240, 206))\n board = pygame.Rect(50, 50, 400, 400)\n color = (186, 173, 160)\n pygame.draw.rect(SURFACE, color, board)\n\n global Score\n\n max = 0\n for i in range(0, 4):\n for j in range(0, 4):\n if background[i][j] == 0:\n block = D_plus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n elif background[i][j] == 1:\n block = C_minus()\n block.x = 60 + i * 87 + 10 * i\n block.y 
= 60 + j * 87 + 10 * j\n block.move()\n if max < 1:\n max = 1\n Score = \"C-\"\n elif background[i][j] == 2:\n block = C_zero()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 2:\n max = 2\n Score = \"C0\"\n elif background[i][j] == 3:\n block = C_plus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 3:\n max = 3\n Score = \"C+\"\n elif background[i][j] == 4:\n block = B_minus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 4:\n max = 4\n Score = \"B-\"\n elif background[i][j] == 5:\n block = B_zero()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 5:\n max = 5\n Score = \"B0\"\n elif background[i][j] == 6:\n block = B_plus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 6:\n max = 6\n Score = \"B+\"\n elif background[i][j] == 7:\n block = A_minus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 7:\n max = 7\n Score = \"A-\"\n elif background[i][j] == 8:\n block = A_zero()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 8:\n max = 8\n Score = \"A0\"\n elif background[i][j] == 9:\n block = A_plus()\n block.x = 60 + i * 87 + 10 * i\n block.y = 60 + j * 87 + 10 * j\n block.move()\n if max < 9:\n max = 9\n Score = \"A+\"\n else:\n pygame.draw.rect(SURFACE, (120,110,101), (60 + i * 87 + 10 * i, 60 + j * 87 + 10 * j, 87, 87))\n\n label = scorefont.render(\"Grade:\" + Score, 1, BLACK)\n\n SURFACE.blit(label, (10, 10))\n\n\ndef printGameOver():\n global Score\n\n SURFACE.fill(BLACK)\n\n label = scorefont.render(\"Semester is finished\", 1, (255, 255, 255))\n label2 = scorefont.render(\"Final Grade:\" + Score, 1, (255, 255, 255))\n label3 = myfont.render(\"press r to restart!\", 1, (255, 255, 255))\n\n SURFACE.blit(label, (50, 100))\n SURFACE.blit(label2, (50, 200))\n SURFACE.blit(label3, (50, 300))\n\n\n\ndef floor(n):\n return int(n - (n % 1))\n\n\n\ndef checkIfCanGo():\n for i in range(0, 4 ** 2):\n if background[floor(i / 4)][i % 4] == 0:\n return True\n\n for i in range(0, 4):\n for j in range(0, 4 - 1):\n if background[i][j] == background[i][j + 1]:\n return True\n elif background[j][i] == background[j + 1][i]:\n return True\n return False\n\n\ndef reset():\n global Score\n global background\n\n Score = \"D+\"\n SURFACE.fill(BLACK)\n\n background = [[0 for i in range(0, 4)] for j in range(0, 4)]\n\n main()\n\n\ndef isArrow(k):\n return (k == pygame.K_UP or k == pygame.K_DOWN or k == pygame.K_LEFT or k == pygame.K_RIGHT)\n\n\ndef saveGameState():\n f = open(\"savedata\", \"w\")\n\n line1 = \" \".join([str(background[floor(x / 4)][x % 4]) for x in range(0, 4 ** 2)])\n\n f.write(line1 + \"\\n\")\n f.write(str(4) + \"\\n\")\n f.write(str(Score))\n f.close()\n\n\ndef loadGameState():\n global Score\n global background\n\n f = open(\"savedata\", \"r\")\n\n mat = (f.readline()).split(' ', 4 ** 2)\n Score = int(f.readline())\n\n for i in range(0, 4 ** 2):\n background[floor(i / 4)][i % 4] = int(mat[i])\n\n f.close()\n\n main(True)\n\n\ndef convertToLinearground():\n mat = []\n\n for i in range(0, 4 ** 2):\n mat.append(background[floor(i / 4)][i % 4])\n\n mat.append(Score)\n\n return mat\n\n\ndef addToUndo():\n undoMat.append(convertToLinearground())\n\n\ndef undo():\n if len(undoMat) > 0:\n mat = undoMat.pop()\n\n for i in range(0, 4 ** 2):\n background[floor(i / 4)][i % 4] = mat[i]\n\n 
global Score\n Score = mat[4 ** 2]\n\n printground()\n\n\nmain()","sub_path":"2048(3).py","file_name":"2048(3).py","file_ext":"py","file_size_in_byte":10972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"169410264","text":"\"\"\"\nModule for selecting charm decays from semileptonic B decays.\nFollowing decays are included:\n* D0->KsHH (also Dstar)\n* D0->Pi0HH (also Dstar)\n* D0->hhhh (also Dstar)\n* D0->KsKs (also Dstar)\n* D+->KsH\n* D+->HMuNu\n* Lc+ -> L0 H\n* Lc+ -> p HH\nHeavily based on StrippingB2DMuNuX by Alessandra Borgia and Liming Zhang\nDstar methods closely copied from StrippingDstarD2KShh.py by Mat Charles.\n\"\"\"\n__author__ = ['Mika Vesterinen']\n__date__ = '08/03/2012'\n__version__ = '$Revision: 0.6 $'\n\nfrom Gaudi.Configuration import *\n#from GaudiConfUtils.ConfigurableGenerators import FilterDesktop, CombineParticles, OfflineVertexFitter\nfrom Configurables import FilterDesktop, CombineParticles, OfflineVertexFitter\nfrom PhysSelPython.Wrappers import Selection, DataOnDemand\nfrom StrippingConf.StrippingLine import StrippingLine\nfrom StrippingUtils.Utils import LineBuilder\nfrom StandardParticles import StdLoosePions, StdLooseMuons, StdLooseKaons, StdLooseProtons, StdNoPIDsPions, StdLooseMergedPi0,StdLooseResolvedPi0\nfrom Configurables import ConjugateNeutralPID\n\n__all__ = ('CharmFromBSemiAllLinesConf',\n 'makeb2DMuX',\n 'makeb2DX',\n 'makeDstar',\n 'TOSFilter',\n 'confdict')\n\nconfdict = {\n \"GEC_nLongTrk\" : 250 # adimensional\n ,\"MINIPCHI2\" : 9.0 # adimensiional\n ,\"TRCHI2\" : 4.0 # adimensiional\n ,\"TRCHI2Loose\" : 5.0 # adimensiional \n ,\"KaonPIDK\" : 4.0 # adimensiional\n ,\"PionPIDK\" : 10.0 # adimensiional\n ,\"PionPIDKTight\" : 4.0 # adimensiional\n ,\"MuonIPCHI2\" : 4.00 # adimensiional\n ,\"MuonPT\" : 800.0 # MeV\n ,\"KPiPT\" : 300.0 # MeV\n ,\"DsDIRA\" : 0.99 # adimensiional\n ,\"DsFDCHI2\" : 100.0 # adimensiional\n ,\"DsMassWin\" : 80.0 # MeV\n ,\"DsAMassWin\" : 100.0 # MeV\n ,\"Dto4h_MassWin\" : 40.0 # MeV\n ,\"Dto4h_AMassWin\": 42.0 # MeV\n ,\"DsIP\" : 7.4 #mm\n ,\"DsVCHI2DOF\" : 6.0 # adimensiional\n ,\"PIDmu\" : -0.0 # adimensiional\n ,\"BDIRA\" : 0.999 #adimensiional\n ,\"BVCHI2DOF\" : 6.0 # adimensiional\n ,\"DZ\" : 0 #mm\n ,\"DDocaChi2Max\" : 20 #adimensiional\n ,\"MINIPCHI2Loose\": 4.0 #adimensiional\n ,\"KaonPIDKloose\" : -5 #adimensiional\n ,\"PhiVCHI2\" :25.0 #adimensiional\n ,\"PhiMassWin\" :50 #adimensiional\n ,'KSLLPMin' : 2000 ## MeV\n ,'KSLLPTMin' : 500 ## MeV\n ,'KSDDPMin' : 3000 ## MeV\n ,'KSDDPTMin' : 800 ## MeV\n ,'KSLLCutMass' : 20 ## MeV\n ,'KSDDCutMass' : 20 ## MeV\n ,'KSLLCutFDChi2' : 100 ## unitless\n ,'KSDDCutFDChi2' : 100 ## unitless\n ,'KSDaugTrackChi2' : 4 ## max chi2/ndf for Ks daughter tracks\n ,'KSVertexChi2' : 6 ## max chi2/ndf for Ks vertex\n ,'KSCutDIRA' : 0.99 ## unitless\n ,'LambdaLLPMin' : 2000 ## MeV\n ,'LambdaLLPTMin' : 500 ## MeV\n ,'LambdaLLCutMass' : 30 ## MeV\n ,'LambdaLLCutFDChi2' : 100 ## unitless\n ,'LambdaDDPMin' : 3000 ## MeV\n ,'LambdaDDPTMin' : 800 ## MeV\n ,'LambdaDDCutMass' : 30 ## MeV\n ,'LambdaDDCutFDChi2' : 100 ## unitless\n ,'LambdaCutDIRA' : 0.99 ## unitless\n ,'LambdaDaugTrackChi2': 4 ## unitless\n ,'LambdaVertexChi2' : 6 ## max chi2/ndf for Lambda0 vertex\n ,\"Pi0PtMin\" : 1200 # Minimum Pt of pi0 (MeV)\n ,\"Pi0PMin\" : 3000 # Minimum P of pi0 (MeV)\n ,\"PhotonCL\" : 0.25 # Confidence level for Pi0 photons\n ,\"D02HHPi0AMassWin\" : 220 # MeV (mass window for combination)\n ,\"D02HHPi0MassWin\" : 200 # MeV (mass window after 
vertex fit)\n ,\"D02HHPi0DocaCut\" : 6 # mm\n ,\"D02HHPi0PtCut\" : 2000 # MeV\n ,\"Dstar_preFitMassCut\" : 100 # MeV\n ,\"Dstar_preFitMassCut_HHPi0\" : 200 # MeV\n ,\"Dstar_Chi2\" : 10.0 ## unitless\n ,\"Dstar_SoftPion_PIDe\" : 5. ## unitless\n ,\"Dstar_SoftPion_PT\" : 200. ## MeV\n ,\"Dstar_wideDMCutLower\" : -2. ## MeV\n ,\"Dstar_wideDMCutUpper\" : 15. ## MeV\n }\n\nclass CharmFromBSemiAllLinesConf(LineBuilder) :\n \n __configuration_keys__ = (\n \"GEC_nLongTrk\"\n ,\"MINIPCHI2\" \n ,\"TRCHI2\" \n ,\"TRCHI2Loose\" \n ,\"KaonPIDK\" \n ,\"PionPIDK\"\n ,\"PionPIDKTight\" \n ,\"MuonIPCHI2\" \n ,\"MuonPT\" \n ,\"KPiPT\" \n ,\"DsDIRA\" \n ,\"DsFDCHI2\" \n ,\"DsMassWin\" \n ,\"DsAMassWin\" \n ,\"Dto4h_MassWin\" \n ,\"Dto4h_AMassWin\" \n ,\"DsIP\" \n ,\"DsVCHI2DOF\" \n ,\"PIDmu\" \n ,\"BDIRA\" \n ,\"BVCHI2DOF\" \n ,\"DZ\"\n ,\"DDocaChi2Max\"\n ,\"MINIPCHI2Loose\"\n ,\"KaonPIDKloose\"\n ,\"PhiVCHI2\"\n ,\"PhiMassWin\"\n ,'KSLLPMin' \n ,'KSLLPTMin' \n ,'KSDDPMin' \n ,'KSDDPTMin' \n ,'KSLLCutMass' \n ,'KSDDCutMass' \n ,'KSLLCutFDChi2'\n ,'KSDDCutFDChi2' \n ,'KSDaugTrackChi2'\n ,'KSVertexChi2'\n ,'KSCutDIRA'\n ,'LambdaLLPMin' \n ,'LambdaLLPTMin' \n ,'LambdaLLCutMass' \n ,'LambdaLLCutFDChi2' \n ,'LambdaDDPMin' \n ,'LambdaDDPTMin' \n ,'LambdaDDCutMass' \n ,'LambdaDDCutFDChi2' \n ,'LambdaCutDIRA' \n ,'LambdaDaugTrackChi2'\n ,'LambdaVertexChi2' \n ,\"Pi0PtMin\" \n ,\"Pi0PMin\" \n ,\"PhotonCL\"\n ,\"D02HHPi0AMassWin\"\n ,\"D02HHPi0MassWin\"\n ,\"D02HHPi0PtCut\"\n ,\"D02HHPi0DocaCut\"\n ,\"Dstar_preFitMassCut\"\n ,\"Dstar_preFitMassCut_HHPi0\"\n ,\"Dstar_Chi2\"\n ,\"Dstar_SoftPion_PIDe\"\n ,\"Dstar_SoftPion_PT\"\n ,\"Dstar_wideDMCutLower\"\n ,\"Dstar_wideDMCutUpper\"\n )\n \n __confdict__={}\n \n def __init__(self, name, config) :\n\n LineBuilder.__init__(self, name, config)\n self.__confdict__=config\n \n ############### MUON SELECTIONS ###################\n \n self.selmuon = Selection( \"SelMufor\" + name,\n Algorithm = self._muonFilter(\"Mufor\"+name),\n RequiredSelections = [StdLooseMuons])\n \n self.selmuonhighPT = Selection( \"SelMuhighPTfor\" + name,\n Algorithm = FilterDesktop(name = \"MuhighPTfor\"+name,\n Code = \"(TRCHI2DOF < %(TRCHI2)s)\"\\\n \"& (PT>1.2*GeV) & (MIPCHI2DV(PRIMARY)> 9.0)\" % self.__confdict__ ),\n RequiredSelections = [self.selmuon])\n \n self.selmuontight = Selection( \"SelMutightfor\" + name,\n Algorithm = FilterDesktop( name = \"Mutightfor\"+name,\n Code = \"(MIPCHI2DV(PRIMARY)> 100)\" ),\n RequiredSelections = [self.selmuonhighPT])\n \n self.selmuonnew = Selection( \"SelMunewfor\" + name,\n Algorithm = FilterDesktop( name = \"Munewfor\"+name,\n Code = \"(MIPCHI2DV(PRIMARY)> 9.0)\"\\\n \"& (TRCHI2DOF < %(TRCHI2)s)\" % self.__confdict__ ),\n RequiredSelections = [self.selmuon])\n \n self.selmuonTOS = TOSFilter( \"SelMuTOS\" + name,\n self.selmuontight,\n \"Hlt2SingleMuonDecision\")\n \n ############### KAON AND PION SELECTIONS ################\n \n self.selKaon = Selection( \"SelKfor\" + name,\n Algorithm = self._kaonFilter(\"Kfor\"+name),\n RequiredSelections = [StdLooseKaons])\n \n self.selPion = Selection( \"SelPifor\" + name,\n Algorithm = self._pionFilter(\"Pifor\"+name),\n RequiredSelections = [StdLoosePions])\n \n self.selPionTight = Selection( \"SelPiTightfor\" + name,\n Algorithm = FilterDesktop( name = \"PiTightFor\"+name,\n Code = \"(TRCHI2DOF < %(TRCHI2)s) & (P>2.0*GeV) & (PT > %(KPiPT)s *MeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2)s) & (PIDK< %(PionPIDKTight)s)\" % self.__confdict__ ),\n RequiredSelections = [StdLoosePions])\n \n self.selPionloose = 
Selection( \"SelPiloosefor\" + name,\n Algorithm = self._pionlooseFilter(\"Piloosefor\"+name),\n RequiredSelections = [StdNoPIDsPions])\n \n self.selKaonloose = Selection( \"SelKloosefor\" + name,\n Algorithm = self._kaonlooseFilter(\"Kloosefor\"+name),\n RequiredSelections = [StdLooseKaons])\n \n ############## PI0 SELECTIONS ############################\n \n self.selPi0Resolved = Selection( \"SelPi0Resolvedfor\" + name,\n Algorithm = self._Pi0ResolvedFilter(\"Pi0Resolvedfor\"+name),\n RequiredSelections = [StdLooseResolvedPi0])\n \n self.selPi0Merged = Selection( \"SelPi0Mergedfor\" + name,\n Algorithm = self._Pi0MergedFilter(\"PiMergedfor\"+name),\n RequiredSelections = [StdLooseMergedPi0])\n\n ############## KS0 SELECTIONS ##############################\n \n _stdLooseKsLL = DataOnDemand(\"Phys/StdLooseKsLL/Particles\")\n _stdLooseKsDD = DataOnDemand(\"Phys/StdLooseKsDD/Particles\")\n \n self.selKSLL = Selection(\"SelKsLLfor\"+name,\n Algorithm = self._KsLLFilter(\"KsLLfor\"+name),\n RequiredSelections = [_stdLooseKsLL])\n\n self.selKSDD = Selection(\"SelKsDDfor\"+name,\n Algorithm = self._KsDDFilter(\"KsDDfor\"+name),\n RequiredSelections = [_stdLooseKsDD])\n\n ############# LAMBDA0 SELECTIONS ##########################\n \n _stdLooseLambdaLL = DataOnDemand(\"Phys/StdLooseLambdaLL/Particles\")\n _stdLooseLambdaDD = DataOnDemand(\"Phys/StdLooseLambdaDD/Particles\")\n\n self.selLambdaLL = Selection(\"SelLambdaLLfor\"+name,\n Algorithm = self._LambdaLLFilter(\"LambdaLLfor\"+name),\n RequiredSelections = [_stdLooseLambdaLL])\n\n self.selLambdaDD = Selection(\"SelLambdaDDfor\"+name,\n Algorithm = self._LambdaDDFilter(\"LambdaDDfor\"+name),\n RequiredSelections = [_stdLooseLambdaDD])\n\n ################ D0 -> KsKs SELECTION #############################\n \n Dstar_cuts = {\n \"Dstar_preFitMassCut\" : config[\"Dstar_preFitMassCut\"],\n \"Dstar_Chi2\" : config[\"Dstar_Chi2\"],\n \"Dstar_SoftPion_PIDe\" : config[\"Dstar_SoftPion_PIDe\"],\n \"Dstar_SoftPion_PT\" : config[\"Dstar_SoftPion_PT\"],\n \"Dstar_wideDMCutLower\" : config[\"Dstar_wideDMCutLower\"],\n \"Dstar_wideDMCutUpper\" : config[\"Dstar_wideDMCutUpper\"],\n }\n\n self.sel_D0_to_KsKs_LLLL = Selection('SelD02KsKsLLLLfor'+name,\n Algorithm = self._D02KsKsFilter(['D0 -> KS0 KS0'],'D02KsKsLLLLfor'+name),\n RequiredSelections = [self.selKSLL])\n self.selD0Conj_KsKs_LLLL = Selection('SelConjugate_KsKs_LLLLFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_KsKs_LLLLFor'+name),\n RequiredSelections = [self.sel_D0_to_KsKs_LLLL])\n self.sel_Dstar_to_KsKs_LLLL = makeDstar('Dstar_KsKs_LLLLFor'+name, [self.sel_D0_to_KsKs_LLLL , self.selD0Conj_KsKs_LLLL],Dstar_cuts)\n \n self.sel_D0_to_KsKs_DDDD = Selection('SelD02KsKsDDDDfor'+name,\n Algorithm = self._D02KsKsFilter(['D0 -> KS0 KS0'],'D02KsKsDDDDfor'+name),\n RequiredSelections = [self.selKSDD])\n self.selD0Conj_KsKs_DDDD = Selection('SelConjugate_KsKs_DDDDFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_KsKs_DDDDFor'+name),\n RequiredSelections = [self.sel_D0_to_KsKs_DDDD])\n self.sel_Dstar_to_KsKs_DDDD = makeDstar('Dstar_KsKs_DDDDFor'+name, [self.sel_D0_to_KsKs_DDDD , self.selD0Conj_KsKs_DDDD],Dstar_cuts)\n\n self.sel_D0_to_KsKs_DDLL = Selection('SelD02KsKsDDLLfor'+name,\n Algorithm = self._D02KsKsFilter(['D0 -> KS0 KS0'],'D02KsKsDDLLfor'+name),\n RequiredSelections = [self.selKSLL,self.selKSDD])\n self.selD0Conj_KsKs_DDLL = Selection('SelConjugate_KsKs_DDLLFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_KsKs_DDLLFor'+name),\n RequiredSelections = 
[self.sel_D0_to_KsKs_DDLL])\n self.sel_Dstar_to_KsKs_DDLL = makeDstar('Dstar_KsKs_DDLLFor'+name, [self.sel_D0_to_KsKs_DDLL , self.selD0Conj_KsKs_DDLL],Dstar_cuts)\n\n ################ D0 -> 4H SELECTION ##########################\n\n Dstar_cuts = {\n \"Dstar_preFitMassCut\" : config[\"Dstar_preFitMassCut\"],\n \"Dstar_Chi2\" : config[\"Dstar_Chi2\"],\n \"Dstar_SoftPion_PIDe\" : config[\"Dstar_SoftPion_PIDe\"],\n \"Dstar_SoftPion_PT\" : config[\"Dstar_SoftPion_PT\"],\n \"Dstar_wideDMCutLower\" : config[\"Dstar_wideDMCutLower\"],\n \"Dstar_wideDMCutUpper\" : config[\"Dstar_wideDMCutUpper\"],\n }\n\n self.sel_D0_to_4Pi = Selection( 'Sel_D0_to_4Pi_for' + name,\n Algorithm = self._D02HHHHFilter(['D0 -> pi- pi+ pi- pi+'],'D0_to_4Pi_for' + name),\n RequiredSelections = [self.selPionTight] ) \n self.selD0Conj_4Pi = Selection('SelConjugate_4PiFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_4PiFor'+name),\n RequiredSelections = [self.sel_D0_to_4Pi])\n self.selDstar_to_4Pi = makeDstar('Dstar_4PiFor'+name, [self.sel_D0_to_4Pi , self.selD0Conj_4Pi],Dstar_cuts)\n\n\n self.sel_D0_to_K3Pi = Selection( 'Sel_D0_to_K3Pi_for' + name,\n Algorithm = self._D02HHHHFilter(['[D0 -> K- pi+ pi- pi+]cc'],'D0_to_K3Pi_for' + name),\n RequiredSelections = [self.selPionTight,self.selKaon] ) \n self.selD0Conj_K3Pi = Selection('SelConjugate_K3PiFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_K3PiFor'+name),\n RequiredSelections = [self.sel_D0_to_K3Pi])\n self.selDstar_to_K3Pi = makeDstar('Dstar_K3PiFor'+name, [self.sel_D0_to_K3Pi , self.selD0Conj_K3Pi],Dstar_cuts)\n\n\n self.sel_D0_to_2K2Pi = Selection( 'Sel_D0_to_2K2Pi_for' + name,\n Algorithm = self._D02HHHHFilter(['D0 -> K- K+ pi- pi+'],'D0_to_2K2Pi_for' + name),\n RequiredSelections = [self.selPionTight,self.selKaon] ) \n self.selD0Conj_2K2Pi = Selection('SelConjugate_2K2PiFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_2K2PiFor'+name),\n RequiredSelections = [self.sel_D0_to_2K2Pi])\n self.selDstar_to_2K2Pi = makeDstar('Dstar_2K2PiFor'+name, [self.sel_D0_to_2K2Pi , self.selD0Conj_2K2Pi],Dstar_cuts)\n \n\n self.sel_D0_to_3KPi = Selection( 'Sel_D0_to_3KPi_for' + name,\n Algorithm = self._D02HHHHFilter(['[D0 -> K+ K- K- pi+]cc'],'D0_to_3KPi_for' + name),\n RequiredSelections = [self.selPionTight,self.selKaon] ) \n self.selD0Conj_3KPi = Selection('SelConjugate_3KPiFor'+name,\n Algorithm = ConjugateNeutralPID('Conjugate_3KPiFor'+name),\n RequiredSelections = [self.sel_D0_to_3KPi])\n self.selDstar_to_3KPi = makeDstar('Dstar_3KPiFor'+name, [self.sel_D0_to_3KPi , self.selD0Conj_3KPi],Dstar_cuts)\n \n ################## D0 -> Ks HH SELECTIONS ##############################\n\n Dstar_cuts = {\n \"Dstar_preFitMassCut\" : config[\"Dstar_preFitMassCut\"],\n \"Dstar_Chi2\" : config[\"Dstar_Chi2\"],\n \"Dstar_SoftPion_PIDe\" : config[\"Dstar_SoftPion_PIDe\"],\n \"Dstar_SoftPion_PT\" : config[\"Dstar_SoftPion_PT\"],\n \"Dstar_wideDMCutLower\" : config[\"Dstar_wideDMCutLower\"],\n \"Dstar_wideDMCutUpper\" : config[\"Dstar_wideDMCutUpper\"],\n }\n\n ### ks Pi Pi \n self.seld02KsPiPiLL = Selection( 'SelD02KsPiPiLLfor' + name,\n Algorithm = self._D02KsHHFilter(['D0 -> KS0 pi+ pi-'],'D02KsPiPiLLfor' + name),\n RequiredSelections = [self.selKSLL,self.selPionloose] ) \n self.selD0Conj2KsPiPiLL = Selection('SelConjugateD02KsPiPiLLFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsPiPiLLFor'+name),\n RequiredSelections = [self.seld02KsPiPiLL])\n self.selDstar_2KsPiPiLL = makeDstar('DstarPiPiLLFor'+name, [self.seld02KsPiPiLL , 
self.selD0Conj2KsPiPiLL],Dstar_cuts)\n\n self.seld02KsPiPiDD = Selection( 'SelD02KsPiPiDDfor' + name,\n Algorithm = self._D02KsHHFilter(['D0 -> KS0 pi+ pi-'],'D02KsPiPiDDfor' + name),\n RequiredSelections = [self.selKSDD,self.selPionloose] ) \n self.selD0Conj2KsPiPiDD = Selection('SelConjugateD02KsPiPiDDFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsPiPiDDFor'+name),\n RequiredSelections = [self.seld02KsPiPiDD])\n self.selDstar_2KsPiPiDD = makeDstar('DstarPiPiDDFor'+name, [self.seld02KsPiPiDD , self.selD0Conj2KsPiPiDD],Dstar_cuts)\n\n ### ks K K \n self.seld02KsKKLL = Selection( 'SelD02KsKKLLfor' + name,\n Algorithm = self._D02KsHHFilter(['D0 -> KS0 K+ K-'],'D02KsKKLLfor' + name),\n RequiredSelections = [self.selKSLL,self.selKaonloose] ) \n self.selD0Conj2KsKKLL = Selection('SelConjugateD02KsKKLLFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsKKLLFor'+name),\n RequiredSelections = [self.seld02KsKKLL])\n self.selDstar_2KsKKLL = makeDstar('DstarKKLLFor'+name, [self.seld02KsKKLL , self.selD0Conj2KsKKLL],Dstar_cuts)\n\n self.seld02KsKKDD = Selection( 'SelD02KsKKDDfor' + name,\n Algorithm = self._D02KsHHFilter(['D0 -> KS0 K+ K-'],'D02KsKKDDfor' + name),\n RequiredSelections = [self.selKSDD,self.selKaonloose] ) \n self.selD0Conj2KsKKDD = Selection('SelConjugateD02KsKKDDFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsKKDDFor'+name),\n RequiredSelections = [self.seld02KsKKDD])\n self.selDstar_2KsKKDD = makeDstar('DstarKKDDFor'+name, [self.seld02KsKKDD , self.selD0Conj2KsKKDD],Dstar_cuts)\n\n ### ks K Pi \n self.seld02KsKPiLL = Selection( 'SelD02KsKPiLLfor' + name,\n Algorithm = self._D02KsHHFilter(['[D0 -> KS0 K- pi+]cc'],'D02KsKPiLLfor' + name),\n RequiredSelections = [self.selKSLL,self.selPionloose,self.selKaonloose] ) \n self.selD0Conj2KsKPiLL = Selection('SelConjugateD02KsKPiLLFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsKPiLLFor'+name),\n RequiredSelections = [self.seld02KsKPiLL])\n self.selDstar_2KsKPiLL = makeDstar('DstarKPiLLFor'+name, [self.seld02KsKPiLL , self.selD0Conj2KsKPiLL],Dstar_cuts)\n \n self.seld02KsKPiDD = Selection( 'SelD02KsKPiDDfor' + name,\n Algorithm = self._D02KsHHFilter(['[D0 -> KS0 K- pi+]cc'],'D02KsKPiDDfor' + name),\n RequiredSelections = [self.selKSDD,self.selPionloose,self.selKaonloose] ) \n self.selD0Conj2KsKPiDD = Selection('SelConjugateD02KsKPiDDFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateD02KsKPiDDFor'+name),\n RequiredSelections = [self.seld02KsKPiDD])\n self.selDstar_2KsKPiDD = makeDstar('DstarKPiDDFor'+name, [self.seld02KsKPiDD , self.selD0Conj2KsKPiDD],Dstar_cuts)\n\n ################## D0 -> HHPi0 WITH MERGED PI0 #######################\n\n Dstar_cuts = {\n ## wider mass window than other channels\n \"Dstar_preFitMassCut\" : config[\"Dstar_preFitMassCut_HHPi0\"],\n \"Dstar_Chi2\" : config[\"Dstar_Chi2\"],\n \"Dstar_SoftPion_PIDe\" : config[\"Dstar_SoftPion_PIDe\"],\n \"Dstar_SoftPion_PT\" : config[\"Dstar_SoftPion_PT\"],\n \"Dstar_wideDMCutLower\" : config[\"Dstar_wideDMCutLower\"],\n \"Dstar_wideDMCutUpper\" : config[\"Dstar_wideDMCutUpper\"],\n }\n\n self.seld02KPiPi0Merged = Selection( 'SelD02KPiPi0Mergedfor' + name,\n Algorithm = self._D02HHPi0Filter(['[D0 -> K- pi+ pi0]cc'],'D02KPiPi0Mergedfor' + name),\n RequiredSelections = [self.selKaon,self.selPionTight,self.selPi0Merged])\n self.selD0Conj2KPiPi0Merged = Selection('SelConjugateKPiPi0MergedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateKPiPi0MergedFor'+name),\n RequiredSelections = [self.seld02KPiPi0Merged])\n 
self.selDstar_2KPiPi0Merged = makeDstar('DstarKPiPi0MergedFor'+name, [self.seld02KPiPi0Merged , self.selD0Conj2KPiPi0Merged],Dstar_cuts)\n\n self.seld02KKPi0Merged = Selection( 'SelD02KKPi0Mergedfor' + name,\n Algorithm = self._D02HHPi0Filter(['D0 -> K- K+ pi0'],'D02KKPi0Mergedfor' + name),\n RequiredSelections = [self.selKaon,self.selPi0Merged])\n self.selD0Conj2KKPi0Merged = Selection('SelConjugateKKPi0MergedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateKKPi0MergedFor'+name),\n RequiredSelections = [self.seld02KKPi0Merged])\n self.selDstar_2KKPi0Merged = makeDstar('DstarKKPi0MergedFor'+name, [self.seld02KKPi0Merged , self.selD0Conj2KKPi0Merged],Dstar_cuts)\n \n self.seld02PiPiPi0Merged = Selection( 'SelD02PiPiPi0Mergedfor' + name,\n Algorithm = self._D02HHPi0Filter(['D0 -> pi- pi+ pi0'],'D02PiPiPi0Mergedfor' + name),\n RequiredSelections = [self.selPionTight,self.selPi0Merged])\n self.selD0Conj2PiPiPi0Merged = Selection('SelConjugatePiPiPi0MergedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugatePiPiPi0MergedFor'+name),\n RequiredSelections = [self.seld02PiPiPi0Merged])\n self.selDstar_2PiPiPi0Merged = makeDstar('DstarPiPiPi0MergedFor'+name, [self.seld02PiPiPi0Merged , self.selD0Conj2PiPiPi0Merged],Dstar_cuts)\n\n ################## D0 -> HHPi0 WITH RESOLVED PI0 #######################\n\n Dstar_cuts = {\n \"Dstar_preFitMassCut\" : config[\"Dstar_preFitMassCut\"],\n \"Dstar_Chi2\" : config[\"Dstar_Chi2\"],\n \"Dstar_SoftPion_PIDe\" : config[\"Dstar_SoftPion_PIDe\"],\n \"Dstar_SoftPion_PT\" : config[\"Dstar_SoftPion_PT\"],\n \"Dstar_wideDMCutLower\" : config[\"Dstar_wideDMCutLower\"],\n \"Dstar_wideDMCutUpper\" : config[\"Dstar_wideDMCutUpper\"],\n }\n \n self.seld02KPiPi0Resolved = Selection( \"SelD02KPiPi0Resolvedfor\" + name,\n Algorithm = self._D02HHPi0Filter([\"[D0 -> K- pi+ pi0]cc\"],\"D02KPiPi0Resolvedfor\"+name),\n RequiredSelections = [self.selKaon,self.selPionTight,self.selPi0Resolved])\n self.selD0Conj2KPiPi0Resolved = Selection('SelConjugateKPiPi0ResolvedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateKPiPi0ResolvedFor'+name),\n RequiredSelections = [self.seld02KPiPi0Resolved])\n self.selDstar_2KPiPi0Resolved = makeDstar('DstarKPiPi0ResolvedFor'+name, [self.seld02KPiPi0Resolved , self.selD0Conj2KPiPi0Resolved],Dstar_cuts)\n \n\n self.seld02KKPi0Resolved = Selection( 'SelD02KKPi0Resolvedfor' + name,\n Algorithm = self._D02HHPi0Filter(['D0 -> K- K+ pi0'],'D02KKPi0Resolvedfor' + name),\n RequiredSelections = [self.selKaon,self.selPi0Resolved])\n self.selD0Conj2KKPi0Resolved = Selection('SelConjugateKKPi0ResolvedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateKKPi0ResolvedFor'+name),\n RequiredSelections = [self.seld02KKPi0Resolved])\n self.selDstar_2KKPi0Resolved = makeDstar('DstarKKPi0ResolvedFor'+name, [self.seld02KKPi0Resolved , self.selD0Conj2KKPi0Resolved],Dstar_cuts)\n\n self.seld02KKPi0SSResolved = Selection( 'SelD02KKPi0SSResolvedfor' + name,\n Algorithm = self._D02HHPi0Filter(['[D0 -> K+ K+ pi0]cc'],'D02KKPi0SSResolvedfor' + name),\n RequiredSelections = [self.selKaon,self.selPi0Resolved])\n self.selD0Conj2KKPi0SSResolved = Selection('SelConjugateKKPi0SSResolvedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugateKKPi0SSResolvedFor'+name),\n RequiredSelections = [self.seld02KKPi0SSResolved])\n self.selDstar_2KKPi0SSResolved = makeDstar('DstarKKPi0SSResolvedFor'+name, [self.seld02KKPi0SSResolved , self.selD0Conj2KKPi0SSResolved],Dstar_cuts)\n \n self.seld02PiPiPi0Resolved = Selection( 'SelD02PiPiPi0Resolvedfor' + name,\n Algorithm = 
self._D02HHPi0Filter(['D0 -> pi- pi+ pi0'],'D02PiPiPi0Resolvedfor' + name),\n RequiredSelections = [self.selPionTight,self.selPi0Resolved])\n self.selD0Conj2PiPiPi0Resolved = Selection('SelConjugatePiPiPi0ResolvedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugatePiPiPi0ResolvedFor'+name),\n RequiredSelections = [self.seld02PiPiPi0Resolved])\n self.selDstar_2PiPiPi0Resolved = makeDstar('DstarPiPiPi0ResolvedFor'+name, [self.seld02PiPiPi0Resolved , self.selD0Conj2PiPiPi0Resolved],Dstar_cuts)\n\n self.seld02PiPiPi0SSResolved = Selection('SelD02PiPiPi0SSResolvedfor' + name,\n Algorithm = self._D02HHPi0Filter(['[D0 -> pi+ pi+ pi0]cc'],'D02PiPiPi0SSResolvedfor' + name),\n RequiredSelections = [self.selPionTight,self.selPi0Resolved])\n self.selD0Conj2PiPiPi0SSResolved = Selection('SelConjugatePiPiPi0SSResolvedFor'+name,\n Algorithm = ConjugateNeutralPID('ConjugatePiPiPi0SSResolvedFor'+name),\n RequiredSelections = [self.seld02PiPiPi0SSResolved])\n self.selDstar_2PiPiPi0SSResolved = makeDstar('DstarPiPiPi0SSResolvedFor'+name, [self.seld02PiPiPi0SSResolved , self.selD0Conj2PiPiPi0SSResolved],Dstar_cuts)\n \n ################### D+/Ds+ -> Ks H SELECTIONS ######################\n\n self.selds2KsLLK = Selection( 'SelDs2KsLLKfor' + name,\n Algorithm = self._Ds2KsHFilter(['[D+ -> KS0 K+]cc'],'Ds2KsLLKfor' + name),\n RequiredSelections = [self.selKaon, self.selKSLL] )\n\n self.selds2KsDDK = Selection( 'SelDs2KsDDKfor' + name,\n Algorithm = self._Ds2KsHFilter(['[D+ -> KS0 K+]cc'],'Ds2KsDDKfor' + name),\n RequiredSelections = [self.selKaon, self.selKSDD] )\n\n self.selds2KsLLPi = Selection( 'SelDs2KsLLPifor' + name,\n Algorithm = self._Ds2KsHFilter(['[D+ -> KS0 pi+]cc'],'Ds2KsLLPifor' + name),\n RequiredSelections = [self.selPion, self.selKSLL] )\n \n self.selds2KsDDPi = Selection( 'SelDs2KsDDPifor' + name,\n Algorithm = self._Ds2KsHFilter(['[D+ -> KS0 pi+]cc'],'Ds2KsDDPifor' + name),\n RequiredSelections = [self.selPion, self.selKSDD])\n\n ################## D+/Ds+ -> H mu mu SELECTIONS ########################\n\n self.selds2pimumu = Selection( 'SelDs2PiMuMufor' + name,\n Algorithm = self._Ds2HMuMuFilter([ '[D+ -> pi+ mu+ mu-]cc' ],'Ds2PiMuMufor' + name),\n RequiredSelections = [self.selPion,self.selmuon] )\n \n self.selds2kmumu = Selection( 'SelDs2KMuMufor' + name,\n Algorithm = self._Ds2HMuMuFilter([ '[D+ -> K+ mu+ mu-]cc' ],'Ds2KMuMufor' + name),\n RequiredSelections = [self.selKaon,self.selmuon] )\n\n #################### Lambda_c+ -> X SELECTIONS #########################\n\n self.selLc2L0Pi_DD = Selection( 'SelLc2LambdaDDPifor' + name,\n Algorithm = self._Lc2L0HFilter([ '[Lambda_c+ -> Lambda0 pi+]cc' ],'Lc2LambdaDDPifor' + name),\n RequiredSelections = [self.selPionTight, self.selLambdaDD])\n \n self.selLc2L0Pi_LL = Selection( 'SelLc2LambdaLLPifor' + name,\n Algorithm = self._Lc2L0HFilter([ '[Lambda_c+ -> Lambda0 pi+]cc' ],'Lc2LambdaLLPifor' + name),\n RequiredSelections = [self.selPionTight, self.selLambdaLL])\n \n self.selLc2L0K_DD = Selection( 'SelLc2LambdaDDKfor' + name,\n Algorithm = self._Lc2L0HFilter([ '[Lambda_c+ -> Lambda0 K+]cc' ],'Lc2LambdaDDKfor' + name),\n RequiredSelections = [self.selKaon, self.selLambdaDD])\n \n self.selLc2L0K_LL = Selection( 'SelLc2LambdaLLKfor' + name,\n Algorithm = self._Lc2L0HFilter([ '[Lambda_c+ -> Lambda0 K+]cc' ],'Lc2LambdaLLKfor' + name),\n RequiredSelections = [self.selKaon, self.selLambdaLL])\n\n self.selLc2pKK = Selection( 'SelLc2pKKfor' + name,\n Algorithm = self._Lc2pHHFilter([ '[Lambda_c+ -> p+ K- K+]cc' ],'Lc2pKKfor' + name),\n RequiredSelections 
= [self.selKaon, StdLooseProtons])\n\n self.selLc2pPiPi = Selection( 'SelLc2pPiPifor' + name,\n Algorithm = self._Lc2pHHFilter([ '[Lambda_c+ -> p+ pi- pi+]cc' ],'Lc2pPiPifor' + name),\n RequiredSelections = [self.selPionTight, StdLooseProtons])## tighter pion PID needed here to reduce retention\n \n self.sellambdac = Selection( 'SelLc2PKPifor' + name,\n Algorithm = self._Lc2pHHFilter([ '[Lambda_c+ -> K- p+ pi+]cc' ],'Lc2PKPifor' + name),\n RequiredSelections = [self.selKaon, self.selPion, StdLooseProtons ] )\n \n self.sellambdacDCS = Selection( 'SelLc2PKPiDCSfor' + name,\n Algorithm = self._Lc2pHHFilter([ '[Lambda_c+ -> K+ p+ pi-]cc' ],'Lc2PKPiDCSfor' + name),\n RequiredSelections = [self.selKaon, self.selPion, StdLooseProtons ] )\n\n\n #################### MAKE THE \"B\" CANDIDATES ##############################\n \n ####### D0 -> hhhh\n ### For stripping 17b, there was a muon Hlt2 TOS requirement on these lines\n MuSel = self.selmuon \n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n \n DecayDescriptors = [ '[B- -> D0 mu-]cc', '[B+ -> D0 mu+]cc' ]\n self.selb2D0MuXK3Pi = makeb2DMuX('b2D0MuXK3Pi' + name,DecayDescriptors,MuSel,self.sel_D0_to_K3Pi ,BCuts)\n self.selb2D0MuX4Pi = makeb2DMuX('b2D0MuX4Pi' + name,DecayDescriptors,MuSel,self.sel_D0_to_4Pi,BCuts)\n self.selb2D0MuX2K2Pi = makeb2DMuX('b2D0MuX2K2Pi' + name,DecayDescriptors,MuSel,self.sel_D0_to_2K2Pi,BCuts)\n self.selb2D0MuX3KPi = makeb2DMuX('b2D0MuX3KPi' + name,DecayDescriptors,MuSel,self.sel_D0_to_3KPi,BCuts) \n\n DecayDescriptors = [ '[B0 -> D*(2010)+ mu-]cc', '[B0 -> D*(2010)+ mu+]cc' ]\n self.selb2DstarMuXK3Pi = makeb2DMuX('b2DstarMuXK3Pi' + name,DecayDescriptors,MuSel,self.selDstar_to_K3Pi ,BCuts)\n self.selb2DstarMuX4Pi = makeb2DMuX('b2DstarMuX4Pi' + name,DecayDescriptors,MuSel,self.selDstar_to_4Pi,BCuts)\n self.selb2DstarMuX2K2Pi = makeb2DMuX('b2DstarMuX2K2Pi' + name,DecayDescriptors,MuSel,self.selDstar_to_2K2Pi,BCuts)\n self.selb2DstarMuX3KPi = makeb2DMuX('b2DstarMuX3KPi' + name,DecayDescriptors,MuSel,self.selDstar_to_3KPi,BCuts) \n\n ############### B+ -> MU X D0 -> Ks HH #########################\n MuSel = self.selmuon\n DecayDescriptors = [ '[B- -> D0 mu-]cc', '[B+ -> D0 mu+]cc' ]\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n\n \n self.selb2D0MuXKsPiPiLL = makeb2DMuX('b2D0MuXKsPiPiLL' + name,DecayDescriptors,MuSel,self.seld02KsPiPiLL,BCuts)\n self.selb2D0MuXKsPiPiDD = makeb2DMuX('b2D0MuXKsPiPiDD' + name,DecayDescriptors,MuSel,self.seld02KsPiPiDD,BCuts)\n self.selb2D0MuXKsKKLL = makeb2DMuX('b2D0MuXKsKKLL' + name,DecayDescriptors,MuSel,self.seld02KsKKLL,BCuts)\n self.selb2D0MuXKsKKDD = makeb2DMuX('b2D0MuXKsKKDD' + name,DecayDescriptors,MuSel,self.seld02KsKKDD,BCuts)\n self.selb2D0MuXKsKPiLL = makeb2DMuX('b2D0MuXKsKPiLL' + name,DecayDescriptors,MuSel,self.seld02KsKPiLL,BCuts)\n self.selb2D0MuXKsKPiDD = makeb2DMuX('b2D0MuXKsKPiDD' + name,DecayDescriptors,MuSel,self.seld02KsKPiDD,BCuts)\n\n #### Dstar\n DecayDescriptors = [ '[B0 -> D*(2010)+ mu-]cc', '[B0 -> D*(2010)+ mu+]cc' ]\n \n self.selb2DstarMuXKsPiPiLL = makeb2DMuX('b2DstarMuXKsPiPiLL'+name,DecayDescriptors,MuSel,self.selDstar_2KsPiPiLL,BCuts)\n self.selb2DstarMuXKsPiPiDD = makeb2DMuX('b2DstarMuXKsPiPiDD'+name,DecayDescriptors,MuSel,self.selDstar_2KsPiPiDD,BCuts)\n self.selb2DstarMuXKsKKLL = makeb2DMuX('b2DstarMuXKsKKLL'+name,DecayDescriptors,MuSel,self.selDstar_2KsKKLL,BCuts)\n self.selb2DstarMuXKsKKDD = 
makeb2DMuX('b2DstarMuXKsKKDD'+name,DecayDescriptors,MuSel,self.selDstar_2KsKKDD,BCuts)\n self.selb2DstarMuXKsKPiLL = makeb2DMuX('b2DstarMuXKsKPiLL'+name,DecayDescriptors,MuSel,self.selDstar_2KsKPiLL,BCuts)\n self.selb2DstarMuXKsKPiDD = makeb2DMuX('b2DstarMuXKsKPiDD'+name,DecayDescriptors,MuSel,self.selDstar_2KsKPiDD,BCuts)\n\n\n ############# B+ -> MU X D0 -> Ks Ks #####################\n MuSel = self.selmuon\n DecayDescriptors = [ '[B- -> D0 mu-]cc', '[B+ -> D0 mu+]cc' ]\n BCuts = {'BVCHI2DOF': 9999.,\n 'BDIRA': -999, ## switch off the DIRA cut for KsKs\n 'DZ': config['DZ']}\n self.selb2D0MuXKsKs_DDLL = makeb2DMuX('b2D0MuXKsKs_DDLL' + name,DecayDescriptors,MuSel,self.sel_D0_to_KsKs_DDLL,BCuts)\n self.selb2D0MuXKsKs_DDDD = makeb2DMuX('b2D0MuXKsKs_DDDD' + name,DecayDescriptors,MuSel,self.sel_D0_to_KsKs_DDDD,BCuts)\n self.selb2D0MuXKsKs_LLLL = makeb2DMuX('b2D0MuXKsKs_LLLL' + name,DecayDescriptors,MuSel,self.sel_D0_to_KsKs_LLLL,BCuts)\n\n ## Dstar\n DecayDescriptors = [ '[B0 -> D*(2010)+ mu-]cc', '[B0 -> D*(2010)+ mu+]cc' ]\n self.selb2DstarMuXKsKs_DDLL = makeb2DMuX('b2DstarMuXKsKs_DDLL' + name,DecayDescriptors,MuSel,self.sel_Dstar_to_KsKs_DDLL,BCuts)\n self.selb2DstarMuXKsKs_DDDD = makeb2DMuX('b2DstarMuXKsKs_DDDD' + name,DecayDescriptors,MuSel,self.sel_Dstar_to_KsKs_DDDD,BCuts)\n self.selb2DstarMuXKsKs_LLLL = makeb2DMuX('b2DstarMuXKsKs_LLLL' + name,DecayDescriptors,MuSel,self.sel_Dstar_to_KsKs_LLLL,BCuts)\n\n ############### B+ -> MU X D0 -> H H Pi0 #########################\n\n MuSel = self.selmuon\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n DecayDescriptors = [ '[B- -> D0 mu-]cc', '[B+ -> D0 mu+]cc' ]\n self.selb2D0MuXKPiPi0Resolved = makeb2DMuX('b2D0MuXKPiPi0Resolved'+name,DecayDescriptors,MuSel,self.seld02KPiPi0Resolved,BCuts)\n self.selb2D0MuXKKPi0Resolved = makeb2DMuX('b2D0MuXKKPi0Resolved'+name,DecayDescriptors,MuSel,self.seld02KKPi0Resolved,BCuts)\n self.selb2D0MuXPiPiPi0Resolved = makeb2DMuX('b2D0MuXPiPiPi0Resolved'+name,DecayDescriptors,MuSel,self.seld02PiPiPi0Resolved,BCuts)\n self.selb2D0MuXKKPi0SSResolved = makeb2DMuX('b2D0MuXKKPi0SSResolved'+name,DecayDescriptors,MuSel,self.seld02KKPi0SSResolved,BCuts)\n self.selb2D0MuXPiPiPi0SSResolved = makeb2DMuX('b2D0MuXPiPiPi0SSResolved'+name,DecayDescriptors,MuSel,self.seld02PiPiPi0SSResolved,BCuts)\n self.selb2D0MuXKPiPi0Merged = makeb2DMuX('b2D0MuXKPiPi0Merged'+name,DecayDescriptors,MuSel,self.seld02KPiPi0Merged,BCuts)\n self.selb2D0MuXKKPi0Merged = makeb2DMuX('b2D0MuXKKPi0Merged'+name,DecayDescriptors,MuSel,self.seld02KKPi0Merged,BCuts)\n self.selb2D0MuXPiPiPi0Merged = makeb2DMuX('b2D0MuXPiPiPi0Merged'+name,DecayDescriptors,MuSel,self.seld02PiPiPi0Merged,BCuts)\n \n ## Dstar ###\n DecayDescriptors = [ '[B0 -> D*(2010)+ mu-]cc', '[B0 -> D*(2010)+ mu+]cc' ]\n self.selb2DstarMuXKPiPi0Resolved = makeb2DMuX('b2DstarMuXKPiPi0Resolved'+name,DecayDescriptors,MuSel,self.selDstar_2KPiPi0Resolved,BCuts)\n self.selb2DstarMuXKKPi0Resolved = makeb2DMuX('b2DstarMuXKKPi0Resolved'+name,DecayDescriptors,MuSel,self.selDstar_2KKPi0Resolved,BCuts)\n self.selb2DstarMuXPiPiPi0Resolved = makeb2DMuX('b2DstarMuXPiPiPi0Resolved'+name,DecayDescriptors,MuSel,self.selDstar_2PiPiPi0Resolved,BCuts)\n self.selb2DstarMuXKKPi0SSResolved = makeb2DMuX('b2DstarMuXKKPi0SSResolved'+name,DecayDescriptors,MuSel,self.selDstar_2KKPi0SSResolved,BCuts)\n self.selb2DstarMuXPiPiPi0SSResolved = makeb2DMuX('b2DstarMuXPiPiPi0SSResolved'+name,DecayDescriptors,MuSel,self.selDstar_2PiPiPi0SSResolved,BCuts)\n 
self.selb2DstarMuXKPiPi0Merged = makeb2DMuX('b2DstarMuXKPiPi0Merged'+name,DecayDescriptors,MuSel,self.selDstar_2KPiPi0Merged,BCuts)\n self.selb2DstarMuXKKPi0Merged = makeb2DMuX('b2DstarMuXKKPi0Merged'+name,DecayDescriptors,MuSel,self.selDstar_2KKPi0Merged,BCuts)\n self.selb2DstarMuXPiPiPi0Merged = makeb2DMuX('b2DstarMuXPiPiPi0Merged'+name,DecayDescriptors,MuSel,self.selDstar_2PiPiPi0Merged,BCuts)\n \n ############### B0 -> MU X D+ -> Ks H #########################\n\n MuSel = self.selmuon\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n DecayDescriptors = [ '[B0 -> D+ mu-]cc', '[B0 -> D+ mu+]cc' ]\n \n self.selb2DsMuXKsLLK = makeb2DMuX('b2DsMuXKsLLK'+name,DecayDescriptors,MuSel,self.selds2KsLLK,BCuts)\n self.selb2DsMuXKsDDK = makeb2DMuX('b2DsMuXKsDDK'+name,DecayDescriptors,MuSel,self.selds2KsDDK,BCuts)\n self.selb2DsMuXKsLLPi = makeb2DMuX('b2DsMuXKsLLPi'+name,DecayDescriptors,MuSel,self.selds2KsLLPi,BCuts)\n self.selb2DsMuXKsDDPi = makeb2DMuX('b2DsMuXKsDDPi'+name,DecayDescriptors,MuSel,self.selds2KsDDPi,BCuts)\n\n ############### B0 -> MU X (D+ -> H mu mu) #########################\n\n MuSel = self.selmuon\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n DecayDescriptors = [ '[B0 -> D+ mu-]cc', '[B0 -> D+ mu+]cc' ]\n \n self.selb2DsMuXPiMuMu = makeb2DMuX('b2DsMuXPiMuMu'+name,DecayDescriptors,MuSel,self.selds2pimumu,BCuts)\n self.selb2DsMuXKMuMu = makeb2DMuX('b2DsMuXKMuMu'+name,DecayDescriptors,MuSel,self.selds2kmumu,BCuts)\n\n ############## Lambda_b0 -> MU X Lambda_c -> X ############################\n\n MuSel = self.selmuon\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n DecayDescriptors = [ '[Lambda_b0 -> Lambda_c+ mu-]cc', '[Lambda_b0 -> Lambda_c+ mu+]cc']\n\n \n\n self.selb2Lc2L0DDPiMuX = makeb2DMuX('b2Lc2L0DDPiMuX' + name,DecayDescriptors,MuSel,self.selLc2L0Pi_DD,BCuts)\n self.selb2Lc2L0LLPiMuX = makeb2DMuX('b2Lc2L0LLPiMuX' + name,DecayDescriptors,MuSel,self.selLc2L0Pi_LL,BCuts)\n self.selb2Lc2L0DDKMuX = makeb2DMuX('b2Lc2L0DDKMuX' + name,DecayDescriptors,MuSel,self.selLc2L0K_DD,BCuts)\n self.selb2Lc2L0LLKMuX = makeb2DMuX('b2Lc2L0LLKMuX' + name,DecayDescriptors,MuSel,self.selLc2L0K_LL,BCuts)\n\n ####### Lambda_c -> p K pi\n MuSel = self.selmuonnew\n DecayDescriptors = [ '[Lambda_b0 -> Lambda_c+ mu-]cc', '[Lambda_b0 -> Lambda_c+ mu+]cc']\n BCuts = {'BVCHI2DOF': config['BVCHI2DOF'],\n 'BDIRA': config['BDIRA'],\n 'DZ': config['DZ']}\n \n self.selb2LcMuX = makeb2DMuX('b2LcMuX' + name,DecayDescriptors,MuSel, self.sellambdac ,BCuts)\n self.selb2LcDCSMuX = makeb2DMuX('b2LcDCSMuX' + name, DecayDescriptors,MuSel,self.sellambdacDCS,BCuts)\n self.selb2Lc2pPiPiMuX = makeb2DMuX('b2Lc2pPiPiMuX' + name, DecayDescriptors,MuSel,self.selLc2pPiPi,BCuts)\n self.selb2Lc2pKKMuX = makeb2DMuX('b2Lc2pKKMuX' + name, DecayDescriptors,MuSel,self.selLc2pKK,BCuts)\n\n \n ################# DECLARE THE STRIPPING LINES #################################\n\n GECs = { \"Code\":\"( recSummaryTrack(LHCb.RecSummary.nLongTracks, TrLONG) < %(GEC_nLongTrk)s )\" % config,\n \"Preambulo\": [\"from LoKiTracks.decorators import *\"]}\n \n self.registerLine( StrippingLine('b2D0MuXKsKs_DDDD' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuXKsKs_DDDD))\n self.registerLine( StrippingLine('b2D0MuXKsKs_LLLL' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuXKsKs_LLLL))\n self.registerLine( StrippingLine('b2D0MuXKsKs_DDLL' + name + 'Line', prescale = 1, 
FILTER=GECs,selection = self.selb2D0MuXKsKs_DDLL))\n        ### D*+ versions\n        self.registerLine( StrippingLine('b2DstarMuXKsKs_DDDD' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuXKsKs_DDDD))\n        self.registerLine( StrippingLine('b2DstarMuXKsKs_LLLL' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuXKsKs_LLLL))\n        self.registerLine( StrippingLine('b2DstarMuXKsKs_DDLL' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuXKsKs_DDLL))\n\n        ########### D0 -> HHHH\n        self.registerLine( StrippingLine('b2D0MuXK3Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuXK3Pi) )\n        self.registerLine( StrippingLine('b2D0MuX4Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuX4Pi) )\n        self.registerLine( StrippingLine('b2D0MuX2K2Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuX2K2Pi) )\n        self.registerLine( StrippingLine('b2D0MuX3KPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2D0MuX3KPi) )\n        ## D*+ versions\n        self.registerLine( StrippingLine('b2DstarMuXK3Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuXK3Pi) )\n        self.registerLine( StrippingLine('b2DstarMuX4Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuX4Pi) )\n        self.registerLine( StrippingLine('b2DstarMuX2K2Pi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuX2K2Pi) )\n        self.registerLine( StrippingLine('b2DstarMuX3KPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DstarMuX3KPi) )\n        \n        ########### D0 -> Ks HH \n        self.registerLine( StrippingLine('b2DstarMuXKsPiPiLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsPiPiLL) ) \n        self.registerLine( StrippingLine('b2DstarMuXKsPiPiDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsPiPiDD) )\n        self.registerLine( StrippingLine('b2DstarMuXKsKPiLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsKPiLL) )\n        self.registerLine( StrippingLine('b2DstarMuXKsKPiDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsKPiDD) )\n        self.registerLine( StrippingLine('b2DstarMuXKsKKLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsKKLL) )\n        self.registerLine( StrippingLine('b2DstarMuXKsKKDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKsKKDD) )\n        ## D0 versions\n        self.registerLine( StrippingLine('b2D0MuXKsPiPiLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsPiPiLL) )\n        self.registerLine( StrippingLine('b2D0MuXKsPiPiDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsPiPiDD) )\n        self.registerLine( StrippingLine('b2D0MuXKsKKLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsKKLL) )\n        self.registerLine( StrippingLine('b2D0MuXKsKKDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsKKDD) )\n        self.registerLine( StrippingLine('b2D0MuXKsKPiLL'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsKPiLL) )\n        self.registerLine( StrippingLine('b2D0MuXKsKPiDD'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKsKPiDD) )\n        \n        ########### D0 -> HHPi0 \n        self.registerLine( StrippingLine('b2D0MuXKPiPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKPiPi0Resolved) )\n        self.registerLine( StrippingLine('b2D0MuXKKPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKKPi0Resolved) )\n        self.registerLine(
StrippingLine('b2D0MuXPiPiPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXPiPiPi0Resolved) )\n self.registerLine( StrippingLine('b2D0MuXKKPi0SSResolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKKPi0SSResolved) )\n self.registerLine( StrippingLine('b2D0MuXPiPiPi0SSResolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXPiPiPi0SSResolved) )\n self.registerLine( StrippingLine('b2D0MuXKPiPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKPiPi0Merged) )\n self.registerLine( StrippingLine('b2D0MuXKKPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXKKPi0Merged) )\n self.registerLine( StrippingLine('b2D0MuXPiPiPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2D0MuXPiPiPi0Merged) )\n ## D*+ versions\n self.registerLine( StrippingLine('b2DstarMuXKPiPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKPiPi0Resolved) )\n self.registerLine( StrippingLine('b2DstarMuXKKPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKKPi0Resolved) )\n self.registerLine( StrippingLine('b2DstarMuXPiPiPi0Resolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXPiPiPi0Resolved) )\n self.registerLine( StrippingLine('b2DstarMuXKKPi0SSResolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKKPi0SSResolved) )\n self.registerLine( StrippingLine('b2DstarMuXPiPiPi0SSResolved'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXPiPiPi0SSResolved) )\n self.registerLine( StrippingLine('b2DstarMuXKPiPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKPiPi0Merged) )\n self.registerLine( StrippingLine('b2DstarMuXKKPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXKKPi0Merged) )\n self.registerLine( StrippingLine('b2DstarMuXPiPiPi0Merged'+name+'Line',prescale = 1,FILTER=GECs,selection = self.selb2DstarMuXPiPiPi0Merged) )\n \n ########### D+ -> KsH\n self.registerLine( StrippingLine('b2DsMuXKsLLK' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXKsLLK) )\n self.registerLine( StrippingLine('b2DsMuXKsDDK' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXKsDDK) )\n self.registerLine( StrippingLine('b2DsMuXKsLLPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXKsLLPi) )\n self.registerLine( StrippingLine('b2DsMuXKsDDPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXKsDDPi) )\n\n ########## D+ -> H mu mu\n self.registerLine( StrippingLine('b2DsMuXPiMuMu' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXPiMuMu) )\n self.registerLine( StrippingLine('b2DsMuXKMuMu' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2DsMuXKMuMu) )\n \n ########## Lambda_c+ -> Lambda H\n self.registerLine( StrippingLine('b2MuXLc2L0LLPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2L0LLPiMuX) )\n self.registerLine( StrippingLine('b2MuXLc2L0DDPi' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2L0DDPiMuX) )\n self.registerLine( StrippingLine('b2MuXLc2L0LLK' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2L0LLKMuX) )\n self.registerLine( StrippingLine('b2MuXLc2L0DDK' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2L0DDKMuX) )\n\n ########## Lambda_c+ -> p HH \n self.registerLine( StrippingLine('b2LcMuX' + name + 'Line', prescale = 1, FILTER=GECs,selection = 
self.selb2LcMuX) )\n self.registerLine( StrippingLine('b2LcDCSMuX' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2LcDCSMuX) )\n self.registerLine( StrippingLine('b2Lc2pPiPiMuX' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2pPiPiMuX) ) \n self.registerLine( StrippingLine('b2Lc2pKKMuX' + name + 'Line', prescale = 1, FILTER=GECs,selection = self.selb2Lc2pKKMuX) )\n \n\n \n def _muonFilter( self , _name):\n _code = \"(PT > %(MuonPT)s *MeV) & (P> 3.0*GeV)\"\\\n \"& (TRCHI2DOF< %(TRCHI2Loose)s) & (MIPCHI2DV(PRIMARY)> %(MuonIPCHI2)s)\"\\\n \"& (PIDmu > %(PIDmu)s)\" % self.__confdict__\n _mu = FilterDesktop( name = _name, Code = _code )\n return _mu \n\n def _pionFilter( self , _name):\n _code = \" (TRCHI2DOF < %(TRCHI2)s) & (P>2.0*GeV) & (PT > %(KPiPT)s *MeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2)s) & (PIDK< %(PionPIDK)s)\" % self.__confdict__\n _pi = FilterDesktop( name = _name, Code = _code )\n return _pi\n\n def _kaonFilter( self , _name ):\n _code = \" (TRCHI2DOF < %(TRCHI2)s) & (P>2.0*GeV) & (PT > %(KPiPT)s *MeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2)s) & (PIDK> %(KaonPIDK)s)\" % self.__confdict__\n _ka = FilterDesktop( name = _name, Code = _code )\n return _ka \n \n def _kaonlooseFilter( self, _name ):\n _code = \" (TRCHI2DOF < %(TRCHI2)s) & (P>2.0*GeV) & (PT > %(KPiPT)s *MeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2Loose)s) & (PIDK> %(KaonPIDKloose)s)\" % self.__confdict__\n _kal = FilterDesktop( name = _name, Code = _code )\n return _kal \n \n def _pionlooseFilter( self , _name):\n _code = \" (TRCHI2DOF < %(TRCHI2)s) & (P>2.0*GeV) & (PT > %(KPiPT)s *MeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2Loose)s)\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code )\n return _pil\n \n \n def _Pi0ResolvedFilter( self, _name):\n _code = \"(PT> %(Pi0PtMin)s *MeV) & (P> %(Pi0PMin)s *MeV)\"\\\n \"& (CHILD(CL,1)> %(PhotonCL)s) & (CHILD(CL,2)> %(PhotonCL)s)\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code )\n return _pil\n\n def _Pi0MergedFilter( self, _name):\n _code = \"(PT> %(Pi0PtMin)s *MeV) & (P> %(Pi0PMin)s *MeV)\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code )\n return _pil\n\n def _KsDDFilter( self, _name):\n _code = \" (P> %(KSDDPMin)s *MeV) & (PT> %(KSDDPTMin)s *MeV)\" \\\n \" & (ADMASS('KS0') < %(KSDDCutMass)s *MeV) & (BPVVDCHI2> %(KSDDCutFDChi2)s)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(KSDaugTrackChi2)s),1)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(KSDaugTrackChi2)s),2)\" \\\n \" & (VFASPF(VCHI2PDOF) < %(KSVertexChi2)s)\" \\\n \" & (BPVDIRA > %(KSCutDIRA)s )\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code)\n return _pil\n\n def _KsLLFilter( self, _name):\n _code = \" (P> %(KSLLPMin)s *MeV) & (PT> %(KSLLPTMin)s *MeV)\" \\\n \" & (ADMASS('KS0') < %(KSLLCutMass)s *MeV) & (BPVVDCHI2> %(KSLLCutFDChi2)s)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(KSDaugTrackChi2)s),1)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(KSDaugTrackChi2)s),2)\" \\\n \" & (VFASPF(VCHI2PDOF) < %(KSVertexChi2)s)\" \\\n \" & (BPVDIRA > %(KSCutDIRA)s )\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code)\n return _pil\n \n def _LambdaLLFilter( self, _name):\n _code = \" (P> %(LambdaLLPMin)s *MeV) & (PT> %(LambdaLLPTMin)s *MeV)\" \\\n \" & (ADMASS('Lambda0') < %(LambdaLLCutMass)s *MeV) & (BPVVDCHI2> %(LambdaLLCutFDChi2)s)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(LambdaDaugTrackChi2)s),1)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(LambdaDaugTrackChi2)s),2)\" \\\n \" & (VFASPF(VCHI2PDOF) < 
%(LambdaVertexChi2)s)\" \\\n \" & (BPVDIRA > %(LambdaCutDIRA)s )\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code)\n return _pil\n\n def _LambdaDDFilter( self , _name):\n _code = \" (P> %(LambdaDDPMin)s *MeV) & (PT> %(LambdaDDPTMin)s *MeV)\" \\\n \" & (ADMASS('Lambda0') < %(LambdaDDCutMass)s *MeV) & (BPVVDCHI2> %(LambdaDDCutFDChi2)s)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(LambdaDaugTrackChi2)s),1)\" \\\n \" & CHILDCUT((TRCHI2DOF < %(LambdaDaugTrackChi2)s),2)\" \\\n \" & (VFASPF(VCHI2PDOF) < %(LambdaVertexChi2)s)\" \\\n \" & (BPVDIRA > %(LambdaCutDIRA)s )\" % self.__confdict__\n _pil = FilterDesktop( name = _name, Code = _code)\n return _pil\n\n def _D02HHHHFilter( self , _decayDescriptors,_name):\n _combinationCut = \"(ADAMASS('D0') < %(Dto4h_AMassWin)s *MeV) & (APT > 1500.*MeV) & (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _daughtersCuts = { \"pi+\" : \" (PT > 250 *MeV) & (P>2.0*GeV)\"\\\n \"& (TRCHI2DOF < %(TRCHI2)s)\" % self.__confdict__}\n _motherCut = \" (ADMASS('D0') < %(Dto4h_MassWin)s *MeV) & (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (INTREE((ABSID=='pi+')& (PT > %(KPiPT)s *MeV) &(MIPCHI2DV(PRIMARY)> %(MINIPCHI2)s)))\" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _d02hhhh = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n DaughtersCuts = _daughtersCuts,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _d02hhhh\n\n\n def _Ds2HMuMuFilter( self , _decayDescriptors, _name):\n _combinationCut = \"(DAMASS('D_s+') < %(DsAMassWin)s *MeV) & (DAMASS('D+')> -%(DsAMassWin)s *MeV)\"\\\n \"& (ACHILD(PT,1)+ACHILD(PT,2)+ACHILD(PT,3) > 1800.*MeV) & (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _motherCut = \"(SUMTREE( PT, ISBASIC )>1800.*MeV) &(DMASS('D_s+') < %(DsMassWin)s *MeV) & (DMASS('D+') > -%(DsMassWin)s *MeV)\"\\\n \"& (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _ds2hmumu = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _ds2hmumu\n \n def _Ds2KsHFilter( self , _decayDescriptors, _name):\n _combinationCut = \"(DAMASS('D_s+') < %(DsAMassWin)s *MeV) & (DAMASS('D+')> -%(DsAMassWin)s *MeV)\"\\\n \"& (ACHILD(PT,1)+ACHILD(PT,2) > 1500.*MeV)\" \\\n \"& (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _motherCut = \"(SUMTREE( PT, ISBASIC )>1500.*MeV) &(DMASS('D_s+') < %(DsMassWin)s *MeV) & (DMASS('D+') > -%(DsMassWin)s *MeV)\"\\\n \"& (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _ds2ksh = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _ds2ksh\n\n \n def _D02KsHHFilter( self , _decayDescriptors, _name):\n _combinationCut = \"(ADAMASS('D0') < %(DsAMassWin)s *MeV) & (ACHILD(PT,1)+ACHILD(PT,2) > 1800.*MeV)\"\\\n \"& (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _motherCut = \"(SUMTREE( PT, ISBASIC )>1800.*MeV) &(ADMASS('D0') < %(DsMassWin)s *MeV) & (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _d02KsHH = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _d02KsHH\n\n\n def _D02KsKsFilter( self , 
_decayDescriptors, _name):\n \n _combinationCut = \"(ADAMASS('D0') < %(DsAMassWin)s *MeV) & (ACHILD(PT,1)+ACHILD(PT,2) > 1800.*MeV)\"\\\n \"& (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _motherCut = \"(SUMTREE( PT, ISBASIC )>1800.*MeV) &(ADMASS('D0') < %(DsMassWin)s *MeV) & (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _d02KsKs = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _d02KsKs\n \n \n def _D02HHPi0Filter( self , _decayDescriptors, _name):\n _combinationCut = \"(ADAMASS('D0') < %(D02HHPi0AMassWin)s *MeV) \" \\\n \" & (APT> %(D02HHPi0PtCut)s *MeV)\" \\\n \" & (ADOCA(1,2) < %(D02HHPi0DocaCut)s)\" % self.__confdict__\n _motherCut = \"(ADMASS('D0') < %(D02HHPi0MassWin)s *MeV) \" \\\n \"& (SUMTREE( PT, ISBASIC )> %(D02HHPi0PtCut)s *MeV) \" \\\n \"& (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _d02HHPi0 = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut)\n return _d02HHPi0\n\n def _D02HHPi0FilterFIT( self , _decayDescriptors,_name):\n _combinationCut = \"(ADAMASS('D0') < %(D02HHPi0AMassWin)s *MeV) \" \\\n \" & (APT> %(D02HHPi0PtCut)s *MeV)\" \\\n \" & (ADOCA(1,2) < %(D02HHPi0DocaCut)s)\" % self.__confdict__\n _motherCut = \"(ADMASS('D0') < %(D02HHPi0MassWin)s *MeV) \" \\\n \"& (SUMTREE( PT, ISBASIC )> %(D02HHPi0PtCut)s *MeV) \" \\\n \"& (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _d02HHPi0 = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut)\n _d02HHPi0.addTool( OfflineVertexFitter )\n _d02HHPi0.VertexFitters.update( { \"\" : \"OfflineVertexFitter\"} )\n _d02HHPi0.OfflineVertexFitter.useResonanceVertex = False\n _d02HHPi0.ReFitPVs = True\n return _d02HHPi0\n\n def _Lc2L0HFilter( self , _decayDescriptors, _name):\n _combinationCut = \"(ADAMASS('Lambda_c+') < %(DsAMassWin)s *MeV) \" \\\n \" & (ADOCACHI2CUT( %(DDocaChi2Max)s, '')) \" \\\n \" & (ACHILD(PT,1)+ACHILD(PT,2) > 1800.*MeV)\" % self.__confdict__\n _motherCut = \"(ADMASS('Lambda_c+') < %(DsMassWin)s *MeV) \" \\\n \" & (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \" & (SUMTREE( PT, ISBASIC )>1800.*MeV) \" \\\n \" & (BPVVDCHI2 > %(DsFDCHI2)s) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _lambdac = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _lambdac\n\n\n def _Lc2pHHFilter( self , _decayDescriptors, _name):\n _daughtersCuts = { \"p+\" : \"(TRCHI2DOF < %(TRCHI2)s) & (PT > %(KPiPT)s *MeV) & (P>2.0*GeV)\"\\\n \"& (MIPCHI2DV(PRIMARY)> %(MINIPCHI2)s) & (PIDp> %(KaonPIDK)s) & (PIDp-PIDK>1.0e-10)\" % self.__confdict__ }\n _combinationCut = \"(ADAMASS('Lambda_c+') < %(DsAMassWin)s *MeV) & (ACHILD(PT,1)+ACHILD(PT,2)+ACHILD(PT,3) > 1800.*MeV)\"\\\n \"& (ADOCACHI2CUT( %(DDocaChi2Max)s, ''))\" % self.__confdict__\n _motherCut = \"(ADMASS('Lambda_c+') < %(DsMassWin)s *MeV) & (VFASPF(VCHI2/VDOF) < %(DsVCHI2DOF)s) \" \\\n \"& (BPVVDCHI2 > %(DsFDCHI2)s) & (SUMTREE( PT, ISBASIC )>1800.*MeV) & (BPVDIRA> %(DsDIRA)s)\" % self.__confdict__\n _lambdac = CombineParticles( name = _name,\n DecayDescriptors = _decayDescriptors,\n DaughtersCuts = 
_daughtersCuts,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut) \n return _lambdac\n \ndef makeDstar(_name, inputD0,Dstar_cuts) : \n \"\"\"\n Given a list of D0, try to make D*+ -> D0 pi+\n \"\"\"\n _softPi = DataOnDemand(Location = 'Phys/StdAllLoosePions/Particles')\n _cutsSoftPi = '( PIDe-PIDpi < %(Dstar_SoftPion_PIDe)s )' % Dstar_cuts\n _cutsDstarComb = \"ADAMASS('D*(2010)+') < %(Dstar_preFitMassCut)s *MeV\" % Dstar_cuts\n _cutsDstarMoth_base = '(PT > %(Dstar_SoftPion_PT)s * MeV) & (VFASPF(VCHI2/VDOF) < %(Dstar_Chi2)s )' % Dstar_cuts\n _cutsDstarMoth_DM = '(MM - CHILD(MM,1) - CHILD(MM,2) > %(Dstar_wideDMCutLower)s *MeV) & (MM - CHILD(MM,1) - CHILD(MM,2) < %(Dstar_wideDMCutUpper)s *MeV)' % Dstar_cuts\n _cutsDstarMoth = '(' + _cutsDstarMoth_base + ' & ' + _cutsDstarMoth_DM + ')'\n _Dstar = CombineParticles( name = _name,\n DecayDescriptor = \"[D*(2010)+ -> D0 pi+]cc\",\n DaughtersCuts = { \"pi+\" : _cutsSoftPi },\n CombinationCut = _cutsDstarComb,\n MotherCut = _cutsDstarMoth)\n return Selection (name = \"Sel\"+_name,Algorithm = _Dstar,RequiredSelections = inputD0 + [_softPi])\n\n \ndef makeb2DMuX(_name,\n DecayDescriptors,\n MuSel,\n DSel,\n BCuts):\n _combinationCut = \"(AM<6.2*GeV)\"\n _motherCut = \" (MM<6.0*GeV) & (MM>2.5*GeV) & (VFASPF(VCHI2/VDOF)< %(BVCHI2DOF)s) & (BPVDIRA> %(BDIRA)s) \" \\\n \"& (MINTREE(((ABSID=='D+') | (ABSID=='D0') | (ABSID=='Lambda_c+')) , VFASPF(VZ))-VFASPF(VZ) > %(DZ)s *mm ) \" % BCuts\n _B = CombineParticles(name = _name,DecayDescriptors = DecayDescriptors,\n CombinationCut = _combinationCut,\n MotherCut = _motherCut)\n return Selection (name = \"Sel\"+_name,\n Algorithm = _B,\n RequiredSelections = [MuSel, DSel])\n\ndef TOSFilter( name = None, sel = None, trigger = None ):\n \"\"\"\n Function to return a selection object, filtering for TOS candidates from input selection\n \"\"\"\n from Configurables import TisTosParticleTagger\n \n _filter = TisTosParticleTagger(name+\"_TriggerTos\")\n _filter.TisTosSpecs = { trigger+\"%TOS\" : 0 }\n _filter.NoRegex = True\n \n _sel = Selection(\"Sel\" + name + \"_TriggerTos\", RequiredSelections = [ sel ], Algorithm = _filter )\n return _sel\n\n\n\n","sub_path":"DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping19c/StrippingCharmFromBSemi.py","file_name":"StrippingCharmFromBSemi.py","file_ext":"py","file_size_in_byte":67332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"51194236","text":"# --------------------------------------------------------------------------\n# Project Reality prinfo by rpoxo\n#\n# ~ prinfo.py\n#\n# Description:\n# displays data about vehicle\n#\n# -------------------------------------------------------------------------\n\nimport os\nimport datetime\n\n# workaround for importing outside bf2\nimport externals\nfrom externals import bf2 as bf2\nfrom externals import host as host\nfrom externals import radmin as radmin\nfrom externals import rtimer as rtimer\n\nfrom math3d import Point3d\n\n# ------------------------------------------------------------------------\n# Init\n# ------------------------------------------------------------------------\ndef init():\n WatchVehicle.init()\n\ndef deinit():\n WatchVehicle.deinit()\n\nclass WatchVehicle:\n reporting = False\n reporting_mode = None\n\n logging = False\n logging_fo = None\n\n timer = None\n\n vehicle = None\n vehicle_lastposition = (0.0, 0.0, 0.0)\n vehicle_lastrotation = (0.0, 0.0, 0.0)\n vehicle_lasttime = 0.0\n\n @classmethod\n def init(cls):\n 
host.registerHandler('EnterVehicle', cls.onEnterVehicle)\n        host.registerHandler('ExitVehicle', cls.onExitVehicle)\n\n        radmin.addCommand(\"watch\", cls.switchReporting, 777)\n        radmin.addCommand(\"position\", cls.switchReportPosition, 777)\n        radmin.addCommand(\"rotation\", cls.switchReportRotation, 777)\n        radmin.addCommand(\"speed\", cls.switchReportSpeed, 777)\n        #radmin.addCommand(\"aoa\", cls.switchReportAngleOfAttack, 777)\n\n        radmin.addCommand(\"log\", cls.switchLogging, 777)\n    \n    @classmethod\n    def deinit(cls):\n        if cls.logging_fo: cls.logging_fo.close()\n        # let bf2 deal with destroying handlers\n    \n    @classmethod\n    def switchReporting(cls, args, player):\n        if cls.reporting: cls._disableReporting()\n        else: cls._enableReporting(player.getVehicle())\n    \n    @classmethod\n    def switchLogging(cls, args, player):\n        if cls.logging: cls._disableLogging()\n        else: cls._enableLogging(player.getVehicle())\n    \n    @classmethod\n    def switchReportPosition(cls, args, player):\n        cls.reporting_mode = 'position'\n        if not cls.reporting: cls.switchReporting(args, player)\n    \n    @classmethod\n    def switchReportRotation(cls, args, player):\n        cls.reporting_mode = 'rotation'\n        if not cls.reporting: cls.switchReporting(args, player)\n    \n    @classmethod\n    def switchReportSpeed(cls, args, player):\n        cls.reporting_mode = 'speed'\n        if not cls.reporting: cls.switchReporting(args, player)\n    \n    @classmethod\n    def switchReportAngleOfAttack(cls, args, player):\n        cls.reporting_mode = 'aoa'\n        if not cls.reporting: cls.switchReporting(args, player)\n\n    @classmethod\n    def onEnterVehicle(cls, player, vehicle, freeSoldier=False):\n        cls.vehicle = vehicle\n        debugIngame('%s entered %s' % (player.getName(), vehicle.templateName))\n\n    @classmethod\n    def onExitVehicle(cls, player, vehicle):\n        if cls.reporting: cls._disableReporting()\n        if cls.logging: cls._disableLogging()\n        debugIngame('%s exited %s' % (player.getName(), vehicle.templateName))\n\n    @classmethod\n    def _enableTimer(cls):\n        debugIngame('Enabling timer...')\n        cls.timer = rtimer.Timer(cls._tick, -1, 1)\n        cls.timer.setRecurring(0.1) # no need to have it faster than 0.03 as ingame printouts will get buffered\n    \n    @classmethod\n    def _disableTimer(cls):\n        debugIngame('Disabling timer...')\n        if cls.timer:\n            cls.timer.destroy()\n            cls.timer = None\n        elif cls.timer is None:\n            debugIngame(\"Timer already doesn't exist\")\n        else:\n            debugIngame('Could not destroy timer')\n\n    @classmethod\n    def _enableReporting(cls, vehicle):\n        debugIngame('Enabling reporting...')\n        cls.reporting = True\n        if not cls.timer: cls._enableTimer()\n        if vehicle is not None: cls.vehicle = vehicle\n    \n    @classmethod\n    def _disableReporting(cls):\n        debugIngame('Disabling reporting...')\n        cls.reporting = False\n        cls.reporting_mode = None\n        if cls.timer: cls._disableTimer()\n        if cls.vehicle: cls.vehicle = None\n\n    @classmethod\n    def _enableLogging(cls, vehicle):\n        debugIngame('Enabling logging...')\n\n        cls.logging = True\n\n        filename = os.path.join(host.sgl_getModDirectory(), 'Logs', 'prinfo_' + datetime.datetime.now().strftime(\"%Y%m%d_%H_%M\") + '.log')\n        if vehicle:\n            filename = os.path.join(host.sgl_getModDirectory(), 'Logs', 'prinfo_' + datetime.datetime.now().strftime(\"%Y%m%d_%H_%M_\") + vehicle.templateName + '.log')\n        cls.logging_fo = open(filename, 'w')\n        debugIngame('Log path is %s' % os.path.abspath(filename))\n\n        cls._enableTimer()\n        if vehicle is not None:\n            cls.vehicle = vehicle\n    \n    @classmethod\n    def _disableLogging(cls):\n        debugIngame('Disabling logging...')\n        cls.logging = False\n        cls.logging_fo.close()\n
\n @classmethod\n def _tick(cls, data):\n if not cls.reporting and not cls.logging: return\n if not cls.vehicle: return\n\n epoch = host.timer_getWallTime()\n position = cls.vehicle.getPosition()\n rotation = cls.vehicle.getRotation()\n\n if cls.reporting_mode == 'position':\n cls._position(cls.vehicle, epoch, Point3d(*position))\n elif cls.reporting_mode == 'rotation':\n cls._rotation(cls.vehicle, epoch, rotation)\n elif cls.reporting_mode == 'speed':\n delta = epoch - cls.vehicle_lasttime\n cls._speed(cls.vehicle, epoch, delta, Point3d(*position), Point3d(*cls.vehicle_lastposition))\n elif cls.reporting_mode == 'aoa':\n #cls._aoa(cls.vehicle, epoch, Point3d(*position), Point3d(*cls.vehicle_lastposition))\n pass\n elif cls.reporting_mode == None and cls.reporting:\n debugIngame('cls.reporting_mode: %s' % cls.reporting_mode)\n debugIngame('pos_last: (%f, %f, %f)' % (cls.vehicle_lastposition[0], cls.vehicle_lastposition[1], cls.vehicle_lastposition[2]))\n debugIngame('pos_curr: (%f, %f, %f)' % (position[0], position[1], position[2]))\n debugIngame('epoch_last: %f' % cls.vehicle_lasttime)\n debugIngame('epoch_curr: %f' % epoch)\n\n if cls.logging:\n msg = 'position: %s\\nrotation: %s\\nepoch: %s\\n' % (position, rotation, epoch)\n cls._log(cls.logging_fo, msg)\n \n cls.vehicle_lastposition = position\n cls.vehicle_lastrotation = rotation\n cls.vehicle_lasttime = epoch\n \n @classmethod\n def _position(cls, vehicle, epoch, position):\n debugIngame('%s @%.3f position:(%.3f, %.3f, %.3f)' % (vehicle.templateName, epoch, position.x, position.y, position.z))\n \n @classmethod\n def _rotation(cls, vehicle, epoch, rotation):\n debugIngame('%s @%.3f rotation:(%.3f, %.3f, %.3f)' % (vehicle.templateName, epoch, rotation[0], rotation[1], rotation[2]))\n\n @classmethod\n def _speed(cls, vehicle, epoch, delta, current, last):\n distance = Point3d.Distance(current, last)\n speed = distance / delta\n debugIngame('%s @%.3f for %.3f: %.4f' % (vehicle.templateName, epoch, delta, speed))\n \n # TODO: this is bullshit tbh\n @classmethod\n def _aoa(cls, vehicle, epoch, v1, v2):\n raise NotImplementedError\n \n @classmethod\n def _log(cls, fo, msg):\n fo.write(msg)\n\ndef debugMessage(msg):\n host.rcon_invoke('echo \"%s\"' % (str(msg)))\n\ndef debugIngame(msg):\n #debugMessage(msg)\n try:\n host.rcon_invoke('game.sayAll \"%s\"' % (str(msg)))\n except:\n host.rcon_invoke('echo \"debugIngame(FAIL): %s\"' % (str(msg)))\n\n\n","sub_path":"prinfo.py","file_name":"prinfo.py","file_ext":"py","file_size_in_byte":7791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76301832","text":"# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license\n\n\"\"\"trio async I/O library query support\"\"\"\n\nimport contextlib\nimport socket\nimport struct\nimport time\nimport trio\nimport trio.socket # type: ignore\n\nimport dns.exception\nimport dns.inet\nimport dns.name\nimport dns.message\nimport dns.query\nimport dns.rcode\nimport dns.rdataclass\nimport dns.rdatatype\n\n# import query symbols for compatibility and brevity\nfrom dns.query import ssl, UnexpectedSource, BadResponse\n\n# Function used to create a socket. 
Can be overridden if needed in special\n# situations.\nsocket_factory = trio.socket.socket\n\nasync def send_udp(sock, what, destination):\n    \"\"\"Asynchronously send a DNS message to the specified UDP socket.\n\n    *sock*, a ``trio.socket.socket``.\n\n    *what*, a ``bytes`` or ``dns.message.Message``, the message to send.\n\n    *destination*, a destination tuple appropriate for the address family\n    of the socket, specifying where to send the query.\n\n    Returns an ``(int, float)`` tuple of bytes sent and the sent time.\n    \"\"\"\n\n    if isinstance(what, dns.message.Message):\n        what = what.to_wire()\n    sent_time = time.time()\n    n = await sock.sendto(what, destination)\n    return (n, sent_time)\n\n\nasync def receive_udp(sock, destination, ignore_unexpected=False,\n                      one_rr_per_rrset=False, keyring=None, request_mac=b'',\n                      ignore_trailing=False, raise_on_truncation=False):\n    \"\"\"Asynchronously read a DNS message from a UDP socket.\n\n    *sock*, a ``trio.socket.socket``.\n\n    *destination*, a destination tuple appropriate for the address family\n    of the socket, specifying where the associated query was sent.\n\n    *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from\n    unexpected sources.\n\n    *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own\n    RRset.\n\n    *keyring*, a ``dict``, the keyring to use for TSIG.\n\n    *request_mac*, a ``bytes``, the MAC of the request (for TSIG).\n\n    *ignore_trailing*, a ``bool``. If ``True``, ignore trailing\n    junk at end of the received message.\n\n    *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if\n    the TC bit is set.\n\n    Raises if the message is malformed, if network errors occur, or if\n    there is a timeout.\n\n    Returns a ``dns.message.Message`` object.\n    \"\"\"\n\n    wire = b''\n    while True:\n        (wire, from_address) = await sock.recvfrom(65535)\n        if dns.query._addresses_equal(sock.family, from_address,\n                                      destination) or \\\n           (dns.inet.is_multicast(destination[0]) and\n            from_address[1:] == destination[1:]):\n            break\n        if not ignore_unexpected:\n            raise UnexpectedSource('got a response from '\n                                   '%s instead of %s' % (from_address,\n                                                         destination))\n    received_time = time.time()\n    r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,\n                              one_rr_per_rrset=one_rr_per_rrset,\n                              ignore_trailing=ignore_trailing,\n                              raise_on_truncation=raise_on_truncation)\n    return (r, received_time)\n\nasync def udp(q, where, port=53, source=None, source_port=0,\n              ignore_unexpected=False, one_rr_per_rrset=False,\n              ignore_trailing=False, raise_on_truncation=False,\n              sock=None):\n    \"\"\"Asynchronously return the response obtained after sending a query\n    via UDP.\n\n    *q*, a ``dns.message.Message``, the query to send\n\n    *where*, a ``str`` containing an IPv4 or IPv6 address, where\n    to send the message.\n\n    *port*, an ``int``, the port to send the message to. The default is 53.\n\n    *source*, a ``str`` containing an IPv4 or IPv6 address, specifying\n    the source address. The default is the wildcard address.\n\n    *source_port*, an ``int``, the port from which to send the message.\n    The default is 0.\n\n    *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from\n    unexpected sources.\n\n    *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own\n    RRset.\n\n    *ignore_trailing*, a ``bool``. If ``True``, ignore trailing\n    junk at end of the received message.\n\n    *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if\n    the TC bit is set.\n\n    *sock*, a ``trio.socket.socket``, or ``None``, the socket to use\n    for the query. 
If ``None``, the default, a socket is created. if\n a socket is provided, the *source* and *source_port* are ignored.\n\n Returns a ``dns.message.Message``.\n\n \"\"\"\n\n wire = q.to_wire()\n (af, destination, source) = \\\n dns.query._destination_and_source(None, where, port, source,\n source_port)\n # We can use an ExitStack here as exiting a trio.socket.socket does\n # not await.\n with contextlib.ExitStack() as stack:\n if sock:\n s = sock\n else:\n s = stack.enter_context(socket_factory(af, socket.SOCK_DGRAM, 0))\n if source is not None:\n await s.bind(source)\n (_, sent_time) = await send_udp(s, wire, destination)\n (r, received_time) = await receive_udp(s, destination,\n ignore_unexpected,\n one_rr_per_rrset, q.keyring,\n q.mac, ignore_trailing,\n raise_on_truncation)\n if not q.is_response(r):\n raise BadResponse\n r.time = received_time - sent_time\n return r\n\nasync def udp_with_fallback(q, where, timeout=None, port=53, source=None,\n source_port=0, ignore_unexpected=False,\n one_rr_per_rrset=False, ignore_trailing=False):\n \"\"\"Return the response to the query, trying UDP first and falling back\n to TCP if UDP results in a truncated response.\n\n *q*, a ``dns.message.Message``, the query to send\n\n *where*, a ``str`` containing an IPv4 or IPv6 address, where\n to send the message.\n\n *port*, an ``int``, the port send the message to. The default is 53.\n\n *source*, a ``str`` containing an IPv4 or IPv6 address, specifying\n the source address. The default is the wildcard address.\n\n *source_port*, an ``int``, the port from which to send the message.\n The default is 0.\n\n *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from\n unexpected sources.\n\n *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own\n RRset.\n\n *ignore_trailing*, a ``bool``. If ``True``, ignore trailing\n junk at end of the received message.\n\n Returns a (``dns.message.Message``, tcp) tuple where tcp is ``True``\n if and only if TCP was used.\n \"\"\"\n try:\n response = await udp(q, where, port, source, source_port,\n ignore_unexpected, one_rr_per_rrset,\n ignore_trailing, True)\n return (response, False)\n except dns.message.Truncated:\n response = await stream(q, where, False, port, source, source_port,\n one_rr_per_rrset, ignore_trailing)\n\n return (response, True)\n\n# pylint: disable=redefined-outer-name\n\nasync def send_stream(stream, what):\n \"\"\"Asynchronously send a DNS message to the specified stream.\n\n *stream*, a ``trio.abc.Stream``.\n\n *what*, a ``bytes`` or ``dns.message.Message``, the message to send.\n\n Returns an ``(int, float)`` tuple of bytes sent and the sent time.\n \"\"\"\n\n if isinstance(what, dns.message.Message):\n what = what.to_wire()\n l = len(what)\n # copying the wire into tcpmsg is inefficient, but lets us\n # avoid writev() or doing a short write that would get pushed\n # onto the net\n stream_message = struct.pack(\"!H\", l) + what\n sent_time = time.time()\n await stream.send_all(stream_message)\n return (len(stream_message), sent_time)\n\nasync def read_exactly(stream, count):\n \"\"\"Read the specified number of bytes from stream. 
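    (Aside - illustrative, not part of the original docstring: DNS over a
    stream frames every message with a two-byte big-endian length, which is
    why callers of this helper first read exactly 2 bytes and then exactly
    that many payload bytes, e.g.

        frame = struct.pack(\"!H\", 5) + b'hello'   # length prefix + payload
        (length,) = struct.unpack(\"!H\", frame[:2])
        assert length == 5 and frame[2:2 + length] == b'hello'

    )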
Keep trying until we\n    either get the desired amount, or we hit EOF.\n    \"\"\"\n    s = b''\n    while count > 0:\n        n = await stream.receive_some(count)\n        if n == b'':\n            raise EOFError\n        count = count - len(n)\n        s = s + n\n    return s\n\nasync def receive_stream(stream, one_rr_per_rrset=False, keyring=None,\n                         request_mac=b'', ignore_trailing=False):\n    \"\"\"Read a DNS message from a stream.\n\n    *stream*, a ``trio.abc.Stream``.\n\n    *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own\n    RRset.\n\n    *keyring*, a ``dict``, the keyring to use for TSIG.\n\n    *request_mac*, a ``bytes``, the MAC of the request (for TSIG).\n\n    *ignore_trailing*, a ``bool``. If ``True``, ignore trailing\n    junk at end of the received message.\n\n    Raises if the message is malformed, if network errors occur, or if\n    there is a timeout.\n\n    Returns a ``dns.message.Message`` object.\n    \"\"\"\n\n    ldata = await read_exactly(stream, 2)\n    (l,) = struct.unpack(\"!H\", ldata)\n    wire = await read_exactly(stream, l)\n    received_time = time.time()\n    r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac,\n                              one_rr_per_rrset=one_rr_per_rrset,\n                              ignore_trailing=ignore_trailing)\n    return (r, received_time)\n\nasync def stream(q, where, tls=False, port=None, source=None, source_port=0,\n                 one_rr_per_rrset=False, ignore_trailing=False,\n                 stream=None, ssl_context=None, server_hostname=None):\n    \"\"\"Return the response obtained after sending a query using TCP or TLS.\n\n    *q*, a ``dns.message.Message``, the query to send.\n\n    *where*, a ``str`` containing an IPv4 or IPv6 address, where\n    to send the message.\n\n    *tls*, a ``bool``. If ``False``, the default, the query will be\n    sent using TCP and *port* will default to 53. If ``True``, the\n    query is sent using TLS, and *port* will default to 853.\n\n    *port*, an ``int``, the port to send the message to. The default is as\n    specified in the description for *tls*.\n\n    *source*, a ``str`` containing an IPv4 or IPv6 address, specifying\n    the source address. The default is the wildcard address.\n\n    *source_port*, an ``int``, the port from which to send the message.\n    The default is 0.\n\n    *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own\n    RRset.\n\n    *ignore_trailing*, a ``bool``. If ``True``, ignore trailing\n    junk at end of the received message.\n\n    *stream*, a ``trio.abc.Stream``, or ``None``, the stream to use for\n    the query. If ``None``, the default, a stream is created. If a\n    stream is provided, it must be connected, and the *where*, *port*,\n    *tls*, *source*, *source_port*, *ssl_context*, and\n    *server_hostname* parameters are ignored.\n\n    *ssl_context*, an ``ssl.SSLContext``, the context to use when establishing\n    a TLS connection. If ``None``, the default, creates one with the default\n    configuration. If this value is not ``None``, then the *tls* parameter\n    is treated as if it were ``True`` regardless of its value.\n\n    *server_hostname*, a ``str`` containing the server's hostname. The\n    default is ``None``, which means that no hostname is known, and if an\n    SSL context is created, hostname checking will be disabled.\n\n    Returns a ``dns.message.Message``.\n\n    \"\"\"\n    if ssl_context is not None:\n        tls = True\n    if port is None:\n        if tls:\n            port = 853\n        else:\n            port = 53\n    wire = q.to_wire()\n    # We'd like to be able to use an AsyncExitStack here, because\n    # unlike closing a socket, closing a stream requires an await, but\n    # that's a 3.7 feature, so we are forced to try ... 
finally.\n sock = None\n s = None\n begin_time = time.time()\n try:\n if stream:\n #\n # Verify that the socket is connected, as if it's not connected,\n # it's not writable, and the polling in send_tcp() will time out or\n # hang forever.\n if isinstance(stream, trio.SSLStream):\n tsock = stream.transport_stream.socket\n else:\n tsock = stream.socket\n tsock.getpeername()\n s = stream\n else:\n (af, destination, source) = \\\n dns.query._destination_and_source(None, where, port, source,\n source_port)\n sock = socket_factory(af, socket.SOCK_STREAM, 0)\n if source is not None:\n await sock.bind(source)\n await sock.connect(destination)\n s = trio.SocketStream(sock)\n sock = None\n if tls and ssl_context is None:\n ssl_context = ssl.create_default_context()\n if server_hostname is None:\n ssl_context.check_hostname = False\n if ssl_context:\n s = trio.SSLStream(s, ssl_context,\n server_hostname=server_hostname)\n await send_stream(s, wire)\n (r, received_time) = await receive_stream(s, one_rr_per_rrset,\n q.keyring, q.mac,\n ignore_trailing)\n if not q.is_response(r):\n raise BadResponse\n r.time = received_time - begin_time\n return r\n finally:\n if sock:\n sock.close()\n if s and s != stream:\n await s.aclose()\n","sub_path":"dns/trio/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":13718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"383753535","text":"# Import numpy and cv2 package\nimport cv2\nimport numpy as np\n\n#create a white image(square box) of dimension 250X250\nimg= np.ones((250,250), dtype=np.uint8)*255\n\n#Make square boxes of 50X50\ncv2.line(img,(0,50),(250,50),0,1)\ncv2.line(img,(0,100),(250,100),0,1)\ncv2.line(img,(0,150),(250,150),0,1)\ncv2.line(img,(0,200),(250,200),0,1)\n\ncv2.line(img,(50,0),(50,250),0,1)\ncv2.line(img,(100,0),(100,250),0,1)\ncv2.line(img,(150,0),(150,250),0,1)\ncv2.line(img,(200,0),(200,250),0,1)\n\n#draw lines of 45 degrees on lower half of square\ncv2.line(img,(0,0),(250,250),0,1)\ncv2.line(img,(0,50),(200,250),0,1)\ncv2.line(img,(0,100),(150,250),0,1)\ncv2.line(img,(0,150),(100,250),0,1)\n\n#Display the original image\ncv2.imshow('Image',img)\n\n# kernel we are using to detect 45 degree lines\nkernel = np.array([[2,-1,-1],[-1,2,-1],[-1,-1,2]])/12\n\n# Convolving the image with the kernel using the filter2d() function of the OpenCV library\n_45degreelines = cv2.filter2D(src=img,ddepth=cv2.CV_8U,kernel=kernel)\n\n#Displaying the image in which we have detected the 45 degrees lines only\ncv2.imshow('Output Image with 45 degree lines only ',_45degreelines )\n\n# Display the images and wait till any key is pressed\ncv2.waitKey(0)\n# Destroy all the windows created by the imshow() function of the OpenCV\ncv2.destroyAllWindows()\n","sub_path":"OpenCv-Assignment1/Code/Prob9.py","file_name":"Prob9.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"429059354","text":"from datetime import datetime\nfrom unittest import mock\n\nfrom prowler.providers.aws.services.backup.backup_service import BackupReportPlan\n\nAWS_REGION = \"eu-west-1\"\n\n\nclass Test_backup_reportplans_exist:\n def test_no_backup_report_plans(self):\n backup_client = mock.MagicMock\n backup_client.region = AWS_REGION\n backup_client.backup_report_plans = []\n with mock.patch(\n \"prowler.providers.aws.services.backup.backup_service.Backup\",\n new=backup_client,\n ):\n # Test Check\n from 
prowler.providers.aws.services.backup.backup_reportplans_exist.backup_reportplans_exist import (\n backup_reportplans_exist,\n )\n\n check = backup_reportplans_exist()\n result = check.execute()\n\n assert len(result) == 1\n assert result[0].status == \"FAIL\"\n assert result[0].status_extended == \"No Backup Report Plan Exist\"\n assert result[0].resource_id == \"No Backups\"\n assert result[0].resource_arn == \"\"\n assert result[0].region == AWS_REGION\n\n def test_one_backup_report_plan(self):\n backup_client = mock.MagicMock\n backup_client.region = AWS_REGION\n backup_client.backup_report_plans = [\n BackupReportPlan(\n arn=\"ARN\",\n region=AWS_REGION,\n name=\"MyBackupReportPlan\",\n last_attempted_execution_date=datetime(2015, 1, 1),\n last_successful_execution_date=datetime(2015, 1, 1),\n )\n ]\n\n with mock.patch(\n \"prowler.providers.aws.services.backup.backup_service.Backup\",\n new=backup_client,\n ):\n # Test Check\n from prowler.providers.aws.services.backup.backup_reportplans_exist.backup_reportplans_exist import (\n backup_reportplans_exist,\n )\n\n check = backup_reportplans_exist()\n result = check.execute()\n\n assert len(result) == 1\n assert result[0].status == \"PASS\"\n assert (\n result[0].status_extended\n == \"At least one backup report plan exists: \" + result[0].resource_id\n )\n assert result[0].resource_id == \"MyBackupReportPlan\"\n assert result[0].resource_arn == \"ARN\"\n assert result[0].region == AWS_REGION\n","sub_path":"tests/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist_test.py","file_name":"backup_reportplans_exist_test.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"425701078","text":"from flask_testing import TestCase\nfrom backend.api import create_app, db\nfrom backend.models.user import User\nfrom backend.models.user_role import UserRole\nfrom backend.models.session import Session\nfrom backend.utils.logs import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass APITestClient(object):\n def __init__(self, test_case, user):\n self.client = test_case.client\n self.session = Session(\n user=user,\n token=\"test-session-{id_}\".format(id_=user.id_)\n )\n self.user = user\n db.session.add(self.session)\n db.session.commit()\n\n def get(self, route):\n response = self.client.get(route, data={\n 'session_token': self.session.token,\n })\n assert response.status_code == 200, response.json\n return response.json\n\n def post(self, route, data):\n data = data.copy()\n data.update({\n 'session_token': self.session.token,\n })\n response = self.client.post(route, data=data)\n assert response.status_code == 201, response.json\n return response.json\n\n def delete(self, route):\n response = self.client.delete(route, data={\n 'session_token': self.session.token,\n })\n assert response.status_code == 202, response.json\n return response.json\n\nclass APITestCase(TestCase):\n def create_app(self):\n return create_app(test_mode=True)\n\n def setUp(self):\n logger.info('SET_UP', test_case=self)\n db.create_all()\n\n user = User(user_roles=[UserRole(role='user')])\n db.session.add(user)\n\n admin_user = User(user_roles=[UserRole(role='admin')])\n db.session.add(admin_user)\n\n db.session.commit()\n\n self.user_client = APITestClient(self, user=user)\n self.admin_client = APITestClient(self, user=admin_user)\n\n def tearDown(self):\n logger.info('TEAR_DOWN', test_case=self)\n 
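        # (Aside - illustrative, not from the original file: tests built on
        # APITestCase exercise routes through the pre-authenticated clients,
        # e.g.
        #     data = self.user_client.get('/some-route/')          # hypothetical route
        #     self.admin_client.post('/some-route/', {'k': 'v'})   # hypothetical route
        # each helper asserts the expected status code and returns the parsed
        # JSON body.)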
db.drop_all()\n","sub_path":"backend/utils/api_test_case.py","file_name":"api_test_case.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"628323821","text":"class Solution(object):\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n # start thinking at 8:58\n \n # rethink dp at 9:47\n # dp[i][j] = answer for word1[:i] and word2[:j]\n # dp[i][j] = \n # - dp[i-1][j] + 1\n # - dp[i][j-1] + 1\n # - dp[i-1][j-1] + 0 if a[i] == b[j] else 1\n # dp[i][0] = i, dp[0][j] = j\n # start coding at 10:40\n if len(word1) == 0: return len(word2);\n if len(word2) == 0: return len(word1);\n dp = [[0 for i in range(len(word2)+1)] for j in range(len(word1)+1)]; # so here range become dp[i] instead of dp[i-1]\n for i in range(len(word1)+1):\n dp[i][0] = i;\n for j in range(len(word2)+1):\n dp[0][j] = j;\n for i in range(1, len(word1)+1):\n for j in range(1, len(word2)+1):\n dp[i][j] = min(dp[i-1][j] + 1, dp[i][j-1] + 1, dp[i-1][j-1] + (0 if word1[i-1] == word2[j-1] else 1));\n \n return dp[len(word1)][len(word2)];","sub_path":"1-100/71-80/python/72_edit_distance.py","file_name":"72_edit_distance.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"295053622","text":"from django.test import TestCase\nfrom django.core.urlresolvers import resolve, reverse\n\n\nclass TestRequestsHistoryURLs(TestCase):\n def test_requests_history_view_url(self):\n \"\"\"\n Ensures that a URL pattern name `requests_history` is valid and the\n pattern is resolved to `RequestsHistoryView`\n \"\"\"\n requests_history = resolve('/requests_history/')\n\n self.assertEqual(reverse('requests_history'), '/requests_history/',\n 'A view name `requests_history` should be reversed '\n 'to the URL `/requests_history/`')\n self.assertEqual(requests_history.func.__name__, 'RequestsHistoryView',\n 'Should be resolved to `RequestsHistoryView`')\n\n def test_request_pulling_view_url(self):\n \"\"\"\n Ensures that a URL pattern name `pull_new_requests` is valid\n and the pattern is resolved to `RequestsPullingView`\n \"\"\"\n pulling = resolve('/pull_new_requests/')\n\n self.assertEqual(reverse('pull_new_requests'),\n '/pull_new_requests/',\n 'A view name `pull_new_requests` should be '\n 'reversed to the URL `/pull_new_requests/`')\n self.assertEqual(pulling.func.__name__, 'RequestsPullingView',\n 'Should be resolved to `RequestsPullingView`')\n","sub_path":"apps/requests_history/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"93221658","text":"class Solution(object):\n def maxIncreaseKeepingSkyline(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n r = 0\n index = index2 = 0\n for i in grid:\n for j in i:\n r += int(min(max(grid[index]), max([x[index2] for x in grid]))) - j\n index2 = (index2 + 1) % len(i)\n index += 1\n return r\n\n\n\ngrid = [[3,0,8,4],\n [2,4,5,7],\n [9,2,6,3],\n [0,3,1,0]]\nres = Solution().maxIncreaseKeepingSkyline(grid)\nprint(res)","sub_path":"maxIncreaseKeepingSkyline.py","file_name":"maxIncreaseKeepingSkyline.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"283609384","text":"#!/usr/bin/env python\n\nimport 
os\n\n# customize as needed\ndef is_hz_file(filename):\n    return 'hz_' in filename\n\ndef createHTML(input_dir, output_html, predicate=None):\n    # NOTE: the body of this function appears truncated in the original (only\n    # a bare newline was written). The listing loop below is a minimal,\n    # assumed reconstruction: one link per file, filtered by the optional\n    # predicate (e.g. is_hz_file above).\n    files = os.listdir(input_dir)\n    files.sort(reverse=True)\n    with open(output_html, \"w\") as f:\n        for name in files:\n            if predicate is None or predicate(name):\n                f.write('<a href=\"%s\">%s</a><br>' % (name, name))\n        f.write(\"\n\")\n\nif __name__ == \"__main__\":\n    createHTML('/misc/yoda/www/plots/screenshots', '/misc/yoda/www/plots/user/pims/screenshots.html', predicate=None)\n","sub_path":"utils/html_listdir.py","file_name":"html_listdir.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"10353284","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport cv2\nimport time\n\nmodel_path = 'models/pb/DRAGAN_mnist-2700.pb'\n\nimage_height = 28\nimage_width = 28\nprior_size=62\none_hot=np.eye(10)\n\ndef eval():\n    sess = tf.Session()\n    with tf.gfile.FastGFile(model_path, \"rb\") as fr:\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(fr.read())\n        sess.graph.as_default()\n        tf.import_graph_def(graph_def, name=\"\")\n\n    sess.run(tf.global_variables_initializer())\n\n    prior_input = sess.graph.get_tensor_by_name('z_prior:0')\n    generated_output = sess.graph.get_tensor_by_name('generated_output:0')\n    is_training = sess.graph.get_tensor_by_name('is_training:0')\n    while True:\n        z_prior = np.random.uniform(-1, 1, size=(1,prior_size))\n        image_output = sess.run(generated_output,feed_dict={\n            prior_input:z_prior,is_training:False})\n        image_reshape_org = image_output[0].reshape((image_height,image_width))\n        image_reshape = image_reshape_org * 255.0\n        image_show = image_reshape.astype(np.uint8)\n\n        image_show=cv2.resize(image_show,(image_height*2,image_width*2))\n        cv2.imshow(\"image_fine\", image_show)\n        cv2.waitKey(0)\n\n\n\nif __name__ == '__main__':\n    eval()","sub_path":"Mnist_Based/DRAGAN/DRAGAN_mnist/eval_DRAGAN.py","file_name":"eval_DRAGAN.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"418048316","text":"# -*- coding: utf-8 -*-\n'''\n-------------------------------------------------\n    Name:     SSEBop_ET_temporal_plot.py\n    Purpose:\n\n    Author:   Matt Bromley\n    Created:  Oct 9 2018\n    Copyright:   (c) DRI\n    Python:   3.6\n-------------------------------------------------\n'''\nfrom collections import OrderedDict\nimport os\nimport pandas as pd\n#import numpy as np\nfrom bokeh.plotting import ColumnDataSource, figure, show, output_file\nfrom bokeh.layouts import column, gridplot\nfrom bokeh.io import reset_output, export_svgs, export_png\nfrom bokeh.models import HoverTool\nimport sys\n\n# Inputs\naoi = \"Railroad\"\n# aoi = \"Quinn\"\n# aoi = \"Carson\"\n\n# plot_value = \"Mean ET Actual\"\n# plot_value = \"Mean Water Deficit\"\nplot_value = \"Mean ET Reference\"\n\n# plot_value = \"mean_et_actual\"\n# plot_value = \"mean_water_deficit\"\n# plot_value = \"mean_et_reference\"\n\nname_dict = {\"Mean ET Actual\":\"mean_et_actual\", \"Mean Water Deficit\":\"mean_water_deficit\", \"Mean ET Reference\":\"mean_et_reference\"}\n\n# mean_values = ['mean_count', 'mean_et_actual', 'mean_et_fraction', 'mean_et_reference','mean_water deficit']\n\ncwd = os.getcwd()\nmissing_value = -9999\n# TODO: Rename this variable\nadjuster = -1\n\n# image output flags\npng_output_flag = True\nsvg_output_flag = False\n\n# if aoi == \"Quinn\":\n#     if plot_value == \"Mean ET Actual\":\n#         input_dir = 
r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_actual_temporal_plot_input\"\n# if plot_value == \"Mean Water Deficit\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_water deficit_temporal_plot_input\"\n# if plot_value == 'Mean ET Reference':\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_reference_temporal_plot_input\"\n#\n# if aoi == \"Carson\":\n# if plot_value == \"Mean ET Actual\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_actual_temporal_plot_input\"\n# if plot_value == \"Mean Water Deficit\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_water deficit_temporal_plot_input\"\n# if plot_value == 'Mean ET Reference':\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_reference_temporal_plot_input\"\n#\n# if aoi == \"Railroad\":\n# if plot_value == \"Mean ET Actual\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_actual_temporal_plot_input\"\n# if plot_value == \"Mean Water Deficit\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_water deficit_temporal_plot_input\"\n# if plot_value == 'Mean ET Reference':\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_reference_temporal_plot_input\"\n\ninput_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_{}_temporal_plot_input\".format(aoi, name_dict[plot_value])\n\n# if plot_value == \"Mean ET Actual\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_{}_temporal_plot_input\".format(aoi)\n# if plot_value == \"Mean Water Deficit\":\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_water deficit_temporal_plot_input\".format(aoi)\n# if plot_value == 'Mean ET Reference':\n# input_dir = r\"D:\\Personal-Python\\New_Zonal_Stats\\{}_mean_et_reference_temporal_plot_input\".format(aoi)\n\nfile_list = []\nfor file in os.listdir(input_dir):\n if file.endswith(\"_temporal_plot_input.csv\"):\n print(file)\n file_list.append(file)\nif len(file_list) == 0:\n print(\"No files within input directory match expected format.\")\n\n##red to yellow color ramp\nif plot_value == \"Mean Water Deficit\":\n # Manual min/max\n min_value = 1\n max_value = 250\n\n colors = [\"#FA1700\",\"#FA2E00\",\"#FB4500\",\"#FB5C00\",\"#FC7300\",\n \"#FC8B00\",\"#FDA200\",\"#FDB900\",\"#FED000\",\"#FEE700\",\n \"#FFFF00\",\"#E7FF00\",\"#D0FF00\",\"#B9FF00\",\"#A2FF00\",\n \"#8BFF00\",\"#73FF00\",\"#5CFF00\",\"#45FF00\",\"#2EFF00\",\n \"#17FF00\",\"#00FF00\"]\n colors.reverse()\n\nif plot_value == \"Mean ET Reference\":\n # Manual min/max\n min_value = 1\n max_value = 300\n\n colors = [\"#ffffd9\", \"#edf8b1\", \"#c7e9b4\", \"#7fcdbb\", \"#41b6c4\",\n \"#1d91c0\", \"#225ea8\", \"#253494\", \"#081d58\"]\n # file_list = file_list[1]\n\nif plot_value == \"Mean ET Actual\":\n # Manual min/max\n min_value = 1\n max_value = 200\n\n colors = [\"#D2B48C\", \"#D6BB8C\", \"#DAC28C\", \"#DEC98C\", \"#E3D08C\",\n \"#E7D78C\", \"#EBDE8C\", \"#F0E68C\", \"#C0D170\", \"#90BD54\",\n \"#60A838\", \"#30941C\", \"#008000\"]\n\n##number of colors within the color ramp\ncolor_num = len(colors)\n\nprint(\"Area of Interest:\\n {}\\n\".format(aoi))\nprint(\"Minimum {}: {}\".format(plot_value, min_value))\nprint(\"Maximum {}: {}\".format(plot_value, max_value))\n\n\nif min_value == False:\n min_value = data.min(axis=1,skipna=True,numeric_only=True).astype(int)\n min_value = min_value.min()\nif max_value == False:\n max_value = data.max(axis=1,skipna=True,numeric_only=True).astype(int)\n max_value = max_value.max()\n\n# 
print(file_list)\nprint(\"\nZones:\")\nfor input_file in file_list:\n\n    # Set up the data for plotting. We will need to have values for every\n    # pair of year/month names. Map the rate to a color.\n    month = []\n    year = []\n    color = []\n    value = []\n\n    ###Read in the CSV file\n    # str.strip() removes a set of characters from both ends rather than a\n    # suffix, so it can also eat leading/trailing letters of the zone name;\n    # replace() removes exactly the expected suffix.\n    zone_name = input_file.replace('_temporal_plot_input.csv', '')\n    print(zone_name)\n    inputdf = pd.read_csv(os.path.join(input_dir, input_file), na_values=missing_value)\n    years = list(inputdf['year'])\n    months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',\n              'SEP', 'OCT', 'NOV', 'DEC']\n    start_year = inputdf['year'].min()\n    end_year = inputdf['year'].max()\n    inputdf = inputdf.set_index('year')\n    inputdf.columns = months\n\n    inputdf = inputdf.fillna(value=0)\n    scale = int((max_value-min_value)/len(colors))+adjuster\n    for y in years:\n        for m in months:\n            month.append(m)\n            year.append(y)\n            if inputdf[m][y] == 0:\n                monthly_value = 0\n                value.append(\"No Data\")\n                color.append(\"#666666\")\n            else:\n                monthly_value = int(inputdf[m][y])\n                value.append(str(monthly_value)+\" mm\")\n                scale_value = int(monthly_value/scale)\n                color.append(colors[min(scale_value, (color_num-1))])\n\n    source = ColumnDataSource(\n        data=dict(month=month, year=year, color=color, value=value))\n    TOOLS = \"hover,save\"\n    s = figure(title=\"Zone Name: {} Value: {}\".format(zone_name,plot_value),\n               x_range=(start_year,end_year), y_range=list(reversed(months)),\n               x_axis_location=\"below\", plot_width=1200, plot_height=400,\n               toolbar_location=\"right\", tools=TOOLS)\n\n    s.rect(\"year\", \"month\", 1, 1, source=source,\n           color=\"color\", line_color=None)\n\n    s.grid.grid_line_color = None\n    s.axis.axis_line_color = None\n    s.axis.major_tick_line_color = None\n    s.axis.major_label_text_font_size = \"10pt\"\n    s.axis.major_label_standoff = 15\n    s.xaxis.major_label_orientation = 0#np.pi/3\n\n    hover = s.select(dict(type=HoverTool))\n    hover.tooltips = OrderedDict([\n        ('Date', '@month @year'),\n        (plot_value, '@value')])\n\n    # TODO: output to one file (with many plots)\n    # TODO: make sure that the plotted variable is in the name\n\n# PNG and SVG output\n    if png_output_flag:\n        p = s\n        png_output_name = str(zone_name+\"_\"+str(start_year)+\"_\"+str(end_year)+\"_plot.png\")\n        png_output_path = os.path.join(input_dir, png_output_name)\n        export_png(p, filename=png_output_path)\n    if svg_output_flag:\n        p = s\n        p.output_backend = \"svg\"\n        svg_output_name = str(zone_name+\"_\"+str(start_year)+\"_\"+str(end_year)+\"_plot.svg\")\n        svg_output_path = os.path.join(input_dir, svg_output_name)\n        export_svgs(p, svg_output_path)\n\n    # html_output_file_name = str(zone_name+\"_\"+str(start_year)+\"_\"+str(end_year)+\"_plot.html\")\n    # html_output_file_dir = os.path.join(input_dir, html_output_file_name)\n    # output_file(html_output_file_dir)\n    # show(s)\n    # reset_output()\n    # del s\n\n    all_s = s\n    # show(s)\n    # reset_output()\n    # del s\n\n    #This causes many windows to open, but also results in all plots in one html\n    html_output_file_name = str(aoi+\"_ALL_\"+str(start_year)+\"_\"+str(end_year)+\"_plot.html\")\n    html_output_file_dir = os.path.join(input_dir, html_output_file_name)\n    output_file(html_output_file_dir)\n    show(s)\n\n    # sys.exit()\n\n","sub_path":"Old_Zonal_Stats/archive_29JAN/archive/SSEBop_ET_temporal_plot_v2.py","file_name":"SSEBop_ET_temporal_plot_v2.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"173480027","text":"import argparse\nimport os\nimport random\n\nimport 
numpy as np\nimport torch\nimport sys\nsys.path.append(\"./\")\nfrom mmcv import Config\nfrom engineer.models.builder import build_backbone\nimport engineer.utils.logging as logging\nfrom engineer import __version__,__author__\nimport engineer.utils.misc as misc\nfrom engineer.datasets.builder import build_dataset\nfrom engineer.core.train_NewPCycle import train_model\nimport os\n\nlogger = logging.get_logger(__name__)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a Motion GCN Module')\n parser.add_argument('--config', help='train config file path')\n parser.add_argument('--work_dir', help='the dir to save logs and models')\n parser.add_argument(\n '--resume_from', help='the checkpoint file to resume from')\n parser.add_argument(\n '--validate',\n action='store_true',\n help='whether to evaluate the checkpoint during training')\n parser.add_argument('--launcher',default='none',type=str)\n parser.add_argument(\n '--gpus',\n type=int,\n default=1,\n help='number of gpus to use '\n '(only applicable to non-distributed training)')\n parser.add_argument('--seed', type=int, default=None, help='random seed')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument(\n '--autoscale-lr',\n action='store_true',\n help='automatically scale lr with the number of gpus')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n return args\n\n\ndef set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\ndef main():\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n # update configs according to CLI args\n if args.work_dir is not None:\n cfg.work_dir = args.work_dir\n if args.resume_from is not None:\n cfg.resume_from = args.resume_from\n cfg.gpus = args.gpus\n\n if args.autoscale_lr:\n # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)\n cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n raise NotImplementedError(\"distributed Training is not necessary Here\")\n cfg.checkpoints = os.path.join(cfg.checkpoints,cfg.name)\n\n if not os.path.exists(cfg.checkpoints):\n os.mkdir(cfg.checkpoints)\n\n\n logging.setup_logging()\n\n logger.info('Distributed training: {}'.format(distributed))\n logger.info('GCN_Motion Version: {}\\t Author: {}'.format(__version__,__author__))\n logger.info('Config: {}'.format(cfg.text))\n # set random seeds\n if args.seed is not None:\n logger.info('Set random seed to {}'.format(args.seed))\n set_random_seed(args.seed)\n\n #build model and print model info\n model = build_backbone(cfg.model)\n\n misc.log_model_info(model)\n\n #optimizer build\n #follow the paper optimize we use here\n if cfg.optim_para['optimizer']['type'] == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(), lr=cfg.optim_para['optimizer']['lr'])\n\n #datasets build\n test_datasets=dict()\n for act in cfg.actions['all']:\n cfg.data.test.actions=act\n test_datasets[act] = build_dataset(cfg.data.test)\n val_dataset = build_dataset(cfg.data.val)\n train_dataset = build_dataset(cfg.data.train)\n logger.info(\">>> data loaded !\")\n logger.info(\">>> train data {}\".format(train_dataset.__len__()))\n logger.info(\">>> validation data 
{}\".format(val_dataset.__len__()))\n\n    # add an attribute for visualization convenience\n    train_model(\n        model,\n        [train_dataset,val_dataset,test_datasets],\n        cfg,\n        distributed=distributed,\n        optimizer = optimizer\n    )\n\n\nif __name__ == '__main__':\n    main()","sub_path":"tools/train_NewPCycle.py","file_name":"train_NewPCycle.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"150379820","text":"# Created by liu on 2016/3/31.\n\n# Function parameters\n\n# def power(x):\n#     return x * x\n\n# For the power(x) function, the parameter x is a positional parameter.\n# When we call power, we must pass exactly one argument, x:\n\n# print(power(12)) # 144\n\ndef power(x,n): # The modified power(x, n) has two parameters, x and n; both are positional, and the two values passed in a call are bound to x and n in positional order.\n    s = 1\n    while n > 0:\n        s = s * x\n        n = n - 1\n    return s\n\nprint(power(5,3))\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Default parameters\n\ndef power(x,n = 2): # calling power(x) is now equivalent to calling power(x, 2):\n    s = 1\n    while n > 0:\n        s = s * x\n        n = n - 1\n    return s\n\nprint(power(5))\n\n# First, required parameters must come before default parameters, otherwise the Python interpreter reports an error (think about why a default parameter cannot precede a required one);\n# Second, how to choose default parameters:\n# when a function has several parameters, put the frequently-changing ones first and the rarely-changing ones last; the rarely-changing ones can then be given defaults.\n\ndef enroll(name, gender, age=6, city='Beijing'):\n    print('name:', name)\n    print('gender:', gender)\n    print('age:', age)\n    print('city:', city)\n\n# With several default parameters, a call can supply them in order,\n# e.g. enroll('Bob', 'M', 7): besides name and gender, the last argument applies to age, and city, not being supplied, keeps its default value.\n#\n# Some defaults can also be supplied out of order, in which case the parameter name must be written out.\n# e.g. enroll('Adam', 'M', city='Tianjin'): city uses the passed-in value while the other defaults stay in effect.\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Var-positional (variable) arguments\n\n\n# def calc(numbers):\n#     s = 0;\n#     for n in numbers:\n#         s = s + n * n\n#     return s\n\n\n# print(calc((1,2,3))) # when calling, a list or tuple has to be assembled first:\n# To simplify the call to calc(1,2,3,4), without wrapping in a list or tuple, prefix the parameter with a *\n# Inside the function, numbers is received as a tuple, so the body is completely unchanged; but the call may now pass any number of arguments, including zero:\n\n\ndef calc(*numbers):\n    s = 0;\n    for n in numbers:\n        s = s + n * n\n    return s\n\n\nprint(calc(1,2,3,4))\n\n# If you already have a list or tuple\n\nnum = (1,3,4)\n\n# print(calc(num)) # error: can't multiply sequence by non-int of type 'tuple'\n#\n\nprint(calc(num[0],num[1],num[2]))\n\n# That works, of course, but it is clumsy, so Python lets you put a * in front of a list or tuple to pass its elements in as var-positional arguments:\n\nprint(calc(*num))\n\n# *num passes all elements of the list num in as var-positional arguments.\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Keyword arguments\n# Var-positional parameters accept zero or more arguments, automatically assembled into a tuple at call time.\n# Keyword parameters accept zero or more named arguments, automatically assembled into a dict inside the function.\n\ndef person(name,age,**kw):\n    print('name:',name,'age:',age,'other:',kw)\n\nperson('liu',18)\nperson('liu',18,city='boxing')\nperson('liu',18,city='boxing',school='yantai')\n\nother = {'city':'beijing','university':'yantai','job':'android'}\nperson('liu',18,**other)\n\n# **other passes every key-value pair of the dict other into the function's **kw parameter as keyword arguments; kw receives a dict.\n# Note that the dict kw receives is a copy, so changes to kw will not affect the dict outside the function.\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Keyword-only (named keyword) arguments\n# With plain keyword arguments, the caller may pass any keyword arguments whatsoever; which ones were actually passed has to be checked via kw inside the function.\n#\n# Taking person() as the example again, suppose we want to check for city and job:\n\n# def person(name, age, **kw):\n#     if 'city' in kw:\n#         # city was passed\n#         pass\n#     if 'job' in kw:\n#         # job was passed\n#         pass\n#     print('name:', name, 'age:', age, 'other:', kw)\n\n# To restrict the names of the keyword arguments, use keyword-only arguments, e.g. accept only city and job as keyword arguments. Such a function is defined as follows:\n\ndef person(name,age,*,city,job):\n    
print(name,age,'city:',city,'job:',job)\n\nperson('asdfasdf',234,city='asdfasdf',job=15)\n\n# Unlike the keyword parameter **kw, keyword-only parameters require the special separator *; parameters after the * are treated as keyword-only.\n# Keyword-only arguments must be passed with their parameter name, unlike positional arguments; a call that omits the name raises an error:\n# without the parameter name, the value would be treated as a positional argument\n# keyword-only parameters may have default values\n# When using keyword-only parameters, note carefully that * is not a parameter but a special separator. Without the *, the Python interpreter cannot distinguish positional from keyword-only parameters.\n\n\n# Parameters must be declared in this order: required parameters, default parameters, var-positional/keyword-only parameters, and keyword parameters. Var-positional and keyword-only parameters cannot appear together.\n\n\n# Python functions have very flexible parameter forms: they support both simple calls and very complex arguments.\n#\n# Default parameter values must be immutable objects; with a mutable default the program will have logic errors at runtime!\n#\n# Mind the syntax for defining var-positional and keyword parameters:\n#\n# *args is var-positional; args receives a tuple;\n#\n# **kw is the keyword parameter; kw receives a dict.\n#\n# And the syntax for passing them when calling:\n#\n# var-positional arguments can be passed directly, func(1, 2, 3), or assembled into a list/tuple first and unpacked via *args: func(*(1, 2, 3));\n#\n# keyword arguments can be passed directly, func(a=1, b=2), or assembled into a dict first and passed via **kw: func(**{'a': 1, 'b': 2}).\n#\n# *args and **kw are the conventional Python names; other names work, but it is best to stick to convention.\n#\n# Keyword-only parameters restrict which argument names the caller may use, and they can carry default values.\n#\n# Do not forget the * separator when defining them; otherwise you define positional parameters.","sub_path":"FunctionDemo2.py","file_name":"FunctionDemo2.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"193153552","text":"import json\nimport pytest\n\nfrom helpers.utils import tag_item_schema_validator, validate_response_for_invalid_site\nfrom tst.stack_api_test import StackAPIBaseTest\n\n\nclass TestGetTagsAPI(StackAPIBaseTest):\n    timeout = 60\n\n    @pytest.mark.timeout(timeout)\n    def test_tag_info(self):\n        site_name = self.get_test_data_for_site_name()\n        response = self.stack_exchange.get_tags(site_name)\n        if response.status_code == 200:\n            response = json.loads(response.text)\n            tag_item_schema_validator(response)\n        else:\n            assert response.status_code == 400\n            response = json.loads(response.text)\n            validate_response_for_invalid_site(response, site_name)\n\n\n","sub_path":"tst/test_get_tags_api.py","file_name":"test_get_tags_api.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"369062649","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2017 Google\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n# ----------------------------------------------------------------------------\n#\n#     ***     AUTO GENERATED CODE    ***    AUTO GENERATED CODE     ***\n#\n# ----------------------------------------------------------------------------\n#\n#     This file is automatically generated by Magic Modules and manual\n#     changes will be clobbered when the file is regenerated.\n#\n#     Please read more about how to change this file at\n#     https://www.github.com/GoogleCloudPlatform/magic-modules\n#\n# ----------------------------------------------------------------------------\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n################################################################################\n# Documentation\n################################################################################\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': [\"preview\"], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: gcp_compute_instance_group\ndescription:\n- Represents an Instance Group resource. Instance groups are self-managed and can\n  contain identical or different instances. Instance groups do not use an instance\n  template. Unlike managed instance groups, you must create and add instances to an\n  instance group manually.\nshort_description: Creates a GCP InstanceGroup\nversion_added: 2.6\nauthor: Google Inc. 
(@googlecloudplatform)\nrequirements:\n- python >= 2.6\n- requests >= 2.18.4\n- google-auth >= 1.3.0\noptions:\n state:\n description:\n - Whether the given object should exist in GCP\n choices:\n - present\n - absent\n default: present\n type: str\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n required: false\n type: str\n name:\n description:\n - The name of the instance group.\n - The name must be 1-63 characters long, and comply with RFC1035.\n required: false\n type: str\n named_ports:\n description:\n - Assigns a name to a port number.\n - 'For example: {name: \"http\", port: 80}.'\n - This allows the system to reference ports by the assigned name instead of a\n port number. Named ports can also contain multiple ports.\n - 'For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] Named ports\n apply to all instances in this instance group.'\n required: false\n type: list\n suboptions:\n name:\n description:\n - The name for this named port.\n - The name must be 1-63 characters long, and comply with RFC1035.\n required: false\n type: str\n port:\n description:\n - The port number, which can be a value between 1 and 65535.\n required: false\n type: int\n network:\n description:\n - The network to which all instances in the instance group belong.\n - 'This field represents a link to a Network resource in GCP. It can be specified\n in two ways. First, you can place a dictionary with key ''selfLink'' and value\n of your resource''s selfLink Alternatively, you can add `register: name-of-resource`\n to a gcp_compute_network task and then set this network field to \"{{ name-of-resource\n }}\"'\n required: false\n type: dict\n region:\n description:\n - The region where the instance group is located (for regional resources).\n required: false\n type: str\n subnetwork:\n description:\n - The subnetwork to which all instances in the instance group belong.\n - 'This field represents a link to a Subnetwork resource in GCP. It can be specified\n in two ways. 
First, you can place a dictionary with key ''selfLink'' and value\n of your resource''s selfLink Alternatively, you can add `register: name-of-resource`\n to a gcp_compute_subnetwork task and then set this subnetwork field to \"{{ name-of-resource\n }}\"'\n required: false\n type: dict\n zone:\n description:\n - A reference to the zone where the instance group resides.\n required: true\n type: str\n instances:\n description:\n - The list of instances associated with this InstanceGroup.\n - All instances must be created before being added to an InstanceGroup.\n - All instances not in this list will be removed from the InstanceGroup and will\n not be deleted.\n - Only the full identifier of the instance will be returned.\n required: false\n type: list\n version_added: 2.8\nextends_documentation_fragment: gcp\n'''\n\nEXAMPLES = '''\n- name: create a network\n gcp_compute_network:\n name: network-instancegroup\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: network\n\n- name: create a instance group\n gcp_compute_instance_group:\n name: test_object\n named_ports:\n - name: ansible\n port: 1234\n network: \"{{ network }}\"\n zone: us-central1-a\n project: test_project\n auth_kind: serviceaccount\n service_account_file: \"/tmp/auth.pem\"\n state: present\n'''\n\nRETURN = '''\ncreationTimestamp:\n description:\n - Creation timestamp in RFC3339 text format.\n returned: success\n type: str\ndescription:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\nid:\n description:\n - A unique identifier for this instance group.\n returned: success\n type: int\nname:\n description:\n - The name of the instance group.\n - The name must be 1-63 characters long, and comply with RFC1035.\n returned: success\n type: str\nnamedPorts:\n description:\n - Assigns a name to a port number.\n - 'For example: {name: \"http\", port: 80}.'\n - This allows the system to reference ports by the assigned name instead of a port\n number. 
Named ports can also contain multiple ports.\n - 'For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] Named ports\n apply to all instances in this instance group.'\n returned: success\n type: complex\n contains:\n name:\n description:\n - The name for this named port.\n - The name must be 1-63 characters long, and comply with RFC1035.\n returned: success\n type: str\n port:\n description:\n - The port number, which can be a value between 1 and 65535.\n returned: success\n type: int\nnetwork:\n description:\n - The network to which all instances in the instance group belong.\n returned: success\n type: dict\nregion:\n description:\n - The region where the instance group is located (for regional resources).\n returned: success\n type: str\nsubnetwork:\n description:\n - The subnetwork to which all instances in the instance group belong.\n returned: success\n type: dict\nzone:\n description:\n - A reference to the zone where the instance group resides.\n returned: success\n type: str\ninstances:\n description:\n - The list of instances associated with this InstanceGroup.\n - All instances must be created before being added to an InstanceGroup.\n - All instances not in this list will be removed from the InstanceGroup and will\n not be deleted.\n - Only the full identifier of the instance will be returned.\n returned: success\n type: list\n'''\n\n################################################################################\n# Imports\n################################################################################\n\nfrom ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict\nimport json\nimport re\nimport time\n\n################################################################################\n# Main\n################################################################################\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n module = GcpModule(\n argument_spec=dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n description=dict(type='str'),\n name=dict(type='str'),\n named_ports=dict(type='list', elements='dict', options=dict(name=dict(type='str'), port=dict(type='int'))),\n network=dict(type='dict'),\n region=dict(type='str'),\n subnetwork=dict(type='dict'),\n zone=dict(required=True, type='str'),\n instances=dict(type='list', elements='dict'),\n )\n )\n\n if not module.params['scopes']:\n module.params['scopes'] = ['https://www.googleapis.com/auth/compute']\n\n state = module.params['state']\n kind = 'compute#instanceGroup'\n\n fetch = fetch_resource(module, self_link(module), kind)\n changed = False\n\n if fetch:\n if state == 'present':\n if is_different(module, fetch):\n update(module, self_link(module), kind)\n fetch = fetch_resource(module, self_link(module), kind)\n changed = True\n else:\n delete(module, self_link(module), kind)\n fetch = {}\n changed = True\n else:\n if state == 'present':\n fetch = create(module, collection(module), kind)\n changed = True\n else:\n fetch = {}\n\n if fetch:\n instance = InstanceLogic(module)\n instance.run()\n fetch.update({'instances': instance.list_instances()})\n fetch.update({'changed': changed})\n\n module.exit_json(**fetch)\n\n\ndef create(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.post(link, resource_to_request(module)))\n\n\ndef update(module, link, kind):\n instance = InstanceLogic(module)\n instance.run()\n\n\ndef delete(module, link, kind):\n auth 
= GcpSession(module, 'compute')\n return wait_for_operation(module, auth.delete(link))\n\n\ndef resource_to_request(module):\n request = {\n u'kind': 'compute#instanceGroup',\n u'description': module.params.get('description'),\n u'name': module.params.get('name'),\n u'namedPorts': InstanceGroupNamedportsArray(module.params.get('named_ports', []), module).to_request(),\n u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),\n u'region': region_selflink(module.params.get('region'), module.params),\n u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'),\n }\n return_vals = {}\n for k, v in request.items():\n if v or v is False:\n return_vals[k] = v\n\n return return_vals\n\n\ndef fetch_resource(module, link, kind, allow_not_found=True):\n auth = GcpSession(module, 'compute')\n return return_if_object(module, auth.get(link), kind, allow_not_found)\n\n\ndef self_link(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}\".format(**module.params)\n\n\ndef collection(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups\".format(**module.params)\n\n\ndef return_if_object(module, response, kind, allow_not_found=False):\n # If not found, return nothing.\n if allow_not_found and response.status_code == 404:\n return None\n\n # If no content, return nothing.\n if response.status_code == 204:\n return None\n\n try:\n module.raise_for_status(response)\n result = response.json()\n except getattr(json.decoder, 'JSONDecodeError', ValueError):\n module.fail_json(msg=\"Invalid JSON response with error: %s\" % response.text)\n\n if navigate_hash(result, ['error', 'errors']):\n module.fail_json(msg=navigate_hash(result, ['error', 'errors']))\n\n return result\n\n\ndef is_different(module, response):\n request = resource_to_request(module)\n response = response_to_hash(module, response)\n\n # Remove all output-only from response.\n response_vals = {}\n for k, v in response.items():\n if k in request:\n response_vals[k] = v\n\n request_vals = {}\n for k, v in request.items():\n if k in response:\n request_vals[k] = v\n\n return GcpRequest(request_vals) != GcpRequest(response_vals)\n\n\n# Remove unnecessary properties from the response.\n# This is for doing comparisons with Ansible's current parameters.\ndef response_to_hash(module, response):\n return {\n u'creationTimestamp': response.get(u'creationTimestamp'),\n u'description': response.get(u'description'),\n u'id': response.get(u'id'),\n u'name': response.get(u'name'),\n u'namedPorts': InstanceGroupNamedportsArray(response.get(u'namedPorts', []), module).from_response(),\n u'network': response.get(u'network'),\n u'region': response.get(u'region'),\n u'subnetwork': response.get(u'subnetwork'),\n }\n\n\ndef region_selflink(name, params):\n if name is None:\n return\n url = r\"https://www.googleapis.com/compute/v1/projects/.*/regions/.*\"\n if not re.match(url, name):\n name = \"https://www.googleapis.com/compute/v1/projects/{project}/regions/%s\".format(**params) % name\n return name\n\n\ndef async_op_url(module, extra_data=None):\n if extra_data is None:\n extra_data = {}\n url = \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}\"\n combined = extra_data.copy()\n combined.update(module.params)\n return url.format(**combined)\n\n\ndef wait_for_operation(module, response):\n op_result = return_if_object(module, response, 'compute#operation')\n if 
op_result is None:\n return {}\n status = navigate_hash(op_result, ['status'])\n wait_done = wait_for_completion(status, op_result, module)\n return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroup')\n\n\ndef wait_for_completion(status, op_result, module):\n op_id = navigate_hash(op_result, ['name'])\n op_uri = async_op_url(module, {'op_id': op_id})\n while status != 'DONE':\n raise_if_errors(op_result, ['error', 'errors'], module)\n time.sleep(1.0)\n op_result = fetch_resource(module, op_uri, 'compute#operation', False)\n status = navigate_hash(op_result, ['status'])\n return op_result\n\n\ndef raise_if_errors(response, err_path, module):\n errors = navigate_hash(response, err_path)\n if errors is not None:\n module.fail_json(msg=errors)\n\n\nclass InstanceLogic(object):\n def __init__(self, module):\n self.module = module\n self.current_instances = self.list_instances()\n self.module_instances = []\n\n # Transform module list of instances (dicts of instance responses) into a list of selfLinks.\n instances = self.module.params.get('instances')\n if instances:\n for instance in instances:\n self.module_instances.append(replace_resource_dict(instance, 'selfLink'))\n\n def run(self):\n # Find all instances to add and add them\n instances_to_add = list(set(self.module_instances) - set(self.current_instances))\n if instances_to_add:\n self.add_instances(instances_to_add)\n\n # Find all instances to remove and remove them\n instances_to_remove = list(set(self.current_instances) - set(self.module_instances))\n if instances_to_remove:\n self.remove_instances(instances_to_remove)\n\n def list_instances(self):\n auth = GcpSession(self.module, 'compute')\n response = return_if_object(self.module, auth.post(self._list_instances_url(), {'instanceState': 'ALL'}), 'compute#instanceGroupsListInstances')\n\n # Transform instance list into a list of selfLinks for diffing with module parameters\n instances = []\n for instance in response.get('items', []):\n instances.append(instance['instance'])\n return instances\n\n def add_instances(self, instances):\n auth = GcpSession(self.module, 'compute')\n wait_for_operation(self.module, auth.post(self._add_instances_url(), self._build_request(instances)))\n\n def remove_instances(self, instances):\n auth = GcpSession(self.module, 'compute')\n wait_for_operation(self.module, auth.post(self._remove_instances_url(), self._build_request(instances)))\n\n def _list_instances_url(self):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/listInstances\".format(**self.module.params)\n\n def _remove_instances_url(self):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/removeInstances\".format(**self.module.params)\n\n def _add_instances_url(self):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{name}/addInstances\".format(**self.module.params)\n\n def _build_request(self, instances):\n request = {'instances': []}\n for instance in instances:\n request['instances'].append({'instance': instance})\n return request\n\n\nclass InstanceGroupNamedportsArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n 
items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({u'name': item.get('name'), u'port': item.get('port')})\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({u'name': item.get(u'name'), u'port': item.get(u'port')})\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/google/gcp_compute_instance_group.py","file_name":"gcp_compute_instance_group.py","file_ext":"py","file_size_in_byte":17868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"618613679","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef init_two_layer_model(input_size, hidden_size, output_size):\n \"\"\"\n Initialize the weights and biases for a two-layer fully connected neural\n network. The net has an input dimension of D, a hidden layer dimension of H,\n and performs classification over C classes. Weights are initialized to small\n random values and biases are initialized to zero.\n\n Inputs:\n - input_size: The dimension D of the input data\n - hidden_size: The number of neurons H in the hidden layer\n - ouput_size: The number of classes C\n\n Returns:\n A dictionary mapping parameter names to arrays of parameter values. It has\n the following keys:\n - W1: First layer weights; has shape (D, H)\n - b1: First layer biases; has shape (H,)\n - W2: Second layer weights; has shape (H, C)\n - b2: Second layer biases; has shape (C,)\n \"\"\"\n # initialize a model\n model = {}\n model['W1'] = 0.00001 * np.random.randn(input_size, hidden_size)\n model['b1'] = np.zeros(hidden_size)\n model['W2'] = 0.00001 * np.random.randn(hidden_size, output_size)\n model['b2'] = np.zeros(output_size)\n return model\n\ndef two_layer_net(X, model, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural network.\n The net has an input dimension of D, a hidden layer dimension of H, and\n performs classification over C classes. We use a softmax loss function and L2\n regularization the the weight matrices. The two layer net should use a ReLU\n nonlinearity after the first affine layer.\n\n The two layer net has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each\n class.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - model: Dictionary mapping parameter names to arrays of parameter values.\n It should contain the following:\n - W1: First layer weights; has shape (D, H)\n - b1: First layer biases; has shape (H,)\n - W2: Second layer weights; has shape (H, C)\n - b2: Second layer biases; has shape (C,)\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y not is passed, return a matrix scores of shape (N, C) where scores[i, c]\n is the score for class c on input X[i].\n\n If y is not passed, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function. 
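    (Aside - illustrative, not part of the original handout: analytic
    gradients like the ones returned here are normally validated against a
    centered numeric estimate before training, e.g.

        def numeric_grad(f, x, h=1e-5):
            grad = np.zeros_like(x)
            it = np.nditer(x, flags=['multi_index'])
            while not it.finished:
                ix = it.multi_index
                old = x[ix]
                x[ix] = old + h
                fxph = f(x)          # f(x + h)
                x[ix] = old - h
                fxmh = f(x)          # f(x - h)
                x[ix] = old
                grad[ix] = (fxph - fxmh) / (2 * h)
                it.iternext()
            return grad

    where f maps one parameter array to the scalar loss.)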
This should have the same keys as model.\n \"\"\"\n\n\n # unpack variables from the model dictionary\n W1,b1,W2,b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape\n\n # compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n out_1=np.matmul(X,W1)+b1\n out_1[np.where(out_1<0)]=0\n dot_pr=np.matmul(out_1,W2)+b2\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n # If the targets are not given then jump out, we're done\n if y is None:\n scores=dot_pr\n return scores\n\n # compute the loss\n\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n loss = None\n #norm_term = np.max(dot_pr, axis=0)\n #dot_pr = dot_pr - norm_term\n pred = np.exp(dot_pr)\n softmax = pred / np.sum(pred, axis=1).reshape(-1,1)\n\n true_pred = -np.log(np.asarray([softmax[i][y[i]] for i in range(len(y))]))\n loss = np.sum(true_pred) / len(y) + 0.5*reg *(np.sum(np.square(W2)) + np.sum(np.square(W1)))\n\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # compute the gradients\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. 
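    # (Aside - illustrative: for the softmax loss above, the gradient of the
    # loss w.r.t. the class scores is the softmax matrix with 1 subtracted at
    # each true class, divided by N. For one sample with probabilities
    # [0.2, 0.5, 0.3] and true class 1, dscores = [0.2, -0.5, 0.3] / N; the
    # softmax_copy manipulation below implements exactly this.)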
For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n grads = {}\n softmax_copy = softmax.copy()\n softmax_copy[range(len(y)),y] -= 1\n grads['W2'] = np.dot(out_1.T,softmax_copy) / len(y) + reg * W2\n grads['b2'] = np.sum(softmax_copy,axis=0)/len(y)\n relu_grad= np.zeros_like(out_1)\n relu_grad[out_1>0]=1\n #grads['W1'] = (np.dot(relu_grad.T,X )*np.dot(W2,np.sum(softmax_copy.T,axis=1)).reshape(-1,1)).T/len(y) + reg*W1\n grads['W1'] = np.dot(X.T,np.multiply(np.dot(softmax_copy,W2.T),relu_grad))/len(y)+ reg*W1\n grads['b1'] = np.sum(np.multiply(np.dot(softmax_copy,W2.T),relu_grad),axis=0)/len(y)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads\n\n\ninput_size = 4\nhidden_size = 10\nnum_classes = 3\n\nnum_inputs = 5\n\ndef init_toy_model():\n model = {}\n model['W1'] = np.linspace(-0.2, 0.6, num=input_size*hidden_size).reshape(input_size, hidden_size)\n model['b1'] = np.linspace(-0.3, 0.7, num=hidden_size)\n model['W2'] = np.linspace(-0.4, 0.1, num=hidden_size*num_classes).reshape(hidden_size, num_classes)\n model['b2'] = np.linspace(-0.5, 0.9, num=num_classes)\n return model\n\ndef init_toy_data():\n X = np.linspace(-0.2, 0.5, num=num_inputs*input_size).reshape(num_inputs, input_size)\n y = np.array([0, 1, 2, 2, 1])\n return X, y\n\nmodel = init_toy_model()\nX, y = init_toy_data()\n\nscores = two_layer_net(X, model)\nprint(scores)\ncorrect_scores = [[-0.5328368, 0.20031504, 0.93346689],\n [-0.59412164, 0.15498488, 0.9040914 ],\n [-0.67658362, 0.08978957, 0.85616275],\n [-0.77092643, 0.01339997, 0.79772637],\n [-0.89110401, -0.08754544, 0.71601312]]\n\n# the difference should be very small. 
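# --- Added illustrative sketch (not part of the original assignment file). ---
# Analytic gradients like grads['W1'] above are commonly validated against a
# centered finite-difference estimate; `f` is any scalar-valued loss of the
# array `x` (hypothetical helper, not from the original file):
import numpy as np

def numerical_gradient(f, x, h=1e-5):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        fxph = f(x)                      # f(x + h)
        x[ix] = old - h
        fxmh = f(x)                      # f(x - h)
        x[ix] = old                      # restore the entry
        grad[ix] = (fxph - fxmh) / (2.0 * h)
        it.iternext()
    return grad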
We get 3e-8\nprint('Difference between your scores and correct scores:')\nprint(np.sum(np.abs(scores - correct_scores)))\nreg = 0.1\nloss, _ = two_layer_net(X, model, y, reg)\ncorrect_loss = 1.38191946092\n\n# should be very small, we get 5e-12\nprint('Difference between your loss and correct loss:')\nprint(np.sum(np.abs(loss - correct_loss)))\nloss, grads = two_layer_net(X, model, y, reg)\n","sub_path":"1_cs231n/cs231n/classifiers/neural_net.py","file_name":"neural_net.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"193153552","text":"# -*- coding: utf-8 -*-\nfrom osv import osv, fields\n\nclass jmdaccounts(osv.Model):\n    _name = \"account.sat\"\n    _columns = {\n        'name': fields.char(string=\"Nombre\"),\n        'codigo': fields.char(string=\"Codigo\"),\n    }\n\n\nclass jmdaccount(osv.Model):\n    _inherit = \"account.account\"\n\n    def setParents(self, cr, uid, ids, context=None):\n        print(\"Here\")\n        ret = {}\n        count = 0\n        parent = 3963  # fallback parent account id\n        for i in self.browse(cr, uid, self.search(cr, uid, []), context=None):\n            count += 1\n            if count >= 20:\n                return ret\n            print(\"Modifying account \" + str(i.name) + str(i.code))\n            parent = 3963\n            if i.id == parent:\n                print(\"Skip\")\n                continue\n            if i.name == \"Estadistica\":\n                continue\n            if len(i.code) >= 9:\n                parent_code = i.code[:-5]\n                for j in self.browse(cr, uid, self.search(cr, uid, [('code', '=', parent_code)]), context=None):\n                    print(\"Found \" + str(j.code) + \" parent of \" + str(i.code) + \" ID \" + str(i.id))\n                    parent = j.id\n            print(\"Account id \" + str(i.id))\n            self.write(cr, uid, i.id, {'parent_id': parent})\n        print(\"Never reaches here\")\n        return ret\n\n    _columns = {\n        'balanza': fields.boolean(string=\"Balanza de Comprobación\"),\n        'resultados': fields.boolean(string=\"Estado de Resultados\"),\n        'balance_general': fields.boolean(string=\"Balance General\"),\n        'codigo_sat': fields.many2one(\"account.sat\", string=\"Codigo SAT\"),\n        'sat': fields.char(\"Código Agrupador SAT\")\n    }\n\n\nclass jmdpoliza(osv.Model):\n    _inherit = \"account.move\"\n\n    def esta_cuadrada(self, cr, uid, ids, fieldname, args, context=None):\n        ret = {}\n        for i in self.browse(cr, uid, ids, context):\n            ret[i.id] = False\n            for j in i.line_id:\n                if j.state == \"valid\":  # string comparison, not identity\n                    ret[i.id] = True\n        return ret\n\n    _columns = {\n        'field': fields.function(esta_cuadrada,\n                                 string=\"Cuadrada\", type=\"boolean\")\n    }\n\n    _defaults = {\n        'ref': lambda self, cr, uid, context={}: self.pool.get('ir.sequence').get(cr, uid, 'ea.poliza')\n    }\n","sub_path":"ea_jmd/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"221289958","text":"# -*- coding: latin-1 -*-\n\n\"\"\"\nsimpleguics2pygame module: simpleguics2pygame/canvas.\n\nClass Canvas.\n\nPiece of SimpleGUICS2Pygame.\nhttps://bitbucket.org/OPiMedia/simpleguics2pygame\n\n:license: GPLv3 --- Copyright (C) 2015-2016, 2020 Olivier Pirson\n:author: Olivier Pirson --- http://www.opimedia.be/\n:version: November 29, 2020\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n# print('IMPORT', __name__)\n\n\nimport math\nimport os.path\nimport re\nimport sys\n\n\n__all__ = ('Canvas',\n           'create_invisible_canvas')\n\n\ntry:\n    from typing import Any, Callable, List, Optional, Sequence, Tuple, Union  # noqa\nexcept ImportError:\n    pass\n\nimport pygame\n\nfrom 
SimpleGUICS2Pygame.simpleguics2pygame._colors import _SIMPLEGUICOLOR_TO_PYGAMECOLOR, _simpleguicolor_to_pygamecolor  # pylint: disable=wrong-import-position,no-name-in-module,ungrouped-imports  # noqa\nfrom SimpleGUICS2Pygame.simpleguics2pygame._fonts import _SIMPLEGUIFONTFACE_TO_PYGAMEFONTNAME, _simpleguifontface_to_pygamefont  # pylint: disable=wrong-import-position,no-name-in-module,ungrouped-imports  # noqa\nfrom SimpleGUICS2Pygame.simpleguics2pygame.image import Image  # pylint: disable=wrong-import-position,no-name-in-module,ungrouped-imports  # noqa\n\n\n#\n# Private global constants\n##########################\n_RADIAN_TO_DEGREE = 180 / math.pi\n\"\"\"\nMultiplicative constant to convert radian to degree.\n\"\"\"\n\n\n_RE_UNPRINTABLE_WHITESPACE_CHAR = re.compile('[\\\t\\\n\\\r\\\f\\\v]'.replace('\\\\', '\\\\'))\n\"\"\"\nRegular expression pattern to unprintable whitespace character.\n\"\"\"\n\n\n#\n# \"Private\" function\n####################\ndef _pos_round(position):\n    # type: (Sequence[Union[int, float]]) -> Tuple[int, int]\n    \"\"\"\n    Return the rounded `position`.\n\n    **Don't require Pygame.**\n\n    **(Not available in SimpleGUI of CodeSkulptor.)**\n\n    :param position: (int or float, int or float)\n                     or [int or float, int or float]\n\n    :return: (int, int)\n    \"\"\"\n    assert isinstance(position, (tuple, list)), type(position)\n    assert len(position) == 2, len(position)\n    assert isinstance(position[0], (int, float)), type(position[0])\n    assert isinstance(position[1], (int, float)), type(position[1])\n\n    return (int(round(position[0])), int(round(position[1])))\n\n\n#\n# Class\n#######\nclass Canvas:\n    \"\"\"Canvas similar to SimpleGUI `Canvas` of CodeSkulptor.\"\"\"\n\n    _background_pygame_color = _SIMPLEGUICOLOR_TO_PYGAMECOLOR['black']\n    \"\"\"Default `pygame.Color` of the background of the canvas.\"\"\"\n\n    _bg_pygame_surface_image = None  # type: Optional[pygame.surface.Surface]\n    \"\"\"\n    `pygame.surface.Surface` default background image\n    replaces `_background_pygame_color`.\n    \"\"\"\n\n    def __init__(self,\n                 frame,\n                 canvas_width, canvas_height):\n        # type: (Optional[pygame.Frame], int, int) -> None  # noqa\n        \"\"\"\n        Set the canvas.\n\n        **Don't use directly**, a canvas is created by `Frame()`\n        and reachable by handler defined by `Frame.set_draw_handler()`.\n\n        :param frame: Frame (or None)\n        :param canvas_width: int >= 0\n        :param canvas_height: int >= 0\n        \"\"\"\n        assert isinstance(canvas_width, int), type(canvas_width)\n        assert canvas_width >= 0, canvas_width\n\n        assert isinstance(canvas_height, int), type(canvas_height)\n        assert canvas_height >= 0, canvas_height\n\n        self._frame_parent = frame\n\n        self._width = canvas_width\n        self._height = canvas_height\n\n        self._background_pygame_color = Canvas._background_pygame_color\n\n        self._draw_handler = None  # type: Optional[Callable[[Canvas], Any]]\n\n        self._pygame_surface = pygame.surface.Surface((canvas_width, canvas_height))  # pylint: disable=too-many-function-args  # noqa\n\n    def __repr__(self):  # type: () -> str\n        \"\"\"\n        Return `'<Canvas object>'`.\n\n        :return: str\n        \"\"\"\n        return '<Canvas object>'\n\n    def _draw(self):  # type: () -> None\n        \"\"\"\n        If `self._draw_handler` != `None`\n        then call it and update display of the canvas.\n\n        **(Not available in SimpleGUI of CodeSkulptor.)**\n        \"\"\"\n        if ((self._draw_handler is not None) and\n                (self._frame_parent is not None)):\n            if self._bg_pygame_surface_image is None:\n                if self._background_pygame_color.a == 255:\n                    # Without alpha\n                    self._pygame_surface.fill(self._background_pygame_color)\n                elif self._background_pygame_color.a > 0:\n                    # With alpha 
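# --- Added illustrative sketch (not part of the original library file). ---
# The alpha branch above uses the standard Pygame recipe for translucent
# fills: an alpha color cannot be filled directly onto an opaque surface, so
# a temporary SRCALPHA surface is filled and then blitted onto the target.
import pygame

def blit_translucent_fill(target, rgba, size, pos=(0, 0)):
    tmp = pygame.Surface(size, pygame.SRCALPHA)  # per-pixel alpha surface
    tmp.fill(rgba)                               # e.g. (0, 0, 0, 128)
    target.blit(tmp, pos)                        # alpha-blended blit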
(not null)\n s_alpha = pygame.surface.Surface((self._width, self._height), # pylint: disable=too-many-function-args # noqa\n pygame.SRCALPHA) # pylint: disable=no-member # noqa\n s_alpha.fill(self._background_pygame_color)\n self._pygame_surface.blit(s_alpha, (0, 0))\n else:\n self._pygame_surface.blit(\n self._bg_pygame_surface_image, (0, 0))\n\n self._draw_handler(self)\n\n if self._frame_parent._display_fps_average: # pylint: disable=protected-access # noqa\n self._pygame_surface.blit(\n _simpleguifontface_to_pygamefont(None, 40)\n .render(str(int(round(self._frame_parent._fps_average))), # pylint: disable=protected-access # noqa\n True,\n _SIMPLEGUICOLOR_TO_PYGAMECOLOR['red']),\n (10, self._height - 40))\n\n self._frame_parent._pygame_surface.blit( # pylint: disable=protected-access # noqa\n self._pygame_surface,\n (self._frame_parent._canvas_x_offset, # pylint: disable=protected-access # noqa\n self._frame_parent._canvas_y_offset)) # pylint: disable=protected-access # noqa\n\n pygame.display.update((self._frame_parent._canvas_x_offset, # pylint: disable=protected-access # noqa\n self._frame_parent._canvas_y_offset, # pylint: disable=protected-access # noqa\n self._width, # pylint: disable=protected-access # noqa\n self._height)) # pylint: disable=protected-access # noqa\n\n def _save(self, filename): # type: (str) -> None\n \"\"\"\n Save the canvas in `filename`.\n\n Supported formats are supported formats by Pygame to save:\n TGA, PNG, JPEG or BMP\n (see https://www.pygame.org/docs/ref/image.html#pygame.image.save ).\n\n If `filename` extension is not recognized\n then TGA format is used.\n\n **(Not available in SimpleGUI of CodeSkulptor.)**\n\n :param filename: str\n \"\"\"\n assert isinstance(filename, str), type(filename)\n\n filename = os.path.abspath(os.path.expanduser(filename))\n pygame.image.save(self._pygame_surface, filename)\n\n def draw_arc(self, # pylint: disable=too-many-arguments\n center_point, radius,\n start_angle, end_angle,\n line_width, line_color):\n # type: (Sequence[Union[int, float]], Union[int, float], Union[int, float], Union[int, float], Union[int, float], str) -> None # noqa\n \"\"\"\n Draw an arc of circle, from `start_angle` to `end_angle`.\n Angles given in radians are clockwise\n and start from 0 at the 3 o'clock position.\n\n (Available in CodeSkulptor3 but *not in CodeSkulptor 2*!)\n\n :param center_point: (int or float, int or float)\n or [int or float, int or float]\n :param radius: (int or float) > 0\n :param start_angle: int or float\n :param end_angle: int or float\n :param line_width: (int or float) > 0\n :param line_color: str\n \"\"\"\n assert isinstance(center_point, (tuple, list)), type(center_point)\n assert len(center_point) == 2, len(center_point)\n assert isinstance(center_point[0], (int, float)), type(center_point[0])\n assert isinstance(center_point[1], (int, float)), type(center_point[1])\n\n assert isinstance(radius, (int, float)), type(radius)\n assert radius > 0, radius\n\n assert isinstance(start_angle, (int, float)), (start_angle)\n assert isinstance(end_angle, (int, float)), type(end_angle)\n\n assert isinstance(line_width, (int, float)), type(line_width)\n assert line_width > 0, line_width\n\n assert isinstance(line_color, str), type(line_color)\n\n line_width = (1 if line_width <= 1\n else int(round(line_width)))\n\n radius = int(round(radius)) + int(round(line_width // 2))\n\n # Adapt Codeskulptor angles to Pygame\n if start_angle == end_angle:\n return\n\n start_angle = -start_angle\n end_angle = -end_angle\n start_angle, 
end_angle = end_angle, start_angle\n\n double_pi = math.pi * 2\n start_angle %= double_pi\n end_angle %= double_pi\n\n if start_angle == end_angle:\n return\n\n # Draw\n if radius > 1:\n pygamecolor = _simpleguicolor_to_pygamecolor(line_color)\n\n if pygamecolor.a > 0:\n diameter = radius * 2\n s_tmp = pygame.surface.Surface((diameter, diameter), # pylint: disable=too-many-function-args # noqa\n pygame.SRCALPHA) # pylint: disable=no-member # noqa\n\n pygame.draw.arc(s_tmp, pygamecolor,\n s_tmp.get_rect(),\n start_angle, end_angle,\n min(line_width, radius))\n\n self._pygame_surface.blit(s_tmp,\n (center_point[0] - radius,\n center_point[1] - radius))\n elif radius > 0: # == 1\n self.draw_point(center_point, line_color)\n\n def draw_circle(self, # pylint: disable=too-many-arguments\n center_point, radius,\n line_width, line_color,\n fill_color=None):\n # type: (Sequence[Union[int, float]], Union[int, float], Union[int, float], str, Optional[str]) -> None # noqa\n \"\"\"\n Draw a circle.\n\n If `fill_color` != `None`\n then fill with this color.\n\n :param center_point: (int or float, int or float)\n or [int or float, int or float]\n :param radius: (int or float) > 0\n :param line_width: (int or float) > 0\n :param line_color: str\n :param fill_color: None or str\n \"\"\"\n assert isinstance(center_point, (tuple, list)), type(center_point)\n assert len(center_point) == 2, len(center_point)\n assert isinstance(center_point[0], (int, float)), type(center_point[0])\n assert isinstance(center_point[1], (int, float)), type(center_point[1])\n\n assert isinstance(radius, (int, float)), type(radius)\n assert radius > 0, radius\n\n assert isinstance(line_width, (int, float)), type(line_width)\n assert line_width > 0, line_width\n\n assert isinstance(line_color, str), type(line_color)\n assert (fill_color is None) or isinstance(fill_color, str), \\\n type(fill_color)\n\n line_width = (1 if line_width <= 1\n else int(round(line_width)))\n\n radius = int(round(radius)) + int(round(line_width // 2))\n\n if radius > 1:\n pygamecolor = _simpleguicolor_to_pygamecolor(line_color)\n pygamefillcolor = (None if fill_color is None\n else _simpleguicolor_to_pygamecolor(fill_color))\n\n center_point_rounded = _pos_round(center_point)\n\n if ((pygamecolor.a == 255) and\n ((pygamefillcolor is None) or (pygamefillcolor.a == 255))):\n # Without alpha\n if pygamefillcolor is not None:\n pygame.draw.circle(self._pygame_surface, pygamefillcolor,\n center_point_rounded, radius,\n 0)\n if pygamecolor != pygamefillcolor:\n pygame.draw.circle(self._pygame_surface, pygamecolor,\n center_point_rounded, radius,\n min(line_width, radius))\n elif ((pygamecolor.a > 0) or\n ((pygamefillcolor is not None) and (pygamefillcolor.a > 0))):\n # With one or two alpha (not null)\n diameter = radius * 2\n s_alpha = pygame.surface.Surface((diameter, diameter), # pylint: disable=too-many-function-args # noqa\n pygame.SRCALPHA) # pylint: disable=no-member # noqa\n\n if (pygamefillcolor is not None) and (pygamefillcolor.a > 0):\n pygame.draw.circle(s_alpha, pygamefillcolor,\n (radius, radius), radius,\n 0)\n if (pygamecolor != pygamefillcolor) and (pygamecolor.a > 0):\n pygame.draw.circle(s_alpha, pygamecolor,\n (radius, radius), radius,\n min(line_width, radius))\n\n self._pygame_surface.blit(s_alpha,\n (center_point_rounded[0] - radius,\n center_point_rounded[1] - radius))\n elif radius > 0: # == 1\n self.draw_point(center_point, line_color)\n\n def draw_image(self, # pylint: 
disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa\n image,\n center_source, width_height_source,\n center_dest, width_height_dest,\n rotation=0):\n # type: (Image, Sequence[Union[int, float]], Sequence[Union[int, float]], Sequence[Union[int, float]], Sequence[Union[int, float]], Union[int, float]) -> None # noqa\n \"\"\"\n Draw `image` on the canvas.\n\n Specify center position and size of the source (`image`)\n and center position and size of the destination (the canvas).\n\n Size of the source allow get a piece of `image`.\n If `width_height_source` is bigger than `image`\n then draw nothing.\n\n Size of the destination allow rescale the drawed image.\n\n `rotation` specify a clockwise rotation in radians.\n\n Each new Pygame surface used\n is added to `image._pygamesurfaces_cached`.\n See `Image._pygamesurfaces_cached_clear()`_ .\n\n .. _`Image._pygamesurfaces_cached_clear()`: image.html#SimpleGUICS2Pygame.simpleguics2pygame.image.Image._pygamesurfaces_cached_clear\n\n If number of surfaces in this caches\n is greater than `image._pygamesurfaces_cache_max_size`\n then remove the oldest surface.\n\n :param image: Image\n :param center_source: (int or float, int or float)\n or [int or float, int or float]\n :param width_height_source: ((int or float) >= 0, (int or float) >= 0)\n or [(int or float) >= 0, (int or float) >= 0]\n :param center_dest: (int or float, int or float)\n or [int or float, int or float]\n :param width_height_dest: ((int or float) >= 0, (int or float) >= 0)\n or [(int or float) >= 0, (int or float) >= 0]\n :param rotation: int or float\n \"\"\" # noqa\n assert isinstance(image, Image), type(image)\n\n assert isinstance(center_source, (tuple, list)), \\\n type(center_source)\n assert len(center_source) == 2, len(center_source)\n assert isinstance(center_source[0], (int, float)), \\\n type(center_source[0])\n assert isinstance(center_source[1], (int, float)), \\\n type(center_source[1])\n\n assert isinstance(width_height_source, (tuple, list)), \\\n type(width_height_source)\n assert len(width_height_source) == 2, len(width_height_source)\n assert isinstance(width_height_source[0], (int, float)), \\\n type(width_height_source[0])\n assert width_height_source[0] >= 0, width_height_source[0]\n assert isinstance(width_height_source[1], (int, float)), \\\n type(width_height_source[1])\n assert width_height_source[1] >= 0, width_height_source[1]\n\n assert isinstance(center_dest, (tuple, list)), type(center_dest)\n assert len(center_dest) == 2, len(center_dest)\n assert isinstance(center_dest[0], (int, float)), type(center_dest[0])\n assert isinstance(center_dest[1], (int, float)), type(center_dest[1])\n\n assert isinstance(width_height_dest, (tuple, list)), \\\n type(width_height_dest)\n assert len(width_height_dest) == 2, len(width_height_dest)\n assert isinstance(width_height_dest[0], (int, float)), \\\n type(width_height_dest[0])\n assert width_height_dest[0] >= 0, width_height_dest[0]\n assert isinstance(width_height_dest[1], (int, float)), \\\n type(width_height_dest[1])\n assert width_height_dest[1] >= 0, width_height_dest[1]\n\n assert isinstance(rotation, (int, float)), type(rotation)\n\n if image._pygame_surface is None: # pylint: disable=protected-access\n return\n\n # Calculate parameters\n width_source, height_source = width_height_source\n\n x0_source = center_source[0] - width_source / 2\n y0_source = center_source[1] - height_source / 2\n\n if x0_source >= 0:\n x0_source = int(round(x0_source))\n elif -1 < x0_source: # 
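# --- Added illustrative sketch (not part of the original library file). ---
# The per-image surface cache documented in draw_image above is effectively a
# small LRU cache keyed by the transform parameters; an OrderedDict reproduces
# the move-to-end / evict-oldest mechanics the method relies on.
from collections import OrderedDict

class SurfaceLRUCache:
    def __init__(self, max_size):
        self._data = OrderedDict()
        self._max_size = max_size

    def get(self, key):
        value = self._data.get(key)
        if value is not None:
            self._data.move_to_end(key)     # mark as most recently used
        return value

    def put(self, key, value):
        self._data[key] = value
        if len(self._data) > self._max_size:
            self._data.popitem(last=False)  # drop the oldest entry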
rounding error correcting\n width_source -= x0_source\n x0_source = 0\n else: # outside of source image\n return\n\n if y0_source >= 0:\n y0_source = int(round(y0_source))\n elif -1 < y0_source: # rounding error correcting\n height_source -= y0_source\n y0_source = 0\n else: # outside of source image\n return\n\n width_source = int(round(width_source))\n height_source = int(round(height_source))\n\n if ((x0_source + width_source > image.get_width() + 1) or\n (y0_source + height_source > image.get_height() + 1)):\n # Bigger than source image\n return\n\n if x0_source + width_source > image.get_width():\n # Keep this image (seem too big, maybe rounding error)\n width_source -= 1\n\n if y0_source + height_source > image.get_height():\n # Keep this image (seem too big, maybe rounding error)\n height_source -= 1\n\n width_height_dest = _pos_round(width_height_dest)\n\n rotation = int(round(-rotation * _RADIAN_TO_DEGREE)) % 360\n\n # Get in cache or build Pygame surface\n if sys.version_info[:2] >= (3, 2):\n move_to_end = image._pygamesurfaces_cached.move_to_end # pylint: disable=protected-access # noqa\n else:\n def move_to_end(key): # type: (int) -> None\n \"\"\"\n Move the `key` item to the newest place of the surfaces cache.\n\n :param key: tuple of 7 (int >= 0)\n \"\"\"\n del image._pygamesurfaces_cached[key] # pylint: disable=protected-access # noqa\n\n image._pygamesurfaces_cached[key] = pygame_surface_image # pylint: disable=protected-access # noqa\n\n key = (x0_source, y0_source, width_source, height_source,\n width_height_dest[0], width_height_dest[1],\n rotation)\n pygame_surface_image = image._pygamesurfaces_cached.get(key) # type: Optional[pygame.surface.Surface] # pylint: disable=protected-access # noqa\n\n if pygame_surface_image is not None: # Result available\n move_to_end(key)\n if __debug__:\n image._pygamesurfaces_cached_counts[0] += 1 # pylint: disable=protected-access # noqa\n else: # Build result\n key_0 = key[:-1] + (0, )\n if rotation != 0: # Get not rotated surface in cache\n pygame_surface_image = image._pygamesurfaces_cached.get(key_0) # pylint: disable=protected-access # noqa\n\n if pygame_surface_image is not None: # Not rotated available\n move_to_end(key_0)\n if __debug__:\n image._pygamesurfaces_cached_counts[1] += 1 # pylint: disable=protected-access # noqa\n else: # Build piece and/or resize\n if ((x0_source == 0) and (y0_source == 0) and\n (width_source == image.get_width()) and\n (height_source == image.get_height())):\n pygame_surface_image = image._pygame_surface # pylint: disable=protected-access # noqa\n else: # Get a piece in source\n pygame_surface_image = image._pygame_surface.subsurface( # pylint: disable=protected-access # noqa\n (x0_source, y0_source,\n width_source, height_source))\n\n if ((width_height_dest[0] != width_source) or\n (width_height_dest[1] != height_source)):\n # Resize to destination dimensions\n pygame_surface_image = pygame.transform.scale(\n pygame_surface_image, width_height_dest)\n\n image._pygamesurfaces_cached[key_0] = pygame_surface_image # pylint: disable=protected-access # noqa\n\n if (self._frame_parent and # pylint: disable=protected-access\n self._frame_parent._print_stats_cache and # pylint: disable=protected-access # noqa\n (len(image._pygamesurfaces_cached) == image._pygamesurfaces_cache_max_size)): # pylint: disable=protected-access # noqa\n image._print_stats_cache( # pylint: disable=protected-access # noqa\n 'Surfaces full cache ')\n elif len(image._pygamesurfaces_cached) > image._pygamesurfaces_cache_max_size: # 
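# --- Added illustrative sketch (not part of the original library file). ---
# On a cache miss, draw_image crops with subsurface(), rescales, and only then
# rotates; the same pipeline in isolation (angle in degrees, counterclockwise):
import pygame

def crop_scale_rotate(surface, rect, dest_size, angle_deg):
    piece = surface.subsurface(rect)                  # view into the source
    if piece.get_size() != tuple(dest_size):
        piece = pygame.transform.scale(piece, dest_size)
    if angle_deg % 360 != 0:
        piece = pygame.transform.rotate(piece, angle_deg)
    return piece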
pylint: disable=protected-access # noqa\n image._pygamesurfaces_cached.popitem(False) # pylint: disable=protected-access # noqa\n\n if rotation != 0: # Rotate\n pygame_surface_image = pygame.transform.rotate(\n pygame_surface_image, rotation)\n\n image._pygamesurfaces_cached[key] = pygame_surface_image # pylint: disable=protected-access # noqa\n\n if (self._frame_parent and # pylint: disable=protected-access\n self._frame_parent._print_stats_cache and # pylint: disable=protected-access # noqa\n (len(image._pygamesurfaces_cached) == image._pygamesurfaces_cache_max_size)): # pylint: disable=protected-access # noqa\n image._print_stats_cache( # pylint: disable=protected-access # noqa\n 'Surfaces full cache with rotated ')\n elif len(image._pygamesurfaces_cached) > image._pygamesurfaces_cache_max_size: # pylint: disable=protected-access # noqa\n image._pygamesurfaces_cached.popitem(False) # pylint: disable=protected-access # noqa\n\n # Draw the result\n self._pygame_surface.blit(\n pygame_surface_image,\n (int(round(center_dest[0] - pygame_surface_image.get_width() /\n 2)),\n int(round(center_dest[1] - pygame_surface_image.get_height() /\n 2))))\n if __debug__:\n image._draw_count += 1\n\n def draw_line(self,\n point1, point2,\n line_width, line_color):\n # type: (Sequence[Union[int, float]], Sequence[Union[int, float]], Union[int, float], str) -> None # noqa\n \"\"\"\n Draw a line segment from point1 to point2.\n\n :param point1: (int or float, int or float)\n or [int or float, int or float]\n :param point2: (int or float, int or float)\n or [int or float, int or float]\n :param line_width: (int or float) > 0\n :param line_color: str\n \"\"\"\n assert isinstance(point1, (tuple, list)), type(point1)\n assert len(point1) == 2, len(point1)\n assert isinstance(point1[0], (int, float)), type(point1[0])\n assert isinstance(point1[1], (int, float)), type(point1[1])\n\n assert isinstance(point2, (tuple, list)), type(point2)\n assert len(point2) == 2, len(point2)\n assert isinstance(point2[0], (int, float)), type(point2[0])\n assert isinstance(point2[1], (int, float)), type(point2[1])\n\n assert isinstance(line_width, (int, float)), type(line_width)\n assert line_width > 0, line_width\n\n assert isinstance(line_color, str), type(line_color)\n\n pygamecolor = _simpleguicolor_to_pygamecolor(line_color)\n\n if pygamecolor.a == 255: # without alpha\n pygame.draw.line(self._pygame_surface, pygamecolor,\n _pos_round(point1), _pos_round(point2),\n int(round(line_width)))\n elif pygamecolor.a > 0: # with alpha (not null)\n x1, y1 = _pos_round(point1)\n x2, y2 = _pos_round(point2) # pylint: disable=invalid-name # noqa\n\n width = abs(x2 - x1) + line_width * 2\n height = abs(y2 - y1) + line_width * 2\n\n x_min = min(x1, x2)\n y_min = min(y1, y2)\n\n s_alpha = pygame.surface.Surface((width, height), pygame.SRCALPHA) # pylint: disable=too-many-function-args,no-member # noqa\n pygame.draw.line(s_alpha, pygamecolor,\n (x1 - x_min + line_width,\n y1 - y_min + line_width),\n (x2 - x_min + line_width,\n y2 - y_min + line_width),\n int(round(line_width)))\n self._pygame_surface.blit(s_alpha,\n (x_min - line_width, y_min - line_width))\n\n def draw_point(self, position, color):\n # type: (Sequence[Union[int, float]], str) -> None\n \"\"\"\n Draw a point.\n\n :param position: (int or float, int or float)\n or [int or float, int or float]\n :param color: str\n \"\"\"\n assert isinstance(position, (tuple, list)), type(position)\n assert len(position) == 2, len(position)\n assert isinstance(position[0], (int, float)), 
type(position[0])\n assert isinstance(position[1], (int, float)), type(position[1])\n\n assert isinstance(color, str), type(color)\n\n pygamecolor = _simpleguicolor_to_pygamecolor(color)\n\n if pygamecolor.a == 255: # without alpha\n self._pygame_surface.set_at(_pos_round(position), pygamecolor)\n elif pygamecolor.a > 0: # with alpha (not null)\n s_alpha = pygame.surface.Surface((1, 1), pygame.SRCALPHA) # pylint: disable=too-many-function-args,no-member # noqa\n s_alpha.set_at((0, 0), pygamecolor)\n self._pygame_surface.blit(s_alpha, _pos_round(position))\n\n def draw_polygon(self,\n point_list,\n line_width, line_color,\n fill_color=None):\n # type: (Sequence[Sequence[Union[int, float]]], Union[int, float], str, Optional[str]) -> None # noqa\n \"\"\"\n Draw a polygon from a list of points.\n A segment is automatically drawed\n between the last point and the first point.\n\n If `fill color` is not None\n then fill with this color.\n\n If `line_width` > 1, ends are poorly made!\n\n :param point_list: not empty (tuple or list)\n of ((int or float, int or float)\n or [int or float, int or float])\n :param line_width: (int or float) > 0\n :param line_color: str\n :param fill_color: None or str\n \"\"\"\n assert isinstance(point_list, (tuple, list)), type(point_list)\n assert len(point_list) > 0, len(point_list)\n\n if __debug__:\n for point in point_list:\n assert isinstance(point, (tuple, list)), type(point)\n assert len(point) == 2, len(point)\n assert isinstance(point[0], (int, float)), type(point[0])\n assert isinstance(point[1], (int, float)), type(point[1])\n\n assert isinstance(line_width, (int, float)), type(line_width)\n assert line_width >= 0, line_width\n\n assert isinstance(line_color, str), type(line_color)\n assert (fill_color is None) or isinstance(fill_color, str), \\\n type(fill_color)\n\n if len(point_list) == 1:\n return\n\n pygamecolor = _simpleguicolor_to_pygamecolor(line_color)\n pygamefillcolor = (None if fill_color is None\n else _simpleguicolor_to_pygamecolor(fill_color))\n\n point_list_rounded = [_pos_round(point) for point in point_list]\n\n del point_list\n\n line_width = int(round(line_width))\n\n if ((pygamecolor.a == 255) and\n ((pygamefillcolor is None) or (pygamefillcolor.a == 255))):\n # Without alpha\n if pygamefillcolor is not None:\n pygame.draw.polygon(self._pygame_surface, pygamefillcolor,\n point_list_rounded, 0)\n if pygamecolor != pygamefillcolor:\n pygame.draw.lines(self._pygame_surface, pygamecolor, True,\n point_list_rounded, line_width)\n elif ((pygamecolor.a > 0) or\n ((pygamefillcolor is not None) and (pygamefillcolor.a > 0))):\n # With one or two alpha (not null)\n s_alpha = pygame.surface.Surface((self._width, self._height), # pylint: disable=too-many-function-args # noqa\n pygame.SRCALPHA) # pylint: disable=no-member # noqa\n\n if (pygamefillcolor is not None) and (pygamefillcolor.a > 0):\n pygame.draw.polygon(s_alpha, pygamefillcolor,\n point_list_rounded, 0)\n if (pygamecolor != pygamefillcolor) and (pygamecolor.a > 0):\n pygame.draw.lines(s_alpha, pygamecolor, True,\n point_list_rounded, line_width)\n\n self._pygame_surface.blit(s_alpha, (0, 0))\n\n def draw_polyline(self,\n point_list,\n line_width, line_color):\n # type: (Sequence[Sequence[Union[int, float]]], Union[int, float], str) -> None # noqa\n \"\"\"\n Draw line segments between a list of points.\n\n If `line_width` > 1, ends are poorly made!\n\n :param point_list: not empty (tuple or list)\n of ((int or float, int or float)\n or [int or float, int or float])\n :param line_width: 
(int or float) > 0\n :param line_color: str\n \"\"\"\n assert isinstance(point_list, (tuple, list)), type(point_list)\n assert len(point_list) > 0, len(point_list)\n\n if __debug__:\n for point in point_list:\n assert isinstance(point, (tuple, list)), type(point)\n assert len(point) == 2, len(point)\n assert isinstance(point[0], (int, float)), type(point[0])\n assert isinstance(point[1], (int, float)), type(point[1])\n\n assert isinstance(line_width, (int, float)), type(line_width)\n assert line_width > 0, line_width\n\n assert isinstance(line_color, str), type(line_color)\n\n if len(point_list) == 1:\n return\n\n pygamecolor = _simpleguicolor_to_pygamecolor(line_color)\n\n point_list_rounded = [_pos_round(point) for point in point_list]\n\n del point_list\n\n line_width = int(round(line_width))\n\n if pygamecolor.a == 255: # without alpha\n pygame.draw.lines(self._pygame_surface, pygamecolor, False,\n point_list_rounded, line_width)\n elif pygamecolor.a > 0: # with alpha (not null)\n s_alpha = pygame.surface.Surface((self._width, self._height), # pylint: disable=too-many-function-args # noqa\n pygame.SRCALPHA) # pylint: disable=no-member # noqa\n\n pygame.draw.lines(s_alpha, pygamecolor, False,\n point_list_rounded, line_width)\n\n self._pygame_surface.blit(s_alpha, (0, 0))\n\n def draw_text(self, # pylint: disable=too-many-arguments\n text, point,\n font_size, font_color,\n font_face='serif',\n _font_size_coef=3 / 4):\n # type: (str, Sequence[Union[int, float]], Union[int, float], str, str, Union[int, float]) -> None # noqa\n \"\"\"\n Draw the `text` string at the position `point`.\n\n (`point[0]` is the left of the text,\n `point[1]` is the bottom of the text.)\n\n If correponding font in Pygame is not founded,\n then use the default `pygame.font.Font`.\n\n `_font_size_coef` is used to adjust the vertical positioning.\n **(This paramater is not available in SimpleGUI of CodeSkulptor.)**\n\n :warning: This method can't draw multiline text.\n\n To draw multiline text, see `simplegui_lib_draw.draw_text_multi()`_ .\n\n .. 
_`simplegui_lib_draw.draw_text_multi()`: ../simplegui_lib_draw.html#SimpleGUICS2Pygame.simplegui_lib_draw.draw_text_multi\n\n :param text: str\n :param point: (int or float, int or float)\n or [int or float, int or float]\n :param font_size: (int or float) >= 0\n :param font_color: str\n :param font_face: str == 'monospace', 'sans-serif', 'serif'\n :param _font_size_coef: int or float\n\n :raise: ValueError if text contains unprintable whitespace character\n\n **(Alpha color channel don't work!!!)**\n \"\"\" # noqa\n assert isinstance(text, str), type(text)\n\n assert isinstance(point, (tuple, list)), type(point)\n assert len(point) == 2, len(point)\n assert isinstance(point[0], (int, float)), type(point[0])\n assert isinstance(point[1], (int, float)), type(point[1])\n\n assert isinstance(font_size, (int, float)), type(font_size)\n assert font_size >= 0, font_size\n\n assert isinstance(font_color, str), type(font_color)\n\n assert isinstance(font_face, str), type(font_face)\n assert font_face in _SIMPLEGUIFONTFACE_TO_PYGAMEFONTNAME, font_face\n\n assert isinstance(_font_size_coef, (int, float)), type(_font_size_coef)\n\n if text == '':\n return\n\n if _RE_UNPRINTABLE_WHITESPACE_CHAR.search(text):\n raise ValueError('text may not contain non-printing characters')\n\n pygamecolor = _simpleguicolor_to_pygamecolor(font_color)\n font_size = int(round(font_size))\n\n if (pygamecolor.a > 0) and (font_size > 0):\n pygame_surface_text = _simpleguifontface_to_pygamefont(\n font_face, font_size).render(text, True, pygamecolor)\n\n # if pygamecolor.a == 255: # without alpha\n self._pygame_surface.blit(\n pygame_surface_text,\n (point[0],\n (point[1] -\n pygame_surface_text.get_height() * _font_size_coef)))\n # else: # with alpha (not null)\n # # Don't work!!!\n # s_alpha = pygame.surface.Surface((pygame_surface_text.get_width(), # noqa\n # pygame_surface_text.get_height()), # noqa\n # pygame.SRCALPHA)\n # s_alpha.blit(pygame_surface_text, (0, 0))\n # self._pygame_surface.blit(\n # s_alpha,\n # (point[0],\n # (point[1] -\n # pygame_surface_text.get_height() * _font_size_coef)))\n\n\n#\n# SimpleGUI function\n####################\ndef create_invisible_canvas(width, height): # type: (int, int) -> Canvas\n \"\"\"\n NOT IMPLEMENTED!\n (Return a \"weak\" `Canvas`.)\n\n (Available in SimpleGUI of CodeSkulptor\n but *not in CodeSkulptor documentation*!)\n\n :param width: int >= 0\n :param height: int >= 0\n\n :return: Canvas\n \"\"\"\n assert isinstance(width, int), type(width)\n assert width >= 0, width\n\n assert isinstance(height, int), type(height)\n assert height >= 0, height\n\n return Canvas(None, width, height)\n","sub_path":"venv/lib/python3.8/site-packages/SimpleGUICS2Pygame/simpleguics2pygame/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":37054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"214790770","text":"\"\"\"\nversion : 1.0\ndescription : This file includes functions which are often used in plotting\n\n\"\"\"\nfrom ROOT import TH1D, TLatex, TPaveStats, TLine, TCanvas, gPad, gStyle\nimport math\n\n\ndef drawLatex(x=0.5, y=0.50, text=\"text\", size=0.035, font=132, color=1):\n tl = TLatex(x, y, text)\n tl.SetTextSize(size)\n tl.SetTextFont(font)\n tl.SetTextColor(color)\n tl.DrawLatexNDC(x, y, text)\n\n\ndef histo(xmin=0.0, xmax=10.0, ymin=1.0, ymax=10.0, xtitle=\"X\", ytitle=\"Y\"):\n haxis = TH1D(\"haxis\", \"\", 100, xmin, xmax)\n haxis.SetName(xtitle + ytitle)\n haxis.SetStats(0)\n haxis.SetLineColor(10)\n y = 
haxis.GetYaxis()\n y.SetRangeUser(ymin, ymax)\n y.SetTitle(ytitle)\n y.CenterTitle()\n y.SetTitleOffset(1.)\n y.SetTitleSize(0.04)\n # y.SetTitleFont()\n x = haxis.GetXaxis()\n x.SetTitle(xtitle)\n x.SetTitleOffset(.80)\n x.SetTitleSize(0.05)\n x.CenterTitle()\n return haxis\n\n\ndef frange(start, stop, step):\n i = start\n while i < stop:\n yield i\n i += step\n\n\ndef getPz(h, xmin, xmax, ymin, ymax, precision=1E-3):\n bin1 = h.GetXaxis().FindBin(xmin + precision)\n bin2 = h.GetXaxis().FindBin(xmax - precision)\n h.GetXaxis().SetRange(bin1, bin2)\n bin1 = h.GetYaxis().FindBin(ymin + precision)\n bin2 = h.GetYaxis().FindBin(ymax - precision)\n h.GetYaxis().SetRange(bin1, bin2)\n h2 = h.Project3D(\"z\")\n return h2\n\n\ndef getavg(h1, h2, option=1):\n '''\n\n :param h1: \n :param h2: \n :param option:1 arithmetical average;2 geo average;\n\n :return: \n '''\n h3 = TH1D()\n nbins1 = h1.GetNbinsX()\n nbins2 = h2.GetNbinsX()\n xmin1 = h1.GetXaxis().GetXmin()\n xmin2 = h2.GetXaxis().GetXmin()\n xmax1 = h1.GetXaxis().GetXmax()\n xmax2 = h2.GetXaxis().GetXmax()\n if (nbins1 == nbins2) and (xmin1 == xmin2) and (xmax1 == xmax2):\n h3.SetBins(nbins1, xmin1, xmax1)\n if (option == 2):\n for i in range(0, nbins1):\n a = h1.GetBinContent(i + 1)\n b = h2.GetBinContent(i + 1)\n c = 2 * math.sqrt(a * b)\n c_error = math.sqrt(a + b)\n h3.SetBinContent(i + 1, c)\n h3.SetBinError(i + 1, c_error)\n if (option == 1):\n h3.Add(h1, h2)\n\n return h3\n\n\ndef pave(x1, y1, x2, y2, *args, **text_style):\n pv = TPaveStats(x1, y1, x2, y2, \"brNDC\")\n pv.SetName(\"stats\")\n pv.SetOptStat(1101)\n pv.SetBorderSize(0)\n pv.SetFillColor(10)\n pv.SetTextAlign(11)\n pv.SetLineColor(0)\n for i in args:\n pv.AddText(i)\n t_style = {'size': 0.035, 'font': 132, 'color': 1}\n if (text_style):\n t_style.update(text_style)\n pv.SetTextSize(t_style['size'])\n pv.SetTextFont(t_style['font'])\n pv.SetTextColor(t_style['color'])\n return pv\n\n\ndef fit_pave(x1, y1, x2, y2, fun, **text_style):\n chh1 = \"#chi^{{2}} / ndf = {0:.2f} / {1}\".format(\n fun.GetChisquare(), fun.GetNDF())\n chh5 = \"N = {:.2E} #pm {:.2E}\".format(\n fun.GetParameter(0), fun.GetParError(0))\n chh6 = \"#mu = {:.3f} #pm {:.3f}\".format(\n fun.GetParameter(1), fun.GetParError(1))\n chh7 = \"#sigma = {:.3f} #pm {:.3f}\".format(\n fun.GetParameter(2), fun.GetParError(2))\n return pave(x1, y1, x2, y2, chh1, chh5, chh6, chh7, **text_style)\n\n\ndef drawLine(xlow, ylow, xup, yup, lineWidth, lineStyle, lineColor):\n L1 = TLine(xlow, ylow, xup, yup)\n L1.SetLineWidth(lineWidth)\n L1.SetLineColor(lineColor)\n L1.SetLineStyle(lineStyle)\n L1.Draw(\"same\")\n return L1\n\n\ndef setpad(left, right, top, bottom):\n gPad.SetFillColor(10)\n gPad.SetBorderMode(0)\n gPad.SetBorderSize(0)\n gPad.SetFrameFillColor(10)\n gPad.SetFrameBorderMode(0)\n gPad.SetFrameBorderSize(0)\n gPad.SetLeftMargin(left)\n gPad.SetRightMargin(right)\n gPad.SetTopMargin(top)\n gPad.SetBottomMargin(bottom)\n gPad.SetGridx(0)\n gPad.SetGridy(0)\n gStyle.SetOptStat(0)\n\n\ndef eff_err(m, N):\n v_e = (m + 1.) * (m + 2.) / ((N + 2.) * (N + 3.)) - math.pow((m + 1.) 
/ (N + 2.), 2)\n    return math.sqrt(v_e)\n\n\ndef shadow_hist_range(hist, xlow, xhigh, fill_style, fill_color):\n    # clone the input histogram so the shaded copy keeps its binning and\n    # contents (a bare TH1D() here would yield an empty histogram)\n    h = hist.Clone(hist.GetName() + \"_shadow\")\n    x = h.GetXaxis()\n    bin1 = x.FindBin(xlow)\n    bin2 = x.FindBin(xhigh)\n    x.SetRange(bin1, bin2)\n    h.SetFillColor(fill_color)\n    h.SetFillStyle(fill_style)\n    return h\n\n\nif __name__ == '__main__':\n    c = TCanvas()\n    haxis = histo()\n    haxis.Draw()\n    l = drawLine(1, 2, 5, 5, 1, 1, 632)\n    a = 2\n    c.SaveAs(\"xxx.png\")\n","sub_path":"usefun.py","file_name":"usefun.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"350680397","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Test #01.\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\n\ndef main():\n    \"\"\"Test #01.\"\"\"\n    app = QtGui.QApplication(sys.argv)\n\n    widget = QtGui.QWidget()\n    widget.resize(250, 150)\n    widget.setWindowTitle('simple')\n    widget.show()\n\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    main()  # was defined but never invoked\n","sub_path":"tests/t_01.py","file_name":"t_01.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"243292411","text":"# -*- coding: utf-8 -*-\nimport ctypes\nfrom ctypes.util import find_library\nimport pathlib\n\nfrom qlazy.Stabilizer import Stabilizer\nfrom qlazy.error import *\nfrom qlazy.config import *\nfrom qlazy.util import *\n\nlib = ctypes.CDLL(str(pathlib.Path(__file__).with_name('libqlz.' + get_lib_ext())))\nlibc = ctypes.CDLL(find_library(\"c\"), mode=ctypes.RTLD_GLOBAL)\n\ndef stabilizer_init(gene_num=None, qubit_num=None, seed=None):\n\n    lib.init_qlazy(ctypes.c_int(seed))\n\n    stab = None\n    c_stab = ctypes.c_void_p(stab)\n\n    lib.stabilizer_init.restype = ctypes.c_int\n    lib.stabilizer_init.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,\n                                    ctypes.POINTER(ctypes.c_void_p)]\n    ret = lib.stabilizer_init(ctypes.c_int(gene_num), ctypes.c_int(qubit_num),\n                              ctypes.c_int(seed), c_stab)\n\n    if ret == FALSE:\n        raise Stabilizer_Error_Initialize()\n\n    # out = ctypes.cast(c_stab.value, ctypes.POINTER(Stabilizer))\n    # return out.contents\n    return c_stab\n\ndef stabilizer_copy(sb):\n\n    try:\n        stab = None\n        c_stab = ctypes.c_void_p(stab)\n\n        lib.stabilizer_copy.restype = ctypes.c_int\n        lib.stabilizer_copy.argtypes = [ctypes.POINTER(Stabilizer),\n                                        ctypes.POINTER(ctypes.c_void_p)]\n        ret = lib.stabilizer_copy(ctypes.byref(sb), c_stab)\n\n        if ret == FALSE:\n            raise Stabilizer_Error_Clone()\n\n        # out = ctypes.cast(c_stab.value, ctypes.POINTER(Stabilizer))\n        # return out.contents\n        return c_stab\n\n    except Exception:\n        raise Stabilizer_Error_Clone()\n\ndef stabilizer_set_pauli_fac(sb, gene_id, pauli_fac):\n\n    lib.stabilizer_set_pauli_fac.restype = ctypes.c_int\n    lib.stabilizer_set_pauli_fac.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n                                             ctypes.c_int]\n    ret = lib.stabilizer_set_pauli_fac(ctypes.byref(sb), ctypes.c_int(gene_id),\n                                       ctypes.c_int(pauli_fac))\n\n    if ret == FALSE:\n        raise Stabilizer_Error_SetPauliFac()\n\n    return ret\n\ndef stabilizer_get_pauli_fac(sb, gene_id):\n\n    pauli_fac = REAL_PLUS\n    c_pauli_fac = ctypes.c_int(pauli_fac)\n\n    lib.stabilizer_get_pauli_fac.restype = ctypes.c_int\n    lib.stabilizer_get_pauli_fac.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n                                             ctypes.POINTER(ctypes.c_int)]\n    ret = lib.stabilizer_get_pauli_fac(ctypes.byref(sb), ctypes.c_int(gene_id),\n                                       ctypes.byref(c_pauli_fac))\n\n    if ret == FALSE:\n        raise Stabilizer_Error_GetPauliFac()\n\n    pauli_fac = 
c_pauli_fac.value\n\n return pauli_fac\n\ndef stabilizer_set_pauli_op(sb, gene_id=None, qubit_id=None, pauli_op=None):\n\n lib.stabilizer_set_pauli_op.restype = ctypes.c_int\n lib.stabilizer_set_pauli_op.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n ctypes.c_int, ctypes.c_int]\n ret = lib.stabilizer_set_pauli_op(ctypes.byref(sb), ctypes.c_int(gene_id),\n ctypes.c_int(qubit_id), ctypes.c_int(pauli_op))\n\n if ret == FALSE:\n raise Stabilizer_Error_SetPauliOp()\n\n return ret\n\ndef stabilizer_get_pauli_op(sb, gene_id=None, qubit_id=None):\n\n pauli_op = IDENTITY\n c_pauli_op = ctypes.c_int(pauli_op)\n\n lib.stabilizer_get_pauli_op.restype = ctypes.c_int\n lib.stabilizer_get_pauli_op.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n ctypes.c_int, ctypes.POINTER(ctypes.c_int)]\n ret = lib.stabilizer_get_pauli_op(ctypes.byref(sb), ctypes.c_int(gene_id),\n ctypes.c_int(qubit_id), ctypes.byref(c_pauli_op))\n\n if ret == FALSE:\n raise Stabilizer_Error_GetPauliOp()\n\n pauli_op = c_pauli_op.value\n\n return pauli_op\n\ndef stabilizer_operate_qgate(sb, kind=None, q0=None, q1=None):\n\n lib.stabilizer_operate_qgate.restype = ctypes.c_int\n lib.stabilizer_operate_qgate.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n ctypes.c_int, ctypes.c_int]\n ret = lib.stabilizer_operate_qgate(ctypes.byref(sb), ctypes.c_int(kind),\n ctypes.c_int(q0), ctypes.c_int(q1))\n \n if ret == FALSE:\n raise Stabilizer_Error_OperateQgate()\n \ndef stabilizer_get_rank(sb):\n\n rank = 0\n c_rank = ctypes.c_int(rank)\n\n lib.stabilizer_get_rank.restype = ctypes.c_int\n lib.stabilizer_get_rank.argtypes = [ctypes.POINTER(Stabilizer),\n ctypes.POINTER(ctypes.c_int)]\n ret = lib.stabilizer_get_rank(ctypes.byref(sb), ctypes.byref(c_rank))\n \n if ret == FALSE:\n raise Stabilizer_Error_GetRank()\n\n rank = c_rank.value\n\n return rank\n \ndef stabilizer_measure(sb, q=None):\n\n prob = [0.0, 0.0]\n DoubleArray = ctypes.c_double * 2\n c_prob = DoubleArray(*prob)\n\n mval = 0\n c_mval = ctypes.c_int(mval)\n\n lib.stabilizer_measure.restype = ctypes.c_int\n lib.stabilizer_measure.argtypes = [ctypes.POINTER(Stabilizer), ctypes.c_int,\n DoubleArray, ctypes.POINTER(ctypes.c_int)]\n ret = lib.stabilizer_measure(ctypes.byref(sb), ctypes.c_int(q),\n c_prob, ctypes.byref(c_mval))\n \n if ret == FALSE:\n raise Stabilizer_Error_Measure()\n\n mval = c_mval.value\n\n return mval\n \ndef stabilizer_free(stab):\n\n lib.stabilizer_free.argtypes = [ctypes.POINTER(Stabilizer)]\n lib.stabilizer_free(ctypes.byref(stab))\n","sub_path":"qlazy/lib/stabilizer_c.py","file_name":"stabilizer_c.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"77407387","text":"\n# proDEX is a probabilistic DEX methodology tool\n# written in Python\n# (subsumes the special research tools: pyDEXprob and pyDEXnum)\n# [Martin Znidarsic]\n# + mini adaptation for Python3 compliance\n\nclass Node:\n # class of proDEX node\n def __init__(self):\n self.name = None\n self.type = \"discrete\" # can be: \"discrete\", \"continuous\" or \"None\" for unknown\n self.values = [] # list of possible values the node can take, or interval [lo, hi] for continuous\n self.ordered = None # can be: \"True\", \"False\" or \"None\" for unknown\n self.parents = []\n self.children = []\n self.tableFunction = []\n self.generalFunction = None # General function - for parents (disc. or cont.) 
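# --- Added illustrative sketch (not part of the original files). ---
# The qlazy wrappers above all follow the same ctypes recipe: declare
# restype/argtypes on the foreign function, then call it with converted
# arguments. The identical pattern against libc's strlen (assumes a
# Unix-like platform where find_library("c") resolves):
import ctypes
from ctypes.util import find_library

libc = ctypes.CDLL(find_library("c"))
libc.strlen.restype = ctypes.c_size_t       # declare the return type
libc.strlen.argtypes = [ctypes.c_char_p]    # declare the argument types
assert libc.strlen(b"stabilizer") == 10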
of continuous only\n\n    def setName(self, name):\n        self.name = name\n\n    def setType(self, type):\n        self.type = type\n\n    def setValues(self, valuesList):\n        self.values = valuesList\n\n    def setParent(self, parent):\n        self.parents.append(parent)\n\n    def addChild(self, child):\n        self.children.append(child)\n\n    def addFunctionRow(self, row):\n        # row is formed as [[val1, val2,.., valn], {classDist}, confidence(float)]\n        ##check if row is in the right format\n        ##check if the row is not already there\n        self.tableFunction.append(row)\n\n    def printFunction(self):\n        for i in range(len(self.tableFunction)):\n            rowString = \"\"\n            for j in range(len(self.tableFunction[i])):\n                rowString = rowString + \"%s \\t\" % self.tableFunction[i][j]\n            print(rowString)\n\n\nclass Atrib:\n    # class of DEX attribute\n    def __init__(self):\n        self.name = None\n        self.type = \"discrete\" # can be: \"discrete\", \"continuous\" or \"None\" for unknown\n        self.values = [] # list of possible values the attribute can take, or interval [lo, hi] for continuous\n        self.parents = []\n        self.ordered = None # can be: \"True\", \"False\" or \"None\" for unknown\n\n    def setName(self, name):\n        self.name = name\n\n    def setType(self, type):\n        self.type = type\n\n    def setValues(self, valuesList):\n        self.values = valuesList\n\n    def setParent(self, parent):\n        self.parents.append(parent)\n\n\n\n\n\ndef getAtribs(somenode):\n    # collects all the Atrib-s below a node into a list and returns it\n    list = []\n    if isinstance(somenode, Node):\n        for c in somenode.children:\n            for el in getAtribs(c):\n                if el not in list:\n                    list.append(el)\n    elif isinstance(somenode, Atrib):\n        list.append(somenode)\n    return list\n\n\ndef getNodes(somenode):\n    # collects all the Node-s below a node into a list and returns it\n    # -- this function was added on 21.9.2004 --\n    list = []\n    if isinstance(somenode, Node):\n        list.append(somenode)\n        for c in somenode.children:\n            for el in getNodes(c):\n                if el not in list:\n                    list.append(el)\n    return list\n\n\ndef getValues(node):\n    return node.values\n\n\ndef getAllValues(someone):\n    # returns a list of values-lists for all attributes under someone\n    atribs = getAtribs(someone)\n    listOfValues = []\n    for a in atribs:\n        listOfValues.append(a.values)\n    return listOfValues\n\n\ndef permute(Lists):\n    # returns a list of permutations given list of lists\n    import operator\n    if Lists:\n        result = [[I] for I in Lists[0]]  # was map(...); Python 3 map has no len()\n        for values in Lists[1:]:  # renamed from 'list' to avoid shadowing the builtin\n            curr = []\n            for item in values:\n                new = list(map(operator.add, result, [[item]] * len(result)))  # list() for Python 3\n                curr[len(curr):] = new\n            result = curr\n    else:\n        result = []\n    return result\n\n\ndef getAllVariants(someone):\n    valsList = getAllValues(someone)\n    valsList.reverse() # because permute gives them in wrong order\n    per = permute(valsList)\n    for i in range(len(per)):\n        per[i].reverse() # because permute gives them in TWICE wrong order\n    valsList.reverse()\n    return per\n\n\ndef printAllVariants(someone):\n    variants = getAllVariants(someone)\n    for i in range(len(variants)):\n        varstring = \"\"\n        for j in range(len(variants[i])):\n            varstring = varstring + \"%s\\t\" % variants[i][j]\n        print(varstring)\n\n\ndef getSituation(node):\n    # situation can be:\n    # 1 (discrete parent of discrete children),\n    # 2 (discrete parent of a single continuous child - discretization)\n    # or 3 (continuous parent of continuous children)\n    if node.type==\"discrete\":\n        if len(node.children)==1: # single child\n            if node.children[0].type==\"discrete\":\n                return 1\n            elif 
node.children[0].type==\"continuous\":\n return 2\n else: # multiple children\n for c in node.children:\n if c.type==\"continuous\":\n print(\"Error: cont. children of disc. parent!\")\n return None\n return 1\n elif node.type==\"continuous\":\n if len(node.children)==1: # single child\n if node.children[0].type==\"discrete\":\n print(\"Error: cont. parent has a discrete child!\")\n return None\n elif node.children[0].type==\"continuous\":\n return 3\n else: # multiple children\n for c in node.children:\n if c.type==\"discrete\":\n print(\"Error: disc. children of cont. parent!\")\n return None\n return 3\n\n\n\ndef classify(variant, root):\n # returnes the class of variant given the root of the model\n returnValue = None\n if isinstance(root, Atrib): # stopping criterion\n if root in variant.keys():\n returnValue = variant[root]\n else:\n print(\"Error: attribute not in the variant dictionary!\")\n print(\"Attribute's name: \", root.name)\n else:\n computedVariant={}\n for c in root.children:\n if c.type == \"continuous\" and isinstance(c, Atrib): # only one continuous\n returnValue = root.generalFunction(variant[c]) #discretize child\n elif c.type == \"continuous\" and isinstance(c, Node): # top of cont. tree\n print(\"CLASSIFICATION FOR CONT. SUBTREES IS NOT MADE YET!\")\n returnValue = root.generalFunction(classify(variant,c)) #make classif. for continuous!\n else:\n if c in variant.keys(): # if child value (dist.) already given, do not calculate.\n computedVariant[c] = variant[c]\n else:\n computedVariant[c] = classify(variant, c)\n # this way all the children values get to computedVariant dictionary\n if returnValue == None: # so, if not continuous..\n # now classify according to tableFunction:\n if type(root.tableFunction[0][1]) == str: #crisp rules - preverjem kar naivno na podlagi prvega pravila\n for rule in root.tableFunction:\n combinationDict = rule[0]\n match = True\n for k in combinationDict.keys():\n if combinationDict[k] != computedVariant[k]:\n match = False\n if match == True:\n returnValue = rule[1]\n else: #probabilistic rules\n conFlag = False\n resultDict={}\n for v in root.values:\n resultDict[v]=0.0\n resultDict[\"CONFIDENCE\"] = 0.0\n for rule in root.tableFunction:\n combinationDict = rule[0]\n distributionDict = rule[1]\n if \"CONFIDENCE\" in distributionDict.keys():\n conFlag = True\n f = 1 #factor - probability of rule\n childrenConfidence = 1.0\n for child in root.children:\n ruleValue = combinationDict[child] # string: value of a certain child in the rule\n thisChildsDistribution = computedVariant[child]\n if f != 0.0: # if f=0.0, it has no sense to calculate further \n f = f * thisChildsDistribution[ruleValue]\n if (\"CONFIDENCE\" in thisChildsDistribution.keys()) and (thisChildsDistribution[\"CONFIDENCE\"]!=None):\n childrenConfidence = childrenConfidence * thisChildsDistribution[\"CONFIDENCE\"]\n for k in resultDict.keys():\n if k==\"CONFIDENCE\":\n if \"CONFIDENCE\" in distributionDict.keys():\n #print \"Pred mnozenjem je childrenConfidence: \", childrenConfidence\n if f != 0.0: # if f=0.0, it has no sense to calculate further \n resultDict[k] = resultDict[k] + (f * childrenConfidence * distributionDict[\"CONFIDENCE\"])\n else:\n if f != 0.0: # if f=0.0, it has no sense to calculate further \n resultDict[k] = resultDict[k] + (f * distributionDict[k])\n if conFlag == False:\n resultDict[\"CONFIDENCE\"] = None\n returnValue = resultDict\n return returnValue\n\n\n\ndef getAttributeNumber(somenode):\n # returns the number of attributes 'somenode' covers\n 
returnValue = None\n    if isinstance(somenode, Atrib):\n        returnValue = 1\n    elif isinstance(somenode, Node):\n        sum = 0\n        for c in somenode.children:\n            sum = sum + getAttributeNumber(c)\n        returnValue = sum\n    else: print(\"Exception: getAttributeNumber called with unknown object\")\n    return returnValue\n\n\ndef getAllVariantsWithClass(someone):\n    # gives \"data from the model\" in a variantClass list\n    variants = getAllVariants(someone)\n    for i in range(len(variants)):\n        classValue = classify(variants[i], someone)\n        variants[i].append(classValue)\n    return variants\n\n\ndef printAllVariantsWithClass(modelRoot):\n    # produces \"data from the model\" in tab delimited format\n    variants = getAllVariants(modelRoot)\n    for i in range(len(variants)):\n        varstring = \"\"\n        classValue = classify(variants[i], modelRoot)\n        for j in range(len(variants[i])):\n            varstring = varstring + \"%s\\t\" % variants[i][j]\n        varstring = varstring + str(classValue)\n        print(varstring)\n\n# it is hard to test how valid the classification result is\n# since the result is a distribution and not a crisp value\n# possibility : difference in distribution (sum of real diff. at each value - then relative?)\ndef classifyDataFile(filename, modelRoot):\n    # classifies data from given file with given model and provides CA\n    dataFile = open(filename, 'r')\n    entriesNumber = 0 #counts the rows\n    difSum = 0 #overall differential sum\n    row = dataFile.readline()\n    while row:\n        cvariant = []\n        el = ''\n        for i in range(len(row)):\n            if row[i]=='\\t': cvariant.append(el); el=''\n            ###elif row[i]==' ': cvariant.append(el)\n            elif row[i]=='\\n': cvariant.append(el)\n            else : el = el + row[i]\n        variant = cvariant[:-1] # we strip the class off\n        cstring = cvariant[-1:][0]\n        classValue = eval(cstring) # the true class distribution (a dict)\n        resultValue = classify(variant, modelRoot)\n        entriesNumber = entriesNumber + 1\n        for k in classValue.keys():  # classValue is already a dict; no second eval\n            difSum = difSum + abs(classValue[k] - resultValue[k])\n        row = dataFile.readline()\n    dataFile.close()\n    relativedifSum = float(difSum) / entriesNumber\n    return 1-relativedifSum\n\n\ndef classifyCrispDataFile(filename, modelRoot):\n    # classifies data from given file with given model and provides CA\n    dataFile = open(filename, 'r')\n    entriesNumber = 0 #counts the rows\n    difSum = 0 #overall differential sum\n    row = dataFile.readline()\n    while row:\n        cvariant = []\n        el = ''\n        for i in range(len(row)):\n            if row[i]=='\\t': cvariant.append(el); el=''\n            ###elif row[i]==' ': cvariant.append(el)\n            elif row[i]=='\\n': cvariant.append(el)\n            else : el = el + row[i]\n        variant = cvariant[:-1] # we strip the class off\n        cstring = cvariant[-1:][0] # class\n        resultValue = classify(variant, modelRoot)\n        entriesNumber = entriesNumber + 1\n        difSum = difSum + abs(1 - resultValue[cstring])\n        row = dataFile.readline()\n    dataFile.close()\n    relativedifSum = float(difSum) / entriesNumber\n    return 1-relativedifSum\n\n\n# with PROB we do not need monotonicity check\n##def monOKhandy(someone, clashingVariant2):\n\n##def monOK(someone, clashingVariant2):\n\n\ndef distDiff(d1, d2):\n    # distribution difference among distributions d1 and d2\n    diff = 0\n    for k in d1.keys():\n        diff = diff + abs(d1[k] - d2[k])\n    return diff\n\n\ndef maxKey(di):\n    max = 0\n    maxkey = None\n    for k in di.keys():\n        if di[k] > max:\n            max = di[k]\n            maxkey = k\n    return maxkey\n
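# --- Added illustrative sketch (not part of the original file). ---
# classifyDataFile above scores a model as 1 minus the (per-row averaged)
# absolute difference between true and predicted class distributions; the
# same metric for one pair of distributions:
def distribution_accuracy(true_dist, predicted_dist):
    diff = sum(abs(true_dist[k] - predicted_dist[k]) for k in true_dist)
    return 1.0 - diff

# e.g. distribution_accuracy({'low': 0.8, 'high': 0.2},
#                            {'low': 0.7, 'high': 0.3})  ->  0.8 (approximately)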
# Node changing part - for automatic update\ndef changeNode(node, newVariant):\n    # changes the right variant in 'node' to 'newVariant'\n    # newVariant CANNOT be flat\n    if len(node.tableFunction[0]) == len(newVariant):\n        # then find the right row and change it\n        print(len(node.tableFunction))\n        for f in node.tableFunction:\n            if f[:-1] == newVariant[:-1]:\n                #f = newVariant ###doesn't work\n                node.tableFunction.remove(f)\n                node.tableFunction.append(newVariant)\n\n\n\n\ndef updateFromFile(modelRoot, newDataFile, trust=0.1):\n    # updates the old model (given with \"modelRoot\")\n    # according to new data (given with \"newDataFile\" tabulated file)\n    # Procedure changes old model !!! Only in RAM of course :)\n    # As a result it prints all table functions of the new model\n    dataFile = open(newDataFile, 'r')\n    row = dataFile.readline()\n    while row:\n        # <-parsing->\n        cvariant = []\n        el = ''\n        for i in range(len(row)):\n            if row[i]=='\\t': cvariant.append(el); el=''\n            elif row[i]=='\\n': cvariant.append(el)\n            else : el = el + row[i]\n        # cvariant now contains the variant+class from current row in new data file\n        # <-parsing->\n        clist=[]\n        update(modelRoot, modelRoot, cvariant, clist, trust, dFile=newDataFile)\n        row = dataFile.readline()\n    dataFile.close()\n    print(\"New table functions are:\")\n    print(modelRoot.name, \" :\")\n    for t in range(len(modelRoot.tableFunction)):\n        print(modelRoot.tableFunction[t])\n    print()\n    for i in range(len(modelRoot.children)):\n        print((modelRoot.children[i]).name)  # Python 3 print calls (were Python 2 statements)\n        for j in range(len((modelRoot.children[i]).tableFunction)):\n            print((modelRoot.children[i]).tableFunction[j])\n        print()\n    print(\"UPDATE FINISHED.\")\n\n\ndef update(modelRoot, node, newVariant, changesList, trust=0.1, dFile=None):\n    # updates the node (step 1) and children (step 2)\n    # newVariant can be flat or not\n    # <--getting the goal distribution-->\n    distC = classify(newVariant[:-1], node)\n    actualClass = newVariant[-1:][0]\n    goalDist = {}\n    for k in distC.keys():\n        if k == actualClass:\n            goalDist[k] = float(distC[k] + trust) / (1+trust)\n        else:\n            goalDist[k] = float(distC[k]) / (1+trust)\n    # <--getting the goal distribution-->\n\n    # <--finding node children to amplify-->\n    m = max(map(len, map(getValues, node.children))) #max ValuesCount in children\n    # we seek m most similar distributions\n\n    closestChildrenVals = []\n    for f in node.tableFunction:\n        closestChildrenVals.append((distDiff(f[-1:][0], goalDist), f[:-1])) #only children f[:-1]\n    closestChildrenVals.sort()\n    closestChildrenVals = closestChildrenVals[:m] #we pick only the closest m\n\n    # closestChildrenVals = [(distDiff1, [v1,v2]), (distDiff2, [v1',v2']), (distDiff3, [v1'',v2''])]\n    # - before it was only = [val1, val2]\n    # <--finding node children to amplify-->\n\n    if len(node.tableFunction[0]) == len(newVariant):\n        #variant matches the node\n        #<--step 1-->\n        for f in node.tableFunction:\n            if f[:-1] == newVariant[:-1]: #all but class matches\n                rowBefore = f[:] #copy list contents\n                rowBefore[-1:][0] = {}\n                dicti = f[-1]\n                rowBefore[-1] = dicti.copy()\n                print(rowBefore)\n                oldDict = f[-1] #old class dist. 
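# --- Added illustrative sketch (not part of the original file). ---
# The goal-distribution formula above in isolation: the observed class gets
# probability (p + trust) / (1 + trust), every other class p / (1 + trust),
# so the updated distribution still sums to 1.
def trust_update(dist, observed_class, trust=0.1):
    return {k: (v + trust if k == observed_class else v) / (1 + trust)
            for k, v in dist.items()}

# e.g. trust_update({'acc': 0.5, 'unacc': 0.5}, 'acc')
#      -> {'acc': 0.5454..., 'unacc': 0.4545...}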
dictionary\n for k in oldDict.keys():\n if k == newVariant[-1:][0]: #class name matches with one from data\n oldDict[k] = float(oldDict[k] + trust) / (1+trust)\n else:\n oldDict[k] = float(oldDict[k]) / (1+trust)\n rowAfter = f[:]\n rowAfter[-1:][0] = {}\n dicti2 = f[-1]\n rowAfter[-1] = dicti2.copy()\n if node.name != modelRoot.name:\n changesList.append( (node.name, rowBefore, rowAfter) ) \n #<--step 1-->\n #<--step 2-->\n # in this case there must be only atribs as children..no step 2\n for child in node.children:\n if isinstance(child, Node): print(\"CAUTION child node skipped!\")\n #<--step 2-->\n else:\n #variant is flat for the node\n # then we have to evaluate children first\n childValues = []\n childAtribs = []\n for c in node.children:\n atNum = getAttributeNumber(c)\n childDist = classify(newVariant[:atNum], c)\n childValues.append(maxKey(childDist)) #we append the most probable value\n childAtribs.append(newVariant[:atNum])\n newVariant = newVariant[atNum:]\n #<--step 1-->\n for f in node.tableFunction:\n if f[:-1] == childValues: #all but class matches\n rowBefore = f[:] #copy list contents\n rowBefore[-1:][0] = {}\n dicti = f[-1]\n rowBefore[-1] = dicti.copy()\n oldDict = f[-1] #old class dist. dictionary\n for k in oldDict.keys():\n if k == newVariant[-1:][0]: #class name matches with one from data\n oldDict[k] = float(oldDict[k] + trust) / (1+trust)\n else:\n oldDict[k] = float(oldDict[k]) / (1+trust)\n rowAfter = f[:]\n rowAfter[-1:][0] = {}\n dicti2 = f[-1]\n rowAfter[-1] = dicti2.copy()\n if node.name != modelRoot.name:\n changesList.append( (node.name, rowBefore, rowAfter) ) \n \n #<--step 1-->\n #<--step 2-->\n\n # FOR NOW - we promote all m similar ones\n # closestChildrenVals = [(distDiff1, [v1,v2]), (distDiff2, [v1',v2']), (distDiff3, [v1'',v2''])]\n # - before it was only = [val1, val2]\n bestCA = classifyCrispDataFile(dFile, modelRoot)\n bestCCV = None\n for j in range(len(closestChildrenVals)):\n ccv = closestChildrenVals[j][1] #one variant to promote\n childrenChangesList = []\n for i in range(len(ccv)):\n## if neighbors(closestChildrenVals[i], childValues[i], node, i):\n # we merge atribs and class(to be emphasized)\n # NOW WE NEED CHANGE/UNCHANGE PROCEDURE..\n # A NEW UPDATE THAT TRACKS CHANGES!! different from normal update, because\n # this one must not track the change of action 1 and thus is not general enough.\n # although we could say if node = rootnode do not track...YES - Solution!\n # we need the root node as input anyway now.. 
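The rowBefore/rowAfter bookkeeping above copies the row with `f[:]` and then swaps in `dicti.copy()` because a plain slice is shallow: the trailing distribution dict would otherwise be shared with the row being mutated. A small demonstration of the difference:

```python
import copy

row = ["low", "high", {"good": 0.6, "bad": 0.4}]  # invented table-function row

shallow = row[:]             # new list, but it shares the same dict object
deep = copy.deepcopy(row)    # fully independent snapshot

row[-1]["good"] = 0.9
print(shallow[-1]["good"])   # 0.9 -- the shallow copy sees the mutation
print(deep[-1]["good"])      # 0.6 -- the deep copy kept the "before" state
```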
\n childVariant = childAtribs[i] + [ccv[i]]\n thisOne=[]\n update(modelRoot, node.children[i], childVariant, thisOne, trust, dFile)\n childrenChangesList = childrenChangesList + thisOne \n ca = classifyCrispDataFile(dFile, modelRoot)\n if ca > bestCA:\n bestCCV = ccv\n bestCA = ca\n unchange(node, childrenChangesList)\n # now make only the best promotion\n if bestCCV != None:\n for ind in range(len(bestCCV)):\n childVariant = childAtribs[ind] + [bestCCV[ind]]\n thisOne = []\n update(modelRoot, node.children[ind], childVariant, thisOne, trust, dFile)\n changesList = changesList + thisOne\n #<--step 2-->\n if node.name == modelRoot.name:\n print(\"since I am root (%s), here are the changes of updating below me:\" % node.name)\n for c in changesList:\n print(c)\n\n\n\ndef unchange(node, changesList):\n for c in node.children:\n for el in changesList:\n if c.name == el[0]: #name matches nodename\n for i in range(len(c.tableFunction)):\n if c.tableFunction[i] == el[2]: #row matches rowAfter\n c.tableFunction[i] = el[1] #then make it as before\n changesList.remove(el)\n\n\n\n\n\ndef neighbors(val1, val2, node, i):\n result = None\n child = node.children[i]\n for a in range(len(child.values)):\n if child.values[a] == val1:\n index1 = a\n if child.values[a] == val2:\n index2 = a\n if abs(index1 - index2) < 2:\n result = 1\n return result\n\n\n\n#### HISTORY ######\n\ndef readXMLmodel(fname):\n #reads a model from a DEX model file in PMML format and returns the models top node\n from xml.dom import minidom\n modelDict = {} # dictionary that holds the variables (Nodes and Atribs)\n xmldata = minidom.parse(fname)\n model = xmldata.childNodes[1]\n hierarchy = model.childNodes[2]\n miningschema = hierarchy.childNodes[0]\n #now we find out which are Nodes and which Atribs..and create them\n for el in miningschema.childNodes:\n elname = str(el.attributes.get('name').value)\n elusage = el.attributes.get('usageType').value\n if elusage=='predicted':\n modelDict[elname] = Node()\n modelDict[elname].setName(elname)\n elif elusage=='active':\n modelDict[elname] = Atrib()\n modelDict[elname].setName(elname)\n else : print(\"ERROR - unusual type of attribute in miningschema.\")\n #then we find relations among them and connect them accordingly\n att = hierarchy.childNodes[1]\n topNodeName = att.attributes.get('name').value\n recurSetRelations(att, modelDict)\n # -- works fine -- the structure of the model is built!\n #now to the values..\n datadictionary = model.childNodes[1]\n for datafield in datadictionary.childNodes:\n dfName = datafield.attributes.get(\"name\").value\n valuesList = []\n for item in datafield.childNodes:\n if item.localName == 'Value': # we ignore 'Extension'-s\n valuesList.append(str(item.attributes.get(\"value\").value)) #works only for 'u strings so far\n valuesList.reverse()\n modelDict[dfName].values = valuesList\n #now to the rule-based functions..\n functionlist = hierarchy.childNodes[2]\n for function in functionlist.childNodes:\n # 'function' is a function of one Node\n fuName = function.attributes.get(\"name\").value #fuName is the name of the Node with this function\n print(\"parsing node: %s\" % str(fuName))\n for rule in function.childNodes:\n # 'rule' is one rule in the 'function'\n ruleDict={} #dict. 
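neighbors() above leaves index1/index2 unbound (raising UnboundLocalError) whenever either value is missing from the child's value scale. A defensive variant with the same contract, sketched with list.index:

```python
def neighbors_safe(values, val1, val2):
    # Returns 1 when the two values coincide or sit next to each other on
    # the ordered scale, None otherwise -- including missing values.
    try:
        i1, i2 = values.index(val1), values.index(val2)
    except ValueError:
        return None
    return 1 if abs(i1 - i2) < 2 else None

print(neighbors_safe(["low", "med", "high"], "low", "med"))   # 1
print(neighbors_safe(["low", "med", "high"], "low", "high"))  # None
```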
of subnodes values (the first element of function row)\n condition = rule.childNodes[0]\n result = rule.childNodes[1]\n compoundpredicate = condition.childNodes[0]\n if compoundpredicate.attributes.get(\"booleanOperator\").value != \"and\": print(\"ERR: compound predicate in XML is not AND!\")\n childNum = len(modelDict[fuName].children)\n for i in range(childNum):\n try:\n if compoundpredicate.childNodes[i].attributes.get(\"field\").value == modelDict[fuName].children[i].name:\n ruleDict[modelDict[fuName].children[i]]= str(compoundpredicate.childNodes[i].attributes.get(\"value\").value)\n else:\n print(\"ERR: wrong child order in XML file!\")\n print(compoundpredicate.childNodes[i].attributes.get(\"field\").value , \" is not equal to \" , modelDict[fuName].children[i].name)\n except IndexError:\n print(\"OOPS! Children index problem.\")\n print(\"The problematic Node is : \", fuName)\n # now we add the last item in the list that represents the rule:\n valueDict={} #dict. of Node values (actually a prob. distribution)\n for val in modelDict[fuName].values:\n if val == str(result.childNodes[0].attributes.get(\"value\").value):\n valueDict[val] = 1.0\n else:\n valueDict[val] = 0.0\n # and add this to the utility function:\n modelDict[fuName].addFunctionRow([ruleDict, valueDict])\n return modelDict[topNodeName]\n\n\ndef recurSetRelations(it, modelDict):\n for c in it.childNodes:\n modelDict[it.attributes.get('name').value].addChild( modelDict[c.attributes.get('name').value] )\n modelDict[c.attributes.get('name').value].setParent( modelDict[it.attributes.get('name').value] )\n if isinstance(modelDict[c.attributes.get('name').value], Node):\n recurSetRelations(c, modelDict)\n \n\ndef revision(modelRoot, node, newVariant, changesList, cN=0.5, trust=0.1, dFile=None):\n if isinstance(node, Atrib): return # stopping criterion\n # revises the node with respect to CONFIDENCE ###(step 1) and children (step 2)\n # <--getting the goal distribution for step 1-->\n actualClass = newVariant[node.name]\n print(\"actualClass is:\", actualClass)\n # get childer evaluations and get the most probable combination of values\n mpCombDict = {} # {c1:[val1], c2:[val2.1,val2.2],.., cN:[valN]} most probable combination of children values given newVariant attributes\n childrenNVresults = {} # a dict. 
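readXMLmodel() above indexes model.childNodes, hierarchy.childNodes and so on positionally, which only works when the PMML file carries no whitespace text nodes; with pretty-printed XML, minidom interleaves text nodes between the elements. A quick demonstration plus the usual nodeType filter:

```python
from xml.dom import minidom

def element_children(node):
    # Keep only element nodes, so positional access survives pretty-printing.
    return [c for c in node.childNodes if c.nodeType == c.ELEMENT_NODE]

doc = minidom.parseString("<a>\n  <b/>\n  <c/>\n</a>")
root = doc.documentElement
print(len(root.childNodes))         # 5 -- two elements plus three text nodes
print(len(element_children(root)))  # 2 -- just <b/> and <c/>
```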
of children classification results for use later in 2.step\n for c in node.children:\n cdis = classify(newVariant, c)\n print(\"cdis of \" +str(c.name) + \" \" + str(cdis))\n childrenNVresults[c]=cdis # saving this for later use in 2.step\n maxP = -1.0\n mpCombDict[c]=[]\n for k in c.values: # must be in c.values to avoid CONFIDENCE\n if cdis[k] > maxP:\n maxP = cdis[k]\n if (maxP == -1.0):\n print(\"ERROR in revision: no max childProb value!\")\n else:\n for k in c.values:\n if cdis[k] == maxP:\n (mpCombDict[c]).append(k) # append the maxValue to the list of this child\n ##print \"ETO: maxProb je \" + str(maxP) + \" dosezejo jo pa tile: \"; print mpCombDict\n print(\"Revision will be made with cN=\" + str(cN))\n # <--make step 1-->\n for rule in node.tableFunction:\n combinationDict = rule[0]\n rightRule = True # only if all the values are the same, it is true\n for k in combinationDict.keys():\n if combinationDict[k] not in mpCombDict[k]:\n rightRule = False\n if rightRule == True: # do the revision step 1 on this one (should happen once in a table function)\n for key in (rule[1]).keys():\n if (key != \"CONFIDENCE\") and (key == actualClass):\n (rule[1])[key] = ((rule[1])[key] * (rule[1])[\"CONFIDENCE\"] * (1-cN) + 1 * cN * (1 - (rule[1])[\"CONFIDENCE\"])) / ((rule[1])[\"CONFIDENCE\"] * (1-cN) + cN * (1 - (rule[1])[\"CONFIDENCE\"]))\n elif (key != \"CONFIDENCE\") and (key != actualClass):\n (rule[1])[key] = ((rule[1])[key] * (rule[1])[\"CONFIDENCE\"] * (1-cN)) / ((rule[1])[\"CONFIDENCE\"] * (1-cN) + cN * (1 - (rule[1])[\"CONFIDENCE\"]))\n (rule[1])[\"CONFIDENCE\"] = ((rule[1])[\"CONFIDENCE\"] * (1-cN) + cN * (1 - (rule[1])[\"CONFIDENCE\"])) / ((rule[1])[\"CONFIDENCE\"] * (1-cN) + cN * (1 - (rule[1])[\"CONFIDENCE\"]) + (1 - (rule[1])[\"CONFIDENCE\"]) * (1-cN) )\n print\n print(\"REVISION MADE!\")\n print(\"--on node: \" + str(node.name) + \" forcing \" + str(actualClass) + \" into row \" + str(rule[0]))\n## print \"--new dist.: \" + str(rule[1])\n## print \"--new CONFIDENCE: \" + str((rule[1])[\"CONFIDENCE\"])\n## print\n # <--end step 1, make step 2-->\n # zdaj najdi kombinacijo ki v node.tableFunction najbolj zadene pravi class, dodaj to v newVariant in rekurzivno poklici revizijo\n highestProb = -1.0\n ## highestCombination = None - ne rabim vec, saj delam seznam\n for rule in node.tableFunction:\n distDict = rule[1]\n if distDict[actualClass] > highestProb:\n highestProb = distDict[actualClass]\n ## highestCombination = rule[0] - ne rabim vec, saj delam seznam\n # se en sprehod skozi da poberemo morebitne kombinacije ki so vse \"highestProb\"\n highestCombinationList = []\n for rule in node.tableFunction:\n distDict = rule[1]\n if distDict[actualClass] == highestProb:\n highestCombinationList.append(rule[0])\n print(\"Number of equaly high for \" + str(node.name) + \" is: \" + str(len(highestCombinationList)))\n # naslednji korak je izbira najmanj divergiranega od teh..\n # childrenNVresults = {child1:{\"val1\":#1, \"val2\":#2, CONFIDENCE:#3}, .., childN:{...}}\n maxWeight = -1.0\n for hcomb in highestCombinationList:\n weight = None\n for kx in hcomb.keys():\n actualValue = hcomb[kx] # actualValue is like \"low\"\n kxDict = childrenNVresults[kx]\n if weight == None: # first call\n weight = kxDict[actualValue]\n else:\n weight = weight * kxDict[actualValue]\n if weight > maxWeight:\n maxWeight = weight\n # now we have the maximum weight of all the combinations\n # another for-clause to pick all combinations (maybe more than one) with such a weight\n for hcomb in 
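The three assignments in revision()'s step 1 implement one confidence-weighted mixture: the stored distribution enters with weight CONFIDENCE*(1-cN) and the new crisp observation with weight cN*(1-CONFIDENCE). Factoring out the shared denominator makes the rule easier to read; this sketch mirrors the formulas above, evaluated on invented numbers:

```python
def revise_rule(dist, confidence, actual_class, cN=0.5):
    w_old = confidence * (1 - cN)        # weight of the stored distribution
    w_new = cN * (1 - confidence)        # weight of the new observation
    norm = w_old + w_new
    new_dist = {k: (p * w_old + (k == actual_class) * w_new) / norm
                for k, p in dist.items()}
    # CONFIDENCE update, the same expression as in revision() above
    new_conf = norm / (norm + (1 - confidence) * (1 - cN))
    return new_dist, new_conf

print(revise_rule({"low": 0.8, "high": 0.2}, confidence=0.6, actual_class="high"))
# ({'low': 0.48, 'high': 0.52}, 0.714...)
```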
highestCombinationList:\n weight = None\n for kx in hcomb.keys():\n actualValue = hcomb[kx] # actualValue is like \"low\"\n kxDict = childrenNVresults[kx]\n if weight == None: # first call\n weight = kxDict[actualValue]\n else:\n weight = weight * kxDict[actualValue]\n if weight == maxWeight:\n print(\"Of equal, this one:\" + str(hcomb) + \" has the highest weight of \" + str(weight) + \".\")\n for key in hcomb.keys():\n newVariant[key.name] = hcomb[key]\n print(\"calling children revision on \" + str(key.name) + \" with newvariant:\"+str(newVariant))\n revision(modelRoot, key, newVariant, changesList, cN, trust=0.1, dFile=None)\n # <--end step 2--> \n\n\ndef revisionFromFileBatch(modelRoot, newDataFile, cN=0.5, trust=0.1, quiet=False):\n # updates the old model (given with \"modelRoot\")\n # according to new data (given with \"newDataFile\" tabulated file) in a BATCH mode\n # Procedure changes old model !!! Only in RAM of course :)\n # As a result it prints all table functions of the new model\n import copy #because we need the deepcopy function\n import orange #because the new data are read as orange.ExampleTable (from file in Orange tab. format)\n # <-begin--save the original table functions->\n originalDict={}\n nodes = getNodes(modelRoot)\n for n in nodes:\n originalDict[n.name]=n.tableFunction\n safeOriginalCopy = copy.deepcopy(originalDict)\n # <-end--save the original table functions->\n if quiet==False: print(\"original functions:\"); print(safeOriginalCopy)\n # <-begin--make initial empty dict of differences->\n differences = {} #the initially empty (all zeros in distributions) table function dictionary\n differences = copy.deepcopy(originalDict)\n for k in differences.keys():\n tf = differences[k]\n for interList in tf:\n interDic = interList[-1:][0]\n for kk in interDic.keys():\n interDic[kk]=0.0\n # <-end--make initial empty dict of differences->\n if quiet==False: print(\"initial (empty) diferences:\"); print(differences)\n # --------tukaj pa naredi zdaj branje s pomocjo Orange.ExampleTable:\n data = orange.ExampleTable(newDataFile)\n atribs = getAtribs(modelRoot) # a more general revision would check also for Nodes..\n for i in range(len(data)):\n # <-reading->\n newVariant = {}\n for atrib in atribs:\n newVariant[atrib] = {}\n for val in atrib.values:\n if val == data[i][atrib.name].value:\n (newVariant[atrib])[val] = 1.0\n else:\n (newVariant[atrib])[val] = 0.0\n newVariant[modelRoot.name] = data[i].getclass().value\n print(\"--/\\--\")\n print(\"Calling revision with: \")\n print(newVariant)\n print(\"--/\\--\")\n # <-reading->\n revision(modelRoot, modelRoot, newVariant, [], cN)\n # -- now we have to put this into differences and reset the model\n # -- all but the CONFIDENCE get added and normalized\n #<-begin--add to differences - NORMALIZATION!>\n for k in originalDict.keys():\n tf = originalDict[k]\n difftf = differences[k]\n stf = safeOriginalCopy[k]\n for interList in tf: # spremembe naj se dodajo samo ce so bile dejansko narejene (preveris z == safeOriginalCopy)\n interDic = interList[-1:][0]\n diffinterList = difftf[tf.index(interList)]\n diffinterDic = diffinterList[-1:][0]\n stfinterList = stf[tf.index(interList)]\n stfinterDic = stfinterList[-1:][0]\n # -- dodano 28.11.2005\n changeMade = False\n for kk in interDic.keys():\n if kk != 'CONFIDENCE':\n if interDic[kk] != stfinterDic[kk]:\n changeMade = True\n # --\n if changeMade == True:\n sumProbabs = 0.0\n for kk in interDic.keys():\n if kk != 'CONFIDENCE':\n diffinterDic[kk] = diffinterDic[kk] + interDic[kk]\n 
sumProbabs = sumProbabs + diffinterDic[kk]\n else:\n if interDic[kk] != 0.6: #ATTENTION - datset specific!\n print(\"interDic kaze na spremembo CONF iz 0.6\")\n print\n diffinterDic[kk] = diffinterDic[kk] + 1 #for CONFIDENCE : just adding the number of applications\n for kk in diffinterDic.keys(): #normalization\n if kk != 'CONFIDENCE':\n diffinterDic[kk] = diffinterDic[kk] / float(sumProbabs)\n #<-end--add to differences>\n print(\"DIFFERENCES:\")\n print(differences)\n print\n## if quiet==False:\n## print \"----------------------------------------\"\n## print \"BEFORE reset model gives MAE of\", classifyRealDataFile(newDataFile, modelRoot)\n #<-begin--reset the table functions of the model>\n for k in originalDict.keys():\n tf = originalDict[k]\n savedtf = safeOriginalCopy[k]\n for interList in tf:\n interDic = interList[-1:][0]\n savedinterList = savedtf[tf.index(interList)]\n savedinterDic = savedinterList[-1:][0]\n for kk in interDic.keys():\n if kk != 'CONFIDENCE': # changed CONFIDENCE stays the same\n interDic[kk]=savedinterDic[kk]\n else:\n interDic[kk]=savedinterDic[kk] # no, not even CONFIDENCE stays the same! \n #<-end--reset the table functions of the model>\n## if quiet==False:\n## print \"AFTER reset model gives MAE of\", classifyRealDataFile(newDataFile, modelRoot)\n## print \"----------------------------------------\"\n # \\/\\/\\//\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\\\/\\/\\/\\/\\/\\/\\/\\/\\\\/\\/\\/\\/\\/\\/\\\\/\\\\\\/\\/\\/\\\\/\\//\\/ \n # -- now we have to add the differences to the model\n #<-begin--add differences to the model - NORMALIZATION>\n for k in originalDict.keys():\n tf = originalDict[k]\n difftf = differences[k]\n for interList in tf:\n interDic = interList[-1:][0]\n diffinterList = difftf[tf.index(interList)]\n diffinterDic = diffinterList[-1:][0]\n sumProbabs = 0.0\n numberOfRevisions = 0\n for kk in interDic.keys():\n if kk != 'CONFIDENCE':\n interDic[kk] = interDic[kk] + diffinterDic[kk]\n sumProbabs = sumProbabs + interDic[kk]\n else:\n numberOfRevisions = int(numberOfRevisions + diffinterDic[kk])\n for kk in interDic.keys(): #normalization\n if kk != 'CONFIDENCE':\n interDic[kk] = interDic[kk] / float(sumProbabs)\n else: #CONFIDENCE change\n for n in range(numberOfRevisions):\n interDic[kk] = (interDic[kk] * (1-cN) + cN * (1 - interDic[kk])) / (interDic[kk] * (1-cN) + cN * (1 - interDic[kk]) + (1 - interDic[kk]) * (1-cN) )\n #<-end--add differences to the model>\n print(\"New table functions are:\")\n print(modelRoot.name, \" :\")\n for t in range(len(modelRoot.tableFunction)):\n childrenCombinationDict = modelRoot.tableFunction[t][0]\n probDistDict = modelRoot.tableFunction[t][1]\n outString = \"\" \n for k in childrenCombinationDict.keys():\n outString = outString + \" \" + childrenCombinationDict[k] + \" \"\n ##print outString + \"\\t\\t\" + str(probDistDict)\n print('%-20s ==> %s' % (outString, str(probDistDict)))\n print\n for i in range(len(modelRoot.children)): \n print((modelRoot.children[i]).name)\n for j in range(len((modelRoot.children[i]).tableFunction)):\n childrenCombinationDict = (modelRoot.children[i]).tableFunction[j][0]\n probDistDict = (modelRoot.children[i]).tableFunction[j][1]\n outString = \"\" \n for k in childrenCombinationDict.keys():\n outString = outString + \" \" + childrenCombinationDict[k] + \" \"\n ##print outString + \"\\t\\t\" + str(probDistDict)\n print('%-20s ==> %s' % (outString, str(probDistDict)))\n print\n print(\"UPDATE FINISHED.\")\n## print \"And new MAE is: \", classifyRealDataFile(newDataFile, 
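Both normalization passes above rescale the probability entries of each rule while deliberately skipping the CONFIDENCE key. Extracted into one helper (invented numbers), the invariant is easier to see:

```python
def normalize_excluding(dist, skip=("CONFIDENCE",)):
    # Rescale the probability part of a rule's distribution to sum to 1,
    # leaving bookkeeping keys such as CONFIDENCE untouched.
    total = sum(p for k, p in dist.items() if k not in skip)
    return {k: p if k in skip else p / total for k, p in dist.items()}

print(normalize_excluding({"low": 0.9, "high": 0.3, "CONFIDENCE": 0.6}))
# {'low': 0.75, 'high': 0.25, 'CONFIDENCE': 0.6}
```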
modelRoot)\n\n\ndef revisionFromFile(modelRoot, newDataFile, cN=0.5, trust=0.1, quiet=False):\n # updates the old model (given with \"modelRoot\")\n # according to new data (given with \"newDataFile\" tabulated file) in SEQUENTIAL mode\n # Procedure changes old model !!! Only in RAM of course :)\n # As a result it prints all table functions of the new model\n import orange #because the new data are read as orange.ExampleTable (from file in Orange tab. format)\n data = orange.ExampleTable(newDataFile)\n atribs = getAtribs(modelRoot) # a more general revision would check also for Nodes..\n for i in range(len(data)):\n # <-reading->\n newVariant = {}\n for atrib in atribs:\n newVariant[atrib] = {}\n for val in atrib.values:\n if val == data[i][atrib.name].value:\n (newVariant[atrib])[val] = 1.0\n else:\n (newVariant[atrib])[val] = 0.0\n newVariant[modelRoot.name] = data[i].getclass().value\n # <-reading->\n revision(modelRoot, modelRoot, newVariant, [], cN)\n print(\"New table functions are:\")\n print(modelRoot.name, \" :\")\n for t in range(len(modelRoot.tableFunction)):\n print(modelRoot.tableFunction[t])\n print\n for i in range(len(modelRoot.children)):\n print((modelRoot.children[i]).name)\n for j in range(len((modelRoot.children[i]).tableFunction)):\n print((modelRoot.children[i]).tableFunction[j])\n print\n print(\"SEQUENTIAL FILE REVISION FINISHED.\")\n\n","sub_path":"pm/models/lib/proDEX.py","file_name":"proDEX.py","file_ext":"py","file_size_in_byte":40867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"265379892","text":"class Beast:\n dinner_menu = {\"chicken\": 20, \"broccoli\": 10, \"carrots\": 8, \"kale\": 10}\n\n def __init__(self, customers = {}, menu = {} , cust_order = [], funds = 0, total_price = 0, served = False):\n self.customers = customers\n self.menu = menu\n self.cust_order = cust_order\n self.total_price = total_price\n self.served = served\n self.funds = funds\n \n #Welcome Method\n #This method will take in customer information and print the menu afterwards.\n def welcome_cust(self):\n wel_cust = {}\n wel_cust[\"name\"] = input(\"May I have your name, please?\")\n wel_cust[\"email\"] = input(\"Your email?\")\n wel_cust[\"phone\"] = input(\"Lastly your phone number.\")\n self.add_customer(wel_cust[\"name\"], wel_cust[\"email\"], wel_cust[\"phone\"])\n self.print_menu()\n self.add_funds()\n self.add_to_cust_order()\n\n #Add funds to a Beast prepaid card\n def add_funds(self):\n fund_cust = input(\"Would you like to add funds to your Beast prepaid card?\")\n if fund_cust == \"yes\" or fund_cust == \"Yes\":\n x = input(\"How much would you like to add?\")\n self.funds += float(x)\n print(\"You now have $\",self.funds, \" on your Beast prepaid card.\")\n else:\n print(\"Cash or debit is fine then.\")\n \n #This method will run with the welcoming of every new customer\n def add_customer(self, name, email, phone):\n customer = {\"name\": name, \"email\": email, \"phone\": phone}\n self.customers[name] = customer\n print(self.customers)\n \n #Menu Methods\n def print_menu(self):\n print(\"Here's our menu:\")\n print(Beast.dinner_menu)\n\n #This method can also be used to update the price of existing items.\n def add_items_to_dinner_menu(self, item, price):\n self.dinner_menu[item] = price\n\n def remove_items_from_dinner_menu(self, item):\n del self.dinner_menu[item]\n\n #Customer Order and Total Price\n def add_to_cust_order(self):\n cust_item = input(\"Please choose an item on the menu to add to your order by 
entering that item.\")\n if cust_item == \"chicken\" or cust_item == \"Chicken\":\n self.cust_order.append(\"chicken\")\n self.total_price += 20\n print(\"You ordered\", self.cust_order,\"and your current total is $\",self.total_price,\".\")\n if cust_item == \"broccoli\" or cust_item == \"Broccoli\":\n self.cust_order.append(\"broccoli\")\n self.total_price += 10\n print(\"You ordered\", self.cust_order,\"and your current total is $\",self.total_price,\".\")\n if cust_item == \"carrots\" or cust_item == \"Carrots\":\n self.cust_order.append(\"carrots\")\n self.total_price += 8\n print(\"You ordered\", self.cust_order,\"and your current total is $\",self.total_price,\".\")\n if cust_item == \"kale\" or cust_item == \"Kale\":\n self.cust_order.append(\"kale\")\n self.total_price += 10\n print(\"You ordered\", self.cust_order,\"and your current total is $\",self.total_price,\".\")\n\n #Pricing methods\n def change_item_price(self, existing_item, price):\n self.dinner_menu[existing_item] = price\n\n def view_amount_owed(self):\n print(\"Your total amount owed is:\", self.total_price)\n\nset_1 = Beast()\n\nset_1.welcome_cust()\n#set_1.add_items_to_dinner_menu(\"cashews\", \"8\")\n#print(Beast.dinner_menu)\n#set_1.add_funds()\n#set_1.remove_items_from_dinner_menu(\"chicken\")\n#set_1.change_item_price(\"broccoli\", \"15\")\n#print(Beast.dinner_menu)\n#set_1.add_to_cust_order()\n#set_1.view_amount_owed()","sub_path":"resta_prog.py","file_name":"resta_prog.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"282740343","text":"from odoo import api, fields, models, SUPERUSER_ID, _\nfrom odoo.exceptions import UserError\n\n\nclass CostAllocationModuleactivemonthTable(models.Model):\n _name = 'costallocationmodule.activemonthtable'\n name = fields.Char(string=\"Acitve Month/Year\", compute='_onchange_Year', readonly=True)\n month_name = fields.Selection(\n [('January', 'January'), ('February', 'February'), ('March', 'March'), ('April', 'April'), ('May', 'May'),\n ('June', 'June'), ('July', 'July'), ('August', 'August'), ('September', 'September'), ('October', 'October'),\n ('November', 'November'), ('December', 'December')], string='Month Name', required=True)\n Year = fields.Selection([('2019', '2019'), ('2020', '2020')], string='Year', required=True)\n month_status = fields.Selection([('active', 'Active'), ('deactive', 'Deactive')], string='Status', required=True)\n\n @api.onchange('Year')\n def _onchange_Year(self):\n if self.Year:\n self.name = str(self.month_name) + \"-\" + str(self.Year)\n\n","sub_path":"models/CostAllocationmodule_activemonthtable.py","file_name":"CostAllocationmodule_activemonthtable.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"606778536","text":"import os\nimport numpy as np\nfrom keras.models import *\nfrom keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Cropping2D, Dense, Flatten, Reshape, Lambda, Multiply, Concatenate, Conv2DTranspose\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, CSVLogger, TensorBoard, EarlyStopping\nfrom keras import backend as K\nfrom keras.utils import plot_model\nfrom support_defs import *\nfrom MyImageDataGenerator import *\nfrom keras.utils.training_utils import multi_gpu_model\nfrom CustomModelCheckpoint import *\nfrom IntermediateVisualizations_Callback import 
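A caution on Beast.__init__ above: the `customers = {}`, `cust_order = []` style defaults are evaluated once at class-definition time, so every Beast created with the defaults shares the same dict and list objects. A minimal reproduction and the conventional None fix:

```python
class Shared:
    def __init__(self, items=[]):    # one list object reused by all instances
        self.items = items

class Fixed:
    def __init__(self, items=None):  # a fresh list per instance
        self.items = [] if items is None else items

a, b = Shared(), Shared()
a.items.append("kale")
print(b.items)  # ['kale'] -- b silently sees a's append

c, d = Fixed(), Fixed()
c.items.append("kale")
print(d.items)  # []
```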
*\nfrom vgg16_local import *\nfrom KFoldCycleCallback import *\nfrom keras.metrics import mean_squared_error\nfrom keras.losses import binary_crossentropy\nfrom ssim_metric import *\nfrom msssim_metric import *\n# from get_model_memory_usage import *\nimport time, gc\nimport getopt\nimport pdb\n\n\nlog_device_placement = False\n\n\n#MKPC\ntries_each_IntReprVec = 4\n\nimages_size = (256,256)\n# original_dim = images_size[0] * images_size[0]\nmaskfname = './dataset_ge40North/mask_256.npy'\nsrcdatafile = './dataset_ge40North/PV_hgt_data_DJF_256_normed.npy'\n# internal_representations = [2,3,8,16,32,64,128]\n\n\nclass mkCVAE(object):\n def __init__(self, img_size, internal_representation = 128, start_num = 0, varname = 'HGT', srcfname = ''):\n\n self.start_num = start_num\n # self.gpu_num = gpu_num\n\n self.debug = True\n\n # MKPC\n self.GPUs_count = 1\n self.epochs = 150\n # self.current_batch_size = 2\n self.current_batch_size = 16 * self.GPUs_count\n\n self.epochs_to_cycle = 10\n self.image_size = img_size\n self.internal_representation = internal_representation\n self.varname = varname\n self.fnames_prefix = 'SpCVAE_4pv_clustering_%s_hiddim%04d_startnum%02d' % (self.varname, self.internal_representation, start_num)\n\n self.mask = np.load(maskfname)\n self.mask = np.expand_dims(self.mask, -1)\n if srcfname == '':\n raise Exception('you need to specify source data file! stopping.')\n elif not os.path.isfile(srcfname):\n raise FileNotFoundError('unable to find source data file:\\n%s' % srcfname)\n else:\n self.srcfname = srcfname\n\n\n def ComposeModel_singleGPU(self, metrics = []):\n img_input = Input(shape=tuple(list(self.image_size) + [1]), name='Encoder_Input_vgg16')\n Gray2RGB = Concatenate(axis=-1, name='Encoder_Gray2RGB_vgg16')([img_input, img_input, img_input])\n conv_base2 = VGG16_local(weights='imagenet', include_top=False,\n input_shape=(self.image_size[0], self.image_size[1], 3),\n weights_path='./vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',\n layernames_postfix='_vgg16')\n conv_base2.trainable = False\n vgg_model = conv_base2(Gray2RGB)\n\n Encoder_Flatten = Flatten()(vgg_model)\n Flatten_length = Encoder_Flatten._keras_shape[1]\n Encoder_Dropout03 = Dropout(0.2, name='Encoder_Dropout03')(Encoder_Flatten)\n Encoder_Dense_01 = Dense(4096, activation='relu', name='Encoder_Dense_01')(Encoder_Dropout03)\n Encoder_Dropout04 = Dropout(0.2, name='Encoder_Dropout04')(Encoder_Dense_01)\n Encoder_Dense_02 = Dense(512, activation='relu', name='Encoder_Dense_02')(Encoder_Dropout04)\n\n z_mean = Dense(self.internal_representation, activation='sigmoid', name='z_mean')(Encoder_Dense_02)\n z_log_var = Dense(self.internal_representation, activation='relu', name='z_log_var')(Encoder_Dense_02)\n\n z_combined = Concatenate(name='z_combined')([z_mean, z_log_var])\n\n z = Lambda(sampling, output_shape=(self.internal_representation,), name='z')([z_mean, z_log_var])\n\n encoder = Model(img_input, [z_mean, z_log_var, z, z_combined])\n for l in encoder.layers:\n if 'vgg16' in l.name:\n l.trainable = False\n else:\n l.trainable = True\n\n plot_model(encoder, to_file='./output/' + self.fnames_prefix + '_Encoder_model_structure.png', show_shapes=True)\n with open('./output/' + self.fnames_prefix + '_Encoder_model_structure.txt', 'w') as fh:\n encoder.summary(print_fn=lambda x: fh.write(x + '\\n'))\n\n # Decoder_Zsampling_input = Input(shape=(self.internal_representation,), name='Decoder_Zsampling_input')\n Decoder_mask_input = Input(shape=tuple(list(self.image_size) + [1]), 
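The z layer above wraps a `sampling` helper imported from support_defs, which is not shown in this file. The standard reparameterization trick it presumably implements looks like the following sketch (an assumption about that helper, not its verified source):

```python
from keras import backend as K

def sampling(args):
    # Draw z = mu + sigma * eps with eps ~ N(0, I); writing sigma as
    # exp(0.5 * log_var) keeps the sample differentiable w.r.t. both inputs.
    z_mean, z_log_var = args
    eps = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * eps
```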
name='Decoder_mask_input')\n\n Decoder_Dense_01 = Dense(512, activation='relu', name='Decoder_Dense_01')(z)\n Decoder_drop_01 = Dropout(0.2, name='Decoder_drop_01')(Decoder_Dense_01)\n Decoder_Dense_02 = Dense(4096, activation='relu', name='Decoder_Dense_02')(Decoder_drop_01)\n Decoder_Dropout02 = Dropout(0.2, name='Decoder_Dropout02')(Decoder_Dense_02)\n Decoder_Dense_03 = Dense(Flatten_length, activation='relu', name='Decoder_Dense_03')(Decoder_Dropout02)\n k_size = np.int32(np.sqrt(Flatten_length / 512))\n Decoder_Reshape_01 = Reshape((k_size, k_size, 512), name='Decoder_Reshape_01')(Decoder_Dense_03)\n\n Decoder_UpSampling2D_01 = UpSampling2D(size=(2, 2), name='Decoder_UpSampling2D_01')(Decoder_Reshape_01)\n Decoder_Conv2DTranspose_0101 = Conv2D(512, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0101')(Decoder_UpSampling2D_01)\n Decoder_Conv2DTranspose_0102 = Conv2D(512, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0102')(Decoder_Conv2DTranspose_0101)\n Decoder_UpSampling2D_02 = UpSampling2D(size=(2, 2), name='Decoder_UpSampling2D_02')(Decoder_Conv2DTranspose_0102)\n Decoder_Conv2DTranspose_0201 = Conv2D(256, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0201')(Decoder_UpSampling2D_02)\n Decoder_Conv2DTranspose_0202 = Conv2D(256, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_02')(Decoder_Conv2DTranspose_0201)\n Decoder_UpSampling2D_03 = UpSampling2D(size=(2, 2), name='Decoder_UpSampling2D_03')(Decoder_Conv2DTranspose_0202)\n Decoder_Conv2DTranspose_0301 = Conv2D(128, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0301')(Decoder_UpSampling2D_03)\n Decoder_Conv2DTranspose_0302 = Conv2D(128, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0302')(Decoder_Conv2DTranspose_0301)\n Decoder_UpSampling2D_04 = UpSampling2D(size=(2, 2), name='Decoder_UpSampling2D_04')(Decoder_Conv2DTranspose_0302)\n Decoder_Conv2DTranspose_0401 = Conv2D(64, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0401')(Decoder_UpSampling2D_04)\n Decoder_Conv2DTranspose_0402 = Conv2D(64, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0402')(Decoder_Conv2DTranspose_0401)\n Decoder_UpSampling2D_05 = UpSampling2D(size=(2, 2), name='Decoder_UpSampling2D_05')(Decoder_Conv2DTranspose_0402)\n Decoder_Conv2DTranspose_0501 = Conv2D(32, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0501')(Decoder_UpSampling2D_05)\n Decoder_Conv2DTranspose_0502 = Conv2D(32, (3, 3), strides=1, activation='relu', padding='same', kernel_initializer='he_normal', name='Decoder_Conv2DTranspose_0502')(Decoder_Conv2DTranspose_0501)\n Decoder_Conv2DTranspose_06_out = Conv2D(1, (3, 3), strides=1, activation='sigmoid', padding='same', name='Decoder_Conv2DTranspose_06_out')(Decoder_Conv2DTranspose_0502)\n DecoderMaskedOutput = Multiply(name='DecoderMaskedOutput')([Decoder_Conv2DTranspose_06_out, Decoder_mask_input])\n\n # SqueezedOutput = Lambda(lambda x: K.squeeze(x, -1), name='SqueezedOutput')(Decoder_Conv2D_07_out)\n # DecoderMaskedOutput = 
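Each decoder stage above has the same shape: a 2x upsample followed by two same-padding 3x3 convolutions, repeated five times while the filter count falls from 512 to 32. A loop-based sketch of an equivalent construction (the layer names here are invented):

```python
from keras.layers import Conv2D, UpSampling2D

def upsample_block(x, filters, idx):
    # One decoder stage: 2x upsample, then two ReLU convolutions.
    x = UpSampling2D(size=(2, 2), name='Decoder_Up_%02d' % idx)(x)
    for j in (1, 2):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same',
                   kernel_initializer='he_normal',
                   name='Decoder_Conv_%02d%02d' % (idx, j))(x)
    return x

# In-context usage, starting from the reshaped dense output:
# x = Decoder_Reshape_01
# for idx, filters in enumerate([512, 256, 128, 64, 32], start=1):
#     x = upsample_block(x, filters, idx)
```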
Multiply(name='DecoderMaskedOutput')([Decoder_Conv2DTranspose_06_out, Decoder_mask_input])\n\n # decoder = Model([Decoder_Zsampling_input, Decoder_mask_input], DecoderMaskedOutput, name='Decoder')\n\n\n # plot_model(decoder, to_file='./output/' + self.fnames_prefix + '_Decoder_model_structure.png', show_shapes=True)\n # with open('./output/' + self.fnames_prefix + '_Decoder_model_structure.txt', 'w') as fh:\n # decoder.summary(print_fn=lambda x: fh.write(x + '\\n'))\n\n # model_output = decoder([z, Decoder_mask_input])\n # model = Model([img_input, Decoder_mask_input], [model_output, z_combined], name='pv_CVAE')\n model = Model([img_input, Decoder_mask_input], [DecoderMaskedOutput, z_combined], name='pv_CVAE_VGG')\n for l in model.layers:\n if 'vgg16' in l.name:\n l.trainable = False\n else:\n l.trainable = True\n\n plot_model(model, to_file='./output/' + self.fnames_prefix + '_model_structure.png', show_shapes=True)\n with open('./output/' + self.fnames_prefix + '_model_structure.txt', 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + '\\n'))\n\n self.template_model = model\n self.encoder = encoder\n # self.decoder = decoder\n\n model.compile(optimizer=Adam(lr=1e-4),\n loss= {'DecoderMaskedOutput': 'binary_crossentropy',\n 'z_combined': SpVAE_loss(rho=0.1,\n beta=0.1,\n debug=True,\n logfile='./logs/' + self.fnames_prefix + '_SpVAE_loss_debug.log')},\n loss_weights={'DecoderMaskedOutput': self.image_size[0] * self.image_size[1],\n 'z_combined': 1.0},\n metrics=metrics)\n return model\n\n\n\n\n def get_cae(self, metrics = []):\n if self.GPUs_count <= 1:\n return self.ComposeModel_singleGPU(metrics=metrics)\n\n\n\n def train(self):\n EnsureDirectoryExists('./output/')\n EnsureDirectoryExists('./logs/')\n\n\n\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement = log_device_placement, device_count={'CPU': 1, 'GPU': self.GPUs_count})\n config.gpu_options.allow_growth = True\n session = tf.Session(config=config)\n K.set_session(session)\n\n\n metrics = [SSIMMetric(), custom_MSE, MSSSIMMetric(average=False, debug=True, logfile='./logs/' + self.fnames_prefix + '_MSSSIMMetric_debug.log')]\n\n model = self.get_cae(metrics)\n print(\"got CVAE\")\n\n print(\"loading data\")\n datagen = MyImageDataGenerator(srcdatafile=self.srcfname,\n val_folds=5,\n test_split_ratio=0.2,\n image_size=self.image_size,\n maskfile=maskfname,\n model_internal_representation_length = self.internal_representation)\n\n train_generator = datagen.flow(None, batch_size=self.current_batch_size, category='train')\n val_generator = datagen.flow(None, batch_size=self.current_batch_size, category='val')\n\n\n filepathSSIMMetric = './logs/' + self.fnames_prefix + '_epoch{epoch:04d}-MSSSIM_{monitorvalue:.6f}.hdf5'\n checkpointingSSIMMetric = CustomModelCheckpoint(self.template_model, filepathSSIMMetric, monitor='val_DecoderMaskedOutput_MSSSIMMetric', verbose=1, save_best_only=True, mode='min')\n filepathLossMetric = './logs/' + self.fnames_prefix + '_epoch{epoch:04d}-LOSS_{monitorvalue:.6f}.hdf5'\n checkpointingLoss = CustomModelCheckpoint(self.template_model, filepathLossMetric, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n cycle_train_val_sets = KFoldCycleCallback(datagen, epochs_to_cycle = self.epochs_to_cycle)\n csv_logger = CSVLogger('./logs/' + self.fnames_prefix + 'train_progress.csv', separator=';', append=True, )\n tb_callback = TensorBoard(log_dir='./logs/TBoard/' + self.fnames_prefix + '/', write_graph=True)\n lrCallback = LearningRateScheduler(step_decay, verbose=1)\n 
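The `step_decay` passed to LearningRateScheduler above also lives in support_defs and is not shown here. A conventional step schedule of the kind usually paired with that callback (an assumed stand-in, not the project's actual function):

```python
import math

def step_decay(epoch, initial_lr=1e-4, drop=0.5, epochs_per_drop=30):
    # Halve the learning rate every 30 epochs, starting from the Adam lr
    # used in compile(); LearningRateScheduler calls this once per epoch.
    return initial_lr * math.pow(drop, math.floor(epoch / epochs_per_drop))

print(step_decay(0), step_decay(30), step_decay(60))  # 0.0001 5e-05 2.5e-05
```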
EarlyStoppingCallback = EarlyStopping(monitor='val_DecoderMaskedOutput_MSSSIMMetric', min_delta=0., patience=100, verbose=1, mode='max')\n InterVis = IntermediateVisualizations(png_filepath_template = './logs/' + self.fnames_prefix + '_encodings_epoch{epoch:04d}.png',\n decoded_filepath_template ='./logs/' + self.fnames_prefix + '_decoded_epoch{epoch:04d}.npy',\n testdata_filename_template ='./logs/' + self.fnames_prefix + '_testdata.npy',\n models_to_eval = (self.encoder, self.template_model),\n mask = self.mask,\n testdata = np.expand_dims(datagen.srcdata[datagen.test_indices], -1),\n varname = self.varname,\n period = 1,\n batch_size = self.current_batch_size)\n\n print('Fitting model...')\n\n start = time.time()\n\n\n history = model.fit_generator(train_generator,\n steps_per_epoch = datagen.dataset_length_batches(category='train', batch_size = self.current_batch_size),\n epochs = self.epochs,\n validation_data = val_generator,\n validation_steps = datagen.dataset_length_batches(category='val', batch_size=self.current_batch_size),\n callbacks=[checkpointingSSIMMetric, checkpointingLoss, csv_logger, tb_callback, cycle_train_val_sets, lrCallback, EarlyStoppingCallback, InterVis])\n\n\n end = time.time()\n\n with open('./output/' + self.fnames_prefix + 'model.json', \"w\") as json_file:\n json_file.write(self.template_model.to_json())\n self.template_model.save('./output/'+ self.fnames_prefix +'weights.h5')\n\n print('loading test data')\n test_generator = datagen.flow(None, batch_size=self.current_batch_size, category='test')\n\n print('evaluating final model...')\n final_eval_result = model.evaluate_generator(test_generator, steps=datagen.dataset_length_batches(category='test', batch_size=self.current_batch_size))\n print(final_eval_result)\n\n K.clear_session()\n gc.collect()\n\n print(\"training time for %d epochs: %.2fs = %.2fHrs\" % (len(list(history.history.values())[0]), (end - start), (end - start) / 3600.0))\n with open('./logs/' + self.fnames_prefix + 'train_summary.txt', 'w') as f:\n f.writelines(\"training time for %d epochs: %.2fs = %.2fHrs\" % (len(list(history.history.values())[0]), (end - start), (end - start) / 3600.0))\n f.writelines('\\n\\nevaluating final model:')\n f.writelines(str(final_eval_result))\n\n\n\n\n\n\n\nif __name__ == '__main__':\n input_args = sys.argv[1:]\n opts, args = getopt.getopt(input_args, \"\", [\"hiddim=\", \"startidx=\", \"varname=\", \"srcfname=\"])\n\n if ('--hiddim' in [opt[0] for opt in opts]):\n internal_representation = int([opt for opt in opts if opt[0] == '--hiddim'][0][1])\n else:\n internal_representation = 128\n\n if ('--startidx' in [opt[0] for opt in opts]):\n start_idx = int([opt for opt in opts if opt[0] == '--startidx'][0][1])\n else:\n start_idx = 0\n\n if ('--varname' in [opt[0] for opt in opts]):\n varname = str([opt for opt in opts if opt[0] == '--varname'][0][1])\n else:\n varname = 'HGT'\n\n\n if ('--srcfname' in [opt[0] for opt in opts]):\n srcfname = str([opt for opt in opts if opt[0] == '--srcfname'][0][1])\n else:\n # srcfname = srcdatafile\n srcfname = ''\n\n\n for tryIdx in range(start_idx, tries_each_IntReprVec):\n myCVAE = mkCVAE(img_size = images_size,\n internal_representation = internal_representation,\n start_num = tryIdx,\n varname = varname,\n srcfname=srcfname)\n try:\n myCVAE.train()\n except Exception as ex1:\n err_fname = './logs/' + myCVAE.fnames_prefix + 'errors.log'\n exc_type, exc_value, exc_traceback = sys.exc_info()\n with open(err_fname, 'a') as errf:\n traceback.print_tb(exc_traceback, 
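The __main__ block above rescans `opts` with one list comprehension per flag. Since getopt returns (flag, value) pairs, a single dict lookup covers all four options; a sketch of the equivalent parsing:

```python
import getopt
import sys

opts, args = getopt.getopt(sys.argv[1:], "",
                           ["hiddim=", "startidx=", "varname=", "srcfname="])
opt_map = dict(opts)  # e.g. {'--hiddim': '64', '--varname': 'HGT'}

internal_representation = int(opt_map.get("--hiddim", 128))
start_idx = int(opt_map.get("--startidx", 0))
varname = opt_map.get("--varname", "HGT")
srcfname = opt_map.get("--srcfname", "")
```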
limit=None, file=errf)\n traceback.print_exception(exc_type, exc_value, exc_traceback, limit=None, file=errf)\n print(str(ex1))\n del myCVAE\n K.clear_session()\n gc.collect()\n\n print('\\n\\nFINISHED')","sub_path":"cvae_for_pv_clustering_VGG_sparse.py","file_name":"cvae_for_pv_clustering_VGG_sparse.py","file_ext":"py","file_size_in_byte":17509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"308729106","text":"\nfrom mesh.standard import Resource\nfrom scheme.fields import *\n\n\n__all__ = ('Permission',)\n\n\nclass Permission(Resource):\n \"\"\"Permission resource\"\"\"\n name = 'Permission'\n requests = 'create delete get query update'\n version = 1\n \n class schema:\n id = UUID(nonempty=True)\n name = Text(nonempty=True)\n resourceregistry = Structure({\n 'bundle': Text(nonempty=True),\n 'resource': Text(nonempty=True),\n 'request': Text(nonempty=True),\n }, nonempty=True, onupdate=False)\n subject = UUID(onupdate=False)\n status = Enumeration('active inactive', nonnull=True, default='active')\n","sub_path":"mesh/yabl/tap/resources/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"24307540","text":"import re\n\n\ndef is_robot(user_agent):\n if user_agent:\n user_agent = unicode(user_agent).lower()\n\n # We mark something as a bot if it contains any of the $bot_indicators\n # or if it does not contain one of the $browser_indicators. In addition,\n # if the user-agent string contains \"mozilla\" we make sure it has version\n # information. Finally anything that starts with a word in the $whitelist\n # is never considered a bot.\n\n whitelist = ('w3m', 'dillo', 'links', 'elinks', 'lynx')\n for agent in whitelist:\n if agent in user_agent:\n return False\n\n bot_indicators = ('bot', 'spider', 'search', 'jeeves', 'crawl', 'seek',\n 'heritrix', 'slurp', 'thumbnails', 'capture', 'ferret',\n 'webinator', 'scan', 'retriever', 'accelerator',\n 'upload', 'digg', 'extractor', 'grub', 'scrub')\n for agent in bot_indicators:\n if agent in user_agent:\n return True\n\n browser_indicators = ('mozilla', 'browser', 'iphone', 'lynx', 'mobile',\n 'opera', 'icab')\n has_browser_indicator = False\n for agent in browser_indicators:\n if agent in user_agent:\n has_browser_indicator = True\n break\n\n if not has_browser_indicator:\n return True\n\n # Check for mozilla version information\n if 'mozilla' in user_agent:\n if '(' not in user_agent:\n return True\n if not re.search(r'mozilla/\\d+', user_agent):\n return True\n\n return False\n","sub_path":"km/helpers/is_robot.py","file_name":"is_robot.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130440577","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\nimport io\nimport os\nimport re\nfrom docx import Document\nfrom openpyxl import load_workbook\nfrom odoo.modules.module import get_module_path\n\nfrom ..models.utils import docx_replace_regex, xlsx_replace_regex, docx_replace_image\n\n\nparallel_diagram = {\n '缸体对柱塞作用力变化曲线': 'piston_check_diagram1',\n '柱塞副比压变化曲线': 'piston_check_diagram2',\n '柱塞相对缸体移动速度变化曲线': 'piston_check_diagram3',\n '柱塞副PV值变化曲线': 'piston_check_diagram4',\n '柱塞副泄漏量变化曲线': 'piston_check_diagram5',\n '柱塞弯矩最大处应力变化曲线': 'piston_check_diagram6',\n '柱塞颈部弯矩最大处应力变化曲线': 'piston_check_diagram7',\n '柱塞对滑靴压紧力变化曲线(阻尼管型)': 'plunger_design_diagram1',\n 
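A quick sanity check of is_robot() above with invented user-agent strings (note the module targets Python 2 -- it calls unicode(); under Python 3 that line would need str() instead):

```python
print(is_robot("Googlebot/2.1 (+http://www.google.com/bot.html)"))
# True -- contains the "bot" indicator
print(is_robot("Mozilla/5.0 (X11; Linux x86_64) Firefox/115.0"))
# False -- browser indicator plus parentheses and a mozilla/5 version
print(is_robot("Mozilla compatible agent"))
# True -- "mozilla" without "(" fails the version-info check
```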
'柱塞对滑靴压紧力变化曲线(剩余压紧力型)': 'plunger_design_diagram2',\n '滑靴副比压变化曲线': 'plunger_check_diagram1',\n '滑靴滑动速度变化曲线': 'plunger_check_diagram2',\n '滑靴副PV值变化曲线': 'plunger_check_diagram3',\n '单个柱塞所需弹簧力': 'spring_design_diagram1',\n}\n\n\ntilting_diagram = {\n '斜盘倾角为φ时,任意α角时柱塞位移S变化曲线': 'kinematics_calculation_diagram1',\n '斜盘倾角为φ时,柱塞沿其轴线相对转子的运动速度v变化曲线': 'kinematics_calculation_diagram2',\n '斜盘倾角为φ时,柱塞沿其轴线相对转子运动的加速度j变化曲线': 'kinematics_calculation_diagram3',\n '柱塞孔油压作用力Fu1变化曲线': 'dynamics_calculation_diagram1',\n '转子腔油压作用力Fu2变化曲线': 'dynamics_calculation_diagram2',\n '柱塞弹簧力Ft1变化曲线': 'dynamics_calculation_diagram3',\n '柱塞相对运动惯性力Fg变化曲线': 'dynamics_calculation_diagram4',\n '柱塞离心惯性力FL变化曲线': 'dynamics_calculation_diagram5',\n '哥式惯性力FK变化曲线': 'dynamics_calculation_diagram6',\n '柱塞与孔摩擦力Fu变化曲线': 'dynamics_calculation_diagram7',\n '单个柱塞对斜盘的总作用力Fs变化曲线': 'dynamics_calculation_diagram8',\n '单个柱塞对斜盘的作用力MA矩变化曲线': 'dynamics_calculation_diagram9',\n '全部柱塞对斜盘的总作用力矩∑MA1变化曲线': 'dynamics_calculation_diagram10',\n '柱塞支承长度L0变化曲线': 'piston_pressure_check_image1',\n '柱塞支承面单位压力Ps变化曲线': 'piston_pressure_check_image2'\n}\n\n\nclass PumpManager(http.Controller):\n @http.route('/web/binary/download_report', type='http', auth=\"public\")\n # @serialize_exception\n def download_report(self, model, id, context=None, **kw):\n # cr, uid, context = http.request.cr, http.request.uid, http.request.context\n Model = http.request.registry[model]\n context = eval(context)\n fp = io.BytesIO()\n\n record = http.request.env[model].search([('id', '=', id)])\n filename = str()\n\n category = str()\n if Model._name == 'parallel.plunger.pump':\n category = 'parallel_plunger'\n elif Model._name == 'tilting.plunger.pump':\n category = 'tilting_plunger'\n\n keys = sorted(context['keys'], key=lambda i: len(i), reverse=True)\n if context['type'] == 'excel':\n filename = '%s_%s.xlsx' % (context['stage'], id)\n path = os.path.join(get_module_path(Model._module),\n 'static/templates/%s/%s/%s.xlsx' % (category, context['stage'], context['stage']))\n xlsx = load_workbook(path)\n\n for key in keys:\n regex = re.compile(key)\n replace = record[key]\n xlsx_replace_regex(xlsx, regex, replace)\n xlsx.save(fp)\n elif context['type'] == 'word':\n if context.get('target'):\n filename = '%s_%s_honglin.docx' % (context['stage'], id)\n path = os.path.join(get_module_path(Model._module),\n 'static/templates/%s/%s/%s_honglin.docx' % (category, context['stage'], context['stage']))\n else:\n filename = '%s_%s.docx' % (context['stage'], id)\n path = os.path.join(get_module_path(Model._module),\n 'static/templates/%s/%s/%s.docx' % (category, context['stage'], context['stage']))\n\n doc = Document(path)\n for key in keys:\n regex = re.compile(key)\n replace = record[key]\n docx_replace_regex(doc, regex, replace)\n\n if category == 'parallel_plunger':\n diagram_list = parallel_diagram\n else:\n diagram_list = tilting_diagram\n\n for key in diagram_list:\n regex = re.compile(key)\n replace = record[diagram_list[key]]\n docx_replace_image(doc, regex, replace)\n # docx_replace_image(doc, re.compile('初步计算轴径'), record.piston_check_diagram2)\n doc.save(fp)\n\n filecontent = fp.getvalue()\n if not filecontent:\n return http.request.not_found()\n else:\n if not filename:\n filename = '%s_%s' % (model.replace('.', '_'), id)\n return http.request.make_response(filecontent,\n [('Content-Type', 'application/octet-stream'),\n ('Content-Disposition', 
http.content_disposition(filename))])","sub_path":"controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"444216983","text":"import sys\n# Given: A positive integer n (3≤n≤10000).\n\n# Return: The number of internal nodes of any unrooted binary tree having n leaves.\nfile_path = sys.argv[1]\n\nwith open(file_path,'r') as file_object:\n n = int(file_object.read())\n\nprint(n-2) # derived from my bioinformatics lecture","sub_path":"Bioinformatics_Stronghold/Counting_Phylogenetic_Ancestors.py","file_name":"Counting_Phylogenetic_Ancestors.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"242953215","text":"import numpy as np\r\n\r\n# Forward Propagation code\r\ninput_data = np.array([1, 1])\r\nweights = {\r\n 'node_00': np.array([2, 4]),\r\n 'node_01': np.array([4, -5]),\r\n 'node_10': np.array([0, 1]),\r\n 'node_11': np.array([1, 1]),\r\n 'output': np.array([5, 1])\r\n}\r\n\r\ndef iden(value):\r\n return max(-1, value)\r\n\r\n# first hidden layer\r\nnode_00_input = (input_data * weights[\"node_00\"]).sum()\r\nnode_00_output = iden(node_00_input)\r\n\r\n\r\nnode_01_input = (input_data * weights['node_01']).sum()\r\nnode_01_output = iden(node_01_input)\r\n\r\n# second hidden layer\r\nnode_layer_1 = np.array([node_00_output, node_01_output])\r\n\r\nnode_10_input = (node_layer_1 * weights[\"node_10\"]).sum()\r\nnode_10_output = iden(node_10_input)\r\n\r\n\r\nnode_11_input = (node_layer_1 * weights['node_11']).sum()\r\nnode_11_output = iden(node_11_input)\r\n\r\n#output\r\nnode_layer_2 = np.array([node_10_output, node_11_output])\r\noutput = (node_layer_2 * weights['output']).sum()\r\nprint(output)","sub_path":"datacamp-deep_learning_introduction/neural network.py","file_name":"neural network.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"209039702","text":"import os\nimport psycopg2\nfrom flask import Flask, render_template, g, redirect, url_for, session, request, jsonify\nfrom flask_oauthlib.client import OAuth\n\nfrom flask_debugtoolbar import DebugToolbarExtension\nimport logging\nimport json\nimport random\n\nfrom werkzeug.utils import secure_filename\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'XYZ')\ntoolbar = DebugToolbarExtension(app)\noauth = OAuth(app)\n\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nUPLOAD_FOLDER = os.path.join(APP_ROOT, 'static')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nclass usr:\n def __init__(self,usr_dict):\n self.firstName = usr_dict['firstName']\n self.lastName = usr_dict['lastName']\n\n def fetch_first_name(self):\n return self.firstName\n\ndef connect_db():\n return psycopg2.connect(os.environ.get('DATABASE_URL'))\n\n\n@app.before_request\ndef before_request():\n g.db_conn = connect_db()\n\n@app.context_processor\ndef override_url_for():\n return dict(url_for=dated_url_for)\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n#@app.route('/usr')\ndef 
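The forward-propagation record above computes each node with a scalar sum; the same network is two matrix-vector products plus the clamp that its iden() applies. A vectorized equivalent reproducing the same output:

```python
import numpy as np

clamp = lambda v: np.maximum(-1, v)  # element-wise version of iden()

x = np.array([1, 1])
W1 = np.array([[2, 4], [4, -5]])  # rows: node_00, node_01
W2 = np.array([[0, 1], [1, 1]])   # rows: node_10, node_11
w_out = np.array([5, 1])

h1 = clamp(W1 @ x)   # [6, -1]
h2 = clamp(W2 @ h1)  # [-1, 5]
print(h2 @ w_out)    # 0, matching the scalar version
```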
get_usr(first_name):\n cur = g.db_conn.cursor()\n cur.execute(\"SELECT * FROM usr where first_name = '\" + first_name + \"'\")\n return render_template('welcome.html', users=cur.fetchall())\n\n\ndef match(usr_tags, chg_skills):\n chg_skills = chg_skills.split(',')\n for tags in usr_tags:\n ts = tags[0].split(',')\n for t in ts:\n for s in chg_skills:\n if t.lower() == s.lower():\n return True\n return False\n\n\nlinkedin = oauth.remote_app(\n 'linkedin',\n consumer_key='77u05yrx7rjl2w',\n consumer_secret='wqJjQS5WuExBqdqv',\n request_token_params={\n 'scope': 'r_basicprofile',\n 'state': 'RandomString',\n },\n base_url='https://api.linkedin.com/v1/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',\n authorize_url='https://www.linkedin.com/uas/oauth2/authorization',\n)\n\n@app.context_processor\ndef override_url_for():\n return dict(url_for=dated_url_for)\n\ndef dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n\n@app.route('/')\ndef index():\n logging.warning(session)\n if 'linkedin_token' in session:\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n return render_template('index.html', usr_first_name=session['name'])\n else:\n return redirect(url_for('login'))\n\n\n@app.route('/login')\ndef login():\n return linkedin.authorize(callback=url_for('authorized', _external=True))\n\n\n@app.route('/logout')\ndef logout():\n session.pop('linkedin_token', None)\n session.clear()\n logging.warning(session)\n return redirect(url_for('login'))\n\n\n@app.route('/login/authorized')\ndef authorized():\n resp = linkedin.authorized_response()\n if resp is None:\n return 'Access denied: reason=%s error=%s' % (\n request.args['error_reason'],\n request.args['error_description']\n )\n session['linkedin_token'] = (resp['access_token'], '')\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n logging.warning(session['name'])\n return render_template('index.html', usr_first_name=session['name'])\n\n\n@linkedin.tokengetter\ndef get_linkedin_oauth_token():\n return session.get('linkedin_token')\n\n\ndef change_linkedin_query(uri, headers, body):\n auth = headers.pop('Authorization')\n headers['x-li-format'] = 'json'\n if auth:\n auth = auth.replace('Bearer', '').strip()\n if '?' 
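get_usr() above (like the later profile/dashboard queries) builds SQL by string concatenation, which is injectable through first_name. psycopg2's parameter binding closes that hole; a sketch of the same route in parameterized form:

```python
def get_usr(first_name):
    cur = g.db_conn.cursor()
    # psycopg2 escapes the bound value itself; never interpolate it by hand.
    cur.execute("SELECT * FROM usr WHERE first_name = %s", (first_name,))
    return render_template('welcome.html', users=cur.fetchall())
```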
in uri:\n uri += '&oauth2_access_token=' + auth\n else:\n uri += '?oauth2_access_token=' + auth\n return uri, headers, body\n\nlinkedin.pre_request = change_linkedin_query\n\n@app.route('/profile')\ndef profile():\n logging.warning(session)\n if 'linkedin_token' in session:\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n cur = g.db_conn.cursor()\n prf_sql = \"SELECT experience.title, experience.company, experience.duration, experience.description, experience.tags FROM experience,usr WHERE experience.usr_id=usr.id AND usr.first_name= '\" + session['name'] + \"'\"\n cur.execute(prf_sql)\n experiences=cur.fetchall()\n exps = []\n for i in range(len(experiences)):\n temp = []\n for j in range(4):\n temp.append(experiences[i][j])\n temp.append(experiences[i][4].split(','))\n exps.append(temp)\n return render_template('profile.html', experiences=exps, usr_first_name=session['name'])\n return redirect(url_for('login'))\n\n\n@app.route('/challenges')\ndef challenges():\n logging.warning(session)\n if 'linkedin_token' in session:\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n cur = g.db_conn.cursor()\n sql = \"SELECT project.title, project.skill, project.description, companyprojectrel.start_time, companyprojectrel.expire_time, company.url, companyprojectrel.time_limit FROM project,companyprojectrel,company where project.id=companyprojectrel.project_id AND companyprojectrel.company_id=company.id\"\n cur.execute(sql)\n projects = cur.fetchall()\n sql = \"SELECT experience.tags FROM experience,usr WHERE experience.usr_id=usr.id AND usr.first_name= '\" + session['name'] + \"'\"\n cur.execute(sql)\n usr_tags = cur.fetchall()\n valid_projects = []\n for proj in projects:\n if match(usr_tags, proj[1]):\n valid_projects.append(proj)\n logging.warning(valid_projects)\n return render_template('challenges.html', challenges=valid_projects, usr_first_name=session['name'])\n return redirect(url_for('login'))\n\n@app.route('/challenge', methods=['GET', 'POST'])\ndef challenge():\n logging.warning(session)\n if 'linkedin_token' in session:\n\n if request.method == 'POST':\n if 'file' not in request.files:\n return render_template(\"index.html\")\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return render_template(\"index.html\")\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_template(\"index.html\")\n return render_template(\"index.html\")\n\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n session['proj_id'] = 7\n cur = g.db_conn.cursor()\n sql = \"SELECT project.title, project.skill, project.description, companyprojectrel.start_time, companyprojectrel.expire_time, company.url, companyprojectrel.time_limit FROM project,companyprojectrel,company where project.id=companyprojectrel.project_id AND companyprojectrel.company_id=company.id AND project.id='\" + str(session['proj_id']) + \"'\"\n cur.execute(sql)\n projects = cur.fetchall()\n return render_template('challenge.html', challenge=projects[0], usr_first_name=session['name'])\n return redirect(url_for('login'))\n\n\n\n@app.route('/mychallenges')\ndef mychallenges():\n logging.warning(session)\n if 'linkedin_token' in session:\n me = linkedin.get('people/~')\n session['name'] = usr(dict(me.data)).fetch_first_name()\n 
cur = g.db_conn.cursor()\n\n sql = \"SELECT project.title, project.skill, project.description, companyprojectrel.start_time, companyprojectrel.expire_time, company.url, companyprojectrel.time_limit,usrprojectrel.score FROM project,companyprojectrel,company,usrprojectrel,usr where project.id=companyprojectrel.project_id AND companyprojectrel.company_id=company.id and usrprojectrel.usr_id =usr.id and usrprojectrel.project_id=project.id AND companyprojectrel.company_id=company.id AND usr.first_name='\" + str(session['name']) + \"'\"\n cur.execute(sql) \n projects = cur.fetchall()\n logging.warning(projects)\n return render_template('mychallenges.html', challenges=projects, usr_first_name=session['name'])\n return redirect(url_for('login'))\n\n####upload files######\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return redirect(url_for('upload_file'))\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('index'))\n return render_template(\"challenge.html\")\n\n\n@app.route('/dashboard',methods = ['GET'])\ndef dashboard():\n company_id = request.args.get('id')\n cur = g.db_conn.cursor()\n sql = \"SELECT company.name FROM company where company.id= '\" + company_id +\"'\"\n cur.execute(sql)\n name = cur.fetchall()\n sql = \"SELECT project.title, project.skill, project.description, companyprojectrel.start_time, companyprojectrel.expire_time, company.url FROM project,companyprojectrel,company where project.id=companyprojectrel.project_id AND companyprojectrel.company_id = company.id and company.id = '\" + company_id +\"'\"\n cur.execute(sql)\n projects = cur.fetchall()\n return render_template('dashboard.html', challenges=projects, company_name=name[0][0])\n\n\n@app.route('/createchallenge')\ndef create_challenge():\n return render_template('createchallenge.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"nextpath/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"101608051","text":"from threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QPixmap,QFont,QIcon,QPainter,QPen,QPalette,QColor,QPolygonF,QBrush,QPainterPath\nfrom PyQt5 import uic,QtCore\nimport sys\nimport qtawesome\nfrom datetime import datetime\nimport os\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport tinycss\nimport time\nfrom pprint import pprint\nimport json\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport numpy as np\nimport math\nimport re\nfrom matplotlib.colors import CSS4_COLORS as colors\nimport random\nimport logging\nfrom urllib.request import urlopen\nimport osrsclimethods as climethods\nimport argparse\nimport const\n\n#cssutils.log.setLevel(logging.CRITICAL)\n\ncolors = list(colors.values())\n\n\n\nif len(sys.argv) > 1:\n parser = argparse.ArgumentParser(description='OSRS Ping Checker CLI')\n 
parser.add_argument('-f','--free',default=False,action='store_true')\n parser.add_argument('-m','--members',default=False,action='store_true')\n parser.add_argument('-l','--limit',type=int,default=None,action='store')\n\n args = parser.parse_args()\n \n con = climethods.ThreadedConsoleHandler()\n con.daemon = True\n free = args.free\n members = args.members\n worldLimit = args.limit\n \n con.start()\n worlds = climethods.getWorlds(con)\n worlds = climethods.filterWorlds(worlds,free,members)\n con.progressTotal = len(tuple(worlds))\n con.startProgress(\"Retrieving server latency:\")\n def worldPinged(world, worldsPinged):\n con.progress = worldsPinged\n worlds = climethods.pingWorlds(worlds,worldPinged)\n worlds = sorted(worlds,key=lambda k:k[\"ping\"])\n while not con.done: #thread safe console output\n pass\n pingColors = {range(0,26):\"#00ff00\",range(26,51):\"#00a500\",range(51,101):\"#b2f733\",range(101,151):\"#ffe500\",range(151,200):\"#ff8800\"}\n print(\"\\r\")\n for enum,world in enumerate(worlds,0):\n if worldLimit != False and enum >= worldLimit:\n break\n key = [i for i in list(pingColors.keys()) if world[\"ping\"] in i]\n if len(key) == 0:\n pingColor = \"#e01616\"\n else:\n pingColor = pingColors[key[0]]\n print(f\"World {world['world']} ({world['country']}): {world['ping']} ping {'('+world['worldActivity']+')' if world['worldActivity'] != '' else ''}\")\n sys.exit()\n \n\napp = QApplication(sys.argv)\npix = QPixmap(os.path.join(\"images\",\"splash3.png\"))\nsplash = QSplashScreen(pix,QtCore.Qt.WindowStaysOnTopHint)\nsplash.show()\n\n\nwith open(\"config.json\",\"r\") as data:\n config = json.load(data)\n\n\nparser = tinycss.make_parser()\nsheet = parser.parse_stylesheet(requests.get(\"http://www.runescape.com/css/c/oldschool-101.css\").text)\n\nclass Canvas(FigureCanvas):\n def __init__(self,width=5,height=5,dpi=100):\n figure = Figure()\n self.axes = figure.add_subplot(111)\n FigureCanvas.__init__(self,figure)\n FigureCanvas.updateGeometry(self)\n\n def plot(self,data):\n x = np.array(data[\"x\"][\"data\"])\n y = np.array(data[\"y\"][\"data\"])\n newData = {}\n for x,y in list(zip(x,y)):\n if x in newData:\n newData[x].append(y)\n else:\n newData[x] = [y]\n for i in newData:\n newData[i] = sum(newData[i])/len(newData[i])\n\n x = []\n y = []\n for key in newData:\n x.append(key)\n y.append(newData[key])\n \n self.axes.bar(x,y)\n self.axes.set_xlabel(data[\"x\"][\"title\"])\n self.axes.set_ylabel(data[\"y\"][\"title\"])\n self.draw()\n\nclass Sorter(QFrame):\n def __init__(self,categories):\n super().__init__()\n layout = QHBoxLayout()\n layout.setContentsMargins(4,2,4,2)\n layout.setAlignment(QtCore.Qt.AlignLeft)\n self.setLayout(layout)\n self.setStyleSheet(\"margin-top:2.5px;\")\n for category in categories:\n categoryWidget = SortCategory(category)\n categoryWidget.setObjectName(category)\n self.layout().addWidget(categoryWidget)\n \n self.setStyleSheet(\"border-radius:5px;padding:.25px;border:1px solid #8E7246;\")\n \n\n\n\nclass ArrowBtn(QLabel):\n UP = 0\n DOWN = 1\n clickSignal = QtCore.pyqtSignal()\n releaseSignal = QtCore.pyqtSignal()\n def __init__(self,direction):\n super().__init__()\n self.direction = direction\n #self.setFlat(True)\n sizePolicy = QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)\n self.setSizePolicy(sizePolicy)\n #self.setStyleSheet(\"background-color:green;border:1px solid green;max-width:12px;max-height:12px;min-width:12px;min-height:12px;border-radius:6px\")\n self.setStyleSheet(\"padding:0;margin:0;\")\n self.setFixedSize(16,16)\n \n def 
paintEvent(self,event):\n painter = QPainter()\n painter.begin(self)\n color = QtCore.Qt.white\n painter.setPen(QPen(QtCore.Qt.gray))\n poly = QPolygonF()\n \"\"\"\n if self.direction == self.UP:\n circleColor = QtCore.Qt.darkGreen\n poly.append(QtCore.QPoint(4+4,0+6))\n poly.append(QtCore.QPoint(8+4,4+6))\n poly.append(QtCore.QPoint(0+4,4+6))\n elif self.direction == self.DOWN:\n circleColor = QtCore.Qt.red\n poly.append(QtCore.QPoint(4+3,4+6))\n poly.append(QtCore.QPoint(8+3,0+6))\n poly.append(QtCore.QPoint(0+3,0+6))\n \"\"\"\n if self.direction == self.UP:\n circleColor = QtCore.Qt.darkGreen\n #if self.property(\"pressed\"):\n #circleColor = QColor(0x005600)\n poly.append(QtCore.QPoint(6+2,0+4))\n poly.append(QtCore.QPoint(12+2,6+4))\n poly.append(QtCore.QPoint(0+2,6+4))\n elif self.direction == self.DOWN:\n circleColor = QtCore.Qt.red\n #if self.property(\"pressed\"):\n #circleColor = QtCore.Qt.darkRed\n poly.append(QtCore.QPoint(6+1,6+5))\n poly.append(QtCore.QPoint(12+1,0+5))\n poly.append(QtCore.QPoint(0+1,0+5))\n\n if self.property(\"selected\"):\n circleColor = QColor(0x666666)\n \n brush = QBrush()\n brush.setStyle(QtCore.Qt.SolidPattern)\n radius = 8\n \n '''\n path = QPainterPath()\n painter.setPen(QPen(QColor(255,255,0)))\n brush.setColor(QColor(255,255,0))\n path.addRect(0,0,radius*2,radius*2)\n\n painter.fillPath(path,brush)\n '''\n \n path = QPainterPath()\n painter.setPen(QPen(circleColor))\n brush.setColor(circleColor)\n path.addEllipse(0,0,radius*2,radius*2)\n \n painter.fillPath(path,brush)\n\n \n brush.setColor(color)\n #painter.drawPolygon(poly)\n path = QPainterPath()\n path.addPolygon(poly)\n painter.fillPath(path,brush)\n\n \n painter.end()\n\n\n def deselect(self):\n self.setProperty(\"selected\",False)\n self.repaint()\n\n\n def mousePressEvent(self,event):\n self.clickSignal.emit()\n\n def mouseReleaseEvent(self,event):\n self.releaseSignal.emit()\n\n\nclass SortCategory(QFrame):\n pressedSignal = QtCore.pyqtSignal(int)\n def __init__(self,category):\n super().__init__()\n layout = QHBoxLayout()\n layout.setContentsMargins(1,1,1,1)\n self.setLayout(layout)\n sizePolicy = QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)\n self.setSizePolicy(sizePolicy)\n #self.setStyleSheet(\"QFrame{border:1px solid red;}QFrame > *{}\")\n self.setStyleSheet(\"border:none;\")\n self.upArrow = ArrowBtn(ArrowBtn.UP)\n #self.upArrow.pressed.connect(lambda:self.click(self.upArrow))\n #self.upArrow.released.connect(lambda:self.unclick(self.upArrow))\n self.upArrow.clickSignal.connect(lambda:self.click(self.upArrow))\n self.upArrow.releaseSignal.connect(lambda:self.unclick(self.upArrow))\n #sizePolicy = QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)\n #self.upArrow.setSizePolicy(sizePolicy)\n #self.upArrow.setFixedSize(12,12)\n \n \n self.downArrow = ArrowBtn(ArrowBtn.DOWN)\n #self.downArrow.pressed.connect(lambda:self.click(self.downArrow))\n #self.downArrow.released.connect(lambda:self.unclick(self.downArrow))\n self.downArrow.clickSignal.connect(lambda:self.click(self.downArrow))\n self.downArrow.releaseSignal.connect(lambda:self.unclick(self.downArrow))\n \n self.categoryLabel = QLabel(category)\n sizePolicy = QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)\n self.categoryLabel.setSizePolicy(sizePolicy)\n self.categoryLabel.setStyleSheet(\"font-weight:bold;margin-bottom:5px;\")\n \n self.layout().addWidget(self.upArrow)\n self.layout().addWidget(self.downArrow)\n self.layout().addWidget(self.categoryLabel)\n #self.layout().addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, 
QSizePolicy.Minimum))\n\n def click(self,widget): \n widget.setProperty(\"selected\",True)\n if widget.direction == widget.UP:\n self.downArrow.deselect()\n elif widget.direction == widget.DOWN:\n self.upArrow.deselect()\n self.pressedSignal.emit(self.direction)\n widget.setProperty(\"pressed\",True)\n widget.repaint()\n\n def unclick(self,widget):\n widget.setProperty(\"pressed\",False)\n widget.repaint()\n\n\n\nclass PopupGraph(QDialog):\n def __init__(self,graphData):\n super().__init__()\n self.graphData = graphData\n self.setWindowTitle(graphData[\"title\"])\n layout = QVBoxLayout()\n layout.setContentsMargins(0,0,0,0)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)\n self.setSizePolicy(sizePolicy)\n self.setLayout(layout)\n self.canvas = Canvas(graphData)\n self.canvas.plot(graphData)\n self.layout().addWidget(self.canvas)\n \n self.exec()\n\n\nclass WorldFetcher(QtCore.QObject):\n worldSignal = QtCore.pyqtSignal(list)\n worldParsedSignal = QtCore.pyqtSignal(int)\n worldTotalSignal = QtCore.pyqtSignal(int)\n fetchingCompleteSignal = QtCore.pyqtSignal()\n pingTotalSignal = QtCore.pyqtSignal(int)\n worldPingedSignal = QtCore.pyqtSignal(int)\n pingDeltasSignal = QtCore.pyqtSignal(dict)\n worldListComplete = QtCore.pyqtSignal(list)\n \n def __init__(self):\n QtCore.QObject.__init__(self)\n\n\n @QtCore.pyqtSlot()\n def run(self):\n worlds = self.getWorlds()\n self.worldSignal.emit(worlds)\n\n\n def pingWorlds(self,worlds):\n #pprint(worlds)\n #self.fetchingCompleteSignal.emit()\n self.pingTotalSignal.emit(len(worlds))\n times = {\"title\":\"Latency by country\",\"x\":{\"title\":\"Country\",\"data\":[]},\"y\":{\"title\":\"Latency\",\"data\":[]}}\n def worldPinged(world,worldsPinged):\n self.worldPingedSignal.emit(worldsPinged)\n worlds = climethods.pingWorlds(worlds,worldPinged)\n for world in worlds:\n times[\"x\"][\"data\"].append(world[\"country\"])\n times[\"y\"][\"data\"].append(world[\"ping\"])\n self.fetchingCompleteSignal.emit()\n if config[\"debugGraphs\"]:\n self.pingDeltasSignal.emit(times)\n self.worldListComplete.emit(worlds)\n \n \n def getWorlds(self):\n html = requests.get(\"http://oldschool.runescape.com/slu\").text\n soup = bs(html,\"html.parser\")\n\n worldWrapper = soup.find_all(attrs={\"class\":\"server-list__body\"})[0]\n worlds = worldWrapper.findChildren(\"tr\")\n\n\n parsedWorlds = []\n totalWorlds = len(worlds)\n self.worldTotalSignal.emit(totalWorlds)\n for worldCount,world in enumerate(worlds,1):\n membersWorld = False\n classNames = world.get(\"class\")\n #for className in classNames:\n # flags = className.split(\"--\")[1:]\n # if len(flags) != 0:\n # print(flags)\n # if \"members\" in flags:\n # membersWorld = True\n\n worldChildren = list(filter(lambda i: i != \"\\n\",list(world.children)))\n \n worldInfo = worldChildren[0]\n players = worldChildren[1].string\n if players == None:\n players = \"Full\"\n else:\n players = players.split()[0]\n country = worldChildren[2].string\n className = worldChildren[2][\"class\"][-1]\n worldType = worldChildren[3].string\n worldActivity = worldChildren[4].string\n\n if worldActivity == \"-\":\n worldActivity = \"\"\n\n worldNumber = int(worldInfo.find(\"a\")[\"id\"].split(\"-\")[-1])\n if worldNumber-300 != int(worldInfo.find(\"a\").string.split()[-1]):\n print(\"Error:\",worldNumber,\"(-300)\",\"!=\",worldInfo.find(\"a\").string.split()[-1])\n\n \n 
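`WorldFetcher` above is the standard QObject-on-a-QThread pattern: the object is moved onto a worker thread so slow work (HTTP fetches, pings) never blocks the GUI, and results travel back over signals. The same pattern stripped to its core, as a sketch rather than the app's actual wiring:

from PyQt5 import QtCore

class MiniWorker(QtCore.QObject):
    finished = QtCore.pyqtSignal(str)

    @QtCore.pyqtSlot()
    def run(self):
        # ...slow work (HTTP requests, pings) happens here, off the GUI thread...
        self.finished.emit("done")

def start_worker():
    thread = QtCore.QThread()
    worker = MiniWorker()
    worker.moveToThread(thread)              # worker's slots now execute in `thread`
    thread.started.connect(worker.run)       # kick off work once the thread spins up
    worker.finished.connect(thread.quit)     # stop the thread's event loop when done
    thread.start()
    return thread, worker                    # keep references or they get garbage collected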
parsedWorlds.append({\"cssClass\":className,\"world\":worldNumber,\"players\":players,\"country\":country,\"worldType\":worldType,\"worldActivity\":worldActivity})\n self.worldParsedSignal.emit(worldCount)\n time.sleep(0.005)\n return parsedWorlds\n\n\ndef saveConfig():\n with open(\"config.json\",\"w\") as data:\n json.dump(config,data)\n\ndef getServerPing(world):\n url = f\"oldschool{world}.runescape.com\"\n ping = Ping(url)\n if ping == None:\n return None\n print(\"Error:\",url,\"returned none\")\n return ping*1000\n\n\ndef addStyle(obj,style):\n obj.setStyleSheet(obj.styleSheet()+makeCSS(style))\n\n\ndef updateStyle(obj):\n obj.style().unpolish(obj)\n obj.style().polish(obj)\n obj.update()\n\ndef makeCSS(css):\n cssString = \"\"\n for key in css.keys():\n value = css[key]\n if value[-1] != \";\":\n value+=\";\"\n cssString+=key+\":\"+value\n return cssString\n\n\ndef countryImages(worlds):\n '''\n countries = set([i[\"country\"] for i in worlds])\n colorDict = {}\n for country in countries:\n random.shuffle(colors)\n colorDict[country] = colors.pop()\n return colorDict\n '''\n \n countryClasses = set([i[\"cssClass\"] for i in worlds])\n imageUrlDict = {}\n for countryClass in countryClasses:\n imageUrlDict[countryClass] = download(getImageURL(countryClass))[0]\n return imageUrlDict\n\n\ndef getImageURL(imgClass):\n imgClass = \".\"+imgClass\n rule = [rule for rule in sheet.rules if imgClass in rule.selector.as_css()][0]\n return re.search(\"(?<=\\().*?(?=\\))\",[declaration.value for declaration in rule.declarations if declaration.name==\"background-image\"][0].as_css()).group(0)[1:-1]\n \ndef download(url,name=None):\n if name != None:\n filename = name\n else:\n filename = url.split('/')[-1]\n filename = os.path.join(\"images\",filename)\n if os.path.isfile(filename) == False:\n img_file = open(filename, \"wb\")\n img_file.write(urlopen(url).read())\n img_file.close()\n absolute_path = os.path.dirname(os.path.abspath(filename))\n return filename, absolute_path\n else:\n absolute_path = os.path.dirname(os.path.abspath(filename))\n return filename, absolute_path\n\nclass QHLine(QFrame):\n def __init__(self):\n super().__init__()\n self.setFrameShape(QFrame.HLine)\n self.setFrameShadow(QFrame.Sunken)\n self.setStyleSheet(\"border:none;border-top:1px solid gray;\")\n\nclass Console(QFrame):\n worldPingList = QtCore.pyqtSignal(list)\n ERRORSTYLE = [{\"color\":\"red\"},{\"center\":True,\"timestamp\":True}]\n def __init__(self):\n super().__init__()\n\n self.images = None\n \n layout = QVBoxLayout()\n layout.setContentsMargins(0,0,0,0)\n layout.setAlignment(QtCore.Qt.AlignTop)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(30)\n self.setSizePolicy(sizePolicy)\n self.setLayout(layout)\n\n layout = QVBoxLayout()\n layout.setContentsMargins(0,0,0,0)\n layout.setAlignment(QtCore.Qt.AlignTop)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Fixed)\n self.pinnedText = QFrame()\n self.pinnedText.setSizePolicy(sizePolicy)\n self.pinnedText.setLayout(layout)\n\n layout = QVBoxLayout()\n layout.setContentsMargins(10,10,10,10)\n layout.setAlignment(QtCore.Qt.AlignTop)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)\n self.normalText = QFrame()\n self.normalText.setSizePolicy(sizePolicy)\n self.normalText.setLayout(layout)\n\n\n layout = QVBoxLayout()\n layout.setContentsMargins(5,5,5,5)\n layout.setAlignment(QtCore.Qt.AlignTop)\n sizePolicy = 
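`download()` above (used by `countryImages`) caches each flag image by filename so reruns skip the network. The same caching rule as a compact standalone helper; a sketch, not the app's function:

import os
from urllib.request import urlopen

def cached_download(url, directory="images"):
    # Reuse a previously downloaded file with the same basename instead of refetching.
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, url.split("/")[-1])
    if not os.path.isfile(path):
        with urlopen(url) as response, open(path, "wb") as out:
            out.write(response.read())
    return path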
QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Fixed)\n self.errorText = QFrame()\n self.errorText.setSizePolicy(sizePolicy)\n self.errorText.setLayout(layout)\n \n\n self.frameDivider = QHLine()\n self.errorDivider = QHLine()\n self.frameDivider.hide()\n self.errorDivider.hide()\n\n #self.normalText.setStyleSheet(\"border:3px solid red;\")\n #self.pinnedText.setStyleSheet(\"border:3px solid red;\")\n\n self.normalText.setStyleSheet(\"border:none;font-size:13px;\")\n self.pinnedText.setStyleSheet(\"border;\")\n self.errorText.setStyleSheet(\"border:none;\")\n self.normalText.setObjectName(\"normalText\")\n self.pinnedText.setObjectName(\"pinnedText\")\n self.errorText.setObjectName(\"errorText\")\n \n self.pinnedText.hide()\n #self.frameDivider.hide()\n\n self.container = QFrame()\n self.container.setStyleSheet(\"border:none;\")\n layout = QVBoxLayout()\n layout.setContentsMargins(10,10,10,10)\n layout.setAlignment(QtCore.Qt.AlignTop)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)\n self.container.setSizePolicy(sizePolicy)\n self.container.setLayout(layout)\n\n self.scrollContainer = QScrollArea()\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Expanding)\n self.scrollContainer.setSizePolicy(sizePolicy)\n self.scrollContainer.setWidget(self.normalText)\n self.scrollContainer.setWidgetResizable(True)\n self.scrollContainer.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) \n self.scrollContainer.setStyleSheet(\"\"\"\n\n QScrollBar:vertical {\n border: 1px solid darkgray;\n background: white;\n width: 15px;\n margin: 16px 0 16px 0;\n }\n QScrollBar::handle:vertical {\n background: lightgray;\n min-height: 20px;\n border-top:1px solid darkgray;\n border-bottom:1px solid gray;\n }\n QScrollBar::add-line:vertical {\n border: 1px solid darkgray;\n background: white;\n height: 15px;\n subcontrol-position: bottom;\n subcontrol-origin: margin;\n }\n\n QScrollBar::sub-line:vertical {\n border:1px solid darkgray;\n background: white;\n height: 15px;\n subcontrol-position: top;\n subcontrol-origin: margin;\n \n }\n QScrollBar::up-arrow:vertical {\n background: url(\"images/up-arrow.png\");\n \n }\n QScrollBar::down-arrow:vertical {\n background: url(\"images/down-arrow.png\");\n }\n\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n background: none;\n }\n\n QScrollArea[worldList=true] {\n /*background:#362D1A;*/\n }\n \"\"\")\n \n \n self.container.layout().addWidget(self.frameDivider)\n self.container.layout().addWidget(self.pinnedText)\n self.container.layout().addWidget(self.scrollContainer)\n #self.container.layout().addWidget(self.normalText)\n self.container.layout().addWidget(self.errorDivider)\n self.container.layout().addWidget(self.errorText)\n\n self.layout().addWidget(self.container)\n\n\n\n self.worldFetchProgress = QProgressBar()\n self.worldFetchProgress.setMinimum(0)\n self.worldFetchProgress.setMaximum(100)\n self.worldFetchProgress.hide()\n #self.animateProgressBar()\n self.worldFetchProgress.setStyleSheet(\"\"\"\n QProgressBar {\n color: white;\n font-size:11px;\n font-weight:bold;\n }\n QProgressBar:horizontal {\n border:none;\n border-radius:none;\n border-top:2px solid gray;\n text-align:center;\n }\n QProgressBar::chunk:horizontal {\n background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0,\n stop: 0 #007000, stop: 1 #00dd00);\n border-radius:8px;\n border-top-left-radius:0;\n border-top-right-radius:0;\n border-bottom-right-radius:0;\n }\n QProgressBar::chunk:horizontal[done=true] {\n 
border-bottom-right-radius:8px;\n }\n \"\"\")\n \n \n self.worldFetchProgress.setValue(0)\n\n \n self.layout().addWidget(self.worldFetchProgress)\n \n self.setStyleSheet(\"\"\"\n background:transparent;\n border-radius:10px;\n border:2px solid gray;\n color:white;\n \"\"\")\n self.labels = []\n self.emitText(\"Welcome to OSRS Ping Checker v\"+const.VERSION,css={\"font-weight\":\"500\",\"color\":\"lightgray\"},flags={\"center\":True,\"perm\":True,\"title\":True})\n #self.emitText(\"foo\")\n #self.emitText(\"bar\")\n \n \n \n \n def completeWorldData(self,worlds):\n #pprint(worlds)\n #label = self.emitText(\"Worlds with Best Ping:\",css={\"text-decoration\":\"underline\",\"color\":\"gray\",\"font-weight\":\"bold\",\"font-size\":\"14\"},flags={\"center\":True,\"pin\":True})\n #index = self.pinnedText.layout().indexOf(label)+1\n categories = [\"foo\"]\n #self.sorter = Sorter(categories)\n #print(self.sorter.findChild(QFrame,\"foo\"))\n \n \n self.pinnedText.show()\n #self.pinnedText.layout().addWidget(self.sorter)\n #self.sorter = Sorter([\"foo\"])\n #self.normalText.layout().insertWidget(0,self.sorter)\n #self.labels.append(self.sorter)\n self.scrollContainer.setProperty(\"worldList\",True)\n updateStyle(self.scrollContainer)\n if self.images == None:\n self.images = countryImages(worlds)\n\n worlds = sorted(worlds,key=lambda k:k[\"ping\"])\n pingColors = {range(0,26):\"#00ff00\",range(26,51):\"#00a500\",range(51,101):\"#b2f733\",range(101,151):\"#ffe500\",range(151,200):\"#ff8800\"}\n \n for world in worlds:\n key = [i for i in list(pingColors.keys()) if world[\"ping\"] in i]\n if len(key) == 0:\n pingColor = \"#e01616\"\n else:\n pingColor = pingColors[key[0]]\n #print(world)\n #self.emitText(f' {world[\"country\"]}World {world[\"world\"]}: {math.floor(world[\"ping\"])} ping',flags={\"rich\":True})\n label = self.emitText(f'''\n \n \n World {world[\"world\"]}: {world[\"ping\"]} ping\n \n \n \n {world[\"worldActivity\"]}\n \n ''',flags={\"rich\":True})\n label.setProperty(\"worldLabel\",True)\n \n \n\n def emitWorldLabel(self,labelData):\n worldLabelText = labelData\n\n def resetThreadedFetching(self):\n self.worldFetchProgress.setProperty(\"done\",False)\n updateStyle(self.worldFetchProgress)\n self.worldFetchProgress.setValue(0)\n self.worldFetchProgress.setFormat(\"%p%\")\n self.worldFetchProgress.hide()\n\n def updateProgress(self,progress):\n self.worldFetchProgress.setValue(progress)\n updateStyle(self.worldFetchProgress)\n if progress == self.worldFetchProgress.maximum():\n self.worldFetchProgress.setProperty(\"done\",True)\n updateStyle(self.worldFetchProgress)\n self.resetThreadedFetching()\n\n def setProgressTotal(self,total,title):\n self.worldFetchProgress.setMaximum(total)\n self.worldFetchProgress.setFormat(title)\n titleStr = re.sub(r\"%.|\\(([^)]+)\\)\",\"\",title)\n self.emitText(titleStr)\n self.worldFetchProgress.show()\n \n\n #test function\n def animateProgressBar(self):\n #have to store the animation or it will get garbage collected and wont execute\n '''\n self.animation = QtCore.QPropertyAnimation(self.worldFetchProgress,b\"value\")\n self.animation.setDuration(1000)\n self.animation.setStartValue(0)\n self.animation.setEndValue(100)\n self.animation.start()\n '''\n\n #self.animation = QtCore.QPropertyAnimation(self.worldFetchProgress,b\"opacity\")\n\n\n def clearErrors(self,errors=[]): #optional to clear select errors\n if errors == []:\n for label in self.errorText.children():\n if type(label) == QVBoxLayout:\n continue\n self.labels.remove(label)\n 
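The `pingColors` lookup in `completeWorldData` (and in the CLI path earlier) scans a dict keyed by `range` objects, and a fractional latency never tests true against `in range(...)`, so it falls through to red. A sorted-threshold table with `bisect` reproduces the same colour bands, handles floats, and runs in O(log n); a sketch, not the app's code:

import bisect

BAND_UPPER = [25, 50, 100, 150, 199]   # inclusive upper bound of each latency band
BAND_COLOUR = ["#00ff00", "#00a500", "#b2f733", "#ffe500", "#ff8800", "#e01616"]

def ping_colour(ping):
    # bisect_left finds the first band whose upper bound is >= ping; anything past
    # the last threshold falls through to the final (red) colour.
    return BAND_COLOUR[bisect.bisect_left(BAND_UPPER, ping)]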
label.deleteLater()\n self.errorDivider.hide()\n\n else:\n deletedLabels=0\n for label in errors:\n if type(label) == QLabel:\n self.labels.remove(label)\n label.deleteLater()\n deletedLabels+=1\n \n if len(self.errorText.children())-1 == deletedLabels:\n self.errorDivider.hide()\n \n \n\n def clear(self):\n pinnedLabelsDeleted = 0\n for label in tuple(self.labels):\n self.labels.remove(label)\n label.deleteLater()\n if label.parent().objectName() == \"pinnedText\":\n pinnedLabelsDeleted+=1\n\n if len(self.pinnedText.children())-1 == pinnedLabelsDeleted:\n self.pinnedText.hide()\n # self.frameDivider.hide()\n\n self.scrollContainer.setProperty(\"worldList\",False)\n updateStyle(self.scrollContainer)\n \n\n def check(self,worlds,free,members):\n if not free:\n for world in tuple(worlds):\n if world[\"worldType\"] == \"Free\":\n worlds.remove(world)\n if not members:\n for world in tuple(worlds):\n if world[\"worldType\"] == \"Members\":\n worlds.remove(world)\n\n self.worldPingList.emit(worlds) \n #for world in worlds[:5]:\n # self.emitText(f'World {world[\"world\"]} - {world[\"players\"]} players ({world[\"worldType\"]})')\n \n #print(worlds)\n\n def emitText(self,text,css={},flags={}):\n if type(text) == list:\n text = \"\\n\".join(text)\n label = QLabel()\n sizePolicy = QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(1)\n label.setSizePolicy(sizePolicy)\n \n \n\n\n if \"title\" in flags and flags[\"title\"]:\n flags[\"pin\"] = True\n self.frameDivider.show()\n addStyle(label,{\"margin-top\":\"5px\"})\n self.container.layout().insertWidget(0,label)\n\n elif \"error\" in flags and flags[\"error\"]:\n if \"specialStyle\" not in flags or flags[\"specialStyle\"] == True:\n css.update(self.ERRORSTYLE[0])\n flags.update(self.ERRORSTYLE[1])\n #self.container.layout().addWidget(label)\n self.errorText.layout().addWidget(label)\n self.errorDivider.show()\n\n elif \"pin\" in flags and flags[\"pin\"]:\n #if len(self.pinnedText.children()) == 1: #layout counts as child\n # self.pinnedText.show()\n # self.frameDivider.show()\n self.pinnedText.show()\n self.pinnedText.layout().addWidget(label)\n else:\n self.normalText.layout().addWidget(label)\n\n \n if \"perm\" in flags and not flags[\"perm\"] or \"perm\" not in flags:\n self.labels.append(label)\n\n if \"timestamp\" in flags and flags[\"timestamp\"]:\n text = datetime.now().strftime(\"%I:%M %p\") + \": \" + text\n\n if \"center\" in flags and flags[\"center\"]:\n label.setAlignment(QtCore.Qt.AlignCenter)\n\n\n if css != {}:\n css = makeCSS(css)\n else:\n css = \"\"\n\n css+=\"border:none;\"\n if \"rich\" in flags and flags[\"rich\"]:\n label.setTextFormat(QtCore.Qt.RichText)\n label.setText(text)\n label.setStyleSheet(css)\n return label\n\n \n\n \n\nclass Worker(QtCore.QObject):\n #signals\n friendLabelReady = QtCore.pyqtSignal(dict)\n\n #slots\n @QtCore.pyqtSlot()\n def createFriendLabel(self,data):\n self.friendLabelReady.emit(data)\n\n\nclass SettingsWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowIcon(qtawesome.icon(\n 'fa5s.cog',\n color='#303030'\n ))\n\n self.setLayout(QVBoxLayout())\n\nclass Window(QMainWindow):\n fetchWorldsSignal = QtCore.pyqtSignal()\n def __init__(self):\n super().__init__()\n self.obj = Worker()\n self.typeSelectError = False\n self.initMain()\n\n self.show()\n\n def initMain(self):\n uic.loadUi(\"main.ui\",self)\n self.setWindowIcon(QIcon(os.path.join(\"images\",\"icon.ico\")))\n self.setStyleSheet(\"\"\"\n 
#MainWindow {\n background-image:url('images/osrs.png');\n background-repeat:no-repeat;\n }\n #checkBtn, #clearBtn{\n border:1px solid black;\n border-radius:3.5px;\n padding: 5px 25px 5px 25px;\n background:lightgray;\n }\n #checkBtn:hover, #clearBtn:hover {\n background:silver;\n }\n #checkBtn:pressed, #clearBtn:pressed {\n background:#a0a2a5 !important;\n }\n\n QCheckBox {\n border:none;\n \n }\n \n \"\"\")\n\n self.con = Console()\n self.settingsWindow = SettingsWindow()\n\n self.settingsButton = QPushButton('')\n self.settingsButton.resize(24,24)\n self.settingsButton.setFlat(True)\n self.settingsButton.setIcon(qtawesome.icon(\n 'fa5s.cog',\n color='#dddddd'\n ))\n self.settingsButton.setStyleSheet('''\n QPushButton:pressed {\n border:none !important;\n outline:none !important;\n background:none !important;\n }\n''')\n \n self.createWorkerThread()\n\n self.memberWorldBox.setChecked(config[\"worldTypes\"][\"members\"])\n self.freeWorldBox.setChecked(config[\"worldTypes\"][\"free\"])\n self.memberWorldBox.setProperty(\"worldType\",\"members\")\n self.freeWorldBox.setProperty(\"worldType\",\"free\")\n self.memberWorldBox.clicked.connect(lambda:self.checkBoxClicked(self.memberWorldBox))\n self.freeWorldBox.clicked.connect(lambda:self.checkBoxClicked(self.freeWorldBox))\n self.clearBtn.pressed.connect(lambda:self.pressBtn(self.clearBtn))\n self.checkBtn.pressed.connect(lambda:self.pressBtn(self.checkBtn))\n self.settingsButton.pressed.connect(self.displaySettings)\n self.clearBtn.released.connect(self.clear)\n self.checkBtn.released.connect(self.check)\n self.setFixedSize(335,565)\n #layout = QVBoxLayout()\n #layout.setContentsMargins(0,0,0,0)\n #self.informationBox.setLayout(layout)\n self.informationBox.hide()\n index = self.mainFrame.layout().indexOf(self.informationBox)\n self.mainFrame.layout().insertWidget(index,self.con)\n \n self.settingsButton.setParent(self.mainFrame)\n\n def showEvent(self,event):\n self.settingsButton.move(\n self.titleLabel.x()+(self.titleLabel.width())-(self.settingsButton.width()),\n self.titleLabel.y()+(self.titleLabel.height())-(self.settingsButton.height())\n )\n print(self.settingsButton.geometry(),self.titleLabel.geometry())\n\n def displaySettings(self):\n self.settingsWindow.show()\n \n def createWorkerThread(self):\n self.thread = QtCore.QThread()\n self.worldRetriever = WorldFetcher()\n self.worldRetriever.worldSignal.connect(self.startConCheck)\n self.worldRetriever.moveToThread(self.thread)\n self.thread.start()\n\n self.fetchWorldsSignal.connect(self.worldRetriever.run)\n\n self.con.worldPingList.connect(self.worldRetriever.pingWorlds)\n self.worldRetriever.worldParsedSignal.connect(self.con.updateProgress)\n self.worldRetriever.worldTotalSignal.connect(lambda total:self.con.setProgressTotal(total,\"%p% Retrieving world data...\"))\n self.worldRetriever.fetchingCompleteSignal.connect(self.fetchingComplete)\n self.worldRetriever.pingTotalSignal.connect(lambda total:self.con.setProgressTotal(total,\"%p% Retrieving server latency data... 
(%v/%m)\"))\n self.worldRetriever.worldPingedSignal.connect(self.con.updateProgress)\n self.worldRetriever.pingDeltasSignal.connect(self.graph)\n self.worldRetriever.worldListComplete.connect(self.con.completeWorldData)\n\n\n def checkBoxClicked(self,obj):\n config[\"worldTypes\"][obj.property(\"worldType\")] = obj.isChecked()\n saveConfig()\n\n def graph(self,data):\n popup = PopupGraph(data)\n \n\n def resetWorker(self):\n if self.thread.isRunning():\n self.worldRetriever.disconnect()\n print(\"terminating\")\n self.thread.terminate()\n\n self.thread.wait()\n\n self.createWorkerThread()\n\n\n def pressBtn(self,obj):\n obj.setProperty(\"pressed\",True)\n updateStyle(obj)\n\n\n def startConCheck(self,worldData):\n self.con.check(worldData,self.freeWorldBox.isChecked(),self.memberWorldBox.isChecked())\n\n\n def cancel(self):\n self.resetWorker()\n self.checkBtn.setText(\"Check\")\n self.checkBtn.released.disconnect()\n self.checkBtn.released.connect(self.check)\n self.con.resetThreadedFetching()\n self.con.emitText(\"Cancelling...\")\n \n \n def fetchingComplete(self):\n self.checkBtn.setText(\"Check\")\n self.checkBtn.released.disconnect()\n self.checkBtn.released.connect(self.check)\n self.con.resetThreadedFetching()\n self.con.clear()\n\n def check(self):\n updateStyle(self.checkBtn)\n if self.freeWorldBox.isChecked() or self.memberWorldBox.isChecked():\n self.con.clearErrors([self.typeSelectError])\n #self.typeSelectError = False\n #self.con.worldFetchProgress.setFormat(\"Retrieving world data... (%p%)\")\n #QtCore.QMetaObject.invokeMethod(self.worldRetriever,\"run\")\n self.con.worldFetchProgress.show()\n self.fetchWorldsSignal.emit()\n self.checkBtn.setText(\"Cancel\")\n self.checkBtn.released.disconnect()\n self.checkBtn.released.connect(self.cancel)\n\n \n \n \n else:\n if not self.typeSelectError:\n emittedLabel = self.con.emitText(\"No world types selected\",{},{\"timestamp\":True,\"error\":True})\n self.typeSelectError = emittedLabel\n\n def clear(self):\n updateStyle(self.clearBtn)\n self.con.clear()\n self.con.errorDivider.hide()\n self.typeSelectError = False\n \n\n\n\nwindow = Window()\nsplash.hide()\n\ndef PyQtException(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\nsys.excepthook = PyQtException\n\napp.exec_()\nsys.exit()\n","sub_path":"pingChecker2.py","file_name":"pingChecker2.py","file_ext":"py","file_size_in_byte":36457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"313764417","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 6 Aug 2016\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\"\"\"\n\nfrom scs_dfe.interface.component.mcp9808 import MCP9808\n\nfrom scs_host.bus.i2c import I2C\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\ntry:\n I2C.Sensors.open()\n\n temp = MCP9808(False)\n print(temp)\n\n temp.running = True\n\n datum = temp.sample()\n print(datum)\n\n print(temp)\n\nfinally:\n I2C.Sensors.close()\n","sub_path":"tests/interface/component/mcp9808_test.py","file_name":"mcp9808_test.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"555621361","text":"\"\"\"\nCopyright (c) 2013 Tommy Carpenter\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without 
restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\n# Define your item pipelines here\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/topics/item-pipeline.html\nfrom unidecode import unidecode #used to convert strange unicode into ascii\nfrom lib.utils.string_functions import cleanse_tags_contractions_whitespace\nfrom lib.utils.mysql import CrawlerDBC\n\n#cleans the items before being entered into the database. \nclass myNissanLeafClean(object):\n def process_item(self, item, spider):\n if 'myNissanLeafClean' not in getattr(spider, 'pipelines'):\n return item\n \n #fix contractions in title\n item['title'] = cleanse_tags_contractions_whitespace(item['title'])\n \n #infer numcomments\n item['numcomments'] = len(item['comments'])\n \n #make the comments into pairs {date|||comment} 3 pipes used because later i need to parse the date out and need a unique seperator. \n comments = \"\"\n for c in range(0,len(item['comments'])): \n #need to kill the quoted text, but cant do this in xpath.\n #in xpath if we select /div/text, then if there is a blank line in someones post:\n #
<br>\n            #blag\n            #<br>\n            #blah\n            #this gets parsed as two separate comments. but we only want this to count as one post. \n            \n            #instead we'll split the string (using python) based on the div tag and take the content before the last tag\n            #KILL ALL OF THIS:\n            #<div class=\"quote\">padamson1 wrote:<br>I've been using the coil and twist method used by sailors (as shown here http://www.you\n            #tube.com/watch?v=k2ChlCns4AU but without the final tight wrap & knot). \n            #This requires that the far end is free to spin around because the twist turns the rope.</div>\n            #START KEEPING FROM HERE\n            #<br>I had modified this method so that when the wire gets twisted, I twist it the opposite way and \n            #put the loop on the other side of the coil. This keeps the cable from getting twisted overall. \n            #I use this on the 20 feet of cable on the 120V brick EVSE. But--it does nothing to help you get it unrolled. \n            #I have to unroll it the old fashioned way...\n            #<br>\n            #<br>THEN, I watched the video! No more of the old method! I am going to learn this over/under method. \n            #As padamson1 said, I learned something new today.\n            #</div>\n            \n            #note this wont work if there is a quote before and after a posters post, but hopefully the number of these is small\n            thec = item['comments'][c].split(\"</div>
\")[-2]\n comments += \"{\" + item['commentdates'][c] + \"|||\" + (cleanse_tags_contractions_whitespace(thec) + \"}\\n\")\n item['newCommentFormat'] = comments\n return item\n \n\n#put the items in de db \nclass myNissanLeafDB(CrawlerDBC): \n def process_item(self, item, spider):\n if 'myNissanLeafDB' not in getattr(spider, 'pipelines'):\n return item\n q = 'insert into LeafReviews (BaseSite,Content,Title,Url,NumComments,Comments) values (\\'{0}\\', \\'{1}\\', \\'{2}\\', \\'{3}\\',\\'{4}\\', \\'{5}\\')'.format(item['site'],item['contenttype'],item['title'],item['url'],item['numcomments'],item['newCommentFormat'])\n self.insertItem(item, q)\n return item\n\n","sub_path":"lib/crawling/leaf_pipeline.py","file_name":"leaf_pipeline.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"535723695","text":"#!/usr/bin/env python2\n\"\"\"Module for InterviewScraper\"\"\"\nfrom scrapers import *\n\n\nclass InterviewScraper:\n \"\"\"InterviewScraper class\n\n High-Level_Programming project scraper.\n\n Args:\n soup (obj): BeautifulSoup obj containing parsed link\n\n Attributes:\n py_flag (int): For write_checker()\n py_js (int): For write_checker()\n \"\"\"\n sh_flag = 0\n py_flag = 0\n js_flag = 0\n html_flag = 0\n css_flag = 0\n scss_flag = 0\n c_flag = 0\n check_flag = 0\n header_check = 0\n\n def __init__(self, soup):\n self.soup = soup\n self.file_names = self.find_files()\n self.prototypes_list = self.find_prototypes()\n self.header_name = self.find_header()\n self.unique_file_names = self.remove_duplicate_files()\n\n def find_prototypes(self):\n \"\"\"Method to scrape python prototypes\n\n Has a failsafe incase there are non-python files in scraped data.\n \"\"\"\n res = []\n find_protos = self.soup.find_all(string=re.compile(\"Prototype: \"))\n for item in find_protos:\n py_proto = item.next_sibling.text\n find_py = py_proto.find(\":\")\n find_c = py_proto.find(\";\")\n if find_py != 1 or find_c != 1:\n res.append(py_proto)\n else:\n pass\n return res\n\n def find_files(self):\n \"\"\"Method to scrape for python file names\"\"\"\n return self.soup.find_all(string=re.compile(\"File: \"))\n\n def html_skeleton(self, file_name):\n html_string = ('\\n\\n'\n '\\n'\n '\\t\\n'\n '\\t\\n'\n '\\tDocument\\n'\n '\\n\\n\\n'\n '\\n\\n')\n with open(file_name, \"w\") as f:\n f.write(html_string)\n\n def write_file_name(self, file_name, find_pyfile, file_idx):\n \"\"\" Method to write actual file \"\"\"\n w_file_name = open(file_name, \"w+\")\n if \".py\" in file_name:\n self.py_flag = 1\n self.check_flag = 1\n w_file_name.write(\"#!/usr/bin/python3\\n\")\n elif \".sh\" in file_name:\n self.sh_flag = 1\n self.check_flag = 1\n w_file_name.write(\"#!/bin/bash\\n\")\n elif \".js\" in file_name:\n self.js_flag = 1\n self.check_flag = 1\n w_file_name.write(\"#!/usr/bin/node\\n\")\n elif \".html\" in file_name:\n self.html_flag = 1\n self.check_flag = 1\n self.html_skeleton(file_name)\n elif \".css\" in file_name:\n self.css_flag = 1\n self.check_flag = 1\n w_file_name.write(\"{\\n\\t\\n}\\n\")\n elif \".scss\" in file_name:\n self.scss_flag = 1\n w_file_name.write(\"/* My style */\\n\")\n elif \".c\" in file_name:\n self.c_flag = 1\n try:\n # Pulling out name of function for documentation\n if len(self.prototypes_list) != 0:\n func_name = self.prototypes_list[file_idx]\n func_name = func_name.split(\"(\", 1)[0]\n tmp_split = func_name.split(\" \")\n func_name = tmp_split[len(tmp_split) - 1]\n tmp_split = 
func_name.split(\"*\")\n func_name = tmp_split[len(tmp_split) - 1]\n\n # Removing string after first comma (multiple file names)\n find_comma = file_name.find(\",\")\n if find_comma != -1:\n w_file_name = open(file_name[:find_comma], \"w+\")\n else:\n w_file_name = open(file_name, \"w+\")\n\n if self.header_check != 1:\n func_proto = self.prototypes_list[file_idx]\n if func_proto[-1] == \";\":\n func_proto = func_proto[:-1]\n w_file_name.write('#include \"%s\"\\n\\n' % self.header_name)\n w_file_name.write(\"/**\\n\")\n w_file_name.write(\" * %s -\\n\" % func_name)\n w_file_name.write(\" *\\n\")\n w_file_name.write(\" * Return: \\n\")\n w_file_name.write(\" **/\\n\\n\")\n w_file_name.write(\"%s\\n\" % func_proto)\n w_file_name.write(\"\\b\")\n w_file_name.write(\"{\\n\")\n w_file_name.write(\"\\n\")\n w_file_name.write(\"}\")\n w_file_name.write(\"\\n\")\n except (AttributeError, IndexError):\n sys.stdout.write(\"[ERROR] Failed to create \")\n sys.stdout.write(\"task file %s\\n\" % file_name)\n sys.stdout.write(\" ... \")\n # Creating prototypes in parallel with files\n if find_pyfile != -1:\n proto = self.prototypes_list[file_idx]\n if proto[-1] != \":\":\n proto = proto + \":\"\n w_file_name.write(proto + \"\\n\")\n file_idx += 1\n w_file_name.close()\n\n def write_files(self):\n \"\"\"Method to write/create python files\n\n Has a function that creates directories if found in `file_name`.\n Last function creates required files in additional directory.\n \"\"\"\n\n new_dir_files = []\n file_idx = 0\n one_dir_check = 0\n folder_name = None\n\n sys.stdout.write(\" -> Creating task files... \")\n for text_file in self.unique_file_names:\n if \"images/\" in text_file:\n os.mkdir(\"images\")\n continue\n try:\n find_pyfile = text_file.find(\".py\")\n # find_cfile = text_file.find(\".c\")\n find_comma = re.search('(.+?),', text_file)\n\n # Creating sub directories if exists\n if find_comma is not None:\n find_folder = re.search(', (.+?)/', text_file)\n else:\n find_folder = re.search('(.+?)/', text_file)\n find_dir_file = re.search('/(.+?)$', text_file)\n if find_dir_file is not None:\n new_dir_files.append(str(find_dir_file.group(1)))\n if find_folder is not None and one_dir_check is 0:\n folder_name = str(find_folder.group(1))\n os.mkdir(folder_name)\n one_dir_check += 1\n\n # Handling multiple files\n if \", \" in text_file:\n comma_file_names = text_file.split(\", \")\n for file_name in comma_file_names:\n self.write_file_name(file_name, find_pyfile, file_idx)\n if \".c\" in file_name:\n file_idx += 1\n elif \".\" not in text_file and one_dir_check is not 1:\n # check if file or dir by checking for digits\n contains_digit = any(map(unicode.isdigit, text_file))\n if contains_digit:\n w_file_name = open(text_file, \"w+\")\n w_file_name.close()\n else:\n os.mkdir(text_file)\n else:\n self.write_file_name(text_file, find_pyfile, file_idx)\n if \".c\" in text_file:\n file_idx += 1\n except AttributeError:\n sys.stdout.write(\"[ERROR] Failed to create \")\n sys.stdout.write(\"task file %s\\n\" % text_file)\n sys.stdout.write(\" ... \")\n continue\n except IOError:\n sys.stdout.write(\"[ERROR] Failed to make file, passing\\n\")\n sys.stdout.write(\" ... 
\")\n pass\n except IndexError:\n pass\n\n # Check if new dir created, insert files if there is\n if folder_name is not None and one_dir_check is 1:\n os.chdir(folder_name)\n for item in new_dir_files:\n if \",\" in item:\n item_obj = re.search('/(.+?)$', text_file)\n item = str(item_obj.group(1))\n dir_file = open(item, \"w+\")\n dir_file.close()\n os.chdir(\"..\")\n if self.check_flag:\n self.write_checker()\n print(\"done\")\n\n def write_checker(self):\n with open(\"check.sh\", \"w+\") as f:\n f.write(\"#!/usr/bin/env bash\\n\")\n if self.js_flag == 1:\n f.write(\"semistandard --fix \")\n elif self.sh_flag == 1:\n f.write(\"shellcheck \")\n elif self.py_flag == 1:\n f.write(\"pep8 \")\n elif self.html_flag == 1 or self.css_flag == 1:\n f.write(\"/home/vagrant/utils/W3C-Validator/w3c_validator.py \")\n if self.file_names:\n for i in self.file_names:\n f.write('\"%s\" ' % i.next_sibling.text)\n f.write(\"\\n\")\n\n def remove_duplicate_files(self):\n \"\"\"Removes duplicate file names\"\"\"\n res = []\n for item in self.file_names:\n text_file = item.next_sibling.text\n if \", \" in text_file:\n comma_file_names = text_file.split(\", \")\n for file_name in comma_file_names:\n if file_name not in res:\n res.append(file_name)\n elif text_file not in res:\n res.append(text_file)\n return res\n\n def find_header(self):\n \"\"\"Method to scrape for C header file name\"\"\"\n try:\n page_text = self.soup.get_text()\n my_header_start = page_text.index(\"header file called \")\n my_header_end = page_text.index(\".h\")\n header_string = page_text[my_header_start:my_header_end]\n header_name = header_string.split()[-1] + \".h\"\n return header_name\n except:\n self.header_check = 1\n return \"\"\n","sub_path":"scrapers/interview_scraper.py","file_name":"interview_scraper.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"390114962","text":"#!/usr/bin/env python\nimport pika\nimport sys\nimport time\nimport subprocess\nimport random\nimport threading\nimport requests\nimport json\nimport signal\n\nfrom command_args import get_args, get_mandatory_arg, get_optional_arg\nfrom RabbitPublisher import RabbitPublisher\nfrom MultiTopicConsumer import MultiTopicConsumer\nfrom QueueStats import QueueStats\nfrom ChaosExecutor import ChaosExecutor\nfrom printer import console_out\nfrom MessageMonitor import MessageMonitor\nfrom ConsumerManager import ConsumerManager\nfrom BrokerManager import BrokerManager\n\nstop_please = False\nstop_requests = 0\n\ndef interuppt_handler(signum, frame):\n global stop_please, stop_requests\n console_out(\"STOP REQUESTED\", \"TEST RUNNER\")\n stop_please = True\n stop_requests +=1\n\n if stop_requests >= 2:\n sys.exit(-2) \n \n\ndef main():\n\n #signal.signal(signal.SIGINT, interuppt_handler)\n args = get_args(sys.argv)\n\n count = -1 # no limit\n tests = int(get_mandatory_arg(args, \"--tests\"))\n run_minutes = int(get_mandatory_arg(args, \"--run-minutes\"))\n consumer_count = int(get_mandatory_arg(args, \"--consumers\"))\n grace_period_sec = int(get_mandatory_arg(args, \"--grace-period-sec\"))\n queue = get_mandatory_arg(args, \"--queue\")\n queue_type = get_mandatory_arg(args, \"--queue-type\")\n sac = get_mandatory_arg(args, \"--sac\")\n\n publisher_count = int(get_optional_arg(args, \"--publishers\", \"1\"))\n print_mod = int(get_optional_arg(args, \"--print-mod\", \"0\"))\n new_cluster = get_optional_arg(args, \"--new-cluster\", \"true\")\n in_flight_max = 
int(get_optional_arg(args, \"--in-flight-max\", \"10\"))\n sequence_count = int(get_optional_arg(args, \"--sequences\", \"1\"))\n cluster_size = get_optional_arg(args, \"--cluster\", \"3\")\n chaos = get_optional_arg(args, \"--chaos-actions\", \"true\")\n chaos_mode = get_optional_arg(args, \"--chaos-mode\", \"mixed\")\n chaos_min_interval = int(get_optional_arg(args, \"--chaos-min-interval\", \"60\"))\n chaos_max_interval = int(get_optional_arg(args, \"--chaos-max-interval\", \"120\"))\n consumer_actions = get_optional_arg(args, \"--consumer-actions\", \"true\")\n con_action_min_interval = int(get_optional_arg(args, \"--consumer-min-interval\", \"20\"))\n con_action_max_interval = int(get_optional_arg(args, \"--consumer-max-interval\", \"60\"))\n\n if print_mod == 0:\n print_mod = in_flight_max * 5\n\n include_chaos = True\n if chaos.upper() == \"FALSE\":\n include_chaos = False\n\n include_con_actions = True\n if consumer_actions.upper() == \"FALSE\":\n include_con_actions = False\n\n sac_enabled = True\n if sac.upper() == \"FALSE\":\n sac_enabled = False\n\n message_type = \"sequence\"\n \n for test_number in range(tests):\n\n print(\"\")\n console_out(f\"TEST RUN: {str(test_number)} --------------------------\", \"TEST RUNNER\")\n if new_cluster.upper() == \"TRUE\":\n subprocess.call([\"bash\", \"../automated/setup-test-run.sh\", cluster_size, \"3.8\"])\n console_out(f\"Waiting for cluster...\", \"TEST RUNNER\")\n time.sleep(30)\n\n console_out(f\"Cluster status:\", \"TEST RUNNER\")\n subprocess.call([\"bash\", \"../cluster/cluster-status.sh\"])\n \n broker_manager = BrokerManager()\n broker_manager.load_initial_nodes()\n initial_nodes = broker_manager.get_initial_nodes()\n console_out(f\"Initial nodes: {initial_nodes}\", \"TEST RUNNER\")\n\n queue_name = queue + \"_\" + str(test_number)\n mgmt_node = broker_manager.get_random_init_node()\n queue_created = False\n\n while queue_created == False: \n if sac_enabled: \n queue_created = broker_manager.create_sac_queue(mgmt_node, queue_name, cluster_size, queue_type)\n else:\n queue_created = broker_manager.create_queue(mgmt_node, queue_name, cluster_size, queue_type)\n\n if queue_created == False:\n time.sleep(5)\n\n time.sleep(10)\n\n msg_monitor = MessageMonitor(print_mod)\n stats = QueueStats('jack', 'jack', queue_name)\n chaos = ChaosExecutor(initial_nodes)\n\n if chaos_mode == \"partitions\":\n chaos.only_partitions()\n elif chaos_mode == \"nodes\":\n chaos.only_kill_nodes()\n\n consumer_manager = ConsumerManager(broker_manager, msg_monitor, \"TEST RUNNER\")\n\n pub_node = broker_manager.get_random_init_node()\n publisher = RabbitPublisher(f\"PUBLISHER(Test:{test_number} Id:P1)\", initial_nodes, pub_node, in_flight_max, 120, print_mod)\n consumer_manager.add_consumers(consumer_count, test_number, queue_name)\n\n monitor_thread = threading.Thread(target=msg_monitor.process_messages)\n monitor_thread.start()\n \n consumer_manager.start_consumers()\n\n if publisher_count == 1:\n pub_thread = threading.Thread(target=publisher.publish_direct,args=(queue_name, count, sequence_count, 0, \"sequence\"))\n pub_thread.start()\n console_out(\"publisher started\", \"TEST RUNNER\")\n\n if include_con_actions or include_chaos:\n init_wait_sec = 20\n console_out(f\"Will start chaos and consumer actions in {init_wait_sec} seconds\", \"TEST RUNNER\")\n time.sleep(init_wait_sec)\n\n if include_chaos:\n chaos_thread = threading.Thread(target=chaos.start_random_single_action_and_repair,args=(chaos_min_interval,chaos_max_interval))\n chaos_thread.start()\n 
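Further down, the test runner polls once per second through a grace period until the consumers catch up with the publisher. The same idea as a reusable predicate wait; a sketch, not part of the harness:

import time

def await_condition(predicate, timeout_sec, poll_sec=1.0):
    # Poll until the predicate holds or the deadline passes; return the final verdict.
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_sec)
    return predicate()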
console_out(\"Chaos executor started\", \"TEST RUNNER\")\n\n if include_con_actions:\n consumer_action_thread = threading.Thread(target=consumer_manager.start_random_consumer_actions,args=(con_action_min_interval, con_action_max_interval))\n consumer_action_thread.start()\n console_out(\"Consumer actions started\", \"TEST RUNNER\")\n\n \n ctr = 0\n run_seconds = run_minutes * 60\n while ctr < run_seconds and not stop_please:\n try:\n time.sleep(1)\n ctr += 1\n\n if ctr % 60 == 0:\n console_out(f\"Test at {int(ctr/60)} minute mark, {int((run_seconds-ctr)/60)} minutes left\", \"TEST RUNNER\")\n except KeyboardInterrupt:\n console_out(f\"Test forced to stop at {int(ctr/60)} minute mark, {int((run_seconds-ctr)/60)} minutes left)\", \"TEST RUNNER\")\n break\n\n try:\n chaos.stop_random_single_action_and_repair()\n consumer_manager.stop_random_consumer_actions()\n \n if include_chaos:\n chaos_thread.join()\n\n if include_con_actions:\n consumer_action_thread.join()\n except Exception as e:\n console_out(\"Failed to stop chaos cleanly: \" + str(e), \"TEST RUNNER\")\n\n console_out(\"Resuming consumers\", \"TEST RUNNER\")\n consumer_manager.resume_all_consumers()\n \n if publisher_count == 1:\n publisher.stop(True)\n\n console_out(\"starting grace period for consumer to catch up\", \"TEST RUNNER\")\n ctr = 0\n \n while ctr < grace_period_sec:\n if msg_monitor.get_unique_count() >= publisher.get_pos_ack_count() and len(publisher.get_msg_set().difference(msg_monitor.get_msg_set())) == 0:\n break\n time.sleep(1)\n ctr += 1\n\n confirmed_set = publisher.get_msg_set()\n not_consumed_msgs = confirmed_set.difference(msg_monitor.get_msg_set())\n\n console_out(\"RESULTS ----------------------------------------\", \"TEST RUNNER\")\n console_out(f\"Confirmed count: {publisher.get_pos_ack_count()} Received count: {msg_monitor.get_receive_count()} Unique received: {msg_monitor.get_unique_count()}\", \"TEST RUNNER\")\n\n success = True\n if len(not_consumed_msgs) > 0:\n console_out(f\"FAILED TEST: Potential failure to promote Waiting to Active. Not consumed count: {len(not_consumed_msgs)}\", \"TEST RUNNER\")\n success = False\n\n if msg_monitor.get_out_of_order() == True:\n success = False\n console_out(f\"FAILED TEST: Received out-of-order messages\", \"TEST RUNNER\")\n\n if success:\n console_out(\"TEST OK\", \"TEST RUNNER\")\n\n console_out(\"RESULTS END ------------------------------------\", \"TEST RUNNER\")\n\n try:\n consumer_manager.stop_all_consumers()\n \n if publisher_count == 1:\n pub_thread.join()\n msg_monitor.stop_consuming()\n monitor_thread.join()\n except Exception as e:\n console_out(\"Failed to clean up test correctly: \" + str(e), \"TEST RUNNER\")\n\n console_out(f\"TEST {str(test_number )} COMPLETE\", \"TEST RUNNER\")\n\nif __name__ == '__main__':\n main()","sub_path":"RabbitMqUdn/client/random-test.py","file_name":"random-test.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"31037266","text":"import pandas as pd \r\nimport seaborn as sns \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\ndf_all_states=pd.read_csv('E:\\csvdhf5xlsxurlallfiles/2008_all_states.csv')\r\nprint(df_all_states.columns)\r\n_=plt.plot(df_all_states['total_votes']/1000, df_all_states['dem_share'], marker='.', linestyle='none')\r\n_=plt.xlabel('total votes(thousands)')\r\n_=plt.ylabel('present of vote for obama')\r\nplt.show()\r\n#covariance - a measure of how quantities vary together. 
cov(X,Y) = (1/n)*sum((Xi-Xmean)*(Yi-Ymean))\r\n#r = correlation = covariance/((std of x)*(std of y))\r\ncovariance_matrix=np.cov(df_all_states['total_votes'], df_all_states['dem_share'])\r\nprint(covariance_matrix)\r\ncov = covariance_matrix[0,1]\r\nprint(cov)\r\ndef pearson_r(x,y):\r\n\tcorr_mat=np.corrcoef(x,y)\r\n\treturn corr_mat[0,1]\r\nr=pearson_r(df_all_states['total_votes'], df_all_states['dem_share'])\r\nprint(r)\r\n","sub_path":"covariance and pearson correlation.py","file_name":"covariance and pearson correlation.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"559254899","text":"import time\ntime.sleep(30)\nimport json\nimport datetime\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom Adafruit_IO import Client, Feed, Data, RequestError\n\nimport PMS5003\nimport airnow\n\n\n#get key stored separately for secrecy.\nwith open('aio_config.json') as jsonfile:\n    aio_config = json.load(jsonfile)\nADAFRUIT_IO_KEY = aio_config[\"aio_key\"]\nADAFRUIT_IO_USERNAME = aio_config[\"aio_username\"]\n\nwith open('airnow_config.json') as jsonfile:\n    airnow_config = json.load(jsonfile)\nAIRNOW_KEY = airnow_config[\"key\"]\n\n# Create an instance of the REST client.\naio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\ntry:\n    PM25 = aio.feeds('pm2-dot-5')\nexcept RequestError:\n    feed1 = Feed(name=\"pm2-dot-5\")\n    PM25 = aio.create_feed(feed1)\n\ntry:\n    AQI25 = aio.feeds('aqi-pm2-dot-5')\nexcept RequestError:\n    feed2 = Feed(name=\"aqi-pm2-dot-5\")\n    AQI25 = aio.create_feed(feed2)\n\ntry:\n    AQI_outside = aio.feeds('aqi-outside')\nexcept RequestError:\n    feed3 = Feed(name=\"aqi-outside\")\n    AQI_outside = aio.create_feed(feed3)\n\n# Create instance of PMS5003 object\naq_sensor = PMS5003.PMS5003(serial_terminal=\"/dev/serial0\") \n\n# Create instance of airnow object\nairnow_api = airnow.airnow(api_key=AIRNOW_KEY) \n\n\n# Function for cron scheduler\ndef post_data():\n    aq_sensor.read()\n    aio.append(PM25.key, aq_sensor.pm25_standard)\n    aio.append(AQI25.key, aq_sensor.aqi_pm25)\n    airnow_api.read()\n    aio.append(AQI_outside.key, airnow_api.aqi_pm25)\n\n#post_data()\n\n# Cron like python scheduler\nsched = BlockingScheduler()\nsched.add_job(post_data,'cron',year='*',month='*',day='*',week='*',day_of_week='*',hour='*', minute='0/3', second='0')\nsched.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"486221580","text":"# -*- coding: utf-8 -*-\n\n\nBOT_NAME = 'doubanmovie'\n\nSPIDER_MODULES = ['doubanmovie.spiders']\nNEWSPIDER_MODULE = 'doubanmovie.spiders'\n\nimport sys\nimport os\nfrom os.path import dirname\npath=dirname(dirname(os.path.abspath(os.path.dirname(__file__))))\nsys.path.append(path)\n\nfrom misc.log import *\n\n\n\n\nDOWNLOADER_MIDDLEWARES = {\n    'misc.middleware.CustomUserAgentMiddleware': 401,\n    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None, \n}\n\nITEM_PIPELINES={\n    'doubanmovie.pipelines.MySQLStorePipeline':302,\n}\n\nLOG_LEVEL='INFO'\nDOWNLOAD_DELAY=1","sub_path":"doubanmovie/doubanmovie/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"146293415","text":"from __future__ import print_function\n\nimport numpy as np\nimport pandas as pd\nfrom keras.layers import Dense\nfrom 
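A standalone numerical check of the relationship stated in the comments above, cov(X,Y) = (1/n)*sum((Xi-Xmean)*(Yi-Ymean)) and r = cov/(std(X)*std(Y)), run on synthetic data (a sketch, not part of the original script):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = 0.5 * x + rng.normal(scale=0.5, size=1000)

cov = np.mean((x - x.mean()) * (y - y.mean()))   # population covariance, the 1/n form
r_manual = cov / (x.std() * y.std())             # r = cov / (std_x * std_y)
r_numpy = np.corrcoef(x, y)[0, 1]
print(r_manual, r_numpy)                         # agree to floating-point precision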
keras.layers import LSTM\nfrom keras.layers.core import Activation, Dropout\nfrom keras.models import Sequential\n\n# Determine how long names are\nmaxlen = 30\nlabels = 2\n\n# Import csv file\ninput = pd.read_csv(\"origin_in.csv\")\n # input.columns = ['name', 'n_or_f', 'namelen']\n\n# Shows the break down of native and foreign \ninput.groupby('n_or_f')['name'].count()\n\n# Extract columns and Create Dictionary with Character & Index\nnames = input['name']\norigin = input['n_or_f']\nvocab = set(' '.join([str(i) for i in names]))\nvocab.add('END')\nlen_vocab = len(vocab)\n#print(len_vocab)\nchar_index = dict((c, i) for i, c in enumerate(vocab))\nprint(char_index)\nchar_df = pd.DataFrame.from_dict(char_index, orient = 'index')\nchar_df.to_csv(\"char_index.csv\")\n# Randomly break the data into training and testing\nmsk = np.random.rand(len(input)) < 0.8\n #print(\"msk : %s\" %msk)\ntrain = input[msk]\ntest = input[~msk]\n\n# Tag if a name is native or foreign\ndef tag_origin(n_or_f):\n result = []\n for elem in n_or_f:\n if elem == 'n':\n result.append([1, 0])\n else:\n result.append([0, 1])\n return result\n\n# Create an array of arrays (matrix) of names represented based on index of each character\ndef name_matrix(trunc_name_input, char_index_input, maxlen_input):\n result = []\n for i in trunc_name_input:\n tmp = [set_flag(char_index_input[j]) for j in str(i)]\n for k in range(0, maxlen_input - len(str(i))):\n tmp.append(set_flag(char_index_input[\"END\"]))\n result.append(tmp)\n return result\n\n# Within a zeros vector, change 0 to 1 if a character matches \ndef set_flag(i):\n tmp = np.zeros(56)\n tmp[i] = 1\n return tmp\n\n# Run name_matrix and tag_origin function on both training and testing data\ntrunc_train_name = [str(i)[0:maxlen] for i in train.name]\ntrain_X = name_matrix(trunc_train_name, char_index, maxlen)\ntrain_Y = tag_origin(train.n_or_f)\n\ntrunc_test_name = [str(i)[0:maxlen] for i in test.name]\n#print(trunc_test_name)\ntest_X = name_matrix(trunc_test_name, char_index, maxlen)\ntest_Y = tag_origin(test.n_or_f)\n#print(test_X)\n\n# Build a machine learning model (LSTM architecture)\nmodel = Sequential()\n #model.add(LSTM(32, return_sequences=True, input_shape=(maxlen, len_vocab)))\nmodel.add(LSTM(32, return_sequences=True, input_shape=(maxlen, 56)))\nmodel.add(Dropout(0.15))\nmodel.add(LSTM(32, return_sequences=False))\nmodel.add(Dropout(0.15))\nmodel.add(Dense(2))\n\n #model.add(Flatten())\nmodel.add(Activation('softmax'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n #batch_size = 182415\nbatch_size = 32\n #model.fit(train_X, train_Y, batch_size= batch_size , epochs=10, validation_data=(test_X, test_Y))\nmodel.fit(np.array(train_X), np.array(train_Y), batch_size= batch_size , epochs=16, validation_data=(np.array(test_X), np.array(test_Y)))\n\n# Run predictions on given array of numbers\nkorean_name = [\"miyoun song\",\"hyunjoon park\", \"lee seungro\", \"park jaeho\", \"kim seonho\", \"lee mingeun\", \"namgung hwangsil\", \"kim bomi\", \"moon jaein\", \"lee myungbak\"]\nenglish_name = [\"smith alice\", \"disney walt\", \"jobs steve\", \"trump donald\", \"hernandez maria\", \"rogers steve\", \"rodrigues maria\",\"mandela nelson\", \"king luther\", \"downey robert\"]\ntrunc_name_kor = [i[0:maxlen] for i in korean_name]\ntrunc_name_eng = [i[0:maxlen] for i in english_name]\nX_kor = name_matrix(trunc_name_kor, char_index, maxlen)\nX_eng = name_matrix(trunc_name_eng, char_index, maxlen)\npred_kor = 
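`set_flag` and `name_matrix` above hardcode a one-hot width of 56, which silently breaks if the character vocabulary changes. A sketch that derives the width from `char_index` while keeping the same END-token padding (not the script's code):

import numpy as np

def one_hot(index, size):
    vec = np.zeros(size)
    vec[index] = 1
    return vec

def encode_name(name, char_index, maxlen):
    # One row per character, padded with the END token; width follows the vocabulary.
    size = len(char_index)
    rows = [one_hot(char_index[ch], size) for ch in str(name)[:maxlen]]
    rows += [one_hot(char_index["END"], size)] * (maxlen - len(rows))
    return np.array(rows)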
model.predict(np.asarray(X_kor))\nprint(\"pred_kor is:\")\nprint(pred_kor)\npred_eng = model.predict(np.asarray(X_eng))\nprint(\"pred_eng is:\")\nprint(pred_eng)\n\n# Print out test score and test accuracy of the model\nscore, acc = model.evaluate(np.array(test_X), np.array(test_Y))\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n\n# Save our model and data\nmodel.save_weights('origin_model.h5', overwrite=True)\nmodel.save('origin_model_entire.h5')\ntrain.to_csv(\"train_split.csv\")\ntest.to_csv(\"test_split.csv\")\n\n# Perform prediction on test for evaluation\nevals = model.predict(np.array(test_X))\nprob_n = [i[0] for i in evals]\n\n# Create dataframe to output the results of evaluation\nout = pd.DataFrame(prob_n)\nout['name'] = test.name.reset_index()['name']\nout['n_or_f'] = test.n_or_f.reset_index()['n_or_f']\n\nout.head(10)\nout.columns = ['prob_n', 'name', 'actual']\nout.head(10)\nout.to_csv(\"origin_pred_out.csv\")","sub_path":"trial_14/name_classification.py","file_name":"name_classification.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"125476927","text":"import sys\nimport torch\nimport numpy as np\nfrom IPython.display import display, HTML\nfrom . import Parameter, Mean, Kernel, config\nfrom functools import reduce\nimport operator\n\ndef prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\nclass CholeskyException(Exception):\n def __init__(self, message, K, model):\n self.message = message\n self.K = K\n self.model = model\n\n def __str__(self):\n return self.message\n\nclass Model:\n def __init__(self, kernel, X, y, mean=None, name=None):\n if not issubclass(type(kernel), Kernel):\n raise ValueError(\"kernel must derive from mogptk.gpr.Kernel\")\n X, y = self._check_input(X, y)\n if mean is not None:\n if not issubclass(type(mean), Mean):\n raise ValueError(\"mean must derive from mogptk.gpr.Mean\")\n mu = mean(X).reshape(-1,1)\n if mu.shape != y.shape:\n raise ValueError(\"mean and y data must match shapes: %s != %s\" % (mu.shape, y.shape))\n\n self.kernel = kernel\n self.X = X\n self.y = y\n self.mean = mean\n self.name = name\n self.input_dims = X.shape[1]\n\n self._params = []\n self._param_names = []\n self._register_parameters(kernel)\n if mean is not None and issubclass(type(mean), Mean):\n self._register_parameters(mean)\n\n def __setattr__(self, name, val):\n if hasattr(self, name) and isinstance(getattr(self, name), Parameter):\n raise AttributeError(\"parameter is read-only, use Parameter.assign()\")\n if isinstance(val, Parameter) and val.name is None:\n val.name = name\n super(Model,self).__setattr__(name, val) \n\n def _check_input(self, X, y=None):\n if not isinstance(X, torch.Tensor):\n X = torch.tensor(X, device=config.device, dtype=config.dtype)\n else:\n X = X.to(config.device, config.dtype)\n if len(X.shape) == 1:\n X = X.reshape(-1,1)\n if len(X.shape) != 2:\n raise ValueError(\"X must have dimensions (data_points,input_dims) with input_dims optional\")\n if X.shape[0] == 0 or X.shape[1] == 0:\n raise ValueError(\"X must not be empty\")\n\n if y is not None:\n if not isinstance(y, torch.Tensor):\n y = torch.tensor(y, device=config.device, dtype=config.dtype)\n else:\n y = y.to(config.device, config.dtype)\n if len(y.shape) == 1:\n y = y.reshape(-1,1)\n if len(y.shape) != 2 or y.shape[1] != 1:\n raise ValueError(\"y must have one dimension (data_points,)\")\n if X.shape[0] != y.shape[0]:\n raise ValueError(\"number of data points for 
X and y must match\")\n return X, y\n else:\n # X is for prediction\n if X.shape[1] != self.input_dims:\n raise ValueError(\"X must have %s input dimensions\" % self.input_dims)\n return X\n\n def _register_parameters(self, obj, name=None):\n if isinstance(obj, Parameter):\n if obj.name is not None:\n if name is None:\n name = obj.name\n else:\n name += \".\" + obj.name\n elif name is None:\n name = \"\"\n self._params.append(obj)\n self._param_names.append(name)\n elif isinstance(obj, list):\n for i, v in enumerate(obj):\n self._register_parameters(v, (name if name is not None else \"\")+\"[\"+str(i)+\"]\")\n elif issubclass(type(obj), Kernel) or issubclass(type(obj), Mean):\n for v in obj.__dict__.values():\n self._register_parameters(v, (name+\".\" if name is not None else \"\")+obj.name)\n\n def zero_grad(self):\n for p in self._params:\n p = p.unconstrained\n if p.grad is not None:\n if p.grad.grad_fn is not None:\n p.grad.detach_()\n else:\n p.grad.requires_grad_(False)\n p.grad.zero_()\n\n def parameters(self):\n for p in self._params:\n if p.trainable:\n yield p.unconstrained\n \n def print_parameters(self, file=None):\n def param_range(lower, upper, trainable=True):\n if lower is not None:\n if prod(lower.shape) == 1:\n lower = lower.item()\n else:\n lower = lower.tolist()\n if upper is not None:\n if prod(upper.shape) == 1:\n upper = upper.item()\n else:\n upper = upper.tolist()\n\n if not trainable:\n return \"fixed\"\n if lower is None and upper is None:\n return \"(-∞, ∞)\"\n elif lower is None:\n return \"(-∞, %s]\" % upper\n elif upper is None:\n return \"[%s, ∞)\" % lower\n return \"[%s, %s]\" % (lower, upper)\n\n if file is None:\n try:\n get_ipython # fails if we're not in a notebook\n table = ''\n for name, p in zip(self._param_names, self._params):\n table += '' % (name, param_range(p.lower, p.upper, p.trainable), p.numpy())\n table += '
</table>
'\n display(HTML(table))\n return\n except Exception as e:\n pass\n\n vals = [[\"Name\", \"Range\", \"Value\"]]\n for name, p in zip(self._param_names, self._params):\n vals.append([name, param_range(p.lower, p.upper, p.trainable), str(p.numpy())])\n\n nameWidth = max([len(val[0]) for val in vals])\n rangeWidth = max([len(val[1]) for val in vals])\n for val in vals:\n print(\"%-*s %-*s %s\" % (nameWidth, val[0], rangeWidth, val[1], val[2]), file=file)\n\n def _cholesky(self, K):\n try:\n return torch.linalg.cholesky(K)\n except RuntimeError as e:\n print(\"ERROR:\", e.args[0], file=sys.__stdout__)\n print(\"K =\", K, file=sys.__stdout__)\n if K.isnan().any():\n print(\"Kernel matrix has NaNs!\", file=sys.__stdout__)\n if K.isinf().any():\n print(\"Kernel matrix has infinities!\", file=sys.__stdout__)\n print(\"Parameters:\", file=sys.__stdout__)\n self.print_parameters(file=sys.__stdout__)\n raise CholeskyException(e.args[0], K, self)\n\n def log_marginal_likelihood(self):\n raise NotImplementedError()\n\n def log_prior(self):\n return sum([p.log_prior() for p in self._params])\n\n def loss(self):\n self.zero_grad()\n loss = -self.log_marginal_likelihood() - self.log_prior()\n loss.backward()\n return loss\n\n def K(self, X1, X2=None):\n with torch.no_grad():\n X1 = self._check_input(X1) # MxD\n if X2 is not None:\n X2 = self._check_input(X2) # MxD\n return self.kernel(X1,X2).cpu().numpy()\n\n def sample(self, Z, n=None):\n with torch.no_grad():\n S = n\n if n is None:\n S = 1\n\n mu, var = self.predict(Z, full=True, tensor=True) # MxD and MxMxD\n u = torch.normal(\n torch.zeros(Z.shape[0], S, device=config.device, dtype=config.dtype),\n torch.tensor(1.0, device=config.device, dtype=config.dtype)) # MxS\n L = torch.linalg.cholesky(var + 1e-6*torch.ones(Z.shape[0]).diagflat()) # MxM\n samples = mu + L.mm(u) # MxS\n if n is None:\n samples = samples.squeeze()\n return samples.cpu().numpy()\n\nclass GPR(Model):\n def __init__(self, kernel, X, y, noise=1.0, mean=None, name=\"GPR\"):\n super(GPR, self).__init__(kernel, X, y, mean, name)\n\n self.log_marginal_likelihood_constant = 0.5*X.shape[0]*np.log(2.0*np.pi)\n\n self.noise = Parameter(noise, name=\"noise\", lower=config.positive_minimum)\n self.eye = torch.eye(self.X.shape[0], device=config.device, dtype=config.dtype)\n self._register_parameters(self.noise)\n\n def log_marginal_likelihood(self):\n K = self.kernel(self.X) + self.noise()*self.eye # NxN\n L = self._cholesky(K) # NxN\n\n if self.mean is not None:\n y = self.y - self.mean(self.X).reshape(-1,1) # Nx1\n else:\n y = self.y # Nx1\n\n p = -0.5*y.T.mm(torch.cholesky_solve(y,L)).squeeze()\n p -= L.diagonal().log().sum()\n p -= self.log_marginal_likelihood_constant\n return p#/self.X.shape[0] # dividing by the number of data points normalizes the learning rate\n\n def predict(self, Z, full=False, tensor=False):\n with torch.no_grad():\n Z = self._check_input(Z) # MxD\n\n K = self.kernel(self.X) + self.noise()*self.eye # NxN\n Ks = self.kernel(self.X,Z) # NxM\n Kss = self.kernel(Z) + self.noise()*torch.eye(Z.shape[0], device=config.device, dtype=config.dtype) # MxM\n\n L = self._cholesky(K) # NxN\n v = torch.triangular_solve(Ks,L,upper=False)[0] # NxM\n\n if self.mean is not None:\n y = self.y - self.mean(self.X).reshape(-1,1) # Nx1\n mu = Ks.T.mm(torch.cholesky_solve(y,L)) # Mx1\n mu += self.mean(Z).reshape(-1,1) # Mx1\n else:\n mu = Ks.T.mm(torch.cholesky_solve(self.y,L)) # Mx1\n\n var = Kss - v.T.mm(v) # MxM\n if not full:\n var = var.diag().reshape(-1,1) # Mx1\n if tensor:\n return 
mu, var\n else:\n return mu.cpu().numpy(), var.cpu().numpy()\n","sub_path":"mogptk/gpr/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"251926150","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom __future__ import division, print_function, unicode_literals\nimport ast\nfrom collections import OrderedDict\nimport re\nimport textwrap\nimport sys\n\nfrom docopt import docopt\n\nfrom sacred.commands import help_for_command\nfrom sacred.observers import MongoObserver\nfrom sacred.utils import set_by_dotted_path\n\n__sacred__ = True # marks files that should be filtered from stack traces\n\n__all__ = ('parse_args', 'get_config_updates', 'get_observers')\n\n\nUSAGE_TEMPLATE = \"\"\"Usage:\n {program_name} [(with UPDATE...)] [-m DB] [-l LEVEL] [-d]\n {program_name} help [COMMAND]\n {program_name} (-h | --help)\n {program_name} COMMAND [(with UPDATE...)] [-m DB] [-l LEVEL] [-d]\n\n{description}\n\nOptions:\n -h --help Print this help message and exit\n -m DB --mongo_db=DB Add a MongoDB Observer to the experiment\n -l LEVEL --logging=LEVEL Adjust the loglevel\n -d --debug Don't filter the stacktrace and automatically enter\n post-mortem debugging with pdb\n\nArguments:\n DB Database specification. Can be [host:port:]db_name[.prefix]\n UPDATE Configuration assignments of the form foo.bar=17\n COMMAND Custom command to run\n LEVEL Loglevel either as 0 - 50 or as string:\n DEBUG(10), INFO(20), WARNING(30), ERROR(40), CRITICAL(50)\n\"\"\"\n\n\nDB_NAME_PATTERN = r\"[_A-Za-z][0-9A-Za-z!#%&'()+\\-;=@\\[\\]^_{}.]{0,63}\"\nHOSTNAME_PATTERN = \\\n r\"(?=.{1,255}$)\"\\\n r\"[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?\"\\\n r\"(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-){0,61}[0-9A-Za-z])?)*\"\\\n r\"\\.?\"\nURL_PATTERN = \"(?:\" + HOSTNAME_PATTERN + \")\" + \":\" + \"(?:[0-9]{1,5})\"\n\nDB_NAME = re.compile(\"^\" + DB_NAME_PATTERN + \"$\")\nURL = re.compile(\"^\" + URL_PATTERN + \"$\")\nURL_DB_NAME = re.compile(\"^(?P\" + URL_PATTERN + \")\" + \":\" +\n \"(?P\" + DB_NAME_PATTERN + \")$\")\n\n\ndef parse_args(argv, description=\"\", commands=None, print_help=True):\n usage = _format_usage(argv[0], description, commands)\n args = docopt(usage, [str(a) for a in argv[1:]], help=print_help)\n if not args['help'] or not print_help:\n return args\n\n if args['COMMAND'] is None:\n print(usage)\n sys.exit()\n else:\n print(help_for_command(commands[args['COMMAND']]))\n sys.exit()\n\n\ndef get_config_updates(updates):\n config_updates = {}\n named_configs = []\n if not updates:\n return config_updates, named_configs\n for upd in updates:\n if upd == '':\n continue\n path, sep, value = upd.partition('=')\n if sep == '=':\n path = path.strip() # get rid of surrounding whitespace\n value = value.strip() # get rid of surrounding whitespace\n set_by_dotted_path(config_updates, path, _convert_value(value))\n else:\n named_configs.append(path)\n return config_updates, named_configs\n\n\ndef get_observers(args):\n observers = []\n if args['--mongo_db']:\n url, db_name, prefix = _parse_mongo_db_arg(args['--mongo_db'])\n if prefix:\n mongo = MongoObserver.create(db_name=db_name, url=url,\n prefix=prefix)\n else:\n mongo = MongoObserver.create(db_name=db_name, url=url)\n\n observers.append(mongo)\n\n return observers\n\n\ndef _format_usage(program_name, description, commands=None):\n usage = USAGE_TEMPLATE.format(\n program_name=program_name,\n description=description.strip() if description else 
'')\n\n if commands:\n usage += \"\\nCommands:\\n\"\n cmd_len = max([len(c) for c in commands] + [8])\n command_doc = OrderedDict(\n [(cmd_name, _get_first_line_of_docstring(cmd_doc))\n for cmd_name, cmd_doc in commands.items()])\n for cmd_name, cmd_doc in command_doc.items():\n usage += (\" {:%d} {}\\n\" % cmd_len).format(cmd_name, cmd_doc)\n return usage\n\n\ndef _get_first_line_of_docstring(func):\n return textwrap.dedent(func.__doc__ or \"\").strip().split('\\n')[0]\n\n\ndef _convert_value(value):\n try:\n return ast.literal_eval(value)\n except (ValueError, SyntaxError):\n # use as string if nothing else worked\n return value\n\n\ndef _parse_mongo_db_arg(mongo_db):\n if DB_NAME.match(mongo_db):\n db_name, _, prefix = mongo_db.partition('.')\n return 'localhost:27017', db_name, prefix\n elif URL.match(mongo_db):\n return mongo_db, 'sacred', ''\n elif URL_DB_NAME.match(mongo_db):\n match = URL_DB_NAME.match(mongo_db)\n db_name, _, prefix = match.group('db_name').partition('.')\n return match.group('url'), db_name, prefix\n else:\n raise ValueError('mongo_db argument must have the form \"db_name\" or '\n '\"host:port[:db_name]\" but was %s' % mongo_db)\n","sub_path":"sacred/arg_parser.py","file_name":"arg_parser.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"305076843","text":"#!/usr/bin/env python3\nfrom tkinter import *\nimport csv\nimport flappy\n\n\nclass hiscore:\n def __init__(self, master):\n self.master = master\n #makes the gui\n\n\n topframe = Frame(master)\n topframe.pack()\n\n botframe = Frame(master)\n botframe.pack()\n\n\n self.tryagainbutton = Button(botframe, text='Try again', command=self.try_again)\n self.tryagainbutton.grid()\n self.exitbutton = Button(botframe, text = 'Exit' , command=self.exit)\n self.exitbutton.grid(row = 0, column = 1)\n\n v = IntVar()\n v.set(2)\n self.label = Label(topframe, text='Sort by:')\n self.label.grid(row = 0, column = 0)\n\n\n\n with open('scores.csv') as csvfile:\n self.data = list(csv.reader(csvfile))\n \n def sort_name():\n\n self.listbox.delete(0,END)\n print('sortname')\n newlist = self.data\n sortedbyname = sorted(newlist, key=lambda x: x[0])\n print(sortedbyname)\n\n for line in sortedbyname:\n # distance = longestnamelength - len(line[0]) + 2\n self.listbox.insert(END, line[0], line[2], '-' * 20, '')\n def sort_score():\n self.listbox.delete(0,END)\n print('sortscore')\n newlist = self.data\n sortedbyscore = sorted(newlist,key=lambda x: int(x[2]), reverse = True)\n print(sortedbyscore)\n #longestname = max(self.data, key=lambda x: len(x[2]))\n #longestnamelength = len(longestname[0])\n\n for line in sortedbyscore:\n #distance = longestnamelength - len(line[0]) + 2\n self.listbox.insert(END, line[0], line[2], '-'*20, '')\n\n def get_score(person, score):\n high = '0'\n for line in self.data:\n if flappy.theperson == line[0]:\n if flappy.totalscore > int(line[2]):\n high = flappy.totalscore\n line[2] = high\n with open('scores.csv', 'w', newline='') as csvfile:\n csv.writer(csvfile).writerows(self.data)\n else: high = int(line[2])\n\n\n newframe = Frame(master)\n newframe.pack()\n self.score = score\n self.person = person\n\n self.scorelabel = Label(newframe, text='Hello, %s! 
\\n Your score that round was: %s \\n Your all-time high is: %s' %(self.person, self.score, high))\n self.scorelabel.grid()\n\n\n get_score(flappy.theperson,flappy.totalscore)\n\n self.rd1 = Radiobutton(topframe, text='Name', variable = v, value = 1, command=sort_name)\n self.rd2 = Radiobutton(topframe, text='Score', variable = v, value =2, command=sort_score)\n\n self.rd1.grid(row=0, column=1, sticky =W)\n self.rd2.grid(row=0,column=2, sticky =W)\n\n\n\n scrollbar = Scrollbar(topframe, orient=VERTICAL)\n self.listbox = Listbox(topframe)\n self.listbox.config(yscrollcommand=scrollbar.set)\n scrollbar.config(command=self.listbox.yview)\n scrollbar.grid(row=1, column=3)\n self.listbox.grid(row=1,columnspan=2)\n self.rd2.invoke()\n \n \n \n\n\n def try_again(self):\n self.master.destroy()\n flappy.main()\n def exit(self):\n self.master.destroy()\n\n\n\ndef main():\n root = Tk()\n a = hiscore(root)\n\n root.title('Highscores of Flappy Bird')\n root.mainloop()\n\n","sub_path":"hiscores.py","file_name":"hiscores.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"535761166","text":"# Copyright 2020 The PyMC Developers\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport aesara.tensor as at\n\nfrom aeppl.transforms import (\n CircularTransform,\n IntervalTransform,\n LogOddsTransform,\n LogTransform,\n RVTransform,\n Simplex,\n)\nfrom aesara.tensor.subtensor import advanced_set_subtensor1\n\n__all__ = [\n \"RVTransform\",\n \"simplex\",\n \"logodds\",\n \"interval\",\n \"log_exp_m1\",\n \"ordered\",\n \"log\",\n \"sum_to_1\",\n \"circular\",\n \"CholeskyCovPacked\",\n \"Chain\",\n]\n\n\nclass LogExpM1(RVTransform):\n name = \"log_exp_m1\"\n\n def backward(self, value, *inputs):\n return at.softplus(value)\n\n def forward(self, value, *inputs):\n \"\"\"Inverse operation of softplus.\n\n y = Log(Exp(x) - 1)\n = Log(1 - Exp(-x)) + x\n \"\"\"\n return at.log(1.0 - at.exp(-value)) + value\n\n def log_jac_det(self, value, *inputs):\n return -at.softplus(-value)\n\n\nclass Ordered(RVTransform):\n name = \"ordered\"\n\n def backward(self, value, *inputs):\n x = at.zeros(value.shape)\n x = at.inc_subtensor(x[..., 0], value[..., 0])\n x = at.inc_subtensor(x[..., 1:], at.exp(value[..., 1:]))\n return at.cumsum(x, axis=-1)\n\n def forward(self, value, *inputs):\n y = at.zeros(value.shape)\n y = at.inc_subtensor(y[..., 0], value[..., 0])\n y = at.inc_subtensor(y[..., 1:], at.log(value[..., 1:] - value[..., :-1]))\n return y\n\n def log_jac_det(self, value, *inputs):\n return at.sum(value[..., 1:], axis=-1)\n\n\nclass SumTo1(RVTransform):\n \"\"\"\n Transforms K - 1 dimensional simplex space (k values in [0,1] and that sum to 1) to a K - 1 vector of values in [0,1]\n This Transformation operates on the last dimension of the input tensor.\n \"\"\"\n\n name = \"sumto1\"\n\n def backward(self, value, *inputs):\n remaining = 1 - at.sum(value[..., :], axis=-1, keepdims=True)\n return at.concatenate([value[..., :], 
remaining], axis=-1)\n\n def forward(self, value, *inputs):\n return value[..., :-1]\n\n def log_jac_det(self, value, *inputs):\n y = at.zeros(value.shape)\n return at.sum(y, axis=-1)\n\n\nclass CholeskyCovPacked(RVTransform):\n name = \"cholesky-cov-packed\"\n\n def __init__(self, param_extract_fn):\n self.param_extract_fn = param_extract_fn\n\n def backward(self, value, *inputs):\n diag_idxs = self.param_extract_fn(inputs)\n return advanced_set_subtensor1(value, at.exp(value[diag_idxs]), diag_idxs)\n\n def forward(self, value, *inputs):\n diag_idxs = self.param_extract_fn(inputs)\n return advanced_set_subtensor1(value, at.log(value[diag_idxs]), diag_idxs)\n\n def log_jac_det(self, value, *inputs):\n diag_idxs = self.param_extract_fn(inputs)\n return at.sum(value[diag_idxs])\n\n\nclass Chain(RVTransform):\n\n __slots__ = (\"param_extract_fn\", \"transform_list\", \"name\")\n\n def __init__(self, transform_list):\n self.transform_list = transform_list\n self.name = \"+\".join([transf.name for transf in self.transform_list])\n\n def forward(self, value, *inputs):\n y = value\n for transf in self.transform_list:\n # TODO:Needs proper discussion as to what should be\n # passed as inputs here\n y = transf.forward(y, *inputs)\n return y\n\n def backward(self, value, *inputs):\n x = value\n for transf in reversed(self.transform_list):\n x = transf.backward(x, *inputs)\n return x\n\n def log_jac_det(self, value, *inputs):\n y = at.as_tensor_variable(value)\n det_list = []\n ndim0 = y.ndim\n for transf in reversed(self.transform_list):\n det_ = transf.log_jac_det(y, *inputs)\n det_list.append(det_)\n y = transf.backward(y, *inputs)\n ndim0 = min(ndim0, det_.ndim)\n # match the shape of the smallest log_jac_det\n det = 0.0\n for det_ in det_list:\n if det_.ndim > ndim0:\n det += det_.sum(axis=-1)\n else:\n det += det_\n return det\n\n\nsimplex = Simplex()\nlogodds = LogOddsTransform()\ninterval = IntervalTransform\nlog_exp_m1 = LogExpM1()\nordered = Ordered()\nlog = LogTransform()\nsum_to_1 = SumTo1()\ncircular = CircularTransform()\n","sub_path":"pymc/distributions/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"25300354","text":"import sys\n\nclass Node:\n def __init__(self):\n self.data = None\n self.next = None\n\nclass ListStack:\n def __init__(self):\n self.head = None\n \n def SIsEmpty(self):\n if self.head is None:\n return True\n else:\n return False\n \n def SPush(self, data):\n newNode = Node()\n\n newNode.data = data\n newNode.next = self.head\n\n self.head = newNode\n\n def SPop(self):\n if self.SIsEmpty() is True:\n print(\"Stack Memory Error!\")\n sys.exit(-1)\n \n rdata = self.head.data\n\n self.head = self.head.next\n\n return rdata\n \n def SPeek(self):\n if self.SIsEmpty() is True:\n print(\"Stack Memory Error!\")\n sys.exit(-1)\n \n return self.head.data\n","sub_path":"codes/InfixToPostfix/Python/ListBaseStackModule.py","file_name":"ListBaseStackModule.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"298320166","text":"import sys\nimport os\nimport numpy as np\nimport scipy.io\nimport cv2\nimport pdb\nimport dataset\nfrom human_eva.actor_parameter import ActorParameter\nfrom human_eva.synchronization_parameter import SynchronizationParameter\nfrom human_eva.motion_capture_data import MotionCaptureData\nfrom human_eva.camera_parameter import 
CameraParameter\nfrom human_eva.conic_limb_parameter import ConicLimbParameter\nfrom human_eva.limb_length import LimbLength\nfrom human_eva.global_marker_transform import GlobalMarkerTransform\nfrom human_eva.pose_3d import Pose3D\nfrom human_eva.pose_2d import Pose2D\nfrom human_eva.bounding_box import BoundingBox\nDEBUG = True\n## torsoProximal, upperLLegProximal, upperRArmDistal(lowerLArmProximal), lowerRArmDistal(strange, too low): something wrong with them,\n# maybe something wrong with calculating or synchronization\nif DEBUG:\n    def drawSinglePic(frame_tmp, x_2d_t, name, c = (255, 0, 0)):\n        frame_tmp = cv2.circle(frame_tmp, tuple(map(int, x_2d_t._data[name])), 3, c, -1)\n        cv2.imshow(\"123\", frame_tmp)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n    def drawKpts(frame, x_2d_t):\n        frame_tmp = frame.copy()\n        for idx, joint in enumerate(x_2d_t._data.keys()):\n            frame_tmp = cv2.circle(frame_tmp, tuple(map(int, x_2d_t._data[joint])), 3, (255, 0, 0), -1)\n        pdb.set_trace()\n        return frame_tmp\n\n## HumanEva dataset\n#\n# The loader class of HumanEva dataset : \"L. Sigal, A. O. Balan and M. J. Black. HumanEva: Synchronized Video and Motion Capture Dataset for Evaluation of Articulated Human Motion, International Journal of Computer Vision (IJCV), Volume 87, Number 1-2, pp. 4-27, March, 2010.\"\nclass HumanEva(dataset.Dataset):\n    # define const values\n    ACTORS = (\"S1\", \"S2\", \"S3\")\n    ACTIONS = (\"Box\", \"Gestures\", \"Jog\", \"ThrowCatch\", \"Walking\")\n    TRIALS = (1,)\n    PARTITION = {\"S1\":{\"Walking\": (590, 1180), \"Jog\": (367, 735), \"ThrowCatch\": (473, 946), \"Gestures\": (395, 790), \"Box\": (385, 770)},\n                 \"S2\":{\"Walking\": (438, 877), \"Jog\": (398, 796), \"ThrowCatch\": (550, 1101), \"Gestures\": (500, 1000), \"Box\": (382, 765)},\n                 \"S3\":{\"Walking\": (448, 896), \"Jog\": (401, 803), \"ThrowCatch\": (493, 987), \"Gestures\": (533, 1067), \"Box\": (512, 1024)}}\n    START_FRAME = 6\n    MAPPING = {\"head\": \"headDistal\",\n               \"neck\": \"headProximal\",\n               \"thorax\": \"torsoProximal\",\n               \"pelvis\": \"torsoDistal\",\n               \"l_shoulder\": \"upperLArmProximal\",\n               \"l_wrist\": \"lowerLArmDistal\",\n               \"r_shoulder\": \"upperRArmProximal\",\n               \"r_wrist\": \"lowerRArmDistal\",\n               \"l_ankle\": \"lowerLLegDistal\",\n               \"r_ankle\": \"lowerRLegDistal\"}\n    ## constructor\n    # @param input_dirname The directory name of the input HumanEva dataset root\n    # @param output_dirname The directory name of the output image root\n    def __init__(self, input_dirname=\"E:/HumanEva1\", output_dirname=\"data/images/HumanEva\"):\n        super(HumanEva, self).__init__()\n        self.__input_dirname = input_dirname\n        self.__output_dirname = output_dirname\n    ## map HumanEva joint list to DeepPose joint list\n    # @param self The object pointer\n    # @param pose The 2D/3D poses\n    # @return The dictionary of DeepPose style joint\n    def __mapJoint(self, pose): # using some joints' coords to calculate particular joints\n        x = {k: None for k in self.JOINTS}\n        for name in self.JOINTS:\n            # simple name mapping\n            if name in self.MAPPING:\n                x[name] = pose[self.MAPPING[name]]\n            # calculate DeepPose joint pose\n            else:\n                if \"elbow\" in name:\n                    if name[0] == \"l\":\n                        x[name] = (pose[\"upperLArmDistal\"] + pose[\"lowerLArmProximal\"])/2\n                    else:\n                        x[name] = (pose[\"upperRArmDistal\"] + pose[\"lowerRArmProximal\"])/2\n                elif \"knee\" in name:\n                    if name[0] == \"l\":\n                        x[name] = (pose[\"upperLLegDistal\"] + pose[\"lowerLLegProximal\"])/2\n                    else:\n                        x[name] = (pose[\"upperRLegDistal\"] + pose[\"lowerRLegProximal\"])/2\n        return x\n    ## generate the dataset data\n    # 
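Steps through one camera's video, pairs each image frame with its mocap frame, and stores the image path plus 2D/3D joints.\n    # 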
@param self The object pointer\n    # @param avi_file The filename of video data\n    # @param png_dir The directory name of output image data\n    # @param partition The partition between test data, train data and video end\n    # @param cam_param The camera parameter\n    # @param mocap The motion capture data\n    # @param sync_param The synchronization parameter\n    # @param conic_param The conic limb parameter\n    # @param log The log\n    def __generateData(self, avi_file, png_dir, partition, cam_param, mocap, sync_param, conic_param, log):\n        try:\n            os.makedirs(png_dir)\n        except OSError:\n            pass\n        image_frame = self.START_FRAME - 1\n        # load video\n        video = cv2.VideoCapture(avi_file)\n        #video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, image_frame)\n        video.set(1, image_frame)\n        randomPermutationIdx = np.random.permutation(partition[1]+1)\n        while(video.isOpened()):\n            if image_frame > partition[1]:\n                break\n            # logging\n            sys.stderr.write(\"{0} frames({1}/{2})\\r\".format(log, image_frame, partition[1]))\n            sys.stderr.flush()\n            # get current image frame\n            ret, frame = video.read()\n            if not ret:\n                image_frame += 1\n                continue\n            # calculate mocap frame\n            mocap_frame = sync_param.mc_st + (image_frame - sync_param.im_st)*sync_param.mc_sc\n            if (not 1 <= mocap_frame <= mocap.marker.shape[0]) or not mocap.isValid(mocap_frame):\n                image_frame += 1\n                #append zero\n                continue\n            # compute 3D pose\n            length = LimbLength(mocap, mocap_frame)\n            Tm = GlobalMarkerTransform(mocap, mocap_frame, conic_param, length)\n            x_3d_t = Pose3D(Tm, length)\n            x_2d_t = Pose2D(x_3d_t, cam_param)\n            # calculate bounding box\n            try:\n                bb = BoundingBox(x_2d_t)\n            except RuntimeError:\n                image_frame += 1\n                continue\n            # modify 2D/3D pose according to the bounding box\n            #x_3d_t.modify(bb, cam_param)\n            #x_2d_t.modify(bb)\n            # save image\n            filename = os.path.join(png_dir, \"{0}.png\".format(image_frame))\n            cv2.imwrite(filename, frame)\n            # substitute to class value\n            dirIdx = (1 if randomPermutationIdx[image_frame] < partition[0] else 0)\n            #pdb.set_trace()\n            self._dir[dirIdx][\"images\"].append(filename)\n            for i in range(3): #camParam matrix shape (3, 3)\n                for j in range(3):\n                    self._dir[dirIdx][\"camMat\"+str(i)+str(j)].append(cam_param.A[i, j])\n            #tmp_x_2d = self.__mapJoint(x_2d_t) # dict element: matrix shape (2, 1)\n            tmp_x_2d = x_2d_t\n            \n            if DEBUG:\n                img = drawKpts(frame, tmp_x_2d)\n                cv2.imshow(\"img\", img)\n            \n            for i in range(len(self.JOINTS)):\n                self._dir[dirIdx][\"2DJoint\"+str(i)+\"_u\"].append(tmp_x_2d[self.JOINTS[i]][0, 0])\n                self._dir[dirIdx][\"2DJoint\"+str(i)+\"_v\"].append(tmp_x_2d[self.JOINTS[i]][1, 0])\n            #tmp_x_3d = self.__mapJoint(x_3d_t) # dict element: matrix shape (3, 1)\n            tmp_x_3d = x_3d_t\n            for i in range(len(self.JOINTS)):\n                self._dir[dirIdx][\"3DJoint\"+str(i)+\"_x\"].append(tmp_x_3d[self.JOINTS[i]][0, 0])\n                self._dir[dirIdx][\"3DJoint\"+str(i)+\"_y\"].append(tmp_x_3d[self.JOINTS[i]][1, 0])\n                self._dir[dirIdx][\"3DJoint\"+str(i)+\"_z\"].append(tmp_x_3d[self.JOINTS[i]][2, 0])\n            # increment image frame\n            image_frame += 1\n        # release memory\n        video.release()\n    ## main method of generating the HumanEva dataset\n    \n    def _generateSyncFrameData(self, actor, trial, action, actor_param, log):\n        \n        try:\n            mocap = MotionCaptureData(os.path.join(self.__input_dirname, actor, \"Mocap_Data\", \"{0}_{1}.mat\".format(action, trial)))\n        except RuntimeError:\n            return \n        \n        avi_file_list = []\n        png_dir_list = []\n        sync_param_list = []\n        cam_param_list = []\n        frameIdx_list = []\n        video_list = []\n        frame_list = []\n        mocapIdx_list = []\n        x_2d_t_list = []\n        
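# per-camera state: each list below keeps one slot per camera and is advanced in lock-step\n        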
bb_list = []\n        \n        randomPermutationIdx = np.random.permutation(self.PARTITION[actor][action][1]+1)\n        partitionIdx = self.PARTITION[actor][action][0]\n        conic_param = ConicLimbParameter(mocap, actor_param)\n\n        for camIdx, camera in enumerate(self.CAMERAS): \n            avi_file_list.append(os.path.join(self.__input_dirname, actor, \"Image_Data\", \"{0}_{1}_({2}).avi\".format(action, trial, camera)))\n            png_dir_list.append(os.path.join(self.__output_dirname, actor, action, camera))\n            sync_param_list.append(SynchronizationParameter(os.path.join(self.__input_dirname, actor, \\\n                \"Sync_Data\", \"{0}_{1}_({2}).ofs\".format(action, trial, camera))))\n            cam_param_list.append(CameraParameter(os.path.join(\\\n                self.__input_dirname, actor, \"Calibration_Data\", camera + \".cal\")))\n\n            frameIdx_list.append(self.START_FRAME - 1)\n            video_list.append(cv2.VideoCapture(avi_file_list[camIdx]))\n            video_list[camIdx].set(1, frameIdx_list[camIdx])\n            frame_list.append(None)\n            mocapIdx_list.append(None)\n            x_2d_t_list.append(None)\n            bb_list.append(None)\n            try:\n                os.makedirs(png_dir_list[camIdx])\n            except OSError:\n                pass\n\n        refreshFlag = True\n        while(video_list[0].isOpened() and video_list[1].isOpened() and video_list[2].isOpened()):\n            if frameIdx_list[0] > partitionIdx or frameIdx_list[1] > partitionIdx or frameIdx_list[2] > partitionIdx:\n                break\n            if refreshFlag:\n                for camIdx, camera in enumerate(self.CAMERAS):\n                    ret, frame_list[camIdx] = video_list[camIdx].read()\n                    mocapIdx_list[camIdx] = round(sync_param_list[camIdx].mc_st + \\\n                        (frameIdx_list[camIdx] - sync_param_list[camIdx].im_st)*sync_param_list[camIdx].mc_sc)\n                refreshFlag = False\n            mocapIdx = min(mocapIdx_list) \n            dirIdx = (1 if randomPermutationIdx[frameIdx_list[0]] < partitionIdx else 0)\n            sys.stderr.write(\"{0} mocapframes({1}/{2})\\r\".format(log, mocapIdx, mocap.marker.shape[0]))\n            sys.stderr.flush()\n            # pdb.set_trace()  # debugger break: enable only when stepping through frames\n            if DEBUG:\n                for camIdx, camera in enumerate(self.CAMERAS):\n                    ShowFlag = False\n                    if x_2d_t_list[camIdx] is not None:\n                        ShowFlag = True\n                        drawFrame = drawKpts(frame_list[camIdx], x_2d_t_list[camIdx])\n                        cv2.imshow(\"img\"+camera, drawFrame)\n                if ShowFlag:\n                    cv2.waitKey(0)\n\n            if (1 <= mocapIdx <= mocap.marker.shape[0]) and mocap.isValid(mocapIdx):\n                length = LimbLength(mocap, mocapIdx)\n                Tm = GlobalMarkerTransform(mocap, mocapIdx, conic_param, length)\n                x_3d_t = Pose3D(Tm, length)\n                for camIdx, camera in enumerate(self.CAMERAS):\n                    if mocapIdx_list[camIdx] == mocapIdx:\n                        x_2d_t_list[camIdx] = Pose2D(x_3d_t, cam_param_list[camIdx])\n                        bb_list[camIdx] = BoundingBox(x_2d_t_list[camIdx])\n                        filename = os.path.join(png_dir_list[camIdx], \"{0}.png\".format(mocapIdx))\n                        cv2.imwrite(filename, frame_list[camIdx])\n                        self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_isValuable\"].append(1)\n                        self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_images\"].append(filename)\n                        for i in range(3): #camParam matrix shape (3, 3)\n                            for j in range(3):\n                                self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_camMat\"+str(i)+str(j)].append(cam_param_list[camIdx].A[i, j])\n                        for i in range(4):\n                            self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_\"+self.BBOX[i]].append(bb_list[camIdx].bb[i])\n                        tmp_x_2d = self.__mapJoint(x_2d_t_list[camIdx]) # dict element: matrix shape (2, 1)\n                        for i in range(len(self.JOINTS)):\n                            self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_2DJoint\"+str(i)+\"_u\"].append(tmp_x_2d[self.JOINTS[i]][0, 0])\n                            self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_2DJoint\"+str(i)+\"_v\"].append(tmp_x_2d[self.JOINTS[i]][1, 0])\n                        frameIdx_list[camIdx] += 1\n                        ret, frame_list[camIdx] = video_list[camIdx].read()\n                        
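# advance this camera's mocap index so it stays aligned with the frame just read\n                        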
mocapIdx_list[camIdx] = round(sync_param_list[camIdx].mc_st + \\\n (frameIdx_list[camIdx] - sync_param_list[camIdx].im_st)*sync_param_list[camIdx].mc_sc)\n \n else:\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_isValuable\"].append(0)\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_images\"].append(None)\n for i in range(3): #camParam matrix shape (3, 3)\n for j in range(3):\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_camMat\"+str(i)+str(j)].append(0.0)\n for i in range(4):\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_\"+self.BBOX[i]].append(0.0)\n for i in range(len(self.JOINTS)):\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_2DJoint\"+str(i)+\"_u\"].append(0.0)\n self._dir[dirIdx][\"Cam\"+str(camIdx)+\"_2DJoint\"+str(i)+\"_v\"].append(0.0)\n \n tmp_x_3d = self.__mapJoint(x_3d_t) # diection ele: matrix shape (3, 1)\n for i in range(len(self.JOINTS)):\n self._dir[dirIdx][\"3DJoint\"+str(i)+\"_x\"].append(tmp_x_3d[self.JOINTS[i]][0, 0])\n self._dir[dirIdx][\"3DJoint\"+str(i)+\"_y\"].append(tmp_x_3d[self.JOINTS[i]][1, 0])\n self._dir[dirIdx][\"3DJoint\"+str(i)+\"_z\"].append(tmp_x_3d[self.JOINTS[i]][2, 0])\n else:\n for camIdx, camera in enumerate(self.CAMERAS):\n frameIdx_list[camIdx] += 1\n refreshFlag = True\n\n for camIdx, camera in enumerate(self.CAMERAS):\n video_list[camIdx].release()\n\n \n\n\n def main(self):\n # crawl HumanEva dataset\n for a, actor in enumerate(self.ACTORS):\n actor_param = ActorParameter(os.path.join(self.__input_dirname, actor, \"Mocap_Data\", \"{0}.mp\".format(actor)))\n for t, trial in enumerate(self.TRIALS):\n for l, action in enumerate(self.ACTIONS):\n log = \"generating... actors({0}/{1}) trials({2}/{3}) actions({4}/{5})\".format(a, len(self.ACTORS), t, len(self.TRIALS), l, len(self.ACTIONS))\n self._generateSyncFrameData(actor, trial, action, actor_param, log) \n\n sys.stderr.write(\"\\n\")\n self._saveDataset()\n\n\nif __name__ == \"__main__\":\n HumanEva().main()\n","sub_path":"datasets/human_eva.py","file_name":"human_eva.py","file_ext":"py","file_size_in_byte":15677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"385205341","text":"def dict_depth(dic,level = 1):\n print(\"dic = \"+str(dic))\n if not isinstance(dic,dict) or not dic:\n print(\"returning level = \"+str(level))\n return level\n\n for key in dic:\n if isinstance(dic[key],dict):\n print(\"calling = \",dic[key])\n res[0] = max(res[0],dict_depth(dic[key],level+1))\n else:\n res[0] = max(res[0],level)\n\n print(\"returning res[0] = \"+str(res[0]))\n return res[0]\n\nres = [0]\nmyDict = {2:{}}\nprint(\"my dict = \" + str(myDict))\n# myDict = {1:'Geek', 2: {3: {4: {}}}}\nprint(dict_depth(myDict))","sub_path":"src/problems/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"505280666","text":"\ndef get_sorted(arr,n):\n low = 0 # track for 0 position.\n mid = 0 \n high = n-1 # track the 2's position\n\n while mid <= high: # mid should not be greater than high.\n\n if arr[mid] == 0:\n arr[low],arr[mid] = arr[mid],arr[low] # swap 1 present at low position with the current mid.\n low += 1\n mid += 1\n elif arr[mid] == 1:\n mid += 1\n else:\n arr[high],arr[mid] = arr[mid],arr[high]\n high -= 1\n \n return arr \n\ndef main():\n arr = list(map(int,input().split()))\n n = len(arr)\n res = get_sorted(arr,n)\n print(res)\n\nif __name__ == \"__main__\":\n main()\n 
\n","sub_path":"sort_0_1_2_pointer.py","file_name":"sort_0_1_2_pointer.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"563243265","text":" # -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nfrom future.utils import iteritems\nfrom collections import Counter\nfrom sklearn.manifold import TSNE\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport datetime\n\n\n# 꼬꼬마 형태소 분석기\nfrom konlpy.tag import Kkma\nkkma = Kkma()\n\n# Word2Vec 불러오기\nfrom gensim.models.word2vec import Word2Vec\n\nprint('abc')\n# Input data 정하기\n# article_data = pd.read_csv('./Article_세계/Article_세계_202001_202002.csv', encoding='euc-kr', header= None)\n# article_data = pd.read_csv('./Article_IT과학/Article_IT과학_202001_202002.csv', encoding='euc-kr', header= None)\n# article_data = pd.read_csv('./Article_세계/Article_세계_202001_202010.csv', encoding='euc-kr', header= None)\n# article_data = pd.read_csv('./Article_사회_202007_202010.csv', encoding='euc-kr', header= None)\narticle_data = pd.read_csv('../../self_nogada/IT과학/Article_IT과학_10월5주.csv', encoding='euc-kr', header= None)\n\n# 약간의 설명\n# row[0] : 뉴스날짜 row[1] : 뉴스 카테고리 row[2] : 뉴스원본사 row[3] : 뉴스제목 row[4] : 뉴스 내용 row[5] : 네이버뉴스 링크\n# print(article_data.iloc[86]) 그냥 article_data[1][10000]로 출력할 시 결측치 제거한 후 인덱스가 비어버렸기 때문에 키값 에러뜸. 접근 안돼서\n# article_data[1] IT과학 세로로 쭉 / article_data[1][10000] 1만번 째의 뉴스 IT과학 / article_data[3][4] 4번째 뉴스의 뉴스 내용\n\n#뉴스의 개수\nnews_size = article_data[0].size\n# print(\"뉴스 개수\" + str(news_size))\n# print(article_data.iloc[86])\n\n# NAN 결측치 제거 (euc-kr~~~ 로 적힌 잘못된 뉴스정보가 있다면 제거)\narticle_data = article_data.dropna(axis=0)\n\n#결측치 제거 후 뉴스의 개수\nnews_size = article_data[0].size\nprint(\"뉴스 개수\" + str(news_size))\n# print(article_data.iloc[86])\n# print(article_data.iloc[86][3])\n\n#하루 단위로 기사 내용을 stringLine에 다 합치고 list_whenDayChange에 append\nlist_whenDayChange = []\nstringLine = \"\"\nnowDays = 0\n\n# print(article_data.iloc[0][0] + \" \" + article_data.iloc[1][0])\n\n# 오늘 날짜\ntoday = datetime.date.today()\n\n# 11월 1일\nnovember = datetime.date(2020, 11, 1)\n# 10월까지\nnew_year = datetime.date(2020, 1, 1)\n\ndiff = int(str(november - new_year).split(' ')[0])\nprint(str(diff))\n\nnow_day_diff = 0\nfor i in range(0, news_size):\n if now_day_diff >= (diff - 7):\n stringLine += article_data.iloc[i][3]\n if i == (news_size - 1):\n list_whenDayChange.append(stringLine)\n else:\n if article_data.iloc[nowDays][0] == article_data.iloc[i][0]:\n stringLine += \" \" + article_data.iloc[i][3]\n else:\n # print(\"날짜가 달라졌음 \" + str(i))\n # print(\"날짜는 \" + str(article_data.iloc[i][0]))\n now_date = str(article_data.iloc[i][0])\n # print(now_date[0:4] + \" \" + now_date[4:6] + \" \" + now_date[6:8])\n now_day_diff = int(str(datetime.date(int(now_date[0:4]), int(now_date[4:6]), int(now_date[6:8])) - new_year).split(' ')[0])\n # print(now_date + \" >>> \" + str(now_day_diff))\n nowDays = i\n list_whenDayChange.append(stringLine)\n stringLine = \"\"\n\nprint(\"List Size >> \" + str(len(list_whenDayChange)))\n\n#리스트 내 하루단위로 쪼개진 문자열들 확인해보기\n# for i in range(len(list_whenDayChange)):\n# print(list_whenDayChange[i])\n# print(\" \")\n# print(i)\n\n#단어 인덱싱 및 빈도세기\n# as_one = ''\n# for document in list_whenDayChange:\n# as_one = as_one + ' ' + document\n# words = as_one.split()\n# print(words[0:10])\n\n# words = []\n# for i in 
range(len(list_whenDayChange)):\n#     words.append(kkma.nouns(list_whenDayChange[i]))\n#\n# words = [[y for y in x if not len(y) == 1] for x in words]\n# words = [[y for y in x if not y.isdigit()] for x in words]\n#\n# words_sum = ''\n# for i in range(len(words)):\n\n\n# feeding Counter a list of consecutive words returns each word's frequency as a dictionary\n# counts = Counter(words)\n# print(counts)\n\n# sort in descending order (reverse=True) by word frequency (counts.get)\n# vocab = sorted(counts, key=counts.get, reverse=True)\n# print(vocab)\n\n# number the words and store each word with its number in a dictionary, e.g. {word : index}\n# word2idx = {word.encode(\"utf8\").decode(\"utf8\"): ii for ii, word in enumerate(vocab,1)}\n# print(word2idx)\n\n# invert the mapping so that the index becomes the key\n# idx2word = {ii: word for ii, word in enumerate(vocab)}\n# print(idx2word)\n\n#Term Frequency\n# V = len(word2idx)\nN = len(list_whenDayChange)\ntf = CountVectorizer()\nonlyLast = [list_whenDayChange[N - 1]]\nX = tf.fit_transform(onlyLast)\nonlyLastByWord = \"\"\n\n# for i in range(0, len(tf.get_feature_names())):\n#     if X.toarray()[0][i] >= 7:\n#         for j in range(0, int(X.toarray()[0][i] / 7)):\n#             onlyLastByWord += tf.get_feature_names()[i] + ' '\n\nfor i in range(0, len(tf.get_feature_names())):\n    for j in range(0, X.toarray()[0][i]):\n        onlyLastByWord += tf.get_feature_names()[i] + ' '\n\nprint(onlyLastByWord)\nprevious_last_list = list_whenDayChange[N - 1]\nlist_whenDayChange[N-1] = onlyLastByWord\n\nstopwords = ['']\n#TF-IDF\ntfidf = TfidfVectorizer(max_features=5000, max_df=0.95, min_df=0,stop_words=stopwords)\n#generate tf-idf term-document matrix\nA_tfidf_sp = tfidf.fit_transform(list_whenDayChange) #size D x V\n\n#tf-idf dictionary\ntfidf_dict = tfidf.get_feature_names()\n# print(tfidf_dict)\n\n# visualize the top n words by TF-IDF score\ndata_array = A_tfidf_sp.toarray()\ndata = pd.DataFrame(data_array, columns=tfidf_dict)\n# print(data.shape)\nprint(data.tail(1))\n\nfeature_array = np.array(tfidf.get_feature_names())\ntfidf_sorting = np.argsort(A_tfidf_sp[N-1].toarray()).flatten()[::-1]\n\nn = 20\ntop_n = feature_array[tfidf_sorting][:n]\ni = 0\nfor text in top_n:\n    top_n[i] = text.upper()\n    i = i + 1\n\nprint(top_n)\n\n    # previous work\n\n\n# # word indexing and frequency counting\n# as_one = ''\n# as_one = previous_last_list.split()\n# print(\"우리의 테스트@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n# print(\"바이든이 있다 없다?! 
>> \" + str(as_one.__contains__('바이든')))\n# print(as_one)\n#\n# list_whenDayChange_split = []\n#\n# for i in range(0, N - 1):\n# # print(str(i) + \"번째 문장!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n# list_whenDayChange_split.append(list_whenDayChange[i].split(' '))\n# list_whenDayChange_split[i].remove('')\n# # print(list_whenDayChange_split[i])\n# list_whenDayChange_split.append(as_one)\n#\n# print(type(as_one))\n# print(type(list_whenDayChange_split))\n# print('flag1')\n# # 모형 구축\n# model = Word2Vec([as_one],\n# sg=1, # Skip-grqm 적용 : 중심 단어로 주변 단어를 예측\n# window=5, # 중심 단어로부터 좌우 5개 단어까지 학습에 적용\n# min_count=1 # 전체문서에서 최소 1회 이상 출현단어로 학습 진행\n# )\n# model.init_sims(replace=True)\n#\n# print('flag2')\n# print(\"바이든과 대선 단어간 유사도는 %.2f 입니다.\" % (model.wv.similarity('바이든', '대선')))\n# print('flag3')\n#\n# for i in range(1, N):\n# print(top_n[0] + \" \" + top_n[i])\n# print(\"%s과 %s 단어간 유사도는 %.2f 입니다.\" % (top_n[0], top_n[i], model.wv.similarity(top_n[0], top_n[i])))\n#\n# vocab_frame = pd.DataFrame(model.wv.most_similar('바이든', topn=10), columns=['단어', '유사도'])\n# print(vocab_frame)\n\n########################\n\n #새 작업\ndataset = []\nfor i in range(len(list_whenDayChange)):\n dataset.append(kkma.nouns(list_whenDayChange[i]))\n# print(dataset[0])\n\ndataset = [[y for y in x if not len(y) == 1] for x in dataset]\ndataset = [[y for y in x if not y.isdigit()] for x in dataset]\n\nmodel = Word2Vec(dataset, sg=1, window=5, min_count=1)\nmodel.init_sims(replace=True)\n\nprint(top_n[0])\nvocab_frame = pd.DataFrame(model.wv.most_similar(top_n[0], topn=2000), columns=['단어', '유사도'])\nprint(vocab_frame.head(10))\n\nvocab_frame = vocab_frame[vocab_frame['단어'].isin(top_n)]\nprint(vocab_frame)\n# for i in range(0,5):\n# print(vocab_frame.loc[i]['단어'])\n\nadd_frame = pd.Series([top_n[0],1], index = ['단어','유사도'])\nvocab_frame = vocab_frame.append(add_frame, ignore_index = True)\n\nvocab_frame.to_csv(\"C:\\ssafy\\project_3\\python_bigData\\datamining\\multi_mining\\IT5.csv\", mode='w', encoding='euc-kr')\n","sub_path":"python/python_bigData/datamining/tfidf-word2vec.py","file_name":"tfidf-word2vec.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"344853715","text":"# -*- coding: utf-8 -*-\n# Author:sen\n# Date:2020/3/10 17:40\n\nimport os\nimport random\nimport shutil\nfrom pprint import pprint\nfrom collections import defaultdict\n\n\ndef cross5(src_dir):\n # 将原始数据打乱并分配到5个文件夹里面\n # src_dir = \"C:\\\\Users\\\\M\\\\Desktop\\\\seen_cross5_origin\\\\pilot101_200\"\n count = 0\n basename = os.path.basename(src_dir)\n # dirs = os.listdir(src_dir)\n for dir_name in os.listdir(src_dir):\n print(dir_name)\n for en_name in os.listdir(os.path.join(src_dir, dir_name)):\n os.rename(os.path.join(src_dir, dir_name, en_name),\n os.path.join(src_dir, dir_name, basename + '_' + dir_name + '_' + en_name)) \n\n\ndef copy_dirs():\n dest_dir = r'C:\\Users\\M\\Desktop\\pilot900_Processed_'\n # shutil.move(r'C:\\Users\\M\\Desktop\\hello', dest_dir)\n \n root_dir= r'C:\\UESTC\\项目-民航语音\\pilot900_Processed'\n for pilot_x in os.listdir(root_dir):\n print(pilot_x)\n pilot_x_path = os.path.join(root_dir, pilot_x)\n for name in os.listdir(pilot_x_path):\n name_path = os.path.join(pilot_x_path, name)\n print(name_path)\n for en_name in os.listdir(name_path):\n en_path = os.path.join(name_path, en_name)\n # print(en_path)\n # shutil.move(en_path, dest_dir)\n \n\ndef shuffle_move():\n src_dir = r'C:\\UESTC\\项目-民航语音\\pilot900_Processed2'\n dest_dir = 
r'C:\\UESTC\\项目-民航语音\\pilot900_Processed3'\n    path = list(os.listdir(src_dir))\n    # path = [i for i in range(11276)]\n    random.shuffle(path)\n    part_cnt = len(path) // 5\n    dest_idx = 0\n\n    for i in range(4):\n        part_path = path[i*part_cnt:(i+1)*(part_cnt)]\n        dest_path = os.path.join(dest_dir, 'dataset' + str(i))\n        if not os.path.exists(dest_path):\n            os.makedirs(dest_path)\n        for p in part_path: \n            shutil.move(os.path.join(src_dir, p), dest_path)\n        print(len(part_path))\n    last_path = path[(i+1)*part_cnt:len(path)]\n    dest_path = os.path.join(dest_dir, 'dataset' + str(i+1))\n    if not os.path.exists(dest_path):\n        os.makedirs(dest_path)\n    for p in last_path:\n        shutil.move(os.path.join(src_dir, p), dest_path)\n    print(len(last_path))\n    \n    \n    \n    pass\n\n\ndef moveFile():\n    src = \"C:\\\\\\\\Users\\\\\\\\M\\\\\\\\Desktop\\\\\\\\seen_cross5_origin\\\\\\\\pilot101_200\\\\\\\\liuzhilin\\\\\\\\en001\"\n\n    # filenumber = len(pathDir)\n    # # rate = 0.1  # custom fraction of images to sample, e.g. 10 out of 100 is 0.1\n    # # picknumber = int(filenumber * rate)  # take picknumber images from the folder according to rate\n    # picknumber = 50\n    # sample = random.sample(pathDir, picknumber)  # randomly pick picknumber sample images\n    # print(sample)\n    # # for name in sample:\n    # #     # shutil.move(fileDir + name, tarDir + name)\n    # return\n\ndef list_split(n, part, shuffle):\n    assert part > 0, 'part should be > 0'\n    idx = [i for i in range(n)]\n    if shuffle:\n        random.shuffle(idx)\n    parts = []\n    part_cnt = n // part\n    for i in range(part-1):\n        each_part = idx[i*part_cnt:(i+1)*(part_cnt)]\n        parts.append(each_part)\n    last_part = idx[(i+1)*part_cnt:n]\n    parts.append(last_part)\n    return parts\n\n\ndef people():\n    root = r'C:\\\\UESTC\\\\项目-民航语音\\\\pilot900_Processed2'\n    dest_dir = r'C:\\\\UESTC\\\\项目-民航语音\\\\pilot900_Processed_unseen'\n    names = defaultdict(list)\n    for name in os.listdir(root):\n        x = name.split('_')[-2]\n        names[x].append(name)\n    \n    names_key = list(names.keys())\n    names_parts = list_split(len(names_key), 5, shuffle=True)\n    \n    for i, part in enumerate(names_parts):\n        print(i, '-' * 30)\n        name_part = []\n        dest_path = os.path.join(dest_dir, 'dataset' + str(i))\n        if not os.path.exists(dest_path):\n            os.makedirs(dest_path)\n        for idx in part:\n            name_part.append(names_key[idx])\n        for name in name_part:\n            print(name)\n            for dir_name in names[name]:\n                shutil.move(os.path.join(root, dir_name), dest_path)\n    \n\n\nif __name__ == '__main__':\n    pass","sub_path":"dataprep/crossDataPrep.py","file_name":"crossDataPrep.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"72383057","text":"print('Welcome to Tic Tac Toe!')\nplayer1 = input('Do you want to be X or O?')\nprint('player1 will go first')\nQuery1 = input('Are you ready to play')\nif Query1.lower() == 'yes':\n    while True:\n        print(['']*10)\n\n\n\n\nelse:\n    quit()","sub_path":"FirstMileStone.py","file_name":"FirstMileStone.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"316756557","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interp\nimport os\nfrom sklearn import manifold\n\n\ndef plot_embedding(X, labels, p):\n    \"\"\"\n    Plot embeddings with their labels using TSNE for dimension reduction\n    \"\"\"\n    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0, perplexity=p)\n    X = tsne.fit_transform(X)\n    fig_dim = (15.0,12.0)\n    plt.figure(figsize=fig_dim)\n    num_labs = len(set(labels))\n    ax = plt.subplot(111)\n    #cm = plt.get_cmap('tab20')\n    #ax.set_prop_cycle('color', [cm(1. 
* i / num_labs) for i in range(num_labs)])\n points_x = list()\n points_y = list()\n for l in range(num_labs):\n points_x.append(list())\n points_y.append(list())\n for i in range(X.shape[0]):\n points_x[labels[i]].append(X[i, 0])\n points_y[labels[i]].append(X[i, 1])\n for i,l in enumerate(range(num_labs)):\n ax.scatter(points_x[i], points_y[i], s=5, label=l, alpha=1)\n\n ax.legend(loc=\"upper right\", prop={'size': 12})\n plt.title(\"Plot of embeddings\")\n ax.set_xlim([X[:,0].min()-0.05, X[:,0].max() + 0.25])\n ax.set_ylim([X[:,1].min()-0.05, X[:,1].max() + 0.25])\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n plt.show()\n","sub_path":"visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"636004384","text":"\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\n\nclass Book( models.Model) :\n\n _description = \"Book\"\n _name = \"library.book\"\n _order = \"name\"\n\n _sql_constraints = [\n (\"edition_positive_int\", \"CHECK( edition > 0)\", \n \"Edition should be at least the first.\"),\n (\"isbn_valid\", \"CHECK( isbn ~* '^[0-9]{13}$')\", \n \"ISBN should be in standard format.\")\n ]\n\n name = fields.Char( compute = \"_get_book_identification\", readonly = True,\n string = \"Book Identification\")\n title = fields.Char( string = \"Book Title\", required = True)\n editor = fields.Char( string = \"Editor\", default = \"None\")\n edition = fields.Integer( string = \"Edition\", default = 1)\n copy_number = fields.Integer( string = \"Copy Number\", \n compute = \"_get_copy_number\", store = True)\n isbn = fields.Char( string = \"ISBN\", size = 13, required = True)\n times_rented = fields.Integer( string = \"Times Rented\", \n compute = \"_get_times_rented\", readonly = True)\n available = fields.Boolean( string = \"Available\", \n compute = \"_get_availability\", readonly = True)\n\n category_id = fields.Many2one( string = \"Category\", \n comodel_name = \"library.category\", ondelete = \"cascade\", \n required = True)\n\n authors_ids = fields.Many2many( comodel_name = \"library.author\", \n relation = \"authors_book\", column1 = \"book_id\", \n column2 = \"author_id\", string = \"Authors\", required = True,\n copy = True)\n\n rents_ids = fields.One2many( comodel_name = \"library.rent\", \n inverse_name = \"book_id\", copy = False)\n\n def register_new_copy( self) :\n for book in self :\n book.copy()\n\n def get_availability( self) :\n for book in self :\n book._get_availability()\n return book.available\n\n @api.depends( \"title\", \"edition\", \"copy_number\")\n def _get_book_identification( self) :\n for book in self :\n book.name = f\"{ book.title }, Ed. 
{ book.edition } (Copy \"\\\n f\"{ book.copy_number })\"\n\n @api.depends( \"isbn\", \"copy_number\")\n def _get_copy_number( self) :\n for book in self :\n copies = book.search( args = [('isbn', '=', book.isbn)])\n copies_numbers = copies.mapped( \"copy_number\")\n copy_highest = max( copies_numbers) if copies_numbers else 0\n book.copy_number = copy_highest + 1\n\n @api.depends( \"rents_ids\")\n def _get_times_rented( self) :\n for book in self :\n book.times_rented = len( book.rents_ids)\n\n @api.depends( \"rents_ids\")\n def _get_availability( self) :\n for book in self :\n book.available = all( \n status == \"Finalized\" for status in book.rents_ids.mapped( 'status'))\n print( book.name, book.rents_ids.mapped( 'status'))\n\n @api.constrains( \"authors_ids\")\n def _has_book_at_least_one_author( self) :\n for book in self :\n if len( book.authors_ids) == 0 :\n raise ValidationError( \"A book should have at least one author.\")\n\n","sub_path":"models/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"226633587","text":"#!/usr/bin/python3\n\"\"\"\nThis is a process that runs every day to collect\ncard data. It is run with a cron task on an ubuntu server at 6AM.\nIt collects the data by running a series of scrapers, and saves\nthe values into my database file.\nIt also updates the daily watchlist trends.\n\"\"\"\nimport os \nimport project_flask\nimport datetime\nimport time\n\nprint('running dailytask')\nfPath = '/home/timc/flask_project/flask_app/daily.txt'\n# get the date\n\nts = time.time()\ndailyTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H-%M')\nprint('test getTime:',dailyTime)\n\nwith open(fPath, 'a') as f:\n#with open('daily.txt', 'a') as f:\n f.write('\\n edited on: ' + dailyTime)\n\n\n# set price scraper\n\ntry:\n os.system(r'python3 /home/timc/flask_project/flask_app/scrapers/setPriceScraper.py')\n with open(fPath, 'a') as f:\n f.write('\\n running set price scraper')\nexcept:\n print('could not run setpricescraper')\n with open(fPath, 'a') as f:\n f.write('\\n setpricescraper didnt run')\n\n# frontpage scraper\ntry:\n os.system(r'python3 /home/timc/flask_project/flask_app/scrapers/frontpagedb.py')\n with open(fPath, 'a') as f:\n f.write('\\n running frontpagedb')\nexcept:\n print('could not run frontpagedb')\n with open(fPath, 'a') as f:\n f.write('\\n frontpagedb didnt run')\n\n# buylist scraper\ntry:\n with open(fPath, 'a') as f:\n f.write('\\n running buylistscraper')\n os.system(r'python3 /home/timc/flask_project/flask_app/scrapers/buylistsetscraper.py')\n\nexcept:\n with open(fPath, 'a') as f:\n f.write('\\n buylist scraper didnt run')\n\n# get the watchlist, return rows\ntry:\n with open(fPath, 'a') as f:\n f.write('\\n running getWatchList')\n rows = project_flask.getWatchList()\n\nexcept:\n print('could not access getwatchlist')\n with open(fPath, 'a') as f:\n f.write('\\n could not access getwatchlist')\n\n# refreshes the watchlist trends\nprint(\"updating watchlist\")\ntry:\n with open(fPath, 'a') as f:\n f.write('\\n processing watchlist updates')\n for row in rows:\n project_flask.updateTrend(row['id'])\n print('updating',row['id'])\n\nexcept:\n print('could not access updateTrend')\n with open(fPath, 'a') as f:\n f.write('\\n could not access updateTrend')\n\nwith open(fPath, 'a') as f:\n f.write('\\n editing done: ' + 
dailyTime)\n","sub_path":"dailyTask.py","file_name":"dailyTask.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"271221709","text":"#\n# Example file for HelloWorld\n#\ndef inc(a,b=1):\n return(a+b)\n\na=inc(1)\na=inc(a,a)\nprint(a)\n\n# understanding the meaning of __name__ == \"__main__\" logic to make sure the function listed below is called as the main one no matter if there are others in hte whole code\n\ndef main():\n print(\"Hello World!!!\")\n name = input(\"Input your name: \")\n print(\"Welcome to Python programing \" + name + \" you're heading to Success again\")\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"Ex_Files_Learning_Python_Upd/Exercise Files/Ch2/helloworld_start.py","file_name":"helloworld_start.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"190130008","text":"# -*- coding: utf-8 -*-\n\n\ndef fn(filename, myreader, info, lines):\n _myreader = myreader.copy()\n\n from features.text.expressions import StartsWith, UnicodeCategories, \\\n UNICODE_CATEGORIES\n X1 = StartsWith().transform(_myreader, lines)\n X2 = UnicodeCategories().transform(_myreader, lines)\n\n names = ['both-start'] + ['both-' + cat for cat in UNICODE_CATEGORIES]\n return ([X1, X2], names)\n","sub_path":"src/features/extract-text-expressions.py","file_name":"extract-text-expressions.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"233325811","text":"import os\nimport ROOT\nZXfolder = os.path.dirname(__file__)\nReducibleBackgroundFile = os.path.join(ZXfolder, \"ReducibleBackgroundAA_2015.C\")\nROOT.gROOT.LoadMacro(ReducibleBackgroundFile+\"+\")\n\ndef setup(production):\n success = ROOT.setup(int(production), ZXfolder)\n if not success:\n raise ValueError(\"Bad production: {}\".format(production))\n\nimport convertTGraphstoTH1Fs\nconvertTGraphstoTH1Fs.convertTGraphstoTH1Fs(os.path.join(ZXfolder, \"FakeRate_SS_2016B.root\"))\n#https://github.com/CJLST/ZZAnalysis/blob/b4cc949af2ca81a9dd2bfeaba47768c7ea0cfd13/AnalysisStep/data/FakeRates/FakeRate_SS_2016D.root\nconvertTGraphstoTH1Fs.convertTGraphstoTH1Fs(os.path.join(ZXfolder, \"FakeRate_SS_2016D.root\"))\n#https://github.com/CJLST/ZZAnalysis/blob/4b0ac1ccda2a60295e8233069dcd1a17802894f7/AnalysisStep/data/FakeRates/FakeRate_SS_2016D.root\nconvertTGraphstoTH1Fs.convertTGraphstoTH1Fs(os.path.join(ZXfolder, \"FakeRate_SS_2016D_12.9fb-1.root\"))\n\nfrom ROOT import CRZLLss, fakeRate13TeV, test_bit\n","sub_path":"helperstuff/ZX/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"7585037","text":"# Copyright 2019 The gRPC Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport logging\nimport 
unittest\nimport time\nimport gc\n\nimport grpc\nfrom grpc.experimental import aio\nfrom tests_aio.unit._test_base import AioTestBase\nfrom tests.unit.framework.common import test_constants\n\n_SIMPLE_UNARY_UNARY = '/test/SimpleUnaryUnary'\n_BLOCK_FOREVER = '/test/BlockForever'\n_BLOCK_BRIEFLY = '/test/BlockBriefly'\n_UNARY_STREAM_ASYNC_GEN = '/test/UnaryStreamAsyncGen'\n_UNARY_STREAM_READER_WRITER = '/test/UnaryStreamReaderWriter'\n_UNARY_STREAM_EVILLY_MIXED = '/test/UnaryStreamEvillyMixed'\n\n_REQUEST = b'\\x00\\x00\\x00'\n_RESPONSE = b'\\x01\\x01\\x01'\n_NUM_STREAM_RESPONSES = 5\n\n\nclass _GenericHandler(grpc.GenericRpcHandler):\n\n def __init__(self):\n self._called = asyncio.get_event_loop().create_future()\n\n @staticmethod\n async def _unary_unary(unused_request, unused_context):\n return _RESPONSE\n\n async def _block_forever(self, unused_request, unused_context):\n await asyncio.get_event_loop().create_future()\n\n async def _block_briefly(self, unused_request, unused_context):\n await asyncio.sleep(test_constants.SHORT_TIMEOUT / 2)\n return _RESPONSE\n\n async def _unary_stream_async_gen(self, unused_request, unused_context):\n for _ in range(_NUM_STREAM_RESPONSES):\n yield _RESPONSE\n\n async def _unary_stream_reader_writer(self, unused_request, context):\n for _ in range(_NUM_STREAM_RESPONSES):\n await context.write(_RESPONSE)\n\n async def _unary_stream_evilly_mixed(self, unused_request, context):\n yield _RESPONSE\n for _ in range(_NUM_STREAM_RESPONSES - 1):\n await context.write(_RESPONSE)\n\n def service(self, handler_details):\n self._called.set_result(None)\n if handler_details.method == _SIMPLE_UNARY_UNARY:\n return grpc.unary_unary_rpc_method_handler(self._unary_unary)\n if handler_details.method == _BLOCK_FOREVER:\n return grpc.unary_unary_rpc_method_handler(self._block_forever)\n if handler_details.method == _BLOCK_BRIEFLY:\n return grpc.unary_unary_rpc_method_handler(self._block_briefly)\n if handler_details.method == _UNARY_STREAM_ASYNC_GEN:\n return grpc.unary_stream_rpc_method_handler(\n self._unary_stream_async_gen)\n if handler_details.method == _UNARY_STREAM_READER_WRITER:\n return grpc.unary_stream_rpc_method_handler(\n self._unary_stream_reader_writer)\n if handler_details.method == _UNARY_STREAM_EVILLY_MIXED:\n return grpc.unary_stream_rpc_method_handler(\n self._unary_stream_evilly_mixed)\n\n async def wait_for_call(self):\n await self._called\n\n\nasync def _start_test_server():\n server = aio.server()\n port = server.add_insecure_port('[::]:0')\n generic_handler = _GenericHandler()\n server.add_generic_rpc_handlers((generic_handler,))\n await server.start()\n return 'localhost:%d' % port, server, generic_handler\n\n\nclass TestServer(AioTestBase):\n\n async def setUp(self):\n self._server_target, self._server, self._generic_handler = await _start_test_server(\n )\n\n async def tearDown(self):\n await self._server.stop(None)\n\n async def test_unary_unary(self):\n async with aio.insecure_channel(self._server_target) as channel:\n unary_unary_call = channel.unary_unary(_SIMPLE_UNARY_UNARY)\n response = await unary_unary_call(_REQUEST)\n self.assertEqual(response, _RESPONSE)\n\n async def test_unary_stream_async_generator(self):\n async with aio.insecure_channel(self._server_target) as channel:\n unary_stream_call = channel.unary_stream(_UNARY_STREAM_ASYNC_GEN)\n call = unary_stream_call(_REQUEST)\n\n # Expecting the request message to reach server before retriving\n # any responses.\n await asyncio.wait_for(self._generic_handler.wait_for_call(),\n 
test_constants.SHORT_TIMEOUT)\n\n response_cnt = 0\n async for response in call:\n response_cnt += 1\n self.assertEqual(_RESPONSE, response)\n\n self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt)\n self.assertEqual(await call.code(), grpc.StatusCode.OK)\n\n async def test_unary_stream_reader_writer(self):\n async with aio.insecure_channel(self._server_target) as channel:\n unary_stream_call = channel.unary_stream(\n _UNARY_STREAM_READER_WRITER)\n call = unary_stream_call(_REQUEST)\n\n # Expecting the request message to reach server before retriving\n # any responses.\n await asyncio.wait_for(self._generic_handler.wait_for_call(),\n test_constants.SHORT_TIMEOUT)\n\n for _ in range(_NUM_STREAM_RESPONSES):\n response = await call.read()\n self.assertEqual(_RESPONSE, response)\n\n self.assertEqual(await call.code(), grpc.StatusCode.OK)\n\n async def test_unary_stream_evilly_mixed(self):\n async with aio.insecure_channel(self._server_target) as channel:\n unary_stream_call = channel.unary_stream(_UNARY_STREAM_EVILLY_MIXED)\n call = unary_stream_call(_REQUEST)\n\n # Expecting the request message to reach server before retriving\n # any responses.\n await asyncio.wait_for(self._generic_handler.wait_for_call(),\n test_constants.SHORT_TIMEOUT)\n\n # Uses reader API\n self.assertEqual(_RESPONSE, await call.read())\n\n # Uses async generator API\n response_cnt = 0\n async for response in call:\n response_cnt += 1\n self.assertEqual(_RESPONSE, response)\n\n self.assertEqual(_NUM_STREAM_RESPONSES - 1, response_cnt)\n\n self.assertEqual(await call.code(), grpc.StatusCode.OK)\n\n async def test_shutdown(self):\n await self._server.stop(None)\n # Ensures no SIGSEGV triggered, and ends within timeout.\n\n async def test_shutdown_after_call(self):\n async with aio.insecure_channel(self._server_target) as channel:\n await channel.unary_unary(_SIMPLE_UNARY_UNARY)(_REQUEST)\n\n await self._server.stop(None)\n\n async def test_graceful_shutdown_success(self):\n channel = aio.insecure_channel(self._server_target)\n call = channel.unary_unary(_BLOCK_BRIEFLY)(_REQUEST)\n await self._generic_handler.wait_for_call()\n\n shutdown_start_time = time.time()\n await self._server.stop(test_constants.SHORT_TIMEOUT)\n grace_period_length = time.time() - shutdown_start_time\n self.assertGreater(grace_period_length,\n test_constants.SHORT_TIMEOUT / 3)\n\n # Validates the states.\n await channel.close()\n self.assertEqual(_RESPONSE, await call)\n self.assertTrue(call.done())\n\n async def test_graceful_shutdown_failed(self):\n channel = aio.insecure_channel(self._server_target)\n call = channel.unary_unary(_BLOCK_FOREVER)(_REQUEST)\n await self._generic_handler.wait_for_call()\n\n await self._server.stop(test_constants.SHORT_TIMEOUT)\n\n with self.assertRaises(grpc.RpcError) as exception_context:\n await call\n self.assertEqual(grpc.StatusCode.UNAVAILABLE,\n exception_context.exception.code())\n self.assertIn('GOAWAY', exception_context.exception.details())\n await channel.close()\n\n async def test_concurrent_graceful_shutdown(self):\n channel = aio.insecure_channel(self._server_target)\n call = channel.unary_unary(_BLOCK_BRIEFLY)(_REQUEST)\n await self._generic_handler.wait_for_call()\n\n # Expects the shortest grace period to be effective.\n shutdown_start_time = time.time()\n await asyncio.gather(\n self._server.stop(test_constants.LONG_TIMEOUT),\n self._server.stop(test_constants.SHORT_TIMEOUT),\n self._server.stop(test_constants.LONG_TIMEOUT),\n )\n grace_period_length = time.time() - shutdown_start_time\n 
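# Of the three concurrent stop() calls above, the shortest grace period\n        # (SHORT_TIMEOUT) should take effect, so a noticeable fraction of it\n        # must elapse before the server finishes shutting down.\n        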
self.assertGreater(grace_period_length,\n                           test_constants.SHORT_TIMEOUT / 3)\n\n        await channel.close()\n        self.assertEqual(_RESPONSE, await call)\n        self.assertTrue(call.done())\n\n    async def test_concurrent_graceful_shutdown_immediate(self):\n        channel = aio.insecure_channel(self._server_target)\n        call = channel.unary_unary(_BLOCK_FOREVER)(_REQUEST)\n        await self._generic_handler.wait_for_call()\n\n        # Expects no grace period, due to the \"server.stop(None)\".\n        await asyncio.gather(\n            self._server.stop(test_constants.LONG_TIMEOUT),\n            self._server.stop(None),\n            self._server.stop(test_constants.SHORT_TIMEOUT),\n            self._server.stop(test_constants.LONG_TIMEOUT),\n        )\n\n        with self.assertRaises(grpc.RpcError) as exception_context:\n            await call\n        self.assertEqual(grpc.StatusCode.UNAVAILABLE,\n                         exception_context.exception.code())\n        self.assertIn('GOAWAY', exception_context.exception.details())\n        await channel.close()\n\n    @unittest.skip('https://github.com/grpc/grpc/issues/20818')\n    async def test_shutdown_before_call(self):\n        server_target, server, _ = await _start_test_server()\n        await server.stop(None)\n\n        # Ensures the server is cleaned up at this point.\n        # Some proper exception should be raised.\n        async with aio.insecure_channel(server_target) as channel:\n            await channel.unary_unary(_SIMPLE_UNARY_UNARY)(_REQUEST)\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.DEBUG)\n    unittest.main(verbosity=2)\n","sub_path":"src/python/grpcio_tests/tests_aio/unit/server_test.py","file_name":"server_test.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"633317546","text":"from microconventions import MicroConventions, api_url\nimport requests, time, sys\nfrom pprint import pprint\nimport numpy as np\nimport json\n\n# New video tutorials are available at https://www.microprediction.com/python-1 to help you\n# get started reading historical data, and the like.\n\n\nclass MicroReader(MicroConventions):\n\n    def __init__(self, base_url=None, **kwargs):\n        \"\"\" Establish connection and adopt configuration parameters from site \"\"\"\n        super().__init__(base_url=base_url or api_url(), **kwargs)\n\n    def fix_stream_name(self,name:str)->str:\n        if len(name)<5 or name[-5:]!='.json':\n            print('Stream names should end in .json')\n            return name+'.json'\n        else:\n            return name\n\n    def request_get_json(self, method, arg=None, data=None, throw=True):\n        # TODO: Can remove this after microconventions>0.1.0\n        try:\n            if data is not None:\n                res = requests.get(self.base_url + '/' + method + '/' + arg, data=data)\n            elif arg is not None:\n                res = requests.get(self.base_url + '/' + method + '/' + arg)\n            elif data is None and arg is None:\n                res = requests.get(self.base_url + '/' + method)\n            if res.status_code == 200:\n                return res.json()\n        except ConnectionError as e:\n            print('WARNING: ConnectionError attempting to get ' + method)\n            if throw:\n                raise e\n\n    def __repr__(self):\n        return json.dumps({'base_url':self.base_url})\n\n    def get(self, name, throw=True):\n        return self.request_get_json(method='live', arg=name, throw=throw)\n\n    def get_current_value(self, name, throw=True):\n        name = self.fix_stream_name(name=name)\n        return self.request_get_json(method='live', arg=name, throw=throw)\n\n    def get_sponsors(self) -> dict:\n        return self.get_streams_by_sponsor()\n\n    def get_streams_by_sponsor(self):\n        return self.request_get_json(method='sponsors')\n\n    def get_budgets(self):\n        return self.get_streams_by_budget()\n\n    def 
get_streams_by_budget(self):\n return self.request_get_json(method='budgets')\n\n def get_streams(self) -> dict:\n return self.get_sponsors()\n\n def get_prizes(self) -> dict:\n return self.request_get_json(method='prizes')\n\n def get_stream_names(self) -> [str]:\n return [ name for name in self.get_streams() ]\n\n def get_summary(self, name):\n return self.request_get_json(method='live', arg='summary::' + name)\n # res = requests.get(self.base_url + '/live/summary::' + name)\n\n def get_lagged(self,name, count=1000):\n name = self.fix_stream_name(name=name)\n return self.request_get_json(method='lagged',arg=name, data={'count':count-1})\n\n def get_lagged_values_and_times(self, name, count=1000):\n \"\"\" Preferred method \"\"\"\n name = self.fix_stream_name(name=name)\n lagged = self.get_lagged(name=name, count=count)\n lagged_values = [l[1] for l in lagged]\n lagged_times = [l[0] for l in lagged]\n return lagged_values, lagged_times\n\n def get_lagged_values(self, name:str, count:int=1000):\n \"\"\" Retrieve lagged values of a time series\n :param name: cop.json z1~cop.json z2~cop~qp.json\n :return: [ float ]\n \"\"\"\n name = self.fix_stream_name(name=name)\n lagged_values, lagged_times = self.get_lagged_values_and_times(name=name,count=count)\n return lagged_values\n\n def get_lagged_copulas(self, name:str, count:int=5000):\n \"\"\" Retrieve history of implied copulas in [0,1]^n\n returns [ [p1,p2,p3] ]\n \"\"\"\n name = self.fix_stream_name(name=name)\n assert '~' in name,'This method is intended for copula streams'\n lagged_values, lagged_times = self.get_lagged_values_and_times(name=name, count=count)\n dim = 2 if 'z2~' in name else 3\n lagged_prctls = [ self.from_zcurve(zvalue,dim=dim) for zvalue in lagged_values ]\n return lagged_prctls\n\n def get_lagged_zvalues(self, name:str, count:int=5000):\n \"\"\" Retrieve history of implied z in [-inf,inf]^n\n returns [ [z1,z2,z3], [ , ,] ]\n \"\"\"\n name = self.fix_stream_name(name=name)\n assert '~' in name, 'This method is intended for bivariate or trivariate copula streams'\n lagged_values, lagged_times = self.get_lagged_values_and_times(name=name, count=count)\n dim = 2 if 'z2~' in name else 3\n\n def expand(z):\n ps = self.from_zcurve(zvalue=z,dim=dim)\n return [ self.norminv(p) for p in ps ]\n\n lagged_zs = [ expand(z) for z in lagged_values]\n return lagged_zs\n\n\n def get_lagged_times(self, name:str, count:int=1000) -> list:\n \"\"\" Retrieve lagged times\n :param name: cop.json z1~cop.json z2~cop~qp.json\n :return: [ float ]\n \"\"\"\n name = self.fix_stream_name(name=name)\n lagged_values, lagged_times = self.get_lagged_values_and_times(name=name, count=count)\n return lagged_times\n\n def get_delayed_value(self, name: str, delay: int):\n \"\"\" Retrieve quarantined value.\n This is the most recent data point after going back at least delay seconds\n\n param: name cop.json z1~cop.json z2~cop~qp.json\n param: delay\n :return: [ float ]\n \"\"\"\n name = self.fix_stream_name(name=name)\n return self.request_get_json(method='live', arg='delayed::' + str(delay) + self.SEP + name)\n # res = requests.get(self.base_url + '/live/delayed::' + str(delay) + \"::\" + name)\n\n def get_repository(self, write_key):\n \"\"\" Get repository associated with a write key \"\"\"\n # You can also supply a hash of the write key instead\n return self.request_get_json(method='repository', arg=write_key)\n # res = requests.get(self.base_url + '/repository/' + write_key)\n\n def get_predictions(self, write_key, name, delay:int, strip=True, 
consolidate=True):\n        \"\"\" Retrieve predictions for a given horizon\n\n               strip          If false, returns dictionary with individual submissions\n               consolidate    If false, returns tuples (owner,value)\n                              Otherwise just returns values\n\n        \"\"\"\n        name = self.fix_stream_name(name=name)\n        tickets = self.request_get_json(method='predictions', arg=name, data={\"write_key\":write_key,\"delay\":delay})\n        if strip:\n            tups = [(ticket.split('::')[1], val) for ticket, val in tickets.items()]\n            if consolidate:\n                return sorted([v for owner,v in tups])\n            else:\n                return tups\n        else:\n            return tickets\n\n\n\n\n    def median(self, name: str, delay: int):\n        name = self.fix_stream_name(name=name)\n        return self.inv_cdf(name=name, delay=delay, p=0.5, num=15)\n\n    def inv_cdf(self, name: str, delay: int, p=None, ps=None, num=25):\n        \"\"\" Approximate PPF\n\n              p    float or\n              ps   [float]\n              num  Number of interpolation points to use\n\n        \"\"\"\n        name = self.fix_stream_name(name=name)\n        # This won't choose x values based on the percentiles supplied, so it is a bit dumb in that sense\n        cdf = self.get_cdf_lagged(name=name, delay=delay, num=num)\n        ps_ = ps or [p]\n        # Interpolate the requested percentiles against the cumulative probabilities to recover x-values\n        xs = np.interp(x=ps_, xp=cdf[\"y\"], fp=cdf[\"x\"], left=None, right=None)\n        return xs[0] if ps is None else list(xs)\n\n    def get_discrete_pdf_lagged(self, name: str, delay: int = None, num: int=25, lagged_values=None):\n        \"\"\" Retrieve an estimate of the PDF (only for streams taking discrete values)\n\n                num   Maximum number of points to compute the PDF at\n\n        \"\"\"\n        if delay is None:\n            delay = self.DELAYS[0]\n        # PDF for continuous case is not implemented yet, sorry!\n        lagged_values = lagged_values or self.get_lagged_values(name=name)\n        values = self.cdf_values(lagged_values=lagged_values, num=num, as_discrete=True)\n        raw_cdf = self._get_cdf(name=name, delay=delay, values=values)\n        return {'x':raw_cdf['x'], 'y':self.discrete_pdf(raw_cdf['y'])} if raw_cdf.get('x') else raw_cdf\n\n    def get_cdf_lagged(self, name: str, delay: int, num: int = 25, lagged_values=None, as_discrete=None):\n        \"\"\" Get CDF using automatically selected x-values based on lags\n\n               num:              Maximum number of points to use\n               lagged_values:    Supply these to avoid an extra http round trip\n               as_discrete:      Supply a bool if known\n\n        \"\"\"\n        lagged_values = lagged_values or self.get_lagged_values(name=name)\n        if as_discrete is None:\n            as_discrete = self.is_discrete(lagged_values=lagged_values, num=num, ndigits=12)\n        values = self.cdf_values(lagged_values=lagged_values, num=num, as_discrete=as_discrete)\n        raw_cdf = self._get_cdf(name=name, delay=delay, values=values)\n        return self.discrete_cdf(raw_cdf) if as_discrete and raw_cdf.get('x') else raw_cdf\n\n    def get_cdf(self, name: str, delay: int, values: [float], as_discrete=False) -> dict:\n        \"\"\"\n            Get CDF using supplied x values\n        \"\"\"\n        raw_cdf = self._get_cdf(name=name, delay=delay, values=values)\n        return self.discrete_cdf(raw_cdf) if as_discrete and raw_cdf.get('x') else raw_cdf\n\n    def _get_cdf(self, name: str, delay: int, values: [float]) -> dict:\n        \"\"\" Implements approximate cumulative distribution function based on community micropredictions\n\n                values     A list of x-values at which CDF will be evaluated\n                Returns  dict  {'x':[ float ], 'y':[ float ]}  where 'x' holds the evaluation points and 'y' the cumulative probabilities\n\n            This cdf can be tricky to interpret\n        \"\"\"\n        comma_sep_values = \",\".join([str(v) for v in sorted(values)])\n        return self.request_get_json(method='cdf', arg=name, data={'delay': delay, 'values': comma_sep_values})\n        # res = requests.get(self.base_url + '/cdf/' + name, 
params={\"delay\":delay,\"values\": comma_sep_values})\n\n # For convenience...\n # This will move to microconventions\n\n def percentiles(self) -> [float]:\n \"\"\" A list of 225 evenly spaced numbers in (0,1) \"\"\"\n return self.evenly_spaced_percentiles(num=self.num_predictions)\n\n\nclass MicroReaderStatus(MicroReader):\n\n def __init__(self):\n super().__init__()\n\n def reader_status(self):\n examples = {'get': {'name': 'cop.json'},\n 'get_current_value': {'name': 'cop.json'},\n 'get_sponsors': {},\n 'get_streams': {},\n 'get_budgets': {},\n 'get_summary': {'name': 'cop.json'},\n 'get_lagged_values': {'name': 'cop.json'},\n 'get_lagged_times': {'name': 'cop.json'},\n 'get_delayed_value': {'name': 'cop.json','delay':self.DELAYS[2]},\n 'get_cdf': {'name': 'cop.json','delay':self.DELAYS[2],'values':[-1,0,1]}\n }\n report = list()\n for method, kwargs in examples.items():\n call_time = time.time()\n try:\n data = self.__getattribute__(method)(**kwargs)\n sz = sys.getsizeof(data)\n st = 'up' if sz > 0 else ''\n tm = time.time() - call_time\n er = ''\n except Exception as e:\n st = 'down'\n tm = -1\n er = str(e)\n sz = -1\n report.append((method, st, tm, er, sz))\n return report\n\n\nif __name__ == \"__main__\":\n pprint(MicroReaderStatus().reader_status())\n","sub_path":"microprediction/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"36026828","text":"from django import forms\nfrom .models import Post\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title','text','image')\n\nclass ImageForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title','text','image')\n\n\nclass EditProfile(UserChangeForm):\n\n class Meta:\n model = User\n fields = {\n 'password',\n 'first_name',\n 'email',\n 'last_name',\n \n }","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"487466251","text":"#!/usr/bin/env python3\nimport signal\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('AppIndicator3', '0.1')\nfrom gi.repository import Gtk, AppIndicator3, GObject\nimport time\nfrom threading import Thread\nimport psutil\n\nclass Indicator():\n def __init__(self):\n self.app = 'test123'\n iconpath = \"/opt/abouttime/icon/indicator_icon.png\"\n self.indicator = AppIndicator3.Indicator.new(\n self.app, iconpath,\n AppIndicator3.IndicatorCategory.OTHER)\n self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE) \n self.indicator.set_menu(self.create_menu())\n self.indicator.set_label(\"-\", self.app)\n # the thread:\n self.update = Thread(target=self.show_seconds)\n # daemonize the thread to make the indicator stopable\n self.update.setDaemon(True)\n self.update.start()\n\n def create_menu(self):\n menu = Gtk.Menu()\n # menu item 1\n item_1 = Gtk.MenuItem('Menu item')\n # item_about.connect('activate', self.about)\n #menu.append(item_1)\n # separator\n #menu_sep = Gtk.SeparatorMenuItem()\n # menu.append(menu_sep)\n # quit\n item_quit = Gtk.MenuItem('Quit')\n item_quit.connect('activate', self.stop)\n menu.append(item_quit)\n\n menu.show_all()\n return menu\n\n def show_seconds(self):\n t = 2\n while True:\n time.sleep(15)\n is_plex_active = self.check_plex_active()\n mention = 'Plex 
Active' if is_plex_active else '-' \n            # apply the interface update using GObject.idle_add()\n            GObject.idle_add(\n                self.indicator.set_label,\n                mention, self.app,\n                priority=GObject.PRIORITY_DEFAULT\n            )\n            t += 1\n\n    def stop(self, source):\n        Gtk.main_quit()\n    \n    def check_plex_active(self):\n        procs = {p.pid: p.info for p in psutil.process_iter(attrs=['name', 'cmdline'])}\n        plex_active = False \n        for pid, proc in procs.items():\n            for argument in proc['cmdline']:\n                if 'Plex Transcoder' in argument:\n                    plex_active = True \n        return plex_active \n\nIndicator()\n# this is where we call GObject.threads_init()\nGObject.threads_init()\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\nGtk.main()\n\n","sub_path":"plexstatus.py","file_name":"plexstatus.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"281811436","text":"import datetime\nfrom sqlalchemy import (\n    Column,\n    String,\n    Float,\n    DateTime,\n    ForeignKey,\n)\n\nfrom ParkingFinder.tables.base import Base\n\n\nclass UserReserved(Base):\n    __tablename__ = 'user_reserved'\n\n    user_id = Column(String(64), ForeignKey('users.user_id'), primary_key=True)\n    latitude = Column(Float, nullable=False)\n    longitude = Column(Float, nullable=False)\n    created_at = Column(DateTime, default=datetime.datetime.utcnow)\n\n    def __repr__(self):\n        return 'user_id: {}, ' \\\n               'latitude: {}, ' \\\n               'longitude: {}, ' \\\n               'created_at: {} '.format(\n                    self.user_id,\n                    self.latitude,\n                    self.longitude,\n                    self.created_at,\n                )\n","sub_path":"ParkingFinder/tables/user_reserved.py","file_name":"user_reserved.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"605226229","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nauthor: xlingbai@gmail.com\nfile: demo\ndate: 2018/4/2\nbrief:\n\"\"\"\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\nx_data = np.random.rand(100).astype(np.float32)\ny_data = x_data * 0.1 + 0.3\n\n# tensorflow structure\nWeights = tf.Variable(tf.random_uniform([1], -0.1, 1.0)) # 1-D tensor, uniform initial value in [-0.1, 1.0)\nbiases = tf.Variable(tf.zeros([1]))\n\ny = Weights * x_data + biases\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.5) # 0.5: learning rate\ntrain = optimizer.minimize(loss)\n\n# initialize the variables\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init) # run the init op\n\nfor step in range(500): # train for 500 steps\n    sess.run(train) # one training step\n    if step %20 ==0:\n        print(step, sess.run(Weights), sess.run(biases))\n","sub_path":"code/kaggle/DigitRecognizer/tensorflow_keras_learning/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"137401157","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\n######### 9.19: create a file of a specified size containing a given byte at a specified number of random positions \r\nimport random\r\nimport sys\r\ndef fun(word=None, times=None, long=None):\r\n    if word == None or not (word >= 0 and word <= 255 ) :\r\n        print('invalid word')\r\n        return 0\r\n    if not times >= 0 and not str(times).isalnum():## isalnum: check that the value is an integer \r\n        print('invalid times')\r\n        return 0\r\n    if not long > 0 and not str(long).isalnum() :\r\n        print('invalid long')\r\n        return 0\r\n    chr_word = chr(word) \r\n    random_times = random.sample(xrange(long),times)\r\n    i = 0\r\n    print('word is %s, times is %s, long is %s' %(chr_word, 
random_times, long))\r\n try:\r\n f = open('dumpfile','w+b')##open as read and write,binary \r\n line = list('*'*long)\r\n # print(line)##debug 5\r\n for j in random_times:\r\n # f.seek(j-1)#####cant write the first byte(seek(0))?\r\n # f.write(chr_word)\r\n line[j] = chr_word\r\n line = ''.join(line)\r\n f.write(line) \r\n # f.seek(0)\r\n # print('debug 3 ')##debug 3\r\n # print f.readlines()\r\n # print('debug 4') ##debug 4\r\n except:\r\n return sys.exc_info()\r\n finally:\r\n f.close()\r\n print(open('dumpfile').read()) \r\n ","sub_path":"9.19.py","file_name":"9.19.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"14708840","text":"import os\nfrom urllib.request import urlretrieve\n\nimport click\nimport pandas as pd\n\nfrom .stop_covid_dashboard_scrapper import scrap_pdf_to_csv\n\n\n@click.command()\n@click.option(\n \"--dashboard-pdf-url\",\n default=\"http://stopcoronavirus.mcgm.gov.in/assets/docs/Dashboard.pdf\",\n show_default=True,\n)\n@click.option(\"--output\", default=\"stopcovid-dashboard.pdf\", show_default=True)\ndef download_pdf(dashboard_pdf_url, output):\n datetime_suffix = pd.Timestamp.utcnow().isoformat()\n click.echo(f\"Fetching {dashboard_pdf_url} into {output}\")\n return urlretrieve(dashboard_pdf_url, output)\n\n\n@click.command()\n@click.argument(\"pdf-files\", nargs=-1, required=True)\n@click.argument(\"output-dir\", nargs=1, required=True)\n@click.option(\n \"--page-with-positive-breakdown\",\n default=22,\n help=\"The page containing the ward-wise breakdown of positive cases.\",\n show_default=True,\n)\ndef scrap_pdf(pdf_files, output_dir, page_with_positive_breakdown):\n \"\"\"Given a PDF_FILE tries to fetch the data from all wards.\n\n We assume the PDF_FILE has been downloaded from http://stopcoronavirus.mcgm.gov.in/assets/docs/Dashboard.pdf.\n\n PDF_FILES A list of paths to the pdf files.\n OUTPUT_DIR The path to the output directory.\n \"\"\"\n if not os.path.isdir(output_dir):\n raise IOError(f\"Output: {output_dir} is not a directory\")\n\n paths = []\n for pdf_file in pdf_files:\n date, csv_path = scrap_pdf_to_csv(\n pdf_file, output_dir, page_with_positive_breakdown\n )\n paths.append(csv_path)\n click.echo(f\"Data from {date} has been downloaded to {csv_path}\")\n\n click.echo(f\"Saved the following files {paths}\")\n return paths\n\n\n@click.group()\ndef cli():\n pass\n\n\ncli.add_command(download_pdf)\ncli.add_command(scrap_pdf)\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"pdf_scrapper/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"372972580","text":"# %%\nimport matlab.engine\nimport parameters as param\nimport scipy.io\nfrom scipy.optimize import curve_fit\nimport numpy as np\nimport json\n\n# import matplotlib.pyplot as plt\n\n# Carrega os arquivos .m\neng = matlab.engine.start_matlab()\n\n# %%\nbinv = np.array(scipy.io.loadmat(\"Binv1.mat\").get(\"Binv1\"))\nbg = np.array(scipy.io.loadmat(\"Bg1.mat\").get(\"Bg1\"))\npcp_ind_lcl = np.array(scipy.io.loadmat(\"Pcp_ind_LCL.mat\").get(\"Pcp_ind_LCL\"))\np_cap_lcl = np.array(scipy.io.loadmat(\"P_cap_LCL.mat\").get(\"P_cap_LCL\"))\npswitches_inv_cond = np.array(\n scipy.io.loadmat(\"Pchaves_inv_cond.mat\").get(\"Pchaves_inv_cond\")\n)\npswitches_inv_sw = np.array(\n scipy.io.loadmat(\"Pchaves_inv_sw.mat\").get(\"Pchaves_inv_sw\")\n)\n# i_cap = 
np.array(scipy.io.loadmat(\"I_cap.mat\").get(\"I_cap\"))\npbat = np.array(scipy.io.loadmat(\"Pot_bat.mat\").get(\"Pot_bat\"))\npswitches_conv_cc_cond = np.array(\n scipy.io.loadmat(\"Pchaves_conv_cc_cond.mat\").get(\"Pchaves_conv_cc_cond\")\n)\npswitches_conv_cc_sw = np.array(\n scipy.io.loadmat(\"Pchaves_conv_cc_sw.mat\").get(\"Pchaves_conv_cc_sw\")\n)\nPcp_inter1 = np.array(scipy.io.loadmat(\"Pcp_ind_bt.mat\").get(\"Pcp_ind_bt\"))\nbinter1 = np.array(scipy.io.loadmat(\"Bind1.mat\").get(\"Bind1\"))\npbat2 = np.array(scipy.io.loadmat(\"Pot_bat2.mat\").get(\"Pot_bat2\"))\npswitches_conv_cc_cond2 = np.array(\n scipy.io.loadmat(\"Pchaves_conv_cc_cond2.mat\").get(\"Pchaves_conv_cc_cond2\")\n)\npswitches_conv_cc_sw2 = np.array(\n scipy.io.loadmat(\"Pchaves_conv_cc_sw2.mat\").get(\"Pchaves_conv_cc_sw2\")\n)\nPcp_inter2 = np.array(scipy.io.loadmat(\"Pcp_ind_bt2.mat\").get(\"Pcp_ind_bt2\"))\nbinter2 = np.array(scipy.io.loadmat(\"Bind2.mat\").get(\"Bind2\"))\nPgrid = np.array(scipy.io.loadmat(\"Pot_grid.mat\").get(\"Pot_grid\"))\n\n# %% Cálculo das perdas magnéticas nos indutores do filtro LCL\nprint(\"Core loss calculation: inverter side inductor of the LCL filter...\")\n\ndef core_loss_func(mag_flux_dens, count):\n mag_flux_dens = matlab.double(list(np.append(mag_flux_dens, mag_flux_dens[0])))\n time = matlab.double(list(np.arange(0, 1 / param.fn, param.ts)))\n count += 1\n print(count, end=\" \")\n return eng.coreloss(\n time, mag_flux_dens, float(param.cn), float(param.xn), float(param.kn), 1\n ) # perdas em W/m3\n\n\nbg_ac = np.array(\n [\n binv[i][int(len(binv[i]) - ((1 / 60) / (1 / (9000 * 120))) + 1) :]\n for i in range(0, len(binv))\n ]\n)\n\ncore_loss = np.array([core_loss_func(bg_ac[i][:], i) for i in range(0, len(binv))])\nplosses_core_linv_lcl = 3 * core_loss * param.vn * 1e-9 # Perdas em W\n\nprint(\"\\nCore loss calculation: grid side inductor of the LCL filter...\")\nbg_ac = np.array(\n [\n bg[i][int(len(bg[i]) - ((1 / 60) / (1 / (9000 * 120))) + 1) :]\n for i in range(0, len(bg))\n ]\n)\ncore_loss = np.array([core_loss_func(bg_ac[i][:], i) for i in range(0, len(bg))])\nplosses_core_lg_lcl = 3 * core_loss * param.vn * 1e-9 # Perdas em W\n\nprint(\"\\nCore loss calculation: interleaved inductor of the dc/dc converter 1...\")\nbg_ac = np.array(\n [\n binter1[i][int(len(binter1[i]) - ((1 / param.fswb) / param.ts) + 1) :]\n for i in range(0, len(binter1))\n ]\n)\ncore_loss = np.array([core_loss_func(bg_ac[i][:], i) for i in range(0, len(binter1))])\nplosses_core_inter1 = 3 * core_loss * param.vn_inter * 1e-9 # Perdas em W\n\nprint(\"\\nCore loss calculation: interleaved inductor of the dc/dc converter 2...\")\nbg_ac = np.array(\n [\n binter1[i][int(len(binter1[i]) - ((1 / param.fswb) / param.ts) + 1) :]\n for i in range(0, len(binter1))\n ]\n)\ncore_loss = np.array([core_loss_func(bg_ac[i][:], i) for i in range(0, len(binter2))])\nplosses_core_inter2 = 3 * core_loss * param.vn_inter * 1e-9 # Perdas em W\n\nprint(\"\\nCopper loss calculation: resistors of the LCL filter...\")\nplosses_copper_lcl = pcp_ind_lcl\n\nprint(\"ESR loss calculation: capacitors of the LCL filter...\")\nplosses_esr_lcl = pcp_ind_lcl\n\nprint(\"\\nCopper loss calculation: interleaved inductor of the dc/dc converter 1...\")\nplosses_copper_inter1 = Pcp_inter1\n\nprint(\"\\nCopper loss calculation: interleaved inductor of the dc/dc converter 2...\")\nplosses_copper_inter2 = Pcp_inter2\n\nprint(\"Conduction loss calculation: inverter switches...\")\nplosses_cond_inv = pswitches_inv_cond\n\nprint(\"Switching loss 
calculation: inverter switches...\")\nplosses_switch_inv = pswitches_inv_sw\n\nprint(\"Conduction loss calculation: interleaved 1 switches...\")\nplosses_cond_inter1 = pswitches_conv_cc_cond\n\nprint(\"Conduction loss calculation: interleaved 2 switches...\")\nplosses_cond_inter2 = pswitches_conv_cc_cond2\n\nprint(\"Switching loss calculation: interleaved 1 switches...\")\nplosses_switch_inter1 = pswitches_conv_cc_sw\n\nprint(\"Switching loss calculation: interleaved 2 switches...\")\nplosses_switch_inter2 = pswitches_conv_cc_sw2\n\nprint(\"Total power losses calculation...\")\ntotal_power_losses = (\n # plosses_dc_link\n +plosses_core_linv_lcl\n + plosses_core_lg_lcl\n + plosses_copper_lcl\n + plosses_esr_lcl\n + plosses_cond_inv\n + plosses_switch_inv\n + plosses_core_inter1\n + plosses_core_inter2\n + plosses_copper_inter1\n + plosses_copper_inter2\n + plosses_cond_inter1\n + plosses_cond_inter2\n + plosses_switch_inter1\n + plosses_switch_inter2\n)\n\n# %%\nprint(\"Efficiency calculation...\")\n# efficiency = ((1 - total_power_losses / (pbat + pbat2)) * 100)[0] # Discharge\n# Pin = (pbat + pbat2)[0] # Discharge\n\nefficiency = ((1 - total_power_losses / abs(Pgrid)) * 100)[0] # Charge\nPin = abs(Pgrid)[0]\n\nprint(\"Save json file...\")\nwith open(\"efficiency_bess.json\", \"w\") as arquivo:\n efficiency_list = efficiency.tolist()\n json.dump(efficiency_list, arquivo)\n\nwith open(\"total_power_losses_bess.json\", \"w\") as arquivo:\n total_power_losses_list = total_power_losses.tolist()\n json.dump(total_power_losses_list[0], arquivo)\n\nwith open(\"pin.json\", \"w\") as arquivo:\n Pin_list = Pin.tolist()\n json.dump(Pin_list, arquivo)\n\nprint(\"Complete\")\n# %%\n","sub_path":"perdas/Paralelo/cycle/power_losses_calculation.py","file_name":"power_losses_calculation.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"351152638","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport tempfile\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow import keras\n#import tensorflow_model_optimization as tfmot\nimport argparse\nfrom tensorflow.python.keras.callbacks import Callback\nfrom tensorflow.python.lib.io import file_io\nimport json\n\n\n# 모델 사이즈를 측정하기 위한 함수\ndef get_gzipped_model_size(file):\n # Returns size of gzipped model, in bytes.\n import os\n import zipfile\n\n _, zipped_file = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:\n f.write(file)\n\n return os.path.getsize(zipped_file)\n\nclass Cifar10(object):\n def train(self):\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--learning_rate', required=False, type=float, default=0.001)\n parser.add_argument('--dropout_rate', required=False, type=float, default=0.3) \n parser.add_argument('--model_path', required=False, default='/result',type = str) #/saved_model\n parser.add_argument('--model_version', required=False, default='/base_model.h5',type = str)#test2/Base_model.h5\n args = parser.parse_args()\n\n\n\n # Load Cifar10 dataset\n cifar10 = keras.datasets.cifar10\n (train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\n\n\n # Normalize the input image so that each pixel value is between 0 to 1.\n #train_images = train_images / 255.0\n #test_images = test_images / 255.0\n train_images = train_images / 255.0 #.astype(np.float32)\n test_images = test_images / 255.0 #.astype(np.float32)\n # Define the model 
architecture\n model = keras.Sequential([\n keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),\n keras.layers.MaxPooling2D((2, 2)), \n keras.layers.Conv2D(64, (3, 3), activation='relu'),\n keras.layers.MaxPooling2D((2, 2)),\n keras.layers.Conv2D(64, (3, 3), activation='relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(64, activation='relu'),\n keras.layers.Dense(10)\n ])\n model.summary()\n\n # Train the digit classification model\n model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n model.fit(\n train_images,\n train_labels,\n epochs=10,\n validation_data=(test_images, test_labels)\n )\n #model.fit(x_train, y_train, epochs=5,callbacks=[KatibMetricLog()])\n\n \n results = model.evaluate(test_images, test_labels, batch_size=128)\n \n _, model_accuracy = model.evaluate(test_images, test_labels, verbose=0)\n print(\"Base model accuracy : \", model_accuracy)\n \n \n loss = results[0]\n accuracy = results[1]\n metrics = {\n 'metrics': [{\n 'name': 'accuracy',\n 'numberValue': float(accuracy),\n 'format': \"PERCENTAGE\",\n }, {\n 'name': 'loss',\n 'numberValue': float(loss),\n 'format': \"RAW\",\n }]\n }\n \n with file_io.FileIO('/mlpipeline-metrics.json', 'w') as f:\n json.dump(metrics, f)\n \n tf.keras.models.save_model(model, args.model_path+args.model_version, include_optimizer=False) #os.getcwd()+'/'+args.model_version.split('/')[1]\n print(\"Base model size: \",get_gzipped_model_size(args.model_path+args.model_version))\n\n\ndef fairing_run():\n CONTAINER_REGISTRY = 'khw2126'\n\n namespace = 'admin'\n job_name = f'mnist-job-{uuid.uuid4().hex[:4]}'\n\n\n fairing.config.set_builder('append', registry=CONTAINER_REGISTRY, image_name=\"mnist-simple\",base_image=\"khw2126/tensorflow-2.0.0-notebook-gpu:3.0.0\")\n\n #fairing.config.set_deployer('job', namespace=namespace, job_name=job_name, cleanup=False, stream_log=True)\n \n fairing.config.set_deployer('job', namespace=namespace, job_name=job_name, cleanup=False, stream_log=True,\n pod_spec_mutators=[\n k8s_utils.mounting_pvc(pvc_name=\"workspace-hufsice\", \n pvc_mount_path=\"/result\")])\n\n fairing.config.run()\n \nif __name__ == '__main__':\n if os.getenv('FAIRING_RUNTIME', None) is None:\n import uuid\n from kubeflow import fairing\n from kubeflow.fairing.kubernetes import utils as k8s_utils\n fairing_run()\n else:\n remote_train = Cifar10()\n remote_train.train()\n \n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Fairing/cifar10/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"302445208","text":"\"\"\"\n\n149. 
Max Points on a Line\n\n\nGiven n points on a 2D plane, find the maximum number of points that lie on the same straight line.\n\nExample 1:\n\nInput: [[1,1],[2,2],[3,3]]\nOutput: 3\nExplanation:\n^\n|\n| o\n| o\n| o\n+------------->\n0 1 2 3 4\nExample 2:\n\nInput: [[1,1],[3,2],[5,3],[4,1],[2,3],[1,4]]\nOutput: 4\nExplanation:\n^\n|\n| o\n| o o\n| o\n| o o\n+------------------->\n0 1 2 3 4 5 6\n\n\n\"\"\"\n\nfrom math import gcd, inf\nimport fractions\n\n# Definition for a point.\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\nclass Solution:\n\n def maxPoints(self, points):\n \"\"\"\n :type points: List[Point]\n :rtype: int\n \"\"\"\n lines = {} # slope, intercept, count\n max_pts = 0\n\n if not points:\n return 0\n elif len(points) == 1:\n return 1\n else:\n max_pts = 0\n for i, p1 in enumerate(points):\n same, count = 1, {}\n for j, p2 in enumerate(points):\n if i == j:\n continue\n else:\n if p1.x == p2.x and p1.y == p2.y and i != j:\n same += 1\n else:\n x_dif, y_dif = p2.x-p1.x, p2.y-p1.y\n div = gcd(x_dif, y_dif)\n x_dif, y_dif = x_dif/div, y_dif/div\n if (x_dif, y_dif) not in count:\n count[(x_dif, y_dif)] = 1\n else:\n count[(x_dif, y_dif)] += 1\n current_max = same\n for c in count:\n current_max = max(current_max, count[c]+same)\n max_pts = max(max_pts, current_max)\n return max_pts\n\n def maxPoints_fast(self, points):\n\n def gcd(m, n):\n if n == 0:\n return m\n elif m * n < 0:\n return -gcd(n, m % n)\n else:\n return gcd(n, m % n)\n\n def slope(p1, p2):\n x1, y1 = p1[0], p1[1]\n x2, y2 = p2[0], p2[1]\n if x1 == x2:\n return x1, 0\n elif y1 == y2:\n return 0, y1\n else:\n g = gcd(x2-x1, y2-y1)\n return ((x2-x1)/g, (y2-y1)/g)\n\n if len(points) <= 1:\n return len(points)\n\n ans = 0\n for i in range(len(points)-1):\n same_pts = 1\n max_pts = 0\n count = {}\n for j in range(i+1,len(points)):\n x1, y1 = points[i][0], points[i][1]\n x2, y2 = points[j][0], points[j][1]\n if x1 == x2 and y1 == y2:\n same_pts += 1\n else:\n s = slope(points[i], points[j])\n if s in count:\n count[s] += 1\n else:\n count[s] = 1\n if count:\n max_pts = max(max_pts, max(count.values()))\n ans = max(ans, max_pts + same_pts)\n return ans\n\n def maxPoints2(self, points):\n \"\"\"\n \n For each point i, only consider points after i since points i, j and points j, i have the same slope\n \"\"\"\n n = len(points)\n\n if n <= 1:\n return n\n max_count = 2\n for i in range(n):\n x1, y1 = points[i]\n count = 0\n slopes = {}\n same_pts = 0\n for j in range(i+1, n):\n x2, y2 = points[j]\n if x2 == x1 and y2 == y1:\n same_pts += 1\n else:\n if x2 == x1:\n s = float('inf')\n else:\n s = fractions.Fraction(y2-y1, x2-x1)\n if s in slopes:\n slopes[s] += 1\n else:\n slopes[s] = 1\n count = max(count, slopes[s])\n max_count = max(max_count, count+same_pts+1)\n\n return max_count\n\n\nif __name__ == '__main__':\n\n sol = Solution()\n method = sol.maxPoints2\n\n cases = [\n (method, ([[1,1],[2,2],[3,3]],), 3),\n (method, ([[1,1],[1,1],[1,1]],), 3),\n (method, ([[1,1],[1,1],[2,3]],), 3),\n (method, ([[0,0],[0,0]],), 2),\n (method, ([[0,0],[0,0],[0,0],[0,0]],), 4),\n (method, ([[1,1],[3,2],[5,3],[4,1],[2,3],[1,4]],), 4),\n (method, ([[3,1],[12,3],[3,1],[-6,-1]],), 4),\n (method, ([[94911150, 94911151],[94911151, 94911152],[0,0]],), 2),\n (method, ([[84,250],[0,0],[1,0],[0,-70],[0,-70],[1,-1],[21,10],[42,90],[-42,-230]],), 6),\n (method, ([[0,9],[138,429],[115,359],[115,359],[-30,-102],[230,709],[-150,-686],[-135,-613],\n [-60,-248],[-161,-481],[207,639],[23,79],[-230,-691],[-115,-341],[92,289],\n 
[60,336],[-105,-467],[135,701],[-90,-394],[-184,-551],[150,774]],), 12),\n ]\n\n for i, (func, case, expected) in enumerate(cases):\n ans = func(*case)\n if ans == expected:\n print(\"Case {:d} Passed\".format(i + 1))\n else:\n print(\"Case {:d} Failed; Expected {:s} != {:s}\".format(i + 1, str(expected), str(ans)))","sub_path":"algo/math/max_points_on_a_line.py","file_name":"max_points_on_a_line.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"162291219","text":"\n###################################################################\n#\n# CSSE1001/7030 - Assignment 2\n#\n# Student Number: 42377029\n#\n# Student Name: Joyce Wing Yan Lau\n#\n###################################################################\n\n#####################################\n# Support given below - DO NOT CHANGE\n#####################################\n\nfrom assign2_support import *\n\n#####################################\n# End of support \n#####################################\n\n# Add your code here\n\nimport tkinter as tk\nimport os.path\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\nclass TemperatureData(object):\n def __init__(self):\n self._stations = []\n self._data = {}\n self._toggle = []\n def load_data(self,stationfile):\n stationdata = Station(stationfile)\n self._data.update({stationdata.get_name():stationdata})\n self._toggle.append(True)\n self._stations.append(stationdata.get_name())\n def get_data(self):\n return self._data\n def get_stations(self):\n return self._stations\n def is_selected(self,i):\n return self._toggle[i]\n def toggle_selected(self,i):\n self._toggle[i] = not self._toggle[i]\n def get_ranges(self):\n temperatures = []\n years = []\n temperaturelist = []\n yearlist = []\n try:\n for i in self._stations:\n yearlist.append(self._data[i].get_year_range())\n temperaturelist.append(self._data[i].get_temp_range())\n for i in yearlist:\n for n in i:\n years.append(n)\n for i in temperaturelist:\n for n in i:\n temperatures.append(n)\n temp_range = (min(years), max(years),\\\n min(temperatures),max(temperatures))\n return temp_range\n except ValueError:\n pass\n\n \n\nclass Plotter(tk.Canvas):\n def __init__(self,master,data):\n super().__init__(master, bg=\"white\")\n master.title(\"Plotter\")\n self._data = data\n self._stations = self._data.get_stations()\n self._coord = []\n self._ranges = ()\n self._width = None\n self._height = None\n self._line = None\n## self._bestfitline = None\n self._bfcoords = []\n\n self._year = None\n self._temps = []\n self._yearlist = []\n def get_coords(self):\n try:\n self._ranges = self._data.get_ranges()\n self._width = self.winfo_width()\n self._height = self.winfo_height()\n self._coord = CoordinateTranslator(self._width,\\\n self._height,\\\n self._ranges[0],\\\n self._ranges[1],\\\n self._ranges[2],\\\n self._ranges[3])\n self._stations = self._data.get_stations()\n except TypeError:\n pass\n \n\n \n def plot(self,station,colour):\n datapoints = []\n tempcoords = []\n station_data = self._data.get_data().get(station)\n datapoints = station_data.get_data_points()\n self.get_coords()\n for i in datapoints:\n tempcoords.append(self._coord.temperature_coords(i[0],i[1])) \n self.create_line(tempcoords,fill=colour)\n\n def get_year(self,e):\n self._year = self._coord.get_year(e.x)\n print(self._year)\n return self._year\n\n def get_temps(self):\n self._temps = []\n for station in self._stations:\n station_data = 
self._data.get_data().get(station)\n self._temps.append(station_data.get_temp(self._year))\n print(self._temps)\n return self._temps\n \n def draw_line(self,e):\n\n if self._line is not None:\n self.delete(self._line)\n self._line = None\n \n if 0 <= e.x < self._width:\n coords = [(e.x, 0), (e.x, self._height)]\n\n self._line = self.create_line(coords)\n self.get_year(e)\n self.get_temps()\n\n\n def appendyear(self,e):\n if len(self._yearlist) == 2:\n self._yearlist.clear()\n if len(self._yearlist) < 2:\n self._yearlist.append(self._year)\n self.redraw()\n self.draw_line(e)\n\n\n\n def get_bfcoords(self,station):\n self._bfcoords = []\n station_data = self._data.get_data().get(station)\n datapoints = station_data.get_data_points()\n for datatuple in datapoints:\n for n,i in enumerate(datatuple):\n for y in self._yearlist:\n if y == i:\n self._bfcoords.append(self._coord.temperature_coords\\\n (datatuple[0],datatuple[1]))\n\n\n \n def bestfit(self):\n try:\n \n for n, station in enumerate(self._stations):\n self.get_bfcoords(station)\n if self._data.is_selected(n) == True:\n bfcoords = best_fit(self._bfcoords)\n self._bestfitline = self.create_line(bfcoords,\\\n fill = COLOURS[n % len(COLOURS)]\\\n ,width = 2)\n except ZeroDivisionError:\n pass \n \n \n def redraw(self):\n self.get_coords()\n self.delete(tk.ALL)\n for i, station in enumerate(self._stations):\n if self._data.is_selected(i) == True:\n self.plot(station, COLOURS[i % len(COLOURS)])\n self.bestfit()\n\n\n \nclass SelectionFrame(tk.Frame):\n def __init__(self,master,data,plot):\n super().__init__(master)\n self._master = master\n self._stationselect = tk.Label(master,text=\"Station Selection: \")\n self._stationselect.pack(side=tk.LEFT,anchor=tk.SW)\n self._data = data\n self._chk = \"\"\n self._chklist = []\n self._plotter = plot\n \n\n \n def checkbutton(self,station,colour,i):\n self._chk = tk.Checkbutton(self._master, text=station,fg=colour,\\\n command = lambda: self.toggle(i))\n self._chk.select()\n self._chk.pack(side=tk.LEFT,anchor=tk.SW)\n \n \n def toggle(self,i):\n self._data.toggle_selected(i)\n self._plotter.redraw()\n print(self._data.is_selected(i))\n print(self._data._toggle)\n\n\n \n \nclass DataFrame(tk.Frame):\n def __init__(self,master,data):\n super().__init__(master)\n self._data = data\n self._stations = self._data.get_stations()\n self._year = None\n self._year = tk.Label(master)\n self._year.pack(side = tk.TOP, anchor = tk.NW)\n self._templist = []\n self._alreadyloaded = []\n \n \n def display_year(self,year):\n\n self._year.config(text = \"Data for \" + str(year)+\":\")\n\n \n def add_label(self):\n\n for n,i in enumerate(self._stations):\n if i not in self._alreadyloaded:\n self._alreadyloaded.append(i)\n self._temp = tk.Label(self,text = \"\",fg=COLOURS[n%len(COLOURS)])\n self._temp.pack(side = tk.LEFT)\n self._templist.append(self._temp)\n\n\n\n \n def display_temps(self,temps): \n self.add_label()\n for n,s in enumerate(self._stations):\n self._templist[n].config(text=temps[n])\n\n \n \nclass TemperaturePlotApp(object):\n def __init__(self, master):\n self._master = master\n master.title(\"Temperature Plot Application\")\n self._temperatureData = TemperatureData()\n self._stations = self._temperatureData.get_stations()\n \n menubar = tk.Menu(master)\n master.config(menu=menubar)\n filemenu = tk.Menu(menubar)\n menubar.add_cascade(label=\"File\",menu=filemenu)\n filemenu.add_command(label=\"Open\",command=self.open)\n filemenu.add_command(label=\"Exit\",command=self.close)\n\n 
master.protocol(\"WM_DELETE_WINDOW\",self.close)\n \n self._plotter = Plotter(master,self._temperatureData)\n self._plotter.pack(expand=1,fill=tk.BOTH, side=tk.TOP)\n self._plotter.bind(\"\",self.draw_line)\n self._plotter.bind(\"\",self.resize_window)\n self._plotter.bind(\"\",self._plotter.appendyear)\n\n self._dataframe = DataFrame(master,self._temperatureData)\n self._dataframe.pack(side = tk.TOP,anchor=tk.NW)\n\n self._selectionframe = SelectionFrame(self._master,\\\n self._temperatureData,\\\n self._plotter)\n self._selectionframe.pack(expand=1, side=tk.TOP,anchor = tk.SW)\n\n\n self._alreadyloaded = []\n\n \n def open(self):\n filename = filedialog.askopenfilename()\n if filename:\n self._temperatureData.load_data(filename)\n self.checkbox()\n self._plotter.redraw()\n\n \n def checkbox(self):\n for n,station in enumerate(self._stations):\n if station not in self._alreadyloaded:\n self._alreadyloaded.append(station)\n self._selectionframe.checkbutton(station,\\\n COLOURS[n % len(COLOURS)],n)\n \n def draw_line(self,e):\n self._plotter.draw_line(e)\n self._dataframe.display_year(self._plotter._year)\n self._dataframe.display_temps(self._plotter._temps)\n\n \n def resize_window(self,e):\n self._plotter.redraw()\n \n def close(self):\n self._master.destroy()\n\n#resize window\n\n##################################################\n# !!!!!! Do not change (or add to) the code below !!!!!\n###################################################\n\ndef main():\n root = tk.Tk()\n app = TemperaturePlotApp(root)\n root.geometry(\"800x400\")\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"CSSE7030_Assignment 2/assign2backup.py","file_name":"assign2backup.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"555639645","text":"n=input(\"enter number to find armstrong or not\")\nx=len(n)\ny=int(n)\nnum=y\ntemp=0\nt=0\nwhile(num!=0):\n te=num%10\n temp=temp+te**x\n num=num//10\nif(y==temp):\n print(\"it is armstrong\")\nelse:\n print(\"it is not armstrong\")\n","sub_path":"armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"264343142","text":"from psycopg2.extensions import AsIs\nimport logging\n\nfrom odoo import models\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountBankStatementLine(models.Model):\n _inherit = 'account.bank.statement.line'\n\n def fast_counterpart_creation(self):\n company_currency = self.journal_id.company_id.currency_id\n statement_currency = self.journal_id.currency_id or company_currency\n partner_id = self.partner_id.id or None\n st_line_currency = self.currency_id or statement_currency\n for st_line in self:\n # If we are in multi-currency use standard process\n if st_line_currency.id != company_currency.id:\n vals = {\n 'name': st_line.name,\n 'debit': st_line.amount < 0 and -st_line.amount or 0.0,\n 'credit': st_line.amount > 0 and st_line.amount or 0.0,\n 'account_id': st_line.account_id.id,\n }\n return st_line.process_reconciliation(new_aml_dicts=[vals])\n move_vals = self._prepare_reconciliation_move(st_line.statement_id.name)\n move = self.env['account.move'].create(move_vals)\n debit = st_line.amount < 0 and -st_line.amount or 0.0\n credit = st_line.amount > 0 and st_line.amount or 0.0\n debit_cash_basis = 0 if move.journal_id.type in ('sale', 'purchase') else debit\n credit_cash_basis = 0 if 
move.journal_id.type in ('sale', 'purchase') else credit\n            aml_dict = {\n                'name': st_line.name,\n                'debit': debit,\n                'credit': credit,\n                'balance': debit - credit,\n                'account_id': st_line.account_id.id,\n                'move_id': move.id,\n                'partner_id': partner_id,\n                'statement_id': st_line.statement_id.id,\n                'statement_line_id': st_line.id,\n                'reconciled': False,\n                'amount_residual': debit - credit if st_line.account_id.reconcile else 0,\n                'amount_residual_currency': 0,\n                'debit_cash_basis': debit_cash_basis,\n                'credit_cash_basis': credit_cash_basis,\n                'balance_cash_basis': debit_cash_basis - credit_cash_basis,\n                'company_currency_id': move.company_id.currency_id.id,\n                'ref': move.ref,\n                'journal_id': move.journal_id.id,\n                'date': move.date,\n                'date_maturity': move.date,\n                'company_id': st_line.account_id.company_id.id,\n                'user_type_id': st_line.account_id.user_type_id.id,\n            }\n            columns = aml_dict.keys()\n            values = [aml_dict[column] for column in columns]\n            self.env.cr.execute('''INSERT INTO account_move_line(%s) VALUES %s''', (AsIs(','.join(columns)), tuple(values)))\n            # Create the counterpart line\n            account = credit > 0 and self.statement_id.journal_id.default_credit_account_id or self.statement_id.journal_id.default_debit_account_id\n            aml_dict['debit'] = credit\n            aml_dict['credit'] = debit\n            aml_dict['balance'] = credit - debit\n            aml_dict['account_id'] = account.id\n            aml_dict['amount_residual'] = debit - credit if account.reconcile else 0\n            aml_dict['debit_cash_basis'] = credit_cash_basis\n            aml_dict['credit_cash_basis'] = debit_cash_basis\n            aml_dict['balance_cash_basis'] = credit_cash_basis - debit_cash_basis\n            columns = aml_dict.keys()\n            values = [aml_dict[column] for column in columns]\n            self.env.cr.execute('''INSERT INTO account_move_line(%s) VALUES %s''', (AsIs(','.join(columns)), tuple(values)))\n            account_move = self.env['account.move'].browse(move.id)\n            account_move._amount_compute()\n            account_move.post()\n            # record the move name on the statement line to be able to retrieve it in case of unreconciliation\n            st_line.write({'move_name': move.name})\n","sub_path":"pos_fast_reconcile/models/account_bank_statement.py","file_name":"account_bank_statement.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"409453331","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as ec\r\nfrom psutil import virtual_memory\r\nimport pandas as pd\r\nimport multiprocessing\r\nimport pyautogui\r\nimport datetime\r\nimport time\r\n\r\n\r\ndef convert_date(item_date):\r\n    item_date = item_date.replace(\"년 \", \"-\").replace(\"월 \", \"-\").replace(\"일\", \"\")\r\n    item_date = datetime.datetime.strptime(item_date, \"%Y-%m-%d\").date()\r\n    return item_date\r\n\r\n\r\ndef merge_review(item_L_review, item_review):\r\n    if item_review == '' and item_L_review != '':\r\n        return item_L_review\r\n    else:\r\n        return item_review\r\n\r\n\r\nif __name__ == '__main__':\r\n    url = \"https://play.google.com/store/apps/details?id=com.yantech.orient.tojung&hl=ko&showAllReviews=true\"\r\n    url2 = \"https://play.google.com/store/apps/details?id=com.ipapas.sajulite&hl=ko&showAllReviews=true\"\r\n    url3 = \"https://play.google.com/store/apps/details?id=com.un7qi3.forceteller&hl=ko&showAllReviews=true\"\r\n    url4 = 
\"https://play.google.com/store/apps/details?id=com.thingsflow.hellobot&hl=ko&showAllReviews=true\"\r\n url5 = \"https://play.google.com/store/apps/details?id=handasoft.mobile.divination&hl=ko&showAllReviews=true\"\r\n\r\n tstart_time = time.time()\r\n driverPath = \"D:\\\\shin2no\\\\chromedriver.exe\"\r\n driver = webdriver.Chrome(driverPath)\r\n driver.get(url)\r\n pyautogui.keyDown('win')\r\n pyautogui.press('right', presses=2, interval=0.2)\r\n pyautogui.keyUp('win')\r\n pyautogui.press('enter')\r\n\r\n # 리뷰 최하위까지 스크롤 및 더보기 버튼 클릭\r\n wait = WebDriverWait(driver, 180)\r\n driver.find_element_by_xpath(\"//div[@class='ry3kXd Ulgu9']/div[@class='MocG8c UFSXYb LMgvRb KKjvXb']\").click()\r\n time.sleep(0.5)\r\n driver.find_element_by_xpath(\"//div[@class='OA0qNb ncFHed']/div[@data-value='2']\").click()\r\n time.sleep(0.5)\r\n new_height = 0\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n print(\">>> \\t last_height : %d \\t new_height : %d\" % (new_height, last_height))\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n\r\n while 1:\r\n wait.until(ec.invisibility_of_element_located((By.XPATH, \"//div[@jsname='lYU69']/div[@class='Fx1lse']\")))\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n memory_usage = virtual_memory()[2]\r\n\r\n if new_height == last_height:\r\n if driver.find_elements_by_xpath(\"//span[@class='RveJvd snByac']\"):\r\n driver.execute_script(\"arguments[0].click();\",\r\n driver.find_element_by_xpath(\"//span[@class='RveJvd snByac']\"))\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n else:\r\n for i in range(3):\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(2)\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height != last_height:\r\n break\r\n print(\">>>>>> \\t %d th last height checking and wait 2secs...\" % (i + 1))\r\n if new_height == last_height and not driver.find_elements_by_xpath(\"//span[@class='RveJvd snByac']\"):\r\n print(\"******** finished <> ********\")\r\n break\r\n print(\">>>>>> \\t last_height : %d \\t new_height : %d \\t memory_usage : %.1f%%\" % (\r\n last_height, new_height, memory_usage))\r\n last_height = new_height\r\n #elif new_height > 80000:\r\n # driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n # break\r\n elif memory_usage > 86.9 or new_height > 3000000: # 크롬 메모리 부족으로 인한 종료 방지\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n print(\">>> \\t The usage of system memory exceeds 90%, stopping crawling.\")\r\n break\r\n else:\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n print(\">>> \\t last_height : %d \\t new_height : %d \\t memory_usage : %.1f%%\" % (\r\n last_height, new_height, memory_usage))\r\n last_height = new_height\r\n time.sleep(5)\r\n\r\n # 장문 리뷰 전체 리뷰 버튼 일괄 클릭 동작\r\n spread_review = driver.find_elements_by_xpath(\"//button[@jsaction='click:TiglPc']\")\r\n for i in range(len(spread_review)):\r\n if spread_review[i].is_displayed():\r\n driver.execute_script(\"arguments[0].click();\", spread_review[i]) # 클릭 대상이 가려져 execute_script()를 사용해야함\r\n time.sleep(0.5)\r\n print(\">>> \\t (%d/%d)th review button is clicked...\" % ((i + 1), len(spread_review)))\r\n print(\"******** finished << all_review button >> clicking ********\")\r\n 
time.sleep(5)\r\n\r\n    # Crawling\r\n    num_cores = multiprocessing.cpu_count()\r\n\r\n    start_time = time.time()\r\n    L_reviews = [element.text for element in driver.find_elements_by_xpath(\"//span[@jsname='fbQN7e']\")]\r\n    T_reviews = [[element.text, element] for element in driver.find_elements_by_xpath(\"//span[@class='IEFhEe']\")]\r\n    print(\"******** finished << L/T_review : %s, %s >> crawling ********\" % (len(L_reviews), str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0]))\r\n\r\n    start_time = time.time()\r\n    reviews = [element.text for element in driver.find_elements_by_xpath(\"//span[@jsname='bN97Pc']\")]\r\n    print(\"******** finished << review : %s >> crawling ********\" % str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    start_time = time.time()\r\n    for xpath in T_reviews:  # get the xpath of each element and check the n-th index of the repeated div\r\n        xpath[1] = driver.execute_script(\"gPt=function(c){if(c.id!==''){return'id(\\\"'+c.id+'\\\")'}if(c===document.body){return c.tagName}var a=0;var e=c.parentNode.childNodes;for(var b=0;b> crawling ********\" %\r\n          str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    start_time = time.time()\r\n    dates = [element.text for element in driver.find_elements_by_xpath(\"//div[@class='bAhLNe kx8XBd']/div/span[@class='p2TkOb']\")]\r\n    pool = multiprocessing.Pool(num_cores)\r\n    dates = pool.map(convert_date, dates)\r\n    pool.close()\r\n    pool.join()\r\n    print(\"******** finished << date : %s >> crawling and replace ********\" %\r\n          str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    start_time = time.time()\r\n    likes = [element.text for element in driver.find_elements_by_xpath(\"//div[@aria-label='이 리뷰가 유용하다는 평가를 받은 횟수입니다.']\")]\r\n    print(\"******** finished << like : %s >> crawling ********\" %\r\n          str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    start_time = time.time()\r\n    stars = [element.get_attribute('aria-label')[10:11] for element in driver.find_elements_by_xpath(\"//span[@class='nt2C1d']/div[@class='pf5lIe']/div[@role='img']\")]\r\n    print(\"******** finished << star : %s >> crawling ********\" %\r\n          str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    # Merge the data\r\n    start_time = time.time()\r\n    results = list(zip(dates, stars, likes, reviews))\r\n    print(\"******** finished << review merge : %s >> ********\" %\r\n          str(datetime.timedelta(seconds=time.time() - start_time)).split(\".\")[0])\r\n\r\n    # Save as csv\r\n    data = pd.DataFrame(results, dtype='object')\r\n    data.columns = ['날짜', '평점', '동의', '리뷰']\r\n    pkg_name = url.split('?id=')[-1].split('&')[0]\r\n    date_format = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d_%H-%M-%S\")\r\n    file_name = pkg_name + \"_\" + date_format\r\n    data.to_csv('D:\\\\google_review\\\\{}.csv'.format(file_name), encoding='utf-8-sig')\r\n    print(str(datetime.timedelta(seconds=time.time() - tstart_time)).split(\".\")[0])\r\n    driver.quit()\r\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"67286848","text":"#import keras\r\n#from keras.layers import Input, Dense, merge\r\n#from keras.models import Model\r\n#from keras.layers import Conv2D, MaxPooling2D, Reshape, BatchNormalization\r\n#from keras.layers import Activation, Dropout, Flatten, Dense\r\n#from keras.models import load_model\r\nimport numpy as np\r\nfrom PIL import Image, 
ImageDraw\r\nimport os\r\nimport numpy\r\n\r\n# Model path\r\nmodel_path = 'Models/'\r\n\r\ndef predict(im, y, index, folder = \"predictions\"):\r\n\tlineheight = 20\r\n\tpadding = 20\r\n\tbaseh = height-10\r\n\td = ImageDraw.Draw(im)\r\n\tfor i in range(5):\r\n\t\tx = width/2 - padding * (i-2)\r\n\t\td.line([(x, baseh),(x, baseh - lineheight * y[i])], fill=None, width=5)\r\n\tim.save(folder + \"/\" + str(index)+\".jpeg\")\r\n\r\nSOURCE = \"AXIO\"\r\nSOURCE = \"FB\"\r\n\r\nif(SOURCE == \"AXIO\"):\r\n\tprint(\"Load data\")\r\n\tX_axio = np.load('Datasets/axionable_data/X_train_axio.npy')\r\n\tY_axio = np.load('Datasets/axionable_data/Y_train_axio.npy')\r\n\tY = np.load(\"predicted.npy\")\r\n\tprint(\"Predicted\", Y[:10])\r\n\r\n\t# (26449, 90, 250, 3)\r\n\tshape = X_axio.shape\r\n\twidth = shape[2]\r\n\theight = shape[1]\r\n\r\n\tfor index in range(shape[0])[:1000]:\r\n\t\tim = Image.fromarray(X_axio[index])\r\n\t\ty = Y[index]\r\n\t\tpredict(im, y, index)\r\n\r\nif(SOURCE == \"FB\"):\r\n\tY = np.load(\"predicted1.npy\")\r\n\tprint(\"Predicted\", Y[:10])\r\n\r\n\tpath = \"D:\\\\Workspace\\\\IronCar\\\\DataSets\\\\FACE\\\\car_repo\\\\records\\\\\"\r\n\tfiles = os.listdir(path)\r\n\tindex = 0\r\n\tfor file in files:\r\n\t\tim = numpy.asarray(Image.open(path + file))\r\n\t\tim = im[-110:-20,:,:]\r\n\r\n\t\t# (26449, 90, 250, 3)\r\n\t\tshape = im.shape\r\n\t\twidth = shape[1]\r\n\t\theight = shape[0]\r\n\r\n\r\n\t\tim = Image.fromarray(im)\r\n\t\ty = Y[index]\r\n\t\tpredict(im, y, index, \"predictions1\")\r\n\t\tindex+=1\r\n\r\n\r\n#for file in files:\r\n#\tim = numpy.asarray(Image.open(path + file))\r\n#\tim = im[:90,:,:]\r\n#\tim = Image.fromarray(im)\r\n#\ty = Y[index]\r\n#\tpredict(im, y, index)\r\n\r\n","sub_path":"src/experiments/generalisation-test.20181118.kkn/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"127193234","text":"# Generalised Hamming Numbers\n# Problem 204\n\nimport help \n\ndef solve(m, max):\n pr = help.primesUpTo(m)\n print(countHamming(pr, max))\n \ndef countHamming(pr, max):\n return len(getHamming(pr, max))\n \ndef getHamming(pr, max):\n hamming = []\n if len(pr) == 0:\n return [1]\n for h in getHamming(pr[1:], max):\n p = 1\n while p*h <= max:\n hamming.append(p*h)\n p *= pr[0]\n return hamming","sub_path":"problem204.py","file_name":"problem204.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"141687977","text":"import os\nimport glob\nimport re\nfrom typing import Any, Dict, Optional\n\nimport pytest\nfrom pandas import DataFrame\n\nfrom lasso import Project\n\n\"\"\"\nRun tests from bash/shell\nRun just the tests labeled project using `pytest -m project`\nTo run with print statments, use `pytest -s -m project`\n\"\"\"\nCUBE_DIR = os.path.join(os.getcwd(), \"examples\", \"cube\")\nROADWAY_DIR = os.path.join(os.getcwd(), \"examples\", \"stpaul\")\nBUILD_TRANSIT_DIR = os.path.join(CUBE_DIR, \"single_transit_route_attribute_change\")\nSCRATCH_DIR = os.path.join(os.getcwd(), \"tests\", \"scratch\")\n## create list of example logfiles to use as input\nlogfile_list = [os.path.join(CUBE_DIR, \"st_paul_test.log\")]\n\n\n@pytest.mark.parametrize(\"logfilename\", logfile_list)\n@pytest.mark.travis\ndef test_logfile_read(request, logfilename):\n \"\"\"\n Tests that the logfile can be read in and\n produces a DataFrame.\n \"\"\"\n 
print(\"\\n--Starting:\", request.node.name)\n\n print(\"Reading: {}\".format(logfilename))\n lf = Project.read_logfile(logfilename)\n assert type(lf) == DataFrame\n\n\n@pytest.mark.parametrize(\"logfilename\", logfile_list)\n@pytest.mark.travis\ndef test_highway_project_card(request, logfilename):\n \"\"\"\n Tests that the logfile can be read in and\n produces a DataFrame.\n \"\"\"\n print(\"\\n--Starting:\", request.node.name)\n\n print(\"Reading: {}\".format(logfilename))\n lf = Project.read_logfile(logfilename)\n assert type(lf) == DataFrame\n\n test_project = Project.create_project(\n roadway_log_file=logfilename, base_roadway_dir=ROADWAY_DIR\n )\n\n assert type(test_project.roadway_changes) == DataFrame\n # assert(type(test_project.card_data)==Dict[str, Dict[str, Any]])\n assert type(test_project.card_data) == dict\n\n test_project.write_project_card(\n os.path.join(\n SCRATCH_DIR,\n \"t_\" + os.path.splitext(os.path.basename(logfilename))[0] + \".yml\",\n )\n )\n\n\n@pytest.mark.parametrize(\"logfilename\", logfile_list)\n@pytest.mark.skip(\"Need to update project card schema\")\ndef test_highway_change_project_card_valid(request, logfilename):\n print(\"\\n--Starting:\", request.node.name)\n\n print(\"Reading: {}\".format(logfilename))\n lf = Project.read_logfile(logfilename)\n test_project = Project.create_project(\n roadway_log_file=logfilename, base_roadway_dir=ROADWAY_DIR\n )\n\n test_project.write_project_card(\n os.path.join(\n SCRATCH_DIR,\n \"t_\" + os.path.splitext(os.path.basename(logfilename))[0] + \".yml\",\n )\n )\n\n from network_wrangler import ProjectCard\n\n valid = ProjectCard.validate_project_card_schema(\n os.path.join(\n SCRATCH_DIR,\n \"t_\" + os.path.splitext(os.path.basename(logfilename))[0] + \".yml\",\n )\n )\n\n assert valid == True\n","sub_path":"tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"224811348","text":"\"\"\"\n@author: Murad.Mirzabalaev\n\"\"\"\nfrom itertools import islice, cycle\nfrom random import choice, sample\nimport itertools\n\nfrom cluster_io_model import Disk, Request, Monitor, request_status, GIGABYTE, Client, MEGABYTE, \\\n MILLISECOND, request_type, \\\n DISK_READ_SPEED, DISK_WRITE_SPEED\n\n\ndef chunks(n, iterable, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(fillvalue=fillvalue, *args)\n\n\ndef enum(**enums):\n return type('Enum', (), enums)\n\n\nDISK_CAPACITY = 500\n\nchunks_placement_scheme = enum(\n random='random',\n sharding='sharding',\n groups='groups',\n clusters='clusters'\n)\n\nchunk_server_choice_policy = enum(\n random='random',\n less_loaded='less loaded',\n min_disk_id='minimal disk id'\n)\n\n\nclass ModifiedRequest(Request):\n def __init__(self, cl_id, req_id, disk_list, req_size, req_type):\n Request.__init__(self, cl_id, req_id, disk_list, req_size, req_type)\n self.disk_id = self.client_id\n\n\nclass ModifiedDisk(Disk):\n def __init__(self, monitor, disk_id, rpms=7200,\n time_quant=.01, read_speed=DISK_READ_SPEED, write_speed=DISK_WRITE_SPEED):\n Disk.__init__(self, None, disk_id, rpms, time_quant, read_speed, write_speed)\n self.monitor = monitor\n\n def send(self, request):\n self.request_queue.remove(request)\n self.monitor.receive(request)\n\n def on_time_quant_increment(self):\n Disk.on_time_quant_increment(self)\n\n def receive(self, request):\n assert request.status == request_status.client_to_disk and request.disk_id == 
self.disk_id\n Disk.receive(self, request)\n\n\nclass ModifiedClient(Client):\n def __init__(self, client_id, monitor, disk_list, block_size):\n self.disk_list = None\n self.monitor = monitor\n self.block_size = block_size\n self.clid = client_id\n self.send_queue = []\n self.disk_list = disk_list\n\n def send(self, request):\n req_type = choice([request_type.read, request_type.write])\n req = ModifiedRequest(self.clid, request, None, self.block_size, req_type=req_type)\n req.status = request_status.client_to_disk\n self.disk_list[req.disk_id].receive(req)\n\n def receive(self, request):\n self.send_queue.append(request)\n\n def on_time_quant_increment(self):\n if self.send_queue:\n req_id = self.send_queue.pop()\n self.send(req_id)\n\n\nclass ReplicationMonitor(Monitor):\n def __init__(self, disk_num, block_size, n, k, disk_capacity=DISK_CAPACITY,\n chunks_placement=chunks_placement_scheme.random,\n chunk_serv_choice_policy=chunk_server_choice_policy.random,\n broken_disks_percent=1, used_space_percent=60,\n disks_per_cluster=20, replicating_blocks_num=100,\n time_quant=.01):\n\n self.disk_num = disk_num\n self.time = 0.0\n self.time_quant = time_quant\n self.used_space_percent = used_space_percent\n self.n = n\n self.k = k\n self.disk_capacity = disk_capacity * GIGABYTE\n self.disks = {}\n self.clients = {}\n self.block_size = float(block_size) * MEGABYTE\n self.blocks = int((disk_num * self.disk_capacity * (1.0 - self.used_space_percent * 0.01 )) /\n (self.block_size * self.n))\n self.broken_disks_percent = broken_disks_percent\n self.replicating_blocks_num = replicating_blocks_num\n self.broken_blocks = {}\n self.broken_disks = []\n self.replicating_blocks = []\n self.chunk_placement = chunks_placement\n self.chunk_serv_choice_policy = chunk_serv_choice_policy\n self.replication_data = 0.0\n self.disks_per_cluster = disks_per_cluster\n if self.chunk_placement == chunks_placement_scheme.clusters:\n if self.disk_num % self.disks_per_cluster != 0:\n res = self.disks_per_cluster - (self.disk_num % self.disks_per_cluster)\n self.disk_num += res\n if self.chunk_placement == chunks_placement_scheme.groups:\n if self.disk_num % self.n != 0:\n res = self.n - (self.disk_num % self.n)\n self.disk_num += res\n for disk_id in range(0, self.disk_num):\n self.disks[disk_id] = ModifiedDisk(self, disk_id, time_quant=self.time_quant)\n self.clients[disk_id] = ModifiedClient(disk_id, self, self.disks,\n self.block_size)\n self.generate_block_distribution()\n for disk_id in self.broken_disks:\n del self.disks[disk_id]\n del self.clients[disk_id]\n\n def generate_block_distribution(self):\n self.broken_blocks = {}\n if self.chunk_placement == chunks_placement_scheme.random:\n\n for block_id in range(0, self.blocks):\n self.broken_blocks[block_id] = sample(range(0, self.disk_num), self.n)\n\n if self.chunk_placement == chunks_placement_scheme.sharding:\n left_border = 0\n for block_id in range(0, self.blocks):\n right_border = left_border + self.n\n self.broken_blocks[block_id] = list(islice(cycle(self.disks.keys()), left_border, right_border))\n left_border += 1\n\n if self.chunk_placement == chunks_placement_scheme.groups:\n groups = list(chunks(self.n, range(0, self.disk_num)))\n for block_id in range(0, self.blocks):\n self.broken_blocks[block_id] = sample(choice(groups), self.n)\n\n if self.chunk_placement == chunks_placement_scheme.clusters:\n clusters = list(chunks(self.disks_per_cluster, range(0, self.disk_num)))\n for block_id in range(0, self.blocks):\n self.broken_blocks[block_id] = 
sample(choice(clusters), self.n)\n\n self.generate_failure()\n if self.check_data_loss():\n while self.check_data_loss():\n self.generate_block_distribution()\n self.generate_failure()\n\n def generate_failure(self):\n broken_disks_num = int(len(self.disks) * self.broken_disks_percent * 0.01)\n self.broken_disks = sample(range(0, len(self.disks)), broken_disks_num)\n\n for block_id in self.broken_blocks:\n for disk_id in self.broken_disks:\n if disk_id in self.broken_blocks[block_id]:\n self.broken_blocks[block_id].remove(disk_id)\n\n self.broken_blocks = {key: value for key, value in self.broken_blocks.items()\n if len(self.broken_blocks[key]) < self.n}\n\n for block_id in self.broken_blocks:\n self.replication_data += ((self.n - len(self.broken_blocks[block_id])) * self.block_size)\n\n def get_new_request_data(self, policy):\n if policy == chunk_server_choice_policy.random:\n\n block_id = choice(\n [block_id for block_id in list(self.broken_blocks.keys()) if block_id not in self.replicating_blocks])\n random_disk = choice(\n [disk_id for disk_id in self.broken_blocks[block_id] if disk_id not in self.broken_disks])\n if len(self.broken_blocks[block_id]) < self.n:\n self.replicating_blocks.append(block_id)\n self.broken_blocks[block_id].append(choice(self.broken_disks))\n if len(self.broken_blocks[block_id]) == self.n:\n del self.broken_blocks[block_id]\n return block_id, random_disk\n\n if policy == chunk_server_choice_policy.less_loaded:\n\n block_id = choice(\n [block_id for block_id in list(self.broken_blocks.keys()) if block_id not in self.replicating_blocks])\n chunk_servers = [disk_id for disk_id in self.broken_blocks[block_id] if disk_id not in self.broken_disks]\n less_loaded_disk = None\n\n for chnk_srvr_id in chunk_servers:\n if less_loaded_disk == None:\n less_loaded_disk = chnk_srvr_id\n if len(self.disks[chnk_srvr_id].request_queue) < len(self.disks[less_loaded_disk].request_queue):\n less_loaded_disk = chnk_srvr_id\n\n if len(self.broken_blocks[block_id]) < self.n:\n self.replicating_blocks.append(block_id)\n self.broken_blocks[block_id].append(choice(self.broken_disks))\n if len(self.broken_blocks[block_id]) == self.n:\n del self.broken_blocks[block_id]\n return block_id, less_loaded_disk\n\n if policy == chunk_server_choice_policy.min_disk_id:\n\n block_id = choice(\n [block_id for block_id in list(self.broken_blocks.keys()) if block_id not in self.replicating_blocks])\n chunk_servers = [disk_id for disk_id in self.broken_blocks[block_id] if disk_id not in self.broken_disks]\n min_disk_id = min(chunk_servers)\n\n if len(self.broken_blocks[block_id]) < self.n:\n self.replicating_blocks.append(block_id)\n self.broken_blocks[block_id].append(choice(self.broken_disks))\n if len(self.broken_blocks[block_id]) == self.n:\n del self.broken_blocks[block_id]\n return block_id, min_disk_id\n\n def receive(self, request):\n self.replicating_blocks.remove(request.req_id)\n\n def check_data_loss(self):\n if self.broken_blocks:\n for block_id in self.broken_blocks:\n if len(self.broken_blocks[block_id]) < self.k:\n return True\n return False\n\n\n def Run(self):\n while len(self.broken_blocks) > 0:\n self.time += self.time_quant\n while len(self.replicating_blocks) <= self.replicating_blocks_num and len(self.broken_blocks) > len(\n self.replicating_blocks):\n block_id, client_id = self.get_new_request_data(self.chunk_serv_choice_policy)\n self.clients[client_id].receive(block_id)\n\n for disk_id in self.disks:\n self.clients[disk_id].on_time_quant_increment()\n 
self.disks[disk_id].on_time_quant_increment()\n\n replication_time = self.time * MILLISECOND / 60.0\n mean_replication_data = self.replication_data / len(self.disks)\n throughput = self.replication_data / float(self.time * MILLISECOND)\n return replication_time, mean_replication_data, throughput","sub_path":"replication_model_nk_coding.py","file_name":"replication_model_nk_coding.py","file_ext":"py","file_size_in_byte":10460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"281935662","text":"\"\"\" end-to-end test Buy new and old product \"\"\"\nimport unittest\nfrom random import randint\n\nfrom tools.random_strings import random_name\nfrom common.base_tests.gui_basetest import BaseTest\nfrom common.pages.catalog import CatalogPage\nfrom common.pages.product import ProductPage\nfrom common.pages.payment import PaymentPage\nfrom common.pages.payment_review import PaymentReviewPage\nfrom common.pages.payment_info import PaymentInfoPage\nfrom common.pages.success import SuccessPage\n\nfrom common.data._impoved import *\nfrom common.common_actions import assert_and_log, back\nfrom common.common_actions import navigate_to\nfrom common.common_actions import validate\n\nfrom common.process_request import ProcessRequest\n\nclass TC303(BaseTest):\n \"\"\"\n Validating end-to-end scenario of buying 2 products: new and old\n \"\"\"\n def setUp(self):\n \"\"\"\n Create a new product for the scenario\n It would be deleted at the tearDown\n :return:\n \"\"\"\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']\n def test_buy_now(self):\n \"\"\"\n Create a new product and buy it along with existing one\n :return:\n \"\"\"\n catalog_page = CatalogPage(self.driver)\n product_page = ProductPage(self.driver)\n payment_page = PaymentPage(self.driver)\n payment_review_page = PaymentReviewPage(self.driver)\n payment_info_page = PaymentInfoPage(self.driver)\n success_page = SuccessPage(self.driver)\n # buy the new product\n navigate_to(self.driver, ProductPage.URL(self.new_product['product']['title']))\n product_page.add_to_cart.click()\n # by an old product\n catalog_page.catalog.click()\n # Sort products to move the newly created to last page\n catalog_page.sorting_order.select_by_visible_text(\"Date, old to new\")\n catalog_page.image.random_click()\n product = product_page.product.get_text()\n product_page.add_to_cart.click()\n catalog_page.catalog.click()\n catalog_page.cart.click()\n payment_dic = {\n 'address' : f'{randint(1, 99999)} {random_name(5, 8)}',\n 'city' : \"San Francisco\",\n 'email_or_mobile_phone_number_input' : random_name(8) + \"@gmail.com\",\n 'last_name' : random_name(3, 12),\n 'zip_code' : '94107',\n }\n if randint(0, 1):\n payment_dic['first_name'] = random_name(4, 16)\n if randint(0, 1):\n payment_dic['address2'] = random_name(5)\n for _ in payment_dic:\n exec(f\"payment_page.{_}.enter(payment_dic['{_}'])\")\n payment_page.continue_to_shipping.click()\n payment_review_page.continue_to_payment.click()\n payment_info_page.full_address.get_text()\n # validate address\n for _ in ['address', 'city', 'zip_code']:\n assert_and_log(payment_dic[_] in 
payment_info_page.full_address.get_text(),\n f\"{_} in full address\")\n payment_info_page.enter_bogus_payment(1)\n assert_and_log(success_page.thank_you.find_visible_element(),\n \"'Thank you' appeared as a sign of successful transaction\",\n continue_on_error=False)\n validate(success_page.basic_validation_list)\n\n def tearDown(self):\n \"\"\"\n in addition to default tearDown delete new product that was created in setUp\n :return:\n \"\"\"\n ProcessRequest(f\"products/{self.product_id}.json\").send_request(\n 'DELETE', continue_on_error=True)\n super().tearDown()\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n","sub_path":"common/tests/returnly/end_to_end/tc304_buy_new_and_old_product.py","file_name":"tc304_buy_new_and_old_product.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"210737761","text":"import math\r\n\r\ndef calc(n,m):\r\n\tm*=2\r\n\tu = int(math.sqrt(m))\r\n\tif u*(u+1) <= m: u+=1\r\n\treturn min(n,u)\r\n\r\n\r\ndef main():\r\n\tt = int(input())\r\n\tfor _ in range(t):\r\n\t\tn,m = map(int,input().split())\r\n\t\tprint(calc(n,m))\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"HackerRank/Algorithms/Graph/Clique.py","file_name":"Clique.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"13532167","text":"from django.shortcuts import render, get_object_or_404\r\nfrom .models import BlogType, Blog\r\nfrom django.db.models import Count\r\nfrom django.core.paginator import Paginator\r\nfrom read_record.utils import read_record_once_read \r\nfrom comment.forms import CommentForm\r\nfrom Myblog.forms import LoginForm\r\n\r\n# Create your views here.\r\n\r\n\r\ndef blogs_with_paginator(request, blogs):\r\n paginator = Paginator(blogs, 5)\r\n page_nums = request.GET.get('page', 1)\r\n page_of_blog = paginator.get_page(page_nums)\r\n curent_page = page_of_blog.number #当前页面的页码数\r\n page_range = list(range(max(curent_page-1, 1), curent_page))+list(range(curent_page, min(curent_page+1, paginator.num_pages)+1))\r\n if curent_page-1 >= 3:\r\n page_range.insert(0, '...')\r\n if paginator.num_pages-curent_page >= 3:\r\n page_range.append('...')\r\n\r\n if page_range[0] != 1:\r\n page_range.insert(0, 1)\r\n if page_range[-1] != paginator.num_pages:\r\n page_range.append(paginator.num_pages)\r\n context = {}\r\n context['blogs'] = page_of_blog.object_list\r\n context['page_range'] = page_range\r\n context['page_nums'] = page_of_blog\r\n return context\r\n\r\n\r\n\r\ndef blog_list(request):\r\n blogs = Blog.objects.all()\r\n context = blogs_with_paginator(request, blogs)\r\n return render(request, 'blog_list.html', context)\r\n\r\n\r\ndef blog_detail(request, blog_pk):\r\n blog = get_object_or_404(Blog, pk=blog_pk)\r\n read_cookie_key = read_record_once_read(request, blog)\r\n previous_blog = Blog.objects.filter(create_time__gt=blog.create_time).last()\r\n next_blog = Blog.objects.filter(create_time__lt=blog.create_time).first()\r\n context = {}\r\n context['blog'] = blog\r\n context['previous_blog'] = previous_blog\r\n context['next_blog'] = next_blog\r\n context['login_form'] = LoginForm()\r\n response = render(request, 'blog_detail.html', context)\r\n response.set_cookie(read_cookie_key, 'true')\r\n return response\r\n print(request.POST)\r\n\r\n\r\ndef blog_type_list(request):\r\n blog_types = BlogType.objects.annotate(blog_count=Count('blog'))\r\n context = {}\r\n 
context['blog_types'] = blog_types\r\n    return render(request, 'blog_type_list.html', context)\r\n\r\n\r\ndef blog_with_type(request, blog_type_pk):\r\n    blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\r\n    blogs = Blog.objects.filter(type=blog_type)\r\n    context = blogs_with_paginator(request, blogs)\r\n    return render(request, 'blog_type.html', context)\r\n\r\n\r\ndef blog_date_list(request):\r\n    dates_of_list = Blog.objects.dates('create_time', 'month', order='DESC')\r\n    blog_date_dict = {}\r\n    for blog_date in dates_of_list:\r\n        blog_count = Blog.objects.filter(create_time__year=blog_date.year,\r\n                                         create_time__month=blog_date.month).count()\r\n        blog_date_dict[blog_date] = blog_count\r\n    context = {}\r\n    context['dates_of_list'] = blog_date_dict\r\n    return render(request, 'blog_date_list.html', context)\r\n\r\n\r\ndef blog_with_date(request, year, month):\r\n    blogs = Blog.objects.filter(create_time__year=year, create_time__month=month)\r\n    context = blogs_with_paginator(request, blogs)\r\n    return render(request, 'blog_date.html', context)\r\n\r\n\r\ndef get_search(request):\r\n\tsearch_blog = request.GET.get('search_blog')\r\n\tblog_list = Blog.objects.filter(title__icontains=search_blog)\r\n\tcontext = {}\r\n\tcontext['blog_list'] = blog_list\r\n\treturn render(request, 'search_result.html', context)\r\n\r\n","sub_path":"Myblog/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"386817629","text":"import sys\nsys.path.append('../')\nfrom tests.es_vs_no_es import es_vs_no_es\nimport numpy as np\n\ndata_dict = dict()\n\nfor idx in range(5):\n    data_dict['normal' + str(idx)] = np.random.normal(size=(100, 3))\n    data_dict['gumbel' + str(idx)] = np.random.gumbel(size=(100, 3))\n    data_dict['logistic' + str(idx)] = np.random.logistic(size=(100, 3))\n\nmake_plots = es_vs_no_es(data_dict, show_plots=False, save_plots=True, save_path='plots/3d_scaled/')\nmake_plots.generate_plots()","sub_path":"tests/make_plots.py","file_name":"make_plots.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"560592017","text":"# -*- coding: UTF-8 -*-\n\"\"\"Modify each function until the tests pass.\"\"\"\n\n\ndef is_odd(a_number):\n    \"\"\"Return True if a_number is odd, and False if a_number is even.\n\n    Look into modulo division using the '%' operator as one way of doing this.\n    \"\"\"\n    if a_number%2 != 0: # % returns the remainder: 5%2 = 1, which is not zero, hence odd\n        return True\n    else:\n        return False\n\n\ndef fix_it(moves=True, should_move=True):\n    \"\"\"Decide what to do.\n\n    Using the engineering flowchart (in week2 folder of the CODE1161-2019\n    repo engineeringFlowchart.png) for the rules, return the appropriate\n    response to the input parameters.\n    Use conditional statements: if, else, elif etc.\n    This function should return either:\n    \"WD-40\"\n    \"Duct Tape\"\n    \"No Problem\"\n\n    Most people write this function with 4 return statements. 
\n    As an extra challenge, see if you can get that down to three.\n    \"\"\"\n\n    option = [\"WD-40\", \"Duct Tape\", \"No Problem\" ]\n\n    if moves == True: #This statement is saying that the item moves\n        if should_move == True: #This statement is looking at the option that it moves and it should move\n            return option[2] # option 2 is \"no problem\" as python counts from 0\n        else: #This statement is looking at the option that it moves and it shouldn't move\n            return option[1]\n\n    else: #This statement is the opposing statement to if moves == true, it basically means moves != True\n        if should_move != False:\n            return option[0]\n        else:\n            return option[2]\n\n\ndef loops_1a():\n    \"\"\"Make 10 stars.\n\n    Using a for loop\n    return a list of 10 items, each one a string with exactly one star in it.\n    E.g.: ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*']\n    \"\"\"\n\n    stars = [] #made an empty list so that we can add values to it \n\n    for x in range(10): #this runs it 10 times\n        stars.append('*') #this adds stars into the stars\n    \n    return stars \n\n\ndef loops_1c(number_of_items=5, symbol=\"#\"):\n    \"\"\"Respond to variables.\n\n    Using any method, return a list of number_of_items items, each one a\n    string with exactly one symbol in it.\n    E.g.: ['#', '#', '#', '#', '#']\n    \"\"\"\n    y = []\n    for x in range(number_of_items):\n        y.append(symbol)\n\n    return y\n\n\ndef loops_2():\n    \"\"\"Make a big square starfield.\n\n    return a list of 10 items, each one a list of 10 items,\n    each one of those, a string with exactly one star in it.\n    E.g.: [\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*'],\n    ]\n    \"\"\"\n    squares = [] # This is an empty list \n    temp = [] #This is also an empty list\n    for x in range(10): # This runs it 10 times in x-direction \n        for y in range(10): # This runs it 10 times \n            temp.append(\"*\") # this adds the stars inside\n\n        squares.append(temp) #this adds the temp list into the squares list\n        temp = [] # Resets the list\n    return squares\n\n\ndef loops_3():\n    \"\"\"Make a rising block of numbers.\n\n    Return this:\n    [\n    ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0'],\n    ['1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],\n    ['2', '2', '2', '2', '2', '2', '2', '2', '2', '2'],\n    ['3', '3', '3', '3', '3', '3', '3', '3', '3', '3'],\n    ['4', '4', '4', '4', '4', '4', '4', '4', '4', '4'],\n    ['5', '5', '5', '5', '5', '5', '5', '5', '5', '5'],\n    ['6', '6', '6', '6', '6', '6', '6', '6', '6', '6'],\n    ['7', '7', '7', '7', '7', '7', '7', '7', '7', '7'],\n    ['8', '8', '8', '8', '8', '8', '8', '8', '8', '8'],\n    ['9', '9', '9', '9', '9', '9', '9', '9', '9', '9']\n    ]\n    remember that range(10) produces a list of numbers from 0...9\n    So for every step produced by `for i in range(10):` i is a different number\n    TIP: notice that this needs to return strings of numbers,\n    so call str(number) to cast.\n    \"\"\"\n    i = 0\n    num=[]\n    lol=[]\n    for x in range(10):\n        for y in range(10):\n            lol.append(str(i))\n        num.append(lol) #list within a list sort of thing :)\n        lol = [] #reset list\n        i += 1 #the new list will increase by 1 every time it's 
reset\n\n    return num\n\n\ndef loops_4():\n    \"\"\"Make a block of numbers that rises left to right.\n\n    Return this:\n    [\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n    ]\n    \"\"\"\n    i = 0 \n    num=[]\n    lol=[]\n    for x in range(10):\n        for y in range(10):\n            lol.append(str(i))\n            i += 1 #This adds 1 as the list goes on 0 1 2 3 4 5 6 7 8 9\n        num.append(lol) #list within a list sort of thing :)\n        lol = [] #This resets the list so that there is more than one list (10 in this case) with 10 values inside it\n        i = 0 # This resets every list so that it starts up again at 0\n    return num\n\n\ndef loops_5():\n    \"\"\"Make the coordinates of the block.\n\n    Return this:\n    [\n    ['(i0, j0)', '(i0, j1)', '(i0, j2)', '(i0, j3)', '(i0, j4)'],\n    ['(i1, j0)', '(i1, j1)', '(i1, j2)', '(i1, j3)', '(i1, j4)'],\n    ['(i2, j0)', '(i2, j1)', '(i2, j2)', '(i2, j3)', '(i2, j4)'],\n    ['(i3, j0)', '(i3, j1)', '(i3, j2)', '(i3, j3)', '(i3, j4)'],\n    ['(i4, j0)', '(i4, j1)', '(i4, j2)', '(i4, j3)', '(i4, j4)'],\n    ['(i5, j0)', '(i5, j1)', '(i5, j2)', '(i5, j3)', '(i5, j4)'],\n    ['(i6, j0)', '(i6, j1)', '(i6, j2)', '(i6, j3)', '(i6, j4)'],\n    ['(i7, j0)', '(i7, j1)', '(i7, j2)', '(i7, j3)', '(i7, j4)'],\n    ['(i8, j0)', '(i8, j1)', '(i8, j2)', '(i8, j3)', '(i8, j4)'],\n    ['(i9, j0)', '(i9, j1)', '(i9, j2)', '(i9, j3)', '(i9, j4)']\n    ]\n\n    TIP:\n    You can construct strings either by concatenating them:\n    \"There are \" + str(8) + \" green bottles\"\n    or by using format:\n    \"There are {} green bottles\".format(8)\n    you'll come to see the pros and cons of each over time.\n    \"\"\"\n\n    block = [] #an empty list\n    tmp = [] #another empty list\n    for i in range(10): #in range 10\n        for j in range(5): #in range 5\n            tmp.append(\"(i\" + str(i) + \", \" + \"j\" + str(j) + \")\") #adds this into tmp list\n        block.append(tmp) #adds temp list into block list\n        tmp = [] #moves onto the next list, which goes through the loop again but with different values\n    \n    return block #returns block list, our answer \n\n\ndef loops_6():\n    \"\"\"Make a wedge of numbers.\n\n    Return this:\n    [\n    ['0'],\n    ['0', '1'],\n    ['0', '1', '2'],\n    ['0', '1', '2', '3'],\n    ['0', '1', '2', '3', '4'],\n    ['0', '1', '2', '3', '4', '5'],\n    ['0', '1', '2', '3', '4', '5', '6'],\n    ['0', '1', '2', '3', '4', '5', '6', '7'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8'],\n    ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n    ]\n    You don't have to use a literal number in the range function.\n    You can use a variable.\n    TIP: look out for the starting condition.\n    \"\"\"\n\n    wedge = [] #Main list\n    tmp = [] #Temporary list\n    i = 0\n    list_length = 0\n    for x in range(10):\n        for y in range(list_length + 1):\n            tmp.append(str(i))\n            i += 1\n        wedge.append(tmp) \n        tmp = []\n        list_length += 1\n        i = 0\n\n    return wedge\n\n\ndef loops_7():\n    \"\"\"Make a pyramid.\n\n    Return this:\n    [\n    [' ', ' ', ' ', ' ', '*', ' ', ' ', ' ', ' '],\n    [' ', ' ', ' ', '*', '*', '*', ' ', ' ', ' '],\n    [' ', ' ', '*', '*', '*', '*', '*', ' ', ' '],\n    [' ', '*', '*', '*', '*', '*', '*', '*', ' '],\n    ['*', '*', '*', '*', '*', '*', '*', '*', '*']\n    ]\n    or 
in simpler terms:\n    *\n    * * *\n    * * * * *\n    * * * * * * *\n    * * * * * * * * *\n    (this is what will print when you test from inside this file)\n    This is a hard problem. Use lots of experimentation and draw\n    lots of diagrams!\n    \"\"\"\n\n    pyramid = [] #main list \n    tmp = [] #list inside list\n    mid_val = 4 #the middle value\n    k = 0\n\n    for i in range(5): #5 rows\n        for j in range(9): #9 columns\n            tmp.append(\" \") #adds spaces inside\n\n        #this loop will add the \"*\" inside\n        temp_value = 1\n        while k != 0 and temp_value <= k:\n            tmp[mid_val - temp_value] = \"*\" #placement of star to the left\n            tmp[mid_val + temp_value] = \"*\" #placement of star to the right \n            temp_value += 1 #temp_value = temp_value + 1\n\n        tmp[mid_val] = \"*\" #adds a star to the middle value\n        pyramid.append(tmp) #puts tmp list inside pyramid list \n        k += 1 #k = k+1\n        tmp = [] #resets tmp list :)\n    \n    return pyramid\n\n\ndef lp(some_kind_of_list, exercise_name):\n    \"\"\"Help to see what's going on.\n\n    This is a helper function that prints your\n    results to check that they are tidy.\n    Note: You don't have to do anything with it.\n    \"\"\"\n    if some_kind_of_list is not None:\n        print(\"\\n\" + exercise_name)\n        if type(some_kind_of_list[0]) is list:\n            for row in some_kind_of_list:\n                for column in row:\n                    print(column, end=\"\")\n                print()\n        else:\n            for column in some_kind_of_list:\n                print(column, end=\"\")\n            print()\n    else:\n        print(exercise_name, \"maybe you haven't got to this one yet?\")\n\n\nif __name__ == \"__main__\":\n    # this section does a quick test on your results and prints them nicely.\n    # It's NOT the official tests, they are in tests.py as usual.\n    # Add to these tests, give them arguments etc. to make sure that your\n    # code is robust to the situations that you'll see in action.\n    print(is_odd(1), \"is_odd odd\")\n    print(is_odd(4), \"is_odd even\")\n    print(fix_it(True, True), \"fix_it\")\n    print(fix_it(True, False), \"fix_it\")\n    print(fix_it(False, True), \"fix_it\")\n    print(fix_it(False, False), \"fix_it\")\n    lp(loops_1a(), \"loops_1a\")\n    lp(loops_1c(4, \"×°×\"), \"loops_1c\")\n    lp(loops_2(), \"loops_2\")\n    lp(loops_3(), \"loops_3\")\n    lp(loops_4(), \"loops_4\")\n    lp(loops_5(), \"loops_5\")\n    lp(loops_6(), \"loops_6\")\n    lp(loops_7(), \"loops_7\")\n","sub_path":"week2/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":11131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"448918013","text":"\"\"\"\n\tDBSCAN clustering\n\tAlso, implementation of a greedy search of optimal hyperparameters of DBSCAN for face clustering.\n\n\tauthor: Ricardo Kleinlein\n\tdate: 02/2020\n\n\tUsage:\n\t\tpython grid_search.py \n\n\tOptions:\n\t\t--output-dir\tDirectory to save results in\n\t\t--quiet\tHide visual information\n\t\t-h, --help\tDisplay script additional help\n\"\"\"\n\nimport os\nimport PIL\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom os.path import join\nfrom arguments import DbscanArgs\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.neighbors import NearestNeighbors\n\n\ndef load_db(db_path):\n    \"\"\"Load a pd.Dataframe of detections.\"\"\"\n    return pd.read_csv(db_path)\n\n\ndef load_embeddings(db):\n    \"\"\"Load a list of vector embeddings from a\n    pd.Dataframe\n\n    Args:\n    db (pd.DataFrame): DataFrame object\n\n    Return:\n    a list of vector embeddings in np.ndarray form\n    a list of bounding box sizes (np.ndarray)\n    \"\"\"\n    size = db['size'].values\n    
emb = db['embedding'].values\n emb = [np.load(i).flatten() for i in emb]\n return emb, size\n\n\ndef size_filter(db, threshold):\n \"\"\"Filter out samples under threshold.\n\n Args:\n db (pd.DataFrame): DataFrame object\n threshold (int): Minimal size to accept\n\n Return:\n a pd.DataFrame of samples that meet the condition\n \"\"\"\n idx2rm = []\n for i, x in db.iterrows():\n if x['size'] < threshold:\n idx2rm.append(i)\n\n print(\"Numero de imagenes: \",len(db)-len(idx2rm),\" de \", len(db))\n return db.drop(idx2rm)\n\n\ndef hist_face_sizes(X, measure, output_dir):\n \"\"\"Save a histogram depicting the face sizes.\n\n Args:\n X (float): List of face sizes\n measure (str): Perimeter or area\n output_dir (str): Directory to save in\n \"\"\"\n os.makedirs(output_dir, exist_ok=True)\n plt.clf()\n plt.hist(X, bins=100)\n plt.xlabel('Face bounding box')\n plt.ylabel('Frequency')\n plt.savefig(join(output_dir, 'face_' + measure + '.png'))\n\n\ndef dbscan_(X, eps, min_samples, metric='euclidean'):\n \"\"\"DBSCAN clustering for a set of parameters over the\n sampels X.\n\n Args:\n X (float): Feature sample vectors\n eps (float): Epsilon hparam\n min_samples (int): Min-samples hparam\n metric (str, optional): distance metric [default: euclidean]\n\n Return:\n np.ndarray of labels for each samples, with\n noisy samples given a `-1`\n \"\"\"\n f = DBSCAN(eps=eps,\n min_samples=min_samples,\n metric=metric)\n f.fit(X)\n return f.labels_, f.core_sample_indices_\n\n\ndef export(path, data):\n \"\"\"Export data to external csv file.\"\"\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(data, np.ndarray):\n np.save(path, data)\n elif isinstance(data, pd.DataFrame):\n data.to_csv(path + '.csv')\n elif hasattr(data, '__dict__'):\n with open(path, 'w+') as f:\n for k, v in zip(data.__dict__.keys(), data.__dict__.values()):\n f.write(k + ': ' + str(v) + '\\n')\n else:\n raise IOError('Invalid data format')\n\n\ndef nearestneighbors(X, n, metric='euclidean'):\n \"\"\"Compute the distance to the n-th neighbour in an array\n of feature samples X.\n\n Args:\n X (float): Array of feature samples.\n n (int): N-th neighbor to consider.\n metric (str): DIstance measure [default: euclidean]\n\n Return:\n An np.ndarray of distances up to the n-th neighbors\n \"\"\"\n nn = NearestNeighbors(n_neighbors=n,\n metric=metric,\n n_jobs=-1)\n nbrs = nn.fit(X)\n dist, _ = nbrs.kneighbors(X)\n sort_dist = np.sort(dist, axis=0)[:, 1:]\n return sort_dist\n\n\ndef get_n_noise_samples(labels):\n return list(labels).count(-1)\n\n\ndef get_number_clusters(labels):\n return len(set(labels)) - (1 if -1 in labels else 0)\n\n\ndef measure_silhouette(X, labels, metric, with_noise=True):\n \"\"\"Clean up noise samples and compute Silhouette score.\n Since DBSCAN assigns a -1 label to those samples that are not attached to any cluster, we should dismiss those ones from the point of view of validation.\n\n Args:\n X (float): Feature samples\n labels (int): Prediction labels. 
-1 denotes noise\n metric (str): Distance measure\n with_noise (bool, optional): Account for -1 tags\n\n Return:\n a float, the average Silhouette score of the clean samples.\n \"\"\"\n if get_number_clusters(labels) < 2:\n return -1\n if -1 in labels:\n if with_noise:\n return silhouette_score(X, labels, metric=metric)\n else:\n idx2keep = []\n for i, x in enumerate(labels):\n if x != -1:\n idx2keep.append(i)\n X = np.array([X[i] for i in idx2keep])\n labels = labels[idx2keep]\n return silhouette_score(X, labels, metric=metric)\n\n\ndef _eps_search(X, eps, min_samples, metric, quiet=False):\n \"\"\"Epsilon-based DBSCAN protocol.\n\n Args:\n X (float): Vector embeddings\n eps (float): Eps value\n min_samples (int): Minimum min_samples\n metric (str): Pairwise distance to use\n quiet (bool, optional): Whethet to display info\n\n Return:\n a np.ndarray of labels according to the final cluster\n a dict of measures representing the experiment\n \"\"\"\n score = -1\n num_cluster = 0\n num_noise = 0\n\n pred, core_ = dbscan_(X, eps, min_samples, metric=metric)\n try:\n score_sil = measure_silhouette(X, pred, metric, True)\n score = score_sil\n num_cluster = get_number_clusters(pred)\n num_noise = get_n_noise_samples(pred) / len(pred)\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n\n if not quiet:\n print('> Eval configuration - eps = {:.2f}'.format(eps))\n\n vals = {'score': score,\n 'num_clusters': num_cluster,\n 'noise': num_noise}\n\n return pred, vals\n\n\ndef _status(pred):\n \"\"\"Given a list of predictions, find the cluster sparse representation.\n \"\"\"\n n = len(pred)\n pred_copy = pred.copy()\n cluster_groups = []\n for i in range(n):\n within_group = []\n for j in range(n):\n if pred[i] != -1 and pred[i] == pred[j]:\n within_group.append(j)\n cluster_groups.append(within_group)\n return [x for x in np.unique(cluster_groups) if x != []]\n\n\ndef compare(status, old_pred):\n \"\"\"How many new and merged groups.\"\"\"\n num_cl = len(status)\n new_clusters = 0\n merged_clusters = 0\n\n for i in range(num_cl):\n group = status[i]\n # print('Group: ', group)\n\n were_noise = True\n has_merged = False\n\n for k in range(len(group)):\n sample = group[k]\n old_tag = old_pred[sample]\n # print('Sample {:d}, old tag: {:d}'.format(sample, int(old_tag)))\n if old_tag != -1:\n were_noise = False\n if len(np.unique(old_pred[group])) > 1:\n has_merged = True\n\n if were_noise:\n new_clusters += 1\n if has_merged:\n merged_clusters += len(np.unique(old_pred[group])) - 1\n\n return new_clusters, merged_clusters\n\n\ndef save_clusters(db, savepath, time_req):\n \"\"\"Prepares image mosaics of the clusters and computes their centroids and stddev.\n\n Args:\n db (pd.DataFrame): Data with full info\n dirname (str): Root output directory\n time_req (int): Minimum time to be considered participant\n \"\"\"\n # try:\n # all_id = np.unique(db['pred_labels']) # Includes noise tag\n # except:\n print('fallo de unique')\n import pdb\n pdb.set_trace()\n all_id = []\n num_id = [0 for _ in range(50)]\n for line in db['pred_labels']:\n if all_id.count(line)==0:\n all_id.append(line)\n num_id[line+1]+=1\n pdb.set_trace()\n\n\n for iddty in all_id:\n data = db.loc[db['pred_labels'] == iddty]\n if len(data) >= time_req and iddty != -1:\n id_path = join(savepath, 'id_' + str(iddty))\n os.makedirs(id_path, exist_ok=True)\n\n data_vector, size = load_embeddings(data)\n centroid = np.mean(data_vector, axis=0)\n std = np.std(data_vector, axis=0)\n cov = np.cov(np.array(data_vector).T)\n\n print(all_id)\n 
print(cov)\n pdb.set_trace()\n\n #inv_cov = np.linalg.inv(cov)\n export(path=join(id_path, 'centroid'),\n data=centroid)\n export(path=join(id_path, 'std'),\n data=std)\n export(path=join(id_path, 'covmat'),\n data=cov)\n #export(path=join(id_path, 'inv_covmat'), data=inv_cov)\n\n imgs = data['img'].values\n for img_path in imgs:\n img = PIL.Image.open(img_path)\n img_name = img_path.split('/')[-1]\n img.save(join(id_path, img_name))\n\n\ndef eps_search(db, X, time_req, eps_low, eps_high, trials, min_samples, metric, output_dir, quiet=False):\n \"\"\"Export a scaled search of parameters based on the value of epsilon and the minimal intervention time.\n\n Args:\n db (pd.DataFrame): Data information\n X (float): Vector embeddings\n time_req (int): Minimum time to be considered participant\n eps_low (float): Lower limit of search\n eps_high (float): Upper limit of search\n min_samples (int): Fixed DBSCAN hparam. Low to assure convergence\n metric (str): Pairwise distance to use\n output_dir (str): Output directory\n quiet (bool, optional): Whether to display info\n\n Return:\n a float, the best epsilon found\n \"\"\"\n maindir = join(dirname, 'min_samples_' + str(min_samples))\n overall = {\n 'score': [], # Silhouette average score\n 'num_clusters': [], # Total number of clusters\n 'noise': [], # Proportion of noise samples\n 'newly_created': [], # Generated from scratch clusters\n 'merging_processes': [] # Cases of merging clusters\n }\n eps = np.linspace(eps_low, eps_high, trials)\n old_pred = np.ones(len(X)) * (-1)\n best_score = -1\n best_eps = eps_low\n best_pred = None\n\n import pdb\n\n for e in range(len(eps)):\n pred_labels, info = _eps_search(\n X=X,\n eps=eps[e],\n min_samples=min_samples,\n metric=metric,\n quiet=quiet)\n status = _status(pred_labels)\n info['newly_created'], info['merging_processes'] = compare(\n status, old_pred)\n old_pred = pred_labels.copy()\n print(info)\n if info['score'] > best_score:\n best_score = info['score']\n best_eps = eps[e]\n best_pred = pred_labels\n\n for k in info.keys():\n overall[k].append(info[k])\n\n\n epsdir = join(maindir, 'eps_' + str(best_eps))\n db['pred_labels'] = best_pred\n export(join(epsdir, 'labels'), db)\n if not quiet:\n print('> Configuration saved')\n print('> Computing centroids & cluster mosaics')\n\n save_clusters(db, epsdir, time_req)\n\n overall = pd.DataFrame(\n data=overall,\n index=eps,\n columns=overall.keys())\n export(join(maindir, 'results'), overall)\n\n return best_eps\n\n\nif __name__ == \"__main__\":\n args = DbscanArgs().parse()\n db = load_db(args.program_csv)\n _, size = load_embeddings(db)\n hist_face_sizes(size, 'size', args.output_dir)\n if not args.quiet:\n print('> Perimeter and area computed for all faces')\n db = size_filter(db, args.min_area)\n X, size = load_embeddings(db)\n dirname = join(args.output_dir, 'dbscan_' + args.metric)\n\n export(join(dirname, 'min_samples_' + str(args.min_samples),\n 'hparams.txt'), args)\n\n if not args.quiet:\n print('> Proceed to hyperparameters search...')\n filename = 'dist_' + str(args.nthneigh - 1) + 'th_neighbor.csv'\n dists = nearestneighbors(X, args.nthneigh, metric=args.metric)\n export(join(dirname, filename), data=dists)\n\n if not args.quiet:\n print('> Distance to neighbors exported')\n print('> Searching a nice DBSCAN configuration')\n\n eps_search(\n db=db,\n X=X,\n time_req=args.min_part,\n eps_low=args.eps_low,\n eps_high=args.eps_upper,\n trials=args.trials,\n min_samples=args.min_samples,\n metric=args.metric,\n output_dir=dirname,\n 
quiet=args.quiet)\n","sub_path":"dbscan.py","file_name":"dbscan.py","file_ext":"py","file_size_in_byte":12627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"149900196","text":"# https://github.com/KerasKorea/KEKOxTutorial/blob/master/134_Keras%20%EC%99%80%20Gym%20%EA%B3%BC%20%ED%95%A8%EA%BB%98%ED%95%98%EB%8A%94%20Deep%20Q-Learning%20%EC%9D%84%20%ED%96%A5%ED%95%9C%20%EC%97%AC%ED%96%89.md\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.optimizers import Adam\r\n\r\nfrom collections import deque\r\nimport numpy as np\r\nimport random\r\nimport os\r\nimport time\r\n\r\n\r\nclass Agent():\r\n def __init__(self, state_size, action_size):\r\n self.weight_backup = \"model/dql-medium.h5\"\r\n self.state_size = state_size\r\n self.action_size = action_size\r\n self.memory = deque(maxlen=2000)\r\n self.learning_rate = 0.001\r\n self.gamma = 0.95\r\n self.exploration_rate = 1.0\r\n self.exploration_min = 0.01\r\n self.exploration_decay = 0.995\r\n self.brain = self._build_model()\r\n\r\n def _build_model(self):\r\n # Neural Net for Deep-Q learning Model\r\n model = Sequential()\r\n model.add(Dense(24, input_dim=self.state_size, activation='relu'))\r\n model.add(Dense(24, activation='relu'))\r\n model.add(Dense(self.action_size, activation='linear'))\r\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\r\n\r\n if os.path.isfile(self.weight_backup):\r\n model.load_weights(self.weight_backup)\r\n self.exploration_rate = self.exploration_min\r\n\r\n return model\r\n\r\n def save_model(self):\r\n self.brain.save(self.weight_backup)\r\n\r\n def act(self, state):\r\n if np.random.rand() <= self.exploration_rate:\r\n return random.randrange(self.action_size)\r\n \r\n act_values = self.brain.predict(state)\r\n return np.argmax(act_values[0])\r\n\r\n def remember(self, state, action, reward, next_state, done):\r\n self.memory.append((state, action, reward, next_state, done))\r\n\r\n def replay(self, sample_batch_size):\r\n if len(self.memory) < sample_batch_size:\r\n return\r\n \r\n sample_batch = random.sample(self.memory, sample_batch_size)\r\n for state, action, reward, next_state, done in sample_batch:\r\n target = reward\r\n if not done:\r\n target = reward + self.gamma * np.amax(self.brain.predict(next_state)[0])\r\n \r\n target_f = self.brain.predict(state)\r\n target_f[0][action] = target\r\n self.brain.fit(state, target_f, epochs=1, verbose=0)\r\n\r\n if self.exploration_rate > self.exploration_min:\r\n self.exploration_rate *= self.exploration_decay\r\n\r\n\r\nclass Circle():\r\n def __init__(self, episodes):\r\n self.options = webdriver.ChromeOptions()\r\n self.options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\", \"enable-logging\"])\r\n self.driver = webdriver.Chrome(executable_path='./chromedriver.exe', options=self.options)\r\n self.driver.set_window_size(350, 820)\r\n self.driver.get('http://localhost:8080/medium')\r\n \r\n self.actions = ActionChains(self.driver)\r\n \r\n self.spacebar = self.actions.send_keys(Keys.SPACE)\r\n self.play_button = self.driver.find_element_by_css_selector('div#menu')\r\n self.replay_button = self.driver.find_element_by_css_selector('div#replay')\r\n self.sound = self.driver.find_element_by_css_selector('div#sound')\r\n self.sound.click()\r\n \r\n self.before_score = 0\r\n \r\n 
self.sample_batch_size = 32\r\n self.episodes = episodes\r\n\r\n self.state_size = 8\r\n self.action_size = 2\r\n self.agent = Agent(self.state_size, self.action_size)\r\n\r\n def run(self):\r\n try:\r\n for index_episode in range(self.episodes):\r\n self.driver.execute_script('document.getElementById(\"count\").innerHTML = \"{}\"'.format(index_episode + 1))\r\n \r\n score = 0\r\n self.before_score = 0\r\n self.play_button.click()\r\n \r\n state = self.get_state()\r\n state = np.reshape(state, [1, self.state_size])\r\n while True:\r\n action = self.agent.act(state)\r\n \r\n next_state, reward, done = self.step(action)\r\n next_state = np.reshape(next_state, [1, self.state_size])\r\n self.agent.remember(state, action, reward, next_state, done)\r\n state = next_state\r\n score += reward\r\n \r\n if done:\r\n time.sleep(1)\r\n try:\r\n self.replay_button.click()\r\n except:\r\n time.sleep(1)\r\n self.replay_button.click()\r\n break\r\n \r\n print(\"Episode {}# Score: {}\".format(index_episode, score))\r\n self.agent.replay(self.sample_batch_size)\r\n \r\n finally:\r\n self.agent.save_model() \r\n \r\n def step(self, action): \r\n if action:\r\n self.play_button.click()\r\n time.sleep(0.4)\r\n\r\n now_score = self.driver.execute_script('return score')\r\n score = 1.0 if self.before_score < now_score else 0\r\n self.before_score = now_score\r\n \r\n is_playing = self.driver.execute_script('return window.isPlaying')\r\n done = False if is_playing else True\r\n state = self.get_state()\r\n \r\n return state, score, done\r\n \r\n def get_state(self):\r\n pos_now = self.driver.execute_script('return window.PosNow')\r\n lines = self.driver.execute_script('return window.line')\r\n CP = int(self.driver.execute_script('return window.CP'))\r\n now_line_height = -(lines[CP + 1][0] - pos_now[0]) * ((lines[CP + 1][1] - lines[CP][1]) / (lines[CP + 1][0] - lines[CP][0])) + lines[CP + 1][1]\r\n gap = (now_line_height - 8) - (pos_now[1] - 65)\r\n vx = self.driver.execute_script('return window.Vx')\r\n \r\n return np.array([\r\n pos_now[1] * 0.001, \r\n lines[CP + 0][1] * 0.001, \r\n lines[CP + 1][1] * 0.001, \r\n lines[CP + 2][1] * 0.001,\r\n lines[CP + 2][1] * 0.001,\r\n now_line_height * 0.001, \r\n gap * 0.1,\r\n vx * 0.001])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n circle = Circle(10000)\r\n circle.run()","sub_path":"main-DQL.py","file_name":"main-DQL.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"242879205","text":"money = 0.00\nloop = 1\n\ndef choose_beverage():\n beverage = input(\"\\nPlease choose the beverage you want.\\n\\n1 = Sprite\\n2 = Coca-cola\\n3 = Diet Coke\\n4 = Coke Zero\\n5 = Mountain Dew\\n6 = Mello Yello\\n7 = Sierra Mist\\n8 = 7Up\\n9 = Lemonade\\n0 = Water\\n\\nAll cost $1.25.\\n\\n\")\n if beverage == '1':\n print(\"\\nSprite\")\n elif beverage == '2':\n print(\"\\nCoca-cola\")\n elif beverage == '3':\n print(\"\\nDiet Coke\")\n elif beverage == '4':\n print(\"\\nCoke Zero\")\n elif beverage == '5':\n print(\"\\nMountain Dew\")\n elif beverage == '6':\n print(\"\\nMello Yello\")\n elif beverage == '7':\n print(\"\\nSierra Mist\")\n elif beverage == '8':\n print(\"\\n7Up\")\n elif beverage == '9':\n print(\"\\nLemonade\")\n elif beverage == '0':\n print(\"\\nWater\")\n\nwhile True:\n print(\"Insert money.\\n\")\n while True:\n if loop == 1:\n coin = input(\"P = insert penny\\nN = insert nickel\\nD = insert dime\\nQ = insert quarter\\nH = insert half-dollar coin\\nC = insert dollar 
coin\\n1 = insert 1-dollar bill\\n2 = insert 2-dollar bill\\nZ = Move on to choosing beverage.\\n\\n\")\n else:\n coin = input(\"\\n\")\n if coin == 'P':\n money = money + 0.01\n elif coin == 'N':\n money = money + 0.05\n elif coin == 'D':\n money = money + 0.10\n elif coin == 'Q':\n money = money + 0.25\n elif coin == 'H':\n money = money + 0.50\n elif coin == 'C':\n money = money + 1.00\n elif coin == '1':\n money = money + 1.00\n elif coin == '2':\n money = money + 2.00\n elif coin == 'Z':\n choose_beverage()\n break\n else:\n print(\"\\nInvalid coin.\")\n print(\"\\n\" + str(money))\n loop = loop + 1\n if money - 1.25 < 0:\n print(\"\\nNot enough money inserted.\")\n break\n\nprint(\"\\nThank you. Your change is $\" + str(money - 1.25) + \".\\n\")\n \n","sub_path":"vending_machine (beverages).py","file_name":"vending_machine (beverages).py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"102960273","text":"# this file manages the basic cli of termicoder and calls the correct function\nimport click\nfrom termicoder.utils import parse\nimport termicoder.utils.test as test_module\nimport termicoder.utils.code as code_module\nimport termicoder.utils.viewthis as viewthis_module\n\n# Only need to change this on adding new judges if structure is followed\n# take care of ',' (comma) while editing this list!\n# for default structure visit https://termicoder.github.io\nOJs = sorted([\n 'iarcs',\n 'codechef'\n])\n###############################################################################\n\n# importing OJ's Modules\nfor OJ in OJs:\n exec(\"import termicoder.judges.%s.main as %s\" % (OJ, OJ))\n\n\n@click.group()\ndef main():\n '''\n view, code & submit problems directly from terminal.\n '''\n pass\n\n\n# view command has various subcommands\n@click.group()\ndef view():\n '''\n view contests, problems and problem statement\n '''\n pass\n\n\n@click.command(short_help=\"display contest list of a judge\")\n@click.option('-j', '--judge', type=click.Choice(OJs),\n prompt=\"Please provide a judge(\"+'|'.join(OJs)+\")\")\ndef contests(judge):\n '''\n lists current and upcoming contests on a judge.\n\n depending on judge it may give a list of categories also\n such as PRACTICE etc.\n '''\n eval(judge).view_contests()\n\n\n@click.command(short_help=\"list problems of a contest/category\")\n@click.option('-j', '--judge', type=click.Choice(OJs),\n prompt=\"Please provide a judge(\"+'|'.join(OJs)+\")\")\n@click.option('-c', '--contest', type=click.STRING, help=\"contest code\")\ndef problems(judge, contest):\n '''\n lists problems of a contest/category on the judge\n '''\n eval(judge).view_problems(contest)\n\n\n@click.command(short_help=\"view contents of current folder\")\n@click.option(\"-f\", \"--folder\", type=click.Path())\n@click.option(\"-ed\", \"--edit_defaults\", is_flag=True, default=False,\n help=\"edit default web browser\")\ndef this(folder, edit_defaults):\n '''\n display the termicoder contents in current/passed folder\n\n \\b\n if it is a contest folder it displays the list of problems.\n if its a problem folder, displays the problem in a browser.\n '''\n viewthis_module.view(folder, edit_defaults)\n\n\nview.add_command(contests)\nview.add_command(problems)\nview.add_command(this)\n###############################################################################\n\n\n@click.command()\n@click.option('-j', '--judge', type=click.Choice(OJs),\n prompt=\"Please provide a 
judge(\"+'|'.join(OJs)+\")\")\n@click.option('-c', '--contest', type=click.STRING, help=\"contest code\")\n@click.option('-p', '--problem', type=click.STRING, help=\"problem code\")\n@click.option('--login', 'status', flag_value='login')\n@click.option('--logout', 'status', flag_value='logout')\ndef setup(judge, contest, problem, status):\n \"\"\"\n sets up problem, contests and login.\n\n 1. if you pass judge and --login/--logout,\n it logs you in and out of the judge\n\n 2. if you pass judge and contest/category\n it downloads all the problems of that contest.\n\n 3. if you pass a particular problem, with judge and contest/category,\n it sets up that problem.\n\n all this happens in the current folder.\n option of contest/category may vary amongst various online judges\n \"\"\"\n eval(judge).setup(contest, problem, status)\n\n\n@click.command()\n@click.option('-f', '--file', 'code_file',\n type=click.Path(writable=True, readable=False, dir_okay=False),\n help=\"the filename to code into with preloaded template\")\n@click.option('-et', \"--edit_templates\", is_flag=True, default=False,\n help=\"open templates folder\")\n@click.option('-ed', \"--edit_defaults\", is_flag=True, default=False,\n help=\"edit defaults for editors\")\ndef code(code_file, edit_templates, edit_defaults):\n '''\n creates & open code file with template code.\n\n you can edit template code and default editors\n using flags -et and -ed respectively\n '''\n if(edit_templates):\n code_module.edit_templates()\n\n elif(edit_defaults):\n code_module.edit_defaults()\n\n elif(code_file is None):\n code_file = code_module.get_file_name()\n\n if(code_file is not None):\n code_module.code(code_file)\n\n\n@click.command()\n@click.option('-f', '--file', 'code_file', type=click.File(),\n help=\"the code file\")\n@click.option('-tl', '--timelimit', type=float,\n help=\"the max time per testcase\")\n@click.option('-l', '--live', is_flag=True, default=False,\n help=\"test the code live and don't use testcases\")\n@click.option('-es', \"--edit_scripts\", is_flag=True, default=False)\ndef test(code_file, edit_scripts, timelimit, live):\n '''\n test code against the sample testcases.\n\n \\b\n this command (compiles and) runs passed code file.\n the code is run against all [.in] files in ./testcases folder.\n the output is produced in [.outx] files and checked against [.out] files\n\n it displays time for each testcase,status\n and diff of expected and produced outputs.\n '''\n if(edit_scripts):\n test_module.edit_scripts()\n\n if(not code_file):\n code_file = parse.get_code_file()\n code_file = parse.get_file_name(code_file)\n test_module.test(code_file, timelimit, live)\n\n\n@click.command()\n@click.option('-f', '--file', 'code_file', type=click.File(),\n help=\"the code file\")\ndef submit(code_file):\n '''\n submit a solution.\n\n you should be in a problem directory to submit\n\n \\b\n script will prompt you to login into the judge(if not already).\n this submits the problem using data in [.problem] file in current directory\n '''\n judge = parse.get_judge()\n if(not code_file):\n code_file = parse.get_code_file()\n code_file = parse.get_file_name(code_file)\n eval(judge).submit(code_file)\n\n\n@click.command()\ndef debug():\n '''\n launches custom debug interface (in future)\n where you can use testcase generator,\n launch debugger for the particular language\n and visualize the output\n '''\n click.echo(\n 'This functionality is not implemented in this version\\n' +\n 'The command is only kept for compactiblity with future 
versions\\n' +\n 'If you want to contribute to its developement visit:\\n' +\n 'https://termicoder.github.io/')\n\n\nmain.add_command(view)\nmain.add_command(setup)\nmain.add_command(code)\nmain.add_command(test)\nmain.add_command(submit)\nmain.add_command(debug)\n","sub_path":"termicoder/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"32203352","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nХанойская башня\nТри стержня и стопка дисков, каждый из которых немного меньше предыдущего.\nТребуется переставить все диски с одного стержня на другой, соблюдая два строгих условия.\nВо-первых, за раз можно было перемещать только один диск. Во-вторых, нельзя класть бОльший диск поверх меньшего.\n\"\"\"\n\nclass Hanoi:\n\n def hanoi(self, plates, _from, to):\n while plates > 0:\n using = 6 - (_from + to)\n plates -= 1\n self.hanoi(plates, _from, using)\n print('Move plate ' + str(_from) + '->' + str(to))\n _from = using\n\n\nif __name__ == \"__main__\":\n recObject = Hanoi()\n print(recObject.hanoi(5, 1, 3))\n","sub_path":"algorithms/recursion/hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"307026632","text":"from core.Box import *\n\nclass MoveBox(Box):\n\tdef config(self, params = None):\n\t\tself.canBeAccessedBy = []\n\t\tself.canBePushedBy = [Agent]\n\t\tself.canMove = ALL\n\t\tself.canFallOffEdge = False\n\t\tself.char = '+'\n\t\tself.color = 'yellow'\n\ndef Char(field, x, y, char):\n\n\t# Empty\n\tif char == ' ':\n\t\treturn Empty(field, x, y, False)\n\tif char == '.':\n\t\treturn Empty(field, x, y, True)\n\n\t# Agent\n\tif char == 'O':\n\t\treturn Agent(field, x, y)\n\n\t# Goal\n\tif char == '*':\n\t\treturn Goal(field, x, y)\n\n\t# Blocker\n\tif char == 'X':\n\t\treturn Blocker(field, x, y)\n\n\t# MoveBox\n\tif char == '+':\n\t\treturn MoveBox(field, x, y, ALL)\n\n","sub_path":"project/Sokoban/Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"458119610","text":"\"\"\"informer_batyr URL Configuration\n\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include, re_path\n\nfrom mk_rc import views, polls\n\nurlpatterns = [\n path('vsd/', views.vsd, name='vsd_url'),\n path('vsd_filtr/', views.vsd_filtr, name='vsd_filtr_url'),\n path('bd_vsd/', views.bd_vsd, name='bd_vsd_url'),\n path('form_bd_vsd', views.form_bd_vsd, name='form_bd_vsd_url'),\n path('edit_bd_vsd/', views.edit_bd_vsd, name='edit_bd_vsd_url'),\n path('delete_bd_vsd/', views.delete_bd_vsd, name='delete_bd_vsd_url'),\n\n path('uplink/', polls.upload, name='uplink'),\n path('exchange/(.*)', polls.exchange, name=\"exchange\"),\n path('import_1c/', polls.import_sheet, name=\"import_url\"),\n\n path('api_vsd_merc/', views.ApiVsdMerc.as_view(), name='api_vsd_merc_url'),\n path('api_vsd_merc/', views.ApiVsdMerc.as_view(), name='api_vsd_merc_detail_url'),\n]\n","sub_path":"mk_rc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"598489906","text":"from __future__ import print_function, division, absolute_import\n\nimport unittest\n\nimport numpy as np\n\nfrom openmdao.api import Problem, Group, 
IndepVarComp\nfrom openmdao.utils.assert_utils import assert_check_partials\n\nfrom CADRE.comm_dymos.comm_group import CommGroup\n\nGM = 398600.44\nrmag = 7000.0\nperiod = 2 * np.pi * np.sqrt(rmag ** 3 / GM)\nvcirc = np.sqrt(GM / rmag)\nduration = period / 1\ndelta_trua = 2 * np.pi * (duration / period)\n\nclass TestCommGroup(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n nn = 10\n\n p = cls.p = Problem(model=Group())\n\n ivc = p.model.add_subsystem('ivc', IndepVarComp(), promotes_outputs=['*'])\n\n ivc.add_output('time', val=np.ones(nn), units='s')\n ivc.add_output('r_e2b_I', val=np.zeros((nn, 3)), units='km')\n ivc.add_output('antAngle', val=np.zeros((nn,)), units='deg')\n ivc.add_output('P_comm', val=np.zeros((nn,)), units='W')\n ivc.add_output('O_BI', val=np.zeros((nn, 3, 3)))\n\n p.model.add_subsystem('comm_group',\n CommGroup(num_nodes=nn, lat_gs=0.01, lon_gs=0.0, alt_gs=0.0))\n\n p.model.connect('time', 'comm_group.t')\n p.model.connect('r_e2b_I', 'comm_group.r_e2b_I')\n p.model.connect('antAngle', 'comm_group.antAngle')\n p.model.connect('P_comm', 'comm_group.P_comm')\n p.model.connect('O_BI', 'comm_group.O_BI')\n\n p.setup(check=True, force_alloc_complex=True)\n\n p['time'] = np.linspace(0, 5400, nn)\n trua = delta_trua * p['time']\n\n p['r_e2b_I'][:, 0] = 6578.137 * np.cos(trua)\n p['r_e2b_I'][:, 1] = 6578.137 * np.sin(trua)\n p['r_e2b_I'][:, 2] = 0.0\n\n p['antAngle'] = 0.0\n p['P_comm'] = 10.0\n\n # For testing purposes just fix the body to the ECI frame\n p['O_BI'][:, 0, 0] = 1.0\n p['O_BI'][:, 1, 1] = 1.0\n p['O_BI'][:, 2, 2] = 1.0\n\n p.run_model()\n\n def test_partials(self):\n np.set_printoptions(linewidth=100000, edgeitems=10000)\n cpd = self.p.check_partials(method='fd', step=1.0E-6, step_calc='abs')\n assert_check_partials(cpd, atol=2.0E-5, rtol=1.0E-5)\n\n","sub_path":"CADRE/comm_dymos/test/test_comm_group.py","file_name":"test_comm_group.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"617009065","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Differentiation:\n def __init__(self, t, alpha):\n self.t = t\n self.alpha = alpha\n\n def derivative(self, y):\n t = self.t\n alpha = self.alpha\n h = t[2] - t[1]\n dy = [0]*len(y)\n dy[1] = 0\n y = y[:]\n t = t[:]\n w = [0]*len(y)\n w[1] = 1\n for i in range(2,len(t)):\n w[i] = w[i-1]*(1-(alpha+1)/float(i-1))\n for i in range(2,len(t)):\n for j in range(1,i):\n for k in xrange(i,0,-1):\n dy[i] += w[j]*y[k]/(h**alpha)\n return dy\n\n def plot_func(self, y1, y2):\n t = self.t\n for k in range(8):\n self.alpha += .1\n dy1 = self.derivative(y1)\n dy2 = self.derivative(y2)\n plt.hold(True)\n plt.subplot(211).set_title(\"y=x^2\")\n plt.plot(t,dy1)\n plt.axis([0,10,0,20])\n plt.subplot(212).set_title(\"y=x^4\")\n plt.plot(t,dy2)\n plt.axis([0,10,0,20])\n plt.show()\n","sub_path":"Fractional_Order_Systems/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"433598287","text":"import math\nimport shutil\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\nfrom sklearn.metrics import roc_auc_score\n\nfrom SSHProject.BasicTool.MeDIT.Augment import *\nfrom SSHProject.BasicTool.MeDIT.Others import MakeFolder, CopyFile\n\nfrom SSHProject.CnnTools.T4T.Block.ConvBlock import ConvBn2D\nfrom 
SSHProject.CnnTools.T4T.Utility.Data import *\nfrom SSHProject.CnnTools.T4T.Utility.CallBacks import EarlyStopping\nfrom SSHProject.CnnTools.T4T.Utility.Initial import HeWeightInit\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_planes, ratio=8):\n super(ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n\n self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n out = avg_out + max_out\n\n return self.sigmoid(out)\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n\n assert kernel_size in (3, 7), 'kernel size must be 3 or 7'\n padding = 3 if kernel_size == 7 else 1\n\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n\n return self.sigmoid(x)\n\n\nclass Bottleneck(nn.Module):\n\n def __init__(self, inplanes, planes, baseWidth=4, cardinality=32,\n stride=1, downsample=None, downstride=2):\n \"\"\" Constructor\n Args:\n inplanes: input channel dimensionality\n planes: output channel dimensionality\n baseWidth: base width.\n cardinality: num of convolution groups.\n stride: conv stride. 
Replaces pooling layer.\n \"\"\"\n super(Bottleneck, self).__init__()\n\n conv_planes = planes // 2\n\n D = int(math.floor(conv_planes * (baseWidth / 16)))\n C = cardinality\n\n self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(D * C)\n self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)\n self.bn2 = nn.BatchNorm2d(D * C)\n self.conv3 = nn.Conv2d(D * C, planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n\n self.ca = ChannelAttention(planes)\n self.sa = SpatialAttention()\n\n if downsample is True:\n self.downsample = nn.Sequential(\n conv1x1(inplanes, conv_planes * 2, downstride),\n nn.BatchNorm2d(conv_planes * 2),\n )\n else:\n self.downsample = None\n\n def forward(self, x):\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out = self.ca(out) * out\n out = self.sa(out) * out\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Layer1(nn.Module):\n def __init__(self, inplanes, outplanes, cardinality=32):\n super(Layer1, self).__init__()\n self.layer1_0 = Bottleneck(inplanes=inplanes, planes=outplanes, cardinality=cardinality,\n downsample=True, downstride=1)\n self.layer1_1 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer1_2 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n\n def forward(self, x):\n x = self.layer1_0(x)\n x = self.layer1_1(x)\n x = self.layer1_2(x)\n return x\n\n\nclass Layer2(nn.Module):\n def __init__(self, inplanes, outplanes, cardinality=32):\n self.inplanes = inplanes\n\n super(Layer2, self).__init__()\n self.layer2_0 = Bottleneck(inplanes=inplanes, planes=outplanes, cardinality=cardinality,\n stride=2, downsample=True)\n self.layer2_1 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer2_2 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer2_3 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n\n def forward(self, x):\n x = self.layer2_0(x)\n x = self.layer2_1(x)\n x = self.layer2_2(x)\n x = self.layer2_3(x)\n\n return x\n\n\nclass Layer3(nn.Module):\n def __init__(self, inplanes, outplanes, cardinality=32):\n super(Layer3, self).__init__()\n self.layer3_0 = Bottleneck(inplanes=inplanes, planes=outplanes, cardinality=cardinality,\n stride=2, downsample=True)\n self.layer3_1 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer3_2 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer3_3 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n\n def forward(self, x):\n x = self.layer3_0(x)\n x = self.layer3_1(x)\n x = self.layer3_2(x)\n x = self.layer3_3(x)\n return x\n\n\nclass Layer4(nn.Module):\n def __init__(self, inplanes, outplanes, cardinality=32):\n super(Layer4, self).__init__()\n self.layer4_0 = Bottleneck(inplanes=inplanes, planes=outplanes, cardinality=cardinality,\n stride=2, downsample=True)\n self.layer4_1 = Bottleneck(inplanes=outplanes, planes=outplanes, cardinality=cardinality)\n self.layer4_2 = Bottleneck(inplanes=outplanes, planes=outplanes, 
cardinality=cardinality)\n\n def forward(self, x):\n x = self.layer4_0(x)\n x = self.layer4_1(x)\n x = self.layer4_2(x)\n\n return x\n\n\nclass ResnextCBAM(nn.Module):\n def __init__(self, in_channels, num_classes, inplanes=32):\n \"\"\" Constructor\n Args:\n baseWidth: baseWidth for ResNeXt.\n cardinality: number of convolution groups.\n layers: config of layers, e.g., [3, 4, 6, 3]\n num_classes: number of classes\n \"\"\"\n super(ResnextCBAM, self).__init__()\n\n self.conv1 = ConvBn2D(in_channels, inplanes)\n self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = Layer1(inplanes, inplanes * 2, cardinality=32)\n self.layer2 = Layer2(inplanes * 2, inplanes * 4, cardinality=32)\n self.layer3 = Layer3(inplanes * 4, inplanes * 6, cardinality=32)\n self.layer4 = Layer4(inplanes * 6, inplanes * 8, cardinality=32)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc1 = nn.Sequential(nn.Linear(inplanes * 8, inplanes),\n nn.Dropout(0.5),\n nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(inplanes, num_classes)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, t2, adc, dwi):\n inputs = torch.cat([t2, adc, dwi], dim=1)\n x = self.conv1(inputs)\n x = self.maxpool1(x) # shape = (92, 92)\n\n x = self.layer1(x) # shape = (92, 92)\n x = self.layer2(x) # shape = (46, 46)\n x = self.layer3(x) # shape = (23, 23)\n x = self.layer4(x) # shape = (12, 12)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x_fc1 = self.fc1(x)\n x = self.fc2(x_fc1)\n return torch.softmax(x, dim=1)\n\n\ndef test():\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = ResnextCBAM(3, num_classes=2).to(device)\n print(model)\n inputs = torch.randn(1, 1, 184, 184).to(device)\n prediction = model(inputs, inputs, inputs)\n print(prediction.shape)\n\n\nmodel_root = r'/home/zhangyihong/Documents/ProstateECE/BaseModel'\ndata_root = r'/home/zhangyihong/Documents/ProstateECE/NPYMaxPred'\n\n\n# def ClearGraphPath(graph_path):\n# if not os.path.exists(graph_path):\n# os.mkdir(graph_path)\n# else:\n# shutil.rmtree(graph_path)\n# os.mkdir(graph_path)\n\n\ndef _GetLoader(sub_list, aug_param_config, input_shape, batch_size, shuffle):\n data = DataManager(sub_list=sub_list, augment_param=aug_param_config)\n\n data.AddOne(Image2D(data_root + '/Train/T2Slice', shape=input_shape))\n data.AddOne(Image2D(data_root + '/Train/AdcSlice', shape=input_shape))\n data.AddOne(Image2D(data_root + '/Train/DwiSlice', shape=input_shape))\n data.AddOne(Label(data_root + '/ece.csv'), is_input=False)\n data.Balance(Label(data_root + '/ece.csv'))\n loader = DataLoader(data, batch_size=batch_size, shuffle=shuffle)\n batches = np.ceil(len(data.indexes) / batch_size)\n return loader, batches\n\n\ndef EnsembleTrain():\n torch.autograd.set_detect_anomaly(True)\n\n device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')\n input_shape = (192, 192)\n total_epoch = 10000\n batch_size = 24\n model_folder = MakeFolder(model_root + '/ResnextCBAM')\n\n param_config = {\n RotateTransform.name: {'theta': ['uniform', -10, 10]},\n ShiftTransform.name: {'horizontal_shift': ['uniform', -0.05, 0.05],\n 'vertical_shift': ['uniform', -0.05, 0.05]},\n ZoomTransform.name: {'horizontal_zoom': ['uniform', 0.95, 1.05],\n 'vertical_zoom': ['uniform', 0.95, 1.05]},\n FlipTransform.name: 
{'horizontal_flip': ['choice', True, False]},\n BiasTransform.name: {'center': ['uniform', -1., 1., 2],\n 'drop_ratio': ['uniform', 0., 1.]},\n NoiseTransform.name: {'noise_sigma': ['uniform', 0., 0.03]},\n ContrastTransform.name: {'factor': ['uniform', 0.8, 1.2]},\n GammaTransform.name: {'gamma': ['uniform', 0.8, 1.2]},\n ElasticTransform.name: ['elastic', 1, 0.1, 256]\n }\n\n sub_train_path = data_root + '/train_name_basemodel.csv'\n sub_val_path = data_root + '/val_name_basemodel.csv'\n sub_train = pd.read_csv(sub_train_path).values.tolist()[0]\n sub_val = pd.read_csv(sub_val_path).values.tolist()[0]\n train_loader, train_batches = _GetLoader(sub_train, param_config, input_shape, batch_size, True)\n val_loader, val_batches = _GetLoader(sub_val, param_config, input_shape, batch_size, True)\n\n model = ResnextCBAM(3, 2).to(device)\n model.apply(HeWeightInit)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n loss1 = torch.nn.NLLLoss()\n\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=10, factor=0.5,\n verbose=True)\n early_stopping = EarlyStopping(store_path=os.path.join(model_folder, '{}-{:.6f}.pt'), patience=50, verbose=True)\n writer = SummaryWriter(log_dir=os.path.join(model_folder, 'log'), comment='Net')\n\n for epoch in range(total_epoch):\n train_loss, val_loss = 0., 0.\n\n model.train()\n pred_list, label_list = [], []\n for ind, (inputs, outputs) in enumerate(train_loader):\n optimizer.zero_grad()\n\n inputs = MoveTensorsToDevice(inputs, device)\n outputs = MoveTensorsToDevice(outputs, device)\n\n preds = model(*inputs)\n\n loss = loss1(preds, outputs.long())\n\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n\n pred_list.extend(preds[:, 1].cpu().data.numpy().tolist())\n label_list.extend(outputs.cpu().data.numpy().tolist())\n\n train_auc = roc_auc_score(label_list, pred_list)\n\n model.eval()\n pred_list, label_list = [], []\n with torch.no_grad():\n for ind, (inputs, outputs) in enumerate(val_loader):\n inputs = MoveTensorsToDevice(inputs, device)\n outputs = MoveTensorsToDevice(outputs, device)\n\n preds = model(*inputs)\n\n loss = loss1(preds, outputs.long())\n\n val_loss += loss.item()\n\n pred_list.extend(preds[:, 1].cpu().data.numpy().tolist())\n label_list.extend(outputs.cpu().data.numpy().tolist())\n\n val_auc = roc_auc_score(label_list, pred_list)\n\n # Save Tensor Board\n for index, (name, param) in enumerate(model.named_parameters()):\n if 'bn' not in name:\n writer.add_histogram(name + '_data', param.cpu().data.numpy(), epoch + 1)\n\n writer.add_scalars('Loss',\n {'train_loss': train_loss / train_batches,\n 'val_loss': val_loss / val_batches}, epoch + 1)\n writer.add_scalars('Auc',\n {'train_auc': train_auc,\n 'val_auc': val_auc}, epoch + 1)\n\n print('Epoch {}: loss: {:.3f}, val-loss: {:.3f}, auc: {:.3f}, val-auc: {:.3f}'.format(\n epoch + 1, train_loss / train_batches, val_loss / val_batches,\n train_auc, val_auc\n ))\n\n scheduler.step(val_loss)\n early_stopping(val_loss, model, (epoch + 1, val_loss))\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n writer.flush()\n writer.close()\n\n\nif __name__ == '__main__':\n # test()\n EnsembleTrain()","sub_path":"ControlGroup/BaseModel/ResnextCBAM.py","file_name":"ResnextCBAM.py","file_ext":"py","file_size_in_byte":14547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"361257551","text":"#!/usr/bin/env python3\n\n\ndef my_for(iterable, func):\n iterator = iter(iterable)\n 
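# Explicit version of what a for statement does implicitly: iter() obtains the\r\n    # iterator once, next() advances it, and StopIteration signals exhaustion.\r\n    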
while True:\n try:\n i = next(iterator)\n except StopIteration:\n print('End of Iterator')\n break\n else:\n func(i)\n\n\ndef square(x):\n print(x ** 2)\n\n\na: list = [1, 2, 3, 4, 5]\nb: str = 'Hello, World!'\n\nmy_for(a, print)\nmy_for(b, print)\nmy_for(a, square)\n","sub_path":"lessons/12_iterators_generators/0-1_for.py","file_name":"0-1_for.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"530383915","text":"import sys\r\ndef get_matrix():\r\n\tmatrix = []\r\n\trownum,columnnum = map(int,input().split())\r\n\tfor row in range(rownum):\r\n\t\tcurrent_list = []\r\n\t\tcurrent_row = input().split()\r\n\t\tfor column in current_row:\r\n\t\t\tcurrent_list.append(float(column))\r\n\t\tmatrix.append(current_list)\r\n\treturn matrix\r\n\t\t\r\n#Only for n by n-1 equation\r\n'''def get_matrix():\r\n\tmatrix = []\r\n\tmatrix.append([])\r\n\tcurrent_row = input().split()\r\n\tlength = len(current_row) - 1\r\n\tfor element in current_row:\r\n\t\tmatrix[0].append(int(element))\r\n\tfor row in range(length - 1):\r\n\t\tlist1 = []\r\n\t\tcurrent_row = input().split()\r\n\t\tfor element in current_row:\r\n\t\t\tlist1.append(int(element))\r\n\t\tmatrix.append(list1)\r\n\treturn matrix''' \r\n\r\ndef print_matrix(matrix):\r\n\tfor row in range(len(matrix)):\r\n\t\tfor column in range(len(matrix[row])):\r\n\t\t\tprint(matrix[row][column],end = ' ')\r\n\t\tprint()\r\n\r\n#Gauss algorithm first step:elimination\r\ndef Gauss1_elimination(matrix):\r\n\tfor row in range(len(matrix) - 1):\r\n\t\twhile matrix[row][row] == 0:\r\n\t\t\tmatrix[row],matrix[row+1] = matrix[row+1],matrix[row]\r\n\t\tfor i in range(row +1,len(matrix)):\r\n\t\t\tdivde = matrix[i][row] / matrix[row][row]\r\n\t\t\tfor column in range(row,len(matrix[row+1])):\r\n\t\t\t\tmatrix[i][column] -= matrix[row][column] * divde\r\n\treturn matrix\r\n\t\r\n#Gauss algorithm second step:substitution\r\ndef Gauss2_substitution(matrix):\r\n\tfor row in range(len(matrix)-1,0,-1):\r\n\t\tif matrix[row][len(matrix[row]) - 1] == 0:\r\n\t\t\tfor column in matrix[row]:\r\n\t\t\t\tif column != 0:\r\n\t\t\t\t\tprint('-----------result-------------')\r\n\t\t\t\t\tprint('No root')\r\n\t\t\t\t\tsys.exit(0)\r\n\r\n\t\tfine = False\r\n\t\tif matrix[row][len(matrix[row]) - 1] != 0:\r\n\t\t\tfor j in range(len(matrix[row]) -1):\r\n\t\t\t\tif matrix[row][j] != 0:\r\n\t\t\t\t\tfine = True\r\n\t\t\tif not fine:\r\n\t\t\t\tprint('-----------result-------------')\r\n\t\t\t\tprint('No root')\r\n\t\t\t\tsys.exit(0)\r\n\t\t\t\tbreak\r\n\t\tif matrix[row][row] == 0:\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tfor i in range(row-1,-1,-1):\r\n\t\t\t\tdivde = matrix[i][row] / matrix[row][row]\r\n\t\t\t\tfor column in range(len(matrix[i])-1,row-1,-1):\r\n\t\t\t\t\tmatrix[i][column] -= divde * matrix[row][column]\r\n\t\t\tfor column in range(len(matrix[row]) - 1,row - 1, -1):\r\n\t\t\t\tmatrix[row][column] /= matrix[row][row]\r\n\tfor column in range(len(matrix[0]) - 1,-1,-1):\r\n\t\tmatrix[0][column] /= matrix[0][0]\r\n\treturn matrix\r\n\r\ndef main():\r\n\t matrix1 = get_matrix()\r\n\t matrix2 = Gauss1_elimination(matrix1)\r\n\t matrix3 = Gauss2_substitution(matrix2)\r\n\t print('-----------result-------------')\r\n\t print_matrix(matrix3)\r\n\r\n\r\nif __name__ == 
'__main__':\r\n\tmain()\r\n\r\n","sub_path":"Algorithm/Gauss-Jordan_Algorithm.py","file_name":"Gauss-Jordan_Algorithm.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"151785751","text":"import csv\nimport psycopg2\nfrom setting import run_path\n\n\ndef add_csv_datas(csvname, csvrow):\n    # dataPath = os.path.dirname(path)  # get the parent directory of the current path\n    csvPath = run_path + '\\\\data\\\\' + csvname\n    rows = []\n    with open(csvPath, 'r') as csvFile:\n        datas = csv.reader(csvFile)\n        for row in datas:\n            rows.append(row)\n    return rows[csvrow][1]\n\n\nclass postgreSql:\n    \"\"\"Database operation helpers\"\"\"\n\n    def __init__(self):\n        super(postgreSql, self).__init__()\n        self.conn = psycopg2.connect(database='testnewoms', user='postgres', password='123123', host='192.168.0.252', port='5432')\n        self.cur = self.conn.cursor()\n\n    def isConsExist(self, customerCode):\n        # check whether the given customer code exists; return True if it does\n        order = \"select * from tbl_customer where customer_code='%s'\" % customerCode\n        self.cur.execute(order)\n        rows = self.cur.fetchall()\n        if rows == []:\n            return False\n        else:\n            return True\n\n    def tableCount(self, table):\n        # return the number of records in the table\n        order = \"select count (*) from %s\" % table\n        self.cur.execute(order)\n        count = self.cur.fetchone()\n        return count[0]\n\n    def consOderBy(self):\n        # find the most recently added customer code\n        order = \"SELECT customer_code FROM tbl_customer ORDER BY customer_code DESC\"\n        self.cur.execute(order)\n        code = self.cur.fetchone()\n        return code[0]\n\n    def __del__(self):\n        self.conn.close()\n        del self.conn\n# a = postgreSql()\n# print(a.consOderBy())\n","sub_path":"test_case/public/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"187764355","text":"import re\nimport math\nimport datetime\n\nt = \"I want to be a professor, but only one thing I can solve is x=exp(8)+sqrt(17)+log(5)\"\n\n\ndef process(text):\n    re_match = re.search('x=', text)\n    equation = text[re_match.span()[1]:]\n    ops = {'exp': math.exp, 'sqrt': math.sqrt, 'log': math.log}\n    actions = equation.split('+')\n    result = 0\n    for action in actions:\n        action = action.replace('(', ' ').replace(')', '')\n        op, value = action.split(' ')\n        result += ops[op](int(value))\n    report = \"Result = {response}; Date {date}\".format(\n        response=result,\n        date=datetime.datetime.now().strftime('%d/%m/%y')\n    )\n    print(report)\n\n\nprocess(t)\n","sub_path":"007_Samples/tasks_2.py","file_name":"tasks_2.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"304865585","text":"from sklearn.feature_selection import VarianceThreshold\r\nfrom skimage.exposure import equalize_adapthist\r\nfrom skimage.util import view_as_windows, view_as_blocks\r\nfrom skimage.data import load\r\nfrom scipy.signal import medfilt\r\nimport numpy as np\r\nimport matplotlib.image as mpimg\r\nimport matplotlib.cm as cm\r\nfrom skimage.draw import circle_perimeter\r\nimport multiprocessing\r\nfrom sklearn.cluster import MiniBatchKMeans\r\nimport csv\r\nimport time\r\nimport os\r\nfrom ..distance.distance import cdist as native_cdist\r\n\r\n\r\n#calculates entropy of an image\r\n#used as a feature for dataset clustering\r\ndef calc_entropy(img):\r\n    from scipy.stats import entropy\r\n    hist, bins = np.histogram(img.ravel(), 256, [0,256])\r\n    distribution = 
hist.astype(np.float32)/bins.shape[0]\r\n return entropy(distribution)\r\n\r\n\r\n#clusters images into k clusters\r\ndef cluster_images(path, k, batch_size):\r\n \"\"\"\r\n :param path: path to a folder with image files\r\n :return:\r\n \"\"\"\r\n dir = os.listdir(path)\r\n images = np.zeros((len(dir), 3))\r\n\r\n for i, imgname in enumerate(dir):\r\n img = load(path + imgname)\r\n images[i, 0] = np.mean(img)\r\n images[i, 1] = np.var(img)\r\n images[i, 2] = calc_entropy(img)\r\n print(str(i) + \"/\" + str(len(dir)))\r\n\r\n\r\n estimator = MiniBatchKMeans(n_clusters=k, verbose=True, batch_size=batch_size)\r\n estimator.fit(images)\r\n from sklearn.externals import joblib\r\n joblib.dump(estimator, 'estimator.pkl')\r\n np.save('data.npy', images)\r\n\r\n\r\n#select centroids with variance higher than average\r\ndef select_centroids(centroids):\r\n \"\"\"\r\n :param centroids: learned centroids\r\n :return: new_centroids: (without centroids with variance < avg_variance(centroids))\r\n \"\"\"\r\n sel = VarianceThreshold(threshold=np.var(centroids))\r\n new_centroids = sel.fit_transform(centroids.T)\r\n new_centroids = new_centroids.T\r\n return new_centroids\r\n\r\n\r\n#create centroids image\r\n#quadratic grid of centroids\r\ndef centroids_to_image(centroids, image_size, rfsize, g=0):\r\n \"\"\"\r\n\r\n :param image_size: image is nxn\r\n :param rfsize: receptive field size\r\n :param g:\r\n :return: .png image with centroid images in a grid\r\n \"\"\"\r\n margin = image_size / rfsize\r\n horizontal = []\r\n for i in range(margin):\r\n for j in range(margin):\r\n if j == 0:\r\n m_img = np.reshape(centroids[i*margin + j,:], (rfsize, rfsize))\r\n else:\r\n m_img = np.hstack((m_img, np.reshape(centroids[i*margin + j,:], (rfsize, rfsize))))\r\n horizontal.append(m_img)\r\n\r\n img = horizontal[0]\r\n for m_img in horizontal[1:]:\r\n img = np.vstack((img, m_img))\r\n\r\n mpimg.imsave('centroids_{0}{1}.png'.format(rfsize, g), img, cmap=cm.Greys_r)\r\n\r\n#extract patches from an image\r\ndef extract_patches(path, numPatchesPerImage, patchSize):\r\n\r\n \"\"\"\r\n :param path: path to a RGB fundus image\r\n :param numPatchesPerImage: number of patches to extract per image\r\n :param patchSize: patch is nxn size\r\n :return: patches: matrix with an image patch in each row\r\n \"\"\"\r\n\r\n img = load(path)\r\n img = img[:,:,1]\r\n #contrast enhancemenet\r\n img = equalize_adapthist(img)\r\n windows = view_as_windows(img, (patchSize,patchSize))\r\n j = 0\r\n patches = np.zeros((numPatchesPerImage, patchSize*patchSize))\r\n while(j < numPatchesPerImage):\r\n \r\n sx = np.random.randint(0, windows.shape[0] - 1) \r\n sy = np.random.randint(0, windows.shape[0] - 1)\r\n x = (patchSize/2 - 1) + sx\r\n y = (patchSize/2 - 1) + sy\r\n r = (img.shape[0]/2) - 1\r\n\r\n if np.sqrt((x - r) ** 2 + (y - r) **2 ) < r:\r\n patch = windows[sx, sy, :].flatten() \r\n patches[j,:] = patch\r\n j += 1 \r\n else:\r\n if j > 0:\r\n j -= 1 \r\n \r\n return patches\r\n\r\n\r\n#retina shape can be approximated with a circle of a radius = img.height/2 - 1\r\ndef get_perimeter(img):\r\n \"\"\"\r\n :param img:\r\n :return:\r\n \"\"\"\r\n cx = img.shape[0]/2\r\n cc, rr = circle_perimeter(cx - 1, cx - 1, 256)\r\n return rr, cc\r\n\r\n#parallelized function for patch extraction\r\n#sets the pool size to the number of cpus available\r\n#actually usable only locally. 
not really portable to work\r\ndef extract(rfSize, path):\r\n \"\"\"\r\n :param rfSize: receptive field size\r\n :param path:\r\n :return:\r\n \"\"\"\r\n nums = {0:1, 1:5, 2:2, 3:10, 4:10}\r\n #labels_dict = open_csv()\r\n #currently a dummy variable, should contain labels of train examples\r\n labels_dict = {}\r\n\r\n for j in range(0, 5):\r\n if j == 0:\r\n images = labels_dict[j][:10000]\r\n\r\n numOfPatches = nums[j]\r\n# path = '/home/jbzik/Documents/Diplomski_Bzik/jbzik_kaggle_data/data/resized/trainOriginal/'\r\n\r\n patches = np.zeros((len(images)*numOfPatches, rfSize*rfSize))\r\n \r\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\r\n results = [pool.apply_async(extract_patches, args=(path + images[i] + '.jpeg', numOfPatches, rfSize,)) for i in range(len(images))]\r\n \r\n\r\n for i in range(len(images)):\r\n patches[i*numOfPatches: (i*numOfPatches) + numOfPatches,:] = np.array(results[i].get())\r\n\r\n if i % 10000 == 0:\r\n print(\"Extracting {0}/{1} patch\".format(i*nums[j], nums[j]*len(images)))\r\n\r\n print(\"Finished\")\r\n patches = patches[~np.isnan(patches).any(axis=1)]\r\n \r\n np.save('core/patches_{0}_{1}.npy'.format(j, rfSize), patches)\r\n\r\n\r\n#@profile\r\ndef kmeans(rfsize):\r\n \"\"\"\r\n :param rfsize: receptive field size\r\n :return: centroids (patches that represent some cluster of similar patches)\r\n \"\"\"\r\n\r\n ###########################################################\r\n #in this part patches should be generated or read from a file to the variable patches\r\n #my code is not portable\r\n #it's tied to files that were generated locally\r\n patches0 = np.load('core/patches_0_{0}.npy'.format(rfsize))\r\n patches1 = np.load('core/patches_1_{0}.npy'.format(rfsize))\r\n patches = np.vstack((patches0, patches1))\r\n patches0 = np.load('core/patches_2_{0}.npy'.format(rfsize))\r\n patches1 = np.load('core/patches_3_{0}.npy'.format(rfsize))\r\n patches = np.vstack((patches, patches0, patches1))\r\n patches0 = np.load('core/patches_4_{0}.npy'.format(rfsize))\r\n patches = np.vstack((patches, patches0))\r\n ############################################################\r\n\r\n\r\n #data normalization (standardization)\r\n p_mean = np.mean(patches, axis=1, dtype=np.float32, keepdims=True)\r\n p_var = np.var(patches, axis=1, dtype=np.float32, keepdims=True)\r\n off_matrix = 10.0 * np.ones(p_var.shape)\r\n patches = (patches - p_mean) / np.sqrt(p_var + off_matrix)\r\n\r\n #data whitening\r\n covariance_matrix = np.cov(patches, y=None, rowvar=0, ddof=1).T\r\n mean = np.mean(patches, axis=0, dtype=np.float32, keepdims=False)\r\n eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)\r\n U = np.dot(np.dot(eigenvectors, np.diag(np.diag(np.sqrt(1./(np.diagflat(eigenvalues) + 0.1))))), eigenvectors.T)\r\n\r\n #load whitening parameters that were saved earlier\r\n #M = np.load('train_mean_windowsize.npy')\r\n #P = np.load('train_eigenvectors_windowsize.npy')\r\n patches = np.dot((patches - mean), U)\r\n\r\n\r\n #save whitening parameters for use later\r\n np.save('core/train_mean_windowsize', mean)\r\n np.save('core/train_eigenvectors_windowsize', U)\r\n\r\n #set n_clusters and estimate centroids using minibatch kmeans\r\n #https://algorithmicthoughts.wordpress.com/2013/07/26/machine-learning-mini-batch-k-means/\r\n n_clusters = int(np.sqrt(patches.shape[0]/2))\r\n estimator = MiniBatchKMeans(n_clusters=n_clusters, verbose=True, batch_size=1000, compute_labels=False)\r\n estimator.fit(patches)\r\n\r\n #save centroids\r\n np.save('core/centroids.npy', 
estimator.cluster_centers_)\r\n\r\ndef pool(q):\r\n \"\"\"\r\n :param q:\r\n :return:\r\n \"\"\"\r\n return np.array([np.sum(np.sum(q, axis=1), axis=0)])\r\n\r\n#quadrant pooling\r\ndef pool_quadrant(patches, rooti, rootj, i, j, iz, jw):\r\n \"\"\"\r\n :param patches:\r\n :param rooti:\r\n :param rootj:\r\n :param i:\r\n :param j:\r\n :param iz:\r\n :param jw:\r\n :return:\r\n \"\"\"\r\n q1 = pool(patches[rooti:rooti+i, rootj:rootj+j, :])\r\n #q2 = pool(patches[rooti+i:iz, rootj:rootj+j, :])\r\n q3 = pool(patches[rooti:rooti+i, rootj+j:jw, :])\r\n #q4 = pool(patches[rooti+i:iz, rootj+j:jw, :])\r\n\r\n q = np.vstack((q1,q3)).flatten()\r\n return q\r\n\r\n\r\n#generate features for an image based on learned centroids\r\ndef extract_features_img(path, centroids, rfSize, M, U, stride, normal_pooling=True):\r\n \"\"\"\r\n :param path: path to RGB retina image\r\n :param centroids: learned centroids\r\n :param rfSize: receptive field size\r\n :param M: whitening parameter\r\n :param P: whitening parameter\r\n :param stride: parameter that defines the density of windows that are extracted from an image\r\n :param normal_pooling: if true:\r\n divide in 4 regions and pool each one\r\n else: divide in 16 regions and pool each one\r\n :return:feature_vector\r\n \"\"\"\r\n\r\n img = load(path)\r\n try:\r\n img = img[:,:,1]\r\n except:\r\n return None\r\n\r\n #contrast enhancing\r\n img = equalize_adapthist(img)\r\n numFeats = img.shape[0] * img.shape[1]\r\n numCentroids = centroids.shape[0]\r\n\r\n #extract dense patches with predefined stride\r\n #smaller the stride, slower the function\r\n windows = view_as_windows(img, (rfSize, rfSize), stride)\r\n patches = np.reshape(windows, (windows.shape[0]*windows.shape[1], rfSize*rfSize))\r\n\r\n #data normalization\r\n p_mean = np.mean(patches, axis=1, dtype=np.float32, keepdims=True)\r\n p_var = np.var(patches, axis=1, dtype=np.float32, ddof=1, keepdims=True)\r\n off_matrix = 10.0 * np.ones(p_var.shape)\r\n patches = (patches - p_mean) / np.sqrt(p_var + off_matrix)\r\n \r\n patches = np.dot((patches - M), U)\r\n \r\n #calculate distance from all patches to all centroids\r\n z = native_cdist(patches, centroids)\r\n \r\n #mean distance from each patch to all centroids\r\n #triangle activation function\r\n mu = np.tile(np.array([np.mean(z, axis = 1)]).T, (1, centroids.shape[0]))\r\n patches = np.maximum(mu - z, np.zeros(mu.shape))\r\n\r\n rows = (img.shape[0] - rfSize + stride)/stride\r\n columns = (img.shape[1] - rfSize + stride)/stride\r\n \r\n patches = np.reshape(patches, (rows, columns, numCentroids))\r\n\r\n #starting points\r\n #central point # of the patches \"image\"\r\n halfr = np.round(float(rows)/2)\r\n halfc = np.round(float(columns)/2)\r\n\r\n #pool quadrants\r\n if normal_pooling: \r\n q1 = np.array([np.sum(np.sum(patches[0:halfc, 0:halfr, :], axis = 1),axis = 0)])\r\n q2 = np.array([np.sum(np.sum(patches[halfc:patches.shape[0], 0:halfr, :], axis = 1),axis = 0)])\r\n q3 = np.array([np.sum(np.sum(patches[0:halfc, halfr:patches.shape[1], :], axis = 1),axis = 0)])\r\n q4 = np.array([np.sum(np.sum(patches[halfc:patches.shape[0], halfr:patches.shape[1], :], axis = 1),axis = 0)])\r\n feature_vector = np.vstack((q1,q2,q3,q4)).flatten()\r\n\r\n else:\r\n \r\n quartr = np.round(float(rows)/4)\r\n quartc = np.round(float(columns)/2)\r\n q1 = pool_quadrant(patches, 0, 0, quartc, quartr, halfc, halfr) \r\n q2 = pool_quadrant(patches, halfc, 0, quartc, quartr, patches.shape[0], halfr) \r\n q3 = pool_quadrant(patches, 0, halfr, quartc, quartr, halfc, 
patches.shape[1])\r\n q4 = pool_quadrant(patches, halfc, halfr, quartc, quartr, patches.shape[0], patches.shape[1])\r\n feature_vector = np.vstack((q1, q2, q3, q4)).flatten()\r\n\r\n \r\n return feature_vector\r\n\r\n\r\n\r\n#function is not really usable\r\n#extracts features from all training images\r\ndef extract_features_all(rfSize, stride=False, train=True):\r\n images_dict = []\r\n if train:\r\n path = ''\r\n else:\r\n path =''\r\n images = []\r\n labels = []\r\n\r\n for folder in os.listdir(path):\r\n if 'train_sm' in folder:\r\n temp = os.listdir(path + folder)\r\n images += [path + folder + '/' + c for c in temp if c.split('.')[0] not in images_dict[0]]\r\n \r\n for l in temp:\r\n label = l.split('.')[0]\r\n if label in images_dict[1]:\r\n labels.append(1)\r\n elif label in images_dict[2]:\r\n labels.append(2)\r\n elif label in images_dict[3]:\r\n labels.append(3)\r\n elif label in images_dict[4]:\r\n labels.append(4)\r\n \r\n if stride:\r\n stride = stride\r\n else:\r\n stride = rfSize/2\r\n\r\n centroids = np.load('core/{0}vs512/centroids16_selected.npy'.format(rfSize))\r\n M = np.load('core/{0}vs512/train_mean_windowsize.npy'.format(rfSize))\r\n P = np.load('core/{0}vs512/train_eigenvectors_windowsize.npy'.format(rfSize))\r\n \r\n if train:\r\n numOfExamples = 0\r\n images = []\r\n for i in range(5):\r\n numOfExamples += len(images_dict[i])\r\n images += images_dict[i]\r\n else:\r\n# images = os.listdir(path)\r\n numOfExamples = len(images)\r\n \r\n X = np.zeros((numOfExamples, centroids.shape[0]*8))\r\n \r\n for i in range(len(images)):\r\n s = time.time()\r\n if train:\r\n X[i,:] = extract_features_img(path + images[i] + '.jpeg', centroids, rfSize, M, P, stride, False)\r\n else:\r\n print(images[i])\r\n X[i,:] = extract_features_img(images[i], centroids, rfSize, M, P, stride, False)\r\n \r\n\r\n #print X[i,:]\r\n print(X[i,:].shape)\r\n end = time.time()\r\n print(\"Sample {0}/{1}\".format(i, len(images)))\r\n print(images[i])\r\n print(end - s)\r\n\r\n if train: \r\n np.save('train_features_{0}_dense.npy'.format(rfSize), X)\r\n else:\r\n np.save('additional_features_{0}.npy'.format(rfSize), X)\r\n np.save('additional_labels.npy', np.array(labels))\r\n\r\n\r\n##########################################################################\r\n#Not sure if this part with second layer features works\r\n#It should be the same process again\r\n#It resulted with a mild increase of precision and recall of the final model\r\ndef extract_second_layer_centroids(features):\r\n \r\n patches = np.zeros((features.shape[0]*100, 100))\r\n\r\n for i in range(features.shape[0]):\r\n for j in range(50):\r\n patch = np.random.choice(features[i,:], 100)\r\n print(patch)\r\n patches[i*50 + j,] = patch\r\n print(\"{0}/{1}\".format(i*50+j, features.shape[0]*50)) \r\n \r\n p_mean = np.mean(patches, axis=1, dtype=np.float32, keepdims=True)\r\n p_var = np.var(patches, axis=1, dtype=np.float32, ddof=1, keepdims=True)\r\n off_matrix = 10.0 * np.ones(p_var.shape)\r\n patches = (patches - p_mean) / np.sqrt(p_var+ off_matrix)\r\n\r\n numCentroids = int(np.sqrt(patches.shape[0]/2))\r\n estimator = MiniBatchKMeans(numCentroids, verbose=True, batch_size=1000, compute_labels=False)\r\n patches = patches[~np.isnan(patches).any(axis=1)]\r\n estimator.fit(patches)\r\n\r\n\r\n np.save('second_layer_centroids.npy', estimator.cluster_centers_)\r\n\r\ndef extract_second_layer_features(centroids, features):\r\n \r\n s_features = np.zeros((features.shape[0], 4*centroids.shape[0]))\r\n\r\n for i in 
range(features.shape[0]):\r\n        \r\n        patches = np.zeros((features.shape[1]//100 + 1, 100))\r\n        for j in range(0, features.shape[1], 100):\r\n            if features[i, j:j+100].shape[0] == 100:\r\n                patches[j//100,:] = features[i, j:j+100]\r\n        \r\n        patchesMean = np.mean(patches, axis=1, dtype=np.float32, keepdims=True)\r\n        patchesVar = np.var(patches, axis=1, dtype=np.float32, keepdims=True)\r\n        offsetMatrix = 10.0 * np.ones(patchesVar.shape)\r\n        patches = (patches - patchesMean) / np.sqrt(patchesVar + offsetMatrix)\r\n\r\n        z = native_cdist(patches, centroids)\r\n        \r\n        mu = np.tile(np.array([np.mean(z, axis = 1)]).T, (1, centroids.shape[0]))\r\n        patches = np.maximum(mu - z, np.zeros(mu.shape))\r\n        \r\n        q = patches.shape[0]//4\r\n        q1 = np.sum(patches[0:q,:], axis=0) \r\n\r\n        q_all = q1\r\n        for j in range(1,4):\r\n            q_all = np.hstack((q_all,np.sum(patches[j*q:(j+1)*q,:], axis=0)))\r\n\r\n        s_features[i,:] = q_all\r\n\r\n    np.save('second_layer_additional_features.npy', s_features)","sub_path":"data/external/repositories_2to3/190816/MasterThesisDiabeticRetinopathy-master/scripts/core/main_lib.py","file_name":"main_lib.py","file_ext":"py","file_size_in_byte":16710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"124538658","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTask 19.3\n\nCreate a function send_commands (netmiko is used for the SSH connection).\n\nFunction parameters:\n* device - a dictionary with the connection parameters of the device the commands should be sent to\n* show - a single show command (string)\n* config - a list of commands to execute in configuration mode\n\nDepending on which argument was passed, the function calls a different function internally.\nWhen send_commands is called, only one of the arguments show, config will ever be passed.\n\nThe mapping between argument and function:\n* show - the send_show_command function from task 19.1\n* config - the send_config_commands function from task 19.2\n\nThe function returns a string with the results of executing the command or commands.\n\nTest the function:\n* with the list of commands in commands\n* with the single command in command\n\nExample of how the function works:\n\nIn [14]: send_commands(r1, show='sh clock')\nOut[14]: '*17:06:12.278 UTC Wed Mar 13 2019'\n\nIn [15]: send_commands(r1, config=['username user5 password pass5', 'username user6 password pass6'])\nOut[15]: 'config term\\nEnter configuration commands, one per line. End with CNTL/Z.\\nR1(config)#username user5 password pass5\\nR1(config)#username user6 password pass6\\nR1(config)#end\\nR1#'\n\"\"\"\nfrom task_19_1 import send_show_command\nfrom task_19_2 import send_config_commands\nfrom netmiko import ConnectHandler\nimport yaml\nfrom pprint import pprint\n\ncommands = [\"logging 10.255.255.1\", \"logging buffered 20010\", \"no logging console\"]\ncommand = \"sh ip int br\"\ndevice = {\"ip\": '172.16.1.2', # dictionary with the device connection parameters\n          \"username\": \"cisco\",\n          \"password\": \"cisco\",\n          \"secret\": \"cisco\",\n          \"device_type\": \"cisco_ios\"}\n\n\ndef send_commands(device, **kwargs):\n    if 'show' in kwargs: #if show was passed, run send_show_command from task 19.1\n#        print('we need show')\n        command = kwargs['show']\n#        print(command)\n        result = send_show_command(device, command)\n    elif 'config' in kwargs: #if config was passed, run send_config_commands from task 19.2\n#        print('we have config')\n        conf_commands = kwargs['config'] #list of commands\n#        print(conf_commands)\n        result = send_config_commands(device, conf_commands)\n    else: # neither show nor config was passed\n        print('something wrong in ' + str(kwargs))\n        result = None\n    return result\n\nif __name__==\"__main__\":\n#print(send_commands(device, show = 'sh ip int br' ))\n    print(send_commands(device, config=[\"logging 10.255.255.1\", \"logging buffered 20010\", \"no logging console\"]))\n","sub_path":"exercises/19_ssh_telnet/task_19_3.py","file_name":"task_19_3.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"453531809","text":"# Copyright (c) 2013, Li Sijin (lisijin7@gmail.com)\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# - Redistributions of source code must retain the above copyright notice,\n#   this list of conditions and the following disclaimer.\n# \n# - Redistributions in binary form must reproduce the above copyright notice,\n#   this list of conditions and the following disclaimer in the documentation\n#   and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom ibasic_convdata import *\n\nclass NoahDataProviderError(Exception):\n    pass\n\nclass CroppedImageNetDataProvider(CroppedImageDataProvider):\n    \"\"\"\n    \n    \"\"\"\n    def __init__(self, data_dir, image_range, init_epoch=1, init_batchnum=None, dp_params={}, test=False):\n        CroppedImageDataProvider.__init__(self, data_dir, image_range, init_epoch, init_batchnum, dp_params, test)\n        self.labels = self.batch_meta['labels'] #\n        self.labelnames = self.batch_meta['labelnames'] #\n        self.labelwords = self.batch_meta['labelwords'] #\n        self.test = test\n\n        ## Add alias name ( Since it will be called by cost function)\n        self.batch_meta['label_names'] = self.batch_meta['labelnames'] #\n        # override images_path\n        self.images_path = map(lambda x:iu.fullfile(data_dir,x), \\\n                               map(lambda x:iu.getpath(x, 1),\\\n                                   self.batch_meta['images_path']))\n        self.num_classes = 1000\n    def get_next_batch(self):\n        if self.data_dic is None or len(self.batch_range) > 1:\n            self.data_dic = self.get_batch(self.curr_batchnum)\n        epoch, batchnum = self.curr_epoch, self.curr_batchnum\n        self.advance_batch()\n        ndata = self.data_dic['data'].shape[-1]\n        alldata = [np.require(self.data_dic['data'].reshape((-1,ndata),order='F'),dtype=np.single, requirements='C'), \\\n                   np.require(np.asarray(self.data_dic['labels']).reshape((1,ndata)), dtype=np.single, requirements='C')]\n        return epoch, batchnum, alldata\n    # _joint_batches uses the parent class's method\n    def get_batch(self, batch_num):\n        \"\"\"\n        batch_num in self.image_range\n        \"\"\"\n        dic = CroppedImageDataProvider.get_batch(self, batch_num)\n        labels = map(lambda x:self.labels[0, x], dic['cur_batch_indexes'])\n        dic['labels'] = labels;\n        return dic\n    def get_data_dims(self, idx=0):\n        return iprod(self.input_image_dim) if idx==0 else 1\n\n","sub_path":"noah_convdata.py","file_name":"noah_convdata.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"523221116","text":"import string\r\n\r\nfrom onyx.lms._models import assignment\r\nfrom onyx.lms._models import grading_option\r\nfrom onyx.lms._models import group\r\nfrom onyx.lms._models import section\r\n\r\ndef create_token(length=7):\r\n    from onyx.rand import rand_string\r\n    return rand_string(len=length, chars=string.ascii_uppercase + string.digits)\r\n\r\ndef create_section(db, state, teacher_id, section_name):\r\n    str_length = 7\r\n    for i in range(25):\r\n        try:\r\n            section.insert(db, create_token(str_length+i), section_name, teacher_id)\r\n            break\r\n        except Exception:\r\n            pass\r\n    \r\ndef join_section(db, state, token, student_id):\r\n    _section = section.get_by_token(db, token)\r\n    \r\n    if not _section:\r\n        state.success = False\r\n        state.errors['class_code'] = \"Invalid Class Code\"\r\n        return\r\n    \r\n    section.add_student_to_section(db, _section.id, student_id, accepted=True)\r\n    
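\r\n# Illustrative usage sketch (hypothetical objects: 'db' is the app's database handle and\r\n# 'state' any mutable object exposing .success and .errors, as used above):\r\n#   join_section(db, state, token='AB12CDE', student_id=7)\r\n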
","sub_path":"web/onyx/lms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"469663762","text":"import csv\n\nxp_player = [] #PLayers with XP\nnoxp_player = [] #Players without XP\n\nsharks = [] #Team Sharks\ndragons = [] #Team Dragons\nraptors = [] #Team Rapotrs\n\ndef divide_by_xp(): #divide the players by experience\n with open(\"soccer_players.csv\") as soccer_players_csv:\n players = csv.reader(soccer_players_csv, delimiter=',')\n for player in players:\n if player[2] == \"YES\":\n xp_player.append(player)\n elif player[2] == \"NO\":\n noxp_player.append(player)\n\n\ndef welcome(teamname, teamlist): #makes the welcome letters for the players of the team\n while teamlist:\n player = teamlist.pop()\n x = player[0].split()\n filename = x[0].lower()+\"_\"+x[1].lower()+\".txt\"\n letter = \"\"\"Dear {},\nCongratulations to {} because she/he is now just become a member of {} team!\nFirst practice starts in October 5th 15:00 in the stadium.\nBest Regards:\n Mr. Administrator\n \"\"\".format(player[3], player[0], teamname)\n with open(filename, \"w\") as file:\n file.write(letter)\n\n\ndef teamsort(teamlist): #splitting the input list into the 3 different teams\n teamcounter = 1\n while teamlist:\n if teamcounter == 1:\n sharks.append(teamlist.pop())\n teamcounter += 1\n elif teamcounter == 2:\n dragons.append(teamlist.pop())\n teamcounter += 1\n elif teamcounter == 3:\n raptors.append(teamlist.pop())\n teamcounter = 1\n\n\ndef teamlist(teamname, teamlist): #returning the team list as a string with team name\n teamstring = teamname+\"\\n\"\n for player in teamlist:\n teamstring += (player[0])+\", \"+(player[2])+\", \"+(player[3])+\"\\n\"\n return(teamstring)\n\n\ndef teamexport(): #exporting the teams into the txt\n with open(\"teams.txt\", \"w\") as file:\n exportstring = teamlist(\"Sharks\", sharks)+\"\\n\"\n exportstring += teamlist(\"Dragons\", dragons)+\"\\n\"\n exportstring += teamlist(\"Raptors\", raptors)\n file.write(str(exportstring))\n welcome(\"Sharks\", sharks)\n welcome(\"Dragons\", dragons)\n welcome(\"Raptors\", raptors)\n\n\nif __name__ == \"__main__\":\n divide_by_xp()\n teamsort(xp_player)\n teamsort(noxp_player)\n teamexport()\n","sub_path":"league_builder.py","file_name":"league_builder.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"500896377","text":"import sys\nsys.path.append('/home/achanna/poroelast9/imports/misc')\nfrom header import *\n\nimport detrend\nimport tradeoffs\nimport likelihoods\nimport forwardModels\nimport postProcessors\nfrom connect_info import *\n\ncon = MySQLdb.connect( db_addr, db_user, db_pswd )\ncur = con.cursor()\ncur.execute(\"DROP DATABASE IF EXISTS %s\" % db_title)\ncon.commit()\ncur.execute(\"CREATE DATABASE %s\" % db_title)\ncur.execute(\"USE %s\" % db_title)\ncon.commit()\ncon.close()\n\nstart_bucket(aws_id,aws_key,bucket)\n\nengine = sqlalchemy.create_engine('mysql+mysqldb://%s:%s@localhost/%s'%(db_user,db_pswd,db_title), echo=False)\nBase.metadata.create_all(engine)\n\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\n# define physical properties\nproperties=[]\nproperties.append( Property( title='x01', abv='x01', unit='1' ))\nproperties.append( Property( title='x02', abv='x02', unit='1' ))\n\n# define geometry and model domains\ndomains=[]\ndomains.append( Domain( 
title='Domain', geom=np.array([[0,2000],[0,2000],[-250,+500]]) ))\n\n# define prior probability distributions, define pxds\n\npriorModel0 = Uniform( -5.00, +5.00, 0.05 )\npriorModel1 = Uniform( -5.00, +5.00, 0.05 )\n\npxds=[]\npxds.append( Prop_X_Domain( property=properties[0], domain=domains[0], priorModel=priorModel0 ))\npxds.append( Prop_X_Domain( property=properties[1], domain=domains[0], priorModel=priorModel1 ))\n\nconstraints=[]\n\n# define instruments\ninstrumentTypes=[]\ninstrumentTypes.append( InstrumentType(title='f1',abv='f1',unit='1') )\n\ngeom = np.array([0,0,0])\n\ninstruments=[]\ninstruments.append( Instrument(title='f1',geom=geom) )\ninstruments[0].instType = instrumentTypes[0]\n\n# define forward models\nfmods=[]\nfmods.append(ForwardModel(fmod=forwardModels.buildInput))\n\nindVars = np.array([0])\n\nmeasurements = []\nmeasurements.append(Measurement( indVars_prescribed=indVars, indVars_actual=indVars.copy(), data=indVars.copy(), sigsq=1.0, weight=1.0, detrend=detrend.detrend ))\n\nmeasurements[0].instruments.append( instruments[0] )\n\nsignalTypes = []\nsignalTypes.append( SignalType(forwardModel=fmods[0],measurement=measurements[0]) )\n\n# define objectives\nobjectives = []\nobjectives.append( Objective( post=postProcessors.f1 ) )\n\nobjectives[0].measurements.append( measurements[0] )\n\nopts = []\nopts.append(OptMC(aws_id=aws_id,aws_key=aws_key,bucket=bucket))\nopts.append(OptMCMC(aws_id=aws_id,aws_key=aws_key,bucket=bucket))\nopts.append(OptGD(aws_id=aws_id,aws_key=aws_key,bucket=bucket))\nsession.add_all(opts)\n\nopts[0].objectives+=objectives\nopts[0].pxds+=pxds\nopts[1].objectives+=objectives\nopts[1].pxds+=pxds\nopts[2].objectives+=objectives\nopts[2].pxds+=pxds\n\n\nopts[0].parameters=opts[0].random_parameterization()\nopts[1].parameters=opts[0].parameters\nopts[2].parameters=opts[0].parameters\n\nopts[1].create_chain(5,runLocal=forwardModels.rosenbrock)\nopts[2].create_chain(5,runLocal=forwardModels.rosenbrock)\n\nsession.commit()\n\ntimes = np.zeros([0,7],dtype='float')\npickle.dump(times,open('times.pkl','wb'))\n","sub_path":"scripts/rosenbrock/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"399006035","text":"import socket\nimport threading\nimport cv2\nimport numpy as np\n\nclass SocketHandler:\n def __init__(self, on_data, on_image):\n self.lt = threading.Thread(target=self.handle_connections, daemon=True)\n \n self.on_data = on_data\n self.on_image = on_image\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n self.s.bind((\"\", 16505))\n self.s.listen(5)\n \n \n def start_listeners(self):\n self.lt.start()\n \n \n def handle_connections(self):\n while True:\n (cs, addr) = self.s.accept()\n \n data = cs.recv(5000000)\n \n header = \"\"\n str_data = \"\"\n \n for b in data:\n header = header + chr(b)\n if chr(b) == \"}\":\n break\n \n if header[len(header) - 4:len(header) - 1] == \"IMG\":\n data = data[len(header):len(data)]\n data = np.fromstring(data, np.uint8)\n image = cv2.imdecode(data, cv2.IMREAD_COLOR)\n r = self.on_image(header, image)\n cs.send(self.on_image(header, image).encode(\"utf-8\"))\n else:\n str_data = data.decode(\"utf-8\")\n r = self.on_data(str_data)\n \n if not r[1]:\n cs.send(r[0].encode(\"utf-8\"))\n else:\n cs.send(r[0])\n \n cs.close()\n \ndef picture_to_data(img):\n return b\"IMG:\" + cv2.imencode('.jpg', img)[1].tostring()\n 
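# Note on the wire format: handle_connections() above reads everything up to the first\n# '}' as a header and dispatches to on_image only when the three characters just before\n# that '}' spell 'IMG'; any other payload is decoded as UTF-8 and passed to on_data.\n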
\n","sub_path":"Server/ServerSockets.py","file_name":"ServerSockets.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"524397236","text":"# Ear Training App\n# Developed by Sylwester Stremlau\n# 2018\n# University of West London\n\nimport cx_Freeze\n\nexecutables = [cx_freeze.Executable(\"main.py\")]\n\ncx_freeze.setup(\n name = \"Ear Training App\",\n options = {\"build_exe\": {\"packages\":[\"pygame\"],\n \"included_files\":[]}}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"68925435","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.setrecursionlimit(10**9)\nINF=10**18\nMOD=10**9+7\ninput=lambda: sys.stdin.readline().rstrip()\nYesNo=lambda b: bool([print('Yes')] if b else print('No'))\nYESNO=lambda b: bool([print('YES')] if b else print('NO'))\nint1=lambda x:int(x)-1\n\nX=int(input())\nif X>=1800:\n print(1)\nelif X>=1600:\n print(2)\nelif X>=1400:\n print(3)\nelif X>=1200:\n print(4)\nelif X>=1000:\n print(5)\nelif X>=800:\n print(6)\nelif X>=600:\n print(7)\nelif X>=400:\n print(8)","sub_path":"Python_codes/p02600/s244835700.py","file_name":"s244835700.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"495168324","text":"#This is a python program for the game Rock, Paper, Scissors\nimport random\n\n# Gets player's name\t\ndef playerName():\n\tname = input(\"Please enter your name\\n\")\n\treturn name\n\t\n# Welcomes the user\ndef welcomeMessage(name):\n\tprint (\"Hello \" + name + \"! Welcome to the game Rock, Paper, Scissors! 
Good luck!\\n\")\n\n# Gets player's choice\t\ndef playerChoice():\n\tplayChoice = input(\"Please enter Rock, Paper, or Scissors, or Done if your finished\\n\")\n\t\n\tplayChoice.lower()\n\t\n\t# Loops until player chooses acceptable option\n\twhile playChoice != \"rock\" and playChoice != \"paper\" and playChoice != \"scissors\" and playChoice != \"done\":\n\t\tplayChoice = input(\"Please enter Rock, Paper, or Scissors, or Done if your finish\\n\")\n\t\tplayChoice.lower()\n\t\n\treturn playChoice\n\n# Generates computer's choice\t\ndef computerChoice():\n\t\n\t# Generates random number\n\tselectionNum = random.randint(1,3)\n\t\n\t# Selects choice based on random number\n\tif selectionNum == 1:\n\t\tprint (\"Computer choose rock\")\n\t\tcomChoice = \"rock\"\n\t\t\n\tif selectionNum == 2:\n\t\tprint (\"Computer choose paper\")\n\t\tcomChoice = \"paper\"\n\t\t\n\tif selectionNum == 3:\n\t\tprint (\"Computer choose scissors\")\n\t\tcomChoice = \"scissors\"\n\t\t\n\treturn comChoice\n\t\t\n\n# Determines the winner of round\ndef winner(playChoice, comChoice):\n\n\t\t# Compares user's choice and computer's choice to determine winner\n\t\tif playChoice == \"rock\" and comChoice == \"scissors\":\n\t\t\tprint (\"The user wins!\\n\")\n\t\t\tresult = \"userWins\"\n\t\t\t\n\t\tif playChoice == \"rock\" and comChoice == \"paper\":\n\t\t\tprint (\"The computer wins\\n\")\n\t\t\tresult = \"computerWins\"\n\t\t\t\n\t\tif playChoice == \"scissors\" and comChoice == \"paper\":\n\t\t\tprint (\"The user wins!\\n\")\n\t\t\tresult = \"userWins\"\n\t\t\t\n\t\tif playChoice == \"scissors\" and comChoice == \"rock\":\n\t\t\tprint (\"The computer wins!\\n\")\n\t\t\tresult = \"computerWins\"\n\t\t\t\n\t\tif playChoice == \"paper\" and comChoice == \"scissors\":\n\t\t\tprint (\"The computer wins!\\n\")\n\t\t\tresult = \"computerWins\"\n\t\t\t\n\t\tif playChoice == \"paper\" and comChoice == \"rock\":\n\t\t\tprint (\"The user wins!\\n\")\n\t\t\tresult = \"userWins\"\n\t\t\n\t\tif playChoice == comChoice:\n\t\t\tprint (\"It's a tie!\\n\")\n\t\t\tresult = \"ties\"\n\t\t\t\n\t\t# Returns result to main\n\t\treturn result\n\n# Displays information regarding the total number of wins, ties, and games played\t\t\ndef scoreboard(computerWins, userWins, ties, gamesPlayed):\n\tprint (\"The computer won \" + str(computerWins) + \" times\\n\")\n\tprint (\"The user won \" + str(userWins) + \" times\\n\")\n\tprint (\"The two players tied \" + str(ties) + \" times\\n\")\n\tprint (\"Altogether \" + str(gamesPlayed) + \" games were played\\n\")\n\ndef main():\t\n\t# Calls method to get players name\n\tname = playerName()\n\t# Calls method to welcome the player\n\twelcomeMessage(name)\n\n\t# Sets variables to 0\n\tuserChoice = None\n\tcomputerWins = 0\n\tuserWins = 0\n\tties = 0\n\tgamesPlayed = 0\n\n\t# Loops the main functions of the game\n\twhile userChoice != \"done\":\n\t\tuserChoice = playerChoice()\n\t\tif userChoice != \"done\":\n\t\t\tcomChoice = computerChoice()\n\t\t\tresult = winner(userChoice, comChoice)\n\t\t\tgamesPlayed += 1\n\t\n\t\t\tif result == \"userWins\": \n\t\t\t\tuserWins += 1\n\t\t\t\n\t\t\tif result == \"computerWins\": \n\t\t\t\tcomputerWins += 1\n\t \n\t\t\tif result == \"ties\":\n\t\t\t\tties += 1\n\n\t# Calls method that displays the results\n\tscoreboard(computerWins, userWins, ties, 
gamesPlayed)\n\t\t\nmain()","sub_path":"rockPaperScissors.py","file_name":"rockPaperScissors.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"96737694","text":"#COLORES: RGB(rojo, verde, azul)\n\nimport pygame#.sys\nfrom pygame.locals import *\nfrom random import randint\n\nBlanco= (255,255,255)\nRojo= (255, 0, 0)\nVerde= (0, 255, 0)\nAzul= (0,0, 255)\nAzulMarino= (0, 0, 50)\nNegro= (0,0,0)\nColor2= pygame.Color(255, 120, 9)\n\nTamanioX, TamanioY= 900, 700\nnumInvaders = randint(10, 50)\nmiFuente= miFuente\nnumLevel= miFuente.render(\"Prueba fuente\", 0, (255,255,255))\n\npygame.init ()\nventana = pygame.display.set_mode((TamanioX,TamanioY))\npygame.display.set_caption(\"Space Invaders\") #Nombre de la ventana\n\n#Cargar imagen\nspaceInvader= pygame.image.load(\"imagenes/spaceInvaders.png\")\nbulbasaur= pygame.image.load(\"imagenes/bulbasaur.png\")\ncohete= pygame.image.load(\"imagenes/cohete.png\")\n\nposX, posY= 100, 70 #randint(0,TamanioX), randint(0, TamanioY)\nposXC, posYC= (int)(TamanioX/2), (TamanioY-100)\nvelocidad, velBulbasaur, velCohete = 2, 20, 5\nmovement= False\n\ninvadersPositions = []\n\nposAux= [posX, posY]\n\n\nfor i in range(0, numInvaders):\n if posAux[0]< (TamanioX-60):\n invadersPositions= invadersPositions+ posAux\n posAux[0]+= 70\n #print (\"HOLAAAA\")\n #print (invadersPositions, numInvaders, posAux)\n else:\n posAux[0],posAux[1]=100, posAux[1]+100\n #print (\"CHAOOOO\")\n #print (invadersPositions, numInvaders)\n\nprint (invadersPositions, numInvaders)\n\ndef creationMap (color, posAuxX, posAuxY):\n #Colorear la ventana\n ventana.fill(color)\n #ventana.fill(Color2)\n\n #Incluye la imagen en la ventana siendo x e y la esquina superior izda\n for i in range(len(invadersPositions)):\n if i%2==0:\n posAuxX=invadersPositions[i]\n else:\n posAuxY=invadersPositions[i]\n ventana.blit(spaceInvader, (posAuxX,posAuxY))\n\n\n\n\n#loop para el juego\nwhile True:\n creationMap(AzulMarino, posX, posY)\n\n ventana.blit(bulbasaur, (posXC, posYC))\n #Cuando el usuario presiona la X superior derecha se termina el programa\n for evento in pygame.event.get():\n if evento.type== QUIT:\n pygame.quit()\n sys.exit()\n\n #Mover imagen con el teclado\n elif evento.type == pygame.KEYDOWN:\n if evento.key== K_LEFT:\n if posXC>10:\n posXC-= velBulbasaur\n elif evento.key == K_RIGHT:\n if posXC<(TamanioX-100):\n posXC+= velBulbasaur\n elif evento.key == K_UP:\n posXCo=posXC\n posYCo=posYC\n movement= True\n #elif evento.key == K_DOWN:\n # posYC+= velocidad2\n\n#mueve el objeto hacia la derecha e izquierda hasta que llega al final del tablero\n #print (posX, derecha, TamanioX)\n if movement == True:\n if posYCo>-100:\n ventana.blit(cohete, (posXCo, posYCo))\n posYCo-= velCohete\n else:\n movement=False\n\n\n #uso del cursor\n #posXC, posYC= pygame.mouse.get_pos()\n\n #Actualiza lo que esta pasando en la ventana\n pygame.display.update()\n","sub_path":"Python/Snake/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"447166234","text":"# Copyright (c) 2019 Kyle Lopin (Naresuan University) \n\n\"\"\"\nFunctions to make data sets that are to be analyzed\n\"\"\"\n\n# installed libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n# import statsmodels.api as sm\n# local files\nimport helper_functions as 
funcs\n\nplt.style.use('seaborn')\n\n__author__ = \"Kyle Vitatus Lopin\"\n\n# plt.xkcd()\n\nas7262_files = funcs.get_all_files_with_stub('as7262', 'mango_data')\nprint(as7262_files)\n# background = [250271, 176275, 216334, 219763, 230788, 129603]\nall_data = None\nstarting = True\n\nfor file in as7262_files:\n print(file)\n try:\n file_data = pd.read_excel(file, sheet_name=\"Sheet1\")\n except:\n file_data = pd.read_excel(file, sheet_name=\"Sheet2\")\n if starting:\n all_data = file_data\n starting = False\n else:\n print('appending')\n all_data = all_data.append(file_data)\n # print(all_data)\n\nprint('=========')\nprint(all_data)\n\naverage_data = all_data.groupby('Leaf number', as_index=True).mean()\n\nprint('++++++')\nprint(average_data)\n\nchloro_data_filename = funcs.get_all_files_with_stub('absorbance', 'mango_data')[0]\nchlor_data = pd.read_excel(chloro_data_filename, sheet_name='Summary')\nprint(chlor_data)\n# print(chlor_data.index)\n# print(chlor_data)\nchlor_data['leaf number:'] = 'Leaf: ' + chlor_data['Leaf number'].astype(str)\nprint(chlor_data.columns)\n# chlor_data.rename(columns={'leaf number:', 'Leaf number'}, inplace=True)\n# chlor_data.rename(columns={'Leaf number', 'leaf number:'}, inplace=True)\nchlor_data['Leaf number'] = chlor_data['leaf number:']\ndel chlor_data['leaf number:']\nchlor_data.set_index('Leaf number', inplace=True)\nprint(chlor_data)\n\n\n# average_data.insert(len(average_data.columns),)\n# total_data = average_data.merge(chlor_data, left_index=True, right_index=True)\n#total_data\n\nprint(chlor_data.columns)\nfor _data in chlor_data.columns:\n average_data[_data] = chlor_data[_data]\n# pd.set_option('display.max_columns', 500)\nprint('===========')\nprint(average_data)\nprint(average_data.columns)\n\ndata_columns = ['450.1', '500.1', '550.1', '570.1', '600.1', '650.1']\nchloro_columns = ['Chlorophyll a (ug/ml)', 'Chlorophyll b (ug/ml)',\n 'Total Chlorophyll (ug/ml)']\ny_name = 'Total Chlorophyll (ug/ml)'\n\ndata = pd.DataFrame()\n\n\ndef reg_m(_y, _x):\n ones = np.ones(len(_x))\n _X = sm.add_constant(np.column_stack((_x, ones)))\n _results = sm.OLS(_y, _X).fit()\n return _results\n\n\n# for data_column in data_columns:\ndef fit_n_plot_data(data_column, axis, add_xlabel, figure_letter,\n _background):\n # average_data.plot.scatter(data_column, y_name)\n y = average_data[y_name]\n print('y = ', y)\n # y = y / _background\n # print('y = ', y )\n x = average_data[data_column]\n print('x = ', x)\n x = x / _background\n print('x = ', x)\n\n results = reg_m(y, x)\n r_sq_adj = results.rsquared_adj\n print(results.params)\n fitted_x = (average_data[data_column] * results.params.x1 +\n results.params.const)\n fitted_y = results.predict()\n\n axis.plot(x, y, 'o', color='mediumseagreen', markersize=4)\n axis.plot(x, fitted_y, color='rebeccapurple')\n axis.annotate(u\"R_adj\\u00B2 =\\n{:.3f}\".format(r_sq_adj), xy=(0.7, 0.8),\n xycoords='axes fraction', color='#101028')\n axis.annotate(figure_letter, xy=(-.2, .98),\n xycoords='axes fraction', size=19,\n weight='bold')\n wavelength = data_column.split('.')[0]\n # axis.set_xlabel(\"counts\".format(wavelength))\n # axis.set_title(\"{0} nm measurement\".format(wavelength))\n axis.set_ylabel(y_name)\n print(wavelength)\n axis.title.set_text(\"{0} nm sensor channel\".format(wavelength))\n axis.set_ylim([0., 1.1])\n if add_xlabel:\n print('add y lable:', add_xlabel)\n axis.set_xlabel('Reflectance')\n\n\nfigure, axes, = plt.subplots(3, 2, figsize=(7.5, 8.75), constrained_layout=True)\nprint(axes)\nfigure.suptitle(\"{0} 
\".format(y_name.split('(')[0]), size=28,\n fontname='Franklin Gothic Medium')\naxes = [axes[0][0], axes[0][1], axes[1][0],\n axes[1][1], axes[2][0], axes[2][1]]\nletters = ['A', 'B', 'C', 'D', 'E', 'F']\nfor i, data_column in enumerate(data_columns):\n print(axes[i])\n print(data_column)\n print('fitting')\n fit_n_plot_data(data_column, axes[i], (i >= 4), letters[i], background[i])\n\n\n# plt.tight_layout(0.5)\nplt.show()\n\n\n\n# sorted_data = average_data.sort_values(by=['Total Chlorophyll (ug/ml)'])\n# # pd.set_option('display.max_columns', 500)\n# print(sorted_data['Total Chlorophyll (ug/ml)'])\n# print(sorted_data.index)\n\n","sub_path":"data_maker.py","file_name":"data_maker.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"369295944","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport skimage\nfrom skimage import exposure, morphology, color\n\nfrom handlers import normalize_hist, quantize, remove_small_areas, get_channel\n\n\"\"\"\n Здесь массивы функций - способы обработки изображений\n каждая функция на верхнем уровне должна удовлетворять 2 требованиям:\n 1. Принимать изображение и возвращать изображение, которое ожидает принять следующая функция;\n 2. Возвращать изображение, которе может быть отображено с помощью utils -> view\n\n Для вложенных массивов условие 2 необязательно, но условие 1 необходимо выполнять\n\"\"\"\n\n\"\"\"\nПопытка 1\n1. Квантование\n2. Расширение гистограммы (маловажно)\n3. Перевод в градации серого\n4. Адаптивная бинаризация\n5. Удаление мелких объектов и дыр\n\nВывод: при семешении 3 каналов в градации серого теряется очень много информации\nНужно работать с каждым цветовым каналом отдельно\n\"\"\"\neffort1 = [\n lambda img: quantize(img, 5),\n lambda img: normalize_hist(img),\n lambda img: cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),\n lambda img: cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2),\n [\n lambda img: morphology.remove_small_objects(img.astype(bool)),\n lambda img: morphology.remove_small_holes(img),\n lambda img: skimage.img_as_ubyte(img)\n ]\n]\n\n\n\"\"\"\nПопытка 2\n1. Квантование\n2. Расширение гистограммы (маловажно)\n3. Разделение по цветовым каналам\n4. Адаптивная бинаризация (для каждого канала)\n5. 
Удаление мелких объектов и дыр (для каждого канала)\n\"\"\"\n\n\ndef effort2(channel: str):\n    return [\n        lambda img: quantize(img, 5),\n        lambda img: normalize_hist(img),\n        lambda img: get_channel(img, channel),\n        # lambda img: cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2),\n        # lambda img: remove_small_areas(img)\n    ]\n","sub_path":"src/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"485096797","text":"\r\nfrom googleapiclient.discovery import build\r\nfrom pymongo import MongoClient\r\n\r\nDEVELOPER_KEY = 'Youtube_developer_API' #add your API Key\r\nYOUTUBE_API_SERVICE_NAME = 'youtube'\r\nYOUTUBE_API_VERSION = 'v3'\r\n\r\n#starting a connection to MongoDB database server\r\nmyclient = MongoClient(\"mongodb://localhost:27017/\")\r\nmydb = myclient[\"DB_search\"]\r\n\r\n#Creating collections\r\ncol_video = mydb[\"Video\"]\r\ncol_channel = mydb[\"Channel\"]\r\ncol_playlist = mydb[\"Playlist\"]\r\n\r\n\r\ndef YoutubeAPI(query, max_results):\r\n    \r\n    \r\n    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n                    developerKey=DEVELOPER_KEY)\r\n    \r\n    search_response = youtube.search().list(\r\n        q=query,\r\n        part='id,snippet',\r\n        maxResults=max_results\r\n    ).execute()\r\n    del youtube\r\n    \r\n    results = search_response.get('items', [])\r\n    del search_response\r\n    \r\n    videos = []\r\n    channels = []\r\n    playlists = []\r\n    \r\n    for search_result in results:\r\n        \r\n        if search_result['id']['kind'] == 'youtube#video':\r\n            videos.append('%s (%s)' % (search_result['snippet']['title'],\r\n                                       search_result['id']['videoId']))\r\n            \r\n        elif search_result['id']['kind'] == 'youtube#channel':\r\n            channels.append('%s (%s)' % (search_result['snippet']['title'],\r\n                                         search_result['id']['channelId']))\r\n            \r\n        elif search_result['id']['kind'] == 'youtube#playlist':\r\n            playlists.append('%s (%s)' % (search_result['snippet']['title'],\r\n                                          search_result['id']['playlistId']))\r\n    \r\n    del results    \r\n    print ('Videos:\\n', '\\n'.join(videos), '\\n')\r\n    print ('Channels:\\n', '\\n'.join(channels), '\\n')\r\n    print ('Playlists:\\n', '\\n'.join(playlists), '\\n')\r\n    \r\n    # saving extracted data to MongoDB database\r\n    list_ = []\r\n    for s in videos:\r\n        \r\n        text = s.split('(')\r\n        link = text [-1][:-1]\r\n        text_find = s.find(link)-1\r\n        full_link = \"https://www.youtube.com/watch?v=\" + link\r\n        print('Video Title:', s[:text_find] ,'\\n')\r\n        print('Video Link:', full_link ,'\\n')\r\n        mydict = { \"Video Title\": s[:text_find], \"Video Link\": full_link }\r\n        list_.append(mydict)\r\n        \r\n    col_video.insert_many(list_)\r\n    del videos\r\n    \r\n    liste_ = []    \r\n    for s in channels:    \r\n        \r\n        text = s.split('(')\r\n        link = text [-1][:-1]\r\n        text_find = s.find(link)-1\r\n        full_link = \"https://www.youtube.com/channel/\" + link\r\n        print('Channel Title:', s[:text_find] ,'\\n')\r\n        print('Channel Link:', full_link ,'\\n')\r\n        \r\n        mydict = { \"Channel Title\": s[:text_find], \"Channel Link\": full_link }\r\n        liste_.append(mydict)\r\n        \r\n    col_channel.insert_many(liste_)    \r\n    del channels    \r\n    \r\n    liste_ = []    \r\n    for s in playlists:    \r\n        \r\n        text = s.split('(')\r\n        link = text [-1][:-1]\r\n        text_find = s.find(link)-1\r\n        full_link = \"https://www.youtube.com/watch?v=WW2DKBGCvEs&list=\" + link\r\n        print('Playlist Title:', s[:text_find] ,'\\n')\r\n        print('Playlist Link:', full_link ,'\\n')\r\n        \r\n        mydict = { \"Playlist Title\": s[:text_find], 
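        # (added sketch) the 'Title (id)' strings assembled above break when a
        # title itself contains '('; partitioning on the last ' (' is sturdier:
        #     title, _, rest = s.rpartition(' (')
        #     link_id = rest[:-1]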
\"Playlist Link\": full_link }\r\n liste_.append(mydict)\r\n \r\n col_playlist.insert_many(liste_) \r\n del playlists\r\n \r\n print(\"MongoDB Data saved\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n max_results = 50\r\n query = \"Youtube\"\r\n \r\n YoutubeAPI(query, max_results)\r\n \r\n","sub_path":"YoutubeAPI.py","file_name":"YoutubeAPI.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"2086224","text":"\"\"\"问题描述: \n * 考虑这样的一种排序问题,即无法准确地知道待排序的各个数字到底是多少。对于其中的每个数字, \n * 我们只知道它落在实轴上的某个区间内。亦即,给定的是n个形如[a(i), b(i)]的闭区间(这里小括 \n * 后起下标的作用,后同),其中a(i) <= b(i)。算法的目标是对这些区间进行模糊排序 \n * (fuzzy-sort),亦即,产生各区间的一个排列,使得存在一个c(j)属于 \n * 区间[a(i(j)), b(i(j))],满足c(1) <= c(2) <= c(3) <= ... <= c(n)。 \n * a) 为n个区间的模糊排序设计一个算法。你的算法应该具有算法的一般结构,它可以快速排序左部 \n * 端点(即各a(i)),也要能充分利用重叠区间来改善运行时间。(随着各区间重叠得越来越多, \n * 对各区间进行模糊排序的问题会变得越来越容易。你的算法应能充分利用这种重叠。) \n * b) 证明:在一般情况下,你的算法的期望运行时间为Θ(nlgn),但当所有的区间都重叠时,期望的 \n * 运行时间为Θ(n)(亦即,当存在一个值x,使得对所有的i,都有x∈[a(i), b(i)])。你的算法 \n * 不应显式地检查这种情况,而是应随着重叠量的增加,性能自然地有所改善。\n\n http://courses.csail.mit.edu/6.046/spring04/handouts/ps2-sol.pdf \n \"\"\"\nimport random\nfrom util import Interval \n\n\ndef compareInterval(interval1, interval2):\n\t\"\"\"\n\tif [interval1] < [interval2] is True, return True\n\tif [interval1] > [interval2] is True, return False\n\tif [interval1] and [interval2] has overlap, return None, as they are equal\n\n\tIn the question, we assume no interval is completely contained in another interval\n\n\t\"\"\"\n\tif interval1.end < interval2. start:\n\t\treturn True\n\tif interval1.start > interval2.end:\n\t\treturn False\n\tif interval1.end >= interval2.start or interval1.start >= interval2.end:\n\t\treturn None\n\ndef merge(interval1, interval2):\n\t\"\"\"\n\tinterval1 and interval2 must have overlap\n\t\"\"\"\n\tif interval1.start > interval2.end or interval2.start > interval1.end:\n\t\t#print('wront intervals, cannot merge')\n\t\treturn None\n\tmergedInterval = Interval(min(interval1.start, interval2.start), max(interval1.end, interval2.end))\n\treturn mergedInterval\n\n#testcase:\n# merge(Interval(1, 2), Interval(3,4))\n# merge(Interval(1,3), Interval(2,4)).printInterval( )\n# merge(Interval(2,4), Interval(1,3)).printInterval( )\n\ndef swap(intervals, index1, index2):\n\tintervals[index1], intervals[index2] = intervals[index2], intervals[index1]\n\n\ndef partition(intervals, p, q):\n\tpivot = random.choice(range(p, q+1))\n\tswap(intervals, pivot, q)\n\n\tpivotInterval = intervals[q]\n\tprint('Chosen pivot')\n\tpivotInterval.printInterval( )\n\tprint('sort')\n\tfor _ in intervals[p: q+1]:\n\t\t_.printInterval( )\n\tprint('********************')\n\n\tleftPointer = p - 1\n\trightPointer = p\n\tpivotPointer = q\n\n\twhile rightPointer < pivotPointer:\n\t\tif compareInterval(intervals[rightPointer], pivotInterval) == None:\n\t\t\tpivotPointer -= 1\n\t\t\tswap(intervals, rightPointer, pivotPointer)\n\t\t\tpivotInterval = merge(pivotInterval, intervals[pivotPointer]) # update pivotInterval\n\n\t\telif compareInterval(intervals[rightPointer], pivotInterval) == True:\n\t\t\t\"\"\"\n\t\t\tBecareful of the order of the following three lines\n\t\t\t\"\"\"\n\t\t\tleftPointer +=1\n\t\t\tswap(intervals, leftPointer, rightPointer)\n\t\t\trightPointer += 1\n\n\t\telif compareInterval(intervals[rightPointer], pivotInterval) == False:\n\t\t\trightPointer += 1\n\n\t\tfor _ in intervals:\n\t\t\t_.printInterval( )\n\t\t\t\n\t\tprint (\"@@@@@@@@@@@@@@@@@\")\n\n\t# Put the pivots 
back\n\ttmpIndex = q\n\ttmpIndex2 = leftPointer+1\n\n\twhile tmpIndex >= pivotPointer:\n\t\tswap(intervals, tmpIndex, tmpIndex2)\n\t\tfor _ in intervals:\n\t\t\t_.printInterval( )\n\t\ttmpIndex -= 1\n\t\ttmpIndex2 += 1\n\n\n\t# tmpIntervals = intervals[pivotPointer : q]\n\t# tmpIntervals2 = intervals[leftPointer+1: rightPointer]\n\n\t# del intervals[leftPointer+1: q]\n\t# intervals.extend(tmpIntervals)\n\t# intervals.extend(tmpIntervals2)\n\n\tfor _ in intervals:\n\t\t_.printInterval( )\n\tprint(\"$$$$$$$$$$$$$$$$\")\n\n\treturn leftPointer + 1, leftPointer + len(intervals) - pivotPointer + 1\n\n#test\n#_intervals = [Interval(1,2), Interval(2,3), Interval(6,7), Interval(4,5), Interval(9,10)]\n# result = partition(_intervals, 0, len(_intervals)-1)\n# print(result)\n# for _ in _intervals:\n# \t_.printInterval( )\n\n\ndef fuzzySorting(intervals, start, end):\n\t\"\"\"\n\tAt first, I used some sort of intarvals[:leftpointer] in the recursion.\n\tHowever, note that this returns a copy of the segment of intervals, it doesn't refer to the\n\toriginal intervals. To implement an in-place sort here, I finally changed the recursion \n\tfunction and added two more variables.\n\t\"\"\"\n\tif start < end:\n\t\tpivotLeft, pivotRight = partition(intervals, start, end)\n\t\tfuzzySorting(intervals, start, pivotLeft)\n\t\tfuzzySorting(intervals, pivotRight, end)\n\n_intervals = [Interval(1,2), Interval(2,3), Interval(6,7), Interval(4,5), Interval(9,10)]\t\t\nfuzzySorting(_intervals, 0, len(_intervals) -1)\nprint(\"*******end*************\")\nfor _ in _intervals:\n\t_.printInterval( )\n\n#test\n\n\n\n\n\n","sub_path":"FuzzySorting.py","file_name":"FuzzySorting.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"130037167","text":"\"\"\" -----------------------------------------------------------------------------------\n Main Module of the Cryostat-GUI built for a custom setup PPMS at TU Wien, Austria\n (Technical University of Vienna, Austria)\n The cryostat is an Oxford Spectromag, controlled by:\n - Oxford:\n - Intelligent Temperature Controller (ITC) 503\n - Intelligent Level Meter (ILM) 211\n - Intelligent Power Supply (IPS) 120-10\n - LakeShore 350 Temperature Controller\n Measurements will be performed with:\n - Keithley:\n - 2182A Nanovoltmeter (x3)\n - 6221 Current Source (AC and DC)\n - DMM7510 7 1/2 Digital Multimeter\n - 2700 Multimeter / Data Acquisition System\n Classes:\n mainWindow:\n The main GUI class for the PyQt application\n Author(s):\n bklebel (Benjamin Klebel)\n adtera\n Acronis\n----------------------------------------------------------------------------------------\n\"\"\"\n\nfrom PyQt5 import QtWidgets # , QtGui\n# from PyQt5.QtCore import QObject\nfrom PyQt5.QtCore import QThread\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtCore import QTimer\n# from PyQt5.QtWidgets import QtAlignRight\nfrom PyQt5.uic import loadUi\n\nimport sys\nimport time\nimport datetime\nfrom threading import Lock\nimport numpy as np\nfrom copy import deepcopy\nimport sqlite3\n\n\nfrom Oxford.ITC_control import ITC_Updater\nfrom Oxford.ILM_control import ILM_Updater\nfrom Oxford.IPS_control import IPS_Updater\nfrom LakeShore.LakeShore350_Control import LakeShore350_Updater\n\nfrom pyvisa.errors import VisaIOError\n\nfrom logger import main_Logger, live_Logger\nfrom logger import Logger_configuration\nfrom util import Window_ui, Window_plotting\n\n\nITC_Instrumentadress = 
'ASRL6::INSTR'\nILM_Instrumentadress = 'ASRL5::INSTR'\nIPS_Instrumentadress = 'ASRL4::INSTR'\nLakeShore_InstrumentAddress = 'GPIB0::1::INSTR'\n\n\ndef convert_time(ts):\n \"\"\"converts timestamps from time.time() into reasonable string format\"\"\"\n return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n\nclass mainWindow(QtWidgets.QMainWindow):\n \"\"\"This is the main GUI Window, where other windows will be spawned from\"\"\"\n\n sig_arbitrary = pyqtSignal()\n sig_logging = pyqtSignal(dict)\n sig_logging_newconf = pyqtSignal(dict)\n sig_running_new_thread = pyqtSignal()\n\n def __init__(self, app, **kwargs):\n super().__init__(**kwargs)\n loadUi('.\\\\configurations\\\\Cryostat GUI.ui', self)\n # self.setupUi(self)\n self.threads = dict()\n self.data = dict()\n self.logging_bools = dict()\n\n self.logging_running_ITC = False\n self.logging_running_logger = False\n\n self.dataLock = Lock()\n self.dataLock_live = Lock()\n self.app = app\n\n QTimer.singleShot(0, self.initialize_all_windows)\n\n def closeEvent(self, event):\n super(mainWindow, self).closeEvent(event)\n self.app.quit()\n\n def initialize_all_windows(self):\n self.initialize_window_ITC()\n self.initialize_window_ILM()\n self.initialize_window_IPS()\n self.initialize_window_Log_conf()\n self.initialize_window_LakeShore350()\n self.initialize_window_Errors()\n self.show_data()\n self.actionLogging_LIVE.triggered['bool'].connect(self.run_logger_live)\n\n def running_thread(self, worker, dataname, threadname, info=None, **kwargs):\n \"\"\"Set up a new Thread, and insert the worker class, which runs in the new thread\n Args:\n worker - the class (as a class instance) which should run inside\n dataname - the name for which a dict entry should be made in the self.data dict,\n in case the Thread is passing data (e.g. sensors, instrument status...)\n threadname - the name as which the thread will be listed in self.threads,\n to be used for e.g. 
signals\n listing the thread in self.threads is also important to protect it\n from garbage collection!\n Returns:\n the worker class instance, useful for connecting signals directly\n \"\"\"\n\n thread = QThread()\n self.threads[threadname] = (worker, thread)\n worker.moveToThread(thread)\n\n if dataname in self.data or dataname == None:\n pass\n else:\n with self.dataLock:\n self.data[dataname] = dict()\n\n thread.started.connect(worker.work)\n thread.start()\n self.sig_running_new_thread.emit()\n return worker\n\n def stopping_thread(self, threadname):\n \"\"\"Stop the thread specified by the argument threadname, delete its entry in self.threads\"\"\"\n\n # self.threads[threadname][0].stop()\n self.threads[threadname][1].quit()\n self.threads[threadname][1].wait()\n del self.threads[threadname]\n\n def show_error_textBrowser(self, text):\n \"\"\" append error to Error window\"\"\"\n self.Errors_window.textErrors.append('{} - {}'.format(convert_time(time.time()),text))\n\n def connectdb(self, dbname):\n \"\"\"connect to the database, provide the cursor for the whole class\"\"\"\n try:\n self.conn = sqlite3.connect(dbname)\n self.mycursor = self.conn.cursor()\n except sqlite3.connect.Error as err:\n raise AssertionError(\"Logger: Couldn't establish connection {}\".format(err))\n\n def show_data(self): # a lot of work to do\n \"\"\"connect GUI signals for plotting, setting up some of the needs of plotting\"\"\"\n self.action_plotDatabase.triggered.connect(self.show_dataplotdb_configuration)\n self.action_plotLive.triggered.connect(self.show_dataplotlive_configuration)\n self.windows_plotting = []\n\n # these will hold the strings which the user selects to extract the data from db with the sql query and plot it\n # x,y1.. is for tablenames, x,y1.._plot is for column names in the tables respectively\n self.plotting_instrument_for_x = 0\n self.plotting_instrument_for_y1 = 0\n self.plotting_instrument_for_y2 = 0\n\n self.plotting_comboValue_Axis_X_plot = 0\n self.plotting_comboValue_Axis_Y1_plot = 0\n self.plotting_data_y2_plot = 0\n\n def show_dataplotdb_configuration(self):\n self.dataplot_db = Window_ui(ui_file='.\\\\configurations\\\\Data_display_selection_database.ui')\n self.dataplot_db.show()\n # populating the combobox instruments tab with tablenames:\n self.mycursor.execute(\"SELECT name FROM sqlite_master where type='table'\")\n axis2 = self.mycursor.fetchall()\n axis2.insert(0, (\"-\",))\n\n self.dataplot_db.comboInstr_Axis_X.clear()\n self.dataplot_db.comboInstr_Axis_Y1.clear()\n self.dataplot_db.comboInstr_Axis_Y2.clear()\n self.dataplot_db.comboInstr_Axis_Y3.clear()\n self.dataplot_db.comboInstr_Axis_Y4.clear()\n self.dataplot_db.comboInstr_Axis_Y5.clear()\n\n for i in axis2:\n self.dataplot_db.comboInstr_Axis_X.addItems(i)\n self.dataplot_db.comboInstr_Axis_Y1.addItems(i)\n self.dataplot_db.comboInstr_Axis_Y2.addItems(i)\n self.dataplot_db.comboInstr_Axis_Y3.addItems(i)\n self.dataplot_db.comboInstr_Axis_Y4.addItems(i)\n self.dataplot_db.comboInstr_Axis_Y5.addItems(i)\n self.dataplot_db.comboInstr_Axis_X.activated.connect(self.selection_x)\n self.dataplot_db.comboInstr_Axis_Y1.activated.connect(self.selection_y1)\n self.dataplot_db.buttonBox.clicked.connect(self.plotstart)\n\n def show_dataplotlive_configuration(self):\n \"\"\"\n open the window for configuration of the Live-plotting to be done,\n fill the comboboxes with respective values, to choose from instruments\n connect to actions being taken in this configuration window\n \"\"\"\n self.dataplot_live_conf = 
Window_ui(ui_file='.\\\\configurations\\\\Data_display_selection_live.ui')\n \n # initialize some \"storage space\" for data\n self.dataplot_live_conf.axes = dict()\n self.dataplot_live_conf.data = dict()\n\n if not hasattr(self, \"data_live\"):\n self.show_error_textBrowser('no live data to plot!')\n self.show_error_textBrowser('If you want to see live data, start the live logger!')\n return\n self.dataplot_live_conf.show()\n\n with self.dataLock_live:\n axis_instrument = list(self.data_live) # all the dictionary keys\n axis_instrument.insert(0, \"-\") # for no chosen value by default\n self.dataplot_live_conf.comboInstr_Axis_X.clear()\n self.dataplot_live_conf.comboInstr_Axis_Y1.clear()\n self.dataplot_live_conf.comboInstr_Axis_Y2.clear()\n self.dataplot_live_conf.comboInstr_Axis_Y3.clear()\n self.dataplot_live_conf.comboInstr_Axis_Y4.clear()\n self.dataplot_live_conf.comboInstr_Axis_Y5.clear()\n\n # for i in axis_instrument: # filling the comboboxes for the instrument\n # print(i, type(i))\n self.dataplot_live_conf.comboInstr_Axis_X.addItems(axis_instrument)\n self.dataplot_live_conf.comboInstr_Axis_Y1.addItems(axis_instrument)\n self.dataplot_live_conf.comboInstr_Axis_Y2.addItems(axis_instrument)\n self.dataplot_live_conf.comboInstr_Axis_Y3.addItems(axis_instrument)\n self.dataplot_live_conf.comboInstr_Axis_Y4.addItems(axis_instrument)\n self.dataplot_live_conf.comboInstr_Axis_Y5.addItems(axis_instrument)\n # actions in case instruments are chosen in comboboxes\n self.dataplot_live_conf.comboInstr_Axis_X.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_X,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_X,\n livevsdb=\"LIVE\",\n axis='X', \n dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.comboInstr_Axis_Y1.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_Y1,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_Y1,\n livevsdb=\"LIVE\",\n axis='Y1', \n dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.comboInstr_Axis_Y2.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_Y2,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_Y2,\n livevsdb=\"LIVE\",\n axis='Y2', \n dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.comboInstr_Axis_Y3.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_Y3,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_Y3,\n livevsdb=\"LIVE\",\n axis='Y3', \n dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.comboInstr_Axis_Y4.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_Y4,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_Y4,\n livevsdb=\"LIVE\",\n axis='Y4', \n dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.comboInstr_Axis_Y5.activated.connect(lambda: self.plotting_selection_instrument(GUI_value=self.dataplot_live_conf.comboValue_Axis_Y5,\n GUI_instr=self.dataplot_live_conf.comboInstr_Axis_Y5,\n livevsdb=\"LIVE\",\n axis='Y5', \n dataplot=self.dataplot_live_conf))\n\n self.dataplot_live_conf.buttonBox.clicked.connect(lambda: self.plotting_display(dataplot=self.dataplot_live_conf))\n self.dataplot_live_conf.buttonBox.clicked.connect(lambda: self.dataplot_live_conf.close())\n self.dataplot_live_conf.buttonCancel.clicked.connect(lambda: self.dataplot_live_conf.close())\n\n def 
plotting_selection_instrument(self, livevsdb, GUI_instr, GUI_value, axis, dataplot):\n \"\"\"\n filling the Value column combobox in case the corresponding\n element of the instrument column combobox was chosen\n thus:\n - check for the chosen instrument,\n - get the data for the new combobox\n - chose the action\n \"\"\"\n GUI_value.addItems((\"-\",))\n instrument_name = GUI_instr.currentText()\n # print(\"instrument for x was set to: \",self.plotting_instrument_for_x)\n if livevsdb == \"LIVE\":\n with self.dataLock_live:\n value_names = list(self.data_live[instrument_name])\n # elif livevsdb == \"DB\":\n # axis = []\n # self.mycursor.execute(\"SELECT * FROM {}\".format(self.plotting_instrument_for_x))\n # colnames= self.mycursor.description\n # for row in colnames:\n # axis.append(row[0])\n GUI_value.addItems(value_names)\n GUI_value.activated.connect(lambda: self.plotting_selection_value(GUI_instr=GUI_instr,\n GUI_value=GUI_value,\n livevsdb=\"LIVE\",\n axis=axis,\n dataplot=dataplot))\n\n def x_changed(self):\n self.plotting_comboValue_Axis_X_plot=self.dataplot.comboValue_Axis_X.currentText()\n\n def plotting_selection_value(self, GUI_instr, GUI_value, livevsdb, axis, dataplot):\n value_name = GUI_value.currentText()\n instrument_name = GUI_instr.currentText()\n dataplot.axes[axis] = value_name\n\n if livevsdb == 'LIVE':\n with self.dataLock_live:\n dataplot.data[axis] = self.data_live[instrument_name][value_name]\n\n def plotting_display(self, dataplot):\n y = None\n try:\n x = dataplot.data['X']\n y = [dataplot.data[key] for key in dataplot.data if key != 'X' ]\n except KeyError:\n self.show_error_textBrowser('Plotting: You certainly did not choose an X axis, try again!')\n return\n if y is None:\n self.show_error_textBrowser('Plotting: You did not choose a single Y axis to plot, try again!')\n return\n data = [[x, yn] for yn in y]\n label_y = None\n try:\n label_y = dataplot.axes['Y1']\n except KeyError:\n for key in dataplot.axes:\n try:\n label_y = dataplot.axes[key]\n except KeyError:\n pass\n if label_y is None:\n self.show_error_textBrowser('Plotting: You did not choose a single Y axis to plot, try again!')\n return\n window = Window_plotting(data=data, label_x=dataplot.axes['X'], label_y=label_y, title='your advertisment could be here!')\n window.show()\n # window.sig_closing.connect(lambda: self.deleting_object(dataplot))\n self.windows_plotting.append(window)\n\n def deleting_object(self, object_to_delete):\n del object_to_delete\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n def selection_y1(self, dataplot, livevsdb):\n dataplot.comboValue_Axis_Y1.addItems(tuple(\"-\"))\n instrument_for_y1 = self.dataplot.comboInstr_Axis_Y1.currentText()\n\n axis = []\n if livevsdb == \"LIVE\":\n axis = list(self.data_live[instrument_for_y1])\n # elif livevsdb == \"DB\":\n # self.mycursor.execute(\"SELECT * FROM {}\".format(self.plotting_instrument_for_y1))\n # colnames= self.mycursor.description\n # for row in colnames:\n # axis.append(row[0])\n self.dataplot.comboValue_Axis_Y1.addItems(axis)\n self.dataplot.comboValue_Axis_Y1.activated.connect(self.y1_changed)\n\n def y1_changed(self):\n self.plotting_comboValue_Axis_Y1_plot=self.dataplot.comboValue_Axis_Y1.currentText()\n\n #gotta have an if statement for the case when x and y values are from different tables\n def plotstart(self):\n print(self.plotting_comboValue_Axis_X_plot,self.plotting_comboValue_Axis_Y1_plot, self.plotting_instrument_for_x)\n array1=[]\n array2=[]\n if self.plotting_instrument_for_x==self.plotting_instrument_for_y1:\n 
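            # (added note) table and column names cannot be bound as sqlite3
            # parameters, hence the .format() below; quoting the identifiers is
            # a hedged hardening sketch (x_col, y_col, table are placeholders):
            #     sql = 'SELECT "{}","{}" FROM "{}"'.format(x_col, y_col, table)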
sql=\"SELECT {},{} from {} \".format(self.plotting_comboValue_Axis_X_plot,self.plotting_comboValue_Axis_Y1_plot,self.plotting_instrument_for_x)\n self.mycursor.execute(sql)\n data =self.mycursor.fetchall()\n\n for row in data:\n array1.append(list(row))\n\n #this is for is for omiting 'None' values from the array, skipping this step would cause the plot to break!\n\n nparray = np.asarray(array1)[np.asarray(array1) != np.array(None)]\n\n #After renaming x to instrument_for_x and y1 to instrument_for_y1, the nparray became 1 dimensional, so the\n #original code:nparray_x = nparray[:,[0]] did not work, this is a workaround, i have no idea what caused it.\n #selecting different instruments for x and y doesn't have this problem as the data is stored in separate arrays.\n\n nparray_x = nparray[0::2]\n nparray_y = nparray[1::2]\n\n plt.figure()\n plt.plot(nparray_x,nparray_y)\n #labels:\n plt.xlabel(self.plotting_comboValue_Axis_X_plot)\n plt.ylabel(self.plotting_comboValue_Axis_Y1_plot)\n\n\n plt.draw()\n\n plt.show()\n else:\n sql=\"SELECT {} FROM {}\".format(self.plotting_comboValue_Axis_X_plot,self.plotting_instrument_for_x)\n self.mycursor.execute(sql)\n data=self.mycursor.fetchall()\n\n for row in data:\n array1.append(list(row))\n nparray_x=np.asarray(array1)[np.asarray(array1) != np.array(None)]\n\n sql=\"SELECT {} FROM {}\".format(self.plotting_comboValue_Axis_Y1_plot,self.plotting_instrument_for_y1)\n self.mycursor.execute(sql)\n data=self.mycursor.fetchall()\n\n for row in data:\n array2.append(list(row))\n nparray_y=np.asarray(array2)[np.asarray(array2) != np.array(None)]\n\n #there can be still some problems if the dimensions don't match so:\n if len(nparray_x)>len(nparray_y):\n nparray_x=nparray_x[0:len(nparray_y)]\n else:\n nparray_y=nparray_y[0:len(nparray_x)]\n\n plt.figure()\n plt.plot(nparray_x,nparray_y)\n #labels:\n plt.xlabel(self.plotting_comboValue_Axis_X_plot+\" from table: \"+str(self.plotting_instrument_for_x))\n plt.ylabel(self.plotting_comboValue_Axis_Y1_plot+\" from table: \"+str(self.plotting_instrument_for_y1))\n\n\n plt.draw()\n\n plt.show()\n\n\n # ------- Oxford Instruments\n # ------- ------- ITC\n def initialize_window_ITC(self):\n \"\"\"initialize ITC Window\"\"\"\n self.ITC_window = Window_ui(ui_file='.\\\\Oxford\\\\ITC_control.ui')\n self.ITC_window.sig_closing.connect(lambda: self.action_show_ITC.setChecked(False))\n\n self.action_run_ITC.triggered['bool'].connect(self.run_ITC)\n self.action_show_ITC.triggered['bool'].connect(self.show_ITC)\n # self.mdiArea.addSubWindow(self.ITC_window)\n\n @pyqtSlot(bool)\n def run_ITC(self, boolean):\n \"\"\"method to start/stop the thread which controls the Oxford ITC\"\"\"\n\n if boolean:\n try:\n # self.ITC = itc503('COM6')\n # getInfodata = cls_itc(self.ITC)\n getInfodata = self.running_thread(ITC_Updater(ITC_Instrumentadress), 'ITC', 'control_ITC')\n\n getInfodata.sig_Infodata.connect(self.store_data_itc)\n # getInfodata.sig_visaerror.connect(self.printing)\n getInfodata.sig_visaerror.connect(self.show_error_textBrowser)\n # getInfodata.sig_assertion.connect(self.printing)\n getInfodata.sig_assertion.connect(self.show_error_textBrowser)\n getInfodata.sig_visatimeout.connect(lambda: self.show_error_textBrowser('ITC: timeout'))\n\n self.data['ITC'] = dict(set_temperature = 0,\n Sensor_1_K =0,\n Sensor_2_K =0,\n Sensor_3_K =0,\n temperature_error =0,\n heater_output_as_percent =0,\n heater_output_as_voltage =0,\n gas_flow_output =0,\n proportional_band =0,\n integral_action_time =0,\n derivative_action_time = 0)\n 
integration_length = 7\n self.ITC_Kpmin = dict(newtime = [time.time()]*integration_length,\n Sensor_1_K = [0]*integration_length,\n Sensor_2_K = [0]*integration_length,\n Sensor_3_K = [0]*integration_length,\n Sensor_4_K = [0]*integration_length)\n\n # setting ITC values by GUI ITC window\n self.ITC_window.spinsetTemp.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_Temperature(value))\n self.ITC_window.spinsetTemp.editingFinished.connect(lambda: self.threads['control_ITC'][0].setTemperature())\n\n self.ITC_window.spinsetGasOutput.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_GasOutput(value))\n self.ITC_window.spinsetGasOutput.editingFinished.connect(lambda : self.threads['control_ITC'][0].setGasOutput())\n\n self.ITC_window.spinsetHeaterPercent.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_HeaterOutput(value))\n self.ITC_window.spinsetHeaterPercent.editingFinished.connect(lambda : self.threads['control_ITC'][0].setHeaterOutput())\n\n self.ITC_window.spinsetProportionalID.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_Proportional(value))\n self.ITC_window.spinsetProportionalID.editingFinished.connect(lambda : self.threads['control_ITC'][0].setProportional())\n\n self.ITC_window.spinsetPIntegrationD.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_Integral(value))\n self.ITC_window.spinsetPIntegrationD.editingFinished.connect(lambda : self.threads['control_ITC'][0].setIntegral())\n\n self.ITC_window.spinsetPIDerivative.valueChanged.connect(lambda value: self.threads['control_ITC'][0].gettoset_Derivative(value))\n self.ITC_window.spinsetPIDerivative.editingFinished.connect(lambda : self.threads['control_ITC'][0].setDerivative())\n\n self.ITC_window.combosetHeatersens.activated['int'].connect(lambda value: self.threads['control_ITC'][0].setHeaterSensor(value + 1))\n\n self.ITC_window.combosetAutocontrol.activated['int'].connect(lambda value: self.threads['control_ITC'][0].setAutoControl(value))\n\n self.ITC_window.spin_threadinterval.valueChanged.connect(lambda value: self.threads['control_ITC'][0].setInterval(value))\n\n # thread.started.connect(getInfodata.work)\n # thread.start()\n self.action_run_ITC.setChecked(True)\n self.logging_running_ITC = True\n except VisaIOError as e:\n self.action_run_ITC.setChecked(False)\n self.show_error_textBrowser(e)\n # print(e) # TODO: open window displaying the error message\n\n else:\n # possibly implement putting the instrument back to local operation\n self.ITC_window.spinsetTemp.valueChanged.disconnect()\n self.ITC_window.spinsetTemp.editingFinished.disconnect()\n self.ITC_window.spinsetGasOutput.valueChanged.disconnect()\n self.ITC_window.spinsetGasOutput.editingFinished.disconnect()\n self.ITC_window.spinsetHeaterPercent.valueChanged.disconnect()\n self.ITC_window.spinsetHeaterPercent.editingFinished.disconnect()\n self.ITC_window.spinsetProportionalID.valueChanged.disconnect()\n self.ITC_window.spinsetProportionalID.editingFinished.disconnect()\n self.ITC_window.spinsetPIntegrationD.valueChanged.disconnect()\n self.ITC_window.spinsetPIntegrationD.editingFinished.disconnect()\n self.ITC_window.spinsetPIDerivative.valueChanged.disconnect()\n self.ITC_window.spinsetPIDerivative.editingFinished.disconnect()\n self.ITC_window.combosetHeatersens.activated['int'].disconnect()\n self.ITC_window.combosetAutocontrol.activated['int'].disconnect()\n self.ITC_window.spin_threadinterval.valueChanged.disconnect()\n\n 
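            # (added note) the GUI signals are disconnected above before the
            # thread is stopped, so no editing-finished slot can fire into a
            # worker that is already being torn down.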
self.stopping_thread('control_ITC')\n            self.action_run_ITC.setChecked(False)\n            self.logging_running_ITC = False\n\n    @pyqtSlot(bool)\n    def show_ITC(self, boolean):\n        \"\"\"display/close the ITC data & control window\"\"\"\n        if boolean:\n            self.ITC_window.show()\n        else:\n            self.ITC_window.close()\n\n    @pyqtSlot(dict)\n    def store_data_itc(self, data):\n        \"\"\"\n        Calculate the rate of change of Temperature on the sensors [K/min]\n        Store ITC data in self.data['ITC'], update ITC_window\n        \"\"\"\n\n        timediffs = [(entry-self.ITC_Kpmin['newtime'][i+1])/60 for i, entry in enumerate(self.ITC_Kpmin['newtime'][:-1])]# -self.ITC_Kpmin['newtime'])/60\n        tempdiffs = dict(Sensor_1_Kpmin=[entry-self.ITC_Kpmin['Sensor_1_K'][i+1] for i, entry in enumerate(self.ITC_Kpmin['Sensor_1_K'][:-1])],\n                         Sensor_2_Kpmin=[entry-self.ITC_Kpmin['Sensor_2_K'][i+1] for i, entry in enumerate(self.ITC_Kpmin['Sensor_2_K'][:-1])],\n                         Sensor_3_Kpmin=[entry-self.ITC_Kpmin['Sensor_3_K'][i+1] for i, entry in enumerate(self.ITC_Kpmin['Sensor_3_K'][:-1])])\n        # integrating over the lists, to get an integrated rate of Kelvin/min\n        integrated_diff = dict(Sensor_1_Kpmin=np.mean(np.array(tempdiffs['Sensor_1_Kpmin'])/np.array(timediffs)),\n                               Sensor_2_Kpmin=np.mean(np.array(tempdiffs['Sensor_2_Kpmin'])/np.array(timediffs)),\n                               Sensor_3_Kpmin=np.mean(np.array(tempdiffs['Sensor_3_Kpmin'])/np.array(timediffs)))\n\n        if not integrated_diff['Sensor_1_Kpmin'] == 0:\n            self.ITC_window.lcdTemp_sens1_Kpmin.display(integrated_diff['Sensor_1_Kpmin'])\n        if not integrated_diff['Sensor_2_Kpmin'] == 0:\n            self.ITC_window.lcdTemp_sens2_Kpmin.display(integrated_diff['Sensor_2_Kpmin'])\n        if not integrated_diff['Sensor_3_Kpmin'] == 0:\n            self.ITC_window.lcdTemp_sens3_Kpmin.display(integrated_diff['Sensor_3_Kpmin'])\n\n        # advancing entries to the next slot, from the back, so older values\n        # are not overwritten before they have been copied onwards\n        for i in reversed(range(len(self.ITC_Kpmin['newtime']) - 1)):\n            self.ITC_Kpmin['newtime'][i+1] = self.ITC_Kpmin['newtime'][i]\n            self.ITC_Kpmin['Sensor_1_K'][i+1] = self.ITC_Kpmin['Sensor_1_K'][i]\n            self.ITC_Kpmin['Sensor_2_K'][i+1] = self.ITC_Kpmin['Sensor_2_K'][i]\n            self.ITC_Kpmin['Sensor_3_K'][i+1] = self.ITC_Kpmin['Sensor_3_K'][i]\n\n        # including the new values (each sensor guarded by its own reading)\n        self.ITC_Kpmin['newtime'][0] = time.time()\n        self.ITC_Kpmin['Sensor_1_K'][0] = deepcopy(data['Sensor_1_K']) if not data['Sensor_1_K'] == None else 0\n        self.ITC_Kpmin['Sensor_2_K'][0] = deepcopy(data['Sensor_2_K']) if not data['Sensor_2_K'] == None else 0\n        self.ITC_Kpmin['Sensor_3_K'][0] = deepcopy(data['Sensor_3_K']) if not data['Sensor_3_K'] == None else 0\n        data.update(dict(Sensor_1_Kpmin=integrated_diff['Sensor_1_Kpmin'],\n                         Sensor_2_Kpmin=integrated_diff['Sensor_2_Kpmin'],\n                         Sensor_3_Kpmin=integrated_diff['Sensor_3_Kpmin']))\n\n        data['date'] = convert_time(time.time())\n        with self.dataLock:\n            self.data['ITC'].update(data)\n            # this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,\n            # since the command failed in the communication with the device, the last value is retained\n            if not self.data['ITC']['Sensor_1_K'] == None:\n                self.ITC_window.lcdTemp_sens1_K.display(self.data['ITC']['Sensor_1_K'])\n            if not self.data['ITC']['Sensor_2_K'] == None:\n                self.ITC_window.lcdTemp_sens2_K.display(self.data['ITC']['Sensor_2_K'])\n            if not self.data['ITC']['Sensor_3_K'] == None:\n                self.ITC_window.lcdTemp_sens3_K.display(self.data['ITC']['Sensor_3_K'])\n\n            if not self.data['ITC']['set_temperature'] == None:\n                self.ITC_window.lcdTemp_set.display(self.data['ITC']['set_temperature'])\n            if not self.data['ITC']['temperature_error'] == None:\n                
self.ITC_window.lcdTemp_err.display(self.data['ITC']['temperature_error'])\n if not self.data['ITC']['heater_output_as_percent'] == None:\n self.ITC_window.progressHeaterPercent.setValue(self.data['ITC']['heater_output_as_percent'])\n if not self.data['ITC']['heater_output_as_voltage'] == None:\n self.ITC_window.lcdHeaterVoltage.display(self.data['ITC']['heater_output_as_voltage'])\n if not self.data['ITC']['gas_flow_output'] == None:\n self.ITC_window.progressNeedleValve.setValue(self.data['ITC']['gas_flow_output'])\n if not self.data['ITC']['gas_flow_output'] == None:\n self.ITC_window.lcdNeedleValve_percent.display(self.data['ITC']['gas_flow_output'])\n if not self.data['ITC']['proportional_band'] == None:\n self.ITC_window.lcdProportionalID.display(self.data['ITC']['proportional_band'])\n if not self.data['ITC']['integral_action_time'] == None:\n self.ITC_window.lcdPIntegrationD.display(self.data['ITC']['integral_action_time'])\n if not self.data['ITC']['derivative_action_time'] == None:\n self.ITC_window.lcdPIDerivative.display(self.data['ITC']['derivative_action_time'])\n\n\n # ------- ------- ILM\n def initialize_window_ILM(self):\n \"\"\"initialize ILM Window\"\"\"\n self.ILM_window = Window_ui(ui_file='.\\\\Oxford\\\\ILM_control.ui')\n self.ILM_window.sig_closing.connect(lambda: self.action_show_ILM.setChecked(False))\n\n self.action_run_ILM.triggered['bool'].connect(self.run_ILM)\n self.action_show_ILM.triggered['bool'].connect(self.show_ILM)\n\n @pyqtSlot(bool)\n def run_ILM(self, boolean):\n \"\"\"start/stop the Level Meter thread\"\"\"\n\n\n if boolean:\n try:\n getInfodata = self.running_thread(ILM_Updater(InstrumentAddress=ILM_Instrumentadress),'ILM', 'control_ILM')\n\n getInfodata.sig_Infodata.connect(self.store_data_ilm)\n # getInfodata.sig_visaerror.connect(self.printing)\n # getInfodata.sig_assertion.connect(self.printing)\n getInfodata.sig_visaerror.connect(self.show_error_textBrowser)\n getInfodata.sig_assertion.connect(self.show_error_textBrowser)\n getInfodata.sig_visatimeout.connect(lambda: self.show_error_textBrowser('ILM: timeout'))\n\n self.ILM_window.combosetProbingRate_chan1.activated['int'].connect(lambda value: self.threads['control_ILM'][0].setProbingSpeed(value, 1))\n # self.ILM_window.combosetProbingRate_chan2.activated['int'].connect(lambda value: self.threads['control_ILM'][0].setProbingSpeed(value, 2))\n\n self.ILM_window.spin_threadinterval.valueChanged.connect(lambda value: self.threads['control_ILM'][0].setInterval(value))\n\n self.action_run_ILM.setChecked(True)\n\n except VisaIOError as e:\n self.action_run_ILM.setChecked(False)\n self.show_error_textBrowser(e)\n # print(e) # TODO: open window displaying the error message\n else:\n self.action_run_ILM.setChecked(False)\n self.stopping_thread('control_ILM')\n\n @pyqtSlot(bool)\n def show_ILM(self, boolean):\n \"\"\"display/close the ILM data & control window\"\"\"\n if boolean:\n self.ILM_window.show()\n else:\n self.ILM_window.close()\n\n @pyqtSlot(dict)\n def store_data_ilm(self, data):\n \"\"\"Store ILM data in self.data['ILM'], update ILM_window\"\"\"\n with self.dataLock:\n data['date'] = convert_time(time.time())\n self.data['ILM'].update(data)\n # this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,\n # since the command failed in the communication with the device, the last value is retained\n chan1 = 100 if self.data['ILM']['channel_1_level'] > 100 else self.data['ILM']['channel_1_level']\n chan2 = 100 if self.data['ILM']['channel_2_level'] > 
100 else self.data['ILM']['channel_2_level']\n self.ILM_window.progressLevelHe.setValue(chan1)\n self.ILM_window.progressLevelN2.setValue(chan2)\n\n self.ILM_window.lcdLevelHe.display(self.data['ILM']['channel_1_level'])\n self.ILM_window.lcdLevelN2.display(self.data['ILM']['channel_2_level'])\n\n self.MainDock_HeLevel.setValue(chan1)\n self.MainDock_N2Level.setValue(chan2)\n # print(self.data['ILM']['channel_1_level'], self.data['ILM']['channel_2_level'])\n\n # ------- ------- IPS\n def initialize_window_IPS(self):\n \"\"\"initialize PS Window\"\"\"\n self.IPS_window = Window_ui(ui_file='.\\\\Oxford\\\\IPS_control.ui')\n self.IPS_window.sig_closing.connect(lambda: self.action_show_IPS.setChecked(False))\n\n self.action_run_IPS.triggered['bool'].connect(self.run_IPS)\n self.action_show_IPS.triggered['bool'].connect(self.show_IPS)\n\n self.IPS_window.labelStatusMagnet.setText('')\n self.IPS_window.labelStatusCurrent.setText('')\n self.IPS_window.labelStatusActivity.setText('')\n self.IPS_window.labelStatusLocRem.setText('')\n self.IPS_window.labelStatusSwitchHeater.setText('')\n\n @pyqtSlot(bool)\n def run_IPS(self, boolean):\n \"\"\"start/stop the Powersupply thread\"\"\"\n\n if boolean:\n try:\n getInfodata = self.running_thread(IPS_Updater(InstrumentAddress=IPS_Instrumentadress),'IPS', 'control_IPS')\n\n getInfodata.sig_Infodata.connect(self.store_data_ips)\n # getInfodata.sig_visaerror.connect(self.printing)\n # getInfodata.sig_assertion.connect(self.printing)\n getInfodata.sig_visaerror.connect(self.show_error_textBrowser)\n getInfodata.sig_assertion.connect(self.show_error_textBrowser)\n getInfodata.sig_visatimeout.connect(lambda: self.show_error_textBrowser('IPS: timeout'))\n\n self.IPS_window.comboSetActivity.activated['int'].connect(lambda value: self.threads['control_IPS'][0].setActivity(value))\n self.IPS_window.comboSetSwitchHeater.activated['int'].connect(lambda value: self.threads['control_IPS'][0].setSwitchHeater(value))\n\n self.IPS_window.spinSetFieldSetPoint.valueChanged.connect(lambda value: self.threads['control_IPS'][0].gettoset_FieldSetPoint(value))\n self.IPS_window.spinSetFieldSetPoint.editingFinished.connect(lambda: self.threads['control_IPS'][0].setFieldSetPoint())\n\n self.IPS_window.spinSetFieldSweepRate.valueChanged.connect(lambda value: self.threads['control_IPS'][0].gettoset_FieldSweepRate(value))\n self.IPS_window.spinSetFieldSweepRate.editingFinished.connect(lambda: self.threads['control_IPS'][0].setFieldSweepRate())\n\n self.IPS_window.spin_threadinterval.valueChanged.connect(lambda value: self.threads['control_IPS'][0].setInterval(value))\n\n self.action_run_IPS.setChecked(True)\n\n except VisaIOError as e:\n self.action_run_IPS.setChecked(False)\n self.show_error_textBrowser(e)\n # print(e) # TODO: open window displaying the error message\n else:\n self.action_run_IPS.setChecked(False)\n self.stopping_thread('control_IPS')\n\n @pyqtSlot(bool)\n def show_IPS(self, boolean):\n \"\"\"display/close the ILM data & control window\"\"\"\n if boolean:\n self.IPS_window.show()\n else:\n self.IPS_window.close()\n\n @pyqtSlot(dict)\n def store_data_ips(self, data):\n \"\"\"Store PS data in self.data['ILM'], update PS_window\"\"\"\n with self.dataLock:\n data['date'] = convert_time(time.time())\n self.data['IPS'].update(data)\n # this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,\n # since the command failed in the communication with the device, the last value is retained\n 
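            # (added note) unlike the ITC branch, the display calls below read
            # their keys unguarded; self.data['IPS'].get('FIELD_set_point', 0)
            # (sketch, same key names as below) would avoid a KeyError on a
            # first, partial packet.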
self.IPS_window.lcdFieldSetPoint.display(self.data['IPS']['FIELD_set_point'])\n            self.IPS_window.lcdFieldSweepRate.display(self.data['IPS']['FIELD_sweep_rate'])\n\n            self.IPS_window.lcdOutputField.display(self.data['IPS']['FIELD_output'])\n            self.IPS_window.lcdMeasuredMagnetCurrent.display(self.data['IPS']['measured_magnet_current'])\n            self.IPS_window.lcdOutputCurrent.display(self.data['IPS']['CURRENT_output'])\n            # self.IPS_window.lcdXXX.display(self.data['IPS']['CURRENT_set_point'])\n            # self.IPS_window.lcdXXX.display(self.data['IPS']['CURRENT_sweep_rate'])\n\n            self.IPS_window.lcdLeadResistance.display(self.data['IPS']['lead_resistance'])\n\n            self.IPS_window.lcdPersistentMagnetField.display(self.data['IPS']['persistent_magnet_field'])\n            self.IPS_window.lcdTripField.display(self.data['IPS']['trip_field'])\n            self.IPS_window.lcdPersistentMagnetCurrent.display(self.data['IPS']['persistent_magnet_current'])\n            self.IPS_window.lcdTripCurrent.display(self.data['IPS']['trip_current'])\n\n            self.IPS_window.labelStatusMagnet.setText(self.data['IPS']['status_magnet'])\n            self.IPS_window.labelStatusCurrent.setText(self.data['IPS']['status_current'])\n            self.IPS_window.labelStatusActivity.setText(self.data['IPS']['status_activity'])\n            self.IPS_window.labelStatusLocRem.setText(self.data['IPS']['status_locrem'])\n            self.IPS_window.labelStatusSwitchHeater.setText(self.data['IPS']['status_switchheater'])\n\n\n    # ------- LakeShore 350 -------\n    def initialize_window_LakeShore350(self):\n        \"\"\"initialize LakeShore Window\"\"\"\n        self.LakeShore350_window = Window_ui(ui_file='.\\\\LakeShore\\\\LakeShore350_control.ui')\n        self.LakeShore350_window.sig_closing.connect(lambda: self.action_show_LakeShore350.setChecked(False))\n\n        # self.LakeShore350_window.textSensor1_Kpmin.setAlignment(QtAlignRight)\n\n        self.action_run_LakeShore350.triggered['bool'].connect(self.run_LakeShore350)\n        self.action_show_LakeShore350.triggered['bool'].connect(self.show_LakeShore350)\n        self.LakeShore350_Kpmin = None\n\n    def func_LakeShore350_setKpminLength(self, length):\n        \"\"\"set the number of measurements the calculation should be conducted over\"\"\"\n        if not self.LakeShore350_Kpmin:\n            self.LakeShore350_Kpmin = dict( newtime=[time.time()]*length,\n                                            Sensors=dict(\n                                                Sensor_1_K=[0]*length,\n                                                Sensor_2_K=[0]*length,\n                                                Sensor_3_K=[0]*length,\n                                                Sensor_4_K=[0]*length),\n                                            length=length)\n        elif self.LakeShore350_Kpmin['length'] > length:\n            self.LakeShore350_Kpmin['newtime'] = self.LakeShore350_Kpmin['newtime'][:length]\n            # reassign through the dict, so the stored lists are actually truncated\n            for key in self.LakeShore350_Kpmin['Sensors']:\n                self.LakeShore350_Kpmin['Sensors'][key] = self.LakeShore350_Kpmin['Sensors'][key][:length]\n            self.LakeShore350_Kpmin['length'] = length\n        elif self.LakeShore350_Kpmin['length'] < length:\n            self.LakeShore350_Kpmin['newtime'] += [time.time()]*(length-self.LakeShore350_Kpmin['length'])\n            for key in self.LakeShore350_Kpmin['Sensors']:\n                self.LakeShore350_Kpmin['Sensors'][key] += [0]*(length-self.LakeShore350_Kpmin['length'])\n            self.LakeShore350_Kpmin['length'] = length\n\n\n    @pyqtSlot(bool)\n    def run_LakeShore350(self, boolean):\n        \"\"\"start/stop the LakeShore350 thread\"\"\"\n\n        if boolean:\n            try:\n                getInfodata = self.running_thread(LakeShore350_Updater(InstrumentAddress=LakeShore_InstrumentAddress),'LakeShore350', 'control_LakeShore350')\n\n                getInfodata.sig_Infodata.connect(self.store_data_LakeShore350)\n                # getInfodata.sig_visaerror.connect(self.printing)\n                getInfodata.sig_visaerror.connect(self.show_error_textBrowser)\n                # getInfodata.sig_assertion.connect(self.printing)\n                getInfodata.sig_assertion.connect(self.show_error_textBrowser)\n                
getInfodata.sig_visatimeout.connect(lambda: self.show_error_textBrowser('LakeShore350: timeout'))\n\n                self.func_LakeShore350_setKpminLength(5)\n\n                # setting LakeShore values by GUI LakeShore window\n                self.LakeShore350_window.spinSetTemp_K.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Temp_K(value))\n                self.LakeShore350_window.spinSetTemp_K.editingFinished.connect(lambda: self.threads['control_LakeShore350'][0].setTemp_K())\n\n                self.LakeShore350_window.spinSetRampRate_Kpmin.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Ramp_Rate_K(value))\n                self.LakeShore350_window.spinSetRampRate_Kpmin.editingFinished.connect(lambda: self.threads['control_LakeShore350'][0].setRamp_Rate_K())\n\n                # allows to choose from different inputs to connect to output 1 control loop. default is input 1.\n\n                self.LakeShore350_window.comboSetInput_Sensor.activated['int'].connect(lambda value: self.threads['control_LakeShore350'][0].setInput(value + 1))\n                # self.LakeShore350_window.spinSetInput_Sensor.editingFinished.connect(lambda: self.threads['control_LakeShore350'][0].setInput())\n\n\n                \"\"\" NEW GUI controls P, I and D values for Control Loop PID Values Command\n                \"\"\"\n                # self.LakeShore350_window.spinSetLoopP_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_LoopP_Param(value))\n                # self.LakeShore350_window.spinSetLoopP_Param.Finished.connect(lambda: self.threads['control_LakeShore350'][0].setLoopP_Param())\n\n                # self.LakeShore350_window.spinSetLoopI_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_LoopI_Param(value))\n                # self.LakeShore350_window.spinSetLoopI_Param.Finished.connect(lambda: self.threads['control_LakeShore350'][0].setLoopI_Param())\n\n                # self.LakeShore350_window.spinSetLoopD_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_LoopD_Param(value))\n                # self.LakeShore350_window.spinSetLoopD_Param.Finished.connect(lambda: self.threads['control_LakeShore350'][0].setLoopD_Param())\n\n                \"\"\" NEW GUI Heater Range and Output Zone\n                \"\"\"\n\n                # self.LakeShore350_window.comboSetHeater_Range.activated['int'].connect(lambda value: self.threads['control_LakeShore350'][0].setHeater_Range(value))\n\n                #self.LakeShore350_window.spinSetHeater_Range.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Heater_Range(value))\n                #self.LakeShore350_window.spinSetHeater_Range.Finished.connect(lambda: self.threads['control_LakeShore350'][0].setHeater_Range())\n\n                # self.LakeShore350_window.spinSetUpper_Bound.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Upper_Bound(value))\n                # self.LakeShore350_window.spinSetZoneP_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_ZoneP_Param(value))\n                # self.LakeShore350_window.spinSetZoneI_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_ZoneI_Param(value))\n                # self.LakeShore350_window.spinSetZoneD_Param.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_ZoneD_Param(value))\n                # self.LakeShore350_window.spinSetZoneMout.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_ZoneMout(value))\n                # self.LakeShore350_window.spinSetZone_Range.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Zone_Range(value))\n                # 
self.LakeShore350_window.spinSetZone_Rate.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].gettoset_Zone_Rate(value))\n\n\n                self.LakeShore350_window.spin_threadinterval.valueChanged.connect(lambda value: self.threads['control_LakeShore350'][0].setInterval(value))\n\n\n                self.action_run_LakeShore350.setChecked(True)\n\n            except VisaIOError as e:\n                self.action_run_LakeShore350.setChecked(False)\n                self.show_error_textBrowser('running: {}'.format(e))\n        else:\n            self.action_run_LakeShore350.setChecked(False)\n            self.stopping_thread('control_LakeShore350')\n\n            self.LakeShore350_window.spinSetTemp_K.valueChanged.disconnect()\n            self.LakeShore350_window.spinSetTemp_K.editingFinished.disconnect()\n            self.LakeShore350_window.spinSetRampRate_Kpmin.valueChanged.disconnect()\n            self.LakeShore350_window.spinSetRampRate_Kpmin.editingFinished.disconnect()\n            self.LakeShore350_window.comboSetInput_Sensor.activated['int'].disconnect()\n\n    @pyqtSlot(bool)\n    def show_LakeShore350(self, boolean):\n        \"\"\"display/close the LakeShore350 data & control window\"\"\"\n        if boolean:\n            self.LakeShore350_window.show()\n        else:\n            self.LakeShore350_window.close()\n\n    def calculate_Kpmin(self, data):\n        \"\"\"calculate the rate of change of Temperature\"\"\"\n        coeffs = []\n        for sensordata in self.LakeShore350_Kpmin['Sensors'].values():\n            coeffs.append(np.polynomial.polynomial.polyfit(self.LakeShore350_Kpmin['newtime'], sensordata, deg=1))\n\n        integrated_diff = dict(Sensor_1_Kpmin=coeffs[0][1]*60,\n                               Sensor_2_Kpmin=coeffs[1][1]*60,\n                               Sensor_3_Kpmin=coeffs[2][1]*60,\n                               Sensor_4_Kpmin=coeffs[3][1]*60)\n\n        data.update(integrated_diff)\n\n\n        # advancing entries to the next slot: prepend the newest value and drop the oldest one\n        self.LakeShore350_Kpmin['newtime'] = [time.time()] + self.LakeShore350_Kpmin['newtime'][:-1]\n        for key in self.LakeShore350_Kpmin['Sensors'].keys():\n            self.LakeShore350_Kpmin['Sensors'][key] = [deepcopy(data[key])] + self.LakeShore350_Kpmin['Sensors'][key][:-1]\n\n\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_2_K'][i+1] = self.LakeShore350_Kpmin['Sensors']['Sensor_2_K'][i]\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_3_K'][i+1] = self.LakeShore350_Kpmin['Sensors']['Sensor_3_K'][i]\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_4_K'][i+1] = self.LakeShore350_Kpmin['Sensors']['Sensor_4_K'][i]\n\n        # including the new values\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_2_K'][0] = deepcopy(data['Sensor_2_K'])\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_3_K'][0] = deepcopy(data['Sensor_3_K'])\n        # self.LakeShore350_Kpmin['Sensors']['Sensor_4_K'][0] = deepcopy(data['Sensor_4_K'])\n\n        # data.update(dict(Sensor_1_Kpmin=integrated_diff['Sensor_1_Kpmin'],\n        #                  Sensor_2_Kpmin=integrated_diff['Sensor_2_Kpmin'],\n        #                  Sensor_3_Kpmin=integrated_diff['Sensor_3_Kpmin'],\n        #                  Sensor_4_Kpmin=integrated_diff['Sensor_4_Kpmin']))\n\n        return integrated_diff, data\n\n    @pyqtSlot(dict)\n    def store_data_LakeShore350(self, data):\n        \"\"\"\n        Calculate the rate of change of Temperature on the sensors [K/min]\n        Store LakeShore350 data in self.data['LakeShore350'], update LakeShore350_window\n        \"\"\"\n\n        coeffs, data = self.calculate_Kpmin(data)\n\n        for GUI_element, co in zip([self.LakeShore350_window.textSensor1_Kpmin,\n                                    self.LakeShore350_window.textSensor2_Kpmin,\n                                    self.LakeShore350_window.textSensor3_Kpmin,\n                                    self.LakeShore350_window.textSensor4_Kpmin],\n                                   coeffs.values()):\n            if co != 0:\n                GUI_element.setText('{num:=+10.4f}'.format(num=co))\n\n        
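# time-stamp the new sample, then cache it and refresh the LakeShore350 window\n        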
data['date'] = convert_time(time.time())\n        with self.dataLock:\n            self.data['LakeShore350'].update(data)\n            # this needs to draw from the self.data['INSTRUMENT'] so that in case one of the keys did not show up,\n            # since the command failed in the communication with the device, the last value is retained\n\n        self.LakeShore350_window.progressHeaterOutput_percentage.setValue(self.data['LakeShore350']['Heater_Output_percentage'])\n        self.LakeShore350_window.lcdHeaterOutput_mW.display(self.data['LakeShore350']['Heater_Output_mW'])\n        self.LakeShore350_window.lcdSetTemp_K.display(self.data['LakeShore350']['Temp_K'])\n        # self.LakeShore350_window.lcdRampeRate_Status.display(self.data['LakeShore350']['RampRate_Status'])\n        self.LakeShore350_window.lcdSetRampRate_Kpmin.display(self.data['LakeShore350']['Ramp_Rate'])\n\n        self.LakeShore350_window.comboSetInput_Sensor.setCurrentIndex(int(self.data['LakeShore350']['Input_Sensor'])-1)\n        self.LakeShore350_window.lcdSensor1_K.display(self.data['LakeShore350']['Sensor_1_K'])\n        self.LakeShore350_window.lcdSensor2_K.display(self.data['LakeShore350']['Sensor_2_K'])\n        self.LakeShore350_window.lcdSensor3_K.display(self.data['LakeShore350']['Sensor_3_K'])\n        self.LakeShore350_window.lcdSensor4_K.display(self.data['LakeShore350']['Sensor_4_K'])\n\n        \"\"\"NEW GUI to display P,I and D Parameters\n        \"\"\"\n        # self.LakeShore350_window.lcdLoopP_Param.display(self.data['LakeShore350']['Loop_P_Param'])\n        # self.LakeShore350_window.lcdLoopI_Param.display(self.data['LakeShore350']['Loop_I_Param'])\n        # self.LakeShore350_window.lcdLoopD_Param.display(self.data['LakeShore350']['Loop_D_Param'])\n\n        # self.LakeShore350_window.lcdHeater_Range.display(self.data['LakeShore350']['Heater_Range'])\n\n\n\n    # ------- MISC -------\n\n    def printing(self,b):\n        \"\"\"arbitrary example function\"\"\"\n        print(b)\n\n    def initialize_window_Log_conf(self):\n        \"\"\"initialize Logging configuration window\"\"\"\n        self.Log_conf_window = Logger_configuration()\n        self.Log_conf_window.sig_closing.connect(lambda: self.action_Logging_configuration.setChecked(False))\n        self.Log_conf_window.sig_send_conf.connect(lambda conf: self.sig_logging_newconf.emit(conf))\n\n        self.action_Logging.triggered['bool'].connect(self.run_logger)\n        self.action_Logging_configuration.triggered['bool'].connect(self.show_logging_configuration)\n\n    @pyqtSlot(bool)\n    def run_logger(self, boolean):\n        \"\"\"start/stop the logging thread\"\"\"\n\n        # read the last configuration of what shall be logged from a respective file\n\n        if boolean:\n            logger = self.running_thread(main_Logger(self), None, 'logger')\n            logger.sig_log.connect(lambda : self.sig_logging.emit(deepcopy(self.data)))\n            logger.sig_configuring.connect(self.show_logging_configuration)\n            self.logging_running_logger = True\n\n        else:\n            self.stopping_thread('logger')\n            self.logging_running_logger = False\n\n    @pyqtSlot(bool)\n    def show_logging_configuration(self, boolean):\n        \"\"\"display/close the logging configuration window\"\"\"\n        if boolean:\n            self.Log_conf_window.show()\n        else:\n            self.Log_conf_window.close()\n\n    @pyqtSlot(bool)\n    def run_logger_live(self, boolean):\n        \"\"\"start/stop the live-logging thread\"\"\"\n\n        if boolean:\n            # try:\n\n            getInfodata = self.running_thread(live_Logger(self), None, 'control_Logging_live')\n            getInfodata.sig_assertion.connect(self.show_error_textBrowser)\n\n            self.actionLogging_LIVE.setChecked(True)\n            print('logging live online')\n            # except VisaIOError as e:\n            #     self.actionLogging_LIVE.setChecked(False)\n            #     
self.show_error_textBrowser(e)\n            # print(e) # TODO: open window displaying the error message\n        else:\n            self.stopping_thread('control_Logging_live')\n            self.actionLogging_LIVE.setChecked(False)\n\n    def initialize_window_Errors(self):\n        \"\"\"initialize Error Window\"\"\"\n        self.Errors_window = Window_ui(ui_file='.\\\\configurations\\\\Errors.ui')\n        self.Errors_window.sig_closing.connect(lambda: self.action_show_Errors.setChecked(False))\n\n        self.Errors_window.textErrors.setHtml('')\n\n        # self.action_run_Errors.triggered['bool'].connect(self.run_ITC)\n        self.action_show_Errors.triggered['bool'].connect(self.show_Errors)\n\n    @pyqtSlot(bool)\n    def show_Errors(self, boolean):\n        \"\"\"display/close the Error window\"\"\"\n        if boolean:\n            self.Errors_window.show()\n        else:\n            self.Errors_window.close()\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    a = time.time()\n    form = mainWindow(app=app)\n    form.show()\n    print(time.time()-a)\n    sys.exit(app.exec_())\n","sub_path":"mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":57233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"330366379","text":"import videos\nfrom videos import (VideoFromYoutubeURL, VideoFromImageURL, VideoFromText,\n                    VideoFromFrameFromYoutubeVideo, VideoSaveStream)\nfrom filedict import FileDict\nfrom functions import in_new_thread\n\n\ndef brakes_decorator(func):\n    def wrapper(*args, **kwargs):\n        res = func(*args, **kwargs)\n        return \"(\" + res + \")\"\n    return wrapper\n\n\n# @brakes_decorator\ndef to_url(video):\n    if type(video) == videos.VideoFromYoutubeURL:\n        return f\"'{video.video_id}'\"\n    elif type(video) == videos.PartOfVideo:\n        return to_url(video.video)+f\"[{video.start_time}: {video.end_time}]\"\n    elif type(video) == videos.SumOfVideo:\n        rt = \"+\".join([to_url(elem) for elem in video.videos_list])\n        return rt\n    elif type(video) == videos.PartsOfOneVideo:\n        rt = to_url(video.video) + '['\n        rt += \", \".join(f\"{s.start}: {s.stop}\" for s in video.slices_list)\n        return rt + ']'\n    elif type(video) == videos.SeparatedVideoAndAudio:\n        return f\"{to_url(video.frames_video)} / {to_url(video.sound_video)}\"\n    elif type(video) == videos.SmartAcceleratedVideo:\n        return \"(\" + to_url(video.video) + \")\" + str(video.settings)[8:]\n\n    \ndef video_from_very_short_str(string):\n    it = 0  # iterator\n    operators_info = {\"+\": {\"operands_numb\": 2, \"priority\": 1},\n                      \"*\": {\"operands_numb\": 2, \"priority\": 2},\n                      \"/\": {\"operands_numb\": 2, \"priority\": 2}}\n    stack = [\"end\"]\n    while it < len(string):\n        if string[it] == \"'\":\n            close = string.find(\"'\", it + 1)\n            video_id = string[it + 1: close]\n            stack.append(videos.VideoFromYoutubeURL(video_id))\n            it = close + 1\n        elif string[it] == '\"':\n            close = string.find('\"', it + 1)\n            image_link = string[it + 1: close]\n            stack.append(videos.VideoFromImageURL(image_link))\n            it = close + 1\n        elif string[it] == \"[\":\n            close = string.find(\"]\", it)\n            slices = list(string[it + 1: close].split(\",\"))\n            slices = [list(map(float, elem.split(\":\"))) for elem in slices]\n            slices = [slice(*elem) for elem in slices]\n            stack[-1] = stack[-1][slices]\n            it = close + 1\n        else:\n            # operator handling is still unfinished; skip the character so the loop cannot stall\n            it += 1\n\n@in_new_thread\ndef process_str(s, folder, chunk=5):\n    import os\n    try:\n        os.stat(folder)\n    except:\n        os.makedirs(folder)\n\n    stream = VideoSaveStream(eval(s))\n    it, file_counter = 0, 0\n    dur = stream.video.get_duration()\n    while it < dur - chunk:\n        # print(f\"Writing... 
{it, it + chunk, folder, file_counter}\")\n        file_counter = stream.save_part(it, it + chunk, folder, file_counter)\n        # file_counter -= 1\n        it += chunk\n\n    if it != dur:\n        # print(f\"last {it, dur}\")\n        stream.save_part(it, dur, folder, file_counter)\n    print(f\"Task '{s}' successfully completed\")\n\n\ndef bad_code_read_one_line_from_test_txt_and_process_it():\n    with open(\"test.txt\") as f:\n        s = f.readline()\n    print(eval(s))\n    # s = f\"VideoFromYoutubeURL('KWbANha2iws')[71:77] + VideoFromImageURL('{image_url}', 7)\"\n    name = \"111\"\n    folder = r\"C:\\Users\\m\\Desktop\\PythonProjects\\YouTube_GlueAndScissors\\Code\\glue_scissors_for_youtube\\video\\{}/\".format(name)\n    print(folder)\n    process_str(s, folder, chunk=3 * 60 * 60)\n\n\nbad_code_read_one_line_from_test_txt_and_process_it()\n\n# r\"\"\"\n# image_url = r\"https://img2.akspic.ru/image/88423-burdzh_halifa-neboskreb-vyshka-zdanie-liniya_gorizonta-1920x1200.jpg\"\n# s = \"VideoFromYoutubeURL('2WemzwuAQF4')[56: 63, 66: 69]/ VideoFromYoutubeURL('qiZLHchtX8c')[239:249](volume_cooficient = 1.2)\"\n# s = r\"VideoFromText('Подборка самых\\nжизненных фраз\\nOneTwo', 3) + VideoFromYoutubeURL('KWbANha2iws')[307:309, 307:309:0.66](volume_cooficient=0) + VideoFromYoutubeURL('KWbANha2iws')[71:77] + VideoFromYoutubeURL('U3-6jv0NCkk')[206:212] + VideoFromYoutubeURL('A8Fon7DWho4')[65:69]\"\n    # \"\"\"\n\n\"\"\"\ntemp = videos.VideoFromYoutubeURL('V1sRabJhGWs')\nv1 = temp[0:10, 15:20]\nv2 = temp[0:10]\nv3 = v1 + v2\nv4 = v3 / temp[0:25]\nv5 = v4(volume_cooficient=0)\nfor v in [v1, v2, v3, v4, v5]:\n    # print(v.short_str())\n    print(to_url(v)) #https://youtu.be/1at7kKzBYxI \"\"\"\n","sub_path":"url_parser.py","file_name":"url_parser.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"638020842","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom tools.logging_dec import logging_check, get_user_by_request\nfrom .models import Topic\nfrom user.models import UserProfile\n\n# error codes 10300 - 10399\n\n\n# Create your views here.\nclass TopicViews(View):\n\n\n    def make_topics_res(self, author, author_topics):\n        # response payload for the blogger's homepage / article-list page\n\n        topics_res = []\n        for topic in author_topics:\n            d = {}\n            d['id'] = topic.id\n            d['title'] = topic.title\n            d['category'] = topic.category\n            #2018-09-03 10:30:20\n            d['introduce'] = topic.introduce\n            d['created_time'] = topic.created_time.strftime('%Y-%m-%d %H:%M:%S')\n            d['author'] = author.nickname\n            topics_res.append(d)\n\n        res = {'code': 200, 'data': {}}\n        res['data']['topics'] = topics_res\n        res['data']['nickname'] = author.nickname\n        return res\n\n\n\n    @method_decorator(logging_check)\n    def post(self, request, author_id):\n        # publish an article\n        author = request.myuser\n        json_str = request.body\n        json_obj = json.loads(json_str)\n        # the JSON payload looks like:\n        #{\"content\":\"

aaaasdasdasd

\",\"content_text\":\"aaaasdasdasd\",\"limit\":\"public\",\"title\":\"aaaaaaa\",\"category\":\"tec\"}\n #带有html的 文章内容\n content = json_obj['content']\n #纯文本的文章内容 - 用来截取文章简介\n content_text = json_obj['content_text']\n #根据 content_text 前20个字为文章简介\n introduce = content_text[:20]\n title = json_obj['title']\n\n\n limit = json_obj['limit']\n if limit not in ['public', 'private']:\n result = {'code':10300, 'error':'The limit is error'}\n return JsonResponse(result)\n\n category = json_obj['category']\n if category not in ['tec', 'no-tec']:\n result = {'code':10301, 'error':'The category is error'}\n return JsonResponse(result)\n\n #数据入库\n Topic.objects.create(title=title, content=content,limit=limit, category=category,introduce=introduce,user_profile=author)\n\n return JsonResponse({'code':200, 'username':author.username})\n\n\n\n def get(self, request, author_id):\n #/v1/topics/guoxiaonao\n # /v1/topics/guoxiaonao?category=tec|no-tec\n #获取用户guoxiaonao的文章列表\n #1,访问者 visitor\n #2, 博主 author\n\n try:\n author = UserProfile.objects.get(username=author_id)\n except Exception as e:\n result = {'code':10305, 'error':'The author id is error'}\n return JsonResponse(result)\n #尝试获取访问者的身份\n visitor_username = get_user_by_request(request)\n\n category = request.GET.get('category')\n filter_category = False\n if category in ['tec', 'no-tec']:\n filter_category = True\n\n if visitor_username == author_id:\n #博主访问自己的博客\n if filter_category:\n author_topics = Topic.objects.filter(user_profile_id=author_id, category=category)\n else:\n author_topics = Topic.objects.filter(user_profile_id=author_id)\n else:\n #非博主访问博主的博客\n if filter_category:\n author_topics = Topic.objects.filter(user_profile_id=author_id, limit='public', category=category)\n else:\n author_topics = Topic.objects.filter(user_profile_id=author_id, limit='public')\n\n res = self.make_topics_res(author, author_topics)\n return JsonResponse(res)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"topic/views2.py","file_name":"views2.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"113750677","text":"# coding=utf-8\nimport json\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\nclass FindFiles:\n def __init__(self, folder_name):\n self.folder_name = folder_name\n folders = {\n \"project/crawled_players\" : \"player_data_\",\n \"project/parsed_players\" : \"parsed_player_data_\",\n \"project/crawled_tournaments\" : \"tournament_data_\",\n \"project/parsed_tournaments\" : \"parsed_tournament_data_\"\n }\n try:\n self.file_text = folders[self.folder_name]\n except:\n sys.exit('Unable to find folder with the name mentioned')\n\n def days_since_today(self, file_date):\n today = str(datetime.today()).split(' ')[0]\n d1 = datetime.strptime(today, \"%Y-%m-%d\")\n d2 = datetime.strptime(file_date, \"%Y-%m-%d\")\n return abs((d2 - d1).days)\n\n def find_latest_file(self):\n for file in os.listdir(\"./\" + self.folder_name):\n latest_file = \"\"\n crawled_since = 1000\n if file.endswith(\".json\"):\n file_date = file.replace(self.file_text, '').replace('.json', '')\n time_difference = self.days_since_today(file_date)\n if time_difference < crawled_since:\n crawled_since = time_difference\n latest_file = os.path.join(\".\\\\\" + self.folder_name, file)\n\n if latest_file == \"\":\n sys.exit('Failed to find latest file')\n else:\n return (latest_file)\n\n def find_all_files(self):\n all_files_in_folder = []\n for file in os.listdir(\"./\" + 
self.folder_name):\n if file.endswith(\".json\"):\n file = os.path.join(\".\\\\\" + self.folder_name, file)\n all_files_in_folder.append(file)\n\n return (all_files_in_folder)\n\n def find_one_file(self, specific_file):\n for file in os.listdir(\"./\" + self.folder_name):\n if file.endswith(\".json\") and file == specific_file:\n file = os.path.join(\".\\\\\" + self.folder_name, file)\n return (file)\n\n\n#print (FindFiles('crawled_players').find_one_file(\"player_data_2019-09-13.json\"))\n","sub_path":"project/utils/find_file.py","file_name":"find_file.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"178033237","text":"#!/home/kikos/anaconda3/bin/python3\nfrom os import popen\nfrom random import randint\n\ndef gen():\n\twith open('F.in', 'w') as f:\n\t\tn = randint(1, 10)\n\t\tf.write(str(n) + '\\n')\n\t\tfor i in range(n):\n\t\t\ta = randint(1, 20)\n\t\t\tf.write(str(a) + ' ')\n\npopen('g++ F.cpp -o F -std=c++14')\npopen('g++ Fs.cpp -o Fs -std=c++14')\nfor test in range(1, 100):\n\tgen()\n\tans1 = popen('./F < F.in').read()\n\tans2 = popen('./Fs < F.in').read()\n\tif ans1 != ans2:\n\t\tprint('WA', test)\n\t\tprint(ans1)\n\t\tprint(ans2)\n\t\tbreak\n\telse:\n\t\tprint('OK', test)\n","sub_path":"algo/linal/stress.py","file_name":"stress.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"237106041","text":"import string\nfilename = 'program_poll.txt'\nprint(\"Please enter 'q' to terminate program..\")\nwhile True:\n\treason = input(\"Please enter why you like programming: \")\n\tif reason == 'q':\n\t\tbreak\n\n\twith open(filename, 'a') as file_object:\n\t\tfile_object.write(string.capwords(reason) + \"\\n\" )\n","sub_path":"chapter10/programming_poll.py","file_name":"programming_poll.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"504450318","text":"# -*- coding: utf-8 -*-\n# author:lyh\n# datetime:2020/7/13 23:55\n\"\"\"\n188. 
Best Time to Buy and Sell Stock IV\n\nYou are given an array for which the i-th element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete at most k transactions.\n\nNote: You may not engage in multiple transactions at the same time (you must sell the stock before you buy again).\n\nExample 1:\n\nInput: [2,4,1], k = 2\nOutput: 2\nExplanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.\n\nExample 2:\n\nInput: [3,2,6,5,0,3], k = 2\nOutput: 7\nExplanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.\n     Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.\n\"\"\"\nfrom typing import List\n\n\nclass Node:\n    def __init__(self, dv, mv):\n        self.dv = dv # cost of deleting this interval\n        self.mv = mv # cost of merging this interval into its left neighbour\n\n        self.pre = None\n        self.next = None\n        self.index = 0\n\n    @classmethod\n    def link(cls, left, right):\n        if isinstance(left, cls):\n            left.next = right\n        if isinstance(right, cls):\n            right.pre = left\n\n    @classmethod\n    def drop(cls, n):\n        if isinstance(n, cls):\n            if isinstance(n.pre, cls):\n                n.pre.next = n.next\n            if isinstance(n.next, cls):\n                n.next.pre = n.pre\n            n.pre, n.next = None, None\n\n    def __lt__(self, other):\n        return min(self.dv, self.mv) < min(other.dv, other.mv)\n\n\nclass LinkHeap:\n    def __init__(self):\n        self.heap = []\n        self.root = None\n        self.end = None\n\n    def _up(self, index):\n        while index > 0:\n            f = (index - 1) // 2\n            if self.heap[index] < self.heap[f]:\n                self.heap[f], self.heap[index] = self.heap[index], self.heap[f]\n                self.heap[f].index = f\n                self.heap[index].index = index\n                index = f\n            else:\n                break\n\n    def _down(self, index):\n        left = index * 2 + 1\n        while left < len(self.heap):\n            target = index\n            right = left + 1\n            if self.heap[left] < self.heap[target]:\n                target = left\n            if right < len(self.heap) and self.heap[right] < self.heap[target]:\n                target = right\n            if target == index:\n                break\n            self.heap[target], self.heap[index] = self.heap[index], self.heap[target]\n            self.heap[target].index = target\n            self.heap[index].index = index\n            index = target\n            left = index * 2 + 1\n\n    def update(self, index):\n        self._down(index)\n        self._up(index)\n\n    def push(self, n: Node):\n        n.index = len(self.heap)\n        self.heap.append(n)\n        self.update(n.index)\n\n    def pop(self, index=0) -> [Node, None]:\n        if index >= len(self.heap):\n            return None\n        res = self.heap[index]\n        self.heap[index] = self.heap[-1]\n        self.heap[index].index = index\n        self.heap.pop()\n        self.update(index)\n        return res\n\n    def top(self) -> [Node, None]:\n        return self.heap[0] if len(self.heap) > 0 else None\n\n    def __len__(self):\n        return len(self.heap)\n\n\nclass Solution:\n    def maxProfit(self, k: int, prices: List[int]) -> int:\n        k = min(k, len(prices) // 2)\n        if k == 0 or len(prices) == 0:\n            return 0\n\n        # build (buy, sell) trade intervals from ascending price runs\n        sales = []\n        buy = prices[0]\n        for i in range(1, len(prices)):\n            if prices[i] < prices[i-1]:\n                if buy < prices[i-1]:\n                    sales.append((buy, prices[i-1]))\n                buy = prices[i]\n        sales.append((buy, prices[-1]))\n\n        # compute each interval's deletion and merge costs; merging goes leftward, and the head interval has no left neighbour to merge into\n        profits = LinkHeap()\n        left = Node(sales[0][1] - sales[0][0], int(1e9))\n        profits.push(left)\n        for i in range(1, len(sales)):\n            right = Node(sales[i][1] - sales[i][0], sales[i - 1][1] - sales[i][0])\n            Node.link(left, right)\n            profits.push(right)\n            left = right\n\n        # greedily delete or merge intervals until at most k remain\n        while len(profits) > k:\n            # take the interval with the smallest cost\n            n = profits.top()\n\n            # delete or merge that interval\n            if n.dv < n.mv:\n                if n.next is not None:\n                    n.next.mv += n.mv - n.dv\n                    profits.update(n.next.index)\n                profits.pop(n.index)\n                Node.drop(n)\n            else:\n                n.pre.dv += n.dv - n.mv\n                profits.update(n.pre.index)\n                profits.pop(n.index)\n                Node.drop(n)\n\n        # sum up the profit of the remaining intervals\n        res = 0\n        for n in profits.heap:\n            res += n.dv\n        return res\n\n\nif __name__ == '__main__':\n    print(Solution().maxProfit(2, [2, 4, 
1]), 2)\n print(Solution().maxProfit(2, [3, 2, 6, 5, 0, 3]), 7)\n print(Solution().maxProfit(3, [2]), 0)\n print(Solution().maxProfit(0, [2, 4, 1]), 0)\n","sub_path":"Solutions/0188.maxProfit.hard.py","file_name":"0188.maxProfit.hard.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"546144521","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nfrom net import MnistNet, MLP\nfrom utils import simplex_grid\n\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nfrom copy import deepcopy\n\nweight_dict_1 = torch.load('model_weights/mlp_weights_0.pth', map_location='cpu')\nweight_dict_2 = torch.load('model_weights/mlp_weights_1.pth', map_location='cpu')\nweight_dict_3 = torch.load('model_weights/mlp_weights_2.pth', map_location='cpu')\n\nx = np.linspace(-0.4, 1.3, 50)\ny = np.linspace(-0.4, 1.3, 50)\n\nX, Y = np.meshgrid(x, y)\nZ = 1 - X - Y\n\ngrid = simplex_grid(3, 25) / 25\ngrid_val = []\n\ndef multiply_weights(state_dict, val):\n new_state_dict = deepcopy(state_dict)\n for key in new_state_dict.keys(): new_state_dict[key] *= val\n return new_state_dict\n\ndef sum_weights(list_of_state_dicts):\n for i in range(1, len(list_of_state_dicts)):\n for key in list_of_state_dicts[0].keys():\n list_of_state_dicts[0][key] += list_of_state_dicts[i][key]\n return list_of_state_dicts[0]\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=100, shuffle=True)\n\ndef test(model, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.cuda(), target.cuda().long()\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n return test_loss\n\nZ_ = []\nfor i in trange(X.shape[0]):\n Z_ += [[]]\n for j in trange(Y.shape[0]):\n convex_hull_weights = sum_weights([multiply_weights(weight_dict_1, X[i,j]),\n multiply_weights(weight_dict_2, Y[i,j]),\n multiply_weights(weight_dict_3, Z[i,j])\n ])\n\n net = MLP().cuda()\n net.load_state_dict(convex_hull_weights)\n Z_[i].append(test(net, test_loader))\n\nnp.save('./plots/X_mnist', X)\nnp.save('./plots/Y_mnist', Y)\nnp.save('./plots/Z_mnist', Z_)\n","sub_path":"make_points_mnist.py","file_name":"make_points_mnist.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"530846695","text":"from os import remove, rename\n\n\nclass File:\n def __init__(self, name):\n self._name = name\n\n f = open(name, 'w')\n f.close()\n\n self._entries = []\n\n def write(self, n, s):\n n = n if isinstance(n, str) else str(n)\n if n in self._entries:\n print('Already exist\\'s')\n return\n\n self._entries.append(n)\n with open(self._name, 'a') as f:\n f.write(n + ' ' + s + '\\n')\n\n def read(self, n):\n n = n if isinstance(n, str) else str(n)\n\n if n not in self._entries:\n print('%s doesn\\'nt exist' % n)\n return\n\n res = ''\n with open(self._name, 'r') as f:\n for line in f:\n if line.startswith(n):\n res = line\n return res.split()[1]\n\n def delete(self, n):\n n = n if 
isinstance(n, str) else str(n)\n\n        if n not in self._entries:\n            print('Nothing to delete')\n            return\n\n        f2 = open('.temp', 'w')\n        with open(self._name, 'r') as f:\n            for line in f:\n                if n in line:\n                    continue\n                f2.write(line)\n        f2.close()  # flush the temp file before swapping it in\n        remove(self._name)\n        rename('.temp', self._name)\n        self._entries.remove(n)\n\n    def __del__(self):\n        print('Deleting file')\n        remove(self._name)\n","sub_path":"asd.py","file_name":"asd.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"646076803","text":"''' Open the file romeo.txt and read it line by line. For each line, split the line into a list of words using the split() method. \nThe program should build a list of words. For each word on each line check to see if the word is already in the list and if not append it to the list.\nWhen the program completes, sort and print the resulting words in alphabetical order. '''\n\n\n#CODE:\nfname = input(\"Enter file name:\") \nfh = open(fname) \nIst=list() \nfor line in fh: \n    words=line.split() \n    for word in words: \n        Ist.append(word) \n    s=set(Ist) \n    Ist=list(s) \n    Ist.sort() \nprint(Ist) \n\n\n#input:romeo.txt\n\n'''OUTPUT:\n['Arise', 'But', 'It', 'Juliet', 'Who', 'already', 'and', 'breaks', 'east','envious', 'fair', 'grief', 'is', 'kill', 'light', 'moon', 'pale','sick', \n'soft', 'sun', 'the', 'through', 'what', 'window', 'with', 'yonder'] '''\n\n\n\n\n","sub_path":"python data structures/assignment 4.1/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"621448071","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\nimport math\n\n# convert an EXIF degrees/minutes/seconds tuple to a float\ndef arcTupleToFloat(arcTuple):\n    return arcTuple[0][0] / arcTuple[0][1] + (arcTuple[1][0] / arcTuple[1][1]) / 60 + (arcTuple[2][0] / arcTuple[2][1]) / 3600\n\n# convert a float to a degrees/minutes/seconds tuple\ndef floatToArcTuple(lat):\n    deg = math.floor(lat)\n    lat -= deg\n    lat *= 60\n    min = math.floor(lat)\n    lat -= min\n    lat *= 60 * 100\n    sec = round(lat)\n    return ((deg, 1), (min, 1), (sec, 100))\n\n# convert EXIF latitude/longitude info to a float (lat, lng) pair\ndef gpsTuplesToFloat(gpsinfo):\n    print(gpsinfo)\n    print(type(gpsinfo[1]))\n    if type(gpsinfo[1]) == bytes:\n        gpsinfo[1] = gpsinfo[1].decode('utf-8')\n    if type(gpsinfo[3]) == bytes:\n        gpsinfo[3] = gpsinfo[3].decode('utf-8')\n\n    ns = 1 if u'N' == gpsinfo[1] else -1\n    ew = 1 if u'E' == gpsinfo[3] else -1\n\n    return (ns * arcTupleToFloat(gpsinfo[2]), ew * arcTupleToFloat(gpsinfo[4]))\n\n# convert a float (lat, lng) pair to EXIF latitude/longitude info\ndef floatLatLngToGpsTuple(latlng):\n    (lat, lng) = latlng\n\n    nsSign = b'N'\n    if lat < 0:\n        nsSign = b'S'\n        lat = -lat\n\n    ewSign = b'E'\n    if lng < 0:\n        ewSign = b'W'\n        lng = -lng\n\n    return {1: nsSign, 2:floatToArcTuple(lat), 3:ewSign, 4:floatToArcTuple(lng), 18: b'WGS-84'}\n","sub_path":"exif_utils.py","file_name":"exif_utils.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"475076412","text":"\nimport pandas as pd\n#import nltk\nimport spacy\nfrom tqdm import tqdm\nimport re\nfrom utils.Utils import permanent_hash, append_to_git_root\nfrom utils.text_cleaner import TextCleaner\n\nimport os\n\n# saving/loading data\nimport pickle\n\nclass TrainingData:\n\n    # @classmethod\n    # def from_training_data(cls, data: TrainingData):\n    #     \"\"\"\n    #     Constructor using TrainingData.\n    #     \"\"\"\n    #     sentences = data.headlines['Clean Headline'].tolist() + data.sentences['Clean 
Sentence'].tolist()\n # return cls(sentences=sentences)\n\n saving_dir = append_to_git_root(what = \"training_data\", alternate_root=\".\")\n if not os.path.exists(saving_dir):\n print(\"Creating directory {} to save TrainingData instances\".format(saving_dir))\n os.mkdir(saving_dir)\n\n def file_name(self) -> str:\n d = os.path.join(\"training_data\",\"training_data_{}.pickle\".format(self.uid))\n return append_to_git_root(what = d, alternate_root=\".\")\n\n def load(self, verbose: bool = True) -> bool:\n file_name = self.file_name()\n try:\n if os.path.isfile(file_name):\n with open(file_name, 'rb') as f:\n pickle_data = pickle.load(f)\n self.stances = pickle_data['stances']\n self.headlines = pickle_data['headlines']\n self.bodies = pickle_data['bodies']\n self.sentences = pickle_data['sentences']\n del pickle_data # Free up memory\n if verbose:\n print(\"TrainingData :: loaded definitions from {}\".format(file_name))\n return True\n else:\n if verbose:\n print(\"TrainingData :: impossible to load from '{}', as file doesn't exist\".format(file_name))\n return False\n except Exception as e:\n if verbose:\n print(\"TrainingData :: impossible to load from {}: {}\".format(file_name, str(e)))\n return False\n\n\n def __init__(self, stances_file_name: str, bodies_file_name: str, \n replacements: dict, nlp=None, pos_to_remove: list=[],\n lemmatize=False, stem=False):\n \"\"\"\n \n Args:\n stances_file_name: \n bodies_file_name: \n replacements: \n nlp: \n pos_to_remove: \n lemmatize: \n stem: \n \"\"\"\n\n if nlp is None:\n self.nlp = spacy.load('en')\n else:\n self.nlp = nlp\n\n #\n self.replacements = replacements\n self.pos_to_remove = pos_to_remove\n self.cleaner = TextCleaner(self.replacements, self.nlp, self.pos_to_remove, \n lemmatize=lemmatize, stem=stem)\n\n\n\n self.uid = permanent_hash(stances_file_name + bodies_file_name) + self.cleaner.uid\n # do I have this data already processed somewhere?\n if not self.load(verbose=False):\n #\n # We load the datasets from the fake news challenge into pandas data frames.\n # The article bodies are split into their component sentences. 
The article bodies\n # datatest thus processed is merged with the headlines and stances dataset.\n self.stances = pd.read_csv(stances_file_name)\n\n # Assign IDs to all headlines even if they're duplicated (almost like row numbering)\n self.stances['Headline ID'] = range(self.stances.shape[0])\n\n # Assign UIDs to headlines\n u_headlines = list(self.stances['Headline'].unique())\n headline_to_uid = {h: i for i, h in enumerate(u_headlines)}\n self.stances['Headline UID'] = self.stances['Headline'].map(lambda x: headline_to_uid[x])\n\n # Create unique headlines dataframe\n self.headlines = self.stances[['Headline', 'Headline UID']].drop_duplicates()\n self.headlines['HTag'] = 'head_' + self.headlines['Headline UID'].astype(str)\n\n # Read article bodies data\n self.bodies = pd.read_csv(bodies_file_name)\n\n # Tokenize article bodies into sentences\n # And clean the sentences here\n print(\"TrainingData :: clean data\")\n sentences = []\n sentence_ids = []\n body_ids = []\n for row in tqdm(self.bodies.iterrows()):\n bid = row[1]['Body ID']\n text = row[1]['articleBody']\n doc = self.nlp(text)\n lines = [str(s) for s in doc.sents]\n sentences += lines\n sentence_ids += range(len(lines))\n body_ids += [bid] * len(lines)\n\n # Create sentences DataFrame\n print(\"TrainingData :: creating and cleaning sentences\")\n self.sentences = pd.DataFrame({'Sentence': sentences, 'Sentence ID': sentence_ids, 'Body ID': body_ids})\n self.sentences['STag'] = 'body_' + self.sentences['Body ID'].astype(str) + '_sent_' + self.sentences['Sentence ID'].astype(str)\n self.sentences['Clean Sentence'] = [self.cleaner.clean_text(s) \n for s in tqdm(self.sentences['Sentence'], total=self.sentences.shape[0])]\n \n # Create column of clean headlines in headlines\n print(\"TrainingData :: cleaning headlines\")\n self.headlines['Clean Headline'] = [self.cleaner.clean_text(s) \n for s in tqdm(self.headlines['Headline'], total=self.headlines.shape[0])]\n\n print(\"TrainingData initialized\")\n self.save()\n \n def save(self):\n \"\"\"\n Raises Exception on failure.\n Returns:\n\n \"\"\"\n file_name = self.file_name()\n print(\"TrainingData :: Saving data to pickle file '{}'...\".format(file_name))\n try:\n with open(file_name, 'wb') as pfile:\n pickle.dump(\n {\n 'stances': self.stances,\n 'headlines': self.headlines,\n 'bodies': self.bodies,\n 'sentences': self.sentences,\n },\n pfile, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('TrainingData :: Unable to save data:', e)\n raise\n\n print('TrainingData :: Data cached in pickle file.')\n","sub_path":"utils/doc2vec/training_data.py","file_name":"training_data.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"569264867","text":"import os\nimport sys\n\nfrom academic_helper.utils.logger import log\n\nbranches = [\"asaf\", \"kirsh\", \" evyatar\", \"avihai\", \"shani\"]\n\n\ndef execute(cmd: str):\n log.info(cmd)\n os.system(cmd)\n\n\ndef clear_cache():\n execute(f\"rm -f .git/FETCH_HEAD\")\n\n\ndef for_branch(*commands: str):\n for branch in branches:\n clear_cache()\n execute(f\"git checkout {branch}\")\n clear_cache()\n for cmd in commands:\n cmd = cmd.replace(\"\", branch)\n execute(cmd)\n\n\ndef pull():\n for_branch(\"git pull\")\n\n\ndef push():\n for_branch(\"git push\")\n\n\ndef rebase(to: str):\n execute(f\"git checkout {to}\")\n execute(f\"git push\")\n pull()\n for_branch(f\"git rebase {to}\", \"git push\")\n\n\nif __name__ == \"__main__\":\n command = 
sys.argv[1]\n if command == \"pull\":\n pull()\n elif command == \"push\":\n push()\n elif command == \"rebase\":\n rebase(\"master\")\n execute(\"git checkout master\")\n","sub_path":"scripts/gits.py","file_name":"gits.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"382126085","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\SH\\AppData\\Local\\Temp\\pip-install-1sehz1ij\\PyAlgoTrade\\pyalgotrade\\bitstamp\\livebroker.py\n# Compiled at: 2018-10-21 21:07:45\n# Size of source mod 2**32: 12032 bytes\n\"\"\"\n.. moduleauthor:: Gabriel Martin Becedillas Ruiz \n\"\"\"\nimport threading, time\nfrom six.moves import queue\nfrom pyalgotrade import broker\nfrom pyalgotrade.bitstamp import httpclient\nfrom pyalgotrade.bitstamp import common\n\ndef build_order_from_open_order(openOrder, instrumentTraits):\n if openOrder.isBuy():\n action = broker.Order.Action.BUY\n else:\n if openOrder.isSell():\n action = broker.Order.Action.SELL\n else:\n raise Exception('Invalid order type')\n ret = broker.LimitOrder(action, common.btc_symbol, openOrder.getPrice(), openOrder.getAmount(), instrumentTraits)\n ret.setSubmitted(openOrder.getId(), openOrder.getDateTime())\n ret.setState(broker.Order.State.ACCEPTED)\n return ret\n\n\nclass TradeMonitor(threading.Thread):\n POLL_FREQUENCY = 2\n ON_USER_TRADE = 1\n\n def __init__(self, httpClient):\n super(TradeMonitor, self).__init__()\n self._TradeMonitor__lastTradeId = -1\n self._TradeMonitor__httpClient = httpClient\n self._TradeMonitor__queue = queue.Queue()\n self._TradeMonitor__stop = False\n\n def _getNewTrades(self):\n userTrades = self._TradeMonitor__httpClient.getUserTransactions(httpclient.HTTPClient.UserTransactionType.MARKET_TRADE)\n ret = [t for t in userTrades if t.getId() > self._TradeMonitor__lastTradeId]\n return sorted(ret, key=(lambda t: t.getId()))\n\n def getQueue(self):\n return self._TradeMonitor__queue\n\n def start(self):\n trades = self._getNewTrades()\n if len(trades):\n self._TradeMonitor__lastTradeId = trades[(-1)].getId()\n common.logger.info('Last trade found: %d' % self._TradeMonitor__lastTradeId)\n super(TradeMonitor, self).start()\n\n def run(self):\n while not self._TradeMonitor__stop:\n try:\n trades = self._getNewTrades()\n if len(trades):\n self._TradeMonitor__lastTradeId = trades[(-1)].getId()\n common.logger.info('%d new trade/s found' % len(trades))\n self._TradeMonitor__queue.put((TradeMonitor.ON_USER_TRADE, trades))\n except Exception as e:\n try:\n common.logger.critical('Error retrieving user transactions', exc_info=e)\n finally:\n e = None\n del e\n\n time.sleep(TradeMonitor.POLL_FREQUENCY)\n\n def stop(self):\n self._TradeMonitor__stop = True\n\n\nclass LiveBroker(broker.Broker):\n __doc__ = 'A Bitstamp live broker.\\n\\n :param clientId: Client id.\\n :type clientId: string.\\n :param key: API key.\\n :type key: string.\\n :param secret: API secret.\\n :type secret: string.\\n\\n\\n .. 
note::\\n * Only limit orders are supported.\\n * Orders are automatically set as **goodTillCanceled=True** and **allOrNone=False**.\\n * BUY_TO_COVER orders are mapped to BUY orders.\\n * SELL_SHORT orders are mapped to SELL orders.\\n * API access permissions should include:\\n\\n * Account balance\\n * Open orders\\n * Buy limit order\\n * User transactions\\n * Cancel order\\n * Sell limit order\\n '\n QUEUE_TIMEOUT = 0.01\n\n def __init__(self, clientId, key, secret):\n super(LiveBroker, self).__init__()\n self._LiveBroker__stop = False\n self._LiveBroker__httpClient = self.buildHTTPClient(clientId, key, secret)\n self._LiveBroker__tradeMonitor = TradeMonitor(self._LiveBroker__httpClient)\n self._LiveBroker__cash = 0\n self._LiveBroker__shares = {}\n self._LiveBroker__activeOrders = {}\n\n def _registerOrder(self, order):\n assert order.getId() not in self._LiveBroker__activeOrders\n assert order.getId() is not None\n self._LiveBroker__activeOrders[order.getId()] = order\n\n def _unregisterOrder(self, order):\n assert order.getId() in self._LiveBroker__activeOrders\n assert order.getId() is not None\n del self._LiveBroker__activeOrders[order.getId()]\n\n def buildHTTPClient(self, clientId, key, secret):\n return httpclient.HTTPClient(clientId, key, secret)\n\n def refreshAccountBalance(self):\n \"\"\"Refreshes cash and BTC balance.\"\"\"\n self._LiveBroker__stop = True\n common.logger.info('Retrieving account balance.')\n balance = self._LiveBroker__httpClient.getAccountBalance()\n self._LiveBroker__cash = round(balance.getUSDAvailable(), 2)\n common.logger.info('%s USD' % self._LiveBroker__cash)\n btc = balance.getBTCAvailable()\n if btc:\n self._LiveBroker__shares = {common.btc_symbol: btc}\n else:\n self._LiveBroker__shares = {}\n common.logger.info('%s BTC' % btc)\n self._LiveBroker__stop = False\n\n def refreshOpenOrders(self):\n self._LiveBroker__stop = True\n common.logger.info('Retrieving open orders.')\n openOrders = self._LiveBroker__httpClient.getOpenOrders()\n for openOrder in openOrders:\n self._registerOrder(build_order_from_open_order(openOrder, self.getInstrumentTraits(common.btc_symbol)))\n\n common.logger.info('%d open order/s found' % len(openOrders))\n self._LiveBroker__stop = False\n\n def _startTradeMonitor(self):\n self._LiveBroker__stop = True\n common.logger.info('Initializing trade monitor.')\n self._LiveBroker__tradeMonitor.start()\n self._LiveBroker__stop = False\n\n def _onUserTrades(self, trades):\n for trade in trades:\n order = self._LiveBroker__activeOrders.get(trade.getOrderId())\n if order is not None:\n fee = trade.getFee()\n fillPrice = trade.getBTCUSD()\n btcAmount = trade.getBTC()\n dateTime = trade.getDateTime()\n self.refreshAccountBalance()\n orderExecutionInfo = broker.OrderExecutionInfo(fillPrice, abs(btcAmount), fee, dateTime)\n order.addExecutionInfo(orderExecutionInfo)\n if not order.isActive():\n self._unregisterOrder(order)\n elif order.isFilled():\n eventType = broker.OrderEvent.Type.FILLED\n else:\n eventType = broker.OrderEvent.Type.PARTIALLY_FILLED\n self.notifyOrderEvent(broker.OrderEvent(order, eventType, orderExecutionInfo))\n else:\n common.logger.info('Trade %d refered to order %d that is not active' % (trade.getId(), trade.getOrderId()))\n\n def start(self):\n super(LiveBroker, self).start()\n self.refreshAccountBalance()\n self.refreshOpenOrders()\n self._startTradeMonitor()\n\n def stop(self):\n self._LiveBroker__stop = True\n common.logger.info('Shutting down trade monitor.')\n self._LiveBroker__tradeMonitor.stop()\n\n def 
join(self):\n if self._LiveBroker__tradeMonitor.isAlive():\n self._LiveBroker__tradeMonitor.join()\n\n def eof(self):\n return self._LiveBroker__stop\n\n def dispatch(self):\n ordersToProcess = list(self._LiveBroker__activeOrders.values())\n for order in ordersToProcess:\n if order.isSubmitted():\n order.switchState(broker.Order.State.ACCEPTED)\n self.notifyOrderEvent(broker.OrderEvent(order, broker.OrderEvent.Type.ACCEPTED, None))\n\n try:\n eventType, eventData = self._LiveBroker__tradeMonitor.getQueue().get(True, LiveBroker.QUEUE_TIMEOUT)\n if eventType == TradeMonitor.ON_USER_TRADE:\n self._onUserTrades(eventData)\n else:\n common.logger.error('Invalid event received to dispatch: %s - %s' % (eventType, eventData))\n except queue.Empty:\n pass\n\n def peekDateTime(self):\n pass\n\n def getCash(self, includeShort=True):\n return self._LiveBroker__cash\n\n def getInstrumentTraits(self, instrument):\n return common.BTCTraits()\n\n def getShares(self, instrument):\n return self._LiveBroker__shares.get(instrument, 0)\n\n def getPositions(self):\n return self._LiveBroker__shares\n\n def getActiveOrders(self, instrument=None):\n return list(self._LiveBroker__activeOrders.values())\n\n def submitOrder(self, order):\n if order.isInitial():\n order.setAllOrNone(False)\n order.setGoodTillCanceled(True)\n if order.isBuy():\n bitstampOrder = self._LiveBroker__httpClient.buyLimit(order.getLimitPrice(), order.getQuantity())\n else:\n bitstampOrder = self._LiveBroker__httpClient.sellLimit(order.getLimitPrice(), order.getQuantity())\n order.setSubmitted(bitstampOrder.getId(), bitstampOrder.getDateTime())\n self._registerOrder(order)\n order.switchState(broker.Order.State.SUBMITTED)\n else:\n raise Exception('The order was already processed')\n\n def createMarketOrder(self, action, instrument, quantity, onClose=False):\n raise Exception('Market orders are not supported')\n\n def createLimitOrder(self, action, instrument, limitPrice, quantity):\n if instrument != common.btc_symbol:\n raise Exception('Only BTC instrument is supported')\n elif action == broker.Order.Action.BUY_TO_COVER:\n action = broker.Order.Action.BUY\n else:\n if action == broker.Order.Action.SELL_SHORT:\n action = broker.Order.Action.SELL\n if action not in [broker.Order.Action.BUY, broker.Order.Action.SELL]:\n raise Exception('Only BUY/SELL orders are supported')\n instrumentTraits = self.getInstrumentTraits(instrument)\n limitPrice = round(limitPrice, 2)\n quantity = instrumentTraits.roundQuantity(quantity)\n return broker.LimitOrder(action, instrument, limitPrice, quantity, instrumentTraits)\n\n def createStopOrder(self, action, instrument, stopPrice, quantity):\n raise Exception('Stop orders are not supported')\n\n def createStopLimitOrder(self, action, instrument, stopPrice, limitPrice, quantity):\n raise Exception('Stop limit orders are not supported')\n\n def cancelOrder(self, order):\n activeOrder = self._LiveBroker__activeOrders.get(order.getId())\n if activeOrder is None:\n raise Exception('The order is not active anymore')\n if activeOrder.isFilled():\n raise Exception(\"Can't cancel order that has already been filled\")\n self._LiveBroker__httpClient.cancelOrder(order.getId())\n self._unregisterOrder(order)\n order.switchState(broker.Order.State.CANCELED)\n self.refreshAccountBalance()\n self.notifyOrderEvent(broker.OrderEvent(order, broker.OrderEvent.Type.CANCELED, 'User requested 
cancellation'))","sub_path":"pycfiles/quantlwsdk-0.0.11-py3-none-any/livebroker.cpython-37.py","file_name":"livebroker.cpython-37.py","file_ext":"py","file_size_in_byte":11224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"216106902","text":"from datetime import datetime\nfrom flask import current_app\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom app.extensions import db\nfrom .include.user_info import User\n\n\nclass Base(db.Model):\n __abstract__ = True\n\n def save(self, commit=True):\n db.session.add(self)\n if commit:\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n raise e\n\n\nclass AuditMixin(object):\n create_user = db.Column(db.String(60), nullable=False, default=User().get_user_username)\n create_timestamp = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n update_user = db.Column(db.String(60),\n nullable=False,\n default=User().get_user_username,\n onupdate=User().get_user_username)\n update_timestamp = db.Column(db.DateTime,\n nullable=False,\n default=datetime.utcnow,\n onupdate=datetime.utcnow)\n","sub_path":"services/document-manager/backend/app/utils/models_mixins.py","file_name":"models_mixins.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"520700605","text":"from __future__ import print_function, absolute_import\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\nimport os.path as osp\nimport numpy as np\nnp.random.seed(1)\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\n\nfrom utils.lr_schedulers import WarmupMultiStepLR\nfrom utils.video_loader import VideoDataset\nimport utils.transforms as T\nimport models\nfrom models.losses import CrossEntropyLabelSmooth, TripletLoss, RegularLoss\nfrom utils.utils import AverageMeter, Logger, save_checkpoint\nfrom utils.eval_metrics import evaluate\nfrom utils.samplers import RandomIdentitySampler\n\nfrom data import data_manager\n\ndef main(args=None):\n torch.manual_seed(args.seed)\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n # torch.cuda.set_device(0)\n use_gpu = torch.cuda.is_available()\n\n if args.use_cpu:use_gpu = False\n\n if not args.evaluate:\n sys.stdout = Logger(osp.join(args.save_dir, args.log_train))\n else:\n sys.stdout = Logger(osp.join(args.save_dir, args.log_test))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(args.seed)\n else:\n print(\"Currently using CPU (GPU is highly recommended)\")\n\n print(\"Initializing dataset {}\".format(args.dataset))\n dataset = data_manager.init_dataset(name=args.dataset)\n\n transform_train = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n T.RandomErasing(),\n ])\n\n transform_train2 = T.Compose([\n T.Resize((args.height, args.width)),\n T.Random2DTranslation(args.height, args.width),\n ])\n\n transform_test = T.Compose([\n T.Resize((args.height, args.width)),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n pin_memory = False\n\n trainloader = DataLoader(\n VideoDataset(dataset.train, data_name=args.dataset, seq_len=args.seq_len, sample='random', 
transform=transform_train, transform2=transform_train2, type = \"train\"),\n sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),\n batch_size=args.train_batch, num_workers=args.workers,\n pin_memory=pin_memory, drop_last=True,\n )\n\n queryloader = DataLoader(\n VideoDataset(dataset.query, data_name=args.dataset, seq_len=args.seq_len, sample='dense', transform=transform_test, type=\"test\"),\n batch_size=args.test_batch, shuffle=False, num_workers=args.workers,\n pin_memory=pin_memory, drop_last=False,\n )\n\n galleryloader = DataLoader(\n VideoDataset(dataset.gallery, data_name=args.dataset, seq_len=args.seq_len, sample='dense', transform=transform_test, type=\"test\"),\n batch_size=args.test_batch, shuffle=False, num_workers=args.workers,\n pin_memory=pin_memory, drop_last=False,\n )\n\n print(\"Initializing models: {}\".format(args.arch))\n model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, final_dim = args.feat_dim)\n print(\"Model size: {:.5f}M\".format(sum(p.numel() for p in model.parameters())/1000000.0))\n\n crossEntropyLoss = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)\n tripletLoss = TripletLoss(margin=args.margin)\n regularLoss = RegularLoss(use_gpu=use_gpu)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n scheduler = WarmupMultiStepLR(optimizer, args.stepsize, args.gamma, args.warmup_factor, args.warmup_items, args.warmup_method)\n start_epoch = args.start_epoch\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n\n if args.evaluate:\n print(\"Evaluate only\")\n atest(model, queryloader, galleryloader, use_gpu)\n return\n\n start_time = time.time()\n best_rank1 = -np.inf\n for epoch in range(start_epoch, args.max_epoch):\n print(\"==> Epoch {}/{}\".format(epoch+1, args.max_epoch))\n\n train(model, crossEntropyLoss, tripletLoss, regularLoss, optimizer, trainloader, use_gpu)\n\n # if args.stepsize > 0:\n scheduler.step()\n\n if (epoch+1) >= 200 and (epoch+1) % args.eval_step == 0:\n print(\"==> Test\")\n rank1 = atest(model, queryloader, galleryloader, use_gpu)\n is_best = rank1 > best_rank1\n if is_best: best_rank1 = rank1\n\n if use_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n save_checkpoint({\n 'state_dict': state_dict,\n }, is_best, osp.join(args.save_dir, args.model_name + str(epoch+1) + '.pth.tar'))\n\n elapsed = round(time.time() - start_time)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print(\"Finished. 
Total elapsed time (h:m:s): {}\".format(elapsed))\n\ndef train(model, crossEntropyLoss, tripletLoss, regularLoss, optimizer, trainloader, use_gpu):\n model.train()\n losses = AverageMeter()\n print('batch number:' + str(len(trainloader)))\n for batch_idx, (imgs, pids, _, head_map, body_map, leg_map) in enumerate(trainloader):\n # print(img_align)\n if use_gpu:\n imgs, pids = imgs.cuda(), pids.cuda()\n imgs, pids = Variable(imgs), Variable(pids)\n global_label, global_f, region1_label, region1_f, region2_label, region2_f, region3_label, region3_f, \\\n align_output1, align_output2, align_output3, align_output4, align_output5, align_output6\\\n = model(imgs, head_map, body_map, leg_map)\n loss = calculate_loss(global_label, global_f, region1_label, region1_f, region2_label, region2_f, region3_label, region3_f,\\\n align_output1, align_output2, align_output3, align_output4, align_output5, align_output6,\\\n crossEntropyLoss, tripletLoss, regularLoss, pids)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n losses.update(loss.item(), pids.size(0))\n\n if (batch_idx+1) % args.print_freq == 0:\n print(\"Batch {}/{}\\t Loss1 {:.6f} ({:.6f})\".format(batch_idx+1, len(trainloader), losses.val, losses.avg))\n\ndef calculate_loss(global_label, global_f, region1_label, region1_f, region2_label, region2_f, region3_label, region3_f,\\\n align_output1, align_output2, align_output3, align_output4, align_output5, align_output6,\\\n crossEntropyLoss, tripletLoss, regularLoss, pids):\n # global branch\n global_cross_loss = crossEntropyLoss(global_label, pids)\n global_trip_loss, _, _ = tripletLoss(global_f, pids)\n global_loss = global_cross_loss + global_trip_loss\n # align branch\n region1_cross_loss = crossEntropyLoss(region1_label, pids)\n region1_trip_loss, _, _ = tripletLoss(region1_f, pids)\n region2_cross_loss = crossEntropyLoss(region2_label, pids)\n region2_trip_loss, _, _ = tripletLoss(region2_f, pids)\n region3_cross_loss = crossEntropyLoss(region3_label, pids)\n region3_trip_loss, _, _ = tripletLoss(region3_f, pids)\n region_loss = region1_cross_loss + region1_trip_loss + region2_cross_loss + region2_trip_loss + region3_cross_loss + region3_trip_loss\n # consistency Regularization\n regular_loss1 = regularLoss(align_output1)\n regular_loss2 = regularLoss(align_output2)\n regular_loss3 = regularLoss(align_output3)\n regular_loss4 = regularLoss(align_output4)\n regular_loss5 = regularLoss(align_output5)\n regular_loss6 = regularLoss(align_output6)\n regular_loss = regular_loss1 + regular_loss2 + regular_loss3 + regular_loss4 + regular_loss5 + regular_loss6\n # print(\"global loss: \" + str(global_loss.item()) + \"; part loss: \" + str(part_loss.item()) + \"; region loss: \" + str(region_loss.item()) + \"; regular loss: \" + str(regular_loss.item()))\n loss = args.a1 * global_loss + args.a2 * region_loss + args.a3 * regular_loss\n return loss\n\ndef atest(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):\n with torch.no_grad():\n model.eval()\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (imgs, pids, camids, head_map, body_map, leg_map) in enumerate(queryloader):\n if batch_idx % 100 == 0:\n print(\"current query:\" + str(batch_idx))\n if use_gpu:\n imgs = imgs.cuda()\n b, n, s, c, h, w = imgs.size()\n assert(b==1)\n imgs = imgs.view(b*n, s, c, h, w)\n features = model(imgs, head_map, body_map, leg_map)\n features = features.view(n, -1)\n features = torch.mean(features, 0)\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n 
q_camids.extend(camids)\n\n        qf = torch.stack(qf)\n        q_pids = np.asarray(q_pids)\n        q_camids = np.asarray(q_camids)\n\n        print(\"Extracted features for query set, obtained {}-by-{} matrix\".format(qf.size(0), qf.size(1)))\n\n        gf, g_pids, g_camids = [], [], []\n        print('gallery num:' + str(len(galleryloader)))\n\n        for batch_idx, (imgs, pids, camids, head_map, body_map, leg_map) in enumerate(galleryloader):\n            if batch_idx % 100 == 0:\n                print(\"current gallery:\" + str(batch_idx))\n            if use_gpu:\n                imgs = imgs.cuda()\n            # imgs = Variable(imgs, volatile=True)\n            b, n, s, c, h, w = imgs.size()\n            imgs = imgs.view(b*n, s, c, h, w)\n            assert(b==1)\n            features = model(imgs, head_map, body_map, leg_map)\n            features = features.view(n, -1)\n            features = torch.mean(features, 0)\n            features = features.data.cpu()\n            gf.append(features)\n            g_pids.extend(pids)\n            g_camids.extend(camids)\n        gf = torch.stack(gf)\n        g_pids = np.asarray(g_pids)\n        g_camids = np.asarray(g_camids)\n\n        print(\"Extracted features for gallery set, obtained {}-by-{} matrix\".format(gf.size(0), gf.size(1)))\n        print(\"Computing distance matrix\")\n\n        m, n = qf.size(0), gf.size(0)\n        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n        distmat.addmm_(1, -2, qf, gf.t())\n        distmat = distmat.numpy()\n\n        print(\"Computing CMC and mAP\")\n        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)\n\n        print(\"Results ----------\")\n        print(\"mAP: {:.1%}\".format(mAP))\n        print(\"CMC curve\")\n        for r in ranks:\n            print(\"Rank-{:<3}: {:.1%}\".format(r, cmc[r-1]))\n        print(\"------------------\")\n\n        return cmc[0]\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Train video models with cross entropy loss')\n    # Datasets\n    parser.add_argument('-d', '--dataset', type=str, default='mars',\n                        choices=data_manager.get_names())\n    parser.add_argument('-j', '--workers', default=4, type=int,\n                        help=\"number of data loading workers (default: 4)\")\n    parser.add_argument('--height', type=int, default=256,\n                        help=\"height of an image (default: 256)\")\n    parser.add_argument('--width', type=int, default=128,\n                        help=\"width of an image (default: 128)\")\n    parser.add_argument('--seq-len', type=int, default=4, help=\"number of images to sample in a tracklet\")\n    # Optimization options\n    parser.add_argument('--max-epoch', default=400, type=int,\n                        help=\"maximum epochs to run\")\n    parser.add_argument('--start-epoch', default=0, type=int,\n                        help=\"manual epoch number (useful on restarts)\")\n    parser.add_argument('--train-batch', default=32, type=int,\n                        help=\"train batch size\")\n    parser.add_argument('--test-batch', default=1, type=int, help=\"has to be 1\")\n    parser.add_argument('--lr', '--learning-rate', default=0.00035, type=float,\n                        help=\"initial learning rate, use 0.0001 for rnn, use 0.0003 for pooling and attention\")\n    parser.add_argument('--stepsize', default=(100, 200, 300), type=tuple,\n                        help=\"stepsize to decay learning rate (>0 means this is enabled)\")\n    parser.add_argument('--gamma', default=0.1, type=float,\n                        help=\"learning rate decay\")\n    parser.add_argument('--weight-decay', default=5e-04, type=float,\n                        help=\"weight decay (default: 5e-04)\")\n    parser.add_argument('--margin', type=float, default=0.5, help=\"margin for triplet loss\")\n    parser.add_argument('--num-instances', type=int, default=4,\n                        help=\"number of instances per identity\")\n    parser.add_argument('--print-freq', type=int, default=10, help=\"print frequency\")\n    parser.add_argument('--seed', type=int, default=1, help=\"manual seed\")\n    parser.add_argument('--evaluate', default=False, action='store_true', help=\"evaluation only\")\n    parser.add_argument('--eval-step', type=int, default=10,\n                        help=\"run evaluation for every N epochs (set to -1 to test after training)\")\n    parser.add_argument('--save-dir', type=str, default='log/final_log')\n    parser.add_argument('--use-cpu', action='store_true', help=\"use cpu\")\n    parser.add_argument('--gpu-devices', default='4', type=str, help='gpu device ids for CUDA_VISIBLE_DEVICES')\n    parser.add_argument('--arch', default='Net', type=str, help='models name')\n    parser.add_argument('--warmup_factor', default=0.01, type=float, help='warmup factor')\n    parser.add_argument('--warmup_items', default=10, type=int, help='warmup items')\n    parser.add_argument('--warmup_method', default='linear', type=str, help='warmup method')\n    parser.add_argument('--log_train', default='log_train.txt', type=str, help='train log file name')\n    parser.add_argument('--log_test', default='log_test.txt', type=str, help='test log file name')\n    parser.add_argument('--model_name', default='checkpoint_ep', type=str, help='model file name')\n    parser.add_argument('--feat_dim', default=1024, type=int, help='feature dim is feat_dim x 4')\n    parser.add_argument('--a1', default=1, type=float, help='global loss weight')\n    parser.add_argument('--a2', default=1, type=float, help='region loss weight')\n    parser.add_argument('--a3', default=0.0003, type=float, help='regular loss weight')\n    args = parser.parse_args()\n    main(args)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":14665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"609507985","text":"# @author Huaze Shen\n# @date 2020-01-25\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\ndef is_balanced(root):\n    if root is None:\n        return True\n    return is_balanced(root.left) and is_balanced(root.right) and abs(depth(root.left) - depth(root.right)) <= 1\n\n\ndef depth(root):\n    if root is None:\n        return 0\n    return max(depth(root.left), depth(root.right)) + 1\n\n\nif __name__ == '__main__':\n    root_ = TreeNode(3)\n    root_.left = TreeNode(9)\n    root_.right = TreeNode(20)\n    root_.right.left = TreeNode(15)\n    root_.right.right = TreeNode(7)\n    print(is_balanced(root_))\n","sub_path":"python/balanced_binary_tree.py","file_name":"balanced_binary_tree.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"426921585","text":"import json\nclass DelStu():\n    def __init__(self, socket_client):\n        self.socket_client = socket_client\n\n    def execute(self):\n        try:\n            print(\"Delete student\")\n            name = str(input(\" Please input a student's name: \"))\n            has_item = False\n            student_dict = dict()\n            query_student_dict = dict()\n            query_student_dict[name] = {}\n\n            # first query whether a student with this name exists\n            self.socket_client.send_command(\"query\", query_student_dict)\n            print(\"\\nclient send data to server => \\'command\\':{}, \\'parameters\\':{}\".format(\"query\", query_student_dict))\n\n            boolean, result = self.socket_client.wait_response()\n            result = json.loads(result) # convert dictionary string to dictionary\n            if(result['status'] == \"Fail\"):\n                print(\"\\nThe name {} is not found\".format(name))\n                has_item = False\n                success = False\n            else:\n                student_dict = result['parameters'] # read the original dict of all students\n                del student_dict[name]\n                has_item = True\n                success = True\n\n            \n        except Exception as e: # runs if an error occurred in try\n            print(\"The exception {} occurs.\".format(e))\n            success = False\n        finally: # finally always runs, whether or not try raised\n            if(success == True):\n                #=========================== socket_client sends command and data to the server ==================\n                self.socket_client.send_command(\"del\", student_dict)\n                print(\"\\nclient send data to server => \\'command\\':{}, \\'parameters\\':{}\".format(\"del\", student_dict))\n\n                boolean, result = self.socket_client.wait_response()\n                result = json.loads(result) # convert dictionary string to dictionary\n\n                if result['status'] == \"OK\":\n                    print(\" delete {} success\".format(name))\n                    print(\"Delete succeeded\")\n                else:\n                    print(\" delete {} fail\".format(name))\n            else:\n                print(\"Delete failed\")\n            print(\"Execution result is {}\".format(success))\n            return student_dict","sub_path":"pythone_classPractice/week9(inheritance_socket_FullVersion_Optimize)/Week9_Quiz_106360101謝尚泓/Client/DelStu.py","file_name":"DelStu.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"447098668","text":"#!/usr/bin/env python\n\"\"\"\nCompare the TW Hya results with a drift limited distribution\n\"\"\"\nfrom numpy import * #@UnusedWildImport\nfrom constants import * #@UnusedWildImport\nfrom uTILities import dlydlx\nfrom matplotlib.pyplot import * #@UnusedWildImport\nfrom matplotlib.mlab import find\nimport h5py\nimport os\n\nfiles = [\n# '~/Desktop/new_results/sean/deadzone-paper/active_iter1/data_iter1_fixeddisk_AL22_M20_161.mat',\n '~/Desktop/new_results/sean/deadzone-paper/active_iter1/data_iter1_fixeddisk_AL33_M20_161.mat',\n '~/Desktop/new_results/sean/deadzone-paper/active_iter1/data_iter1_fixeddisk_AL44_M20_161.mat',\n '~/Desktop/new_results/sean/deadzone-paper/active_iter1/data_iter1_fixeddisk_AL55_M20_161.mat'\n ]\nTIME = 5e6*year\n#\n# loop through the files and get the necessary data\n#\nSD = []\nR = []\nA = []\nAF = []\nAD = []\nAM = []\nMX = -Inf\nfor filename in files:\n    #\n    # load the matlab file\n    #\n    f = h5py.File(os.path.expanduser(filename),'r')\n    sigma_g = f['sigma_g_1'][...].transpose()\n    sigma_d_1 = f['sigma_d_1'][...].transpose()\n    alpha = f['alpha_1'][...].transpose()\n    T = f['T_1'][...].transpose()\n    v_dust = f['v_dust_1'][...].transpose()\n    timesteps = f['timesteps_1'][...].transpose()\n    x = f['x_1'][...].flatten()\n    a = f['grainsizes_1'][...].flatten()\n    m = f['m_grid_1'][...].flatten()\n    V_FRAG = f['V_FRAG'][...].flatten()\n    T_COAG_START = f['T_COAG_START'][...].flatten()\n    RHO_S = f['RHO_S'][...].flatten()\n    m_star = f['m_star_1'][...].flatten()\n    grains = size(a)\n    a_0 = a[0]\n    n_r = size(x)\n    it = find(timesteps>=TIME)[0]\n    E_drift = 1.\n    #\n    # calculate the size limits\n    #\n    sigma_d = sum(sigma_d_1[it*grains+arange(grains),:],0)\n    fudge_fr = 0.37\n    fudge_dr = 0.55\n    gamma = dlydlx(x,sigma_g[it])+0.5*dlydlx(x,T[it])-1.5\n    a_fr = fudge_fr*2*sigma_g[it,:]*V_FRAG**2./(3*pi*alpha[it]*RHO_S*k_b*T[it]/mu/m_p)\n    a_dr = fudge_dr/E_drift*2/pi*sigma_d/RHO_S*x**2.*(Grav*m_star[it]/x**3)/(abs(gamma)*(k_b*T[it]/mu/m_p))\n    N = 0.5\n    a_df = fudge_fr*2*sigma_g[it]/(RHO_S*pi)*V_FRAG*sqrt(Grav*m_star[it]/x)/(abs(gamma)*k_b*T[it]/mu/m_p*(1.-N))\n    mask = a_dr None:\n        self.patientInfoRegex = {\n            \"Name\": re.compile(\"Patient:\\s*(.*)\\s*\"),\n            \"Birth\": re.compile(\"Date of Birth:\\s*(.*)\\s*\"),\n            \"Gender\": re.compile(\"Gender:\\s*(.*)\"),\n            \"ID\": re.compile(\"Patient ID:\\s*(.*)\"),\n            #\"FIXMON\": re.compile(\"Fixation Monitor: (.*)\"),\n            #\"FIXTAR\": re.compile(\"Fixation Target: (.*)\"),\n        }\n        
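# each pattern captures the text that follows its label on the report header; readImage later reads group(1) of the match\n        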
self.resultInfoRegex = {\n \"GHT\": re.compile(\"GHT.*:(.*)\"),\n \"VFI\": re.compile(\"VFI.*:(.*)\"),\n \"MD\": re.compile(\"MD.*:(.*)dB\"),\n \"MDp\": re.compile(\"MD.*(P\\s*<\\s*.*%)\"),\n \"PSD\": re.compile(\"PSD.*:(.*)dB\"),\n \"PSDp\": re.compile(\"PSD.*(P\\s*<\\s*.*%)\"),\n }\n self.patternRegex = re.compile(\"24-2|30-2|10-2\")\n self.fieldLocations = {\n \"FIXLOS\": (400, 465, 650, 495),\n \"FPR\": (400, 495, 650, 525),\n \"Duration\": (400, 550, 650, 585),\n \"FNR\": (400, 525, 650, 555),\n \"Fovea\": (400, 590, 650, 620),\n \"Stimulus\": (1000, 405, 1250, 435),\n \"Date\": (1380, 405, 1650, 435),\n \"Background\": (1000, 435, 1250, 465),\n \"Time\": (1380, 435, 1650, 465),\n \"Strategy\": (1000, 465, 1250, 495),\n }\n self.ageLocation = (1380, 465, 1430, 495)\n self.values = {}\n self.numdB_pattern = re.compile('FIELD\\s*(\\S*)\\s*FIELD')\n self.numdB_aux = Image.open(\"numdB_aux.png\")\n def pdf_to_img(self, pdf_file):\n \"\"\"Converts a .pdf file to an Image\n\n Args:\n pdf_file (str): The path to the file\n\n Returns:\n List: The pages of the pdf file\n \"\"\"\n return pdf2image.convert_from_path(pdf_file, poppler_path= Constants.POPPLER_PATH, use_cropbox= False)\n\n def ocr_num(self, image):\n \"\"\"Extracts the text from a preprocessed image of a numeric dB graph value.\n\n Due to the limitations of Pytesseract, different configurations were needed for short numerical values.\n\n Args:\n image (Image): The input image\n\n Returns:\n str: The text inside the image\n \"\"\"\n text = pytesseract.image_to_string(image, config= \"--psm 7 -c tessedit_char_whitelist=FIELD-<0123456789\")\n return text\n\n def ocr_core(self, file):\n \"\"\"Extracts the text from an image.\n\n Args:\n image (Image): The input image\n\n Returns:\n str: The text inside the image\n \"\"\"\n text = pytesseract.image_to_string(file)\n return text\n\n def readImage(self, dir, filename):\n \"\"\"Extracts the information from an VFT report\n\n We assumed that all information fields will be in the same relative location for all reports.\n The method first resizes the image to the same size as our test data. Secondly, several parts\n of the image are cropped and passed to the OCR engine to extract the information. Finally, the \n information are combined into a VFTReport object, which will be returned by the method.\n\n Note:\n Short numeric values, such as the patient's age, and values in numeric dB graphs are treated\n differently than normal strings. 
See readNum for how we process these values\n\n        Args:\n            dir (str): The directory leading to the file.\n            filename (str): The name of the file.\n\n        Returns:\n            VFTReport: The VFT report in the image.\n        \"\"\"\n        filepath = os.path.join(dir, filename)\n        if filename.endswith(\".Pdf\") or filename.endswith(\".pdf\"):\n            images = self.pdf_to_img(filepath)\n            img = images[0]\n        else:\n            img = Image.open(filepath)\n        #Resize image\n        img = img.resize((1655, 2340))\n        arr = np.array(img)\n\n        #Crop regions of interest\n        sensGraph = img.crop((361, 622, 852, 1115))\n        MDGraph = img.crop((190, 1090, 518, 1416))\n        PSDGraph = img.crop((700, 1090, 1028, 1416))\n        resultInfo = img.crop((1100, 1250, 1600, 1550))\n        eyeLabel = img.crop((200, 320, 255, 380))\n        patientInfo = img.crop((0, 0, 910, 230)) \n        patternInfo = img.crop((910, 320, 1650, 380))\n        \n        #Extract text from cropped regions\n        self.values[\"Pattern\"] = self.patternRegex.search(self.ocr_core(patternInfo)).group(0)\n        patientInfoText = self.ocr_core(patientInfo)\n        resultInfoText = self.ocr_core(resultInfo)\n        print(patientInfoText)\n        print(resultInfoText)\n        \n        #Extract information from raw text using regular expressions\n        for k, v in self.patientInfoRegex.items():\n            try:\n                self.values[k] = v.search(patientInfoText).group(1).strip()\n            except AttributeError:\n                self.values[k] = \"\"\n        for k, v in self.resultInfoRegex.items():\n            try:\n                self.values[k] = v.search(resultInfoText).group(1).strip()\n            except AttributeError:\n                self.values[k] = \"\"\n        for k, v in self.fieldLocations.items(): \n            fieldImg = img.crop(v)\n            self.values[k] = self.ocr_core(fieldImg).strip()\n\n        self.values[\"Age\"] = self.readNum(img.crop(self.ageLocation))\n\n        eyeLabel = eyeLabel.convert(\"1\")\n        eye = self.ocr_core(eyeLabel)\n        if \"os\" in eye.lower():\n            eye = \"Left\"\n        elif \"od\" in eye.lower():\n            eye = \"Right\"\n        else:\n            eye = \"\"\n        \n        sensGraph_arr = np.array(sensGraph)\n        MDGraph_arr = np.array(MDGraph) \n        PSDGraph_arr = np.array(PSDGraph)\n\n        #FIXLOS and FIXTST get special treatment because FIXTST must be interpreted from Fixation Losses\n        try:\n            FIXLOS, FIXTST = self.values[\"FIXLOS\"].split(\"/\")\n        except ValueError:\n            FIXLOS = self.values[\"FIXLOS\"]\n            FIXTST = \"\"\n        return VFTReport(filename,self.values[\"Name\"], eye, self.values[\"Date\"] + \" \" + self.values[\"Time\"], self.values[\"Age\"], self.values[\"Birth\"], self.values[\"ID\"], FIXLOS, FIXTST, self.values[\"FNR\"], self.values[\"FPR\"], self.values[\"Duration\"],self.values[\"GHT\"], self.values[\"VFI\"], self.values[\"MD\"], self.values[\"MDp\"], self.values[\"PSD\"], self.values[\"PSDp\"], self.values[\"Pattern\"], self.values[\"Strategy\"], self.values[\"Stimulus\"], self.values[\"Background\"], self.values[\"Fovea\"], self.image2data(sensGraph_arr), self.image2data(MDGraph_arr), self.image2data(PSDGraph_arr), 0)\n\n    def image2data(self, image: np.ndarray):\n        \"\"\"Converts an image of a numeric dB graph to its representation inside the application.\n\n        During our development, we noticed that the accuracy of the OCR engine was very poor\n        for the numeric dB graphs. Therefore special treatment was needed to enhance the accuracy.\n\n        The method first removes the axes from the graph. 
Then it splits the image into a 10 x 10 grid.\n        Then for each cell in the grid, the number in the cell is read.\n\n        Args:\n            image (np.ndarray): The image of the numeric dB graph\n\n        Returns:\n            List[List[str]]: A 10 x 10 matrix containing the values\n        \"\"\"\n        #Crops the axes from the image\n        image[(floor(image.shape[1]/2)-6):(floor(image.shape[1]/2)+6),:] = 255\n        image[:, (floor(image.shape[0]/2)-6):(floor(image.shape[0]/2)+6)] = 255\n        \n        index = 0\n        #Initialize an empty array\n        arr = [[0 for i in range(10)] for j in range(10)]\n\n        for i in range(10):\n            for j in range(10):\n                #Crops a region in the graph\n                val_img = Image.fromarray(image[floor(i * image.shape[0] / 10):floor((i+1) * image.shape[0] / 10),floor(j * image.shape[1] / 10):floor((j+1) * image.shape[1] / 10)])\n                arr[i][j] = self.readNum(val_img)\n        for i in arr:\n            print(i)\n        print(\"\\n\"*3)\n\n        return arr\n    def readNum(self, numImg):\n        \"\"\"Reads a short number from an image\n\n        From our experimentation, we found out that Pytesseract performs very poorly on short numerical strings,\n        especially negative numbers. The main idea of this method is that Pytesseract performs better on longer strings.\n\n        The method first combines the image of the number with the auxiliary image, located in numdB_aux.png. Then \n        the combined image is passed to the OCR engine to extract the text. Finally, the number is filtered from \n        the text using regular expressions.\n\n        Args:\n            numImg (Image): The image of a number\n\n        Returns:\n            str: The number inside the image\n        \"\"\"\n        numImg = numImg.resize((50, 50))  # PIL's resize returns a new image; the result must be kept\n        images = [self.numdB_aux.copy(), numImg, self.numdB_aux.copy()]\n        widths, heights = zip(*(i.size for i in images))\n\n        total_width = sum(widths)\n        max_height = max(heights)\n        new_im = Image.new('RGB', (total_width, max_height), color=\"white\")\n        \n        x_offset = 0\n        for im in images:\n            y_offset = floor((50 - im.height)/2)\n            new_im.paste(im, (x_offset,y_offset))\n            x_offset += im.size[0]\n\n        match = self.numdB_pattern.search(self.ocr_num(new_im))\n        if match and match.group(1) != \"-\":\n            num = match.group(1)\n        else:\n            num = \"\"\n        return num\n\n    def grid(self, image):\n        \"\"\"Debugging tool. 
Shows a 10 x 10 grid on an image\n\n Args:\n image (Any): The input image\n \"\"\"\n for i in range(10):\n image[floor(image.shape[1]/10 * i) ,:] = 0\n image[:,floor(image.shape[0]/10 * i)] = 0\n \n Image.fromarray(image).show()","sub_path":"reader/HFAv3Reader.py","file_name":"HFAv3Reader.py","file_ext":"py","file_size_in_byte":10575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"471504800","text":"import pytest\n\nfrom easydata import parsers\nfrom easydata.queries import key, pq\nfrom tests.factory import data_dict, data_text\n\n\n@pytest.mark.parametrize(\n \"query, test_data, result\",\n [\n (key(\"stock\"), data_dict.stock, True),\n (key(\"stock2\"), data_dict.stock, False),\n ],\n)\ndef test_bool(query, test_data, result):\n bool_parser = parsers.Bool(query)\n assert bool_parser.parse(test_data) is result\n\n\n@pytest.mark.parametrize(\n \"test_data, result\",\n [\n (123, True),\n (123.45, True),\n (0.15, True),\n (-0.15, True),\n (0, False),\n (\"True\", True),\n (\"False\", False),\n (\"true\", True),\n (\"false\", False),\n ],\n)\ndef test_bool_various_types(test_data, result):\n assert parsers.Bool().parse(test_data) is result\n\n\n@pytest.mark.parametrize(\n \"test_data, result\",\n [\n (123, False),\n (123.45, False),\n (0.15, False),\n (-0.15, False),\n (0, True),\n (\"True\", False),\n (\"False\", True),\n (\"true\", False),\n (\"false\", True),\n ],\n)\ndef test_ibool_various_types(test_data, result):\n assert parsers.IBool().parse(test_data) is result\n\n\n@pytest.mark.parametrize(\n \"contains_keys, test_data, result\",\n [\n ([\"pro 13\"], data_text.title, True),\n ([\"something\", \"pro 13\"], data_text.title, True),\n ([\"pros 13\"], data_text.title, False),\n ],\n)\ndef test_bool_contains(contains_keys, test_data, result):\n bool_parser = parsers.Bool(contains=contains_keys)\n assert bool_parser.parse(test_data) is result\n\n\n@pytest.mark.parametrize(\n \"ccontains_keys, test_data, result\",\n [\n ([\"Pro 13\"], data_text.title, True),\n ([\"something\", \"Pro 13\"], data_text.title, True),\n ([\"pro 13\"], data_text.title, False),\n ],\n)\ndef test_bool_contains_case(ccontains_keys, test_data, result):\n bool_parser = parsers.Bool(ccontains=ccontains_keys)\n assert bool_parser.parse(test_data) is result\n\n\n@pytest.mark.parametrize(\n \"query, contains_query, test_data, result\",\n [\n (pq(\"#full-name::text\"), pq(\".brand::text\"), \"Easybook Pro 13\", False),\n (pq(\"#full-name::text\"), pq(\".brand::text-items\"), \"Easybook Pro 13\", False),\n ],\n)\ndef test_bool_contains_query(query, contains_query, test_data, result):\n bool_parser = parsers.Bool(query, contains_query=contains_query)\n assert bool_parser.parse(test_data) is result\n","sub_path":"tests/parsers/test_bool.py","file_name":"test_bool.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"136916590","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nCopyright (c) 2019 - present AppSeed.us\n\"\"\"\nfrom flask import Flask\nfrom flask import render_template, redirect, url_for, request,jsonify\nfrom jinja2 import TemplateNotFound\nfrom google.cloud import bigquery\nimport json\nimport pandas as pd\nimport sys\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport plotly\n\napp = Flask(__name__)\n\ndef waste_table():\n unitdict = {\"lb\":1, \"cup\":0.53,\"tbsp\":0.03,\"tsp\":0.013,\"oz\":0.06}\n client = bigquery.Client()\n 
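# the four queries below read the demand, inventory, prediction and recipe tables; waste is then the predicted-minus-actual servings, costed per ingredient\n    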
demand = (\n 'SELECT * FROM `food-waste-329921.Foodwaste.Demand`;'\n )\n inventory = (\n 'SELECT * FROM `food-waste-329921.Foodwaste.Inventory`;'\n )\n predicted = (\n 'SELECT * FROM `food-waste-329921.Foodwaste.Predicted`;'\n )\n recipe = (\n 'SELECT * FROM `food-waste-329921.Foodwaste.Recipe`;'\n )\n\n Demand = client.query(demand).to_dataframe().sort_values(by=['Date', 'Recipe'])\n Inventory = client.query(inventory).to_dataframe()\n Predicted = client.query(predicted).to_dataframe().sort_values(by=['Date', 'ITEM'])\n Recipe = client.query(recipe).to_dataframe()\n diff=Predicted['Predicted_Servings']-Demand['Quantity']\n leftoverdf=Demand.copy()\n leftoverdf['Quantity']=diff\n qtyleft_dfs=[]\n\n for i in range(len(leftoverdf)):\n recipe_i = leftoverdf.Recipe[i] \n recip=Recipe[Recipe['Recipe_Name']==recipe_i]\n divid=leftoverdf['Quantity'][i]/recip['Servings'].unique()[0]\n df_i = pd.DataFrame(data={\"ingredient\":recip['Ingredients'],\"qtywasted\":divid,\"unit\":recip['Unit']})\n qtyleft_dfs.append(df_i)\n dfwasted = pd.concat(qtyleft_dfs,ignore_index=True).sort_values(by=['ingredient'])\n dfwasted1=dfwasted.groupby('ingredient')['qtywasted'].sum().reset_index()\n dfwasted1['ingredient']=dfwasted1['ingredient'].str.lower()\n dfwasted1['unit']=dfwasted['unit']\n dfwasted1['qtywasted_converted'] = [dfwasted1['qtywasted'][i]*unitdict[dfwasted1['unit'][i]] for i in range(len(dfwasted1))]\n moneywasted=[]\n percentage=[]\n for i in range(len(dfwasted1)):\n #print(dfwasted1['ingredient'][i])\n tyu=Inventory[Inventory['Product']==dfwasted1['ingredient'][i]]\n moneywasted.append(float(dfwasted1['qtywasted_converted'][i]*tyu['Unit_Price__']))\n percentage.append(float((dfwasted1['qtywasted_converted'][i]*100)/tyu['Count_No']))\n moneyinvent=pd.DataFrame(data={'ingredient':dfwasted1.ingredient,'qtywasted':dfwasted1.qtywasted_converted,'moneywasted':moneywasted,'percentage':percentage})\n moneyinvent = moneyinvent.sort_values(by=[\"percentage\"])\n moneyinvent = moneyinvent.round(2)\n return moneyinvent\n\ndef plot():\n df = waste_table()\n df.sort_values(by=['percentage'], inplace=True,ascending=False)\n df['percentage']= df['percentage'].astype(float)\n df['unused'] = 100 -df['percentage']\n\n labels = [\"Wasted\",\"Used\"]\n # Create subplots: use 'domain' type for Pie subplot\n fig = make_subplots(rows=2, cols=3, specs=[[{'type':'domain'},{'type':'domain'},{'type':'domain'}],[{'type':'domain'},{'type':'domain'},{'type':'domain'}]])\n value = [[float(df['percentage'][i]),float(df['unused'][i])] for i in range(6)]\n\n for i in range(6):\n row = 1 if i<3 else 2\n column = 1+i if i<3 else i-2 \n fig.add_trace(go.Pie(labels=labels, values=value[i], name=df['ingredient'][i]), row, column)\n\n fig.update_traces(hole=.4, hoverinfo=\"label+percent+name\")\n fig.update_layout(\n title_text=\"Top 6 Food Wasted\",\n # Add annotations in the center of the donut pies.\n annotations=[\n dict(text=df['ingredient'][0], x=0.12, y=0.8, font_size=10, showarrow=False),\n dict(text=df['ingredient'][1], x=0.50, y=0.8, font_size=10, showarrow=False),\n dict(text=df['ingredient'][2], x=0.89, y=0.8, font_size=10, showarrow=False),\n dict(text=df['ingredient'][3], x=0.12, y=0.2, font_size=10, showarrow=False),\n dict(text=df['ingredient'][4], x=0.50, y=0.2, font_size=10, showarrow=False),\n dict(text=df['ingredient'][5], x=0.89, y=0.2, font_size=10, showarrow=False) \n ])\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n@app.route('/')\ndef index():\n graphJSON = plot()\n return 
render_template('index.html', segment='index',graphJSON = graphJSON)\n\n@app.route('/table')\ndef table():\n try:\n # if not template.endswith( '.html' ):\n # template += '.html'\n\n # Detect the current page\n print(\"here\")\n segment = get_segment( request )\n table = waste_table()\n \n # parsing the DataFrame in json format.\n json_records = table.reset_index().to_json(orient ='records')\n data = []\n data = json.loads(json_records)\n print(data)\n # context = {'d': data}\n return render_template('table.html', table=data)\n\n except TemplateNotFound:\n return render_template('page-404.html'), 404\n \n except:\n return render_template('page-500.html'), 500\n\n@app.route('/gettable',methods=['GET'])\ndef gettable():\n table = waste_table()\n print(\"here0\",table.head())\n leftoverdf = table[['ingredient','qtywasted']]\n print(\"here\",leftoverdf.head())\n\n # parsing the DataFrame in json format.\n json_records = leftoverdf.reset_index().to_json(orient ='records')\n data = []\n data = json.loads(json_records)\n response = jsonify(message= data)\n response.headers.add(\"Access-Control-Allow-Origin\",\"*\")\n return response \n\n\n# Helper - Extract current page name from request \ndef get_segment( request ): \n try:\n segment = request.path.split('/')[-1]\n if segment == '':\n segment = 'index'\n\n return segment \n except:\n return None \n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=8080)","sub_path":"flask_api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"17596395","text":"import yaml\n\nfrom mtxapi.api.Base_Api import Base_Api\nimport os\n\nclass Mtx_Login(Base_Api):\n # fpath = os.path()+'../data/mtx_login.yml'\n # fpath = os.path.abspath(os.path.dirname(os.getcwd())) + '/data/mtx_login.yml'\n with open('/Users/chenjinfei/project/pythonProject/mtxapi/data/mtx_login.yml') as f:\n data = yaml.load(f,Loader=yaml.FullLoader)\n\n def login(self,accounts,pwd):\n path = '/mtx/index.php?s=/index/user/login.html'\n data = {\n 'accounts': accounts,\n 'pwd': pwd\n }\n res = self.mtx_post(path,data)\n return res\n\nif __name__ == '__main__':\n obj = Mtx_Login()\n obj.login()\n","sub_path":"mtxapi/api/Mtx_Login.py","file_name":"Mtx_Login.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76693898","text":"import logging\n\nfrom typing import Callable\n\nfrom ...base_handler import BaseHandler\n\n# from ..messages.connection_invitation import ConnectionInvitation\n\n\nclass ConnectionInvitationHandler(BaseHandler):\n def __init__(self, message: \"ConnectionInvitation\") -> None:\n self.logger = logging.getLogger(__name__)\n self.message = message\n\n def handle(self, thread_state):\n self.logger.debug(\n \"ConnectionInvitationHandler called with thread_state \" + f\"{thread_state}\"\n )\n","sub_path":"agent/indy_catalyst_agent/messaging/connections/handlers/connection_invitation_handler.py","file_name":"connection_invitation_handler.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"237587198","text":"#coding:utf-8\nimport bson\nfrom query import QueryManager\nclass BaseField(object):\n\n\n def __init__(self, **kwargs):\n\n required=kwargs[\"required\"] if \"required\" in kwargs else False\n default=kwargs['default'] if \"default\" in kwargs else None\n 
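# BaseField is a data descriptor: __get__/__set__ below read and write the owning document's doc_data dict\n        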
self.required = required\n        self.default = default\n        name=kwargs['name'] if \"name\" in kwargs else None\n        self.name=name\n\n\n    def __get__(self, doc_instance, owner):\n        if doc_instance is None: return self\n        value=doc_instance.doc_data.get(self.name,None)\n        if value is None:\n            return self.default\n        return value\n\n\n    def __set__(self, instance, value):\n        instance.doc_data[self.name]=value\n\n    def python_value(self,value):\n        return value\n\n    def db_value(self,value):\n        return self.python_value(value)\n\n    def validate(self,value):\n        pass\n\nclass ObjectIdField(BaseField):\n\n    def python_value(self,value):\n        #return str(value)\n        return value\n\n    def db_value(self,value):\n        if isinstance(value,bson.objectid.ObjectId):\n            return value\n        else:\n            return bson.objectid.ObjectId(value)\n\n    def validate(self,value):\n        if not isinstance(value,bson.objectid.ObjectId):\n            raise ValueError(\"Invalid ObjectId\")\n\n\nclass BaseMetaclass(type):\n    \"\"\"Metaclass for all documents.\n    \"\"\"\n\n    def __new__(cls, name, bases, attrs):\n        metaclass = attrs.get('__metaclass__')\n        #super_new = super(ModelMetaclass, cls).__new__\n        if metaclass and issubclass(metaclass, BaseMetaclass):\n            return type.__new__(cls, name, bases, attrs)\n        return type.__new__(cls, name, bases, attrs)\n\n\nclass ModelMetaclass(BaseMetaclass):\n\n\n    def __new__(cls, name, bases, attrs):\n\n        if attrs.get('__metaclass__') == ModelMetaclass:\n            return BaseMetaclass.__new__(cls, name, bases, attrs)\n        collection = name.lower()\n        attrs['_collection'] = collection\n        attrs['_id'] = ObjectIdField(name=\"id\")\n        doc_fields = {}\n        class_name = [name]\n\n        for base_class in bases:\n            if hasattr(base_class, '_fields'):\n                doc_fields.update(base_class._fields)\n                class_name.append(base_class.class_name)\n\n        attrs['class_name'] = '.'.join(reversed(class_name))\n\n        for attr_name, attr_value in attrs.items():\n            if hasattr(attr_value, \"__class__\") and \\\n                issubclass(attr_value.__class__, BaseField): # check whether it subclasses BaseField; these are the data fields we use, each field maps to a key in the JSON document\n\n                if not attr_value.name:\n                    attr_value.name = attr_name\n                doc_fields[attr_name] = attr_value\n        attrs['_fields'] = doc_fields\n        new_class = BaseMetaclass.__new__(cls, name, bases, attrs)\n        new_class.query = QueryManager(new_class,collection)\n        return new_class\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"249720370","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/nsfw_dl/loaders/konachan.py\n# Compiled at: 2017-09-19 09:41:52\n# Size of source mod 2**32: 817 bytes\n\"\"\"\nRead the license at:\nhttps://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE\n\"\"\"\nfrom nsfw_dl.bases import BaseSearchJSON\n\nclass KonachanRandom:\n    __doc__ = '\\n    Gets a random image from konachan.\\n    '\n    data_format = 'bs4/html'\n\n    @staticmethod\n    def prepare_url(args):\n        \"\"\" ... \"\"\"\n        type(args)\n        return ('https://konachan.com/post/random', {}, {})\n\n    @staticmethod\n    def get_image(data):\n        \"\"\" ... \"\"\"\n        return f\"https:{data.find(id='highres').get('href')}\"\n\n\nclass KonachanSearch(BaseSearchJSON):\n    __doc__ = ' Gets a random image with a specific tag from konachan. '\n    data_format = 'json'\n\n    @staticmethod\n    def prepare_url(args):\n        \"\"\" ... 
\"\"\"\n return (\n f\"https://konachan.com/post.json?page=dapi&s=post&q=index&tags={args}\", {}, {})","sub_path":"pycfiles/nsfw_dl-0.4.1-py3.6/konachan.cpython-36.py","file_name":"konachan.cpython-36.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"45331298","text":"import io\n\nfrom setuptools import find_packages, setup\n\nwith io.open('README.md', 'rt', encoding='utf8') as f:\n readme = f.read()\n\nsetup(\n name='managr',\n version='1.0.0',\n url='http://github.com/xChenny/bankr',\n license='MIT',\n maintainer='Andrew and David Chen',\n maintainer_email='andrew.chen923@gmail.com',\n description='The Bankr web application that helps freshers learn more about how to manage their money.',\n long_description=readme,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'flask_cors',\n 'flask',\n 'werkzeug',\n 'mongoengine',\n 'matplotlib',\n 'mpld3'\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"494785819","text":"# -*- coding: utf-8 -*-\n# @Author: Lu Shaohao(Bravo)\n# @Date: 2018-11-01 14:54:12\n# @Last Modified by: Lu Shaohao(Bravo)\n# @Last Modified time: 2018-11-07 20:26:50\n\nimport pdb\n\n# def add(a,b):\n# \treturn a+b\n\n# def cal(a,b):\n# \tpdb.set_trace()\n# \tc = add(a,b)\n# \tprint(c)\n\n\n# if __name__ == '__main__':\n# \tcal(3,4)\n\ndef pdb_test(arg):\n\tfor i in range(arg):\n\t\tprint(i)\n\treturn arg\n\npdb.run(\"pdb_test(3)\")","sub_path":"Python Learning/PDBDeomo.py","file_name":"PDBDeomo.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"428552103","text":"from .. import namespaces\r\nfrom .. import sdcdevice\r\nfrom .. import pmtypes\r\nfrom ..nomenclature import NomenclatureCodes as nc\r\nfrom . 
import providerbase\r\n\r\n# coded values for SDC ntp and time zone\r\nMDC_OP_SET_TIME_SYNC_REF_SRC = pmtypes.CodedValue(nc.MDC_OP_SET_TIME_SYNC_REF_SRC)\r\nMDC_ACT_SET_TIME_ZONE = pmtypes.CodedValue(nc.MDC_ACT_SET_TIME_ZONE)\r\n\r\nOP_SET_NTP = pmtypes.CodedValue(nc.OP_SET_NTP)\r\nOP_SET_TZ = pmtypes.CodedValue(nc.OP_SET_TZ)\r\n\r\nclass GenericSDCClockProvider(providerbase.ProviderRole):\r\n    \"\"\" Handles operations for setting ntp server and time zone.\r\n    It guarantees that mdib has a clock descriptor and that there are operations for setting\r\n    ReferenceSource and Timezone of clock state.\"\"\"\r\n\r\n    def __init__(self, log_prefix):\r\n        super(GenericSDCClockProvider, self).__init__(log_prefix)\r\n        self._set_ntp_operations = []\r\n        self._set_tz_operations = []\r\n\r\n    def initOperations(self, mdib):\r\n        super(GenericSDCClockProvider, self).initOperations(mdib)\r\n        # create a clock descriptor and state if they do not exist in mdib\r\n        clockDescriptor = self._mdib.descriptions.NODETYPE.getOne(namespaces.domTag('ClockDescriptor'), allowNone=True)\r\n        if clockDescriptor is None:\r\n            mdsContainer = self._mdib.descriptions.NODETYPE.getOne(namespaces.domTag('MdsDescriptor'))\r\n            clock_descr_handle = 'clock_' + mdsContainer.handle\r\n            self._logger.info('creating a clock descriptor, handle={}'.format(clock_descr_handle))\r\n            clockDescriptor = self._mdib.createClockDescriptorContainer(handle=clock_descr_handle,\r\n                                                                        parentHandle=mdsContainer.handle,\r\n                                                                        codedValue=pmtypes.CodedValue(123),\r\n                                                                        safetyClassification=pmtypes.SafetyClassification.INF)\r\n\r\n        clockState = self._mdib.states.descriptorHandle.getOne(clockDescriptor.handle, allowNone = True)\r\n        if clockState is None:\r\n            clockState = self._mdib.mkStateContainerFromDescriptor(clockDescriptor)\r\n            self._mdib.addState(clockState)\r\n\r\n\r\n    def makeOperationInstance(self, operationDescriptorContainer):\r\n        if operationDescriptorContainer.coding in (MDC_OP_SET_TIME_SYNC_REF_SRC.coding, OP_SET_NTP.coding):\r\n            self._logger.info('instantiating \"set ntp server\" operation from existing descriptor handle={}'.format(\r\n                operationDescriptorContainer.handle))\r\n            set_ntp_operation = self._mkOperationFromOperationDescriptor(operationDescriptorContainer,\r\n                                                                         currentArgumentHandler=self._setNTPString)\r\n            self._set_ntp_operations.append(set_ntp_operation)\r\n            return set_ntp_operation\r\n        elif operationDescriptorContainer.coding in (MDC_ACT_SET_TIME_ZONE.coding, OP_SET_TZ.coding):\r\n            self._logger.info('instantiating \"set time zone\" operation from existing descriptor handle={}'.format(\r\n                operationDescriptorContainer.handle))\r\n            set_tz_operation = self._mkOperationFromOperationDescriptor(operationDescriptorContainer,\r\n                                                                        currentArgumentHandler=self._setTZString)\r\n            self._set_tz_operations.append(set_tz_operation)\r\n            return set_tz_operation\r\n        return None # ?\r\n\r\n    def makeMissingOperations(self):\r\n        ops = []\r\n        mdsContainer = self._mdib.descriptions.NODETYPE.getOne(namespaces.domTag('MdsDescriptor'))\r\n        clockDescriptor = self._mdib.descriptions.NODETYPE.getOne(namespaces.domTag('ClockDescriptor'), allowNone=True)\r\n        if not self._set_ntp_operations:\r\n            self._logger.info('adding \"set ntp server\" operation, code = {}'.format(nc.MDC_OP_SET_TIME_SYNC_REF_SRC))\r\n            set_ntp_operation = self._mkOperation(sdcdevice.sco.SetStringOperation,\r\n                                                  handle='SET_NTP_SRV_'+ mdsContainer.handle,\r\n                                                  operationTargetHandle=clockDescriptor.handle,\r\n                                                  codedValue=MDC_OP_SET_TIME_SYNC_REF_SRC,\r\n                                                  currentArgumentHandler=self._setNTPString)\r\n            
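# the new operation is tracked locally and also appended to ops, which is returned so the caller can register it with the device\r\n            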
self._set_ntp_operations.append(set_ntp_operation)\r\n            ops.append(set_ntp_operation)\r\n        if not self._set_tz_operations:\r\n            self._logger.info('adding \"set time zone\" operation, code = {}'.format(nc.MDC_ACT_SET_TIME_ZONE))\r\n            set_tz_operation = self._mkOperation(sdcdevice.sco.SetStringOperation,\r\n                                                 handle='SET_TZONE_'+ mdsContainer.handle,\r\n                                                 operationTargetHandle=clockDescriptor.handle,\r\n                                                 codedValue=MDC_ACT_SET_TIME_ZONE,\r\n                                                 currentArgumentHandler=self._setTZString)\r\n            self._set_tz_operations.append(set_tz_operation)\r\n            ops.append(set_tz_operation)\r\n        return ops\r\n\r\n\r\n    def _setNTPString(self, operationInstance, value):\r\n        '''This is the handler for the set ntp server operation.\r\n        It sets the ReferenceSource value of clock state'''\r\n        operationDescriptorHandle = operationInstance.handle\r\n        operationDescriptorContainer = self._mdib.descriptions.handle.getOne(operationDescriptorHandle)\r\n        operationTargetHandle = operationDescriptorContainer.OperationTarget\r\n        self._logger.info('set value {} from {} to {}', operationTargetHandle, operationInstance.currentValue,\r\n                          value)\r\n        with self._mdib.mdibUpdateTransaction() as mgr:\r\n            state = mgr.getComponentState(operationTargetHandle)\r\n            if state.NODETYPE == namespaces.domTag('MdsState'):\r\n                mdsHandle = state.descriptorHandle\r\n                mgr.ungetState(state)\r\n                # look for the ClockState child\r\n                clockDescriptors = self._mdib.descriptions.NODETYPE.get(namespaces.domTag('ClockDescriptor'),[])\r\n                clockDescriptors = [ c for c in clockDescriptors if c.parentHandle == mdsHandle]\r\n                if len(clockDescriptors) == 1:\r\n                    state = mgr.getComponentState(clockDescriptors[0].handle)\r\n                    if state.NODETYPE != namespaces.domTag('ClockState'):\r\n                        raise RuntimeError('_setNTPString: expected ClockState, got {}'.format(state.NODETYPE.localname))\r\n                    state.ReferenceSource = [pmtypes.ElementWithTextOnly(value)]\r\n\r\n\r\n    def _setTZString(self, operationInstance, value):\r\n        '''This is the handler for the set time zone operation.\r\n        It sets the TimeZone value of clock state.'''\r\n        operationDescriptorHandle = operationInstance.handle\r\n        operationDescriptorContainer = self._mdib.descriptions.handle.getOne(operationDescriptorHandle)\r\n        operationTargetHandle = operationDescriptorContainer.OperationTarget\r\n        self._logger.info('set value {} from {} to {}', operationTargetHandle, operationInstance.currentValue,\r\n                          value)\r\n        with self._mdib.mdibUpdateTransaction() as mgr:\r\n            state = mgr.getComponentState(operationTargetHandle)\r\n            if state.NODETYPE == namespaces.domTag('MdsState'):\r\n                mdsHandle = state.descriptorHandle\r\n                mgr.ungetState(state)\r\n                # look for the ClockState child\r\n                clockDescriptors = self._mdib.descriptions.NODETYPE.get(namespaces.domTag('ClockDescriptor'),[])\r\n                clockDescriptors = [ c for c in clockDescriptors if c.parentHandle == mdsHandle]\r\n                if len(clockDescriptors) == 1:\r\n                    state = mgr.getComponentState(clockDescriptors[0].handle)\r\n\r\n            if state.NODETYPE != namespaces.domTag('ClockState'):\r\n                raise RuntimeError('_setTZString: expected ClockState, got {}'.format(state.NODETYPE.localname))\r\n            state.TimeZone = value\r\n","sub_path":"sdc11073/roles/clockprovider.py","file_name":"clockprovider.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"365923027","text":"# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use 
this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# lambdafcns contains symbolic links to lambda functions in boss-tools/lambda.\n# Since lambda is a reserved word, this allows importing from that folder \n# without updating scripts responsible for deploying the lambda code.\nfrom lambdafcns.write_id_index_lambda import handler, get_class_name\n\nimport botocore\nimport json\n#from spdb.spatialdb.object_indices import ObjectIndices\nimport unittest\nfrom unittest.mock import patch\n\nclass TestWriteIdIndexLambda(unittest.TestCase):\n def test_handler_ClientError(self):\n event = {\n 'id_index_table': 'idIndex',\n 's3_index_table': 's3Index',\n 'id_count_table': 'idCount',\n 'cuboid_bucket': 'cuboidBucket',\n 'id_index_new_chunk_threshold': 100,\n 'cuboid_object_key': 'blah',\n 'id_group': ['1', '2', '3'],\n 'version': 0,\n 'write_id_index_status': {\n 'done': False,\n 'delay': 0,\n 'retries_left': 2\n }\n }\n\n context = None\n resp = {}\n\n with patch('lambdafcns.write_id_index_lambda.ObjectIndices') as fake_obj_ind:\n ex = botocore.exceptions.ClientError(resp, 'UpdateItem')\n ex.errno = 10\n ex.message = 'blah'\n ex.strerror = 'blah'\n fake_obj_ind.return_value.write_id_index.side_effect = ex\n with patch(\n 'lambdafcns.write_id_index_lambda.get_region', \n return_value='us-east-1'\n ):\n # Function under test.\n actual = handler(event, context)\n\n self.assertFalse(actual['write_id_index_status']['done'])\n self.assertGreater(actual['write_id_index_status']['delay'], 0)\n self.assertEqual(1, actual['write_id_index_status']['retries_left'])\n self.assertIn('result', actual)\n\n\n def test_handler_raise_ClientError(self):\n \"\"\"\n Test that error is raised when retries_left == 0.\n \"\"\"\n event = {\n 'id_index_table': 'idIndex',\n 's3_index_table': 's3Index',\n 'id_count_table': 'idCount',\n 'cuboid_bucket': 'cuboidBucket',\n 'id_index_new_chunk_threshold': 100,\n 'cuboid_object_key': 'blah',\n 'id_group': ['1', '2', '3'],\n 'version': 0,\n 'write_id_index_status': {\n 'done': False,\n 'delay': 0,\n 'retries_left': 0\n }\n }\n\n context = None\n resp = {}\n\n with patch('lambdafcns.write_id_index_lambda.ObjectIndices') as fake_obj_ind:\n ex = botocore.exceptions.ClientError(resp, 'UpdateItem')\n ex.errno = 10\n ex.message = 'blah'\n ex.strerror = 'blah'\n fake_obj_ind.return_value.write_id_index.side_effect = ex\n with patch(\n 'lambdafcns.write_id_index_lambda.get_region', \n return_value='us-east-1'\n ):\n with self.assertRaises(botocore.exceptions.ClientError):\n # Function under test.\n handler(event, context)\n\n\n def test_get_class_name(self):\n resp = {}\n ex = botocore.exceptions.ClientError(resp, 'UpdateItem')\n actual = get_class_name(ex.__class__)\n self.assertEqual('ClientError', actual)\n\n\n def test_get_class_name_no_period(self):\n actual = get_class_name('foo')\n self.assertIsNone(actual)\n","sub_path":"lmbdtest/test_write_id_index_lambda.py","file_name":"test_write_id_index_lambda.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"390568095","text":"# 
Jeremy Lo\n# CTEC 121 / Winter 2019\n# Module 4 / Problem Set 5\n# Problem 3 (25 points)\n\n\"\"\"\nDevelop a program that draws some sort of substantial face that includes two eyes, a nose, a mouth with some teeth, two ears and some hair.\n\nYou will find faces that were drawn by students in prior classes in a file named faces.png.\n\"\"\"\n\nfrom graphics import *\n\ndef main():\n win = GraphWin(\"Face\",800,800)\n ear1 = Oval(Point(456.0, 411.0),Point(476.0, 444.0))\n ear1.setOutline(\"tan\")\n ear1.setFill(\"tan\")\n ear1.draw(win)\n\n ear2 = ear1.clone()\n ear2.move(-202,0)\n ear2.draw(win)\n \n face1 = Oval(Point(241.0, 301.0),Point(441.0, 594.0))\n face1.move(24,0)\n face1.setOutline(\"tan\")\n face1.setFill(\"tan\")\n face1.draw(win)\n\n \n # face2 = Line(Point(273.0, 506.0),Point(343.0, 590.0))\n # face2.draw(win)\n \n # eyes1 = Oval(Point(291.0, 420.0),Point(321.0, 384.0))\n eyes1 = Oval(Point(286.0, 396.0),Point(329.0, 425.0))\n\n eyes1.setFill(\"white\")\n eyes1.move(20,0)\n eyes1.draw(win)\n \n eyes2 = eyes1.clone()\n eyes2.setFill(\"white\")\n eyes2.move(80,0)\n eyes2.draw(win)\n\n nose1 = Polygon(Point(362.0, 415.0),Point(347.0, 463.0),Point(362.0, 466.0))\n nose1.draw(win)\n\n nose2 = Line(Point(362.0, 467.0),Point(362.0, 415.0))\n nose2.setOutline(\"tan\")\n nose2.draw(win)\n\n mouth1 = Oval(Point(351.0, 490.0),Point(373.0, 500.0))\n mouth1.setFill(\"white\")\n mouth1.move(0,10)\n mouth1.draw(win)\n\n teeth1 = Line(Point(351.0, 505.0),Point(373.0, 505.0))\n teeth1.draw(win)\n\n teeth2 = Line(Point(358.0, 501.0),Point(358.0, 509.0))\n teeth2.draw(win)\n\n teeth3 = teeth2.clone()\n teeth3.move(8,0)\n teeth3.draw(win)\n\n hair1 = Polygon(Point(369.0, 353.0),Point(392.0, 396.0),Point(394.0, 338.0))\n hair1.setFill(\"black\")\n hair1.draw(win)\n\n hair2 = Oval(Point(267.0, 355.0),Point(461.0, 289.0))\n hair2.move(0,10)\n hair2.setFill(\"black\")\n hair2.draw(win)\n\n hair3 = hair1.clone()\n hair3.move(20,0)\n hair3.draw(win)\n\n hair4 = hair3.clone()\n hair4.move(20,0)\n hair4.draw(win)\n\n hair5 = hair4.clone()\n hair5.move(20,0)\n hair5.draw(win)\n\n hair6 = Polygon(Point(450.0, 350.0),Point(461.0, 403.0),Point(461.0, 331.0))\n hair6.setFill(\"black\")\n hair6.draw(win)\n\n hair7 = hair1.clone()\n hair7.move(-20,0)\n hair7.draw(win)\n\n hair8 = hair7.clone()\n hair8.move(-20,0)\n hair8.draw(win)\n\n hair9 = hair7.clone()\n hair9.move(-20,0)\n hair9.draw(win)\n\n hair10 = hair9.clone()\n hair10.move(-20,0)\n hair10.draw(win)\n\n hair11 = hair10.clone()\n hair11.move(-20,0)\n hair11.draw(win)\n\n hair12 = Polygon(Point(265.0, 333.0),Point(292.0, 395.0),Point(295.0, 345.0))\n hair12.setFill(\"black\")\n hair12.draw(win)\n\n hair13 = Polygon(Point(266.0, 335.0),Point(266.0, 407.0),Point(285.0, 343.0))\n hair13.setFill(\"black\")\n hair13.draw(win)\n\n hair14 = Polygon(Point(271.0, 349.0),Point(255.0, 374.0),Point(270.0, 324.0))\n hair14.setFill(\"black\")\n hair14.draw(win)\n\n input(\"\")\n\n\nmain()","sub_path":"problem-set-5-problem-3.py","file_name":"problem-set-5-problem-3.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"135391827","text":"import requests\nimport xml.etree.ElementTree as ET\nfrom collections import defaultdict\nimport json\nimport os\nfrom zipfile import ZipFile\nfrom io import BytesIO\nimport sys\nimport csv\nimport pandas as pd\nimport datetime\n\nfrom AbstractUfoCatcherWrapper import AbstractUfoCatcherWrapper\n\nBASE_URL = 
'http://resource.ufocatch.com/atom/edinetx/query/'\nNAMESPACE = '{http://www.w3.org/2005/Atom}'\n\n\nclass EDINETCatcher(AbstractUfoCatcherWrapper):\n    def __init__(self):\n        self.url = BASE_URL\n        self.namespace = NAMESPACE\n\n    def _get_target_info_dict(self, tree, target_name, namespace, date_from, date_to):\n        target_dict = defaultdict(dict)\n        for el in tree.findall('.//' + namespace + 'entry'):\n            title = el.find(namespace + 'title').text # title of the entry\n            date_full = el.find(namespace + 'updated').text # date the entry was registered (including TZ and time of day)\n            date = self._convert_str_to_date(date_full[0:10]) # extract only the year-month-day part\n            \n            if not (date_from <= date and date <= date_to): continue # filter by date\n            if target_name not in str(title): continue # filter by title\n            \n            _id = el.find(namespace + 'id').text\n            xbrl_url = ''\n            for link in el.findall('./' + namespace + 'link[@type=\"text/xml\"]'):\n                url = link.attrib['href']\n                if not (\"PublicDoc\" in url and \".xbrl\" in url): continue # keep only the Summary xml\n                xbrl_url = url\n                break\n\n            target_dict[_id] = {'id':_id, 'title':title, 'url':xbrl_url, 'date':date_full}\n        return target_dict\n    \n    \n    def _download_file(self, t_symbol, info, output_dir): \n        if info['url'] == \"\": return\n        response = requests.get(info['url'])\n        if response.ok:\n            save_path = output_dir + '/' + t_symbol + '/'\n            os.makedirs(save_path, exist_ok=True)\n            with open(save_path + info['id'] + '.xbrl', mode='w') as f:\n                f.write(response.content.decode())\n","sub_path":"src/EDINETCatcher.py","file_name":"EDINETCatcher.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"126939629","text":"from .util import call_class_function\n\n_SETS_INDEX_KEY = \"sets\"\n_RANGE_INDEX_KEY = \"range\"\n_DEFAULT_INDEX_KEY = \"default\"\n\nclass PyomoItemCreator(object):\n\t''' Helper class for creating various Pyomo items '''\n\t@staticmethod\n\tdef _create_pyomo_item(pyomo_item_creator, pyomo_item_creator_arg):\n\t\treturn pyomo_item_creator(pyomo_item_creator_arg)\n\n\t@staticmethod\n\tdef _set_abstract_model_property(abstract_model, property_name, value):\n\t\tabstract_model.__setattr__(property_name, value)\n\t\t\n\t@staticmethod\n\tdef _create_and_set_abstract_model_property_item(abstract_model, property_name, pyomo_item_creator, pyomo_item_creator_arg):\n\t\t# Create the Pyomo item\n\t\tpyomo_item = PyomoItemCreator._create_pyomo_item(pyomo_item_creator, pyomo_item_creator_arg)\n\t\t# Set the property to the created item\n\t\tPyomoItemCreator._set_abstract_model_property(abstract_model, property_name, pyomo_item)\n\t\treturn pyomo_item\n\t\t\n\t@staticmethod\n\tdef _create_and_set_abstract_model_indexed_property_item(abstract_model, property_name_base, property_name_index, pyomo_item_creator, pyomo_item_creator_arg):\n\t\t# build the property name from base + index and delegate to the non-indexed helper (the original called itself recursively with too few arguments)\n\t\treturn PyomoItemCreator._create_and_set_abstract_model_property_item(abstract_model, property_name_base + str(property_name_index), pyomo_item_creator, 
pyomo_item_creator_arg)\n\t\t\t\n\t@staticmethod\n\tdef _create_pyomo_indexable_item_creator(indexable_type):\n\t\tdef _create_pyomo_indexable_type_item(indexable_item_dict):\n\t\t\tif len(indexable_item_dict[_SETS_INDEX_KEY]) > 0:\n\t\t\t\tif indexable_item_dict[_DEFAULT_INDEX_KEY] is None:\n\t\t\t\t\treturn indexable_type(*indexable_item_dict[_SETS_INDEX_KEY], within=indexable_item_dict[_RANGE_INDEX_KEY])\n\t\t\t\telse:\n\t\t\t\t\treturn indexable_type(*indexable_item_dict[_SETS_INDEX_KEY], within=indexable_item_dict[_RANGE_INDEX_KEY], default=indexable_item_dict[_DEFAULT_INDEX_KEY])\n\t\t\telse:\n\t\t\t\tif indexable_item_dict[_DEFAULT_INDEX_KEY] is None:\n\t\t\t\t\treturn indexable_type(within=indexable_item_dict[_RANGE_INDEX_KEY])\n\t\t\t\telse:\n\t\t\t\t\treturn indexable_type(within=indexable_item_dict[_RANGE_INDEX_KEY], default=indexable_item_dict[_DEFAULT_INDEX_KEY])\n\t\treturn _create_pyomo_indexable_type_item\n\t\t\n\t@staticmethod\n\tdef create_pyomo_indexable_items(abstract_model, set_map, indexable_items, property_base_name, indexable_item_type):\t\t\t\n\t\tmap = dict()\n\t\tfor i in range(len(indexable_items)):\n\t\t\tindexable_item = indexable_items[i]\n\t\t\tdefault = call_class_function(indexable_item, \"get_default\", None)\n\t\t\tindexable_item_arguments = {\n\t\t\t\t\t\t\t\t\t\t\t_SETS_INDEX_KEY: [set_map[set] for set in indexable_item.get_sets()],\n\t\t\t\t\t\t\t\t\t\t\t_RANGE_INDEX_KEY: set_map[indexable_item.get_range()],\n\t\t\t\t\t\t\t\t\t\t\t_DEFAULT_INDEX_KEY: default\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\tmap[indexable_item] = PyomoItemCreator.create_and_set_abstract_model_indexed_property_item(abstract_model,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tindexable_item,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tproperty_base_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ti,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tPyomoItemCreator._create_pyomo_indexable_item_creator(indexable_item_type),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tindexable_item_arguments)\n\t\treturn map","sub_path":"readablepyomo/pyomoitemcreator.py","file_name":"pyomoitemcreator.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"532686550","text":"import csv\r\nimport os\r\nimport src.db as db\r\nimport src.transform as transform\r\n\r\ndef start_transformation(csv_data):\r\n\r\n cafe_data = transform.Transform(csv_data)\r\n # if __name__ == '__main__':\r\n \r\n db.create_products_table_in_cafe_db()\r\n print('Created products table')\r\n db.create_cafe_locations_table_in_cafe_db()\r\n print('Created cafe_locations table')\r\n db.create_orders_table_in_cafe_db()\r\n print('Created orders table')\r\n db.create_products_in_orders_table_in_cafe_db()\r\n print('Created products_in_orders table')\r\n\r\n cafe_data.remove_names()\r\n cafe_data.remove_payment_details()\r\n cafe_data.split_date_time()\r\n cafe_data.reverse_date()\r\n cafe_data.rejoin_date_time()\r\n cafe_data.add_id()\r\n print('Completed first set of transformation')\r\n \r\n db.load_into_cafe_locations_table(cafe_data.data)\r\n print('Loaded into cafe_locations table')\r\n db.load_into_orders_table_and_update_local_ids(cafe_data.data)\r\n print('Loaded into orders table')\r\n \r\n cafe_data.split_products()\r\n cafe_data.split_product_price()\r\n cafe_data.sort_by_id()\r\n print('Completed second set of transformation')\r\n \r\n db.load_into_products_table(cafe_data.data)\r\n print('Loaded 
into products table')\r\n db.load_into_products_in_orders_table(cafe_data.data)\r\n print('Loaded into products_in_orders table')\r\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"402838755","text":"# Copyright (c) 2013 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sqlalchemy as sa\nfrom neutron.db import model_base\nfrom neutron.db import models_v2\nimport neutron.db.api as dbapi\nfrom oslo_log import log\nfrom oslo_utils import timeutils\n\nSTR_LEN = 255\nMAC_LEN = 16\nIF_LEN = 48\nIP_LEN = 48\nUUID_LEN = 36\nSEGTYPE_LEN = 12\nROLE_LEN = 16\nTYPE_LEN = 8\n\nLOG = log.getLogger(__name__)\n\n\nclass H3cHostTopology(model_base.BASEV2):\n \"\"\"computer node and corresponding leaf port topology information\"\"\"\n __tablename__ = 'h3c_host_topology'\n\n host_name = sa.Column(sa.String(STR_LEN), primary_key=True)\n host_mac = sa.Column(sa.String(MAC_LEN), nullable=False)\n leaf_port = sa.Column(sa.String(IF_LEN), primary_key=True)\n leaf_mac = sa.Column(sa.String(MAC_LEN), primary_key=True)\n leaf_bagg = sa.Column(sa.String(IF_LEN), nullable=True)\n created_at = sa.Column(sa.DateTime, nullable=True)\n\n\nclass H3cDeviceTopology(model_base.BASEV2):\n \"\"\"spine and leaf topology information\"\"\"\n __tablename__ = 'h3c_device_topology'\n\n device_mac = sa.Column(sa.String(MAC_LEN), primary_key=True)\n port = sa.Column(sa.String(IF_LEN), primary_key=True)\n peer_mac = sa.Column(sa.String(MAC_LEN), nullable=False)\n peer_port = sa.Column(sa.String(IF_LEN), nullable=False)\n port_type = sa.Column(sa.String(TYPE_LEN), nullable=False)\n role = sa.Column(sa.String(ROLE_LEN), nullable=False)\n created_at = sa.Column(sa.DateTime, nullable=True)\n bagg = sa.Column(sa.String(IF_LEN), nullable=True)\n\n\nclass H3cRelatedNetworks(model_base.BASEV2, models_v2.HasId,\n models_v2.HasTenant):\n \"\"\" Representation for table comware_related_nets\n A network id corresponding a segmentation ID.\n \"\"\"\n __tablename__ = 'h3c_related_nets'\n\n network_id = sa.Column(sa.String(UUID_LEN))\n segmentation_id = sa.Column(sa.Integer)\n segmentation_type = sa.Column(sa.String(SEGTYPE_LEN))\n created_at = sa.Column(sa.DateTime, nullable=True)\n\n\nclass H3cRelatedPorts(model_base.BASEV2, models_v2.HasId,\n models_v2.HasTenant):\n \"\"\" Representation for table comware_related_vms\n This table stores all the port informations.\n \"\"\"\n __tablename__ = 'h3c_related_ports'\n\n device_id = sa.Column(sa.String(STR_LEN))\n host_id = sa.Column(sa.String(STR_LEN))\n port_id = sa.Column(sa.String(UUID_LEN))\n network_id = sa.Column(sa.String(UUID_LEN))\n segmentation_id = sa.Column(sa.Integer)\n created_at = sa.Column(sa.DateTime, nullable=True)\n\n\nclass db_lib(object):\n def __init__(self):\n self.session = dbapi.get_session()\n self.smooth = False\n self.smoothTopology = {}\n self.smooth_host = {}\n\n def add_device_db(self, context, 
device_mac, port,\n peer_mac, peer_port,\n port_type, role, bagg=None):\n dev_db = H3cDeviceTopology(device_mac=device_mac,\n port=port,\n peer_mac=peer_mac,\n peer_port=peer_port,\n port_type=port_type,\n role=role,\n created_at=timeutils.utcnow(),\n bagg=bagg)\n context.session.add(dev_db)\n\n def create_device_topology(self, context, msg, batch):\n LOG.info(\"create_device_topology %s\", msg)\n with context.session.begin(subtransactions=True):\n for topo in msg:\n if self.smooth is True:\n self.smooth_topology(topo)\n\n query = context.session.query(H3cDeviceTopology)\n qry = query.filter_by(device_mac=topo['device_mac'],\n port=topo['port'])\n if qry.count() > 0:\n qry.delete()\n self.add_device_db(context,\n device_mac=topo['device_mac'],\n port=topo['port'],\n peer_mac=topo['peer_mac'],\n peer_port=topo['peer_port'],\n port_type=topo['port_type'],\n role=topo['role'],\n bagg=topo.get('bagg'))\n\n def delete_device_topology(self, context, msg):\n \"\"\"plug network cable or IP change or device replace\"\"\"\n with context.session.begin(subtransactions=True):\n for topo in msg:\n query = context.session.query(H3cDeviceTopology)\n query.filter_by(device_mac=topo['device_mac'],\n port=topo['port']).delete()\n\n def update_device_topology(self, context, msg):\n with context.session.begin(subtransactions=True):\n for topo in msg:\n query = context.session.query(H3cDeviceTopology)\n query.filter_by(device_mac=topo['device_mac'],\n port=topo['port']).delete()\n self.add_device_db(context,\n device_mac=topo['device_mac'],\n port=topo['port'],\n peer_mac=topo['peer_mac'],\n peer_port=topo['peer_port'],\n port_type=topo['port_type'],\n role=topo['role'],\n bagg=topo.get('bagg'))\n\n def del_hostdb(self, context, host_name, host_mac,\n leaf_mac, leaf_port):\n query = context.session.query(H3cHostTopology)\n query.filter_by(host_name=host_name,\n host_mac=host_mac,\n leaf_mac=leaf_mac,\n leaf_port=leaf_port).delete()\n\n def add_hostdb(self, context, host_name, host_mac,\n leaf_port, leaf_mac, leaf_bagg=None):\n com_db = H3cHostTopology(host_name=host_name,\n host_mac=host_mac,\n leaf_port=leaf_port,\n leaf_mac=leaf_mac,\n leaf_bagg=leaf_bagg,\n created_at=timeutils.utcnow())\n context.session.add(com_db)\n\n def create_host_topogoly(self, context, msg, batch):\n LOG.info(\"create_host_topogoly %s\", msg)\n with context.session.begin(subtransactions=True):\n for topo in msg:\n if self.smooth is True:\n self.smooth_host_topology(topo)\n qry = context.session.query(H3cHostTopology). 
\\\n filter_by(host_name=topo['host_name'],\n leaf_port=topo['leaf_port'],\n leaf_mac=topo['leaf_mac'])\n if qry.count() > 0:\n qry.delete()\n\n self.add_hostdb(context,\n host_name=topo['host_name'],\n host_mac=topo['host_mac'],\n leaf_port=topo['leaf_port'],\n leaf_mac=topo['leaf_mac'],\n leaf_bagg=topo.get('bagg'))\n\n def delete_host_topology(self, context, msg):\n LOG.info(\"delete_host_topology %s\", msg)\n with context.session.begin(subtransactions=True):\n for topo in msg:\n \"\"\"all is equal\"\"\"\n self.del_hostdb(context,\n host_name=topo['host_name'],\n host_mac=topo['host_mac'],\n leaf_mac=topo['leaf_mac'],\n leaf_port=topo['leaf_port'])\n\n def update_host_topology(self, context, msg):\n with context.session.begin(subtransactions=True):\n for topo in msg:\n try:\n context.session.query(H3cHostTopology).filter_by(\n host_name=topo['old_host_name'],\n leaf_port=topo['leaf_port'],\n leaf_mac=topo['leaf_mac']).delete()\n except Exception as e:\n LOG.warn(\"update_host_topology delete failed %s\", e)\n\n self.add_hostdb(context,\n host_name=topo['host_name'],\n host_mac=topo['host_mac'],\n leaf_port=topo['leaf_port'],\n leaf_mac=topo['leaf_mac'],\n leaf_bagg=topo.get('bagg'))\n\n def smooth_topology(self, topo):\n if topo['device_mac'] in self.smoothTopology:\n topo_set = self.smoothTopology[topo['device_mac']]\n if topo['port'] in topo_set:\n topo_set.remove(topo['port'])\n\n def smooth_host_topology(self, topo):\n if topo['leaf_mac'] in self.smooth_host:\n host_set = self.smooth_host[topo['leaf_mac']]\n if topo['host_mac'] in host_set:\n host_set.remove(topo['host_mac'])\n\n def smoothstart_topology(self, context, mac):\n with context.session.begin(subtransactions=True):\n \"\"\"device\"\"\"\n qry = (context.session.query(H3cDeviceTopology).\n filter_by(device_mac=mac))\n\n self.smoothTopology[mac] = set()\n if qry.count() > 0:\n self.smooth = True\n topo_set = self.smoothTopology[mac]\n for one in qry:\n topo_set.add(one['port'])\n\n \"\"\"host\"\"\"\n qry = context.session.query(H3cHostTopology).filter_by(leaf_mac=mac)\n\n self.smooth_host[mac] = set()\n if qry.count() > 0:\n self.smooth = True\n host_set = self.smooth_host[mac]\n for one in qry:\n host_set.add(one['host_mac'])\n\n def smoothend_topology(self, context, mac):\n self.smooth = False\n if mac in self.smoothTopology:\n topo_set = self.smoothTopology[mac]\n del self.smoothTopology[mac]\n else:\n topo_set = set()\n\n if mac in self.smooth_host:\n host_set = self.smooth_host[mac]\n del self.smooth_host[mac]\n else:\n host_set = set()\n\n if ((len(topo_set) == 0) and\n (len(host_set) == 0)):\n return\n\n with context.session.begin(subtransactions=True):\n for port in topo_set:\n (context.session.query(H3cDeviceTopology).\n filter_by(device_mac=mac,\n port=port).delete())\n for host_mac in host_set:\n query = context.session.query(H3cHostTopology)\n query.filter_by(leaf_mac=mac,\n host_mac=host_mac).delete()\n\n def update_aggr_info(self, context, mac, net_type, role,\n ifname, aggr_name):\n proc_host = False\n proc_dev = False\n if role == 'spine':\n if net_type == 'vlan':\n proc_dev = True\n else:\n proc_host = True\n if net_type == 'vlan':\n proc_dev = True\n\n with context.session.begin(subtransactions=True):\n if proc_host is True:\n \"\"\"vlan or vxlan\"\"\"\n qry = context.session.query(H3cHostTopology). \\\n filter_by(leaf_mac=mac,\n leaf_port=ifname)\n for one in qry:\n one.leaf_bagg = aggr_name\n one.update(one)\n\n if proc_dev is True:\n \"\"\"only vlan\"\"\"\n qry = context.session.query(H3cDeviceTopology). 
\\\n                filter_by(device_mac=mac,\n                          port=ifname)\n                for one in qry:\n                    one.bagg = aggr_name\n                    one.update(one)\n\n    def is_network_created(self, tenant_id, network_id, seg_id=None):\n        \"\"\"Checks if a network is already known to COMWARE.\"\"\"\n        session = self.session\n        with session.begin(subtransactions=True):\n            if not seg_id:\n                num_nets = (session.query(H3cRelatedNetworks).\n                            filter_by(tenant_id=tenant_id,\n                                      network_id=network_id).count())\n            else:\n                num_nets = (session.query(H3cRelatedNetworks).\n                            filter_by(tenant_id=tenant_id,\n                                      network_id=network_id,\n                                      segmentation_id=seg_id).count())\n\n        return num_nets > 0\n\n    def create_network(self, tenant_id, network_id, segment_id, segment_type):\n        \"\"\" Store a network relationship in db. \"\"\"\n        session = self.session\n        with session.begin(subtransactions=True):\n            network = H3cRelatedNetworks(tenant_id=tenant_id,\n                                         network_id=network_id,\n                                         segmentation_id=segment_id,\n                                         segmentation_type=segment_type,\n                                         created_at=timeutils.utcnow())\n            session.add(network)\n\n    def delete_network(self, tenant_id, network_id):\n        \"\"\" Remove a network relationship from comware db. \"\"\"\n        session = self.session\n        with session.begin(subtransactions=True):\n            query = session.query(H3cRelatedNetworks)\n            query.filter_by(network_id=network_id).delete()\n\n    def get_vm_host(self, port_id,\n                    network_id, tenant_id,\n                    segmentation_id=0):\n        session = self.session\n        with session.begin(subtransactions=True):\n            qry = (session.query(H3cRelatedPorts).\n                   filter_by(tenant_id=tenant_id,\n                             port_id=port_id,\n                             network_id=network_id,\n                             segmentation_id=segmentation_id))\n            for one in qry:\n                return one['host_id']\n\n        return None\n\n    def is_vm_created(self, device_id, host_id, port_id,\n                      network_id, tenant_id):\n        \"\"\"Checks if a VM is already known to comware. \"\"\"\n        session = self.session\n        with session.begin(subtransactions=True):\n            num_vm = (session.query(H3cRelatedPorts).\n                      filter_by(tenant_id=tenant_id,\n                                port_id=port_id,\n                                network_id=network_id,\n                                host_id=host_id).count())\n            return num_vm > 0\n\n    def is_vm_update(self, device_id, host_id, port_id,\n                     network_id, tenant_id,\n                     segmentation_id):\n        session = self.session\n        with session.begin(subtransactions=True):\n            num_vm = (session.query(H3cRelatedPorts).\n                      filter_by(tenant_id=tenant_id,\n                                port_id=port_id,\n                                network_id=network_id,\n                                host_id=host_id,\n                                segmentation_id=segmentation_id).count())\n            return num_vm > 0\n\n    def get_vm_count(self, network_id, host_id, segmentation_id):\n        \"\"\" Return the number of VMs on the host in the given network. \"\"\"\n        session = self.session\n        with session.begin(subtransactions=True):\n            return (session.query(H3cRelatedPorts).\n                    filter_by(network_id=network_id,\n                              host_id=host_id,\n                              segmentation_id=segmentation_id).count())\n\n    def is_leaf_vm_exist(self, network_id, leafmac):\n        \"\"\" Check whether any VM behind the leaf is in the given network. 
\"\"\"\n session = self.session\n with session.begin(subtransactions=True):\n query = (session.query(H3cRelatedPorts).\n filter_by(network_id=network_id))\n for one in query:\n count = (session.query(H3cHostTopology).\n filter_by(host_name=one['host_id'],\n leaf_mac=leafmac).count())\n if count > 0:\n return True\n return False\n\n def update_vm(self, device_id, host_id, port_id,\n network_id, tenant_id,\n segmentation_id):\n session = self.session\n with session.begin(subtransactions=True):\n result = (session.query(H3cRelatedPorts).\n filter_by(tenant_id=tenant_id,\n device_id=device_id,\n host_id=host_id,\n port_id=port_id,\n network_id=network_id))\n for vm in result:\n vm.segmentation_id = segmentation_id\n vm.update(vm)\n\n def create_vm(self, device_id, host_id, port_id,\n network_id, tenant_id,\n segmentation_id=0):\n \"\"\" Create a vm with comware. \"\"\"\n session = self.session\n with session.begin(subtransactions=True):\n vm = H3cRelatedPorts(device_id=device_id,\n host_id=host_id,\n port_id=port_id,\n network_id=network_id,\n tenant_id=tenant_id,\n segmentation_id=segmentation_id,\n created_at=timeutils.utcnow())\n session.add(vm)\n\n def delete_vm(self, device_id, host_id, port_id,\n network_id, tenant_id,\n segmentation_id=0):\n \"\"\"Removes all relevant information about a VM from repository.\"\"\"\n session = self.session\n with session.begin(subtransactions=True):\n (session.query(H3cRelatedPorts).\n filter_by(host_id=host_id,\n port_id=port_id, tenant_id=tenant_id,\n network_id=network_id,\n segmentation_id=segmentation_id).delete())\n\n def new_spine(self, spine_list, topo):\n for spine in spine_list:\n if spine['device_mac'] == topo['device_mac']:\n \"\"\"add spine downport\"\"\"\n if topo['bagg'] is not None:\n if topo['bagg'] not in spine['down_port']:\n spine['down_port'].append(topo['bagg'])\n else:\n if topo['port'] not in spine['down_port']:\n spine['down_port'].append(topo['port'])\n return\n dev_dict = dict()\n dev_dict['device_mac'] = topo['device_mac']\n if topo['bagg'] is not None:\n dev_dict['down_port'] = [topo['bagg']]\n else:\n dev_dict['down_port'] = [topo['port']]\n spine_list.append(dev_dict)\n\n def new_leaf(self, leaf):\n leaf_dict = dict()\n leaf_dict['device_mac'] = leaf['leaf_mac']\n if leaf['leaf_bagg'] is not None:\n leaf_dict['down_port'] = [leaf['leaf_bagg']]\n else:\n leaf_dict['down_port'] = [leaf['leaf_port']]\n return leaf_dict\n\n def is_new_leaf(self, dev_dict, new_leaf):\n for one_dev in dev_dict:\n leaf = one_dev['leaf']\n if leaf['device_mac'] == new_leaf['leaf_mac']:\n return leaf\n return None\n\n def get_host_topology(self, host_id):\n dev_dict = []\n session = self.session\n with session.begin(subtransactions=True):\n query = (session.query(H3cHostTopology).\n filter_by(host_name=host_id))\n for one in query:\n onetopo = {}\n \"\"\"leaf\"\"\"\n old_leaf = self.is_new_leaf(dev_dict, one)\n if old_leaf is None:\n leaf_dev = self.new_leaf(one)\n leaf_dev['up_port'] = []\n upport = leaf_dev['up_port']\n onetopo['leaf'] = leaf_dev\n else:\n if one['leaf_bagg'] is not None:\n if one['leaf_bagg'] not in old_leaf['down_port']:\n old_leaf['down_port'].append(one['leaf_bagg'])\n else:\n if one['leaf_port'] not in old_leaf['down_port']:\n old_leaf['down_port'].append(one['leaf_port'])\n continue\n\n \"\"\"leaf up-port\"\"\"\n leaf_dev_topos = (session.query(H3cDeviceTopology).\n filter_by(device_mac=one['leaf_mac']))\n for leaf_topo in leaf_dev_topos:\n if leaf_topo['bagg'] is not None:\n if leaf_topo['bagg'] not in upport:\n 
upport.append(leaf_topo['bagg'])\n else:\n if leaf_topo['port'] not in upport:\n upport.append(leaf_topo['port'])\n\n \"\"\"spine down-port\"\"\"\n onetopo['spine'] = []\n spine_list = onetopo['spine']\n spine_topos = (session.query(H3cDeviceTopology).\n filter_by(peer_mac=one['leaf_mac']))\n for spine_topo in spine_topos:\n self.new_spine(spine_list, spine_topo)\n dev_dict.append(onetopo)\n return dev_dict\n\n def get_segment(self, session, host_name, segments=None):\n segment = []\n network_id = set()\n vms = (session.query(H3cRelatedPorts).filter_by(host_id=host_name))\n for vm in vms:\n if len(network_id) == 0:\n network_id.add(vm['network_id'])\n elif vm['network_id'] in network_id:\n continue\n else:\n network_id.add(vm['network_id'])\n\n try:\n network = (session.query(H3cRelatedNetworks).\n filter_by(tenant_id=vm['tenant_id'],\n network_id=vm['network_id']).one())\n except Exception as err:\n LOG.warn(\"host %s network:%s get_segment failed: %s\",\n host_name, vm['network_id'], err)\n continue\n if network:\n one = {}\n seg_type = network['segmentation_type']\n one['segment_type'] = seg_type\n if seg_type == 'vlan':\n one['net_segment'] = network['segmentation_id']\n elif seg_type == 'h3c_vxlan' or seg_type == 'vxlan':\n one['net_segment'] = network['segmentation_id']\n one['vlanId'] = vm['segmentation_id']\n \"\"\"single\"\"\"\n segment.append(one)\n \"\"\"All\"\"\"\n if segments is None:\n continue\n if len(segments) == 0:\n segments.append(one)\n else:\n find = False\n for seg in segments:\n if (seg['segment_type'] == one['segment_type'] and\n seg['net_segment'] == one['net_segment'] and\n seg.get('vlanId') == one.get('vlanId')):\n find = True\n break\n\n if not find:\n segments.append(one)\n return segment\n\n def get_spine_cfg(self, session, spin_mac):\n msg = dict()\n msg['role'] = 'spine'\n spines = (session.query(H3cDeviceTopology).\n filter_by(device_mac=spin_mac))\n for spine_topo in spines:\n if spine_topo['bagg'] is not None:\n down_port = spine_topo['bagg']\n else:\n down_port = spine_topo['port']\n if down_port in msg:\n continue\n host_topos = (session.query(H3cHostTopology).\n filter_by(leaf_mac=spine_topo['peer_mac']))\n segments = []\n msg[down_port] = segments\n for host_topo in host_topos:\n self.get_segment(session, host_topo['host_name'], segments)\n return msg\n\n def get_leaf_cfg(self, session, leaf_mac, net_type):\n msg = dict()\n msg['role'] = 'leaf'\n upport = []\n downports = []\n\n msg['upport'] = upport\n msg['up-segment'] = []\n msg['downport'] = downports\n if net_type == 'vlan':\n topos = (session.query(H3cDeviceTopology).\n filter_by(device_mac=leaf_mac))\n for topo in topos:\n if topo['bagg']:\n if topo['bagg'] not in upport:\n upport.append(topo['bagg'])\n else:\n if topo['port'] not in upport:\n upport.append(topo['port'])\n segments = msg['up-segment']\n else:\n segments = None\n\n query = (session.query(H3cHostTopology).\n filter_by(leaf_mac=leaf_mac))\n for host_topo in query:\n segment = self.get_segment(session,\n host_topo['host_name'],\n segments)\n if len(segment) == 0:\n continue\n find = False\n for one in downports:\n if host_topo['leaf_bagg'] is not None:\n tmp_port = host_topo['leaf_bagg']\n else:\n tmp_port = host_topo['leaf_port']\n if tmp_port in one['if']:\n #for one_segment in segment:\n # one['segment'].append(one_segment)\n find = True\n break\n if find is True:\n continue\n downport = {}\n if host_topo['leaf_bagg'] is not None:\n downport['if'] = host_topo['leaf_bagg']\n else:\n downport['if'] = host_topo['leaf_port']\n 
downport['segment'] = segment\n downports.append(downport)\n return msg\n\n def batch_device_cfg(self, context, mac, role, net_type):\n msg = None\n with context.session.begin(subtransactions=True):\n try:\n if role == 'spine' and net_type == 'vlan':\n msg = self.get_spine_cfg(context.session, mac)\n elif role == 'leaf':\n msg = self.get_leaf_cfg(context.session, mac, net_type)\n except Exception as e:\n LOG.exception('batch_device_cfg error %s', e)\n return msg\n\n def get_vlan_by_device(self, session, topo_list, mac, is_del=False):\n msg = {}\n if mac not in self.smoothTopology or is_del is True:\n role = topo_list[0]['role']\n if role != 'spine' and role != 'leaf':\n LOG.warn(\"device %s role error %s\", mac, topo_list)\n return msg\n\n msg[mac] = {}\n msg[mac]['role'] = role\n if role == 'leaf':\n upport = []\n segments = []\n msg[mac]['upport'] = upport\n msg[mac]['up-segment'] = segments\n msg[mac]['downport'] = []\n with session.begin(subtransactions=True):\n for topo in topo_list:\n if role == 'spine':\n if 'bagg' in topo:\n if is_del is True:\n count = self.get_dev_topo_count(session, topo)\n if count > 1:\n down_port = topo['port']\n else:\n down_port = topo['bagg']\n else:\n down_port = topo['bagg']\n else:\n down_port = topo['port']\n segments = []\n msg[mac][down_port] = segments\n leaf_mac = topo['peer_mac'] \n else:\n if 'bagg' in topo:\n if is_del is True:\n count = self.get_dev_topo_count(session, topo)\n if count > 1:\n up_port = topo['port']\n else:\n up_port = topo['bagg']\n else:\n up_port = topo['bagg']\n else:\n up_port = topo['port']\n if up_port not in upport:\n upport.append(up_port)\n leaf_mac=topo['device_mac']\n\n query = (session.query(H3cHostTopology).\n filter_by(leaf_mac=leaf_mac))\n for port in query:\n self.get_segment(session, port['host_name'], segments)\n return msg\n\n def get_vlan_up_info(self, session, msg):\n segments = []\n up_segments = []\n del_segments = []\n\n for mac in msg:\n if msg[mac]['role'] == 'leaf':\n up_segments = msg[mac]['up-segment']\n if len(up_segments) == 0:\n return\n with session.begin(subtransactions=True):\n query = (session.query(H3cHostTopology).\n filter_by(leaf_mac=mac))\n for one in query:\n self.get_segment(session, one['host_name'], segments)\n \"\"\"only one leaf\"\"\"\n break\n for old in up_segments:\n if old not in segments:\n del_segments.append(old)\n for mac in msg:\n if msg[mac]['role'] == 'leaf':\n msg[mac]['up-segment'] = del_segments\n elif msg[mac]['role'] == 'spine':\n for key in msg[mac]:\n if key != 'role':\n msg[mac][key] = del_segments\n\n def get_host_vlan_by_topology(self, session, topos, mac, is_del=False):\n msg = {}\n if mac not in self.smooth_host or is_del is True:\n msg[mac] = {}\n upport = []\n downports = []\n segments = []\n msg[mac]['upport'] = upport\n msg[mac]['up-segment'] = segments\n msg[mac]['downport'] = downports\n msg[mac]['role'] = 'leaf'\n with session.begin(subtransactions=True):\n for one in topos:\n downport = {}\n if 'bagg' in one:\n if is_del is True:\n count = self.get_host_topo_count(session,\n one['host_name'],\n one['leaf_mac']) \n if count > 1:\n \"\"\"have other topology, no process\"\"\"\n continue\n downport['if'] = one['bagg']\n else:\n downport['if'] = one['leaf_port']\n segment = self.get_segment(session,\n one['host_name'],\n segments)\n downport['segment'] = segment\n downports.append(downport)\n\n leaf_topos = (session.query(H3cDeviceTopology).\n filter_by(device_mac=one['leaf_mac']))\n for leaf_topo in leaf_topos:\n \"\"\"upport\"\"\"\n if leaf_topo['bagg'] 
is not None:\n                            tmp_port = leaf_topo['bagg']\n                        else:\n                            tmp_port = leaf_topo['port']\n                        if tmp_port not in upport:\n                            upport.append(tmp_port)\n                        \"\"\"spine\"\"\"\n                        peer_mac = leaf_topo['peer_mac']\n                        if peer_mac not in msg:\n                            msg[peer_mac] = {}\n                            msg[peer_mac]['role'] = 'spine'\n                        spine_topos = (session.query(H3cDeviceTopology).\n                                       filter_by(device_mac=peer_mac,\n                                                 peer_mac=one['leaf_mac']))\n                        for spine_topo in spine_topos:\n                            if spine_topo['bagg'] is not None:\n                                tmp_port = spine_topo['bagg']\n                            else:\n                                tmp_port = spine_topo['port']\n                            msg[peer_mac][tmp_port] = segments\n            if len(segments) == 0:\n                msg = {}\n        return msg\n\n    def get_leaf_vxlan_by_topology(self, session, topos, mac, is_del=False):\n        msg = {}\n        if mac not in self.smooth_host or is_del is True:\n            downports = []\n            msg['role'] = 'leaf'\n            msg['upport'] = []\n            msg['up-segment'] = []\n            msg['downport'] = downports\n            with session.begin(subtransactions=True):\n                for one in topos:\n                    segment = self.get_segment(session,\n                                               one['host_name'])\n                    downport = {}\n                    if 'bagg' in one:\n                        if is_del is True:\n                            count = self.get_host_topo_count(session,\n                                                             one['host_name'],\n                                                             one['leaf_mac'])\n                            if count > 1:\n                                \"\"\"have other topology, no process\"\"\"\n                                continue\n                        downport['if'] = one['bagg']\n                    else:\n                        downport['if'] = one['leaf_port']\n                    downport['segment'] = segment\n                    if len(segment) > 0:\n                        downports.append(downport)\n            if len(downports) == 0:\n                msg = {}\n        return msg\n\n    def get_host_topo_count(self, session, host_name, leaf_mac):\n        return (session.query(H3cHostTopology).\n                filter_by(host_name=host_name,\n                          leaf_mac=leaf_mac).count())\n\n    def get_dev_topo_count(self, session, topo):\n        return (session.query(H3cDeviceTopology).\n                filter_by(device_mac=topo['device_mac'],\n                          peer_mac=topo['peer_mac']).count())\n\n    def aging_device_topology(self, device_mac):\n        try:\n            query = self.session.query(H3cDeviceTopology)\n            query.filter_by(device_mac=device_mac).delete()\n            LOG.info(\"aging device topology, device mac:%s\", device_mac)\n        except Exception as e:\n            LOG.warn(\"aging device topology failed %s\", e)\n\n    def aging_host_topology(self, leaf_mac):\n        try:\n            query = self.session.query(H3cHostTopology)\n            query.filter_by(leaf_mac=leaf_mac).delete()\n            LOG.info(\"aging host topology, device mac:%s\", leaf_mac)\n        except Exception as e:\n            LOG.warn(\"aging host topology failed %s\", e)\n","sub_path":"src/h3c/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":36281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"156304129","text":"#==============================================================================\n# 1.6ms, 50 move, 150 excite, 1400 transient\n# 1MHz clock rate (1mus timebins)\n# 5 frames\n# 150kX mag\n# standard: 300pixels\n# 10kV\n# 30mum == 379pA\n#with filter: 592 dichroic + 550/***32***nm in blue pmt + 650/54nm in red pmt, semrock brightline\n#IL channel config: using preamp from Supra room\n#filtering first, then amplifying\n#preamplifier set to: channel A, floating input, filter at 5kHz, LP, slope 12, gain 1 x100, DC coupling\n#==============================================================================\n\nimport os\nimport sys\nsys.path.append(\"/usr/bin\") # necessary for the tex fonts\nsys.path.append(\"../Python modules/\") # necessary for the tex fonts\nimport scipy as sp\nimport scipy.misc\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport h5py\nimport numpy as np\n#from BackgroundCorrection import *\n#from TConversionThermocoupler import *\nimport matplotlib.cm as cm\nimport 
scipy.ndimage as ndimage\n#from matplotlib_scalebar.scalebar import ScaleBar #### Has issue with plotting using latex font. only import when needed, then unimport\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom MakePdf import *\nfrom matplotlib.pyplot import cm #to plot following colors of rainbow\nfrom matplotlib import rc\n#from CreateDatasets import *\nimport warnings\nwarnings.simplefilter(action = \"ignore\", category = RuntimeWarning)\nwarnings.simplefilter(action = \"ignore\", category = DeprecationWarning)\nwarnings.simplefilter(action = \"ignore\", category = FutureWarning)\nwarnings.simplefilter(action = \"ignore\", category = PendingDeprecationWarning)\n#from Registration import * \n#from tifffile import *\n#from sklearn.mixture import GMM \nimport matplotlib.cm as cm\n#from FluoDecay import *\n#from PlottingFcts import *\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\nimport scipy.misc\n#import matplotlib.animation as animation\nimport gc\nimport tempfile\nfrom tempfile import TemporaryFile\n\nimport skimage\nfrom skimage import exposure\nfrom my_fits import *\n\nimport pickle\nimport my_fits\nfrom uncertainties import unumpy\nfrom numpy import genfromtxt\nfrom CreateDatasets import *\n#PARAMS THAT NEED TO BE CHANGED\n###############################################################################\n###############################################################################\n###############################################################################\n#######\n\nTime_bin = 1000#in ns; \nnominal_time_on = 150.0 #time during which e-beam nominally on, in mus\ntotaltrpoints = 1400 #total number of time-resolved points\n\n\n\ndescription = ['Andrea small NaYF4:Er'] # (20kV, 30$\\mu$m, ' + str(Ps[index]) + 'nm pixels, ' + str(No_experiments[index]) + 'expts., InLens registered)'] #, \\n' #+ obs[index] '] \n \nkv = [10]\n\n#nominal Temps\nnametr = ['2017-03-26-1237_ImageSequence__150.000kX_10.000kV_30mu_5',\n '2017-03-26-1344_ImageSequence__150.000kX_10.000kV_30mu_6',\n '2017-03-26-1455_ImageSequence__150.000kX_10.000kV_30mu_7',\n '2017-03-26-1555_ImageSequence__150.000kX_10.000kV_30mu_8',\n '2017-03-26-1707_ImageSequence__150.000kX_10.000kV_30mu_9',\n '2017-03-26-1802_ImageSequence__150.000kX_10.000kV_30mu_10']\n\nPixel_size = 2.48#nm\nPs = Pixel_size #pixel size in nm, the numbers above with round nm precision\nNo_experiments = 5*np.ones([6])\n\nlet = ['RT','N30','N40','N50','N60', 'N70']\ntemp = [24.9, 30.4, 39.75, 51.05, 60.05, 70.4]\ntempstd = [0.1, 0.3, 0.25, 0.95, 0.35, 0.7]\n######################################## Plot with dose for different apertures\n##files below exist \n\nno_avg = 5\n\nil_data = np.zeros([no_avg,len(nametr)])\nil_data_std = np.zeros([no_avg,len(nametr)])\n\nblue_int_array = np.zeros([no_avg,len(nametr)])\nred_int_array = np.zeros([no_avg,len(nametr)])\n\nblue_std_array = np.zeros([no_avg,len(nametr)])\nred_std_array = np.zeros([no_avg,len(nametr)])\n\nred_decay_array = np.zeros([no_avg,len(nametr),1398])\nblue_decay_array = np.zeros([no_avg,len(nametr),1398])\n\nbgblue_int_array = np.zeros([no_avg,len(nametr)])\nbgred_int_array = np.zeros([no_avg,len(nametr)])\n\nbgblue_std_array = np.zeros([no_avg,len(nametr)])\nbgred_std_array = np.zeros([no_avg,len(nametr)])\n\nbgred_decay_array = np.zeros([no_avg,len(nametr),1398])\nbgblue_decay_array = np.zeros([no_avg,len(nametr),1398])\n\npisize =Pixel_size\n\nlistofindex =np.arange(0,len(nametr))#,11]\n\n\nconsider_whole_light 
= []; #[0,1,2,3,4,5] #0,1,2,3,4,5,6]\n\nax0 = plt.subplot2grid((2,3), (0,0), colspan=1, rowspan=1)\nax1 = plt.subplot2grid((2,3), (0,1), colspan=1, rowspan=1)\nax2 = plt.subplot2grid((2,3), (0,2), colspan=1, rowspan=1)\nax3= plt.subplot2grid((2,3), (1,0), colspan=1, rowspan=1)\nax4 = plt.subplot2grid((2,3), (1,1), colspan=1, rowspan=1)\nax5 = plt.subplot2grid((2,3), (1,2), colspan=1, rowspan=1)\n\naxvec = [ax0, ax1,ax2,ax3,ax4,ax5]\n\n#all cuts adjusted so that if one less pixel, loses contrast\nxinit = [ 16, 5, 12, 22, 4, 13] \nxend = [-16, -5,-12, -22, -4, -13]\nyinit = [ 12, 6, 1, 31, 2, 27] \nyend = [-12, -6, -1, -31, -2, -27]\n\nlistofindex =np.arange(0,len(nametr))\nfor index in listofindex:\n \n print(index)\n il_data[index] = temp[index]\n il_data_std[index] = tempstd[index]\n \n #ANTIGO \n#==============================================================================\n# #IL\n# il = np.load(str(let[index]) +'ILchannel.npz') \n# # ###IL\n# # if index == 7:\n# # il_data[index] = 31.2\n# # else:\n# # aparam = 0.233801\n# # bparam = 0.000144\n# # delta = aparam*np.average(il['data'], axis = (0,1,2)) + bparam\n# # il_data[index] = KThermocouplerconversion(np.average(il['data'], axis = (0,1,2)) + 1.0e-3 + delta)\n# \n# \n# # if index == 0:\n# # deltav2 = +0.240e-3 # KThermocouplerconversion(1.0e-3) #take average per frame \n# # if index == 1:\n# # deltav2 = +0.229e-3\n# # if index == 2:\n# # deltav2 = +0.100e-3\n# # if index == 3:\n# # deltav2 = +0.225e-3\n# # if index == 4:\n# # deltav2 = +0.359e-3\n# # if index == 5:\n# # deltav2 = +0.505e-3\n# # if index == 6:\n# # deltav2 = +0.660e-3\n# # \n# # \n# # hulper = np.average(il['data'], axis = (1,2)) \n# # il_data[index] = KThermocouplerconversion(hulper[0] + 1.0e-3 + deltav2) #take average per frame \n# \n# il_data[index] = temp[index] \n# #print(hulper[0])\n# #print(np.average(il['data'], axis = (0,1,2)))\n# #print(il_data)\n# \n# \n# \n# \n# ############################## FFT to cut noise in Temperature/IL data\n# #result = []\n# #print(dataALLred.shape[1])\n# total_time = totaltrpoints*Time_bin/1000.0 # in us\n# #print(total_time)\n# se1 = np.array(il['data'])\n# #se = se[0, :, :]\n# t = se1.flatten()\n# x = np.linspace(0, total_time, len(t))\n# #plt.figure(1)\n# #plt.plot( x * 1e-6/1e-3, t)\n# #plt.figure(2)\n# fft_y = np.fft.fft(t)\n# n = t.size\n# timestep = Time_bin\n# freq = np.fft.fftfreq(n, d = timestep)\n# #plt.semilogy(freq, np.abs(fft_y))\n# ind = np.abs(freq) > 1000\n# fft_y_cut = np.copy(fft_y)\n# fft_y_cut[ind] = 0.0\n# #plt.semilogy(freq, np.abs(fft_y_cut), 'r')\n# new_y = np.abs(np.fft.ifft(fft_y_cut))\n# #plt.figure(1)\n# #plt.plot( x * 1e-6/1e-3, new_y, label = str(k))\n# #plt.legend()\n# #result.append(np.mean(new_y[:-10]))\n# #result = np.array(result)\n# #deltav =0# -0.190e-3\n# #il_data[index] = KThermocouplerconversion(np.array(np.mean(new_y[:-10])) + 1.0e-3)\n# il_data_std[index] = KThermocouplerconversion(np.std(new_y[:-10]) )\n# #print(il_data)\n# #print(il_data_std)\n# # \n# \n# #il_data_std[index] = KThermocouplerconversion(np.std(il['data'], axis = (1,2)) )\n#==============================================================================\n \n #print(il_data_std) \n #del il, se1, t, fft_y, fft_y_cut, freq, ind, x, new_y\n #gc.collect()\n \n print('after il data') \n \n print('before loading')\n \n #\n \n se = np.load(str(let[index]) +'SEchannel.npz',mmap_mode='r') \n \n \n \n \n \n \n backgdinit = 50\n initbin = (150+50+3)-1\n\n print('after skimage')\n \n #################\n \n #to plot the pics, uncomment 5 
next lines\n if True:\n #axvec[index].imshow(se['data'][xinit[index]:xend[index],yinit[index]:yend[index]],cmap=cm.Greys) #or 'OrRd'\n \n boe_hlp = se['data'][xinit[index]:xend[index],yinit[index]:yend[index]] \n \n fft_hlp = np.fft.fft2(boe_hlp)\n\n #axvec[index].pcolor(np.log(np.abs(fft_hlp)))#, vmin = 3, vmax = 4)\n \n #cla test\n fft_hlp[225:240, 0:70] = 0\n fft_hlp[190:260, 25:30] = 0\n fft_hlp[10:90, 250:270] = 0\n fft_hlp[45:60, 230:280] = 0\n\n# \n# fft_hlp[220:246, 20:35] = 0\n# fft_hlp[220:246, 250:270] = 0\n# \n# fft_hlp[45:60, 250:270] = 0\n# fft_hlp[45:60, 20:35] = 0\n\n new_pic = np.fft.ifft2(fft_hlp)\n \n\n #axvec[index+1].pcolor(boe_hlp)#, vmin = 3, vmax = 4)\n #axvec[index].pcolor(new_pic)#, vmin = 3, vmax = 4)\n \n #plt.show()\n\n #axvec[index].pcolor(new_pic,cmap=cm.Greys) \n \n \n print('after imshow') \n #del segmm, red, blue,se\n gc.collect()\n \n red0 = np.load(str(let[index]) +'Redbright.npz',mmap_mode='r') \n blue0 = np.load(str(let[index]) +'Bluebright.npz',mmap_mode='r') \n \n red = red0['data'] \n blue = blue0['data']\n del red0, blue0\n gc.collect()\n \n segmm, means, covars, weights = gmmone_tr_in_masked_channel_modif_memory_issue(np.abs(new_pic)) \n ##############################################################\n red = red[:,:,xinit[index]:xend[index],yinit[index]:yend[index]]\n blue = blue[:,:,xinit[index]:xend[index],yinit[index]:yend[index]]\n \n del means, covars, weights\n gc.collect()\n \n# axvec[index].imshow(segmm,cmap=cm.Greys)\n# print(segmm)\n# print(np.nanmax(segmm))\n# print(np.nanmin(segmm))\n# del segmm#, red\n# gc.collect()\n \n#multipage_longer('Checkcuts.pdf',dpi=80)\n#plt.show()\n#multipage_longer('Checkfft.pdf',dpi=80)\n#multipage_longer('Checksegmm.pdf',dpi=80)\n#klklkk \n#if True:\n \n #INSIDE\n if index in consider_whole_light:\n hlp = 1.0 #outside, consider all light\n else:\n hlp = np.copy(segmm)\n hlp[~np.isnan(hlp)] = 1.0 #inside\n \n # OUTSIDE\n if index in consider_whole_light:\n hlpd = 0.0 #consider all light\n else:\n hlpd = np.copy(segmm)\n hlpd[~np.isnan(hlpd)] = 0.0 \n hlpd[np.isnan(hlpd)] = 1.0\n \n datared = np.average(red, axis = (0))\n datablue = np.average(blue, axis = (0))\n \n if True is False:\n pass\n else:\n initbin = (150+50+3)-1 #init bin for decay\n backgdinit = 50\n ### 700ns /40ns = 7. 
....\n datared_init = datared[0:backgdinit,:,:]\n datared = datared[initbin:,:,:]\n datablue_init = datablue[0:backgdinit,:,:]\n datablue = datablue[initbin:,:,:]\n\n \n del datared, datablue, datablue_init, datared_init\n gc.collect()\n \n dataALLred = red[:,:,:,:]\n dataALLblue = blue[:,:,:,:]\n \n #nominal_time_on = 150.0\n \n print('bef nanmean')\n \n red_int_array[index] = np.nanmean(dataALLred[:,backgdinit:initbin,:,:]*hlp,axis=(0,1,2,3)) \n gc.collect()\n print('1')\n blue_int_array[index] = np.nanmean(dataALLblue[:,backgdinit:initbin,:,:]*hlp,axis=(0,1,2,3)) \n gc.collect()\n print('2')\n \n red_decay_array[index,:] = np.nanmean(dataALLred[:,initbin:,:,:]*hlp,axis=(0,2,3))\n gc.collect()\n print('3')\n blue_decay_array[index,:] = np.nanmean(dataALLblue[:,initbin:,:,:]*hlp,axis=(0,2,3))\n gc.collect()\n print('4')\n \n red_std_array[index] = np.nanstd(dataALLred[:,backgdinit:initbin,:,:]*hlp,axis=(0,1,2,3)) \n gc.collect()\n print('5')\n blue_std_array[index] = np.nanstd(dataALLblue[:,backgdinit:initbin,:,:]*hlp,axis=(0,1,2,3)) \n gc.collect()\n print('6')\n \n bgred_int_array[index] = np.nanmean(dataALLred[:,backgdinit:initbin,:,:]*hlpd,axis=(0,1,2,3)) \n gc.collect()\n print('7')\n bgblue_int_array[index] = np.nanmean(dataALLblue[:,backgdinit:initbin,:,:]*hlpd,axis=(0,1,2,3)) \n gc.collect()\n print('8')\n \n bgred_decay_array[index,:] = np.nanmean(dataALLred[:,initbin:,:,:]*hlpd,axis=(0,2,3))\n gc.collect()\n print('9')\n bgblue_decay_array[index,:] = np.nanmean(dataALLblue[:,initbin:,:,:]*hlpd,axis=(0,2,3))\n gc.collect()\n print('10')\n \n bgred_std_array[index] = np.nanstd(dataALLred[:,backgdinit:initbin,:,:]*hlpd,axis=(0,1,2,3))\n gc.collect()\n print('11')\n bgblue_std_array[index] = np.nanstd(dataALLblue[:,backgdinit:initbin,:,:]*hlpd,axis=(0,1,2,3)) \n gc.collect()\n \n print('after nanmean')\n \n del dataALLred, dataALLblue, red, blue, se, segmm\n gc.collect()\n \n##### ONCE ALL FITS WORK, \n###### NEED TO RUN THESE LINES BELOW SO THAT ALL NPZ FILES ARE CREATED \n \nmycode = 'Il_data = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Il_data', data = il_data)\n\nmycode = 'Il_data_std = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Il_data_std', data = il_data_std)\n\n#foreground\n \nmycode = 'Red_std_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Red_std_array', data = red_std_array)\n \nmycode = 'Blue_std_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Blue_std_array', data = blue_std_array)\n \nmycode = 'Red_int_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Red_int_array', data = red_int_array)\n \nmycode = 'Blue_int_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Blue_int_array', data = blue_int_array)\n \nmycode = 'Red_decay_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Red_decay_array', data = red_decay_array)\n\nmycode = 'Blue_decay_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('Blue_decay_array', data = blue_decay_array)\n\n###background\n\nmycode = 'bgRed_std_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgRed_std_array', data = bgred_std_array)\n \nmycode = 'bgBlue_std_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgBlue_std_array', data = bgblue_std_array)\n \nmycode = 'bgRed_int_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgRed_int_array', data = 
bgred_int_array)\n\nmycode = 'bgBlue_int_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgBlue_int_array', data = bgblue_int_array)\n\nmycode = 'bgRed_decay_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgRed_decay_array', data = bgred_decay_array)\n\nmycode = 'bgBlue_decay_array = tempfile.NamedTemporaryFile(delete=False)'\nexec(mycode)\nnp.savez('bgBlue_decay_array', data = bgblue_decay_array)\n","sub_path":"2017-03-26_Andrea_NPs_NewTempData_ThemorcoupleOnSample+FilterNEWNEW/AnalysisOfLTLong_stremlinedcode.py","file_name":"AnalysisOfLTLong_stremlinedcode.py","file_ext":"py","file_size_in_byte":15182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"631340744","text":"# Gaussian smoothing of a low-illumination noisy image using the cv2.GaussianBlur function\n\nimport cv2\n\n\n# Reading the grayscale low-illumination noisy image from the disk and displaying it.\nimg = cv2.imread('./resources/low_illu.jpg',0)\ncv2.imshow('Original Image',img)\n\n# Applying Gaussian filtering on the grayscale image and displaying the image\nimg1 = cv2.GaussianBlur(img,(5,5),3)\ncv2.imshow('Blurred',img1)\n\n\n# Wait till any key is pressed\ncv2.waitKey(0)\n# Destroy all the windows created by the imshow() function of OpenCV\ncv2.destroyAllWindows()","sub_path":"ClassAssignments/Assignment1/Ques6a.py","file_name":"Ques6a.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"495185077","text":"import os\r\nimport time\r\nimport datetime\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.backends.cudnn as cudnn\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom tensorboardX import SummaryWriter\r\nimport torchvision.utils as vutils\r\n\r\nimport network\r\nimport dataset\r\nimport utils\r\nimport wandb\r\n\r\nclass Logger:\r\n    def __init__(self, opt):\r\n        wandb.init(project=\"inpainting\")\r\n        wandb.run.name = wandb.run.id\r\n        wandb.run.save()\r\n\r\n        self.writer = SummaryWriter()\r\n        self.current_iteration = 0\r\n\r\n    def begin(self, n_iter):\r\n        self.current_iteration = n_iter\r\n    \r\n    def add_image(self, image, name='image'):\r\n        \"\"\"Logs image grid\r\n        \r\n        Args:\r\n            image: [B, 3, H, W] or [B, 1, H, W]\r\n        \"\"\"\r\n        x = vutils.make_grid(image, normalize=True, scale_each=True)\r\n        self.writer.add_image(name, x, self.current_iteration)\r\n        wandb.log({name: [wandb.Image(x, caption=name)]})\r\n    \r\n    def add_scalars(self, dictionary):\r\n        for k, v in dictionary.items():\r\n            self.writer.add_scalar(k, v, self.current_iteration)\r\n        wandb.log(dictionary)\r\n\r\n\r\ndef create_networks(opt, checkpoint=None):\r\n    generator = utils.create_generator(opt)\r\n    discriminator = utils.create_discriminator(opt)\r\n    perceptualnet = utils.create_perceptualnet()\r\n    \r\n    if checkpoint:\r\n        # Restore the network state\r\n        generator.load_state_dict(checkpoint['G'])\r\n        discriminator.load_state_dict(checkpoint['D'])\r\n    \r\n    # To device\r\n    if opt.multi_gpu == True:\r\n        generator = nn.DataParallel(generator)\r\n        discriminator = nn.DataParallel(discriminator)\r\n        perceptualnet = nn.DataParallel(perceptualnet)\r\n        generator = generator.cuda()\r\n        discriminator = discriminator.cuda()\r\n        perceptualnet = perceptualnet.cuda()\r\n    else:\r\n        generator = generator.cuda()\r\n        discriminator = discriminator.cuda()\r\n        perceptualnet = perceptualnet.cuda()\r\n    \r\n    return generator, discriminator, 
perceptualnet\r\n\r\n\r\ndef create_optimizers(generator, discriminator, opt, checkpoint=None):\r\n optimizer_g = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)\r\n optimizer_d = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)\r\n \r\n def load_optimizer(optimizer, name):\r\n optimizer.load_state_dict(checkpoint[name])\r\n\r\n if checkpoint:\r\n load_optimizer(optimizer_g, 'optimizer_g')\r\n load_optimizer(optimizer_d, 'optimizer_d')\r\n \r\n return optimizer_g, optimizer_d\r\n\r\n\r\ndef auto_sync_checkpoints_to_wandb():\r\n # Save any files starting with \"checkpoint\" as they're written to\r\n wandb.save(os.path.join(wandb.run.dir, \"checkpoint*\"))\r\n\r\n\r\n# Learning rate decrease\r\ndef adjust_learning_rate(lr_in, optimizer, epoch, opt):\r\n \"\"\"Set the learning rate to the initial LR decayed by \"lr_decrease_factor\" every \"lr_decrease_epoch\" epochs\"\"\"\r\n lr = lr_in * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n\r\ndef restore(opt):\r\n if not opt.restore:\r\n return None\r\n \r\n print('-' * 30)\r\n print(f'Restoring from {opt.wandb_runpath}/{opt.restore}...')\r\n try:\r\n if '/home' in opt.wandb_runpath:\r\n # Restore from local directory\r\n from shutil import copyfile\r\n copyfile(opt.wandb_runpath, os.path.join(wandb.run.dir, f'checkpoint_{opt.restore}.pth'))\r\n else:\r\n # Copy from a previous run to the current run directory\r\n wandb.restore(f'checkpoint_{opt.restore}.pth', run_path=opt.wandb_runpath)\r\n \r\n # Load the checkpoint\r\n checkpoint = torch.load(os.path.join(wandb.run.dir, f'checkpoint_{opt.restore}.pth'))\r\n return checkpoint\r\n except Exception as e:\r\n print('Restoring failed :(', e)\r\n return None\r\n \r\n\r\ndef save_state( epoch,batch,n_iter,\r\n G, optimizer_g,\r\n D, optimizer_d,\r\n loss, opt):\r\n package = lambda model: model.module.state_dict() if opt.multi_gpu else model.state_dict()\r\n \r\n state = {\r\n 'epoch': epoch,\r\n 'G': package(G),\r\n 'optimizer_g': package(optimizer_g),\r\n 'D': package(D),\r\n 'optimizer_d': package(optimizer_d),\r\n 'n_iter': n_iter,\r\n 'loss': loss,\r\n }\r\n \r\n print('-' * 30)\r\n \r\n path = os.path.join(wandb.run.dir, f'checkpoint_{n_iter}.pth')\r\n print(f' Saving at {path}')\r\n torch.save(state, path)\r\n \r\n # also save to latest\r\n path = os.path.join(wandb.run.dir, f'checkpoint_latest.pth')\r\n print(f' You can restore this checkpoint with \"--restore latest\"')\r\n torch.save(state, path)\r\n \r\n print('-' * 30)\r\n \r\n\r\ndef WGAN_trainer(opt):\r\n # ----------------------------------------\r\n # Initialize training parameters\r\n # ----------------------------------------\r\n logger = Logger(opt)\r\n checkpoint = restore(opt)\r\n \r\n # cudnn benchmark accelerates the network\r\n if opt.cudnn_benchmark == True:\r\n cudnn.benchmark = True\r\n else:\r\n cudnn.benchmark = False\r\n \r\n # --------------------------------------\r\n # Initialize models \r\n # --------------------------------------\r\n generator, discriminator, perceptualnet = create_networks(opt, checkpoint)\r\n \r\n # Loss functions\r\n L1Loss = nn.L1Loss()\r\n #FeatureMatchingLoss = FML1Loss(opt.fm_param)\r\n \r\n # Optimizers\r\n optimizer_g, optimizer_d = create_optimizers(generator, discriminator, opt, checkpoint)\r\n\r\n # Log metrics with wandb\r\n wandb.watch(generator)\r\n 
wandb.config.update(opt)\r\n auto_sync_checkpoints_to_wandb()\r\n\r\n\r\n # ----------------------------------------\r\n # Initialize training dataset\r\n # ----------------------------------------\r\n\r\n # Define the dataset\r\n trainset = dataset.InpaintDataset(opt)\r\n print('The overall number of images equals to %d' % len(trainset))\r\n\r\n # Define the dataloader\r\n dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)\r\n \r\n # ----------------------------------------\r\n # Training and Testing\r\n # ----------------------------------------\r\n\r\n # Initialize start time\r\n prev_time = time.time()\r\n\r\n initial_epoch = checkpoint['epoch'] if opt.restore else 0\r\n n_iter = checkpoint['n_iter'] if opt.restore else 0\r\n \r\n # training loop\r\n for epoch in range(initial_epoch, opt.epochs):\r\n for batch_idx, (img, mask) in enumerate(dataloader):\r\n n_iter += 1\r\n logger.begin(n_iter)\r\n \r\n # Load mask (shape: [B, 1, H, W]), masked_img (shape: [B, 3, H, W]), img (shape: [B, 3, H, W]) and put it to cuda\r\n img = img.cuda()\r\n mask = mask.cuda()\r\n \r\n ### Train discriminator\r\n optimizer_d.zero_grad()\r\n \r\n # Generator output\r\n first_out, second_out = generator(img, mask)\r\n \r\n # forward propagation\r\n first_out_wholeimg = img * (1 - mask) + first_out * mask # in range [-1, 1]\r\n second_out_wholeimg = img * (1 - mask) + second_out * mask # in range [-1, 1]\r\n \r\n if n_iter % opt.log_every == 1:\r\n logger.add_image(img, 'image/training')\r\n logger.add_image(mask, 'mask/training')\r\n logger.add_image(first_out_wholeimg, 'image/first iteration')\r\n logger.add_image(second_out_wholeimg, 'image/second iteration')\r\n \r\n # Fake samples\r\n fake_scalar = discriminator(second_out_wholeimg.detach(), mask)\r\n # True samples\r\n true_scalar = discriminator(img, mask)\r\n \r\n # Overall Loss and optimize\r\n loss_D = - torch.mean(true_scalar) + torch.mean(fake_scalar)\r\n loss_D.backward()\r\n optimizer_d.step()\r\n\r\n ### Train Generator\r\n optimizer_g.zero_grad()\r\n\r\n # Mask L1 Loss\r\n first_MaskL1Loss = L1Loss(first_out_wholeimg, img)\r\n second_MaskL1Loss = L1Loss(second_out_wholeimg, img)\r\n \r\n # GAN Loss\r\n fake_scalar = discriminator(second_out_wholeimg, mask)\r\n GAN_Loss = - torch.mean(fake_scalar)\r\n\r\n # Get the deep semantic feature maps, and compute Perceptual Loss\r\n img = (img + 1) / 2 # in range [0, 1]\r\n img = utils.normalize_ImageNet_stats(img) # in range of ImageNet\r\n img_featuremaps = perceptualnet(img) # feature maps\r\n second_out_wholeimg = (second_out_wholeimg + 1) / 2 # in range [0, 1]\r\n second_out_wholeimg = utils.normalize_ImageNet_stats(second_out_wholeimg)\r\n second_out_wholeimg_featuremaps = perceptualnet(second_out_wholeimg)\r\n second_PerceptualLoss = L1Loss(second_out_wholeimg_featuremaps, img_featuremaps)\r\n\r\n # Compute losses\r\n loss = first_MaskL1Loss + second_MaskL1Loss + opt.perceptual_param * second_PerceptualLoss + opt.gan_param * GAN_Loss\r\n loss.backward()\r\n optimizer_g.step()\r\n\r\n # Determine approximate time left\r\n batches_done = n_iter\r\n batches_left = opt.epochs * len(dataloader) - batches_done\r\n time_left = datetime.timedelta(seconds = batches_left * (time.time() - prev_time))\r\n prev_time = time.time()\r\n \r\n logger.add_scalars({\r\n 'Epoch': epoch + 1,\r\n 'Iteration': n_iter,\r\n 'loss/first Mask L1 Loss': first_MaskL1Loss.item(),\r\n 'loss/second Mask L1 Loss': second_MaskL1Loss.item(),\r\n 'gan/D 
Loss': loss_D.item(),\r\n                'gan/G Loss': GAN_Loss.item(),\r\n                'Perceptual Loss': second_PerceptualLoss.item()\r\n            })\r\n            \r\n            # Print log\r\n            if n_iter % opt.log_every == 1:\r\n                print(\"\\r[Epoch %d/%d] [Batch %d/%d] iteration %d\" %\r\n                    ((epoch + 1), opt.epochs, batch_idx, len(dataloader), n_iter))\r\n                print(\"\\r[D Loss: %.5f] [G Loss: %.5f] [Perceptual Loss: %.5f] time_left: %s\" %\r\n                    (loss_D.item(), GAN_Loss.item(), second_PerceptualLoss.item(), time_left))\r\n            \r\n            if n_iter % opt.checkpoint_every == 1:\r\n                save_state(\r\n                    epoch=epoch,\r\n                    batch=batch_idx,\r\n                    n_iter=n_iter,\r\n                    G=generator,\r\n                    optimizer_g=optimizer_g,\r\n                    D=discriminator,\r\n                    optimizer_d=optimizer_d,\r\n                    loss=loss,\r\n                    opt=opt\r\n                )\r\n\r\n        # Learning rate decrease\r\n        adjust_learning_rate(opt.lr_g, optimizer_g, (epoch + 1), opt)\r\n        adjust_learning_rate(opt.lr_d, optimizer_d, (epoch + 1), opt)\r\n\r\n\r\ndef LSGAN_trainer(opt):\r\n    # ----------------------------------------\r\n    # Initialize training parameters\r\n    # ----------------------------------------\r\n\r\n    # cudnn benchmark accelerates the network\r\n    if opt.cudnn_benchmark == True:\r\n        cudnn.benchmark = True\r\n    else:\r\n        cudnn.benchmark = False\r\n\r\n    # Build networks\r\n    generator = utils.create_generator(opt)\r\n    discriminator = utils.create_discriminator(opt)\r\n    perceptualnet = utils.create_perceptualnet()\r\n\r\n    # To device\r\n    if opt.multi_gpu == True:\r\n        generator = nn.DataParallel(generator)\r\n        discriminator = nn.DataParallel(discriminator)\r\n        perceptualnet = nn.DataParallel(perceptualnet)\r\n        generator = generator.cuda()\r\n        discriminator = discriminator.cuda()\r\n        perceptualnet = perceptualnet.cuda()\r\n    else:\r\n        generator = generator.cuda()\r\n        discriminator = discriminator.cuda()\r\n        perceptualnet = perceptualnet.cuda()\r\n\r\n    # Loss functions\r\n    L1Loss = nn.L1Loss()\r\n    MSELoss = nn.MSELoss()\r\n    #FeatureMatchingLoss = FML1Loss(opt.fm_param)\r\n\r\n    # Optimizers: the discriminator optimizer must update the discriminator's own parameters\r\n    optimizer_g = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)\r\n    optimizer_d = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)\r\n\r\n    # Learning rate decrease\r\n    def adjust_learning_rate(lr_in, optimizer, epoch, opt):\r\n        \"\"\"Set the learning rate to the initial LR decayed by \"lr_decrease_factor\" every \"lr_decrease_epoch\" epochs\"\"\"\r\n        lr = lr_in * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))\r\n        for param_group in optimizer.param_groups:\r\n            param_group['lr'] = lr\r\n    \r\n    # Save the model if pre_train == True\r\n    def save_model(net, epoch, opt):\r\n        \"\"\"Save the model at \"checkpoint_interval\" and its multiple\"\"\"\r\n        if opt.multi_gpu == True:\r\n            if epoch % opt.checkpoint_interval == 0:\r\n                torch.save(net.module, 'deepfillNet_epoch%d_batchsize%d.pth' % (epoch, opt.batch_size))\r\n                print('The trained model is successfully saved at epoch %d' % (epoch))\r\n        else:\r\n            if epoch % opt.checkpoint_interval == 0:\r\n                torch.save(net, 'deepfillNet_epoch%d_batchsize%d.pth' % (epoch, opt.batch_size))\r\n                print('The trained model is successfully saved at epoch %d' % (epoch))\r\n    \r\n    # ----------------------------------------\r\n    # Initialize training dataset\r\n    # ----------------------------------------\r\n\r\n    # Define the dataset\r\n    trainset = dataset.InpaintDataset(opt)\r\n    print('The overall number of images equals to %d' % len(trainset))\r\n\r\n    # Define the dataloader\r\n    dataloader = DataLoader(trainset, 
batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)\r\n \r\n # ----------------------------------------\r\n # Training and Testing\r\n # ----------------------------------------\r\n\r\n # Initialize start time\r\n prev_time = time.time()\r\n \r\n # Tensor type\r\n Tensor = torch.cuda.FloatTensor\r\n\r\n # Training loop\r\n for epoch in range(opt.epochs):\r\n for batch_idx, (img, mask) in enumerate(dataloader):\r\n\r\n # Load mask (shape: [B, 1, H, W]), masked_img (shape: [B, 3, H, W]), img (shape: [B, 3, H, W]) and put it to cuda\r\n img = img.cuda()\r\n mask = mask.cuda()\r\n\r\n # LSGAN vectors\r\n valid = Tensor(np.ones((img.shape[0], 1, 8, 8)))\r\n fake = Tensor(np.zeros((img.shape[0], 1, 8, 8)))\r\n\r\n ### Train Discriminator\r\n optimizer_d.zero_grad()\r\n\r\n # Generator output\r\n first_out, second_out = generator(img, mask)\r\n\r\n # forward propagation\r\n first_out_wholeimg = img * (1 - mask) + first_out * mask # in range [-1, 1]\r\n second_out_wholeimg = img * (1 - mask) + second_out * mask # in range [-1, 1]\r\n\r\n # Fake samples\r\n fake_scalar = discriminator(second_out_wholeimg.detach(), mask)\r\n # True samples\r\n true_scalar = discriminator(img, mask)\r\n \r\n # Overall Loss and optimize\r\n loss_fake = MSELoss(fake_scalar, fake)\r\n loss_true = MSELoss(true_scalar, valid)\r\n # Overall Loss and optimize\r\n loss_D = 0.5 * (loss_fake + loss_true)\r\n loss_D.backward()\r\n optimizer_d.step()\r\n\r\n ### Train Generator\r\n optimizer_g.zero_grad()\r\n\r\n # Mask L1 Loss\r\n first_MaskL1Loss = L1Loss(first_out_wholeimg, img)\r\n second_MaskL1Loss = L1Loss(second_out_wholeimg, img)\r\n \r\n # GAN Loss\r\n fake_scalar = discriminator(second_out_wholeimg, mask)\r\n GAN_Loss = MSELoss(fake_scalar, valid)\r\n\r\n # Get the deep semantic feature maps, and compute Perceptual Loss\r\n img = (img + 1) / 2 # in range [0, 1]\r\n img = utils.normalize_ImageNet_stats(img) # in range of ImageNet\r\n img_featuremaps = perceptualnet(img) # feature maps\r\n second_out_wholeimg = (second_out_wholeimg + 1) / 2 # in range [0, 1]\r\n second_out_wholeimg = utils.normalize_ImageNet_stats(second_out_wholeimg)\r\n second_out_wholeimg_featuremaps = perceptualnet(second_out_wholeimg)\r\n second_PerceptualLoss = L1Loss(second_out_wholeimg_featuremaps, img_featuremaps)\r\n\r\n # Compute losses\r\n loss = first_MaskL1Loss + second_MaskL1Loss + opt.perceptual_param * second_PerceptualLoss + opt.gan_param * GAN_Loss\r\n loss.backward()\r\n optimizer_g.step()\r\n\r\n # Determine approximate time left\r\n batches_done = epoch * len(dataloader) + batch_idx\r\n batches_left = opt.epochs * len(dataloader) - batches_done\r\n time_left = datetime.timedelta(seconds = batches_left * (time.time() - prev_time))\r\n prev_time = time.time()\r\n\r\n # Print log\r\n print(\"\\r[Epoch %d/%d] [Batch %d/%d] [first Mask L1 Loss: %.5f] [second Mask L1 Loss: %.5f]\" %\r\n ((epoch + 1), opt.epochs, batch_idx, len(dataloader), first_MaskL1Loss.item(), second_MaskL1Loss.item()))\r\n print(\"\\r[D Loss: %.5f] [G Loss: %.5f] [Perceptual Loss: %.5f] time_left: %s\" %\r\n (loss_D.item(), GAN_Loss.item(), second_PerceptualLoss.item(), time_left))\r\n\r\n # Learning rate decrease\r\n adjust_learning_rate(opt.lr_g, optimizer_g, (epoch + 1), opt)\r\n adjust_learning_rate(opt.lr_d, optimizer_d, (epoch + 1), opt)\r\n\r\n # Save the model\r\n save_model(generator, (epoch + 1), 
opt)\r\n","sub_path":"deepfillv2/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":18298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"391697269","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuarioperfil', '0007_auto_20141204_2058'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='userprofile',\n options={'permissions': (('list_app', 'Can view list app'), ('view_app', 'Can view app'), ('add_app', 'Can add app'), ('change_app', 'Can change app'), ('delete_app', 'Can delete app'))},\n ),\n ]\n","sub_path":"userprofile/usuarioperfil/migrations/0008_auto_20141208_1014.py","file_name":"0008_auto_20141208_1014.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"42314222","text":"import requests\nimport time\nimport random\nfrom pprint import pprint\n\ntoken = \"\"\nurl = \"https://api.telegram.org/bot\" + token + \"/\"\n\nlast_id = 0\ncurrent_update = 0\ncurrent_message = None\n\nQUOTES = [\n \"Yep, Youssef is a total dick\",\n \"Youssef, dick king\",\n \"I am Youssef, lord of cocks\",\n \"Something something Youssef is a dick\",\n \"If a dick could walk it would be called Youssef\",\n \"Youssef is dicktastic ( ͡° ͜ʖ ͡°)\"\n]\n\ndef get_quote():\n return random.choice(QUOTES)\n\ndef get_updates():\n return requests.get(url + 'getUpdates?offset={}'.format(current_update)).json()['result']\n\n\ndef get_new_message():\n global current_update\n updates = get_updates()\n if len(updates) > 0:\n update = updates[-1]\n update_id = update['update_id']\n message_id = update['message']['message_id']\n\n if update_id > current_update:\n current_update = update_id\n return update\n else:\n current_update = update_id\n return None\n else:\n return None\n\n\ndef send_text_reply(text, chat_id, message_id):\n send_url = url + \\\n 'sendMessage?chat_id={chat_id}&text={text}&reply_to_message_id={message_id}'\n requests.get(send_url.format(\n text=text,\n chat_id=chat_id,\n message_id=message_id\n ))\n\n\ndef handle_message(message):\n message = message['message']\n message_id = message['message_id']\n text = message['text']\n chat_id = message['chat']['id']\n\n if ('dick' in text.lower()):\n send_text_reply(get_quote(), chat_id, message_id)\n\n\ndef run():\n while True:\n try:\n message = get_new_message()\n if message:\n handle_message(message)\n time.sleep(1.5)\n except:\n time.sleep(1.5)\n\nif __name__ == '__main__':\n run()\n","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"163081886","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-fat/egg/minideblib/SafeWriteFile.py\n# Compiled at: 2007-11-06 15:08:00\nfrom types import StringType\nfrom shutil import copy2\nfrom string import find\nfrom os import rename\n\nclass ObjectNotAllowed(Exception):\n pass\n\n\nclass InvalidMode(Exception):\n pass\n\n\nclass SafeWriteFile:\n\n def __init__(self, newname, realname, mode='w', bufsize=-1):\n if type(newname) != StringType:\n raise ObjectNotAllowed(newname)\n if type(realname) != StringType:\n raise 
ObjectNotAllowed(realname)\n if find(mode, 'r') >= 0:\n raise InvalidMode(mode)\n if find(mode, 'a') >= 0 or find(mode, '+') >= 0:\n copy2(realname, newname)\n self.fobj = open(newname, mode, bufsize)\n self.newname = newname\n self.realname = realname\n self.__abort = 0\n\n def close(self):\n self.fobj.close()\n if not (self.closed and self.__abort):\n rename(self.newname, self.realname)\n\n def abort(self):\n self.__abort = 1\n\n def __del__(self):\n self.abort()\n del self.fobj\n\n def __getattr__(self, attr):\n try:\n return self.__dict__[attr]\n except:\n return eval('self.fobj.' + attr)\n\n\nif __name__ == '__main__':\n import time\n f = SafeWriteFile('sf.new', 'sf.data')\n f.write('test\\n')\n f.flush()\n time.sleep(1)\n f.close()","sub_path":"pycfiles/minideblib-0.6.21.29-py2.5/SafeWriteFile.py","file_name":"SafeWriteFile.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"299420703","text":"# Libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as sio # Used to load data stored in .mat file\nfrom sklearn.metrics import confusion_matrix\nimport time\n\n\n\"\"\" Section 0: Preparing the dataset\n\nX: Contains the facedata, each column represents one face image. \n Each element in a column is a pixel value for the coordinate of one image. \nl: Contains label (face identity for each image) \n \n \"\"\"\n\nmat_content = sio.loadmat('face.mat')\nface_data = mat_content['X'] # Array with dims 2576x520\nface_labels = mat_content['l'] # Array with dims 1x520\nnum_images = face_data.shape[1] # Number of images\nD = face_data.shape[0] # Num of pixels per image = 2576\nIMAGES_PER_PERSON = 10\nnum_people = int(num_images / IMAGES_PER_PERSON) # Number of people the full dataset captures\n\n\"\"\" Split up data to training and testing:\n\n Literature suggests: a split of 60-80 : 40-20 for testing, training respectively to avoid underfitting/overfitting\n Source: https://towardsdatascience.com/train-test-split-and-cross-validation-in-python-80b61beca4b6\n\n Note: Images are ordered in according to their label numbers \n i.e. 
first set of 10 images have label 1, second set of 10 images have label 2 etc...\n\"\"\"\n\n# Initialise training/testing percentage and training/testing numbers\nTRAIN_PC = 0.8 # Percentage of total data used for training\nTEST_PC = 0.2\nTRAIN_RAT = 8\nTEST_RAT = 2\nno_train = int(TRAIN_PC*num_images) # Number of training data: 416\nno_test = int(TEST_PC*num_images) # Number of testing data: 104\n\n# Initialize arrays for training and testing with appropriate dimensions\ntrain_data = np.zeros((D, no_train))\ntest_data = np.zeros((D, no_test))\ntrain_labels = np.zeros((1, no_train))\ntest_labels = np.zeros((1, no_test))\n\n# Filling in the arrays from face data\nfor i in range(num_people):\n    person_faces = face_data[:, IMAGES_PER_PERSON*i:(IMAGES_PER_PERSON*i)+IMAGES_PER_PERSON]\n    train_data[:, TRAIN_RAT*i:(TRAIN_RAT*i)+TRAIN_RAT] = person_faces[:, 0:TRAIN_RAT] # Separate 8 of the same person's face in the training set\n    train_labels[0, TRAIN_RAT*i:(TRAIN_RAT*i)+TRAIN_RAT] = i+1\n    test_data[:, TEST_RAT*i:(TEST_RAT*i)+TEST_RAT] = person_faces[:, TRAIN_RAT:] # Separate 2 of the same person's face in the testing set\n    test_labels[0, TEST_RAT*i:(TEST_RAT*i)+TEST_RAT] = i+1\n\n\"Separate training set into 4 training subsets \"\ntraining_set_a = np.zeros((D, 104))\ntraining_set_b = np.zeros((D, 104))\ntraining_set_c = np.zeros((D, 104))\ntraining_set_d = np.zeros((D, 104))\ntrain_labels_set = np.zeros((1, 104))\n\nfor i in range(num_people):\n    person_faces = train_data[:, 8 * i:(8 * i) + 8]\n    training_set_a[:, 2 * i:(2 * i) + 2] = person_faces[:, 0:2]\n    training_set_b[:, 2 * i:(2 * i) + 2] = person_faces[:, 2:4]\n    training_set_c[:, 2 * i:(2 * i) + 2] = person_faces[:, 4:6]\n    training_set_d[:, 2 * i:(2 * i) + 2] = person_faces[:, 6:8]\n    train_labels_set[0, 2 * i:(2 * i) + 2] = i + 1\nTRAINING_SETS = [training_set_a, training_set_b, training_set_c, training_set_d]\n\n
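# --- Editor's addition (illustrative sanity check, not part of the original script) ---\n# The 8/2 split above should leave shapes and identity labels consistent; names reused from above.\nassert train_data.shape == (D, no_train) and test_data.shape == (D, no_test)\nassert np.array_equal(np.unique(train_labels), np.unique(test_labels)) # both cover people 1..52\nassert all(s.shape == (D, 104) for s in TRAINING_SETS) # 52 people x 2 faces per subset\n\n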
\"\"\" BATCH PCA FUNCTIONS \"\"\"\ndef compute_avface_cardinality_phimatrix(data):\n    \"\"\"\n    :param data: can be either training or testing\n    :return: N, avg_face, A\n    \"\"\"\n    N = data.shape[1] # Cardinality: No of cols = no of images N\n\n    avg_face = np.mean(data, axis=1) # returns \"col vector\" of average image, rankless array with dims Dx1\n    # print(avg_face.shape)\n\n    # Subtract mean from every face: A = [x1 - x_mean, x2 - x_mean, ...]\n    A = data - np.reshape(avg_face, (len(avg_face), 1)) # 2nd term makes avg_face have a rank, A dims =DXN\n    # print('Shape of A is : DxN ', A.shape)\n\n    return N, avg_face, A\n\ndef compute_lowdim_pca(training_data):\n    \"\"\"\n    :param training_data: obtained from data preparation\n    :return: eigenvalues and eigenvectors of lowdim PCA and time taken to run\n    \"\"\"\n\n    start_time = time.time()\n\n    N, avg_face, A = compute_avface_cardinality_phimatrix(training_data)\n\n    lowdim_matrix = np.matmul(A.T, A)/N\n    eigvals, eigvecs = np.linalg.eig(lowdim_matrix)\n    eigvals_rounded = (np.round(eigvals, 2)).real\n    eigvals = eigvals.real\n    eigvecs = eigvecs.real\n\n    lowdim_eigvecs = np.matmul(A, eigvecs)\n\n    mag_lowdim_eigvecs = np.linalg.norm(lowdim_eigvecs, axis=0)\n    reshaped = np.reshape(mag_lowdim_eigvecs, (1, len(mag_lowdim_eigvecs)))\n    norm_lowdim_eigvecs = lowdim_eigvecs / reshaped # Normalised eigenvectors\n\n    print('No of nonzero eigenvalues for low dim PCA: {}'.format(np.unique(eigvals_rounded).size))\n    # test = np.matmul( (norm_lowdim_eigvecs[:, 0]).T, norm_lowdim_eigvecs[:, 0])\n\n    # lowdim PCA is randomly organized (shifted around a bit) after 70 vectors, so it is necessary to resort\n    idx = eigvals.argsort()[::-1]\n    eigvals = eigvals[idx]\n    norm_lowdim_eigvecs = norm_lowdim_eigvecs[:, idx]\n\n    run_time = time.time() - start_time #Returns low dim run time\n\n    return norm_lowdim_eigvecs.real, eigvals, run_time\n\n#Training sets for question 2\ntraining_set = np.concatenate(((training_set_a, training_set_b)), axis=1)\ntraining_set_2 = np.concatenate(((training_set, training_set_c)), axis=1)\ntraining_set_3 = np.concatenate(((training_set_2, training_set_d)), axis=1)\n\n#CALCULATING BATCH PCA FOR THE WHOLE TRAINING SET AND A TRAINING SUBSET\nlow_eigvecs, low_eigvals, lowdim_runtime = compute_lowdim_pca(train_data) #Returns runtime for entire training data\nlow_eigvecs_single, low_eigvals_single, lowdim_runtime_single = compute_lowdim_pca(training_set_3) #Returns runtime for one subset of training data\nprint(lowdim_runtime_single)\n\n\ndef face_reconstruct_batch(data, M, eigvecs1):\n    \"\"\"\n    :param data: Training OR testing\n    :param M: Lowdim bases used for reconstruction\n    :return: Reconstruction Error for Each image, and projection coefficient matrix\n    \"\"\"\n\n    N, avg_face, A = compute_avface_cardinality_phimatrix(data)\n\n    # Each row in wn contains the projection coefficients for one datapoint (image)\n    wn = np.matmul(A.T, eigvecs1[:, 0:M].real) # NxD * DxM, Project faces onto eigenvector values, project coefficients: shape NxM\n\n    reconst_faces = np.zeros((D, N))\n\n    for num in range(N):\n        proj_cofs_datapoint = wn[num, :]\n        reconst_face = avg_face + np.matmul(eigvecs1[:, 0:M].real, proj_cofs_datapoint) # Can do this because no such thing as \"row\" or \"col\" vector\n        reconst_faces[:, num] = reconst_face\n\n    recon_error_for_each_image = np.sum((np.linalg.norm(data-reconst_faces)) **2) / N\n\n    return recon_error_for_each_image, wn\n\n
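# --- Editor's addition (illustrative usage, not in the original): a quick call that\n# reconstructs the training faces with a 50-vector basis; names reused from the calls above.\n_demo_err, _demo_wn = face_reconstruct_batch(train_data, 50, low_eigvecs)\nprint('Demo: batch-PCA reconstruction error with M=50 bases: {:.2f}'.format(_demo_err))\n\n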
def compute_NN(wn_train, wn_test):\n    \"\"\"\n    Nearest neighbour function: Calculates the difference between eigenface projections of the test image and each training image\n    Finds the minimum Euclidean (L2) distance\n    Assigns the test image the index of the training image with the minimum distance\n\n    :param wn_train: Matrix of eigenspace coefficients for training data\n    :param wn_test: Single col vector of SINGLE test image eigenspace coefficients\n    :return: index of label identified\n    \"\"\"\n    # 1. Find difference values: w - wn , dimension MxN: M is number of bases, N is no of training images used\n\n    recognition_vector = np.linalg.norm((wn_train.T - np.reshape(wn_test, (len(wn_test), 1))), axis=0) # Note wn must be transposed!\n    index = np.argmin(recognition_vector)\n    # index = np.argwhere(np.min(recognition_vector))\n\n\n    return index\n\n\n\"\"\" QUESTION 2 FUNCTIONS \"\"\"\n\ndef compute_avface_N_S(data):\n    \"\"\"\n    :param data: can be either training or testing\n    :return: avg_face, N, S, A, fake_S\n    \"\"\"\n    N = data.shape[1] # Cardinality: No of cols = no of images N\n\n    avg_face = np.mean(data, axis=1) # returns \"col vector\" of average image, rankless array with dims Dx1\n    # print(avg_face.shape)\n\n    # Subtract mean from every face: A = [x1 - x_mean, x2 - x_mean, ...]\n    A = data - np.reshape(avg_face, (len(avg_face), 1)) # 2nd term makes avg_face have a rank, A dims =DXN\n\n    S = np.matmul(A, A.T)/N # High dimensional\n\n    fake_S = np.matmul(A.T, A)/N # Low dimensional\n\n    # print('Shape of A is : DxN ', A.shape)\n\n    return avg_face, N, S, A, fake_S\n\ndef compute_combined_data(set1, set2):\n    \"\"\"\n    :param set1: The base set to which another set (usually smaller) will be added\n    :param set2: The smaller set added to the base set\n    :return:\n    \"\"\"\n\n    #Perform PCA on set 1\n    start_time = time.time()\n\n    avface1, N1, S1, A1, fake_S1 = compute_avface_N_S(set1)\n    eigvals1, eigvecs1 = np.linalg.eig(fake_S1) #Reduction of covariance matrix: NxN\n    eigvecs1 = np.matmul(A1, eigvecs1) #DxN * NxN --> D*N\n    eigvecs1 = eigvecs1 / np.linalg.norm(eigvecs1, axis=0) #Normalise the eigenvectors\n\n    run_time_set1 = time.time() - start_time #Runtime of set 1 : How long it takes to compute PCA on Set 1\n\n    eigvals1 = eigvals1.real\n    eigvecs1 = eigvecs1.real\n    idx = eigvals1.argsort()[::-1]\n    eigvals1 = eigvals1[idx]\n    P1 = eigvecs1[:, idx] #Returns P1 eigenvectors\n\n\n    #Perform PCA on set 2\n    start_time_2 = time.time()\n    avface2, N2, S2, A2, fake_S2 = compute_avface_N_S(set2)\n    eigvals2, eigvecs2 = np.linalg.eig(fake_S2)\n    eigvecs2 = np.matmul(A2, eigvecs2)\n    eigvecs2 = eigvecs2 / np.linalg.norm(eigvecs2, axis=0) #Normalise the eigenvectors\n\n    run_time_set2 = time.time() - start_time_2 #Runtime of set 2 : How long it takes to compute PCA on Set 2\n\n    #Ordering eigenvalues in descending order\n    eigvals2 = eigvals2.real\n    eigvecs2 = eigvecs2.real\n    idx = eigvals2.argsort()[::-1]\n    eigvals2 = eigvals2[idx]\n    P2 = eigvecs2[:, idx]\n\n    start_time_3 = time.time()\n    N3 = N1 + N2\n    avface3 = (N1 * avface1 + N2 * avface2)/N3\n    mean_diff = avface1 - avface2\n    S3 = (N1/N3) * S1 + (N2/N3) * S2 + ((N1*N2)/(N3**2)) * np.outer(mean_diff, mean_diff) # mean_diff is 1-D, so np.outer (not matmul) gives the DxD rank-one term\n    run_time_set3 = time.time() - start_time_3 #Run_time_set 3 is 1st part of merging time: how long it takes to combine Set 1 and Set 2: ie find S3 and N3\n\n    return avface3, N3, S3, P1, P2, avface1, avface2, run_time_set1, run_time_set2, run_time_set3\n\n
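# --- Editor's addition (illustrative check, not in the original): the merge identity used in\n# compute_combined_data, S3 = (N1/N3)*S1 + (N2/N3)*S2 + (N1*N2/N3**2)*outer(m1-m2, m1-m2),\n# holds exactly for biased (divide-by-N) covariances; verified here on small random data.\n_a, _b = np.random.rand(3, 4), np.random.rand(3, 6)\n_m1, _m2 = _a.mean(axis=1), _b.mean(axis=1)\n_S1 = np.matmul(_a - _m1[:, None], (_a - _m1[:, None]).T) / 4\n_S2 = np.matmul(_b - _m2[:, None], (_b - _m2[:, None]).T) / 6\n_c = np.concatenate((_a, _b), axis=1)\n_m3 = _c.mean(axis=1)\n_S3_direct = np.matmul(_c - _m3[:, None], (_c - _m3[:, None]).T) / 10\n_S3_merged = 0.4 * _S1 + 0.6 * _S2 + (24 / 100) * np.outer(_m1 - _m2, _m1 - _m2)\nassert np.allclose(_S3_direct, _S3_merged)\n\n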
def compute_ssmatrix(P1, P2, m1_2, d1, d2):\n    \"\"\"\n    Computing PHI (sufficient spanning matrix)\n    param: P1: eigenvectors for set A : each column is an eigenvector\n    param: P2: eigenvectors for set B : each column is an eigenvector\n    param: m1_2: avg_face1 - avg_face2\n    param: d1: number of significant eigenvectors for set 1\n    param: d2: number of significant eigenvectors for set 2\n    return: PHI, an orthonormal basis of dimensions Dx(d1+d2+1)\n    \"\"\"\n\n    # Obtain only the first d1 and d2 eigenvectors from P1 and P2\n    P_d1 = P1[:, 0: d1]\n    P_d2 = P2[:, 0: d2]\n\n    # Combine the arrays into one array X: should have Dx(d1+d2+1) dimensions\n    intermediate = np.concatenate((P_d1, P_d2), axis=1)\n    X = np.concatenate((intermediate, np.reshape(m1_2, (D, 1))), axis=1)\n\n    # Returns orthonormal basis array: Q with d1+d2+1 columns via QR (Gram-Schmidt orthonormalization)\n    PHI, R = np.linalg.qr(X) #PHI is of dimensions Dx(d1+d2+1)\n\n    return PHI\n\ndef compute_P3(PHI, S3):\n\n    first_mat_mul = np.matmul(PHI.T, S3)\n    second_mat_mul = np.matmul(first_mat_mul, PHI)\n    R1, eigvals_3, R2 = np.linalg.svd(second_mat_mul) #SVD returns the eigenvectors of the matrix product\n\n    return R1, eigvals_3, R2\n\n\n\"\"\" Calculating final P3 \"\"\"\n\ntraining_set = np.array(training_set_a)\n\nrun_times_set1 = []\nrun_times_set2 = []\nrun_times_set3 = []\nmerge_times = []\n\nstart_time_tot = time.time() #Start\n\n\ndef face_reconstruct_incremental(data, M):\n    \"\"\"\n    :param data: Training OR testing\n    :param M: Lowdim bases used for reconstruction\n    :return: Reconstruction Error for Each image, and projection coefficient matrix\n\n    \"\"\"\n    N, avg_face, A = compute_avface_cardinality_phimatrix(data)\n\n\n    # Each row in wn contains the projection coefficients for one datapoint (image)\n    # wn_batch = np.matmul(A.T, low_eigvecs[:, 0:M].real) # NxD * DxM, Project faces onto eigenvector values, project coefficients: shape NxM\n    wn_incr = np.matmul(A.T, P3[:, 0:M].real)\n    reconst_faces = np.zeros((D, N))\n\n    for num in range(N):\n        proj_cofs_datapoint = wn_incr[num, :]\n        reconst_face = avg_face + np.matmul(P3[:, 0:M].real, proj_cofs_datapoint) # Can do this because no such thing as \"row\" or \"col\" vector\n        reconst_faces[:, num] = reconst_face\n    recon_error_for_each_image = np.sum((np.linalg.norm(data-reconst_faces)) **2) / N\n    return recon_error_for_each_image, wn_incr\n\n
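# --- Editor's addition (illustrative, not in the original): compute_P3 above diagonalises S3\n# inside span(PHI); because PHI has orthonormal columns, PHI @ R1 is again orthonormal, which is\n# what makes the merged eigenvector matrix P3 valid. Small-scale check with stand-in names:\n_Q, _ = np.linalg.qr(np.random.rand(8, 3))\n_Ssym = np.cov(np.random.rand(8, 20))\n_R1_demo, _evals_demo, _ = np.linalg.svd(np.matmul(np.matmul(_Q.T, _Ssym), _Q))\nassert np.allclose(np.linalg.norm(np.matmul(_Q, _R1_demo), axis=0), 1.0)\n\n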
#Obtains final P3 needed for incremental PCA\nfor ix, set in enumerate(TRAINING_SETS[0:3]):\n\n    # Computes P1, P2, S3, N3\n    avface3, N3, S3, P1, P2, avface1, avface2, run_time_set1, run_time_set2, run_time_set3 = compute_combined_data(training_set, TRAINING_SETS[ix+1])\n\n    run_times_set1.append(run_time_set1) #Times for PCA on set 1\n    run_times_set2.append(run_time_set2) #Times for PCA on set 2\n    run_times_set3.append(run_time_set3) #First part of merging: calculating S3 and N3 : second part of merging is finding P3 and Lambda 3\n\n    #Finding P3 and Lambda 3\n    start_time = time.time() #Second part of merging: finding PHI, R and finally P3\n\n    avface_diff = avface1 - avface2 #M1 - M2\n    PHI = compute_ssmatrix(P1, P2, avface_diff, 104*(ix+1),104) #50*(ix+1) #Finding PHI\n    R1, eigvals3, R2 = compute_P3(PHI, S3) #Finding R\n    P3 = np.matmul(PHI, R1) #Finding P3\n    idx = eigvals3.argsort()[::-1] #Sorting P3 in order\n    P3 = P3[:, idx]\n\n    merge_time = time.time() - start_time #Second part of merging, measured for every new set\n    merge_times.append(merge_time) #merge_times contains all the merging times, for every set\n\n    training_set = np.concatenate(((training_set, TRAINING_SETS[ix+1])), axis=1) #Concatenates the training sets\n\ntraining_model_time = time.time() - start_time_tot #Total training time for incremental PCA\n\"\"\" Reconstruction error for M = 100 using different parameters of d1, d2\n\"\"\"\n\nM = 100\nerror, wn_inc = face_reconstruct_incremental(train_data, M)\nprint('Reconstruction error for M = 100 ', error)\n\n\nprint('Debug')\n\"\"\" COMPARING RECONSTRUCTION ERROR FUNCTIONS \"\"\"\n\n#Reconstruction error arrays : training data\nrecon_error_batch = []\nrecon_error_batch_single = []\nrecon_error_inc = []\n\n#Recognition errors:\nassigned_labels_batch = []\nassigned_labels_batch_single = []\nassigned_labels_inc = []\nactual_labels = []\n\n#Accuracies for each M\naccuracy_batch = []\naccuracy_single = []\naccuracy_inc = []\n\nN = 400\n#Plotting reconstruction and recognition error for each method\nfor m in range(5, N, 1):\n\n    #BATCH\n    error, wn_batch = face_reconstruct_batch(train_data, m, low_eigvecs)\n    error, wn_batch_test = face_reconstruct_batch(test_data, m, low_eigvecs)\n    recon_error_batch.append(error)\n\n    #BATCH SINGLE\n    #error, wn_single = face_reconstruct_batch(training_set_a, m, low_eigvecs_single)\n    #error, wn_single_test = face_reconstruct_batch(test_data, m, low_eigvecs_single)\n    #recon_error_batch_single.append(error)\n\n    #INCREMENTAL\n    error, wn_inc = face_reconstruct_incremental(train_data, m)\n    error, wn_inc_test = face_reconstruct_incremental(test_data, m)\n    recon_error_inc.append(error)\n\n\n    #RECOGNITION ACCURACIES\n    for img in range(test_data.shape[1]):\n\n        #perform recognition on each image\n        index_batch = compute_NN(wn_batch, wn_batch_test[img, :])\n        #index_single = compute_NN(wn_single, wn_single_test[img, :])\n        index_inc = compute_NN(wn_inc, wn_inc_test[img, :])\n\n        #BATCH PCA\n        assigned_label_batch = train_labels[0, index_batch]\n        assigned_labels_batch.append(assigned_label_batch)\n\n        # BATCH SINGLE PCA\n        #assigned_label_batch_single = train_labels[0, index_single]\n        #assigned_labels_batch_single.append(assigned_label_batch_single)\n\n        #INCREMENTAL PCA\n        assigned_label_inc = train_labels[0, index_inc]\n        assigned_labels_inc.append(assigned_label_inc)\n\n        #test image labels\n        actual_label = test_labels[0, img]\n        actual_labels.append(actual_label)\n\n\n    #BOOL ACCURACY, BATCH\n    bool_batch = np.array(actual_labels) == np.array(assigned_labels_batch) # Fixed bool array syntax\n    a_batch = np.sum(bool_batch * 1) / np.size(bool_batch)\n    accuracy_batch.append(a_batch)\n\n    #BOOL ACCURACY, SINGLE\n    #bool_single = np.array(actual_labels) == np.array(assigned_labels_batch_single) # Fixed bool array syntax\n    #a_single = np.sum(bool_single * 1) / np.size(bool_single)\n    #accuracy_single.append(a_single)\n\n    #BOOL ACCURACY, INC\n    bool_inc = np.array(actual_labels) == np.array(assigned_labels_inc) # Fixed bool array syntax\n    a_inc = np.sum(bool_inc * 1) / np.size(bool_inc)\n    accuracy_inc.append(a_inc)\n\n\n#PLOTTING RECONSTRUCTION ACCURACIES\n#plt.plot(range(5, N, 1), recon_error_inc, label = 'Inc PCA')\nplt.plot(range(5, N, 1), recon_error_batch, label = 'Batch PCA')\n#plt.plot(range(5, N, 1), recon_error_batch_single, label = 'Batch PCA on single subset')\nplt.legend()\nplt.xlabel('M values')\nplt.ylabel('Reconstruction error')\nplt.title('Reconstruction error (training) comparisons')\nplt.show()\n\n\n#PLOTTING RECOGNITION ACCURACIES\n#plt.plot(range(5, N, 1), 
100*np.asarray(accuracy_inc), label = 'Inc PCA')\nplt.plot(range(5, N, 1), 100*np.asarray(accuracy_batch), label = 'Batch PCA')\n#plt.plot(range(5, N, 1), 100*np.asarray(accuracy_single), label = 'Batch PCA on single subset')\nplt.legend()\nplt.xlabel('M values')\nplt.ylabel('Recognition accuracy')\nplt.title('Recognition accuracy for Low Dimensional PCA')\nplt.show()\n\n\nprint('debug')","sub_path":"plotting_reconstruction_errors.py","file_name":"plotting_reconstruction_errors.py","file_ext":"py","file_size_in_byte":19654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"468844486","text":"from django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.http.response import HttpResponse\n\ndef notifyUser(u, m,pname):\n subject = 'CodeLink: You have been added to a project.'\n message = 'You have been added to the project ' + pname + ' as a '+m.mem_type+'. Log in to view the project here.\\nhttp://codelink.me' \n from_email = settings.EMAIL_HOST_USER\n send_mail(subject, message,from_email,[u.email],fail_silently=True)\n\ndef notifyUser2(request):\n pname = 'big duck project'\n subject = 'You have been added to a project'\n message = 'steven has added you to the project ' + pname + '.\\nLog in to view the project here.\\nhttp://codelink.me' \n from_email = settings.EMAIL_HOST_USER\n send_mail(subject, message,from_email,['rmevans88@gmail.com'],fail_silently=True)\n return HttpResponse(\"success?\")\n\n#need to fill in code for actual authentication\ndef verifyUser(u): \n subject = 'codelink account confirmation'\n message = 'Hey ' + u.username + ', welcome to codelink. We need to verify your email in order to activate your ' + 'account. 
Please click the link below to complete the verification process'\n from_email = settings.EMAIL_HOST_USER\n send_mail(subject, message, from_email, [u.email], fail_silently=True)\n ","sub_path":"codeapp/views/view_email.py","file_name":"view_email.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"551642162","text":"from tensorflow.keras.models import load_model\nimport pickle\nimport numpy as np \nimport os\nimport cv2\n\n# define base path\nBASE_PATH = os.path.dirname(__file__)\n\n# image dimensions\nIMG_DIM = 224\n\n# load label mapper\nwith open(os.path.join(BASE_PATH, 'labels_mapper.pkl'), 'rb') as f:\n\tLABELS = pickle.load(f)\n\ntest_path = r'/media/gagandeep/2E92405C92402AA3/Work/UoN/Dissertation/Datasets/Prepared/Data/Hyundai_Veloster_2013/front/Hyundai_Veloster_2013_21_18_130_16_4_70_55_166_28_FWD_4_3_2dr_GBw.jpg'\n\nclass Classifier(object):\n\n\tdef __init__(self):\n\t\t# load model\n\t\tself.model = load_model(os.path.join(BASE_PATH, 'model_mobile_net.h5'))\n\t\t# image_path\n\t\tself.img = None\n\t\tself.res = None\n\n\tdef preprocess_image(self, img_path):\n\t\tself.img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE).astype(\"float32\")\n\t\tself.img = cv2.resize(self.img, (IMG_DIM, IMG_DIM))\n\t\tself.img = self.img/255.\n\t\tself.img = np.expand_dims(np.stack((self.img,)*3, axis=-1), axis=0)\n\n\tdef predict(self, img_path):\n\t\tself.preprocess_image(img_path)\n\t\tself.res = np.argmax(self.model.predict(self.img), axis=-1)[0]\n\t\treturn LABELS[self.res]","sub_path":"Monolith/Classify.py","file_name":"Classify.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"427870015","text":"\"\"\"\nConstants that are used globally\n\"\"\"\n\n# roman number to numerical except for unranked\nTIER_RANK_MAP = {\"I\": \"1\", \"II\": \"2\", \"III\": \"3\", \"IV\": \"4\"}\n\n# maximum number of players that can be added\nMAX_NUM_PLAYERS_TEAM = 10\n\n# tiers with default I rank numbers, which include master, grandmaster, challenger\nUNCOMMON_TIERS = [\"UNRANKED\", \"MASTER\", \"GRANDMASTER\", \"CHALLENGER\"]\n\n# for displaying uranked, master, grandmaster, challenger\nUNCOMMON_TIER_DISPLAY_MAP = {\n \"UNRANKED\": \"UR\",\n \"MASTER\": \"MA\",\n \"GRANDMASTER\": \"GM\",\n \"CHALLENGER\": \"CH\",\n}\nTIER_VALUE = {\n \"UNRANKED\": 3,\n \"IRON\": 1,\n \"BRONZE\": 2,\n \"SILVER\": 3,\n \"GOLD\": 4,\n \"PLATINUM\": 5,\n \"DIAMOND\": 6,\n # master, grandmaster, challenger only have lp points starting 0lp for master,\n # around 300lp for grandmaster and 650lp for master\n # these are not fixed lp points as they are assigned to GM or Challenger\n # depending on the ranking not based on how much lp points they have\n # Challenger: 1 ~ 300, GrandMaster: 301 ~ 1000\n \"MASTER\": 7,\n \"GRANDMASTER\": 7,\n \"CHALLENGER\": 7,\n}\n\nRANK_VALUE = {\"I\": 0.75, \"II\": 0.5, \"III\": 0.25, \"IV\": 0}\n","sub_path":"utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"595098116","text":"#!/usr/bin/env python\n# coding=utf-8\n# The example of embedding generation and classification demostration.\n# Royrong(royrong@tencent.com) 2018/10/24\n# The parameters are defined in conf.py\nimport argparse\nimport logging\nimport os\n\nimport conf\n\n\ndef parse_args():\n\tparser = 
argparse.ArgumentParser()\n\tparser.add_argument('--dataset', type=str, default='cora', required=True)\n\n\treturn parser.parse_args()\n\n\nif __name__ == \"__main__\":\n\targs = parse_args()\n\tdataset = args.dataset\n\n\tmethod = conf.method\n\tinput_folder = conf.input_folder + str(dataset)\n\n\toutput_folder = conf.output_folder + \"/\" + str(dataset)\n\n\tif not os.path.exists(output_folder):\n\t\tos.makedirs(output_folder)\n\n\tl = conf.l\n\tr = conf.r\n\tp = conf.p\n\n\td = conf.d\n\tk = conf.k\n\te = conf.e\n\tneg = conf.neg\n\tit = conf.it\n\temb_alpha = conf.emb_alpha\n\n\tres_file = os.path.join(output_folder, \"./%s.res\" % (method))\n\temb_file = os.path.join(output_folder, \"./%s.emb\" % (method))\n\tlogging.info(\"This is the demo for logistic regression using the embedding vectors\")\n\n\t# Performing example logistic regression\n\tif os.path.exists(emb_file):\n\t\tmax_iter = 300\n\t\tlr_cmd = \"python3 ./classifier/logistic_regression.py --verbose 0 --input_folder %s --emb_file %s --res_file %s --max_iter %d\" % (\n\t\t\tinput_folder, emb_file, res_file, it)\n\n\t\tos.system(lr_cmd)\n\telse:\n\t\tlogging.info(\"no emb file\")\n\t\texit(1)\n\tres_file = res_file + \".prec_rec\"\n\n\tif os.path.exists(res_file):\n\t\tfin = open(res_file, 'r')\n\t\tap = 0\n\t\tfor l in fin:\n\t\t\tap = ap + float(l.strip().split(' ')[-2])\n\t\tfout = open(os.path.join(output_folder, \"result_file\"), 'w')\n\t\tfout.write(\"object_value=%f\" % (ap))\n\t\tfout.close()\n\telse:\n\t\tlogging.info(\"No res file.\")\n\t\texit(1)\n","sub_path":"BGNN/Node2Vec/binary_classification.py","file_name":"binary_classification.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"88994137","text":"# For a prime p let S(p) = (∑ (p-k)!) mod(p) for 1 ≤ k ≤ 5.\n#\n# For example, if p=7,\n# (7-1)! + (7-2)! + (7-3)! + (7-4)! + (7-5)! = 6! + 5! + 4! + 3! + 2!\n# = 720 + 120 + 24 + 6 + 2 = 872.\n# As 872 mod(7) = 4, S(7) = 4.\n#\n# It can be verified that ∑ S(p) = 480 for 5 ≤ p < 100.\n#\n# Find ∑ S(p) for 5 ≤ p < 10^8.\n\n# THEORY:\n#\n# (p-1)! + (p-2)! = (p-1 + 1) * (p-2)! = p * (p-2)! = 0 mod p\n# So we can safely disregard the first two terms.\n#\n# (p-3)! + (p-4)! + (p-5)! = ((p-3)(p-4) + (p-4) + 1) * (p-5)!\n# = 9 * (p-5)! mod p.\n#\n# The numbers 1, ..., p-1 mod p form a cyclic multiplicative group of order p-1.\n# Thus they're generated by some Y.\n# Then (p-1)! = Y^0 * Y^1 * Y^2 * ... * Y^(p-2)\n# = Y^(0 + 1 + 2 + ... + (p-2))\n# = Y^( (p-2) * (p-1) / 2 )\n# = (-1)^(p-2)\n# = -1\n# because p is odd; therefore (p-1)! = (p-1) mod p, so (p-2)! 
= 1 mod p.\n#\n# It follows that S(p) = 9 * (p-2)^-1 * (p-3)^-1 * (p-4)^-1\n# = 9 * (-24)^-1 mod p\n# = -3 / 8 mod p.\n# If p % 4 = 1, then S(p) = -3 / 8 * (p^2 - 2 * p + 1).\n# If p % 4 = 3, then S(p) = -3 / 8 * (3 * p^2 - 4 * p + 1).\n\nfrom time import time\nimport sys\nsys.path.append(\"../Library\")\nfrom peresult import peresult\nfrom primefns import primesbelow\n\ndef solve(cap = 10 ** 8):\n primes = primesbelow(cap)\n result = 0\n for p in primes[2:]: # Ignoring 2 and 3\n if p % 4 == 1:\n result += -3 * (p ** 2 - 2 * p + 1) // 8 % p\n else:\n result += -3 * (3 * p ** 2 - 4 * p + 1) // 8 % p\n return result\n\nif __name__ == \"__main__\":\n start = time()\n peresult(381, solve(), time() - start)\n","sub_path":"Problems 301-400/p381_PrimeMinusKFactorial.py","file_name":"p381_PrimeMinusKFactorial.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"544867742","text":"import torch\nimport torch.optim as optim\nimport numpy as np\nimport os, sys\nimport argparse\nimport time, datetime\nfrom functions import my_f1_score, my_acc_score, my_precision_score, weighted_cross_entropy_loss, wce_huber_loss, \\\n wce_huber_loss_8, my_recall_score, debug_ce, cross_entropy_loss, wce_dice_huber_loss\nfrom torch.nn import init\nfrom dataset import DataParser, gen_band_gt\nfrom model.model_812 import Net\nfrom PIL import Image\nimport shutil\nfrom torch.optim import lr_scheduler\nimport matplotlib.pyplot as plt\nfrom tensorboardX import SummaryWriter\nfrom utils import Logger, Averagvalue, weights_init, load_pretrained, save_mid_result,send_msn\nfrom os.path import join, split, isdir, isfile, splitext, split, abspath, dirname\n\n\"\"\"\nCreated by HaoRan\ntime: 11/5\ndescription:\n1. 
stage one training\n\"\"\"\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Parameters \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nparser = argparse.ArgumentParser(description='PyTorch Training')\nparser.add_argument('--batch_size', default=5, type=int, metavar='BT',\n                    help='batch size')\n\n# =============== optimizer\nparser.add_argument('--lr', '--learning_rate', default=1e-3, type=float,\n                    metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n                    help='momentum')\nparser.add_argument('--weight_decay', '--weight_decay', default=2e-2, type=float,\n                    metavar='W', help='default weight decay')\nparser.add_argument('--stepsize', default=4, type=int,\n                    metavar='SS', help='learning rate step size')\nparser.add_argument('--gamma', '--gm', default=0.1, type=float,\n                    help='learning rate decay parameter: Gamma')\nparser.add_argument('--maxepoch', default=1000, type=int, metavar='N',\n                    help='number of total epochs to run')\nparser.add_argument('--itersize', default=10, type=int,\n                    metavar='IS', help='iter size')\n# =============== misc\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n                    help='manual epoch number (useful on restarts)')\nparser.add_argument('--print_freq', '-p', default=10, type=int,\n                    metavar='N', help='print frequency (default: 50)')\nparser.add_argument('--gpu', default='0', type=str,\n                    help='GPU ID')\n\nparser.add_argument('--resume', default='/home/liu/chenhaoran/Mymodel/save_model/model_stage_one_casia_template_sp_train/1211_casia_template_sp_negative_checkpoint28-stage1-0.151637-f10.713804-precision0.859841-acc0.987976-recall0.627347.pth', type=str, metavar='PATH',\n                    help='path to latest checkpoint (default: none)')\nparser.add_argument('--tmp', help='tmp folder', default='tmp/HED')\nparser.add_argument('--mid_result_root', type=str, help='mid_result_root', default='./save')\nparser.add_argument('--model_save_dir', type=str, help='model_save_dir', default='./save_model/stage1_template_cod10k_cm_sp_negative_train')\nparser.add_argument('--mid_result_index', type=list, help='mid_result_index', default=[0])\nparser.add_argument('--per_epoch_freq', type=int, help='per_epoch_freq', default=50)\n\nparser.add_argument('--fuse_loss_weight', type=int, help='fuse_loss_weight', default=12)\n# ================ dataset\n\nparser.add_argument('--dataset', help='root folder of dataset', default='dta/HED-BSD')\nparser.add_argument('--band_mode', help='whether to use band instead of normal gt', type=bool, default=True)\nparser.add_argument('--save_mid_result', help='whether to save mid results', type=bool, default=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Paths \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nmodel_save_dir = abspath(dirname(__file__))\nmodel_save_dir = join(model_save_dir, args.model_save_dir)\nif not isdir(model_save_dir):\n    os.makedirs(model_save_dir)\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" ↓↓↓↓ parameters that need to be modified ↓↓↓↓ \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\n# tensorboard usage\nwriter = SummaryWriter(\n    'runs/' + '1225_%d-%d_tensorboard_TEST' % (datetime.datetime.now().month, datetime.datetime.now().day))\noutput_name_file_name = '1225_template_sp_negative_COD10K_checkpoint%d-stage1-%f-f1%f-precision%f-acc%f-recall%f.pth'\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" ↑↑↑↑ parameters that need to be modified ↑↑↑↑ \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n
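# --- Editor's note (illustrative, not in the original): the checkpoint template above\n# takes six values, e.g. output_name_file_name % (28, 0.151637, 0.713804, 0.859841, 0.987976, 0.627347),\n# which follows the same naming pattern as the --resume default in the argument parser.\n\n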
def generate_minibatches(dataParser, train=True):\n    while True:\n        if train:\n            batch_ids = np.random.choice(dataParser.X_train, dataParser.batch_size)\n            ims, ems, double_edge, chanel1, chanel2, chanel3, chanel4, chanel5, chanel6, chanel7, chanel8, chanel_fuse, edgemaps_4, edgemaps_8, edgemaps_16, _ = dataParser.get_batch(\n                batch_ids)\n        else:\n            batch_ids = np.random.choice(dataParser.X_test, dataParser.batch_size)\n            ims, ems, double_edge, chanel1, chanel2, chanel3, chanel4, chanel5, chanel6, chanel7, chanel8, chanel_fuse, edgemaps_4, edgemaps_8, edgemaps_16, _ = dataParser.get_batch(\n                batch_ids, train=False)\n\n        # Move the channel axis to the PyTorch NCHW layout\n        ims = ims.transpose(0, 3, 1, 2)\n        chanel1 = chanel1.transpose(0, 3, 1, 2)\n        chanel2 = chanel2.transpose(0, 3, 1, 2)\n        chanel3 = chanel3.transpose(0, 3, 1, 2)\n        chanel4 = chanel4.transpose(0, 3, 1, 2)\n        chanel5 = chanel5.transpose(0, 3, 1, 2)\n        chanel6 = chanel6.transpose(0, 3, 1, 2)\n        chanel7 = chanel7.transpose(0, 3, 1, 2)\n        chanel8 = chanel8.transpose(0, 3, 1, 2)\n        double_edge = double_edge.transpose(0, 3, 1, 2)\n\n        # Choose whether to use the band ground truth\n        if True:\n            double_edge = gen_band_gt(double_edge)\n            pass\n        # ims_t = ims.transpose(0,1,2,3)\n        # plt.figure('ims')\n        # plt.imshow(ims[0,0,:,:]*255)\n        # plt.show()\n        #\n        # # plt.show()\n        # # plt.savefig(\"temp_ims.png\")\n        #\n        # plt.figure('gt')\n        # plt.imshow(double_edge[0,0,:,:])\n        # plt.show()\n\n        # plt.show()\n        # plt.savefig(\"temp_gt.png\")\n        yield (ims, [double_edge, chanel1, chanel2, chanel3, chanel4, chanel5, chanel6, chanel7, chanel8])\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Entry point \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\ndef main():\n    args.cuda = True\n    # data\n    dataParser = DataParser(args.batch_size)\n    # model\n    model = Net()\n    if torch.cuda.is_available():\n        model.cuda()\n    else:\n        model.cpu()\n\n    model.apply(weights_init)\n    # Model initialisation\n    # without this step the weights would be initialised automatically from a normal distribution\n    # model.apply(weights_init)\n\n    # Model persistence\n    # this is the configuration in the tensorflow code: optimizer = Adam(lr=1e-2, beta_1=0.9, beta_2=0.999)\n    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-8)\n\n    if args.resume:\n        if isfile(args.resume):\n            print(\"=> loading checkpoint '{}'\".format(args.resume))\n            checkpoint = torch.load(args.resume)\n            model.load_state_dict(checkpoint['state_dict'])\n            print(\"=> loaded checkpoint '{}'\".format(args.resume))\n            # optimizer.load_state_dict(checkpoint['optimizer'])\n\n        else:\n            print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n    else:\n        print(\"=> no checkpoint found at '{}'\".format(args.resume))\n        sys.exit()\n\n    # Adjust learning rate, option 1\n    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)\n\n    # Adjust learning rate, option 2\n    # scheduler = lr_scheduler.ReduceLROnPlateau()\n    # Data iterator\n\n    for epoch in range(args.start_epoch, args.maxepoch):\n        train_avg = train(model=model, optimizer=optimizer, dataParser=dataParser, epoch=epoch)\n        val_avg = val(model=model, dataParser=dataParser, epoch=epoch)\n\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n        \" Write to tensorboard \"\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n        writer.add_scalar('tr_avg_loss_per_epoch', train_avg['loss_avg'], global_step=epoch)\n        writer.add_scalar('tr_avg_f1_per_epoch', train_avg['f1_avg'], global_step=epoch)\n        
writer.add_scalar('tr_avg_precision_per_epoch', train_avg['precision_avg'], global_step=epoch)\n        writer.add_scalar('tr_avg_acc_per_epoch', train_avg['accuracy_avg'], global_step=epoch)\n        writer.add_scalar('tr_avg_recall_per_epoch', train_avg['recall_avg'], global_step=epoch)\n\n        writer.add_scalar('val_avg_loss_per_epoch', val_avg['loss_avg'], global_step=epoch)\n        writer.add_scalar('val_avg_f1_per_epoch', val_avg['f1_avg'], global_step=epoch)\n        writer.add_scalar('val_avg_precision_per_epoch', val_avg['precision_avg'], global_step=epoch)\n        writer.add_scalar('val_avg_acc_per_epoch', val_avg['accuracy_avg'], global_step=epoch)\n        writer.add_scalar('val_avg_recall_per_epoch', val_avg['recall_avg'], global_step=epoch)\n\n        writer.add_scalar('lr_per_epoch', scheduler.get_lr(), global_step=epoch)\n\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n        \" Write to tensorboard \"\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n        # Save the model\n        \"\"\"\n        info = 'Epoch: [{0}/{1}][{2}/{3}] '.format(epoch, args.maxepoch, batch_index, dataParser.steps_per_epoch) + \\\n               'Time {batch_time.val:.3f} (avg:{batch_time.avg:.3f}) '.format(batch_time=batch_time) + \\\n               'Loss {loss.val:f} (avg:{loss.avg:f}) '.format(loss=losses) + \\\n               'f1_score {f1.val:f} (avg:{f1.avg:f}) '.format(f1=f1_value) + \\\n               'precision_score: {precision.val:f} (avg:{precision.avg:f}) '.format(precision=precision_value) + \\\n               'acc_score {acc.val:f} (avg:{acc.avg:f})'.format(acc=acc_value) +\\\n               'recall_score {recall.val:f} (avg:{recall.avg:f})'.format(recall=recall_value)\n\n        \"\"\"\n\n        output_name = output_name_file_name % \\\n                      (epoch,val_avg['loss_avg'],\n                       val_avg['f1_avg'],\n                       val_avg['precision_avg'],\n                       val_avg['accuracy_avg'],\n                       val_avg['recall_avg'])\n        try:\n            send_msn(epoch,f1=val_avg['f1_avg'])\n        except:\n            pass\n        if epoch % 1 == 0:\n            save_model_name = os.path.join(args.model_save_dir, output_name)\n            torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},\n                       save_model_name)\n\n        scheduler.step(epoch)\n\n    print('Training finished!')\n\n\n
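# --- Editor's sketch (illustrative, not in the original): with the defaults lr=1e-3,\n# stepsize=4 and gamma=0.1, the StepLR schedule built in main() gives\n# lr = 1e-3 for epochs 0-3, 1e-4 for epochs 4-7, 1e-5 for epochs 8-11, ...\n# i.e. lr * gamma ** (epoch // stepsize).\n\n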
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\" Training \"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\ndef train(model, optimizer, dataParser, epoch):\n    # Iterator that reads the data\n    train_epoch = int(dataParser.steps_per_epoch)\n    # Running-average meters\n    batch_time = Averagvalue()\n    data_time = Averagvalue()\n    losses = Averagvalue()\n    f1_value = Averagvalue()\n    acc_value = Averagvalue()\n    recall_value = Averagvalue()\n    precision_value = Averagvalue()\n    map8_loss_value = Averagvalue()\n\n    # switch to train mode\n    model.train()\n    end = time.time()\n\n    for batch_index, (images, labels_numpy) in enumerate(generate_minibatches(dataParser, True)):\n        # Time spent reading data\n        data_time.update(time.time() - end)\n\n        # Convert the loaded numpy arrays to torch tensors\n        labels = []\n        if torch.cuda.is_available():\n            images = torch.from_numpy(images).cuda()\n            for item in labels_numpy:\n                labels.append(torch.from_numpy(item).cuda())\n        else:\n            images = torch.from_numpy(images)\n            for item in labels_numpy:\n                labels.append(torch.from_numpy(item))\n\n        if torch.cuda.is_available():\n            loss = torch.zeros(1).cuda()\n            loss_8t = torch.zeros(()).cuda()\n        else:\n            loss = torch.zeros(1)\n            loss_8t = torch.zeros(())\n\n        with torch.set_grad_enabled(True):\n            images.requires_grad = True\n            optimizer.zero_grad()\n            # Network output\n            outputs = model(images)\n            # Code for saving intermediate results goes here\n            if args.save_mid_result:\n                if batch_index in args.mid_result_index:\n                    save_mid_result(outputs, labels, epoch, batch_index, args.mid_result_root, save_8map=True,\n                                    train_phase=True)\n                else:\n                    pass\n            else:\n                pass\n            \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n            \" Loss function \"\n            \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n            if not args.band_mode:\n                # If band_mode is off, the losses of the 8 maps must be computed as well\n                loss = wce_dice_huber_loss(outputs[0], labels[0]) * args.fuse_loss_weight\n\n                writer.add_scalar('fuse_loss_per_epoch', loss.item() / args.fuse_loss_weight,\n                                  global_step=epoch * train_epoch + batch_index)\n\n                for c_index, c in enumerate(outputs[1:]):\n                    one_loss_t = wce_dice_huber_loss(c, labels[c_index + 1])\n                    loss_8t += one_loss_t\n                    writer.add_scalar('%d_map_loss' % (c_index), one_loss_t.item(), global_step=train_epoch)\n                loss += loss_8t\n                loss = loss / 20\n            else:\n                loss = wce_dice_huber_loss(outputs[0], labels[0])\n                writer.add_scalar('fuse_loss_per_epoch', loss.item(),\n                                  global_step=epoch * train_epoch + batch_index)\n\n            loss.backward()\n            optimizer.step()\n\n        # Record the statistics in their meters\n        losses.update(loss.item())\n        map8_loss_value.update(loss_8t.item())\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        # Evaluation metrics\n        f1score = my_f1_score(outputs[0], labels[0])\n        precisionscore = my_precision_score(outputs[0], labels[0])\n        accscore = my_acc_score(outputs[0], labels[0])\n        recallscore = my_recall_score(outputs[0], labels[0])\n\n        writer.add_scalar('f1_score', f1score, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('precision_score', precisionscore, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('acc_score', accscore, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('recall_score', recallscore, global_step=epoch * train_epoch + batch_index)\n        ################################\n\n        f1_value.update(f1score)\n        precision_value.update(precisionscore)\n        acc_value.update(accscore)\n        recall_value.update(recallscore)\n\n        if batch_index % args.print_freq == 0:\n            info = 'Epoch: [{0}/{1}][{2}/{3}] '.format(epoch, args.maxepoch, batch_index, dataParser.steps_per_epoch) + \\\n                   'Time {batch_time.val:.3f} (avg:{batch_time.avg:.3f}) '.format(batch_time=batch_time) + \\\n                   'Loss {loss.val:f} (avg:{loss.avg:f}) '.format(loss=losses) + \\\n                   'f1_score {f1.val:f} (avg:{f1.avg:f}) '.format(f1=f1_value) + \\\n                   'precision_score: {precision.val:f} (avg:{precision.avg:f}) '.format(precision=precision_value) + \\\n                   'acc_score {acc.val:f} (avg:{acc.avg:f})'.format(acc=acc_value) + \\\n                   'recall_score {recall.val:f} (avg:{recall.avg:f})'.format(recall=recall_value)\n\n            print(info)\n\n        if batch_index >= train_epoch:\n            break\n\n    return {'loss_avg': losses.avg,\n            'f1_avg': f1_value.avg,\n            'precision_avg': precision_value.avg,\n            'accuracy_avg': acc_value.avg,\n            'recall_avg': recall_value.avg}\n\n\n
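# --- Editor's sketch (illustrative, assuming the utils.Averagvalue meter exposes .val and .avg\n# as used below): m = Averagvalue(); m.update(0.5); m.update(1.5)  ->  m.val == 1.5, m.avg == 1.0\n\n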
@torch.no_grad()\ndef val(model, dataParser, epoch):\n    # Iterator that reads the data\n    train_epoch = int(dataParser.val_steps)\n    # Running-average meters\n    batch_time = Averagvalue()\n    data_time = Averagvalue()\n    losses = Averagvalue()\n    f1_value = Averagvalue()\n    acc_value = Averagvalue()\n    recall_value = Averagvalue()\n    precision_value = Averagvalue()\n    map8_loss_value = Averagvalue()\n\n    # switch to test mode\n    model.eval()\n    end = time.time()\n\n    for batch_index, (images, labels_numpy) in enumerate(generate_minibatches(dataParser, False)):\n        # Time spent reading data\n        data_time.update(time.time() - end)\n\n        # Convert the loaded numpy arrays to torch tensors\n        labels = []\n        if torch.cuda.is_available():\n            images = torch.from_numpy(images).cuda()\n            for item in labels_numpy:\n                labels.append(torch.from_numpy(item).cuda())\n        else:\n            images = torch.from_numpy(images)\n            for item in labels_numpy:\n                labels.append(torch.from_numpy(item))\n\n        if torch.cuda.is_available():\n            loss = torch.zeros(1).cuda()\n            loss_8t = torch.zeros(()).cuda()\n        else:\n            loss = torch.zeros(1)\n            loss_8t = torch.zeros(())\n\n        # Network output\n        outputs = model(images)\n        # Code for saving intermediate results goes here\n        if args.save_mid_result:\n            if batch_index in args.mid_result_index:\n                save_mid_result(outputs, labels, epoch, batch_index, args.mid_result_root, save_8map=True,\n                                train_phase=True)\n            else:\n                pass\n        else:\n            pass\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n        \" Loss function \"\n        \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n        if not args.band_mode:\n            # If band_mode is off, the losses of the 8 maps must be computed as well\n            loss = wce_dice_huber_loss(outputs[0], labels[0]) * args.fuse_loss_weight\n\n            writer.add_scalar('val_fuse_loss_per_epoch', loss.item() / args.fuse_loss_weight,\n                              global_step=epoch * train_epoch + batch_index)\n\n            for c_index, c in enumerate(outputs[1:]):\n                one_loss_t = wce_dice_huber_loss(c, labels[c_index + 1])\n                loss_8t += one_loss_t\n                writer.add_scalar('val_%d_map_loss' % (c_index), one_loss_t.item(), global_step=train_epoch)\n            loss += loss_8t\n            loss = loss / 20\n        else:\n            loss = wce_dice_huber_loss(outputs[0], labels[0])\n            writer.add_scalar('val_fuse_loss_per_epoch', loss.item(),\n                              global_step=epoch * train_epoch + batch_index)\n\n        # Record the statistics in their meters\n        losses.update(loss.item())\n        map8_loss_value.update(loss_8t.item())\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        # Evaluation metrics\n        f1score = my_f1_score(outputs[0], labels[0])\n        precisionscore = my_precision_score(outputs[0], labels[0])\n        accscore = my_acc_score(outputs[0], labels[0])\n        recallscore = my_recall_score(outputs[0], labels[0])\n\n        writer.add_scalar('val_f1_score', f1score, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('val_precision_score', precisionscore, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('val_acc_score', accscore, global_step=epoch * train_epoch + batch_index)\n        writer.add_scalar('val_recall_score', recallscore, global_step=epoch * train_epoch + batch_index)\n\n        ################################\n\n        f1_value.update(f1score)\n        precision_value.update(precisionscore)\n        acc_value.update(accscore)\n        recall_value.update(recallscore)\n\n        if batch_index % args.print_freq == 0:\n            info = 'Epoch: [{0}/{1}][{2}/{3}] '.format(epoch, args.maxepoch, batch_index, dataParser.val_steps) + \\\n                   'Time {batch_time.val:.3f} (avg:{batch_time.avg:.3f}) '.format(batch_time=batch_time) + \\\n                   'val_Loss {loss.val:f} (avg:{loss.avg:f}) '.format(loss=losses) + \\\n                   'val_f1_score {f1.val:f} (avg:{f1.avg:f}) '.format(f1=f1_value) + \\\n                   'val_precision_score: {precision.val:f} (avg:{precision.avg:f}) '.format(precision=precision_value) + \\\n                   'val_acc_score {acc.val:f} (avg:{acc.avg:f})'.format(acc=acc_value) + \\\n                   'val_recall_score {recall.val:f} (avg:{recall.avg:f})'.format(recall=recall_value)\n\n            print(info)\n\n        if batch_index >= train_epoch:\n            break\n\n    return {'loss_avg': losses.avg,\n            'f1_avg': f1_value.avg,\n            'precision_avg': precision_value.avg,\n            'accuracy_avg': acc_value.avg,\n            'recall_avg': recall_value.avg}\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"stage_one_train.py","file_name":"stage_one_train.py","file_ext":"py","file_size_in_byte":20808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"94615505","text":"import numpy as np\nimport pytest\n\nfrom ..parameter_averages import G16\nfrom ..averages import G03_SMCBar\nfrom .test_f99 import get_axav_cor_vals as 
get_axav_cor_vals_fA_1\n\n\ndef test_extinction_G16_fA_1_values():\n # get the correct values\n x, cor_vals, tolerance = get_axav_cor_vals_fA_1()\n\n # initialize extinction model\n tmodel = G16(RvA=3.1, fA=1.0)\n\n # test\n np.testing.assert_allclose(tmodel(x), cor_vals, rtol=tolerance)\n\n\ndef test_extinction_G16_fA_0_values():\n # initialize the model\n tmodel = G16(fA=0.0)\n\n # get the correct values\n gmodel = G03_SMCBar()\n x = gmodel.obsdata_x\n cor_vals = gmodel.obsdata_axav\n tolerance = gmodel.obsdata_tolerance\n\n # test\n np.testing.assert_allclose(tmodel(x), cor_vals, rtol=tolerance)\n\n\nx_vals, axav_vals, tolerance = get_axav_cor_vals_fA_1()\ntest_vals = zip(x_vals, axav_vals, np.full(len(x_vals), tolerance))\n\n\n@pytest.mark.parametrize(\"test_vals\", test_vals)\ndef test_extinction_G16_fA_1_single_values(test_vals):\n x, cor_val, tolerance = test_vals\n\n # initialize extinction model\n tmodel = G16(RvA=3.1, fA=1.0)\n\n # test\n np.testing.assert_allclose(tmodel(x), cor_val, rtol=tolerance)\n np.testing.assert_allclose(tmodel.evaluate(x, 3.1, 1.0), cor_val, rtol=tolerance)\n","sub_path":"dust_extinction/tests/test_g16.py","file_name":"test_g16.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"114768734","text":"import featuresAndClassifiers as fb\nimport numpy as np\nfrom sklearn.cross_validation import KFold\n\n\"\"\"\n The following command converts the correctly named files to BOW:\n The files should be stored in ./SystematicReviewDataset/ and \n have the names listed in dataProcess.py \n Comment out this line if preprocessing has already been done and\n the relevant numpy objects save\n\"\"\"\n#fb.dataProcess.process_and_save()\n\n\n# The following is a grid search over the parameters gamma and the\n# class weight multipliers for an SVM with RBF Kernel. It will \n# read data created by the dataProcess command above. 
\ngSTEPS = 11 \ngInc = 0.02\n\nwSTEPS = 3\nwInc = 0.05\n\ndrugClasses = [ 'units_Estrogens', 'units_OralHypoglycemics', 'units_Triptan','units_BB' ]\nsectionIdx = { \"Title\": 0, \"Abstract\": 1, \"Meta\": 2, \"Mesh\": 3 }\n\ndrugClass = drugClasses[0]\n#for drugClass in drugClasses:\nfor i in range(1):\n \n gamma = 0.16\n drugResults = []\n\n classes, features = fb.getFeatures(drugClass)\n\n for i in range(gSTEPS):\n gammaResutls = []\n weights = [0.75, 1.25]\n for j in range(wSTEPS):\n cvResults = []\n for k in range(5):\n cvResult = fb.runCV(classes, features, gamma, weights)\n cvResults = np.mean(cvResult, axis=0)\n \n drugResults.append(cvResults)\n\n \n weights[0] += wInc\n weights[1] -= wInc\n gamma += gInc\n \n np.save(drugClass, drugResults)\n\n\n\"\"\"\n\nfor drugClass in drugClasses:\n drugClass = drugClasses[2]\n gamma = 0.04\n drugResults = []\n\n for i in range(gSTEPS):\n gammaResults = []\n weights = [0.85, 1.15]\n for j in range(wSTEPS):\n\n print \"Drug:\", drugClass\n\n classes, allSections = fb.load_data(drugClass, True, [ \\\n sectionIdx['Title'], \\\n sectionIdx['Abstract'], \\\n sectionIdx['Meta'], \\\n sectionIdx['Mesh'] \\\n ] )\n tfidf = fb.get_Tfidf(allSections)\n data = tfidf\n print \"Proportion included:\", \\\n float(np.shape(np.where(classes == fb.dataProcess.INCLUDED)[0])[0])/np.shape(classes)[0]\n print float(np.shape(np.where(classes == fb.dataProcess.EXCLUDED)[0])[0])/np.shape(classes)[0]\n kf = KFold(classes.shape[0], 10)\n\n cvResults = []\n for trainIdx, testIdx in kf:\n trueVals = classes[testIdx]\n\n predictions = fb.run_svm(data[trainIdx], data[testIdx], classes[trainIdx], gamma, weights)\n\n cm, wss, recall = fb.interpret(predictions, trueVals, testIdx)\n\n cvResults.append([cm[0][0], cm[0][1], cm[1][0], cm[1][1], wss, recall, gamma, weights[0], weights[1] ])\n print\n print \"Weights:\", weights, \" Gamma:\", gamma\n print \"**********\"\n drugResults.append(cvResults)\n\n weights[0] += wInc\n weights[1] -= wInc\n gamma += gInc\n\n np.save(drugClass, drugResults)\n #SAVE THINGS\n\"\"\"\n","sub_path":"MedReview/runPreprocessAndGridSearch.py","file_name":"runPreprocessAndGridSearch.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"489012743","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport random\n\nclass CoTeachingLoss(nn.Module):\n def __init__(self, noise_rate=0.1):\n super(CoTeachingLoss, self).__init__()\n self.mse = nn.MSELoss(reduction='none')\n self.noise_rate = noise_rate\n\n def forward(self, xr1, xr2, x):\n mse1 = self.mse(xr1, x).mean(dim=(1,2,3))\n mse2 = self.mse(xr2, x).mean(dim=(1,2,3))\n idxsortd1 = mse1.argsort().detach()\n idxsortd2 = mse2.argsort().detach()\n #idxsortd1 = np.argsort(mse1.cpu().data)\n #idxsortd2 = np.argsort(mse2.cpu().data)\n #print(idxsortd1)\n #print(mse1,mse2,idxsortd1,idxsortd2)\n #return mse1.mean(), mse2.mean()\n rem_num = int(x.size(0) * (1. 
- self.noise_rate))\n return mse1[idxsortd2[:rem_num]].mean(), \\\n mse2[idxsortd1[:rem_num]].mean()\n\nclass CoTeachingResnetLoss(nn.Module):\n def __init__(self, noise_rate=0.1, score_mode='pl_mean'):\n super(CoTeachingResnetLoss, self).__init__()\n self.noise_rate = noise_rate\n self.score_mode = score_mode\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, o1, o2, labels):\n ce1 = F.cross_entropy(o1, labels, reduction='none')\n ce2 = F.cross_entropy(o2, labels, reduction='none')\n idxsortd1 = ce1.argsort().detach()\n idxsortd2 = ce2.argsort().detach()\n rem_num = int(o1.size(0) * (1. - self.noise_rate))\n return ce1[idxsortd2[:rem_num]].mean(), \\\n ce2[idxsortd1[:rem_num]].mean()\n\n def pl_mean(self, x, labels):\n raise NotImplementedError\n\n def neg_entropy(self, x):\n ne = self.softmax(x)\n return (ne *torch.log2(ne)).sum(dim=1)\n\n def predict(self, x, labels):\n if self.score_mode == 'pl_mean':\n return self.pl_mean(x, labels)\n else:\n return self.neg_entropy(x)\n\nclass InCoTeachingEstLoss(nn.Module):\n def __init__(self, lamd, cpd_channels, mode, noise_rate=0.1):\n super(InCoTeachingEstLoss, self).__init__()\n self.mse = nn.MSELoss(reduction='none')\n self.noise_rate = noise_rate\n self.lamd = lamd\n #self.lamd2 = 0.05\n self.coteach_mode = mode # 'exchange' 'union' 'intersect'\n\n\n self.cpd_channels = cpd_channels\n\n # Avoid nans\n self.eps = np.finfo(float).eps\n\n def L21_error(self, x, x_r):\n x = x.view(x.shape[0], -1)\n x_r = x_r.view(x_r.shape[0], -1)\n le = (torch.norm(x - x_r, p=2, dim=1)).mean()\n return le\n\n def Autoregress_error(self, z, z_dist):\n z_d = z.detach()\n\n # Apply softmax\n z_dist = F.softmax(z_dist, dim=1)\n\n # Flatten out codes and distributions\n z_d = z_d.view(len(z_d), -1).contiguous()\n z_dist = z_dist.view(len(z_d), self.cpd_channels, -1).contiguous()\n\n # Log (regularized), pick the right ones\n z_dist = torch.clamp(z_dist, self.eps, 1 - self.eps)\n log_z_dist = torch.log(z_dist)\n index = torch.clamp(torch.unsqueeze(z_d, dim=1) * self.cpd_channels, min=0,\n max=(self.cpd_channels - 1)).long()\n selected = torch.gather(log_z_dist, dim=1, index=index)\n selected = torch.squeeze(selected, dim=1)\n\n # Sum and mean\n S = torch.sum(selected, dim=-1)\n nll = -S\n\n return nll\n\n def forward(self, xr, x, z, z_dist):\n x = x.view(x.shape[0], -1)\n xr = xr.view(xr.shape[0], -1)\n lmse = torch.norm(x - xr, p=2, dim=1)\n idxsorted = lmse.argsort().detach().cpu().numpy()\n\n rem_num = int(x.size(0) * (1. 
- self.noise_rate))\n arg_err = self.Autoregress_error(z, z_dist)\n zidxsorted = arg_err.argsort().detach().cpu().numpy()\n if self.coteach_mode == 'exchange':\n a = lmse[zidxsorted[:rem_num]].mean()\n b = arg_err[idxsorted[:rem_num]].mean()\n #c = arg_err[idxsorted[rem_num:]].mean()\n loss = a + b * self.lamd\n elif self.coteach_mode == 'neg':\n a = lmse[zidxsorted[:rem_num]].mean()\n b = arg_err[idxsorted[:rem_num]].mean()\n c = arg_err[idxsorted[rem_num:]].mean()\n loss = a + (b - c) * self.lamd\n else:\n loss = lmse.mean() + self.lamd * arg_err.mean()\n #print(a.item(), b.item())\n return loss, lmse, arg_err\n\n\nclass InCoTeachingHiddenLoss(nn.Module):\n def __init__(self, lamd, noise_rate=0.1, group=2):\n super(InCoTeachingHiddenLoss, self).__init__()\n self.mse = nn.MSELoss(reduction='none')\n self.noise_rate = noise_rate\n self.group = group\n self.lamd = lamd\n self.lamd2 = 0.05\n\n def L21_error(self, x, x_r):\n x = x.view(x.shape[0], -1)\n x_r = x_r.view(x_r.shape[0], -1)\n le = (torch.norm(x - x_r, p=2, dim=1)).mean()\n return le\n\n def forward(self, xr, x, z):\n L_mse = []\n idxsorted = []\n x = x.view(x.shape[0], -1)\n #print(len(xr), xr[0].shape, x.shape, z.shape)\n for ixr in xr:\n ixr = ixr.view(ixr.shape[0], -1)\n #print(x.shape, ixr.shape)\n #lmse = self.mse(ixr, x).mean(dim=1)\n lmse = torch.norm(x - ixr, p=2, dim=1)\n #lmse = ixr.sub(x).pow(2).view(ixr.size(0), -1).sum(dim=1, keepdim=False)\n L_mse.append(lmse)\n idxsorted.append(lmse.argsort().detach().cpu().numpy())\n rem_num = int(x.size(0) * (1. - self.noise_rate))\n znorm = torch.norm(z, p=2, dim=1)\n zidxsorted = znorm.argsort().detach().cpu().numpy()\n shift = random.randint(0, self.group - 1)\n loss = 0\n #print(rem_num)\n for i in range(self.group):\n loss += L_mse[i][zidxsorted[:rem_num]].mean()\n #loss += L_mse[i].mean()\n\n return znorm[idxsorted[(shift)%self.group][:rem_num]].mean() * self.lamd + loss\n #return znorm.mean()+loss\n\nclass InCoTeachingAgreeLoss(nn.Module):\n def __init__(self, noise_rate=0.1, group=2):\n super(InCoTeachingAgreeLoss, self).__init__()\n self.mse = nn.MSELoss(reduction='none')\n self.noise_rate = noise_rate\n self.group = group\n\n def forward(self, xr, x):\n L_mse = []\n idxsorted = []\n for ixr in xr:\n lmse = self.mse(ixr, x).mean(dim=(1,2,3))\n L_mse.append(lmse)\n idxsorted.append(lmse.argsort().detach().cpu().numpy())\n rem_idx = int(x.size(0) * (1. - self.noise_rate))\n loss = 0\n agrees = idxsorted[0][:rem_idx]\n for i in range(1, self.group):\n agrees = np.intersect1d(agrees,idxsorted[i])\n\n for i in range(self.group):\n loss += L_mse[i][agrees].mean()\n #loss += L_mse[i].mean()\n return loss\n\n\nclass InCoTeachingLoss(nn.Module):\n def __init__(self, noise_rate=0.1, group=2):\n super(InCoTeachingLoss, self).__init__()\n self.mse = nn.MSELoss(reduction='none')\n self.noise_rate = noise_rate\n self.group = group\n\n def forward(self, xr, x):\n L_mse = []\n idxsorted = []\n for ixr in xr:\n lmse = self.mse(ixr, x).mean(dim=(1,2,3))\n L_mse.append(lmse)\n idxsorted.append(lmse.argsort().detach())\n rem_num = int(x.size(0) * (1. 
- self.noise_rate))\n        shift = random.randint(1, self.group - 1)\n        loss = 0\n        for i in range(self.group):\n            loss += L_mse[i][idxsorted[(i+shift)%self.group][:rem_num]].mean()\n            #loss += L_mse[i].mean()\n        return loss\n\nclass MulCEInCoTeachingLoss(nn.Module):\n    def __init__(self, noise_rate=0.1, group=(2, 3, 3, 4), score_mode='pl_mean',\n                 iter_per_epoch=100, smooth_epoch=0, oe_scale=None,\n                 mask_group=None):\n        super(MulCEInCoTeachingLoss, self).__init__()\n        self.noise_rate = noise_rate\n        self.group = group\n        self.gsize = len(group)\n        self.softmax = nn.Softmax(dim=1)\n        self.score_mode = score_mode\n        self.noise_rate_schedule = []\n        for i in range(smooth_epoch):\n            self.noise_rate_schedule += [self.noise_rate * i / smooth_epoch] * iter_per_epoch\n        self.iter_count = 0\n        self.oe_scale = oe_scale\n\n        if mask_group is None:\n            self.mask_group = [False] * len(self.group)\n        else:\n            self.mask_group = mask_group\n\n    def get_noise_rate(self):\n        if self.iter_count < len(self.noise_rate_schedule):\n            ns = self.noise_rate_schedule[self.iter_count]\n        else:\n            ns = self.noise_rate\n        self.iter_count += 1\n        return ns\n\n    def forward(self, x, labels):\n        noise_rate = self.get_noise_rate()\n        Lce = []\n        now = 0\n        idxsorted = []\n        for i in range(len(self.group)):\n            if not self.mask_group[i]:\n                lce = F.cross_entropy(x[:, now: now+self.group[i]], labels[i], reduction='none')\n                Lce.append(lce)\n                idxsorted.append(lce.argsort().detach())\n            now += self.group[i]\n        #print(now)\n        rem_num = int(x.size(0) * (1. - noise_rate))\n        shift = random.randint(0, len(Lce) - 1)\n        loss = 0\n        for i in range(len(Lce)):\n            loss += Lce[i][idxsorted[(i+shift)%len(Lce)][:rem_num]].mean()\n            #loss += Lce[i].mean()\n\n        if self.oe_scale is not None:\n            oe_num = -int(x.size(0) * noise_rate * self.oe_scale)\n            now = 0\n            for i in range(len(self.group)):\n                if not self.mask_group[i]:\n                    xi = x[idxsorted[(i + shift) % len(Lce)][oe_num:], now:now + self.group[i]]\n                    loss += -0.1 * (xi.mean(dim=1) - torch.logsumexp(xi, dim=1)).mean()\n                now += self.group[i]\n        return loss\n\n    def pl_mean(self, x, labels):\n        Lce = []\n        now = 0\n        #print(x.shape, labels[0].shape)\n        for i in range(len(self.group)):\n            if not self.mask_group[i]:\n                lce = -F.cross_entropy(x[:, now: now + self.group[i]], labels[i], reduction='none')\n                Lce.append(lce)\n            now += self.group[i]\n        loss = 0\n        for i in range(len(Lce)):\n            loss += Lce[i]\n        return loss\n\n    def neg_entropy(self, x):\n        neg_entropy = 0\n        now = 0\n        for i in range(len(self.group)):\n            if not self.mask_group[i]:\n                ne = self.softmax(x[:, now: now+self.group[i]])\n                ne = ne * torch.log2(ne)\n                ne = ne.sum(dim=1)\n                neg_entropy += ne\n            now += self.group[i]\n        return neg_entropy\n\n    def predict(self, x, labels):\n        if self.score_mode == 'pl_mean':\n            return self.pl_mean(x, labels)\n        else:\n            return self.neg_entropy(x)","sub_path":"loss_functions/coteaching_loss.py","file_name":"coteaching_loss.py","file_ext":"py","file_size_in_byte":10845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"93426462","text":"import graphene\nimport django_filters\nfrom graphene_django import DjangoObjectType\nfrom graphene_django.filter import DjangoFilterConnectionField\n\nfrom .models import CodeReference\n\nclass CodeReferenceFilter(django_filters.FilterSet):\n    class Meta:\n        model = CodeReference\n        fields = ['proj_name', 'file_name', 'ver_num', 'link', 'prog_lang']\n\nclass CodeReferenceNode(DjangoObjectType):\n    class Meta:\n        model = CodeReference\n        interfaces = (graphene.relay.Node, )\n\nclass 
RelayQuery(graphene.ObjectType):\n relay_CodeReference = graphene.relay.Node.Field(CodeReferenceNode)\n relay_codes = DjangoFilterConnectionField(CodeReferenceNode, filterset_class=CodeReferenceFilter)\n\nclass RelayCreateCodeReference(graphene.relay.ClientIDMutation):\n code_reference = graphene.Field(CodeReferenceNode)\n\n class Input:\n proj_name = graphene.String()\n file_name = graphene.String()\n ver_num = graphene.String()\n link = graphene.String()\n prog_lang = graphene.String()\n difficulty = graphene.Int()\n\n def mutate_and_get_payload(self, info, **input):\n code_reference = CodeReference(\n proj_name=input.get('proj_name'),\n file_name=input.get('file_name'),\n ver_num=input.get('ver_num'),\n link=input.get('link'),\n prog_lang=input.get('prog_lang'),\n difficulty=input.get('difficulty')\n )\n code_reference.save()\n\n return RelayCreateCodeReference(code_reference=code_reference)\n\nclass RelayMutation(graphene.AbstractType):\n relay_create_code_reference = RelayCreateCodeReference.Field()\n\n","sub_path":"api/code_references/schema_relay.py","file_name":"schema_relay.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"506671926","text":"from __future__ import print_function\nfrom pyomo.environ import *\n# from nmpc_mhe.dync.MHEGen import MheGen\nfrom nmpc_mhe.dync.NMPCGen import NmpcGen\nfrom nmpc_mhe.mods.distl.dist_col import *\nimport sys\nimport itertools, sys\n\nstates = [\"x\", \"M\"]\nx_noisy = [\"x\", \"M\"]\nu = [\"u1\", \"u2\"]\nref_state = {(\"T\", (29,)): 343.15, (\"T\", (14,)): 361.15}\nu_bounds = {\"u1\": (0.0001, 9.9999e-1), \"u2\": (0, 1e+08)}\n# weights = {(\"T\", (29,)): 1000., (\"T\", (14,)): 1000.}\n# Known targets 0.38, 0.4, 0.5\n# Let's roll with the Temperature of the gas-emulsion, pressure and gas_velocity\n\ny = [\"T\", \"Mv\", \"Mv1\", \"Mvn\"]\n\nntrays = 42\ny_vars = {\"T\": [(i,) for i in range(1, ntrays + 1)],\n \"Mv\": [(i,) for i in range(2, ntrays)],\n \"Mv1\": [((),)],\n \"Mvn\": [((),)]}\n\nx_vars = {\"x\": [(i,) for i in range(1, ntrays + 1)],\n \"M\": [(i,) for i in range(1, ntrays + 1)]}\nnfet = 10\ntfe = [i for i in range(1, nfet + 1)]\n\n\nc = NmpcGen(d_mod=DistDiehlNegrete,\n u=u,\n states=states,\n ref_state=ref_state,\n u_bounds=u_bounds)\n\n\nc.load_iguess_ss()\nc.solve_ss()\nc.load_d_s(c.d1)\nc.solve_d(c.d1)\n\nc.update_state_real() # update the current state\n\nc.find_target_ss()\nc.create_nmpc()\nc.update_targets_nmpc()\nc.compute_QR_nmpc(n=-1)\nc.new_weights_olnmpc(10000, 1e+06)\nc.d1.create_bounds()\n\nc.create_predictor()\nc.predictor_step(c.d1, \"real\")\n\nq_cov = {}\nfor j in range(1, ntrays + 1):\n q_cov[(\"x\", (j,))] = 1e-05\n q_cov[(\"M\", (j,))] = 1\n\n\nc.make_noisy(q_cov)\nfor i in range(1, 1000):\n c.solve_d(c.d1, stop_if_nopt=True, o_tee=True)\n\n # Dot_sens\n\n with open(\"debug1.txt\", \"w\") as f:\n c.d1.w_pnoisy.display(ostream=f)\n c.randomize_noize(q_cov)\n c.update_state_real() # update the current state\n c.update_soi_sp_nmpc()\n\n c.predictor_step(c.d1, \"real\")\n c.update_state_predicted()\n c.compute_offset_state(\"real\")\n c.initialize_olnmpc(c.d2, \"predicted\")\n c.load_init_state_nmpc(src_kind=\"predicted\")\n\n c.solve_d(c.olnmpc, stop_if_nopt=True, skip_update=False, iter_max=1000)\n c.update_u(c.olnmpc)\n c.print_r_nmpc()\n c.cycle_ics(plant_step=True)\n # c.cycle_ics_noisy()\n c.plant_input_gen(c.d1, 
src_kind=\"dict\")\n\n\n\n","sub_path":"nmpc_mhe/nmpc_dist_off.py","file_name":"nmpc_dist_off.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"422633196","text":"# -*- coding: utf-8 -*-\n'''\n@author: Carry\n@file: processer.py\n@time: 2020/10/27 19:04 \n@desc: \n'''\nimport base64\nimport functools\nimport logging\nimport time\n\nimport cv2\nimport numpy as np\n\ntemplate_boxs = []\nname = ['buyer_name', 'buyer_id', 'buyer_address_phone', 'buyer_bank_no', 'seller_name', 'seller_id',\n 'seller_address_phone', 'seller_bank_no', 'bill_no', 'price', 'tax', 'account_cap', 'account_lower', 'date']\n\nbuyer_name = [375, 272, 1092, 272, 1092, 316, 375, 316]\nbuyer_id = [375, 316, 1092, 316, 1092, 360, 375, 360]\nbuyer_address_phone = [375, 360, 1092, 360, 1092, 407, 375, 407] # nation=[245,105,308,105,308,135,245,135]\nbuyer_bank_no = [375, 407, 1092, 407, 1092, 453, 375, 453] # birthday=[116,156,335,156,335,184,116,184]\nseller_name = [375, 922, 1092, 922, 1092, 966, 375, 966]\nseller_id = [375, 966, 1092, 966, 1092, 1009, 375, 1009]\nseller_address_phone = [375, 1009, 1092, 1009, 1092, 1050, 375, 1050]\nseller_bank_no = [375, 1050, 1092, 1050, 1092, 1089, 375, 1089]\nbill_no = [1407, 72, 1654, 72, 1654, 143, 1407, 143]\nprice = [1232, 793, 1489, 793, 1489, 838, 1232, 838]\ntax = [1586, 793, 1842, 793, 1842, 838, 1586, 838]\naccount_cap = [629, 860, 1356, 860, 1356, 908, 629, 908]\naccount_lower = [1459, 860, 1838, 860, 1838, 910, 1459, 910]\ndate = [1540, 193, 1848, 193, 1848, 241, 1540, 241]\n\ntemplate_boxs.append(buyer_name)\ntemplate_boxs.append(buyer_id)\ntemplate_boxs.append(buyer_address_phone)\ntemplate_boxs.append(buyer_bank_no)\ntemplate_boxs.append(seller_name)\ntemplate_boxs.append(seller_id)\ntemplate_boxs.append(seller_address_phone)\ntemplate_boxs.append(seller_bank_no)\ntemplate_boxs.append(bill_no)\ntemplate_boxs.append(price)\ntemplate_boxs.append(tax)\ntemplate_boxs.append(account_cap)\ntemplate_boxs.append(account_lower)\ntemplate_boxs.append(date)\n\nbill_dict = dict(zip(name, template_boxs))\n\nx_threshold = 10\ny_threshold = 10\nx_Lengthen = 5\ny_lengthen = 5\n\nkp1 = des1 = hf1 = wf1 = None\n\n\ndef call_time(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n logging.info('-' * 20 + ' {} call_time:{} '.format(func.__name__, str(end_time - start_time)) + '-' * 20)\n return result\n\n return wrapper\n\n\ndef img2base64(imgpath):\n with open(imgpath, \"rb\") as f: # 转为二进制格式\n base64_data = base64.b64encode(f.read()).decode() # 使用base64进行加密\n return base64_data\n\n\ndef img_resize(imggray, dwidth):\n '''\n 等比缩放\n :param imggray:\n :param dwidth:\n :return:\n '''\n crop = imggray\n size = crop.get().shape\n height = size[0]\n width = size[1]\n height = height * dwidth / width\n crop = cv2.resize(src=crop, dsize=(dwidth, int(height)), interpolation=cv2.INTER_CUBIC)\n return crop\n\n\n@call_time\ndef get_perspective_img(parse_img_name):\n '''\n 获取透视变换后的图片\n :return:\n '''\n logging.info('-' * 20 + ' 开始处理: ' + '-' * 20)\n W = 1920 # resize后的宽\n template_img_name = './images/template_bak.jpg'\n # test_img = './carry_zone/test_04.jpg'\n\n if 1:\n # SIFT 精度高 慢 20s\n surf = cv2.xfeatures2d.SIFT_create()\n else:\n # SURF(加速稳健特征)算法 速度快\n surf = cv2.xfeatures2d.SURF_create(1000) # 默认100,关键点检测的阈值,越高监测的点越少\n global kp1, des1, hf1, wf1\n if kp1 is None:\n # UMat是一个图像容器\n template_img = 
img_resize(cv2.UMat(cv2.imread(template_img_name, cv2.IMREAD_GRAYSCALE)), W)\n hf1, wf1 = cv2.UMat.get(template_img).shape\n\n # 返回keypoints是检测关键点,descriptor是描述符,这是图像一种表示方式,可以比较两个图像的关键点描述符,可作为特征匹配的一种方法。\n kp1, des1 = surf.detectAndCompute(template_img, None)\n\n # 画出关键点(特征点)\n # kpImgA = cv2.drawKeypoints(imageA, kp1, imageA)\n # # kpImgB = cv2.drawKeypoints(grayB, keypointsB, imageB)\n # cv2.imshow(\"kpImgA\", kpImgA)\n\n parse_img = img_resize(cv2.UMat(parse_img_name), W)\n # parse_img = img_resize(cv2.UMat(cv2.imread(test_img, cv2.IMREAD_GRAYSCALE)), W)\n\n kp2, des2 = surf.detectAndCompute(parse_img, None)\n\n # 用FlannBasedMatcher方法进行特征点匹配,寻找最近邻近似匹配\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=10)\n # 匹配耗时 待优化\n matches = cv2.FlannBasedMatcher(index_params, search_params).knnMatch(des1, des2, k=2)\n\n # 通过描述符的距离进行选择需要的点\n coff = 0.5 # 0.1 0.2 0.8\n good_matches = [m for m, n in matches if m.distance < coff * n.distance]\n if len(good_matches) < 10:\n return None\n\n # 获取映射变换矩阵,findHomography 计算多个二维点对之间的最优单映射变换矩阵H\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)\n m, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\n m_r = np.linalg.inv(m)\n result_img = cv2.warpPerspective(parse_img, m_r, (wf1, hf1), borderValue=[255, 255, 255])\n cv2.imwrite(\"./images/_perspective.jpg\", result_img.get())\n result_img = result_img.get().astype(np.uint8)\n return result_img\n\n\ndef make_iou(box1, box2):\n \"\"\"\n 计算两个矩形的交集和并集的比\n :param box1:[x1,y1,x2,y2] 左上角的坐标与右下角的坐标\n :param box2:[x1,y1,x2,y2]\n :return: iou_ratio--交并比\n \"\"\"\n width1 = abs(box1[2] - box1[0])\n height1 = abs(box1[1] - box1[3]) # 这里y1-y2是因为一般情况y1>y2,为了方便采用绝对值\n width2 = abs(box2[2] - box2[0])\n height2 = abs(box2[1] - box2[3])\n x_max = max(box1[0], box1[2], box2[0], box2[2])\n y_max = max(box1[1], box1[3], box2[1], box2[3])\n x_min = min(box1[0], box1[2], box2[0], box2[2])\n y_min = min(box1[1], box1[3], box2[1], box2[3])\n iou_width = x_min + width1 + width2 - x_max\n iou_height = y_min + height1 + height2 - y_max\n if iou_width <= 0 or iou_height <= 0:\n iou_ratio = 0\n else:\n iou_area = iou_width * iou_height # 交集的面积\n box1_area = width1 * height1\n box2_area = width2 * height2\n iou_ratio = iou_area / (box1_area + box2_area - iou_area) # 并集的面积\n return iou_ratio\n\n\n@call_time\ndef get_area(detect_boxes, w, h):\n '''\n 遍历检测框 获取指定区域\n :param detect_boxes:\n :return:\n '''\n boxes = four_point_2_two(detect_boxes)\n\n boxes_new = two_point_2_four(boxes)\n\n new_rectangle_list = merge_boxs(boxes_new, w, h)\n print(len(detect_boxes), len(new_rectangle_list))\n\n draw_rectangle(new_rectangle_list)\n\n ret_dic = {}\n for i, key in enumerate(bill_dict.keys()):\n mark_iou_ratio = 0.1\n template_box = bill_dict[key]\n template_box_new = [template_box[0], template_box[1],\n template_box[4], template_box[5]]\n for detect_boxe in new_rectangle_list:\n detect_boxe_new = detect_boxe\n iou_ratio = make_iou(detect_boxe_new, template_box_new)\n if iou_ratio > mark_iou_ratio:\n mark_iou_ratio = iou_ratio\n print(key, iou_ratio)\n ret_dic[key] = detect_boxe\n return ret_dic\n\n\ndef merge_boxs(point_list, w, h):\n \"\"\"\n 将list中矩形合并,并将新矩形增加高度和宽度返回\n :param point_list: 矩形list\n :param w: 图片宽\n :param h: 图片高\n :return: 新的矩形list\n \"\"\"\n\n def two_point_compare(point_1, point_2):\n \"\"\"\n 看两个矩形是否靠近,如果两个矩形的距离在一定范围之内,则生成一个新的矩形,\n 
新矩形的左上角点为原先两个矩形左边矩形的左上角点\n 新矩形的右下角点为原先两个矩形右边边矩形的右下角点\n :param point_1: 矩形1\n :param point_2: 矩形2\n :return: 新矩形\n \"\"\"\n if point_1[0] >= point_2[0]:\n # point_1 在 point_2 右边\n point_change_1 = point_2\n point_change_2 = point_1\n else:\n point_change_1 = point_1\n point_change_2 = point_2\n if abs(point_change_1[2] - point_change_2[0]) <= x_threshold and abs(\n point_change_1[3] - point_change_2[1]) <= y_threshold:\n if point_change_1[3] - point_change_2[1] > 0:\n new_point = (\n point_change_1[0], point_change_2[1],\n point_change_2[2], point_change_2[3],\n point_change_2[4], point_change_2[5],\n point_change_1[6], point_change_1[7])\n else:\n new_point = (\n point_change_1[0], point_change_1[1],\n point_change_2[2], point_change_2[3],\n point_change_2[4], point_change_2[5],\n point_change_1[6], point_change_1[7])\n else:\n new_point = ()\n\n return new_point\n\n def get_new_list(old_list):\n \"\"\"\n 将原list中的矩形判断距离,距离在一定范围之内的矩形按照上面的方式合并成一个新的矩形,并将新矩形添加到list中\n ,并将原先两个矩形从list中移除。\n :param old_list: 矩形的list\n :return: 新生成的list\n \"\"\"\n for i in range(len(old_list)):\n if i == len(old_list):\n break\n point1 = old_list[i]\n for j in range(i + 1, len(old_list)):\n point2 = old_list[j]\n if point1 != point2:\n new_point = two_point_compare(point1, point2)\n if new_point != ():\n old_list.remove(point1)\n old_list.remove(point2)\n old_list.append(new_point)\n break\n else:\n pass\n\n return old_list\n\n def get_new_point_list(point_list):\n \"\"\"\n 将list中的在一定范围内的矩形都合并\n :param point_list: 矩阵list\n :return: 合并矩阵之后的矩阵\n \"\"\"\n len_ori = len(point_list)\n while True:\n new_list = get_new_list(point_list)\n len_new = len(new_list)\n if len_ori == len_new:\n break\n len_ori = len(new_list)\n return new_list\n\n def point_change(point, w, h):\n \"\"\"\n 将矩形增加宽度和高度,但是增加后的矩形的顶点要在图片中\n :param point: 矩形\n :param w: 图片的宽度\n :param h: 图片的高度\n :return: 新矩形\n \"\"\"\n x1 = point[0]\n y1 = point[1]\n x2 = point[4]\n y2 = point[5]\n return (x1 - x_Lengthen if (x1 - x_Lengthen > 0) else 0,\n y1 - y_lengthen if (y1 - y_lengthen > 0) else 0,\n x2 + x_Lengthen if (x2 + x_Lengthen < w) else w,\n y2 + y_lengthen if (y2 + y_lengthen < h) else h)\n\n new_list = []\n old_list = get_new_point_list(point_list)\n for old_point in old_list:\n new_point = point_change(old_point, w, h)\n if new_point[0] < new_point[2] and new_point[1] < new_point[3]:\n new_list.append(new_point)\n\n return new_list\n\n\ndef draw_rectangle(new_rectangle_list):\n '''\n 画出矩形\n :param new_rectangle_list:\n :return:\n '''\n img_path = './images/detect_boxs.jpg'\n img = cv2.imread(img_path)\n for detect_boxes in new_rectangle_list:\n # 画矩形,红色的线框出来。\n if len(detect_boxes) == 4:\n cv2.rectangle(img=img, pt1=(int(detect_boxes[0]), int(detect_boxes[1])),\n pt2=(int(detect_boxes[2]), int(detect_boxes[3])),\n color=(0, 0, 255), thickness=2)\n cv2.imwrite('./images/merge_boxs.jpg', img)\n\n\ndef change_boxs(detect_boxes):\n new_boxs = []\n for detect_box in detect_boxes:\n new_boxs.append((detect_box[2][0], detect_box[2][1],\n detect_box[3][0], detect_box[3][1],\n detect_box[0][0], detect_box[0][1],\n detect_box[1][0], detect_box[1][1]))\n return new_boxs\n\n\ndef four_point_2_two(point_list):\n '''\n 四个乱序坐标转左上和右下两个点坐标\n :param point_list:\n :return:\n '''\n result = []\n for old_point in point_list:\n new_list = []\n x = [int(old_point[0][0]), int(old_point[1][0]), int(old_point[2][0]), int(old_point[3][0])]\n y = [int(old_point[0][1]), int(old_point[1][1]), int(old_point[2][1]), int(old_point[3][1])]\n new_list.append(min(x))\n 
new_list.append(min(y))\n new_list.append(max(x) + 5)\n new_list.append(max(y) + 1)\n result.append(new_list)\n return result\n\n\ndef two_point_2_four(point_list):\n '''\n 左上和右下两个点坐标转四个点坐标\n :param point_list:\n :return:\n '''\n result = []\n for old_point in point_list:\n w = old_point[2] - old_point[0]\n h = old_point[3] - old_point[1]\n new_list = []\n new_list.append(old_point[0])\n new_list.append(old_point[1])\n new_list.append(old_point[0] + w)\n new_list.append(old_point[1])\n new_list.append(old_point[2])\n new_list.append(old_point[3])\n new_list.append(old_point[0])\n new_list.append(old_point[1] + h)\n result.append(new_list)\n return result\n\n\ndef get_cut_image(coordinate_point, image):\n \"\"\"\n 根据四个顶点的坐标,将坐标围成的长方形图片从原图中裁减出来\n :param coordinate_point:四个顶点的坐标\n :param image: 原图\n :return: 裁减出来的图片\n \"\"\"\n x0 = coordinate_point[0]\n x1 = coordinate_point[2]\n y0 = coordinate_point[1]\n y1 = coordinate_point[3]\n cropped = image[y0:y1, x0:x1]\n return cropped\n\n\n# def fix_ret(key):\n# if key == ''\n\n\nif __name__ == '__main__':\n import re\n\n name = '名,。称:上海团迈贸易有限公司'\n name = '称:上海团迈贸易有限公司'\n name = '名 称:上海团称迈贸易有限公司'\n s = re.sub('名?.*称:', \"\", name)\n # if ret_dic['buyer_name'].startswith('称:'):\n # ret_dic['buyer_name'] = ret_dic['buyer_name'].replace('称:', '')\n # print(ret_dic['buyer_name'])\n\n print(s)\n","sub_path":"processer.py","file_name":"processer.py","file_ext":"py","file_size_in_byte":14661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"10145865","text":"# Import the necessary methods from tweepy library\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport json\n\n# Variables that contains the user credentials to access Twitter API\nfrom harvester import Database\n\naccess_token = \"1083653718581497857-VSyJpAIMjFZaWpg0eJ0M8G409KPkJJ\"\naccess_token_secret = \"3SGS9VfU3UvaXw84y0yRULfdIXFDryxIuxpYD83aMMygP\"\nconsumer_key = \"DYMWGxnSrF8aG5rISt1oBSBSO\"\nconsumer_secret = \"of33s312AnD247lDcCQGHHK6ciAsdVmqqbm58nwiJo9TAp0lj9\"\n\nAUS_BOUND_BOX = (113.338953078, -43.6345972634, 153.569469029, -10.6681857235)\n\n\n# This is a basic listener that just prints received tweets to stdout.\nclass StdOutListener(StreamListener):\n def __init__(self, auth, db):\n self.auth = auth\n self.db = db\n\n def on_data(self, data):\n # self.file.writelines(json.dumps(data, ensure_ascii=False))\n # write_tweets(self.file, data, self.count)\n print(data)\n self.db.store(data)\n return True\n\n def on_status(self, status):\n print(\"status gives data: \", status)\n\n def on_error(self, status_code):\n print(status_code)\n\n\ndef write_tweets(file, data, count):\n if count == 1:\n file.write('{ \"rows\":[\\n')\n\n tweet = json.loads(data)\n jsonStr = json.dumps(tweet, ensure_ascii=False)\n file.write(jsonStr + ',\\n')\n\n if count == 100:\n file.write(\"]}\\n\")\n if count > 100:\n exit(0)\n\n\nif __name__ == '__main__':\n keywords = ['teat', 'shithappens', 'assjockey', 'asscowboy']\n url = \"http://localhost:5984\"\n db_name = 'new_test'\n db = Database.DB(url, db_name)\n # This handles Twitter authetification and the connection to Twitter Streaming API\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n l = StdOutListener(auth, db)\n stream = Stream(auth, l)\n\n# This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'\n stream.filter(track=keywords, 
locations=AUS_BOUND_BOX)\n","sub_path":"Test/Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"108953281","text":"with open(r\"\\Users\\JK31434\\Desktop\\Python docs\\Project Euler\\p22_names.txt\") as f:\r\n    names = sorted(f.read().replace('\"', '').split(','))\r\nf.closed\r\n\r\nts = 0\r\nalph = 'abcdefghijklmnopqrstuvwxyz'\r\nalphnum ={}\r\nfor i, l in enumerate(alph):\r\n    alphnum[l] = i + 1\r\n\r\nfor i, n in enumerate(names):\r\n    s = 0\r\n    for j in n:\r\n        s += alphnum[j.lower()]\r\n    ts += (i + 1)*s\r\n\r\nprint(ts)","sub_path":"P22.py","file_name":"P22.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"503876658","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 9:54 2018/10/13\n\n@author: Liu Jinbao\n@mail: liu.jinbao@outlook.com\n@project: PlasmaChemistry\n@IDE: PyCharm\n\"\"\"\n\nfrom plasmistry.io import test_func\n\nline = 'E + @A@ => E + @B@'\nreplc_list = ['@A@ = H2(1) H2(2) H2(3)',\n              '@B@ = H2(1) H2(2) H2(3)',\n              '@CONDITION : @A@[3]>=@B@[3]']\noutput = test_func(line, replc_list)\n","sub_path":"demo_add_condition_to_plasmistry.io.py","file_name":"demo_add_condition_to_plasmistry.io.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"103297674","text":"from django.urls import register_converter\n\n\nclass CategoryConverter(object):\n    regex = r'\\w+|(\\w+\\+\\w+)+'\n\n    def to_python(self, value):\n        result = value.split('+')\n        return result\n\n    def to_url(self, value):\n        if isinstance(value, list):\n            result = \"+\".join(value)\n            return result\n        else:\n            raise RuntimeError(\"When converting to a URL, the argument must be a list!\")\n\n\nregister_converter(CategoryConverter, 'cate')\n","sub_path":"article/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"568223514","text":"#ready ....\n# can find python 3 module index from google\n# all modules w/ python\nimport random\n\n\nfor i in range(3):\n    print(round(random.random()*10))\n    print(random.randint(10, 20))\n\nmembers =['John','Mary', 'Bob', 'Josh']\n\nleader = random.choice(members)\nprint(leader)\n\ndef roll():\n    dice = (1,2,3,4,5,6)\n    result = random.choice(dice)\n    print(result)\n\nroll()\n\n\ndef roll2():\n    first = random.randint(1,6)\n    second = random.randint(1,6)\n\n\nroll2()\nroll2()","sub_path":"17_Generating_random_values.py","file_name":"17_Generating_random_values.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"64609287","text":"import os,re,cv2,pywt,scipy\nimport scipy.stats\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\n\n\ndef reducirImagen(img,porcentajeReduccion): #Input: Imagen de cv2, porcentaje que desea mantener de la img original en rango (0-100)\n    width = int(img.shape[1] * porcentajeReduccion / 100)\n    height = int(img.shape[0] * porcentajeReduccion / 100)\n    dim = (width, height)\n    return cv2.resize(img, dim)\n\ndef segmentarKmeans(img):\n    pixel_values = img.reshape((-1, 3))  # reshape the image to a 2D array of pixels and 3 color values (RGB)\n    pixel_values = np.float32(pixel_values)  # convert to float\n    criteria = 
(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2) # define stopping criteria\n k = 2 # number of clusters (K)\n _, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n centers = np.uint8(centers)\n labels = labels.flatten()\n segmented_image = centers[labels.flatten()]\n segmented_image = segmented_image.reshape(img.shape)\n return segmented_image\n\ndef histograma(I, bins):\n (c1, hc1) = np.histogram((I[:, :, 0]), bins)\n (c2, hc2) = np.histogram((I[:, :, 1]), bins)\n (c3, hc3) = np.histogram((I[:, :, 2]), bins)\n c1 = np.append(c1, c2)\n c3 = np.append(c1, c3)\n return (c3)\n\n\ndef fu(I):\n f = np.real(np.fft.rfft2(I))\n f = np.asarray(f).reshape(-1)\n FFTM = f.mean()\n FFTD = f.std()\n FFTK = scipy.stats.kurtosis(f)\n FFTS = scipy.stats.skew(f)\n return ([FFTM, FFTD, FFTK, FFTS])\n\n\ndef fourier(I,prueba): #Puse un parmtro entrada prueba, para usar hilos, no lo necesita, pero fue más fácil así\n I1 = fu(I[:, :, 0])\n I2 = fu(I[:, :, 1])\n I3 = fu(I[:, :, 2])\n return ([I1, I2, I3])\n\n\ndef dwt(I): # TRANSFORMADA WAVELET\n\n coeffs2 = pywt.dwt2(I, 'bior1.3')\n LL, (LH, HL, HH) = coeffs2\n LH = np.asarray(LH).reshape(-1)\n HL = np.asarray(HL).reshape(-1)\n HH = np.asarray(HH).reshape(-1)\n mlh = scipy.mean(LH)\n stdlh = scipy.std(LH)\n slh = scipy.stats.skew(LH)\n klh = scipy.stats.kurtosis(LH)\n mhl = scipy.mean(HL)\n stdhl = scipy.std(HL)\n shl = scipy.stats.skew(HL)\n khl = scipy.stats.kurtosis(HL)\n mhh = scipy.mean(HH)\n stdhh = scipy.std(HH)\n shh = scipy.stats.skew(HH)\n khh = scipy.stats.kurtosis(HH)\n return ([mlh, stdlh, slh, klh, mhl, stdhl, shl, khl, mhh, stdhh, shh, khh])\n\n\ndef wavelet(I,prueba):\n I1 = dwt(I[:, :, 0])\n I2 = dwt(I[:, :, 1])\n I3 = dwt(I[:, :, 2])\n return ([I1, I2, I3])\n\ndef hogDescriptor(I,prueba):\n hog = cv2.HOGDescriptor()\n hogDesc = hog.compute(I)\n hogMedia = hogDesc.mean()\n hogStd = hogDesc.std()\n return [hogMedia,hogStd]\n\n\ndef siftGrayDescriptor(gray,prueba): #computa sift para un solo canal\n sift = cv2.SIFT_create()\n kp, des = sift.detectAndCompute(gray, None)\n meanSiftGray = des.mean()\n stdSiftGray = des.std()\n skewSiftGray = scipy.stats.skew(des.ravel())\n kurtSiftGray = scipy.stats.kurtosis(des.ravel())\n return [meanSiftGray,stdSiftGray,skewSiftGray,kurtSiftGray]\n\n\ndef siftDescriptor(img,prueba):\n return [siftGrayDescriptor(img[:,:,0],0),siftGrayDescriptor(img[:,:,1],0),siftGrayDescriptor(img[:,:,2],0)]\n\n\n\n","sub_path":"funcionesCafe.py","file_name":"funcionesCafe.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"66502007","text":"import numpy as np\nimport cv2\nfrom collections import deque\nfrom src.window import Window\n\nclass Lane(object):\n\n def __init__(self, h, w, perspective_transformer,n_windows = 9):\n # Image height in pixels\n self.h = h\n # Image width in pixel\n self.w = w\n # List of recent polynomial coefficients\n self.coefficients = deque(maxlen=5)\n # List of recent polynomial coefficients\n self.last_coefficients = None\n # Amount of not found lanes in recent frames\n self.not_found = 0\n # Windows amount\n self.n_windows = n_windows\n # Window height\n self.window_height = int(self.h / self.n_windows)\n # List of searching windows\n self.windows = []\n self.perspective_transformer = perspective_transformer\n\n def track(self, nonzero, x_start = None):\n if x_start is not None:\n indices = self.init_windows(nonzero, x_start)\n else:\n indices 
= self.scan_windows(nonzero)\n\n        self.process_points(nonzero[1][indices], nonzero[0][indices])\n\n    def scan_windows(self, nonzero):\n        indices = np.empty([0], dtype=int)\n        window_x = None\n        for window in self.windows:\n            indices = np.append(indices, window.pixels_in(nonzero, window_x), axis=0)\n            window_x = window.mean_x\n        return indices\n\n    def init_windows(self, nonzero, x_start):\n        indices = np.empty([0], dtype=int)\n        self.windows = []\n        for i in range(self.n_windows):\n            window = Window(\n                y1=self.h - (i + 1) * self.window_height,\n                y2=self.h - i * self.window_height,\n                x=self.windows[-1].mean_x if len(self.windows) > 0 else x_start\n            )\n            indices = np.append(indices, window.pixels(nonzero), axis=0)\n            self.windows.append(window)\n        return indices\n\n    def is_good_lane(self, x, y):\n        enough_points = len(y) > 0 and np.max(y) - np.min(y) > self.h * .625\n        return enough_points\n\n    def process_points(self, x, y):\n        if self.is_good_lane(x, y) or len(self.coefficients) == 0:\n            self.fit(x, y)\n            self.not_found = 0\n        else:\n            self.not_found += 1\n\n    def get_points(self):\n        y = np.linspace(0, self.h - 1, self.h)\n        current_fit = self.averaged_fit()\n        return np.stack((\n            current_fit[0] * y ** 2 + current_fit[1] * y + current_fit[2],\n            y\n        )).astype(int).T\n\n    def averaged_fit(self):\n        return np.array(self.coefficients).mean(axis=0)\n\n    def fit(self, x, y):\n        if len(x) != 0:\n            self.last_coefficients = np.polyfit(y, x, 2)\n            self.coefficients.append(self.last_coefficients)\n        else:\n            self.last_coefficients = None\n\n    def radius_of_curvature(self):\n        points = self.get_points()\n        y = points[:, 1]\n        x = points[:, 0]\n        fit_cr = np.polyfit(y * self.perspective_transformer.ym_per_pix, x * self.perspective_transformer.xm_per_pix, 2)\n        # Estimate radius of curvature in meters.\n        return int(((1 + (2 * fit_cr[0] * 720 * self.perspective_transformer.ym_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0]))\n\n    def camera_distance(self):\n        # Estimated distance to camera in meters.\n        points = self.get_points()\n        x = points[np.max(points[:, 1])][0]\n        return np.absolute((self.w // 2 - x) * self.perspective_transformer.xm_per_pix)\n\n\n\n    def draw(self, image):\n        cv2.polylines(image, [self.get_points()], False, (255, 0, 0), 2)","sub_path":"src/lane.py","file_name":"lane.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
+{"seq_id":"457876398","text":"from __future__ import print_function\n\nimport base64\nimport urllib\nimport boto3\nimport json\nimport os\n\nprint('Loading function')\n\n\ndef respond(err, res=None):\n    return {\n        'statusCode': '400' if err else '200',\n        'body': err.message if err else json.dumps(res),\n        'headers': {\n            'Content-Type': 'application/json',\n        },\n    }\n\n\ndef lambda_handler(event, context):\n    '''Demonstrates a simple HTTP endpoint using API Gateway. You have full\n    access to the request and response payload, including headers and\n    status code.\n\n    To scan a DynamoDB table, make a GET request with the TableName as a\n    query string parameter. 
To put, update, or delete an item, make a POST,\n PUT, or DELETE request respectively, passing in the payload to the\n DynamoDB API as a JSON body.\n '''\n s3 = boto3.resource('s3')\n print(\"Received event: \" + json.dumps(event, indent=2))\n\n operations = {\n 'DELETE': lambda dynamo, x: dynamo.delete_item(**x),\n 'GET': lambda dynamo, x: dynamo.scan(**x),\n 'POST': lambda dynamo, x: dynamo.put_item(**x),\n 'PUT': lambda dynamo, x: dynamo.update_item(**x),\n }\n\n operation = event['httpMethod']\n # if operation in operations:\n # payload = event['queryStringParameters'] if operation == 'GET' else json.loads(event['body'])\n # dynamo = boto3.resource('dynamodb').Table(payload['TableName'])\n # response = dynamo.scan(FilterExpression=Attr(payload['Attribute']).eq(payload['Value']))\n # items = response['Items']\n # # print(items[0][payload['Attribute']])\n # result = {'ImageName': items[0]['ImageName'], 'Content': items[0]['Content'], 'Similarity': items[0]['Similarity']};\n # print(result)\n # # operations[operation](dynamo, payload)\n # return respond(None, list(result))\n # else:\n # return respond(ValueError('Unsupported method \"{}\"'.format(operation)))\n\n if operation in operations:\n if operation == 'GET':\n bucket = s3.Bucket(event['Records'][0]['s3']['bucket']['name'])\n key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))\n try:\n # response = s3.get_object(Bucket=bucket, Key=key)\n f = open('temp.jpg.txt', 'wb')\n with f as data:\n bucket.download_fileobj(key, data)\n f.close()\n f = open('temp.jpg.txt', 'r')\n with f as myfile:\n content = myfile.read()\n f.close()\n\n os.remove('temp.jpg.txt')\n contentString = base64.b64encode(content)\n # print(\"CONTENT TYPE: \" + response['ContentType'])\n # print(\"Body: \" + response['Body'].read())\n except Exception as e:\n print(e)\n print(\n 'Error getting object {} from bucket {}. 
Make sure they exist and your bucket is in the same region as this function.'.format(\n key, bucket))\n raise e\n\n # byteArray = base64.b64encode(response['Body'].read())\n # content = byteArray.decode(\"utf-8\");\n result = {\"ImageName\": key, \"Content\": contentString}\n return respond(None, result)\n # elif operation == 'POST':\n # payload = event['body']\n # # print(payload)\n # dynamo = boto3.resource('dynamodb').Table(payload['TableName'])\n # response = dynamo.put_item(\n # Item={\n # 'ImageName': payload['ImageName'],\n # 'Content': payload['Content'],\n # }\n # )\n # return respond(None, response);\n # elif operation == \"DELETE\":\n # payload = event['body']\n # dynamo = boto3.resource('dynamodb').Table(payload['TableName'])\n # response = dynamo.delete_item(\n # Key={\n # 'ImageName': payload['ImageName']\n # }\n # )\n # return respond(None, response);\n else:\n return respond(ValueError('Unsupported PUT \"{}\"'.format(operation)));\n else:\n return respond(ValueError('Unsupported method \"{}\"'.format(operation)))","sub_path":"FaceRecognition_Server/src/org/lambda/ImageHandle/serviceDownload_file.py","file_name":"serviceDownload_file.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"373111309","text":"\"\"\"Test Gathering of Prefixes from IRR Data.\"\"\"\n\n# Standard Library\nimport sys\nimport asyncio\n\n# Project\nfrom routingpolicy.irr import get_prefixes\nfrom routingpolicy.log import log\n\n\nasync def _run_test(asn: str) -> None:\n for family in (4, 6):\n prefixes = get_prefixes(asn, family)\n log.info(\"IPv{} prefixes for {}:\", str(family), f\"AS{asn}\")\n async for prefix in prefixes:\n log.info(prefix)\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n asn = sys.argv[1].replace(\"AS\", \"\")\n else:\n asn = \"14525\"\n task = _run_test(asn)\n try:\n asyncio.run(task)\n except KeyboardInterrupt:\n task.close()\n log.critical(\"Stopped\")\n","sub_path":"tests/test_irr.py","file_name":"test_irr.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"160388221","text":"class HangmanOyunu:\n aranan_kelime = \"\"\n dogru_harf = list()\n yanlis = 0\n\n def kelime_yaz(self, word_list):\n for character in self.dogru_harf:\n print(character, end=\" \")\n\n def harf_al(self):\n self.aranan_kelime = input(\"Bulunacak kelimeyi yazın: \")\n self.dogru_harf = ['_' for i in range(len(self.aranan_kelime))]\n\n def harf_karsilastir(self, tahmin):\n sayac = 0\n for harf in self.aranan_kelime:\n if tahmin == harf:\n self.dogru_harf[sayac] = harf\n sayac += 1\n\n def oyun_basladi(self):\n durum = True\n while durum:\n print(\"Retries:\", (len(self.aranan_kelime))-self.yanlis)\n harf_tahmin = input(\"Senin tahminin:\")\n self.harf_karsilastir(harf_tahmin)\n self.kelime_yaz(self.dogru_harf)\n self.oyun_bitti()\n if len(self.aranan_kelime) - self.yanlis == 1:\n durum = False\n self.yanlis = self.yanlis + 1\n if harf_tahmin in self.aranan_kelime:\n durum = True\n self.yanlis -= 1\n else:\n print(\"\\nHakkın Bitti!!!\")\n quit()\n\n def oyun_bitti(self):\n if \"_\" not in self.dogru_harf:\n print(\"\\nOyun Bitti, Görüşürüz\")\n quit()\n\noyun = HangmanOyunu()\noyun.harf_al()\noyun.oyun_basladi()\n\n\n","sub_path":"Homeworks/HW4.py","file_name":"HW4.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} 
+{"seq_id":"551596551","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n##################################################################\n# Asignatura: Sistemas de Comunicación ####\n# Tema 1: Introducción a la teoría de la señal y los sistemas ####\n# Ejercicio 1.1: Simulación de señales y sistemas. ####\n##################################################################\n# ####\n# Autor: Fernando Rosa González ####\n# Fecha: Abril 2015 ####\n# Area: Teorí­a de la Señal y Comunicaciones ####\n# ####\n# Las unidades se muestran entre corchetes [] ####\n##################################################################\n# ####\n# Descripción: Implementación de la clase de simulación de ####\n# señales y sistemas. ####\n# ####\n##################################################################\nimport pylab as pyl\n\n\nclass Signal:\n # La clase Signal permite implementar la simulación de señales\n # estableciendo la ventana de análisis en base a tres parámetros,\n # el instante inicial, el instante final y la frecuencia de muestreo.\n t1 = 0.0 # Instante inicial [s]\n t2 = 0.0 # Instante final [s]\n fm = 44100.0 # Frecuencia de muestreo [mue/s]\n T = 0. # Periodo de simulación [s]\n tau = 0. # Intervalo de muestreo [s]\n N = 0. # Número total de muestras [mue]\n # Tamaño de la ventana de análisis del espectrograma [mue]\n tam_ven = 1024\n res_frec = 0. # Resolución en frecuencia [Hz]\n tiempo = pyl.array([]) # Array de tiempos [s]\n frec = pyl.array([]) # Array de frecuencias centradas [Hz]\n frec_pos = pyl.array([]) # Array de frecuencias positivas [Hz]\n ss = pyl.array([]) # Array de la señal [unidades de la señal]\n ff = pyl.array([]) # Array del espectro [unidades de la señal]\n logpl = False # Estado del plot del espectro\n\n def __init__(self, t1=0., t2=1., fm=44100.):\n # Los objetos de la clase Signal tienen tres parámetros básicos.\n # t1: instante inicial [s]\n # t2: instante final [s]\n # fm: frecuencia de muestreo [mue/s]\n self.t1 = t1\n self.t2 = t2\n self.fm = fm\n self.T = t2 - t1\n self.tau = 1.0 / self.fm\n self.N = self.fm * self.T\n self.tiempo = pyl.arange(t1, t2, self.tau)\n self.tam_ven = self.N\n self.res_frec = fm / self.tam_ven\n self.frec = pyl.arange(-self.fm / 2., self.fm / 2., self.res_frec)\n self.frec = pyl.concatenate(\n (self.frec[self.N / 2:self.N], self.frec[0:self.N / 2]))\n self.frec_pos = pyl.arange(0., self.fm, self.res_frec)\n self.ss = pyl.zeros(self.N)\n self.ff = pyl.zeros(self.N)\n return(None)\n\n def step(self, amp_antes=0., amp_desp=1., pos=0.0):\n u'''\n Señal escalon con 'amp_antes' para los tiempos anteriores a pos,\n 'amp_desp' para los tiempos posteriores a pos y el valor medio en\n el tiempo 'pos'\n '''\n self.ss = pyl.zeros(self.tiempo.size)\n for n, x in enumerate(self.tiempo):\n if(x > pos):\n self.ss[n] = amp_desp\n elif (x < pos):\n self.ss[n] = amp_antes\n else:\n self.ss[n] = (amp_antes + amp_desp) / 2.\n self.ff = pyl.fft(self.ss) / self.N\n return(None)\n\n def tone(self, fr=1000., amp=1., fas=0.0):\n u'''\n Señal tono de frecuencia fr, amplitud amp, y fase fas\n '''\n self.ss = amp * pyl.cos(2.0 * pyl.pi * fr * self.tiempo + fas)\n self.ff = pyl.fft(self.ss) / self.N\n return(None)\n\n def white_noise(self, med=0.0, sig=1.0):\n u'''\n White noise signal with mean med and sigma sig\n '''\n self.ss = pyl.random.normal(med, sig, self.tiempo.size)\n self.ff = pyl.fft(self.ss) / self.N\n return(None)\n\n def gabor(self, amp=1.0, fr=1000., pos=0.0, sig=1.0, fas=0.0):\n tt = (self.tiempo - pos) / sig\n self.tone(fr, 1.0, 
fas)\n self.ss *= amp * pyl.exp(-tt * tt)\n self.ff = pyl.fft(self.ss) / self.N\n return(None)\n\n def pulse(self, amp=1.0, pos=0.5, wid=0.5):\n u'''\n Pulse signal with amplitude, amp, center in time pos, and width, wid.\n '''\n self.ss = pyl.zeros(self.tiempo.size)\n for n, x in enumerate(self.tiempo):\n if((x > pos - wid / 2.) and (x < pos + wid / 2.)):\n self.ss[n] = amp\n elif ((x < pos - wid / 2.) or (x > pos + wid / 2.)):\n self.ss[n] = 0.0\n else:\n self.ss[n] = amp / 2.\n self.ff = pyl.fft(self.ss) / self.N\n return(None)\n u'''\n From here ahead are the operators definitions.\n '''\n\n def __add__(self, simsig):\n if(type(simsig) == int or type(simsig) ==\n float or type(simsig) == complex):\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss + simsig\n else:\n if (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1):\n print(u\"No se pueden sumar señales con \"\n u\"diferentes parámetros de simulación\")\n return\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss + simsig.ss\n resultado.ff = pyl.fft(resultado.ss) / self.N\n return(resultado)\n\n __radd__ = __add__\n\n def __sub__(self, simsig):\n if(type(simsig) == int or type(simsig)\n == float or type(simsig) == complex):\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss - simsig\n else:\n if (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1):\n print(\n u\"No se pueden sumar señales con diferentes parámetros de\"\n u\" simulación\")\n return\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss - simsig.ss\n resultado.ff = pyl.fft(resultado.ss) / self.N\n return(resultado)\n\n def __neg__(self):\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = -self.ss\n resultado.ff = -self.ff\n return(resultado)\n\n def __mul__(self, simsig):\n if(type(simsig) == int or type(simsig)\n == float or type(simsig) == complex):\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss * simsig\n else:\n if (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1) or \\\n (self.t1 != simsig.t1):\n print(\n u\"No se pueden multiplicar señales con diferentes\"\n u\" parámetros de simulación\")\n return\n resultado = Signal(self.t1, self.t2, self.fm)\n resultado.ss = self.ss * simsig.ss\n resultado.ff = pyl.fft(resultado.ss) / self.N\n return (resultado)\n\n __rmul__ = __mul__\n u'''\n From here ahead are the painting methods\n '''\n\n def pinta(self, titulo=u'sin titulo', texto=u' ', fxlim=None, xlim=None,\n ylim=None, fylim=None):\n x = self.tiempo\n y = self.ss\n font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 16,\n }\n f, (ax1, ax2) = pyl.subplots(2, 1)\n ax1.plot(x, y, 'b', label=texto)\n ax1.plot([min(x), max(x), pyl.nan, 0.0, 0.0], [\n 0.0, 0.0, pyl.nan, min(y) - 0.1, max(y) + 0.1], 'k--')\n ax1.set_title(titulo, fontdict=font)\n # text(2, 0.65, texto, fontdict=font)\n ax1.set_xlabel('time [s]', fontdict=font)\n ax1.set_ylabel('voltage [V])', fontdict=font)\n ax1.grid(True)\n ax1.set_ylim((min(y) - 0.1, max(y) + 0.1))\n ax1.legend()\n # if(modo=='pos'):\n # off=self.ff;\n # else:\n # off=array(list(self.ff[self.N/2:self.N])+list(self.ff[0:self.N/2]))\n ax2.set_xlabel('frequency [Hz]', fontdict=font)\n if fxlim is not None:\n ax2.set_xlim(fxlim)\n if xlim is not None:\n ax1.set_xlim(xlim)\n if fylim is not None:\n ax2.set_ylim(fylim)\n if ylim is not None:\n ax1.set_ylim(ylim)\n if self.logpl is True:\n ax2.plot(self.frec, 20.0 * 
pyl.log10(abs(self.ff)\n + pyl.spacing(0)))\n ax2.set_ylabel('power [dBV]', fontdict=font)\n else:\n ax2.plot(self.frec, abs(self.ff))\n ax2.set_ylabel('Abs Amplitude [V]', fontdict=font)\n ax2.grid(True)\n # pyl.show()\n\n return(None)\n\n def specgram(self, tamven=tam_ven):\n self.tam_ven = tamven\n pyl.specgram(self.ss, self.tam_ven, self.fm)\n pyl.show()\n return(None)\n\n\nclass LTI_System:\n fm = 44100.0 # Frecuencia de muestreo [mue/s]\n T = 0. # Periodo de simulación [s]\n N = 0. # Número total de muestras [mue]\n res_frec = 0. # Resolución en frecuencia [Hz]\n tiempo = pyl.array([]) # Array de tiempos [s]\n frec = pyl.array([]) # Array de frecuencias centradas [Hz]\n # Respuesta impulsiva del sistema [Cociente unidades de salida/entrada]\n h = pyl.array([])\n # Función de transferencia [Cociente unidades de salida/entrada]\n H = pyl.array([])\n\n def __init__(self, T=1.0, fm=44100.0):\n self.fm = fm\n self.T = T\n self.N = T * fm\n self.Tau = 1 / fm\n self.tiempo = pyl.arange(0, self.T, self.Tau)\n self.res_frec = self.fm / self.N\n self.frec = pyl.arange(-self.fm / 2., self.fm / 2., self.res_frec)\n self.frec = pyl.concatenate(\n (self.frec[self.N / 2:self.N], self.frec[0:self.N / 2]))\n self.h = pyl.zeros(self.N)\n self.H = pyl.zeros(self.N, dtype='complex')\n return(None)\n\n def retraso_temporal(self, t0=0.1):\n u''' Sistema de retraso temporal\n '''\n self.H = pyl.exp(-1.0j * t0 * 2.0 * pyl.pi * self.frec)\n self.h = pyl.ifft(self.H * self.N).real\n return(None)\n\n def filtroPasaBajoIdeal(self, frcor=1000):\n for n, frec in enumerate(self.frec):\n if abs(frec) < frcor:\n self.H[n] = 1\n elif abs(frec) > frcor:\n self.H[n] = 0\n elif frec == frcor:\n self.H[n] = 0.5 ** 0.5\n\n self.h = pyl.ifft(self.H * self.N).real\n\n def __call__(self, ss):\n out = Signal(t1=ss.t1, t2=ss.t2, fm=ss.fm)\n out.ff = ss.ff * self.H\n out.ss = pyl.ifft(out.ff * out.N).real\n return(out)\n\n def filtroPasivoPasaBajos(self, R, L, C):\n u\"\"\"Filtro nº 7\"\"\"\n w = 2 * pyl.pi * self.frec\n self.H = 1 / ((R * 1j * w * C) - ((w ** 2) * C * L) + 1)\n # self.H = (1 / 1j * w * C)*(1 / (R + 1j*w*L + (1 / 1j * w * C)))\n self.h = pyl.ifft(self.H * self.N)\n return(None)\n","sub_path":"simdaq.py","file_name":"simdaq.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"419308482","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nExtra Functions for data manipulation\r\n@author: joeyp\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef timestamp_col(df):\r\n \"\"\"\r\n This function takes seperated time columns and combines them into a \r\n pandas.Timestamp() and replaces the seperated time columns with one \r\n \"TIMESTAMP\" column.\r\n\r\n Parameters\r\n ----------\r\n df : pandas.DataFrame()\r\n Dataframe that contains seperated time columns. 
See drop_col below.\r\n\r\n Returns\r\n -------\r\n df : pandas.DataFrame()\r\n Same dataframe as about but now with one time column: \"TIMESTAMP\"\r\n\r\n \"\"\"\r\n drop_col = [\"YYYY\", \"MM\", \"DD\", \"Hr\", \"Min\", \"Sec\"]\r\n timestamp_lst = list(np.full(len(df), np.nan))\r\n for t in range(len(df)):\r\n timestamp_lst[t] = pd.Timestamp(str(df[\"YYYY\"][t])+\"-\"+str(df[\"MM\"][t])+\"-\"+str(df[\"DD\"][t]) +\" \"+str(df[\"Hr\"][t])+\":\"+str(df[\"Min\"][t])+\":\"+str(df[\"Sec\"][t]),freq = \".1S\")\r\n \r\n df.insert(0, column= \"TIMESTAMP\", value = timestamp_lst)\r\n df = df.drop(drop_col, axis=1)\r\n \r\n return df\r\n\r\ndef scalar_wind_tilt_correction(u_i, v_i, theta = 135):\r\n \"\"\"\r\n This function takes U and V wind components and will correct by a \r\n specified amount. Default is 135 due to SERDP Flux tower.\r\n \r\n Inputs:\r\n u_i - the U wind component \r\n v_i - the V wind component \r\n theta - angle of desired correction in degrees (default 135 due to the\r\n march 2019 flux tower)\r\n \r\n Outputs:\r\n u_f - corrected U wind component\r\n v_f - corrected V wind component\r\n \"\"\"\r\n \r\n u_f = u_i * np.cos(theta*np.pi/180) - v_i * np.sin(theta*np.pi/180)\r\n v_f = u_i * np.sin(theta*np.pi/180) + v_i * np.cos(theta*np.pi/180)\r\n \r\n return u_f, v_f\r\n \r\ndef df_wind_tilt_correction(df, theta = 135, U_col = \"U(19m)\", V_col = \"V(19m)\"):\r\n \"\"\"\r\n This function combined with the 'scalar_wind_tilt_correction' function will\r\n correct a data frame with U or V columns and apply the angle of correction.\r\n \r\n Inputs:\r\n df - pandas dataframe containing the U and V columns\r\n theta - angle of desired correction in degrees (default \r\n +135 for SERDP March 2019 Flux tower)\r\n U_col - the column name containing the U wind components ( default \r\n \"U(19)\" for SERDP March 2019 Flux tower)\r\n V_col - the column name containing the V wind components ( default \r\n \"U(19)\" for SERDP March 2019 Flux tower)\r\n \r\n Outputs:\r\n df - The pandas dataframe with the corrected wind columns\r\n \"\"\"\r\n u_list, v_list = np.full(len(df), np.nan), np.full(len(df), np.nan)\r\n for i in range(len(df)):\r\n u_list[i], v_list[i] = scalar_wind_tilt_correction(float(df[U_col][i]),\\\r\n float(df[V_col][i]),theta)\r\n \r\n df[U_col], df[V_col] = list(u_list), list(v_list)\r\n\r\n return df\r\n\r\ndef formater(df,columns):\r\n \"\"\"\r\n This function takes a dataframe with timestamp columns and data columns and \r\n formats them so that each column looks like this: ##.## or -#.##\r\n \r\n input:\r\n df - pandas dataframe with the separated time columns and data columns\r\n columns - the columns that are not the time columns but are desired to \r\n be formated\r\n output:\r\n df - the formated dataframe \r\n \"\"\"\r\n \r\n for col in columns:\r\n lst_df=list(df[str(col)])\r\n for i in range(len(lst_df)):\r\n lst_df[i] = \"{:2.2f}\".format(float(lst_df[i])).zfill(5)\r\n df[col]= lst_df\r\n \r\n year_lst, month_lst,day_lst= list(df[\"YYYY\"]), list(df[\"MM\"]), list(df[\"DD\"])\r\n hour_lst, min_lst, second_lst = list(df[\"Hr\"]), list(df[\"Min\"]), list(df[\"Sec\"])\r\n for i in range(len(df)):\r\n year_lst[i] = \"{:.0f}\".format(float(year_lst[i])).zfill(4)\r\n month_lst[i] = \"{:.0f}\".format(float(month_lst[i])).zfill(2)\r\n day_lst[i] = \"{:.0f}\".format(float(day_lst[i])).zfill(2)\r\n hour_lst[i] = \"{:.0f}\".format(float(hour_lst[i])).zfill(2)\r\n min_lst[i] = \"{:.0f}\".format(float(min_lst[i])).zfill(2)\r\n second_lst[i]= 
\"{:.1f}\".format(float(second_lst[i])).zfill(4)\r\n \r\n df[\"YYYY\"] = year_lst\r\n df[\"MM\"] = month_lst\r\n df[\"DD\"] = day_lst\r\n df[\"Hr\"] = hour_lst\r\n df[\"Min\"] = min_lst\r\n df[\"Sec\"] = second_lst\r\n \r\n return df\r\n\r\ndef timestamp_correction(df):\r\n \"\"\"\r\n This function takes a df with a messed up timestamp column and creates one \r\n with full timestamps \r\n input:\r\n df - the pandas dataframe that has the messed up timestamp columns\r\n \r\n output:\r\n df - the dataframe with the fixed timestamp column\r\n \"\"\"\r\n \r\n time=list(df[\"TIMESTAMP\"])\r\n ### initialize the t_d\r\n for i in range(len(time)):\r\n if len(time[i]) > 8:\r\n t_d= time[i].replace(\":\",\" \").split()[:2]\r\n break\r\n \r\n for i in range(len(time)):\r\n\r\n if len(time[i]) > 8:\r\n t_d= time[i].replace(\":\",\" \").split()[:2]\r\n\r\n if i !=len(df)-1 and len(time[i])>8:\r\n t_s = time[i+1].split(\".\")[0]+\".0\"\r\n\r\n time[i] = t_d[0] +\" \"+t_d[1]+\":\"+t_s \r\n\r\n else:\r\n time[i]= t_d[0] +\" \"+ t_d[1]+\":\"+time[i]\r\n\r\n df[\"TIMESTAMP\"]=time\r\n df.drop(df.tail(1).index,inplace=True)\r\n df[\"TIMESTAMP\"] = pd.to_datetime(df['TIMESTAMP']) \r\n \r\n return df\r\n\r\ndef repeat(df):\r\n \r\n \"\"\"\r\n This function takes a df with a \"TIMESTAMP\" column and checks for \r\n repeated timestamps. If it finds repeats, it will return the index of all the \r\n repeated times and print out how many and what times it spans. If it \r\n doesn't find one it will return an index of zero's \r\n \r\n input:\r\n df - pandas dataframe to be checked\r\n \r\n output:\r\n repeat_index - A list of the index's that have repeated times in this\r\n format: [[],[]] \r\n \"\"\"\r\n #### Check for repeated times\r\n lst=list(df[\"TIMESTAMP\"])\r\n u= np.unique(lst)\r\n if len(u)!=len(df):\r\n \r\n repeat_index, repeat_1, repeat_2 = [], [], []\r\n for i in range(len(u)):\r\n ind = df.index[df[\"TIMESTAMP\"]==u[i]].tolist()\r\n if len(ind)>1:\r\n repeat_index.append(ind)\r\n repeat_1.append(df[\"TIMESTAMP\"][ind[0]])\r\n repeat_2.append(df[\"TIMESTAMP\"][ind[-1]])\r\n if len(repeat_index)>0:\r\n print(\"Yikes! Number of repeated times: \",len(repeat_index),)\r\n print(\"Start:\", repeat_index[0],\"End:\",repeat_index[-1])\r\n print(\"Time stamp repeats:\",repeat_1[0],\"-\", repeat_2[-1])\r\n \r\n return repeat_index\r\n else:\r\n print(\"Hurray! No time Repeats\")\r\n \r\n return [[0],[0]]\r\n \r\ndef timestamp_matcher(df_names, file_num):\r\n \"\"\"\r\n Takes a list of dataframes, prints the start and ends of all the files,\r\n then find the earliest timestamp that all dataframes have. 
Returns the \r\n start and end time that can be used to trim the dataframes\r\n \r\n input: \r\n df_names - list of dataframes \r\n file_num - list of names for the files, used only for the print\r\n \r\n output:\r\n time-start - the starting timestampt that works with all files \r\n time-end - the ending timestampt that works with all files\r\n \"\"\"\r\n \r\n min_lst, max_lst =[],[]\r\n\r\n for i in df_names:\r\n min_lst.append(i[\"TIMESTAMP\"].min())\r\n max_lst.append(i[\"TIMESTAMP\"].max())\r\n\r\n fmt = \"File: {} | Start: {} | End: {}\" \r\n for i in range(len(min_lst)):\r\n print(fmt.format(file_num[i], min_lst[i],max_lst[i] ))\r\n\r\n time_start, time_end = max(min_lst), min(max_lst) \r\n print()\r\n print(\"Start timestamp Pulled:\",time_start, \"| End Timestamp Pulled:\",\\\r\n time_end)\r\n\r\n finder = ['5T', '30S','S','.1S']\r\n for n in range(len(finder)):\r\n test_start=list(pd.date_range(start=time_start, end=time_end,\\\r\n freq = finder[n]))\r\n test_end = test_start[::-1]\r\n time=[]\r\n for t in range(len(test_start)): \r\n for df in range(len(df_names)):\r\n if test_start[t] not in list(df_names[df][\"TIMESTAMP\"]):\r\n break\r\n else:\r\n time.append(test_start[t])\r\n break\r\n if len(time)==1:\r\n if t ==0 or n == len(finder)-1:\r\n time_start = test_start[t]\r\n else:\r\n time_start = test_start[t-1]\r\n break\r\n\r\n\r\n time=[]\r\n for t in range(len(test_end)):\r\n for df in range(len(df_names)):\r\n if test_end[t] not in list(df_names[df][\"TIMESTAMP\"]):\r\n break\r\n else:\r\n time.append(test_end[t])\r\n break\r\n if len(time)==1:\r\n if t ==0 or n == len(finder)-1:\r\n time_end = test_end[t]\r\n else:\r\n time_end = test_end[t-1]\r\n break\r\n print()\r\n print(\"Timestamp that can actually be used to trim due to gaps:\")\r\n print(\"Start Time:\", str(time_start), \"| End Time:\", str(time_end))\r\n \r\n return time_start, time_end","sub_path":"Unused_Data_Functions.py","file_name":"Unused_Data_Functions.py","file_ext":"py","file_size_in_byte":9429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"271533916","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCode to load data and to create batches of 2D slices from 3D images.\n\nInfo:\nDimensions order for DeepLearningBatchGenerator: (batch_size, channels, x, y, [z])\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom os.path import join\nimport random\nfrom time import sleep\nimport numpy as np\nimport nibabel as nib\n\nfrom batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, BrightnessMultiplicativeTransform\nfrom batchgenerators.transforms.resample_transforms import ResampleTransform\nfrom batchgenerators.transforms.noise_transforms import GaussianNoiseTransform\nfrom 
batchgenerators.transforms.spatial_transforms import SpatialTransform, FlipVectorAxisTransform\nfrom batchgenerators.transforms.spatial_transforms import MirrorTransform\nfrom batchgenerators.transforms.crop_and_pad_transforms import PadToMultipleTransform\nfrom batchgenerators.transforms.sample_normalization_transforms import ZeroMeanUnitVarianceTransform\nfrom batchgenerators.transforms.abstract_transforms import Compose\nfrom batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter\nfrom batchgenerators.dataloading.data_loader import SlimDataLoaderBase\n\nfrom tractseg.libs.system_config import SystemConfig as C\nfrom tractseg.libs import dataset_utils\nfrom tractseg.libs import exp_utils\n\n\ndef load_training_data(Config, subject):\n \"\"\"\n Load data and labels for one subject from the training set. Cut and scale to make them have\n correct size.\n\n :param Config: config class\n :param subject: subject id (string)\n :return:\n \"\"\"\n for i in range(20):\n try:\n if Config.FEATURES_FILENAME == \"12g90g270g\":\n # if np.random.random() < 0.5:\n # data = nib.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, subjects[subject_idx], \"270g_125mm_peaks.nii.gz\")).get_data()\n # else:\n # data = nib.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, subjects[subject_idx], \"90g_125mm_peaks.nii.gz\")).get_data()\n\n rnd_choice = np.random.random()\n if rnd_choice < 0.33:\n data = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"270g_125mm_peaks.nii.gz\")).get_data()\n elif rnd_choice < 0.66:\n data = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"90g_125mm_peaks.nii.gz\")).get_data()\n else:\n data = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"12g_125mm_peaks.nii.gz\")).get_data()\n elif Config.FEATURES_FILENAME == \"T1_Peaks270g\":\n peaks = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"270g_125mm_peaks.nii.gz\")).get_data()\n t1 = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"T1.nii.gz\")).get_data()\n data = np.concatenate((peaks, t1), axis=3)\n elif Config.FEATURES_FILENAME == \"T1_Peaks12g90g270g\":\n rnd_choice = np.random.random()\n if rnd_choice < 0.33:\n peaks = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"270g_125mm_peaks.nii.gz\")).get_data()\n elif rnd_choice < 0.66:\n peaks = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"90g_125mm_peaks.nii.gz\")).get_data()\n else:\n peaks = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"12g_125mm_peaks.nii.gz\")).get_data()\n t1 = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, \"T1.nii.gz\")).get_data()\n data = np.concatenate((peaks, t1), axis=3)\n else:\n data = nib.load(\n join(C.DATA_PATH, Config.DATASET_FOLDER, subject, Config.FEATURES_FILENAME + \".nii.gz\")).get_data()\n\n break\n except IOError:\n exp_utils.print_and_save(Config, \"\\n\\nWARNING: Could not load file. 
Trying again in 20s (Try number: \" + str(i) + \").\\n\\n\")\n exp_utils.print_and_save(Config, \"Sleeping 20s\")\n sleep(20)\n data = np.nan_to_num(data) # Needed otherwise not working\n data = dataset_utils.scale_input_to_unet_shape(data, Config.DATASET, Config.RESOLUTION) # (x, y, z, channels)\n\n seg = nib.load(join(C.DATA_PATH, Config.DATASET_FOLDER, subject, Config.LABELS_FILENAME + \".nii.gz\")).get_data()\n seg = np.nan_to_num(seg)\n if Config.LABELS_FILENAME not in [\"bundle_peaks_11_808080\", \"bundle_peaks_20_808080\", \"bundle_peaks_808080\",\n \"bundle_masks_20_808080\", \"bundle_masks_72_808080\", \"bundle_peaks_Part1_808080\",\n \"bundle_peaks_Part2_808080\", \"bundle_peaks_Part3_808080\", \"bundle_peaks_Part4_808080\"]:\n if Config.DATASET in [\"HCP_2mm\", \"HCP_2.5mm\", \"HCP_32g\"]:\n # By using \"HCP\" but lower resolution scale_input_to_unet_shape will automatically downsample the HCP sized seg_mask to the lower resolution\n seg = dataset_utils.scale_input_to_unet_shape(seg, \"HCP\", Config.RESOLUTION)\n else:\n seg = dataset_utils.scale_input_to_unet_shape(seg, Config.DATASET, Config.RESOLUTION) # (x, y, z, classes)\n\n return data, seg\n\n\nclass BatchGenerator2D_Nifti_random(SlimDataLoaderBase):\n '''\n Randomly selects subjects and slices and creates batch of 2D slices.\n\n Takes image IDs provided via self._data, randomly selects one ID,\n loads the nifti image and randomly samples 2D slices from it.\n\n Timing:\n About 2.5s per 54-batch 75 bundles 1.25mm. ?\n About 2s per 54-batch 45 bundles 1.25mm.\n '''\n def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n self.Config = None\n\n def generate_train_batch(self):\n subjects = self._data[0]\n subject_idx = int(random.uniform(0, len(subjects))) # len(subjects)-1 not needed because int always rounds to floor\n\n data, seg = load_training_data(self.Config, subjects[subject_idx])\n\n slice_idxs = np.random.choice(data.shape[0], self.batch_size, False, None)\n x, y = dataset_utils.sample_slices(data, seg, slice_idxs,\n training_slice_direction=self.Config.TRAINING_SLICE_DIRECTION,\n labels_type=self.Config.LABELS_TYPE)\n\n data_dict = {\"data\": x, # (batch_size, channels, x, y, [z])\n \"seg\": y} # (batch_size, channels, x, y, [z])\n return data_dict\n\n\nclass BatchGenerator2D_Npy_random(SlimDataLoaderBase):\n '''\n Takes image ID provided via self._data, loads the Npy (numpy array) image and randomly samples 2D slices from it.\n\n Needed for fusion training.\n\n Timing:\n About 4s per 54-batch 75 bundles 1.25mm.\n About 2s per 54-batch 45 bundles 1.25mm.\n '''\n def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n self.Config = None\n\n def generate_train_batch(self):\n\n subjects = self._data[0]\n subject_idx = int(random.uniform(0, len(subjects))) # len(subjects)-1 not needed because int always rounds to floor\n\n if self.Config.TYPE == \"combined\":\n if np.random.random() < 0.5:\n data = np.load(join(C.DATA_PATH, \"HCP_fusion_npy_270g_125mm\", subjects[subject_idx], \"270g_125mm_xyz.npy\"), mmap_mode=\"r\")\n else:\n data = np.load(join(C.DATA_PATH, \"HCP_fusion_npy_32g_25mm\", subjects[subject_idx], \"32g_25mm_xyz.npy\"), mmap_mode=\"r\")\n data = np.reshape(data, (data.shape[0], data.shape[1], data.shape[2], data.shape[3] * data.shape[4]))\n seg = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, subjects[subject_idx], self.Config.LABELS_FILENAME + \".npy\"), mmap_mode=\"r\")\n else:\n data = np.load(join(C.DATA_PATH, 
self.Config.DATASET_FOLDER, subjects[subject_idx], self.Config.FEATURES_FILENAME + \".npy\"), mmap_mode=\"r\")\n seg = np.load(join(C.DATA_PATH, self.Config.DATASET_FOLDER, subjects[subject_idx], self.Config.LABELS_FILENAME + \".npy\"), mmap_mode=\"r\")\n\n data = np.nan_to_num(data)\n seg = np.nan_to_num(seg)\n\n slice_idxs = np.random.choice(data.shape[0], self.batch_size, False, None)\n x, y = dataset_utils.sample_slices(data, seg, slice_idxs,\n training_slice_direction=self.Config.TRAINING_SLICE_DIRECTION,\n labels_type=self.Config.LABELS_TYPE)\n\n data_dict = {\"data\": x, # (batch_size, channels, x, y, [z])\n \"seg\": y} # (batch_size, channels, x, y, [z])\n return data_dict\n\n\nclass DataLoaderTraining:\n\n def __init__(self, Config):\n self.Config = Config\n\n def _augment_data(self, batch_generator, type=None):\n\n if self.Config.DATA_AUGMENTATION:\n num_processes = 8 # 6 is a bit faster than 16\n else:\n num_processes = 6\n\n tfs = [] #transforms\n\n if self.Config.NORMALIZE_DATA:\n tfs.append(ZeroMeanUnitVarianceTransform(per_channel=self.Config.NORMALIZE_PER_CHANNEL))\n\n if self.Config.DATASET == \"Schizo\" and self.Config.RESOLUTION == \"2mm\":\n tfs.append(PadToMultipleTransform(16))\n\n if self.Config.DATA_AUGMENTATION:\n if type == \"train\":\n # scale: inverted: 0.5 -> bigger; 2 -> smaller\n # patch_center_dist_from_border: if 144/2=72 -> always exactly centered; otherwise a bit off center (brain can get off image and will be cut then)\n\n if self.Config.DAUG_SCALE:\n center_dist_from_border = int(self.Config.INPUT_DIM[0] / 2.) - 10 # (144,144) -> 62\n tfs.append(SpatialTransform(self.Config.INPUT_DIM,\n patch_center_dist_from_border=center_dist_from_border,\n do_elastic_deform=self.Config.DAUG_ELASTIC_DEFORM, alpha=(90., 120.), sigma=(9., 11.),\n do_rotation=self.Config.DAUG_ROTATE, angle_x=(-0.8, 0.8), angle_y=(-0.8, 0.8),\n angle_z=(-0.8, 0.8),\n do_scale=True, scale=(0.9, 1.5), border_mode_data='constant',\n border_cval_data=0,\n order_data=3,\n border_mode_seg='constant', border_cval_seg=0, order_seg=0, random_crop=True))\n\n if self.Config.DAUG_RESAMPLE:\n tfs.append(ResampleTransform(zoom_range=(0.5, 1)))\n\n if self.Config.DAUG_NOISE:\n tfs.append(GaussianNoiseTransform(noise_variance=(0, 0.05)))\n\n if self.Config.DAUG_MIRROR:\n tfs.append(MirrorTransform())\n\n if self.Config.DAUG_FLIP_PEAKS:\n tfs.append(FlipVectorAxisTransform())\n\n #num_cached_per_queue 1 or 2 does not really make a difference\n batch_gen = MultiThreadedAugmenter(batch_generator, Compose(tfs), num_processes=num_processes,\n num_cached_per_queue=1, seeds=None)\n return batch_gen # data: (batch_size, channels, x, y), seg: (batch_size, channels, x, y)\n\n\n def get_batch_generator(self, batch_size=128, type=None, subjects=None):\n data = subjects\n seg = []\n\n if self.Config.TYPE == \"combined\":\n batch_gen = BatchGenerator2D_Npy_random((data, seg), batch_size=batch_size)\n else:\n batch_gen = BatchGenerator2D_Nifti_random((data, seg), batch_size=batch_size)\n # batch_gen = SlicesBatchGeneratorRandomNiftiImg_5slices((data, seg), batch_size=batch_size)\n\n batch_gen.Config = self.Config\n\n batch_gen = self._augment_data(batch_gen, type=type)\n\n return batch_gen\n\n\n\n############################################################################################################\n# Backup\n############################################################################################################\n\nclass BatchGenerator2D_Nifti_random_5slices(SlimDataLoaderBase):\n '''\n Randomly selects 
subjects and slices and creates batch of 2D slices (+2 slices above and below).\n\n Takes image ID provided via self._data, loads the nifti image and randomly samples 2D slices\n from it. Always adds 2 slices above and below.\n\n Timing:\n About 2.5s per 54-batch 75 bundles 1.25mm. ?\n About 2s per 54-batch 45 bundles 1.25mm.\n '''\n def __init__(self, *args, **kwargs):\n super(self.__class__, self).__init__(*args, **kwargs)\n self.Config = None\n\n def generate_train_batch(self):\n subjects = self._data[0]\n subject_idx = int(random.uniform(0, len(subjects))) # len(subjects)-1 not needed because int always rounds to floor\n\n data, seg = load_training_data(self.Config, subjects[subject_idx])\n\n slice_idxs = np.random.choice(data.shape[0], self.batch_size, False, None)\n\n # Randomly sample slice orientation\n slice_direction = int(round(random.uniform(0,2)))\n\n if slice_direction == 0:\n y = seg[slice_idxs, :, :].astype(self.Config.LABELS_TYPE)\n y = np.array(y).transpose(0, 3, 1, 2) # nr_classes channel has to be before with and height for DataAugmentation (bs, nr_of_classes, x, y)\n elif slice_direction == 1:\n y = seg[:, slice_idxs, :].astype(self.Config.LABELS_TYPE)\n y = np.array(y).transpose(1, 3, 0, 2)\n elif slice_direction == 2:\n y = seg[:, :, slice_idxs].astype(self.Config.LABELS_TYPE)\n y = np.array(y).transpose(2, 3, 0, 1)\n\n\n sw = 5 #slice_window (only odd numbers allowed)\n pad = int((sw-1) / 2)\n\n data_pad = np.zeros((data.shape[0]+sw-1, data.shape[1]+sw-1, data.shape[2]+sw-1, data.shape[3])).astype(data.dtype)\n data_pad[pad:-pad, pad:-pad, pad:-pad, :] = data #padded with two slices of zeros on all sides\n batch=[]\n for s_idx in slice_idxs:\n if slice_direction == 0:\n #(s_idx+2)-2:(s_idx+2)+3 = s_idx:s_idx+5\n x = data_pad[s_idx:s_idx+sw:, pad:-pad, pad:-pad, :].astype(np.float32) # (5, y, z, channels)\n x = np.array(x).transpose(0, 3, 1, 2) # channels dim has to be before width and height for Unet (but after batches)\n x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)\n batch.append(x)\n elif slice_direction == 1:\n x = data_pad[pad:-pad, s_idx:s_idx+sw, pad:-pad, :].astype(np.float32) # (5, y, z, channels)\n x = np.array(x).transpose(1, 3, 0, 2) # channels dim has to be before width and height for Unet (but after batches)\n x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)\n batch.append(x)\n elif slice_direction == 2:\n x = data_pad[pad:-pad, pad:-pad, s_idx:s_idx+sw, :].astype(np.float32) # (5, y, z, channels)\n x = np.array(x).transpose(2, 3, 0, 1) # channels dim has to be before width and height for Unet (but after batches)\n x = np.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) # (5*channels, y, z)\n batch.append(x)\n data_dict = {\"data\": np.array(batch), # (batch_size, channels, x, y, [z])\n \"seg\": y} # (batch_size, channels, x, y, [z])\n\n return data_dict\n\n\n","sub_path":"tractseg/data/data_loader_training.py","file_name":"data_loader_training.py","file_ext":"py","file_size_in_byte":16548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"76548709","text":"import requests, json, sys, os\r\n\r\ndef Track(target : str):\r\n try:\r\n resp = json.loads(requests.get(\"http://ip-api.com/json/\" + target).text)\r\n\r\n info = \"Tracking Results\".center(35, \"-\")\r\n info = info + \"\\nIP Address: {}\\n\".format(resp[\"query\"])\r\n info = info + \"Country: {}/{}\\n\".format(resp[\"country\"], 
resp[\"countryCode\"])\r\n info = info + \"Region: {}/{}\\n\".format(resp[\"regionName\"], resp[\"region\"])\r\n info = info + \"City: {}\\n\".format(resp[\"city\"])\r\n info = info + \"ZipCode: {}\\n\".format(resp[\"zip\"])\r\n info = info + \"Time Zone: {}\\n\".format(resp[\"timezone\"])\r\n info = info + \"Organization: {}\\n\".format(resp[\"org\"])\r\n info = info + \"ISP: {}\\n\".format(resp[\"isp\"])\r\n info = info + \"Latitude: {}\\n\".format(resp[\"lat\"])\r\n info = info + \"Longitude: {}\\n\".format(resp[\"lon\"])\r\n info = info + \"{}\\n\".format(resp[\"as\"])\r\n \r\n globe = \"\"\" ,,,,,, \r\n o#'9MMHb':'-,o, \r\n .oH\":HH$' \"' ' -*R&o, \r\n dMMM*\"\"'`' .oM\"HM?. \r\n ,MMM' \"HLbd< ?&H\\ \r\n .:MH .\"\\ ` MM MM&b \r\n. \"*H - &MMMMMMMMMH:\r\n. dboo MMMMMMMMMMMM.\r\n. dMMMMMMb *MMMMMMMMMP.\r\n. MMMMMMMP *MMMMMP .\r\n `#MMMMM MM6P , \r\n ' `MMMP\" HM*`, \r\n ' :MM .- , \r\n '. `#?.. . ..' \r\n -. . .- \r\n ''-.oo,oo.-'' \r\n\"\"\".split(\"\\n\")\r\n\r\n for line in globe:\r\n print(\" \" * 5 + line, end=\"\")\r\n try:\r\n if globe.index(line) > 1:\r\n print(\" \" * 5 + info.split(\"\\n\")[globe.index(line) - 2][:os.get_terminal_size().columns - 44], end=\"\")\r\n if len(info.split(\"\\n\")[globe.index(line) - 2]) >= os.get_terminal_size().columns - 44:\r\n print(\"...\", end=\"\")\r\n print(\"\")\r\n else:\r\n print(\"\")\r\n except Exception as e:\r\n print(\"\")\r\n except Exception as e:\r\n raise ValueError(\"Unable to locate {}. The specified host is probably unavaiable, inaccessible or doesn't exists.\".format(target))\r\n\r\nif __name__ == \"__main__\":\r\n print(\"\")\r\n if len(sys.argv) > 1:\r\n try:\r\n Track(sys.argv[1])\r\n except Exception as e:\r\n print(\"Error: {}\".format(str(e)))\r\n else:\r\n try:\r\n Track(\"\")\r\n except Exception as e:\r\n print(\"Error: {}\".format(str(e)))\r\n","sub_path":"Track.py","file_name":"Track.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"240047547","text":"import pandas as pd\nimport numpy as np\nimport nltk\nimport os\nfrom collections import Counter\nfrom six.moves import cPickle as pickle\n\nclass Preprocessor(object):\n\n TEST_PREFIX = 'test_'\n VOCABULARY_PREFIX = 'vocabulary_'\n TRAIN_PREFIX = 'train_'\n VALID_PREFIX = 'valid_'\n TEST_PREFIX = 'test_'\n \n UNK_ID = 1\n MAX_DATA_LENGTH = 200\n\n _PAD = ''\n _UNK = ''\n _EOS = ''\n\n def __init__(self, path, filename, vocabulary_size, train_size=0.6, \n valid_size=.2, max_data_length=MAX_DATA_LENGTH, pad = _PAD, \n unk = _UNK, eos = _EOS):\n self.path = path\n self.filename = filename\n self.vocabulary_size = vocabulary_size\n self.max_data_length = max_data_length\n self.train_size = train_size\n self.train_size = train_size\n self.valid_size = valid_size\n self.pad = pad\n self.unk = unk\n self.eos = eos\n\n self.separator = ','\n self._dictionary = {}\n self.tokenizer = nltk.TweetTokenizer()\n\n def read_data(self):\n self.data = pd.read_csv(self.path + self.filename + \".csv\")\n return self.data\n\n def _build_dictionary(self, data, data_column):\n all_text = []\n\n for sentence in data[data_column]:\n all_text.extend(self.tokenizer.tokenize(sentence))\n\n all_words = [(self.pad, -1), (self.unk, -1), (self.eos, -1)]\n all_words.extend(Counter(all_text).most_common(self.vocabulary_size - 3))\n\n for word in all_words:\n if word[0] not in self._dictionary:\n self._dictionary[word[0]] = len(self._dictionary)\n self.vocabulary_size = len(self._dictionary)\n\n 
print(\"Saving vocabulary...\")\n word_column = 'Word'\n vocabulary = pd.DataFrame(data=all_words, columns=[word_column, 'Frequency'])\n vocabulary.to_csv(self.path + self.VOCABULARY_PREFIX + \"frequency_\" + self.filename, sep=self.separator, index=False,\n encoding='utf-8')\n vocabulary[word_column].to_csv(self.path + self.VOCABULARY_PREFIX + self.filename, sep=self.separator, index=False,\n encoding='utf-8')\n return self._dictionary \n\n def preprocess(self, data_column, label_column):\n\n self.data_column = data_column\n self.label_column = label_column\n\n new_data = self.data[[data_column, label_column]].copy()\n new_data = new_data.loc[new_data[data_column].str.len() < self.max_data_length]\n \n print(\"Creating the dictionary...\")\n self._build_dictionary(new_data, data_column)\n \n print(\"Tokenize...\")\n new_data[data_column] = new_data[data_column].map(lambda x: self.tokenizer.tokenize(x))\n \n print(\"Replace the words with indexes...\")\n new_data[data_column] = new_data[data_column].map(\n lambda x: list(map(\n lambda x: self._dictionary[x] if x in self._dictionary else self.UNK_ID, x)))\n\n print(\"Convert labels...\")\n new_data[label_column] = new_data[label_column].apply(lambda x: 1 if x > 3 else 0)\n \n # print(\"Shuffle the data\")\n # new_data = new_data.iloc[np.random.permutation(len(new_data))]\n\n self.max_seq_len = new_data[data_column].map(len).max()\n self.new_data = new_data\n\n def save_data(self):\n # print(\"Creating train, validation and test set...\")\n train, valid, test = self.__train_validate_test_split(self.new_data)\n \n x_train = self.__create_np_array(train[self.data_column].values)\n y_train = train[self.label_column].values.reshape(-1, 1)\n\n x_valid = self.__create_np_array(valid[self.data_column].values)\n y_valid = valid[self.label_column].values.reshape(-1, 1)\n\n x_test = self.__create_np_array(test[self.data_column].values)\n y_test = test[self.label_column].values.reshape(-1, 1)\n # y_test = pd.get_dummies(test[self.label_column]).values.reshape(-1, 2)\n\n self.__save_to_file(x_train, y_train, self.path, self.TRAIN_PREFIX + self.filename)\n self.__save_to_file(x_valid, y_valid, self.path, self.VALID_PREFIX + self.filename)\n self.__save_to_file(x_test, y_test, self.path, self.TEST_PREFIX + self.filename)\n \n\n def __save_to_file(self, x, y, path, filename):\n pickle_file = os.path.join(path, filename)\n try:\n f = open(pickle_file, 'wb')\n save = {\n 'source': x,\n 'labels': y,\n }\n pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)\n f.close()\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\n def __create_np_array(self, data):\n # PAD the data\n data_matrix = np.zeros((data.shape[0], self.max_seq_len), dtype='int32')\n for i in range(data.shape[0]):\n data_matrix[i] = data[i][:self.max_seq_len] + [0] * (self.max_seq_len - len(data[i]))\n return data_matrix\n\n def __train_validate_test_split(self, data):\n size = len(data)\n train_end = int(self.train_size * size)\n valid_end = int(self.valid_size * size) + train_end\n train = data[:train_end]\n valid = data[train_end:valid_end]\n test = data[valid_end:]\n return train, valid, test\n","sub_path":"data/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"578748241","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 2 04:46:33 2016\n\n@author: oyu\n\"\"\"\nimport os\n# os.environ[\"CHAINER_TYPE_CHECK\"] = 
\"0\" #ここでオフに オンにしたかったら1にするかコメントアウト\nimport numpy as np\n# i = np.random()\n# np.random.seed()\nimport argparse\nimport chainer\nfrom chainer import cuda, serializers\nfrom tqdm import tqdm\nimport datetime\nimport importlib\nimport socket\nimport gc\nfrom mylib.my_functions import get_batch\nfrom mylib.my_logger import LOGGER\nfrom mylib.my_functions import copy_model\nfrom modelfile.model_at24 import SAF\n\nparser = argparse.ArgumentParser()\n\n# model selection\nparser.add_argument(\"-a\", \"--am\", type=str, default=\"model_pretrain24\",\n help=\"attention model\")\n# hyper parameters\nparser.add_argument(\"-e\", \"--epoch\", type=int, default=30,\n help=\"iterate training given epoch times\")\nparser.add_argument(\"-b\", \"--batch_size\", type=int, default=20,\n help=\"batch size\")\nparser.add_argument(\"-g\", \"--gpu\", type=int, default=-1,\n help=\"use gpu\")\n# load model id\n# log config\nparser.add_argument(\"-o\", \"--filename\", type=str, default=\"\",\n help=\"prefix of output file names\")\nparser.add_argument(\"-p\", \"--pre\", type=str, default=\"\",\n help=\"pre train\")\nargs = parser.parse_args()\n\nfile_id = args.filename\nn_epoch = args.epoch\ntrain_b = args.batch_size\ngpu_id = args.gpu\ncrop = 1\n\n# naruto ならGPUモード\nif socket.gethostname() == \"chainer\":\n gpu_id = 0\n log_dir = \"/home/y-murata/storage/traffic/pretrain/\"\n data_dir = \"/home/y-murata/traffic/data/\"\nelse:\n data_dir = \"C:/Users/waka-lab/Documents/data/data/\"\n log_dir = \"log/\"\n# load data\ndata_dir = data_dir + \"pretrain24_sp/\"\n# data_dir = data_dir + \"pretrain_24/\"\n\ndl = importlib.import_module(\"dataset.\" + \"pretrain24\")\ntrain_data = dl.MyDataset(data_dir, \"train\")\nval_data = dl.MyDataset(data_dir, \"test\")\n\nxp = cuda.cupy if gpu_id >= 0 else np\n\ndata_max = len(train_data)\ntest_max = len(val_data)\nnum_val = test_max\nnum_val_loop = 10 # val loop 10 times\n\nimg_size = 32\nn_target = 10\nnum_class = 5\ntarget_c = \"\"\n# test_b = test_max\n\n# モデルの作成\nmodel_file_name = args.am\n\nsss = importlib.import_module(\"modelfile.\" + model_file_name)\nmodel = sss.BASE()\n\nif len(args.pre) != 0:\n model_pretrain = SAF(n_out=5)\n serializers.load_npz('model/' + args.pre + '.model', model_pretrain)\n copy_model(model_pretrain, model)\n\n# オプティマイザの設定\noptimizer = chainer.optimizers.Adam()\noptimizer.setup(model)\n\n# gpuの設定\nif gpu_id >= 0:\n chainer.cuda.get_device_from_id(gpu_id).use()\n model.to_gpu()\n\n# log setting\nif file_id == \"\":\n file_id = datetime.datetime.now().strftime(\"%m%d%H%M%S\")\nlog_dir = log_dir + file_id + \"/\"\nos.mkdir(log_dir)\nlogger = LOGGER(log_dir, file_id, n_epoch=n_epoch)\n\nlogger.l_print(\"{} class recognition\\nclass:{} use traffic sign data set\".format(num_class, target_c))\nlogger.l_print(\"model:{}\".format(model_file_name))\nlogger.l_print(\"parameter\\n\")\nlogger.l_print(\"batch_size:{} crop:{}\".format(train_b, crop))\nlogger.l_print(\"log dir:{}\".format(log_dir))\nlogger.l_print(\"going to train {} epoch\".format(n_epoch))\nlogger.update_log()\n\nval_batch_size = int(num_val / num_val_loop)\ntrain_iterator = chainer.iterators.SerialIterator(train_data, train_b, shuffle=True)\ne_val_iterator = chainer.iterators.SerialIterator(val_data, val_batch_size, shuffle=True)\ne_train_iterator = chainer.iterators.SerialIterator(train_data, val_batch_size, shuffle=True)\n\nfor epoch in range(n_epoch):\n print(\"(epoch: {})\\n\".format(epoch + 1))\n for i in tqdm(range(0, data_max, train_b), ncols=60):\n model.cleargrads()\n with 
chainer.using_config('train', True):\n x, t = chainer.dataset.concat_examples(train_iterator.next(), device=gpu_id)\n loss = model(x, t)\n logger.set_loss(loss.data, epoch)\n loss.backward()\n optimizer.update()\n\n # evaluate\n train_acc = 0\n val_acc = 0\n for i in range(0, num_val_loop):\n with chainer.function.no_backprop_mode(), chainer.using_config('train', False):\n x, t = chainer.dataset.concat_examples(e_train_iterator.next(), device=gpu_id)\n train_acc += model(x, t)\n x, t = chainer.dataset.concat_examples(e_val_iterator.next(), device=gpu_id)\n val_acc += model(x, t)\n train_iterator.reset()\n e_train_iterator.reset()\n e_val_iterator.reset()\n # save accuracy\n logger.set_acc(train_acc / num_val, val_acc / num_val, epoch)\n logger.save_acc()\n logger.update_log()\n # save model\n if gpu_id >= 0:\n serializers.save_npz(log_dir + \"/\" + logger.best + file_id + '.model', model.to_cpu())\n model.to_gpu()\n else:\n serializers.save_npz(log_dir + \"/\" + logger.best + file_id + '.model', model)\n\n# logger.l_print(\"last acc:{} max_acc:{}\\n\".format(acc1_array[n_epoch - 1], max_acc))\n","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"100504620","text":"'''\nCreated on Jul 10, 2019\n\n@author: fsells\n'''\nimport datetime\nimport random\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom webapp import models\n\nfrom . import mydata_api \nfrom . import local_db_api\n\nclass Command(BaseCommand):\n help = 'copies data from MatrixCare to local DB'\n\n def add_arguments(self, parser):\n parser.add_argument('--start', action= 'store', help = 'first day of sweep mm/dd/yyyy, for testing',\n type=lambda s: datetime.datetime.strptime(s, '%m/%d/%Y'),\n default = datetime.date.today())\n parser.add_argument('--days', action= 'store', help = 'number of days for sweep, for testing', default=1, type=int)\n parser.add_argument('--fakeinput', action= 'store_true', help = 'create fake user entry for testing', default=False)\n \n def fake_user_input(self, hhdb, nos=0, blanks=0):\n hhdb.mark_current_inbed_yes()\n if blanks==0 and nos==0: return \n beds = models.BedCheck.objects.filter(Obsolete=0).order_by('unit', 'room', 'bed')\n for i, bed in enumerate(beds):\n print (i, bed)\n if i < nos:\n bed.inbed='No'\n bed.reason = random.choice(models.REASON_CHOICES)[0]\n bed.updatedby = 'fredtesting'\n bed.updatetime=datetime.datetime.now()\n bed.save()\n elif i < nos+blanks:\n bed.updatedby = 'fredtesting'\n bed.updatetime=datetime.datetime.now()\n bed.inbed = ''\n bed.save()\n else:\n break\n \n \n\n def handle(self, *args, **options):\n fake_input = options['fakeinput']\n print ('fakeit', fake_input)\n mydata = mydata_api.MyDataQueryManager()\n hhdb = local_db_api.HHDB()\n firstday = options['start'] \n ndays = options['days']\n almost_midnight = ' 11:45:00 PM'\n for n in range(ndays):\n sweeptime = firstday + datetime.timedelta(days=n)\n texttime = sweeptime.strftime('%m/%d/%Y') + almost_midnight\n beds = mydata.get_beds_x_patients( texttime)\n #for b in beds: print (b)\n hhdb.insert_bed_occupancy(beds)\n if fake_input:\n self.fake_user_input(hhdb, nos=3, blanks=2)\n print ('sweeptime={} nrecords={}'.format( texttime, len(beds)))\n\n print('sweep done')\n \n \n \n \n 
","sub_path":"sweep.py","file_name":"sweep.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"545469800","text":"#Application to convert a Product ID to a standard ISBN-10 Number\n\n#Function to detect only numbers are entered by the users\n\ndef allowedNumber(ID):\n for char in productID:\n if not char in '1234567890.':\n return print(\"These arent numbers\")\n return True\n\n\n#Start of main program\n\nuserInput=input(\"Welcome! Got a Product ID to be converted into an ISBN-10 number? Press Enter\")\n\nwhile userInput!=\"Q\":\n print(\"To get your ISBN ---> Press 1\")\n print(\"To quit ---> Press Q\")\n userInput=(input(\"Enter your choice\"+\" \")).upper()\n \n while userInput==\"1\" and userInput !=\"N\":\n productID=input(\"Enter your Product ID number\"+\" \")\n\n #Remove prefix\n value=productID[3:]\n\n if(allowedNumber(productID)):\n True\n \n\n if len(value) !=9:\n print(\"Incorrect Product ID number\")\n userInput=\"N\"\n else:\n totalSum = 0\n for i in range(9):\n if 0 <= int(value[i]) <= 9: \n totalSum += int(value[i]) * (10 - i)\n else:\n False\n\n #next highest value multiple of 11\n x=(int((totalSum/11))+1)*11\n\n #error is the error control digit\n error=x-totalSum\n\n if error ==10:\n print('\\n'\"Your ISBN is\"+\" \"+value+\"x\")\n elif totalSum%11==0:\n print('\\n'\"Your ISBN is\"+\" \"+value+\"0\")\n else:\n print('\\n'\"Your ISBN is\"+\" \"+value+str(error))\n\n\n userInput=input(\"Enter N to return to main menu\"+\" \"+\"Enter 1 to convert more Product IDs\"+\" \").upper()\n\n","sub_path":"isbn.py","file_name":"isbn.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"479059399","text":"from collections import defaultdict\nclass Solution:\n def canFinish(self, numCourses , prerequisites ):\n indegree =defaultdict(set)\n outdegree = defaultdict(set) \n for x, y in prerequisites:\n outdegree[y].add(x)\n indegree[x].add(y)\n connection_removed =0 \n indegree_zero =[]\n for i in range (numCourses):\n if not indegree[i]:\n indegree_zero.append(i)\n connection_removed +=1\n while indegree_zero:\n node = indegree_zero.pop()\n for x in outdegree[node]:\n indegree[x].remove(node)\n if not indegree[x]:\n indegree_zero.append(x)\n connection_removed += 1\n return connection_removed == numCourses\ns = Solution()\nprint(s.canFinish(2, [[1,0]]))","sub_path":"May Leetcode Challenge/29_Course_Schedule.py","file_name":"29_Course_Schedule.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"125765482","text":"import logging\nimport os\n\nfrom cliff.show import ShowOne\nfrom cliff.lister import Lister\nfrom cliff.command import Command\n\nfrom watcherclient import client\nkwargs = {'os_username': 'admin',\n 'os_password': 'password',\n 'os_auth_url': 'http://172.16.1.1:5000/',\n 'os_tenant_name': 'admin'}\nwatcher = client.get_client(1, **kwargs)\n\n\nclass TestCmd(Command):\n \"A simple command that prints a message.\"\n\n log = logging.getLogger(__name__)\n\n def get_parser(self, prog_name):\n parser = super(TestCmd, self).get_parser(prog_name)\n return parser\n\n def take_action(self, parsed_args):\n self.log.info('sending greeting')\n self.log.debug('debugging')\n self.app.stdout.write('hi!\\n')\n\n\nclass TestList(Lister):\n \"Show a list of test\"\n\n log = logging.getLogger(__name__)\n\n def 
get_parser(self, prog_name):\n parser = super(TestList, self).get_parser(prog_name)\n return parser\n\n def take_action(self, parsed_args):\n data = watcher.strategy.list()\n return (('UUID', 'Name'),\n ((item.uuid, item.display_name) for item in data))\n\n\nclass TestShow(ShowOne):\n \"Show detail information of test\"\n\n log = logging.getLogger(__name__)\n\n def get_parser(self, prog_name):\n parser = super(TestShow, self).get_parser(prog_name)\n return parser\n\n def take_action(self):\n columns = ('ID',\n 'Name',\n 'Value'\n )\n data = ('1234',\n 'Test1',\n 'testestes'\n )\n\n return (columns, data)\n","sub_path":"baseclient/v1/test_shell.py","file_name":"test_shell.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"262841","text":"import numpy as np\nimport pytest\nimport xarray as xr\nfrom affine import Affine\nfrom shapely.geometry import Polygon\n\nfrom regionmask import Regions\nfrom regionmask.core.mask import (\n _determine_method,\n _inject_mask_docstring,\n _mask_pygeos,\n _mask_rasterize,\n _mask_rasterize_no_offset,\n _mask_shapely,\n _transform_from_latlon,\n)\nfrom regionmask.core.utils import _wrapAngle, create_lon_lat_dataarray_from_bounds\n\nfrom . import has_pygeos, requires_pygeos\nfrom .utils import (\n dummy_lat,\n dummy_lon,\n dummy_outlines,\n dummy_outlines_poly,\n dummy_region,\n expected_mask_2D,\n expected_mask_3D,\n)\n\nMASK_FUNCS = [\n _mask_rasterize,\n _mask_shapely,\n pytest.param(_mask_pygeos, marks=requires_pygeos),\n]\n\n\nMASK_METHODS = [\n \"rasterize\",\n \"shapely\",\n pytest.param(\"pygeos\", marks=requires_pygeos),\n]\n\nMASK_METHODS_IRREGULAR = [\n \"shapely\",\n pytest.param(\"pygeos\", marks=requires_pygeos),\n]\n\n# =============================================================================\n\n\n@pytest.mark.parametrize(\"func\", MASK_FUNCS)\ndef test_mask_func(func):\n\n # standard\n result = func(dummy_lon, dummy_lat, dummy_outlines_poly, numbers=[0, 1, 2])\n expected = expected_mask_2D()\n assert np.allclose(result, expected, equal_nan=True)\n\n result = func(dummy_lon, dummy_lat, dummy_outlines_poly, numbers=[0, 1, 2], fill=5)\n expected = expected_mask_2D(fill=5)\n assert np.allclose(result, expected, equal_nan=True)\n\n result = func(dummy_lon, dummy_lat, dummy_outlines_poly, numbers=[5, 6, 7])\n expected = expected_mask_2D(a=5, b=6)\n assert np.allclose(result, expected, equal_nan=True)\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n pytest.param(_mask_rasterize, marks=pytest.mark.xfail),\n _mask_shapely,\n pytest.param(_mask_pygeos, marks=requires_pygeos),\n ],\n)\ndef test_mask_wrong_number_fill(func):\n\n with pytest.raises(ValueError, match=\"The fill value should not\"):\n _mask_shapely(\n dummy_lon, dummy_lat, dummy_outlines_poly, numbers=[0, 1, 2], fill=0\n )\n\n with pytest.raises(ValueError, match=\"`numbers` and `coords` must have\"):\n _mask_shapely(dummy_lon, dummy_lat, dummy_outlines, numbers=[5])\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask(method):\n\n expected = expected_mask_2D()\n result = dummy_region.mask(dummy_lon, dummy_lat, method=method).values\n assert np.allclose(result, expected, equal_nan=True)\n\n\n@pytest.mark.skipif(has_pygeos, reason=\"Only errors if pygeos is missing\")\ndef test_missing_pygeos_error():\n\n with pytest.raises(ModuleNotFoundError, match=\"No module named 'pygeos'\"):\n dummy_region.mask(dummy_lon, dummy_lat, 
method=\"pygeos\")\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_xarray(method):\n\n expected = expected_mask_2D()\n result = dummy_region.mask(dummy_lon, dummy_lat, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat.values, dummy_lat))\n assert np.all(np.equal(result.lon.values, dummy_lon))\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_poly_z_value(method):\n\n outl1 = Polygon(((0, 0, 1), (0, 1, 1), (1, 1.0, 1), (1, 0, 1)))\n outl2 = Polygon(((0, 1, 1), (0, 2, 1), (1, 2.0, 1), (1, 1, 1)))\n outlines = [outl1, outl2]\n\n r_z = Regions(outlines)\n\n expected = expected_mask_2D()\n result = r_z.mask(dummy_lon, dummy_lat, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat.values, dummy_lat))\n assert np.all(np.equal(result.lon.values, dummy_lon))\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_xarray_name(method):\n\n msk = dummy_region.mask(dummy_lon, dummy_lat, method=method)\n assert msk.name == \"region\"\n\n\n@pytest.mark.parametrize(\"ndims\", [(2, 1), (1, 2)])\ndef test_mask_unequal_ndim(ndims):\n\n lon = np.arange(ndims[0] * 2).reshape(ndims[0] * (2,))\n lat = np.arange(ndims[1] * 2).reshape(ndims[1] * (2,))\n\n with pytest.raises(ValueError, match=\"Equal number of dimensions required\"):\n dummy_region.mask(lon, lat)\n\n\ndef test_mask_unequal_2D_shapes():\n\n lon = np.zeros(shape=(2, 3))\n lat = np.zeros(shape=(2, 4))\n\n with pytest.raises(\n ValueError, match=\"2D lon and lat coordinates need to have the same shape\"\n ):\n dummy_region.mask(lon, lat)\n\n\n@pytest.mark.parametrize(\"ndim\", [0, 3, 4])\ndef test_mask_ndim_ne_1_2(ndim):\n\n lon = np.zeros(shape=ndim * (2,))\n lat = np.zeros(shape=ndim * (2,))\n\n with pytest.raises(ValueError, match=\"1D or 2D data required\"):\n dummy_region.mask(lon, lat)\n\n\n@pytest.mark.parametrize(\"lon_name\", [\"lon\", \"longitude\"])\n@pytest.mark.parametrize(\"lat_name\", [\"lat\", \"latitude\"])\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_obj(lon_name, lat_name, method):\n\n expected = expected_mask_2D()\n\n obj = {lon_name: dummy_lon, lat_name: dummy_lat}\n result = dummy_region.mask(\n obj, method=method, lon_name=lon_name, lat_name=lat_name\n ).values\n\n assert np.allclose(result, expected, equal_nan=True)\n\n\n@pytest.mark.filterwarnings(\"ignore:No gridpoint belongs to any region.\")\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_wrap(method):\n\n # create a test case where the outlines and the lon coordinates\n # are different\n\n # outline 0..359.9\n outl1 = ((359, 0), (359, 1), (0, 1.0), (0, 0))\n outl2 = ((359, 1), (359, 2), (0, 2.0), (0, 1))\n outlines = [outl1, outl2]\n\n r = Regions(outlines)\n\n # lon -180..179.9\n lon = [-1.5, -0.5]\n lat = [0.5, 1.5]\n\n result = r.mask(lon, lat, method=method, wrap_lon=False).values\n assert np.all(np.isnan(result))\n\n # this is the wrong wrapping\n result = r.mask(lon, lat, method=method, wrap_lon=180).values\n assert np.all(np.isnan(result))\n\n expected = expected_mask_2D()\n\n # determine the wrap automatically\n result = r.mask(lon, lat, method=method, wrap_lon=True).values\n assert np.allclose(result, expected, equal_nan=True)\n\n # determine the wrap by hand\n result = r.mask(lon, lat, method=method, wrap_lon=360).values\n assert np.allclose(result, expected, 
equal_nan=True)\n\n\n@pytest.mark.filterwarnings(\"ignore:No gridpoint belongs to any region.\")\n@pytest.mark.parametrize(\"meth\", [\"mask\", \"mask_3D\"])\ndef test_wrap_lon_no_error_wrap_lon_false(meth):\n\n # regions that exceed 360° longitude\n r = Regions([[[-180, 0], [-180, 10], [360, 10], [360, 0]]], numbers=[1])\n\n # lons that exceed 360° longitude\n lon = np.arange(-175, 360, 2.5)\n lat = np.arange(10, 1, -3)\n\n mask = getattr(r, meth)(lon, lat, wrap_lon=False)\n\n # the region index is 1 -> thus this works for 2D and 3D masks\n assert (mask == 1).all()\n np.testing.assert_equal(lon, mask.lon)\n np.testing.assert_equal(lat, mask.lat)\n\n # -180° is not special cased (no _mask_edgepoints_shapely)\n lon = [-180]\n mask = getattr(r, meth)(lon, lat, wrap_lon=False)\n assert (mask != 1).all()\n np.testing.assert_equal(lon, mask.lon)\n np.testing.assert_equal(lat, mask.lat)\n\n\n@pytest.mark.parametrize(\"meth\", [\"mask\", \"mask_3D\"])\ndef test_wrap_lon_error_wrap_lon(meth):\n\n # regions that exceed 360° longitude\n r = Regions([[[-180, 0], [-180, 10], [360, 10], [360, 0]]])\n\n # lons that exceed 360° longitude\n lon = np.arange(-180, 360, 2.5)\n lat = np.arange(10, 1, -3)\n\n with pytest.raises(ValueError, match=\"Set `wrap_lon=False` to skip this check.\"):\n getattr(r, meth)(lon, lat)\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_autowrap(method):\n\n expected = expected_mask_2D()\n\n # create a test case where the outlines and the lon coordinates\n # are different - or the same - should work either way\n\n # 1. -180..180 regions and -180..180 lon\n lon = [0.5, 1.5]\n lat = [0.5, 1.5]\n result = dummy_region.mask(lon, lat, method=method).values\n assert np.allclose(result, expected, equal_nan=True)\n\n # 2. -180..180 regions and 0..360 lon\n # outline -180..180\n outl1 = ((-180, 0), (-180, 1), (-1, 1.0), (-1, 0))\n outl2 = ((-180, 1), (-180, 2), (-1, 2.0), (-1, 1))\n outlines = [outl1, outl2]\n\n r = Regions(outlines)\n\n # lon -180..179.9\n lon = [358.5, 359.5]\n lat = [0.5, 1.5]\n\n result = r.mask(lon, lat, method=method).values\n assert np.allclose(result, expected, equal_nan=True)\n\n # 3. 0..360 regions and -180..180 lon\n\n # outline 0..359.9\n outl1 = ((359, 0), (359, 1), (0, 1.0), (0, 0))\n outl2 = ((359, 1), (359, 2), (0, 2.0), (0, 1))\n outlines = [outl1, outl2]\n\n r = Regions(outlines)\n\n # lon -180..179.9\n lon = [-1.5, -0.5]\n lat = [0.5, 1.5]\n\n result = r.mask(lon, lat, method=method).values\n assert np.allclose(result, expected, equal_nan=True)\n\n # 3. 
0..360 regions and 0..360 lon\n\n # lon 0..359.9\n lon = [0.5, 359.5]\n lat = [0.5, 1.5]\n\n result = r.mask(lon, lat, method=method).values\n assert np.allclose(result, expected, equal_nan=True)\n\n\ndef test_mask_wrong_method():\n\n msg = \"Method must be None or one of 'rasterize', 'shapely' and 'pygeos'.\"\n with pytest.raises(ValueError, match=msg):\n dummy_region.mask(dummy_lon, dummy_lat, method=\"wrong\")\n\n\n# ======================================================================\n\n# test 2D array\nlon_2D = [[0.5, 1.5], [0.5, 1.5]]\nlat_2D = [[0.5, 0.5], [1.5, 1.5]]\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS_IRREGULAR)\ndef test_mask_2D(method):\n\n expected = expected_mask_2D()\n result = dummy_region.mask(lon_2D, lat_2D, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n\n assert np.all(np.equal(result.lat.values, lat_2D))\n assert np.all(np.equal(result.lon.values, lon_2D))\n\n assert np.all(np.equal(result.lat_idx.values, [0, 1]))\n assert np.all(np.equal(result.lon_idx.values, [0, 1]))\n\n\n@pytest.mark.parametrize(\"lon\", [lon_2D, [0, 1, 3], 0])\n@pytest.mark.parametrize(\"lat\", [lat_2D, [0, 1, 3], 0])\ndef test_mask_rasterize_irregular(lon, lat):\n\n with pytest.raises(ValueError, match=\"`lat` and `lon` must be equally spaced\"):\n dummy_region.mask(lon, lat, method=\"rasterize\")\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS_IRREGULAR)\ndef test_mask_xarray_in_out_2D(method):\n # create xarray DataArray with 2D dims\n\n coords = {\n \"lat_1D\": [1, 2],\n \"lon_1D\": [1, 2],\n \"lat_2D\": ((\"lat_1D\", \"lon_1D\"), lat_2D),\n \"lon_2D\": ((\"lat_1D\", \"lon_1D\"), lon_2D),\n }\n\n d = np.random.rand(2, 2)\n\n data = xr.DataArray(d, coords=coords, dims=(\"lat_1D\", \"lon_1D\"))\n\n expected = expected_mask_2D()\n result = dummy_region.mask(\n data, lon_name=\"lon_2D\", lat_name=\"lat_2D\", method=method\n )\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat_2D.values, lat_2D))\n assert np.all(np.equal(result.lon_2D.values, lon_2D))\n\n assert np.all(np.equal(result.lat_1D.values, [1, 2]))\n assert np.all(np.equal(result.lon_1D.values, [1, 2]))\n\n\n@pytest.mark.parametrize(\"lon_start\", [0, 1, -5])\n@pytest.mark.parametrize(\"dlon\", [1, 2])\n@pytest.mark.parametrize(\"lat_start\", [0, 1, -5])\n@pytest.mark.parametrize(\"dlat\", [1, 2])\ndef test_transform_from_latlon(lon_start, dlon, lat_start, dlat):\n\n lon = np.arange(lon_start, 20, dlon)\n lat = np.arange(lat_start, 20, dlat)\n\n r = _transform_from_latlon(lon, lat)\n\n assert isinstance(r, Affine)\n\n expected = np.array(\n [dlon, 0, lon_start - dlon / 2, 0, dlat, lat_start - dlat / 2, 0, 0, 1]\n )\n\n assert np.allclose(np.array(r), expected)\n\n\n@pytest.mark.parametrize(\"a, b\", [(0, 1), (4, 5)])\n@pytest.mark.parametrize(\"fill\", [np.NaN, 3])\ndef test_rasterize(a, b, fill):\n\n expected = expected_mask_2D(a=a, b=b, fill=fill)\n\n result = _mask_rasterize(\n dummy_lon, dummy_lat, dummy_outlines_poly, numbers=[a, b], fill=fill\n )\n\n assert np.allclose(result, expected, equal_nan=True)\n\n\ndef test_mask_empty():\n\n with pytest.warns(UserWarning, match=\"No gridpoint belongs to any region.\"):\n result = dummy_region.mask([10, 11], [10, 11], method=\"shapely\")\n\n assert isinstance(result, xr.DataArray)\n assert result.shape == (2, 2)\n assert result.isnull().all()\n assert np.all(np.equal(result.lon.values, [10, 11]))\n assert 
np.all(np.equal(result.lat.values, [10, 11]))\n\n\n# =============================================================================\n# =============================================================================\n# test mask_3D: only basics (same algorithm as mask)\n\n\n@pytest.mark.parametrize(\"drop\", [True, False])\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_3D(drop, method):\n\n expected = expected_mask_3D(drop)\n result = dummy_region.mask_3D(dummy_lon, dummy_lat, drop=drop, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert result.shape == expected.shape\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat.values, dummy_lat))\n assert np.all(np.equal(result.lon.values, dummy_lon))\n\n _dr = dummy_region[[0, 1]] if drop else dummy_region\n\n assert np.all(np.equal(result.region.values, _dr.numbers))\n assert np.all(result.abbrevs.values == _dr.abbrevs)\n assert np.all(result.names.values == _dr.names)\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_3D_empty(method):\n\n with pytest.warns(UserWarning, match=\"No gridpoint belongs to any region.\"):\n result = dummy_region.mask_3D([10, 11], [10, 11], drop=True, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert result.shape == (0, 2, 2)\n assert np.all(np.equal(result.lon.values, [10, 11]))\n assert np.all(np.equal(result.lat.values, [10, 11]))\n\n\n@pytest.mark.parametrize(\"lon_name\", [\"lon\", \"longitude\"])\n@pytest.mark.parametrize(\"lat_name\", [\"lat\", \"latitude\"])\n@pytest.mark.parametrize(\"drop\", [True, False])\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\ndef test_mask_3D_obj(lon_name, lat_name, drop, method):\n\n expected = expected_mask_3D(drop)\n\n obj = {lon_name: dummy_lon, lat_name: dummy_lat}\n result = dummy_region.mask_3D(\n obj, method=method, drop=drop, lon_name=lon_name, lat_name=lat_name\n )\n\n assert isinstance(result, xr.DataArray)\n\n assert result.shape == expected.shape\n assert np.allclose(result, expected, equal_nan=True)\n\n assert np.all(np.equal(result[lat_name].values, dummy_lat))\n assert np.all(np.equal(result[lon_name].values, dummy_lon))\n\n _dr = dummy_region[[0, 1]] if drop else dummy_region\n\n assert np.all(np.equal(result.region.values, _dr.numbers))\n assert np.all(result.abbrevs.values == _dr.abbrevs)\n assert np.all(result.names.values == _dr.names)\n\n\n# =============================================================================\n# =============================================================================\n# =============================================================================\n\n# create a region such that the edge falls exactly on the lat/ lon coordinates\n# ===\n\nds_US_180 = create_lon_lat_dataarray_from_bounds(*(-161, -29, 2), *(75, 13, -2))\nds_US_360 = create_lon_lat_dataarray_from_bounds(\n *(360 + -161, 360 + -29, 2), *(75, 13, -2)\n)\n\noutline_180 = np.array([[-100.0, 50.0], [-100.0, 28.0], [-80.0, 28.0], [-80.0, 50.0]])\noutline_360 = outline_180 + [360, 0]\n\noutline_hole_180 = np.array(\n [[-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0]]\n)\noutline_hole_360 = outline_hole_180 + [360, 0]\n\n\nr_US_180_ccw = Regions([outline_180]) # counter clockwise\nr_US_180_cw = Regions([outline_180[::-1]]) # clockwise\n\nr_US_360_ccw = Regions([outline_360]) # counter clockwise\nr_US_360_cw = Regions([outline_360[::-1]]) # clockwise\n\n# define poylgon with hole\npoly = Polygon(outline_180, [outline_hole_180])\nr_US_hole_180_cw = 
Regions([poly]) # clockwise\npoly = Polygon(outline_180, [outline_hole_180[::-1]])\nr_US_hole_180_ccw = Regions([poly]) # counter clockwise\n\npoly = Polygon(outline_360, [outline_hole_360])\nr_US_hole_360_cw = Regions([poly]) # clockwise\npoly = Polygon(outline_360, [outline_hole_360[::-1]])\nr_US_hole_360_ccw = Regions([poly]) # counter clockwise\n\n\ndef _expected_rectangle(ds, lon_min, lon_max, lat_min, lat_max, is_360):\n\n if is_360:\n lon_min += 360\n lon_max += 360\n\n LON = ds.LON\n LAT = ds.LAT\n\n expected = (LAT > lat_min) & (LAT <= lat_max)\n return expected & (LON > lon_min) & (LON <= lon_max)\n\n\ndef expected_mask_edge(ds, is_360, number=0, fill=np.NaN):\n\n expected = _expected_rectangle(ds, -100, -80, 28, 50, is_360)\n\n # set number and fill value\n expected = expected.where(expected, fill)\n expected = expected.where(expected != 1, number)\n\n return expected\n\n\ndef expected_mask_interior_and_edge(ds, is_360, number=0, fill=np.NaN):\n\n expected_outerior = _expected_rectangle(ds, -100, -80, 28, 50, is_360)\n expected_interior = _expected_rectangle(ds, -94, -86, 34, 44, is_360)\n\n expected = expected_outerior & ~expected_interior\n\n # set number and fill value\n expected = expected.where(expected, fill)\n expected = expected.where(expected != 1, number)\n\n return expected\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\n@pytest.mark.parametrize(\n \"regions\", [r_US_180_ccw, r_US_180_cw, r_US_360_ccw, r_US_360_cw]\n)\n@pytest.mark.parametrize(\"ds_US, is_360\", [(ds_US_180, False), (ds_US_360, True)])\ndef test_mask_edge(method, regions, ds_US, is_360):\n\n expected = expected_mask_edge(ds_US, is_360)\n result = regions.mask(ds_US, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat, ds_US.lat))\n assert np.all(np.equal(result.lon, ds_US.lon))\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\n@pytest.mark.parametrize(\n \"regions\",\n [r_US_hole_180_cw, r_US_hole_180_ccw, r_US_hole_360_cw, r_US_hole_360_ccw],\n)\n@pytest.mark.parametrize(\"ds_US, is_360\", [(ds_US_180, False), (ds_US_360, True)])\ndef test_mask_interior_and_edge(method, regions, ds_US, is_360):\n\n expected = expected_mask_interior_and_edge(ds_US, is_360)\n result = regions.mask(ds_US, method=method)\n\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat.values, ds_US.lat))\n assert np.all(np.equal(result.lon.values, ds_US.lon))\n\n\n@pytest.mark.xfail(\n raises=AssertionError, reason=\"https://github.com/mapbox/rasterio/issues/1844\"\n)\ndef test_rasterize_edge():\n\n lon = ds_US_180.lon\n lat = ds_US_180.lat\n\n expected = expected_mask_edge(ds_US_180, is_360=False)\n result = _mask_rasterize_no_offset(lon, lat, r_US_180_ccw.polygons, numbers=[0])\n\n assert np.allclose(result, expected, equal_nan=True)\n\n\nds_for_45_deg = create_lon_lat_dataarray_from_bounds(*(-0.5, 16, 1), *(10.5, -0.5, -1))\n\n# add a small offset to y to avoid https://github.com/mapbox/rasterio/issues/1844\noutline_45_deg = np.array([[0, 10.1], [0, 0.1], [5.1, 0.1], [15.1, 10.1]])\n\nr_45_deg_ccw = Regions([outline_45_deg])\nr_45_deg_cw = Regions([outline_45_deg[::-1]])\n\n\n@pytest.mark.parametrize(\"regions\", [r_45_deg_ccw, r_45_deg_cw])\ndef test_deg45_rasterize_shapely_equal(regions):\n # https://github.com/regionmask/regionmask/issues/80\n\n shapely = regions.mask(ds_for_45_deg, method=\"shapely\")\n rasterize = 
regions.mask(ds_for_45_deg, method=\"rasterize\")\n\n xr.testing.assert_equal(shapely, rasterize)\n\n if has_pygeos:\n pygeos = regions.mask(ds_for_45_deg, method=\"pygeos\")\n xr.testing.assert_equal(pygeos, rasterize)\n\n\n@pytest.mark.parametrize(\"regions\", [r_45_deg_ccw, r_45_deg_cw])\ndef test_deg45_rasterize_offset_equal(regions):\n # https://github.com/regionmask/regionmask/issues/80\n\n polygons = regions.polygons\n lon = ds_for_45_deg.lon\n lat = ds_for_45_deg.lat\n\n result_no_offset = _mask_rasterize_no_offset(lon, lat, polygons, numbers=[0])\n result_offset = _mask_rasterize(lon, lat, polygons, numbers=[0])\n\n assert np.allclose(result_no_offset, result_offset, equal_nan=True)\n\n\n# =============================================================================\n\n# the whole globe -> can be re-arranged (_mask_rasterize_flip)\nds_GLOB_360 = create_lon_lat_dataarray_from_bounds(*(0, 360, 2), *(75, 13, -2))\n# not all lon -> must be masked twice (_mask_rasterize_split)\nds_GLOB_360_part = create_lon_lat_dataarray_from_bounds(*(0, 300, 2), *(75, 13, -2))\n\n\n@pytest.mark.parametrize(\"ds_360\", [ds_GLOB_360, ds_GLOB_360_part])\n@pytest.mark.parametrize(\"regions_180\", [r_US_180_ccw, r_US_180_cw])\ndef test_rasterize_on_split_lon(ds_360, regions_180):\n # https://github.com/regionmask/regionmask/issues/127\n\n # using regions_180 and ds_360 lon must be wrapped, making it\n # NOT equally_spaced\n result = regions_180.mask(ds_360, method=\"rasterize\")\n\n expected = expected_mask_edge(ds_360, is_360=True)\n assert isinstance(result, xr.DataArray)\n assert np.allclose(result, expected, equal_nan=True)\n assert np.all(np.equal(result.lat, expected.lat))\n assert np.all(np.equal(result.lon, expected.lon))\n\n expected_shapely = regions_180.mask(ds_360, method=\"shapely\")\n xr.testing.assert_equal(result, expected_shapely)\n\n\ndef test_rasterize_on_split_lon_asymmetric():\n # https://github.com/regionmask/regionmask/issues/266\n\n # for _mask_rasterize_flip: split_point not in the middle\n lon = np.hstack([np.arange(-90, 20, 2), np.arange(-180, -90, 2)])\n lat = np.arange(75, 13, -2)\n ds = xr.Dataset(coords=dict(lon=lon, lat=lat))\n\n assert _determine_method(ds.lon, ds.lat) == \"rasterize_flip\"\n\n result = r_US_180_cw.mask(ds, method=\"rasterize\")\n expected = r_US_180_cw.mask(ds, method=\"shapely\")\n xr.testing.assert_equal(result, expected)\n\n\nMETHOD_IRREGULAR = \"pygeos\" if has_pygeos else \"shapely\"\nMETHODS = {\n 0: \"rasterize\",\n 1: \"rasterize_flip\",\n 2: \"rasterize_split\",\n 3: METHOD_IRREGULAR,\n}\n\nequal = np.arange(0.5, 360)\ngrid_2D = np.arange(10).reshape(2, 5)\nun_equal = [0, 1, 2, 4, 5, 6.1]\nclose_to_equal = equal + np.random.randn(*equal.shape) * 10 ** -6\n\n\n@pytest.mark.parametrize(\n \"lon, m_lon\",\n [\n (equal, 0),\n (close_to_equal, 0),\n (_wrapAngle(equal), 1),\n (_wrapAngle(equal)[:-1], 2),\n ([1], 3),\n (grid_2D, 3),\n (un_equal, 3),\n ],\n)\n@pytest.mark.parametrize(\n \"lat, m_lat\",\n [(equal, 0), (close_to_equal, 0), ([1], 3), (grid_2D, 3), (un_equal, 3)],\n)\ndef test_determine_method(lon, m_lon, lat, m_lat):\n\n expected = METHODS[max((m_lon, m_lat))]\n\n assert _determine_method(lon, lat) == expected\n\n\n# =============================================================================\n# =============================================================================\n# =============================================================================\n\n# ensure a global region incudes all gridpoints - also the ones at\n# 0°E/ -180°E and 
-90°N (#GH159)\n\noutline_GLOB_180 = np.array(\n [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]]\n)\noutline_GLOB_360 = outline_GLOB_180 + [180, 0]\n\nr_GLOB_180 = Regions([outline_GLOB_180])\nr_GLOB_360 = Regions([outline_GLOB_360])\n\nlon180 = np.arange(-180, 180, 10)\nlon360 = np.arange(0, 360, 10)\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\n@pytest.mark.parametrize(\"regions\", [r_GLOB_180, r_GLOB_360])\n@pytest.mark.parametrize(\"lon\", [lon180, lon360])\ndef test_mask_whole_grid(method, regions, lon):\n\n lat = np.arange(90, -91, -10)\n mask = regions.mask(lon, lat, method=method)\n\n assert (mask == 0).all()\n\n # with wrap_lon=False the edges are not masked\n mask = regions.mask(lon, lat, method=method, wrap_lon=False)\n assert mask.sel(lat=-90).isnull().all()\n\n\n@pytest.mark.parametrize(\"method\", MASK_METHODS)\n@pytest.mark.parametrize(\"regions\", [r_GLOB_180, r_GLOB_360])\ndef test_mask_whole_grid_unusual_lon(method, regions):\n # https://github.com/regionmask/regionmask/issues/213\n\n lat = np.arange(90, -91, -2.5)\n lon = np.arange(-300, 60, 2.5)\n mask = regions.mask(lon, lat, method=method)\n\n assert (mask == 0).all()\n\n\ndef test_inject_mask_docstring():\n\n result = _inject_mask_docstring(True, True)\n\n assert \"3D\" in result\n assert \"2D\" not in result\n assert \"drop :\" in result\n assert \"geodataframe\" in result\n\n result = _inject_mask_docstring(False, False)\n\n assert \"2D\" in result\n assert \"3D\" not in result\n assert \"drop :\" not in result\n assert \"geodataframe\" not in result\n","sub_path":"regionmask/tests/test_mask.py","file_name":"test_mask.py","file_ext":"py","file_size_in_byte":24756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"476994409","text":"# rlQExperienceReplay.py - Linear Reinforcement Learner with Experience Replay\n# AIFCA Python3 code Version 0.9.1 Documentation at http://aipython.org\n\n# Artificial Intelligence: Foundations of Computational Agents\n# http://artint.info\n# Copyright David L Poole and Alan K Mackworth 2017-2020.\n# This work is licensed under a Creative Commons\n# Attribution-NonCommercial-ShareAlike 4.0 International License.\n# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en\n\nfrom rlQLearner import Q_learner\nfrom utilities import flip\nimport random\n\nclass BoundedBuffer(object):\n def __init__(self, buffer_size=1000):\n self.buffer_size = buffer_size\n self.buffer = [0]*buffer_size\n self.number_added = 0\n\n def add(self,experience):\n if self.number_added < self.buffer_size:\n self.buffer[self.number_added] = experience\n else:\n if flip(self.buffer_size/self.number_added):\n position = random.randrange(self.buffer_size)\n self.buffer[position] = experience\n self.number_added += 1\n\n def get(self):\n return self.buffer[random.randrange(min(self.number_added, self.buffer_size))]\n\nclass Q_AR_learner(Q_learner):\n def __init__(self, env, discount, explore=0.1, fixed_alpha=True, alpha=0.2,\n alpha_fun=lambda k:1/k, qinit=0, label=\"Q_AR_learner\", max_buffer_size=5000,\n num_updates_per_action=5, burn_in=1000 ):\n Q_learner.__init__(self, env, discount, explore, fixed_alpha, alpha,\n alpha_fun, qinit, label)\n self.experience_buffer = BoundedBuffer(max_buffer_size)\n self.num_updates_per_action = num_updates_per_action\n self.burn_in = burn_in\n\n\n def do(self,num_steps=100):\n \"\"\"do num_steps of interaction with the environment\"\"\"\n self.display(2,\"s\\ta\\tr\\ts'\\tQ\")\n alpha = 
self.alpha\n        for i in range(num_steps):\n            action = self.select_action(self.state)\n            next_state,reward = self.env.do(action)\n            self.experience_buffer.add((self.state,action,reward,next_state)) #remember experience\n            if not self.fixed_alpha:\n                k = self.visits[(self.state, action)] = self.visits.get((self.state, action),0)+1\n                alpha = self.alpha_fun(k)\n            self.q[(self.state, action)] = (\n                (1-alpha) * self.q.get((self.state, action),self.qinit)\n                + alpha * (reward + self.discount\n                           * max(self.q.get((next_state, next_act),self.qinit)\n                                 for next_act in self.actions)))\n            self.display(2,self.state, action, reward, next_state, \n                         self.q[(self.state, action)], sep='\\t')\n            self.state = next_state\n            self.acc_rewards += reward\n            # do some updates from experience buffer\n            if self.experience_buffer.number_added > self.burn_in:\n                for i in range(self.num_updates_per_action):\n                    (s,a,r,ns) = self.experience_buffer.get()\n                    if not self.fixed_alpha:\n                        k = self.visits[(s,a)]\n                        alpha = self.alpha_fun(k)\n                    # use the replayed reward r, not the most recent env reward\n                    self.q[(s,a)] = (\n                        (1-alpha) * self.q[(s,a)]\n                        + alpha * (r + self.discount\n                                   * max(self.q.get((ns,na),self.qinit)\n                                         for na in self.actions)))\n\nfrom rlSimpleEnv import Simple_game_env\nfrom rlQTest import sag1, sag2, sag3\nfrom rlPlot import plot_rl\n\nsenv = Simple_game_env()\nsag1ar = Q_AR_learner(senv,0.9,explore=0.2,fixed_alpha=True,alpha=0.1)\n# plot_rl(sag1ar,steps_explore=100000,steps_exploit=100000,label=\"AR alpha=\"+str(sag1ar.alpha))\nsag2ar = Q_AR_learner(senv,0.9,explore=0.2,fixed_alpha=False)\n# plot_rl(sag2ar,steps_explore=100000,steps_exploit=100000,label=\"AR alpha=1/k\")\nsag3ar = Q_AR_learner(senv,0.9,explore=0.2,fixed_alpha=False,alpha_fun=lambda k:10/(9+k))\n# plot_rl(sag3ar,steps_explore=100000,steps_exploit=100000,label=\"AR alpha=10/(9+k)\")\n\n","sub_path":"Assignment 4/aipython/rlQExperienceReplay.py","file_name":"rlQExperienceReplay.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"528481341","text":"print('NeviraShell 1.0')\r\n \r\nx=1\r\nwhile True:\r\n    startnum = 0\r\n    cmd = input('>')\r\n    if cmd=='/initialize log':\r\n        print('WARNING: You are about to initialize the log. Are you sure you want to continue? (Y/N)')\r\n        warn = input('/initialize:')\r\n        if warn=='Y':\r\n            try:\r\n                print('(0/1 complete)Deleting log data...')\r\n                logedit = open('discord.log', 'w')\r\n                logedit.write('Log deleted by bot')\r\n                logedit.close()\r\n                print('[OK]Log data deleted')\r\n                print('--------LOG INITIALIZATION SUCCESSFUL--------')\r\n            except:\r\n                print('[* ]Failed to delete log data')\r\n                print('----------LOG INITIALIZATION FAILED----------')\r\n    elif cmd=='/token':\r\n        print('/token is not available in the program which the shell is running in, but you')\r\n        print('can use this command in the shell in nevirabot.py')\r\n    elif cmd=='/exit':\r\n        x=1\r\n        while True:\r\n            print('')\r\n            print('Start Nevira(/start 1)          View version(/version)')\r\n            print('Start AeroBot(/start 2)         Quit(/exit)')\r\n            print('Start shell(/start 3)')\r\n            action = input('>')\r\n            if action=='/start 1':\r\n                import Nevira\r\n            elif action=='/start 2':\r\n                import Aerobot\r\n            elif action=='/start 3':\r\n                print('')\r\n                break\r\n            elif action=='/version':\r\n                print('Nevira.py Interactive V1.1')\r\n            elif action=='/exit':\r\n                quit()\r\n            else:\r\n                print('Unknown command, check if command is available')\r\n                print('')\r\n    elif cmd=='/start':\r\n        try:\r\n            print('Importing Nevira.py...')\r\n            import Nevira\r\n        except:\r\n            print('No default file detected! 
Specify code file you want to run.')\n script = input('/start:')\n try:\n exec(open(script).read())\n except:\n print('[**]Failed startup, script not detected')\n else:\n print('Invalid command, check if command is available')\n print('Available commands: /start /initialize log /exit')\n","sub_path":"Nevira-release1.2/install/runbase.py","file_name":"runbase.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"408488779","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 19:04:53 2017\n\n@author: mona\n\"\"\"\n\nfrom scipy.stats.mstats import gmean\nfrom math import isnan, exp, log\nfrom collections import deque\nimport sys\n\ndef geomean(x):\n return exp(sum( log(i) for i in x) / len(x))\n\nlocation1=deque()\nlocation2=deque()\nnum_lines=0\nfilepath=sys.argv[1]\nwith open(filepath) as f:\n for line in f:\n line_split=line.split(';')\n if not line.startswith('#'):\n try:\n value=float(line_split[2])\n if not isnan(value) and value>0.0:\n if int(line_split[1])==1:\n location1.append(value)\n elif int(line_split[1])==2:\n location2.append(value)\n\n except ValueError:\n num_lines+=1\n continue\n except IndexError:\n num_lines+=1\n continue\n #num_lines+=1\nlen1=len(location1)\nlen2=len(location2)\n\nprint('File: '+filepath+' with '+str(num_lines+len1+len2)+' lines')\nprint('Valid values Loc0: ' +str(len1)+' with '+'GeoMean: '+str(gmean(list(location1))))\nprint('Valid values Loc1: ' +str(len2)+' with '+'GeoMean: '+str(gmean(list(location2))))\n\n \n","sub_path":"Setje-Eilers/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"90953312","text":"import os\nimport torch\nimport csv\nfrom misc import bcolors\nfrom progress.bar import Bar\nimport time\nimport sys\n\nclass SummaryStore():\n def __init__(self):\n self.ite_mean = 0\n self.ep_mean = 0\n self.iters = 0\n self.ep_iters = 0\n\n def __call__(self, value):\n self.ite_mean = (self.ite_mean * self.iters + value)/(self.iters+1)\n self.iters+=1\n\n self.ep_mean = (self.ep_mean * self.ep_iters + value)/(self.ep_iters+1)\n self.ep_iters += 1\n\n def iter_mean(self):\n mean = self.ite_mean\n self.ite_mean = 0\n self.iters = 0\n return mean\n\n def epoch_mean(self):\n mean = self.ep_mean\n self.ite_mean = 0\n self.iters = 0\n self.ep_mean = 0\n self.ep_iters = 0\n return mean\n\nclass AverageStore(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __call__(self, val, n=1):\n self.update(val, n) #forward the caller's n instead of hard-coding n=1\n\nclass SummaryPrint():\n def __init__(self, args, names, log_dir, log_name='train', color=bcolors.OKGREEN):\n self.names = names\n self.stores = [AverageStore() for name in names]\n \n #write arguments to log\n self.argslog = open(os.path.join(log_dir, 'args_log_'+log_name+'.txt'),'w')\n self.argslog.write(str(args))\n self.argslog.close()\n\n #csv log\n self.log = open(os.path.join(log_dir, log_name+'_log.csv'),'w')\n self.csv_logger=csv.writer(self.log, delimiter=';', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n self.csv_logger.writerow(names)\n self.color=color\n\n self.format_str = '({0[0]}/{0[1]}) | Data: {0[2]:0.03f} | Batch: {0[3]:0.03f}'\n for i, name in enumerate(names):\n self.format_str += ' | %s: 
{0[%d]:0.5f}'%(name, i+4)\n self.data_time = AverageStore()\n self.batch_time = AverageStore()\n\n def __del__(self):\n self.close()\n\n def __call__(self, values):\n for store, value in zip(self.stores, values):\n store(value)\n\n def reset(self):\n self.data_time.reset()\n self.batch_time.reset()\n for store in self.stores:\n store.reset()\n\n def start_epoch(self, epoch, iters):\n self.iters = iters\n self.epoch = epoch\n #start bar and set init time\n sys.stdout.write(self.color)\n sys.stdout.flush()\n\n self.bar = Bar('epoch: %d'%epoch, max=iters)\n self.time = time.time()\n\n self.reset()\n\n def start_iter(self, i):\n #update time for data\n t = time.time()\n self.data_time.update(t-self.time)\n self.time = t\n\n def end_iter(self, i, values):\n #compute times and update bar\n t = time.time()\n self.batch_time.update(t-self.time)\n self.time = t\n\n #update stores\n for store, value in zip(self.stores, values):\n store.update(value)\n #update print output\n self.bar.suffix = self.format_str.format([i+1, self.iters, self.data_time.avg, self.batch_time.avg] + [store.avg for store in self.stores])\n self.bar.next()\n \n def end_epoch(self, epoch_metrics={}):\n #end the epoch, write averages to the log\n self.bar.finish()\n sys.stdout.write(bcolors.ENDC)\n sys.stdout.flush()\n\n #write to log\n store_list = [self.epoch] + [store.avg for store in self.stores] + [value for name,value in epoch_metrics.items()]\n self.csv_logger.writerow(store_list)\n\n if epoch_metrics:\n print(epoch_metrics)\n\n return store_list\n\n def close(self):\n self.log.close()\n #adding just to make sure we go back to white\n sys.stdout.write(bcolors.ENDC)\n sys.stdout.flush()\n","sub_path":"summary_prints.py","file_name":"summary_prints.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"261271596","text":"import pdb\nimport sys\nfrom utils import Move, JustifyOver, printPebble\nfrom techniques import Alpha_Beta\nfrom nextstep import computer_next_step\n#!/usr/bin/python\n\n\ndef CvsC(computer_A, computer_B, Clever_Stupid, who_s_turn):\n\n d = 4\n# who_s_turn = 'A'\n dict = computer_next_step(computer_A, computer_B, d, Clever_Stupid, who_s_turn)\n next_list = dict['list']\n \n if who_s_turn=='B':\n selected_index = len(next_list) - 1 - dict['index']\n else:\n selected_index = dict['index']\n \n computer_A = next_list[0:len(next_list) / 2]\n computer_B = next_list[len(next_list) / 2 : len(next_list)]\n computer_B.reverse()\n\n return {\n \"computer_B\": computer_B, \n \"computer_A\": computer_A,\n \"selected_index\": selected_index\n }\n","sub_path":"algorithm/ComputerComputer.py","file_name":"ComputerComputer.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"378679323","text":"import unittest\nimport xml.etree.ElementTree as ET\n\nfrom programy.bot import Bot\nfrom programy.brain import Brain\nfrom programy.config import ClientConfiguration, BrainConfiguration\nfrom programy.dialog import Question, Sentence\nfrom programy.parser.template.graph import TemplateGraph\nfrom programy.parser.template.nodes import *\n\n\nclass TemplateGraphRandomTests(unittest.TestCase):\n\n def setUp(self):\n self.parser = TemplateGraph()\n self.assertIsNotNone(self.parser)\n\n self.test_brain = None\n self.test_sentence = Sentence(\"test sentence\")\n self.test_sentence._stars = ['one', 'two', 'three', 'four', 'five', 'six']\n 
self.test_sentence._thatstars = [\"*\"]\n self.test_sentence._topicstars = [\"*\"]\n\n test_config = ClientConfiguration()\n\n self.test_bot = Bot(Brain(BrainConfiguration()), config=test_config.bot_configuration)\n self.test_clientid = \"testid\"\n\n conversation = self.test_bot.get_conversation(self.test_clientid)\n question = Question.create_from_sentence(self.test_sentence)\n conversation._questions.append(question)\n\n def test_random_template_no_li(self):\n template = ET.fromstring(\"\"\"\n\t\t\t<template><random></random></template>\n\t\t\t\"\"\")\n with self.assertRaises(ParserException):\n ast = self.parser.parse_template_expression(template)\n\n def test_random_template(self):\n template = ET.fromstring(\"\"\"\n\t\t\t<template><random><li>1</li><li>2</li><li>3</li></random></template>\n\t\t\t\"\"\")\n ast = self.parser.parse_template_expression(template)\n self.assertIsNotNone(ast)\n\n self.assertIsNotNone(ast)\n self.assertIsInstance(ast, TemplateNode)\n self.assertIsNotNone(ast.children)\n self.assertIsNotNone(ast.children[0])\n self.assertIsInstance(ast.children[0], TemplateRandomNode)\n self.assertEqual(3, len(ast.children[0].children))\n\n self.assertIsInstance(ast.children[0].children[0], TemplateNode)\n self.assertIsInstance(ast.children[0].children[1], TemplateNode)\n self.assertIsInstance(ast.children[0].children[2], TemplateNode)\n\n selection = ast.children[0].resolve(self.test_bot, self.test_clientid)\n self.assertIsNotNone(selection)\n self.assertIn(selection, ['1', '2', '3'])\n\n def test_random_nested_template(self):\n template = ET.fromstring(\"\"\"\n\t\t\t<template><random><li><random><li>Say something</li><li>Say the other</li></random></li><li><random><li>Hello world!</li><li>Goodbye cruel world</li></random></li></random></template>\n\t\t\t\"\"\")\n ast = self.parser.parse_template_expression(template)\n\n self.assertIsNotNone(ast)\n self.assertIsInstance(ast, TemplateNode)\n self.assertIsNotNone(ast.children)\n self.assertIsNotNone(ast.children[0])\n self.assertIsInstance(ast.children[0], TemplateRandomNode)\n self.assertEqual(2, len(ast.children[0].children))\n\n self.assertIsInstance(ast.children[0].children[0], TemplateNode)\n self.assertEqual(1, len(ast.children[0].children[0].children))\n self.assertIsInstance(ast.children[0].children[0].children[0], TemplateRandomNode)\n self.assertEqual(2, len(ast.children[0].children[0].children[0].children))\n\n self.assertIsInstance(ast.children[0].children[1], TemplateNode)\n self.assertEqual(1, len(ast.children[0].children[1].children))\n self.assertIsInstance(ast.children[0].children[1].children[0], TemplateRandomNode)\n self.assertEqual(2, len(ast.children[0].children[1].children[0].children))\n\n selection = ast.children[0].resolve(self.test_bot, self.test_clientid)\n self.assertIsNotNone(selection)\n self.assertIn(selection, ['Say something', 'Say the other', 'Hello world!', 'Goodbye cruel world'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"AIML 2.0/keiffster-program-y/src/test/parser/template/graph/test_random.py","file_name":"test_random.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"219542097","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT\nimport stat\nimport shlex\n\nimport bit.config as config\nimport bit.git as git\nimport bit.owncloud as oc\nimport bit.rsync as rsync\n\nimport multiprocessing as mp\n\ndef worker(call):\n out=Popen(shlex.split(call), stdout=PIPE, stdin=PIPE, stderr=PIPE)\n message=out.communicate()\n out.stdout.close()\n out.stdin.close()\n out.stderr.close()\n try:\n out.kill()\n 
except:\n pass\n return \"\\n********************\\n\"+call.split(\" \")[-1]+\"\\n\"+message[0]+\"\\n\"+message[1]\n\ndef main():\n\n import argparse\n\n parser = argparse.ArgumentParser(description=\"bit, [b]ermuda [i]nformation [t]riangle.\\\n bit is a git-based tool for the management of code and data. It uses git for code versioning\\\n and ownCloud for storing and exchanging data. It saves storage by avoiding versioning\\\n of data while logging changes in associated git wikis.\",\\\n formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-i\", \"--input\", nargs='*', help=\"Input files\")\n parser.add_argument(\"-s\", \"--subfolder\", help=\"Subfolder to be created.\", default=None)\n parser.add_argument(\"-m\", \"--message\",nargs='*', help=\"Message to write on log file.\", default=None)\n parser.add_argument(\"-d\", \"--pick_a_date\", help=\"Pick an existing date folder to transfer data to/from. Format=YYYY-MM-DD\", default=None)\n parser.add_argument(\"-c\", \"--create_folder\", help=\"Create dropbox folder for user to upload data.\", action=\"store_true\")\n parser.add_argument(\"-g\", \"--getfolder\", help=\"Downloads a folder as zip file. Requires --pick_a_date. Defaults base_folder=upload:download to download\", action=\"store_true\")\n parser.add_argument(\"-t\", \"--days_to_share\", help=\"Number of days you wish to share this folder further.\", default=21)\n parser.add_argument(\"--issue\", help=\"Issue to comment on with --message and owncloud data links\", default=None)\n parser.add_argument(\"--scripts\",help=\"Needs -i and -m. Simultaneously sync the scripts.user folder when uploading data.\", action=\"store_true\")\n parser.add_argument(\"--start\", help=\"Project name of the format. PI_PROJECT_NAME. Initiates a project. 
This will create the required local folders and respective git repositories.\", default=None)\n parser.add_argument(\"--stdfolders\",nargs='*', help=\"Folders to be created in addition to scripts.user and and wiki.user when a project is started.\", default=[\"tmp\",\"slurm_logs\"])\n parser.add_argument(\"--adduser\",help=\"Add a user to a project creating his scripts.user and wiki.user folder\",action=\"store_true\")\n parser.add_argument(\"--sync\", nargs='*', help=\"Files or folders to syncronize with remote server using rsync over ssh.\",default=None)\n parser.add_argument(\"--sync_to\", help=\"Destination server to sync to in the form: @\", default=None)\n parser.add_argument(\"--sync_from\", help=\"Destination server to sync from in the form: @\", default=None)\n parser.add_argument(\"--cpus\",help=\"Number of CPUs/channels to open for rsync.\", default=1)\n parser.add_argument(\"--forceRemote\", help=\"If syncing from or to a remoter server force the import of a remote 'bit_config'.\", action=\"store_true\")\n parser.add_argument(\"--gitnossh\", help=\"Use password instead of git SSH keys.\", action=\"store_false\")\n parser.add_argument(\"--config\", help=\"Generate a config file.\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.sync:\n if args.sync_to:\n calls=rsync.rsync_to(args.sync_to, args.sync, forceImport=args.forceRemote, \\\n sync_to=True, sync_from=False)\n elif args.sync_from:\n calls=rsync.rsync_from(args.sync_from, args.sync, forceImport=args.forceRemote, \\\n sync_to=False, sync_from=True)\n\n pool=mp.Pool(int(args.cpus))\n\n funclist=[]\n for call in calls:\n out=pool.apply_async(worker,[call])\n funclist.append(out)\n results=[]\n for ff in funclist:\n res=ff.get()\n print(res)\n results.append(res)\n\n if args.config:\n print(\"Setting up your config file.\")\n sys.stdout.flush()\n config.make_bitconfig()\n sys.exit(0)\n\n # initate a project\n if args.start:\n configdic=config.read_bitconfig()\n for r in config.start_reqs:\n if r != \"user_group\":\n while configdic[r] == None:\n configdic=config.check_reqs([r],configdic,config_file=None, gitssh=None)\n local_path=os.path.abspath(configdic[\"local_path\"])\n code_path=os.path.abspath(configdic[\"code_path\"])\n automation_path=os.path.abspath(configdic[\"automation_path\"])\n full_path=os.path.abspath(args.start)\n if code_path in full_path:\n full_path=full_path.split(code_path)[-1]\n full_path=f\"{local_path}{full_path}\"\n project_name=os.path.basename(full_path)\n path_to_code=os.path.join( code_path, \"/\".join(full_path.rsplit(\"/\",2)[-2:]) )\n path_to_automation=os.path.join( automation_path, \"/\".join(full_path.rsplit(\"/\",2)[-2:]) )\n\n\n # check format projects_folder/group_head/project_name\n if ( full_path.rsplit(\"/\",2)[0] != local_path ) and ( path_to_automation.rsplit(\"/\",2)[0] != automation_path ) and (full_path.rsplit(\"/\",2)[0] != code_path ):\n print(\"The path (%s) to this project does not obey the structure and/or defined local path (%s / %s). Check the reference structure:\\n%s\" \\\n %(full_path,path_to_automation,code_path ,config.structure) )\n sys.stdout.flush()\n sys.exit(0)\n\n # have the user rechecking that the the string for the project name is really correct\n checks=None\n while checks not in [\"Y\",\"N\"]:\n checks=str(input(\"Is the label %s in agreement with the structure PF_project_name where PF stands for the initials of the Parent_Folder? 
(Y/N) \" \\\n %project_name )) or None\n if checks==\"N\":\n sys.exit(0)\n\n # create the repo\n github_api=config.get_github_api(configdic[\"github_address\"])\n # github_api=github_api+configdic[\"github_organization\"]+\"/repos\"\n # create_call=[\"curl\",\"-u\",configdic[\"github_user\"]+\":\"+configdic[\"github_pass\"]\\\n # ,github_api,\"-d\",'{\"name\":\"'+project_name+'\",\"private\": true,\\\n # \"auto_init\": true }']\n\n # p = Popen(create_call, stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n # print(p.communicate()[0].decode('utf-8').rstrip())\n # sys.stdout.flush()\n\n response = git.make_github_repo(github_api, project_name, configdic)\n response = git.make_github_issue(github_api, project_name, project_name, configdic, configdic[\"github_user\"] )\n response = git.make_github_card(response, github_api, configdic, \"77\")\n\n # !!removing the need for wiki!!\n # clone the repo and the wiki by initiating this user\n #input(\"\\n\\n*************\\n\\nPlease go to %s/%s/%s/wiki and click on 'Create the first page' and then 'Save Page'.\\n\\nPress Enter once you have saved the first wiki page.\\n\\n*************\\n\\n\" \\\n #%(configdic[\"github_address\"],configdic[\"github_organization\"],project_name) )\n\n config.init_user(full_path,path_to_automation,path_to_code,configdic[\"github_address\"],configdic[\"github_organization\"],\\\n project_name,github_user=configdic[\"github_user\"],\\\n github_pass=configdic[\"github_pass\"],gitssh=args.gitnossh)\n\n # create additional folders\n for f in args.stdfolders:\n if not os.path.exists(full_path+\"/\"+f):\n os.makedirs(full_path+\"/\"+f)\n\n # if configdic[\"user_group\"]:\n # user_group=configdic[\"user_group\"].split(\",\")\n # try:\n # for u in user_group:\n # for p in full_path, path_to_code, path_to_automation :\n # call=[\"setfacl\",\"-m\",f\"u:{u}:rwx\" , p]\n # out=Popen(call, stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n # prt=str(out.communicate()[0].decode('utf-8').rstrip())\n # if prt:\n # print(prt)\n # sys.stdout.flush()\n # except:\n # print(\"Failed to setfacls.\")\n # sys.stdout.flush()\n # else:\n os.chmod(full_path, stat.S_IRWXU)\n\n # local_path_owner=os.stat(local_path)\n # local_path_owner=local_path_owner.st_uid\n #os.chown(full_path,local_path_owner,-1)\n\n sys.exit(0)\n\n if args.adduser:\n configdic=config.read_bitconfig()\n for r in config.start_reqs:\n while configdic[r] == None:\n configdic=config.check_reqs([r],configdic,config_file=None, gitssh=args.gitnossh )\n local_path=os.path.abspath(configdic[\"local_path\"])\n automation_path=os.path.abspath(configdic[\"automation_path\"])\n code_path=os.path.abspath(configdic[\"code_path\"])\n\n\n if args.start:\n full_path=os.path.abspath(args.start)\n else:\n full_path=os.path.abspath(os.getcwd())\n project_name=os.path.basename(full_path)\n\n path_to_code=os.path.join( code_path, \"/\".join(full_path.rsplit(\"/\",2)[-2:]) )\n path_to_automation=os.path.join( automation_path, \"/\".join(full_path.rsplit(\"/\",2)[-2:]) )\n\n # check format projects_folder/group_head/project_name\n if ( full_path.rsplit(\"/\",2)[0] != local_path ) and ( full_path.rsplit(\"/\",2)[0] != automation_path ) and (full_path.rsplit(\"/\",2)[0] != code_path ) : \n print(\"The path (%s) to this project does not obey the structure and/or defined local path (%s). 
Check the reference structure:\\n%s\" %(full_path,local_path,config.structure))\n sys.stdout.flush()\n sys.exit(0)\n\n config.init_user(full_path,path_to_automation,path_to_code,configdic[\"github_address\"],configdic[\"github_organization\"],project_name,github_user=configdic[\"github_user\"],github_pass=configdic[\"github_pass\"],gitssh=args.gitnossh)\n sys.exit(0)\n\n if args.input:\n if not args.message:\n print(\"ERROR\\nYou need to use -m to leave a message in the logs.\")\n sys.exit()\n oc.ownCloud_upload(input_files=args.input,message=args.message,gitssh=args.gitnossh,days_to_share=args.days_to_share,scripts=args.scripts,issue=args.issue, subfolder=args.subfolder,pick_a_date=args.pick_a_date)\n sys.exit(0)\n\n if args.create_folder:\n oc.ownCloud_create_folder(gitssh=args.gitnossh,pick_a_date=args.pick_a_date,days_to_share=args.days_to_share)\n sys.exit(0)\n\n if args.getfolder:\n if not args.pick_a_date:\n print(\"--getfolder implies --pick_a_date.\\nPlease use -d in combination with -g.\\nThank you!\")\n sys.exit(0)\n oc.ownCloud_download(gitssh=args.gitnossh,pick_a_date=args.pick_a_date)\n sys.exit(0)\n\n sys.exit(0)\n","sub_path":"bit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"607640190","text":"#!/bin/env python\n\n################################################################################\n# Global imports\n################################################################################\nimport os\nimport sys\nimport time\n\n################################################################################\n# Parameters\n################################################################################\n\nnetwork_types = ['between_network', 'within_network', 'full_network']\nwindow_types = ['non-sliding', 'sliding']\ndata_analysis_types = ['BOLD', 'synchrony', 'graph_analysis']\ngroup_analysis_types = ['hutchenson', 'ttest', '1ANOVA']\nanalysis_types = ['rest', 'task']\nica_aroma_types = ['aggr', 'nonaggr', 'no_ica']\n\n############################################################################\n# Argument parsing\n############################################################################\n# This needs to be first, because even path settings depend on the\n# parameters passed to the command line.\nfrom argparse import ArgumentParser\nparser = ArgumentParser(\n description='Analyse the subjects.'\n)\n# Analysis type\nparser.add_argument(\n '--analysis-type', required=True,\n dest='analysis_type', metavar='ANALYSIS_TYPE',\n choices=analysis_types,\n help='Analysis type. 
Choose from: ' + ', '.join(analysis_types)\n)\nparser.add_argument(\n '-c', '--golden-subjects',\n action='store_true', dest='golden_subjects',\n help='Perform analysis with subset of healthy subjects'\n)\n# Number of subjects\nparser.add_argument(\n '-n', '--nsubjects',\n type=int, dest='nsubjects', metavar='NSUBJECTS', default=None,\n help='Number of healthy and schizophrenic subjects.'\n)\n# Possible activities/phases\nparser.add_argument(\n '-p', '--preprocess',\n action='store_true', dest='preprocess',\n help='Perform pre-processing of the data.'\n)\nparser.add_argument(\n '-w', '--extract_csf_wm', dest='extract_csf_wm',\n action='store_true',\n help='Perform extraction of CSF and WM'\n)\nparser.add_argument(\n '-r', '--extract_roi',\n action='store_true', dest='extract_roi',\n help='Perform ROI extraction.'\n)\nparser.add_argument(\n '-a', '--analyse-data',\n action='store_true', dest='analyse_data',\n help='Perform data analysis.'\n)\nparser.add_argument(\n '-g', '--analyse-data-group',\n action='store_true', dest='analyse_data_group',\n help='Perform group data analysis.'\n)\n# Options to pass to phases\n# Note: Not all options apply to all phases.\nparser.add_argument(\n '--network-type',\n dest='network_type', metavar='NETWORK_TYPE',\n choices=network_types,\n help='Network type. Choose from: ' + ', '.join(network_types)\n)\nparser.add_argument(\n '--window-type',\n dest='window_type', metavar='WINDOW_TYPE',\n choices=window_types,\n help='Window type. Choose from: ' + ', '.join(window_types)\n)\nparser.add_argument(\n '--data-analysis-type',\n dest='data_analysis_type', metavar='DATA_ANALYSIS_TYPE',\n choices=data_analysis_types,\n help='Data analysis type. Choose from: ' + ', '.join(data_analysis_types)\n)\nparser.add_argument(\n '--nclusters',\n type=int, dest='nclusters', metavar='NCLUSTERS',\n help='Number of clusters to use in data analysis.'\n)\nparser.add_argument(\n '--rand-ind',\n type=int, dest='rand_ind', metavar='RAND_IND',\n help='Random index to use in data analysis (graph_analysis only).'\n)\nparser.add_argument(\n '--group-analysis-type',\n dest='group_analysis_type', metavar='GROUP_ANALYSIS_TYPE',\n choices=group_analysis_types,\n help='Group analysis type. Choose from: ' + ', '.join(group_analysis_types)\n)\nparser.add_argument(\n '--ica_aroma-type',\n dest='ica_aroma_type', metavar='ICA_AROMA_TYPE',\n choices=ica_aroma_types,\n help='ICA aroma type. 
Choose from: ' + ', '.join(ica_aroma_types)\n)\nparser.add_argument(\n '--glm_denoise',\n dest='glm_denoise',\n action='store_true',\n help='Perform denoising with GLM'\n)\nargs = parser.parse_args()\n\n################################################################################\n# Path settings\n################################################################################\n# Base path for all input and output data.\nbase_path = os.path.join(os.path.sep, 'group', 'dynamics', 'scz_dynamics', 'ucla-la5')\nbase_path_in = os.path.join(base_path, 'data_in')\nbase_path_out = os.path.join(base_path, 'data_out', args.analysis_type)\n\n# Pre-processing\n# This folder contains all subjects' folders for the pre-processing phase.\n# FIXME: This has never been tested.\npreprocessing_input_basepath = os.path.join(base_path_in, 'reconall_data')\npreprocessing_output_basepath = os.path.join(base_path_out, 'preprocessing_out')\n# Input image for wm and csf extraction\nreconall_segmented_image_basepath = os.path.join(base_path_in, 'reconall_data')\n\n# ROI extraction\n# Input image for ROI extraction\nroi_input_segmented_image_filename = os.path.join(base_path_in, 'voi_extraction', 'seg_aparc_82roi_2mm.nii.gz')\nroi_input_lookuptable = os.path.join(base_path_in, 'voi_extraction', 'LookupTable')\n# Input region list for ROI extraction\nroi_input_segmented_regions_path = os.path.join(base_path_in, 'voi_extraction')\n# Image where between_network and within_network are specified.\nroi_input_network_filename = os.path.join(base_path_in, 'voi_extraction', 'PNAS_Smith09_rsn10.nii')\nroi_input_basepath = os.path.join(preprocessing_output_basepath, 'temp_filt')\nroi_output_basepath = os.path.join(base_path_out, 'extract_roi')\n\n# Data analysis\ndata_analysis_input_basepath = roi_output_basepath\ndata_analysis_output_basepath = os.path.join(base_path_out, 'data_analysis')\n\n# Group data analysis\ngroup_analysis_input_basepath = data_analysis_output_basepath\ngroup_analysis_output_basepath = os.path.join(base_path_out, 'group_analysis')\n\n# Subjects\n# FIXME: Move to data_in folder.\nsubjects_filename = 'subjects.json'\n\n################################################################################\n# Global logging\n################################################################################\n# Note: This needs to be set up before other local modules are imported\n# and before any local code is executed.\nimport logging\nlogpath = os.path.join(data_analysis_output_basepath, args.network_type, args.window_type, args.data_analysis_type)\ntimestamp = time.strftime(\"%Y%m%d%H%M%S\")\nlog_filename = os.path.join(logpath, '%s_ucla5_%d.log' %(timestamp, args.nclusters))\nformatter = logging.Formatter('%(message)s')\nlog = logging.getLogger('')\nlog.setLevel(logging.DEBUG)\nfh = logging.FileHandler(log_filename)\nfh.setFormatter(formatter)\nlog.addHandler(fh)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.INFO)\nch.setFormatter(formatter)\nlog.addHandler(ch)\n\n# logging.basicConfig(filename=log_filename,\n# level=logging.INFO,\n# format='%(message)s',\n# stream=sys.stdout)\n\n################################################################################\n# Local imports\n################################################################################\nfrom subjects import load_subjects\nfrom preprocessing_workflow import preprocessing_pipeline, get_lookuptable\nfrom extract_roi import extract_roi\nfrom data_analysis import data_analysis\nfrom group_analysis_pairwise import 
group_analysis_pairwise\n\n################################################################################\n# Load subjects.\n################################################################################\n# They must always be loaded, no matter the type of the analysis.\n# Note: If the user didn't specify nsubjects, we take all subjects (still\n# balanced).\nsubjects = load_subjects(subjects_filename, args.golden_subjects, args.nsubjects)\n\n############################################################################\n# Pre-processing\n############################################################################\nif args.preprocess:\n # Note: Preprocessing is only running on the old cluster\n print('Preprocessing')\n preprocessing_pipeline(subjects,\n preprocessing_input_basepath,\n preprocessing_output_basepath,\n )\n\n # extract csf and white matter\n print('Pre-processing.')\n\n############################################################################\n# ROI extraction\n############################################################################\nif args.extract_roi:\n if args.network_type is None:\n parser.error('You must specify: --network_type.')\n\n lookuptable = get_lookuptable(roi_input_lookuptable)\n\n # Extract ROIs.\n extract_roi(subjects,\n args.network_type,\n args.extract_csf_wm,\n args.glm_denoise,\n roi_input_basepath,\n roi_input_segmented_image_filename,\n lookuptable,\n roi_output_basepath,\n args.ica_aroma_type,\n network_mask_filename=roi_input_network_filename)\n\n############################################################################\n# Data analysis\n############################################################################\nif args.analyse_data:\n if args.network_type is None or \\\n args.window_type is None or \\\n args.data_analysis_type is None or \\\n args.nclusters is None or \\\n args.rand_ind is None:\n parser.error('You must specify: ' + \\\n '--network-type, ' + \\\n '--window-type, ' + \\\n '--data-analysis-type, ' + \\\n '--nclusters, ' + \\\n '--rand-ind.')\n\n # Analyse data.\n data_analysis(subjects,\n data_analysis_input_basepath,\n data_analysis_output_basepath,\n args.network_type,\n args.window_type,\n args.data_analysis_type,\n args.ica_aroma_type,\n args.glm_denoise,\n args.nclusters,\n args.rand_ind,\n args.golden_subjects)\n\n\n############################################################################\n# Group analysis\n############################################################################\nif args.analyse_data_group:\n if args.network_type is None or \\\n args.window_type is None or \\\n args.data_analysis_type is None or \\\n args.group_analysis_type is None or \\\n args.nclusters is None or \\\n args.rand_ind is None:\n parser.error('You must specify: ' + \\\n '--network-type, ' + \\\n '--window-type, ' + \\\n '--data-analysis-type, ' + \\\n '--group-analysis-type, ' + \\\n '--nclusters, ' + \\\n '--rand-ind.')\n\n group_analysis_pairwise(subjects,\n group_analysis_input_basepath,\n group_analysis_output_basepath,\n args.network_type,\n args.window_type,\n args.data_analysis_type,\n args.group_analysis_type,\n args.nclusters,\n args.rand_ind)\n\n# remember to close the handlers\nfor handler in log.handlers:\n handler.close()\n log.removeFilter(handler)","sub_path":"code/main_analysis.py","file_name":"main_analysis.py","file_ext":"py","file_size_in_byte":11396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"353425358","text":"import time\nimport VL53L0X\nimport 
RPi.GPIO as GPIO\n\n# GPIO for left sensor shutdown pin\nleft = 20\n# GPIO for right shutdown pin\nright = 16\n#GPIO for forward shutdown pin\nforward = 21\n\nGPIO.setwarnings(False)\n\n# Setup GPIO for shutdown pins on each VL53L0X\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(left, GPIO.OUT)\nGPIO.setup(right, GPIO.OUT)\nGPIO.setup(forward, GPIO.OUT)\n\n\n# Set all shutdown pins low to turn off each VL53L0X\nGPIO.output(left, GPIO.LOW)\nGPIO.output(right, GPIO.LOW)\nGPIO.output(forward, GPIO.LOW)\n\n# Keep all low for 500 ms or so to make sure they reset\ntime.sleep(0.50)\n\n# Create one object per VL53L0X passing the address to give to\n# each.\ntof_left = VL53L0X.VL53L0X(address=0x2B)\ntof_right = VL53L0X.VL53L0X(address=0x2D)\ntof_forward = VL53L0X.VL53L0X(address=0x29)\n\n# Set shutdown pin high for the first VL53L0X then \n# call to start ranging \nGPIO.output(left, GPIO.HIGH)\ntime.sleep(0.50)\ntof_left.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)\n\n# Set shutdown pin high for the second VL53L0X then \n# call to start ranging \nGPIO.output(right, GPIO.HIGH)\ntime.sleep(0.50)\ntof_right.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)\n\n# Set shutdown pin high for the third VL53L0X then \n# call to start ranging \nGPIO.output(forward, GPIO.HIGH)\ntime.sleep(0.50)\ntof_forward.start_ranging(VL53L0X.VL53L0X_BETTER_ACCURACY_MODE)\n\ntiming = tof_left.get_timing()\nif (timing < 20000):\n timing = 20000\nprint (\"Timing %d ms\" % (timing/1000))\n\nfor count in range(1,101):\n left_distance = tof_left.get_distance()\n if (left_distance > 0):\n print (\"left distance in mm is %d mm\" % left_distance)\n else:\n print (\"%d - Error\" % tof_left.my_object_number)\n\n right_distance = tof_right.get_distance()\n if (right_distance > 0):\n print (\"right distance in mm is %d mm\" % right_distance)\n else:\n print (\"%d - Error\" % tof_right.my_object_number)\n \n forward_distance = tof_forward.get_distance() #read the forward sensor, not the right one\n if (forward_distance > 0):\n print (\"forward distance in mm is %d mm\" % forward_distance)\n else:\n print (\"%d - Error\" % tof_forward.my_object_number)\n\n time.sleep(timing/1000000.00)\n\ntof_left.stop_ranging()\nGPIO.output(left, GPIO.LOW)\n\ntof_right.stop_ranging()\nGPIO.output(right, GPIO.LOW)\n\ntof_forward.stop_ranging()\nGPIO.output(forward, GPIO.LOW)\n","sub_path":"Initial_Sensor_Tests/distance_test.py","file_name":"distance_test.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"}
{"seq_id":"556126979","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework.views import APIView\n\nimport json\nfrom datetime import timedelta, date\nimport datetime\nfrom pyecharts.charts import Sunburst, BMap, Line, WordCloud, Pie, Calendar, Bar, Grid, Funnel\nfrom pyecharts.globals import SymbolType, GeoType, ThemeType\nfrom pyecharts import options as opts\n\nfrom event.models import Street, Event, Property, Achieve, Type\nfrom .charts import Charts\n\nBAIDU_MAP_AK = 'X3ATCKQWRjRxLNLI1Wv9NiTMFAa5bh8W'\nproperties = Property.objects.all()\nstreet_list = Street.objects.all()\n\nclass IndexView(APIView):\n def get(self, request, *args, **kwargs):\n charts = Charts()\n context = {\n \"properties\": properties,\n \"streets\": street_list,\n 'pie_chart': charts.pie,\n 'line_charts': charts.get_line(),\n 'sunburst_chart': charts.sunburst,\n 'map_chart': charts.map,\n 'calendar_chart': charts.calendar,\n 'wordcloud_chart': charts.wordcloud,\n 'cur_page': \"charts\",\n 
'bar_chart': charts.bar,\n 'funnel_chart': charts.funnel_base()\n }\n return render(request, 'chart/charts.html', context)\n\n\ndef get_pie_data(request):\n street_choice = request.GET.get(\"pie-choice\")\n start = datetime.datetime.now()\n data = []\n data_value = []\n for pro in Property.objects.all():\n event_list = Event.objects.filter(property=pro)\n cnt = 0\n for event in event_list:\n if event.community.street.name == street_choice:\n cnt += 1\n data.append(pro.name)\n data_value.append(cnt)\n\n c = (\n Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))\n .add(\"\", [list(z) for z in zip(data, data_value)])\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c}\"))\n .dump_options_with_quotes()\n )\n\n end = datetime.datetime.now()\n print(\"Pie: \" + str(end - start))\n return HttpResponse(c, content_type='application/json')\n\n\ndef get_sun_data(request):\n sun_choice = request.GET.get(\"sun-choice\")\n street = Street.objects.get(name=sun_choice)\n sun_data = []\n statuses = Achieve.objects.all()\n type_list = Type.objects.all()\n for status in statuses:\n type_value = {}\n for v in type_list:\n type_value.update({v.name: 0})\n\n events = status.event.get_queryset()\n\n for event in events:\n if event.community.street != street:\n continue\n type_value[event.type.name] += 1\n\n time_list = []\n for key in type_value.keys():\n if type_value[key]:\n single = opts.SunburstItem(name=key, value=type_value[key])\n time_list.append(single)\n\n if len(time_list):\n name = status.name\n s_item = opts.SunburstItem(name=name, children=time_list)\n sun_data.append(s_item)\n\n start = datetime.datetime.now()\n data = sun_data\n c = (\n Sunburst()\n .add(series_name=\"\", data_pair=data, radius=[0, \"90%\"])\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}\"))\n .dump_options_with_quotes()\n )\n end = datetime.datetime.now()\n print(\"Sunburst: \" + str(end - start))\n return HttpResponse(c, content_type='application/json')\n\n\ndef get_calender_data(request):\n def get_date(days):\n day = (date.today() - timedelta(days=555) - timedelta(days=days))\n return day\n property = Property.objects.get(name=request.GET.get(\"calender-choice\"))\n event_list = Event.objects.filter(property=property)\n\n start = datetime.datetime.now()\n\n begin = get_date(365)\n end = datetime.date.today() - timedelta(days=465)\n data = []\n cur_day = end\n count = 0\n for event in event_list:\n if event.create_time < cur_day:\n data.insert(0, (cur_day, count))\n count = 0\n cur_day -= timedelta(days=1)\n\n if cur_day < begin:\n break\n\n if event.create_time == cur_day:\n count += 1\n\n c = (\n Calendar()\n .add(\"\",\n data,\n calendar_opts=opts.CalendarOpts(\n range_=[begin, end],\n daylabel_opts=opts.CalendarDayLabelOpts(name_map=\"cn\"),\n monthlabel_opts=opts.CalendarMonthLabelOpts(name_map=\"cn\"),\n pos_right=\"20px\"\n ),\n )\n .set_global_opts(\n visualmap_opts=opts.VisualMapOpts(\n max_=501,\n min_=1,\n orient=\"horizontal\",\n is_piecewise=True,\n pos_left=\"40px\"\n ),\n )\n .dump_options_with_quotes()\n )\n\n end = datetime.datetime.now()\n print(\"Calendar: \" + str(end - start))\n return HttpResponse(c, content_type='application/json')\n","sub_path":"chart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"60"} +{"seq_id":"215968914","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\n#Primeiro de tudo, criar uma classe modelo\r\nclass 
ModelHero():\r\n def __init__(self, nome, identSecreta):\r\n self.__nome = nome\r\n self.__identSecreta = identSecreta\r\n\r\n def getNome(self):\r\n return self.__nome\r\n \r\n def getIdentSecreta(self):\r\n return self.__identSecreta\r\n\r\n#Then create a view, which is the interface the end user will see\r\nclass View():\r\n def __init__(self, master, controller):\r\n self.controller = controller\r\n self.frame = tk.Frame(master)\r\n self.frame.pack()\r\n \r\n #Create a viewPanel, passing references to the main window and the controller\r\n self.viewPanel = ViewPanel(master, controller)\r\n\r\n#This is where the window's view is built\r\nclass ViewPanel():\r\n def __init__(self, root, controller):\r\n self.controller = controller\r\n self.janela = tk.Frame(root)\r\n self.janela.pack()\r\n self.frameNome = tk.Frame(self.janela)\r\n self.frameNome.pack()\r\n self.frameIdent = tk.Frame(self.janela)\r\n self.frameIdent.pack()\r\n self.frameButtons = tk.Frame(self.janela)\r\n self.frameButtons.pack()\r\n\r\n self.labelNome = tk.Label(self.frameNome, text = 'Nome: ')\r\n self.labelNome.pack(side = 'left')\r\n self.labelIdent = tk.Label(self.frameIdent, text = 'Identidade secreta: ')\r\n self.labelIdent.pack(side = 'left')\r\n\r\n self.inputNome = tk.Entry(self.frameNome, width = 20)\r\n self.inputNome.pack(side = 'left')\r\n self.inputIdent = tk.Entry(self.frameIdent, width = 20)\r\n self.inputIdent.pack(side = 'left')\r\n\r\n self.buttonCadastrar = tk.Button(self.frameButtons, text = 'Cadastrar', font = ('Arial black', 8))\r\n self.buttonCadastrar.pack(side = 'left')\r\n self.buttonCadastrar.bind(\"