diff --git "a/706.jsonl" "b/706.jsonl" new file mode 100644--- /dev/null +++ "b/706.jsonl" @@ -0,0 +1,743 @@ +{"seq_id":"274185705","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom sales.stores.models import Store\nfrom sales.transactions.models import Transaction\n\n\nclass TransactionReportViewSet(ViewSet):\n\n @staticmethod\n def get_real_value(value, signal):\n mult = -1 if signal == '-' else 1\n return value * mult\n\n def list(self, request):\n # Filters\n store = request.query_params.get(\"store\", \"\")\n\n # Data\n all_stores = Store.objects.search(store)\n all_transactions = Transaction.objects.all().order_by(\"store\", \"date\", \"hour\").distinct()\n\n # Controllers\n balance = 0\n data = []\n store_transactions = []\n\n for store in all_stores:\n for transaction in all_transactions:\n if store != transaction.store:\n continue\n\n value = self.get_real_value(transaction.value, transaction.type.signal)\n balance += float(value)\n store_transactions.append({\n \"date\": transaction.date.strftime(\"%d/%m/%Y\"),\n \"hour\": transaction.hour,\n \"value\": value,\n \"type\": transaction.type.description,\n \"signal\": transaction.type.signal,\n \"customer\": transaction.customer.cpf,\n \"customer_card\": transaction.customer_card.number,\n \"current_balance\": balance\n })\n\n # Accum Results\n data.append({\n \"store\": store.name,\n \"owner\": store.owner.name,\n \"owner_id\": store.owner.id,\n \"balance\": balance,\n \"transactions\": store_transactions,\n })\n balance = 0\n store_transactions = []\n\n return Response(\n data={\"transactions_by_store\": data},\n status=status.HTTP_200_OK,\n )\n\n\n\"\"\"\n ===============================================================================\n CONSULTA SQL PARA VALIDACAO DOS VALORES\n ===============================================================================\n WITH base as (\n SELECT\n t.value as \"value\",\n tt.signal as signal,\n ss.name as store,\n s.name as store_owner\n FROM transactions_transaction t\n INNER JOIN stores_store ss on ss.id = t.store_id\n INNER JOIN stores_storeowner s on s.id = ss.owner_id\n INNER JOIN transactions_transactiontype tt on tt.id = t.type_id\n order by store, date, hour\n )\n SELECT \n store, \n store_owner,\n SUM(case b.signal when '-' then b.value * -1 else b.value end) as balance\n from base b\n GROUP BY store, store_owner\n ORDER BY balance DESC\n\"\"\"\n","sub_path":"sales/reports/api/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612061126","text":"from reportlab.pdfgen import canvas\nfrom PyPDF2 import PdfFileWriter, PdfFileReader\n\n# Create the watermark from an image\nc = canvas.Canvas('watermark.pdf')\nc.setFont(\"Courier\", 60)\n#This next setting with make the text of our\n#watermark gray, nice touch for a watermark.\nc.setFillGray(0.5,0.5)\n#Set up our watermark document. Our watermark\n#will be rotated 45 degrees from the direction\n#of our underlying document.\nc.saveState()\nc.translate(500,100)\nc.rotate(45)\nc.drawCentredString(0, 0, \"TESTED\")\nc.drawCentredString(0, 300, \"XXXK\")\nc.drawCentredString(0, 600, \"AxARK\")\nc.restoreState()\n# Draw the image at x, y. 
I positioned the x,y to be where i like here\nc.drawImage('test.png',0,720)\n\n# Add some custom text for good measure\n#~ c.drawString(15, 720,\"Hello World\")\nc.save()\n\n# Get the watermark file you just created\nwatermark = PdfFileReader(open(\"watermark.pdf\", \"rb\"))\n\n# Get our files ready\noutput_file = PdfFileWriter()\ninput_file = PdfFileReader(open(\"example.pdf\", \"rb\"))\n\n# Number of pages in input document\npage_count = input_file.getNumPages()\n\n# Go through all the input file pages to add a watermark to them\n#~ for page_number in range(page_count):\n #~ print(\"Watermarking page {} of {}\".format(page_number, page_count))\n #~ # merge the watermark with the page\ninput_page = input_file.getPage(0)\ninput_page.mergePage(watermark.getPage(0))\n# add page from input file to output document\noutput_file.addPage(input_page)\n\n# finally, write \"output\" to document-output.pdf\nwith open(\"document-output.pdf\", \"wb\") as outputStream:\n output_file.write(outputStream)\n","sub_path":"tests/pdfwatermark/pdf2.py","file_name":"pdf2.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16492580","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-04-09 21:13:03\n# @Author : AlexanderZzzz (hjzhu@uvic.ca)\n# @Link : ${link}\n# @Version : v.2.0.0\n\nimport os\nimport time\nimport pandas as pd\nimport csv\n\nencod = 'cp1256'\n\n\n\ndata = ['GP', 'HE', 'AN','AI','DI','HC','PS']\nprint('Iutput file name in INPUT folder:')\nfilename =str(os.getcwd() + '\\\\Input\\\\' + input())\nprint('Output file name in Output folder:')\noutput_file = str(os.getcwd() + '\\\\Output\\\\' + input())\nstart_time = time.time()\n##################################\ndef str_recorder(strings, input_item):\n fir_char_flag = 0\n sec_char_flag = 0\n new_strings = input_item[0]\n strings = strings.replace('\\n', '')\n temp = ''\n b_flag = 0\n for idx, item in enumerate(strings):\n if not ((item == input_item[1]) and (fir_char_flag == 1) and (sec_char_flag == 0)):\n fir_char_flag = 0\n else:\n sec_char_flag = 1\n if not ((item == input_item[0]) and (fir_char_flag == 0)):\n fir_char_flag = 0\n else:\n if strings[idx-3] == '1':\n fir_char_flag = 1\n else:\n fir_char_flag = 0\n if not sec_char_flag == 1:\n pass\n else:\n new_strings = new_strings + item\n return new_strings, sec_char_flag\n##################################\n\n##################################\ndef line_counter(filename):\n\n# find out the line number\n with open(filename, encoding='cp1256') as csvfile:\n count = 0\n for i, l in enumerate(csvfile):\n count = i\n count = count + 1\n csvfile.close()\n return count\n##################################\n\ncount = line_counter(filename)\ncount =float(count)\n # creat output file object\nspamWriter = csv.writer(open(output_file, 'w', newline='', encoding='utf-8'))\nspamWriter.writerow(['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6', 'item7', 'item8', 'item9', 'item00', 'item11', 'item12]', 'item13', 'item14', 'item15', 'item16', 'item17', 'item18', 'item19'])\nspamWriter.writerow([\"Encoding type:\", encod])\n# process row\nwith open(filename, encoding=encod) as csvfile:\n test = 0\n last_record_flag = 0\n for idx, row in enumerate(csvfile):\n strings = ''\n del_count = 0\n if last_record_flag == 1:\n new_row = last_row + row\n last_record_flag = 0\n else:\n new_row = row\n for string in data:\n [newstrings, flag] = str_recorder(new_row,string)\n if not flag == 1:\n pass\n 
else:\n strings = newstrings\n del flag\n strings = strings.replace(\"*\", \", *\") # this is a list\n temp = strings.replace(\"[\", \"\").split(',') # this is a list\n L_origin = len(new_row)\n L_new = len(strings)\n # print(L_origin,L_new,row)\n # print('----------------')\n if not len(temp) > 2:\n pass\n else:\n if (L_origin-L_new) > 48 and (L_origin-L_new) < 51:\n spamWriter.writerow(['NEXT SECTION'])\n spamWriter.writerow(temp)\n print('\\r', \"{:5.1f}\".format(idx/count*100), '%', 'Woring on line:', idx,end='', flush=True)\n if not L_new == 0:\n pass\n else:\n last_record_flag = 1\n last_row = row\ncsvfile.close()\ncount2 = line_counter(output_file)\nelapsed_time = time.time() - start_time\ncount = int(count)\nprint()\nprint('..........................FINISHED..........................')\nprint('Input file name and location:',filename)\nprint('Output file name and location:', output_file)\nprint('Processed', count, 'lines', '; Droped', count - count2, 'lines')\nprint('Time elapsed:','{:10.4f}'.format(elapsed_time), 'sec')\nprint('Press ENTER key to continue')\n","sub_path":"NMEA0183toCSV/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"476616036","text":"# -*- coding: utf-8 -*-\n# !@time: 2020/6/26 下午5:56\n# !@author: superMC @email: 18758266469@163.com\n# !@fileName: __init__.py\nimport cv2\n\nfrom pid.yolov4.tool.darknet2pytorch import Darknet\nfrom pid.yolov4.tool.torch_utils import do_detect\nimport numpy as np\nfrom self_utils.image_tool import crop_box\n\n\nclass YoloV4:\n def __init__(self, cfg=\"pid/yolov4/cfg/yolov4.cfg\", weight=\"pid/yolov4/yolov4_checkpoints/yolov4.weights\",\n use_cuda=1):\n model = Darknet(cfg)\n\n # model.print_network()\n model.load_weights(weight)\n # 1print('Loading weights from %s... Done!' 
% (weight))\n if use_cuda:\n model.cuda()\n model.eval()\n self.model = model\n del model\n\n def __call__(self, image):\n rgb_image = cv2.resize(image, (self.model.width, self.model.height))\n rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)\n boxes = do_detect(self.model, rgb_image, 0.5, 80, 0.4)\n new_boxes = []\n person_images = []\n for box in boxes:\n if box[-1] == 0:\n width = image.shape[1]\n height = image.shape[0]\n new_box = [0] * 4\n new_box[0] = np.maximum(int((box[0] - box[2] / 2.0) * width), 0)\n new_box[1] = np.maximum(int((box[1] - box[3] / 2.0) * height), 0)\n new_box[2] = np.minimum(int((box[0] + box[2] / 2.0) * width), width) # w\n new_box[3] = np.minimum(int((box[1] + box[3] / 2.0) * height), height)\n person_images.append(crop_box(image, new_box))\n new_boxes.append(new_box)\n return person_images, new_boxes\n\n\nif __name__ == '__main__':\n model = YoloV4()\n image = cv2.imread('data/aoa.jpg')\n person_images, boxes = model(image)\n for index, person_image in enumerate(person_images):\n cv2.imshow('demo', person_image)\n cv2.waitKey(0)\n","sub_path":"pid/yolov4/yolov4.py","file_name":"yolov4.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244454195","text":"from sklearn.feature_extraction import DictVectorizer \nimport csv \nfrom sklearn import preprocessing \nfrom sklearn import tree \nfrom sklearn.externals.six import StringIO \nimport numpy as np\n\n \nimport scipy as sp \nfrom sklearn.metrics import precision_recall_curve \nfrom sklearn.metrics import classification_report \nfrom sklearn.model_selection import train_test_split \n\nfrom sklearn.datasets import load_iris\n#from sklearn import tree\nimport sys\nimport os \nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files/Graphviz2.38/bin'\n \nallElectronicsData = open(r\"spammer.csv\",\"r\",encoding='UTF-8') \nreader = csv.reader(allElectronicsData) \nheaders =next(reader) \n\nfeatureList = [] \nlabelList = [] \nfor row in reader: \n labelList.append(row[len(row)-1]) \n rowDic = {} \n for i in range(1,len(row)-1): \n rowDic[headers[i]] = row[i] \n featureList.append(rowDic) \n \nvec = DictVectorizer() \ndummyX = vec.fit_transform(featureList) .toarray() \n \nlb = preprocessing.LabelBinarizer() \ndummyY = lb.fit_transform(labelList) \n \nx_train, x_test, y_train, y_test = train_test_split(dummyX, dummyY, test_size = 0.2)\n \nclf = tree.DecisionTreeClassifier(criterion=\"entropy\") #创建一个分类器,entropy决定了用ID3算法 \nclf = clf.fit(x_train, y_train) \nprint (\"clf:\"+str(clf)) \n \nwith open(\"ID3.dot\", 'w') as f:\n f = tree.export_graphviz(clf, out_file=f)\n\nimport pydotplus \ndot_data = tree.export_graphviz(clf, out_file=None) \ngraph = pydotplus.graph_from_dot_data(dot_data) \ngraph.write_pdf(\"ID3.pdf\")\n\n'''''测试结果的打印''' \nanswer = clf.predict(x_train) \nprint(x_train) \nprint(answer) \nprint(y_train) \nprint(np.mean( answer == y_train)) \n \n'''''准确率与召回率''' \nprecision, recall, thresholds = precision_recall_curve(y_train, clf.predict(x_train)) \nanswer = clf.predict_proba(dummyX)[:,1] \nprint(classification_report(dummyY, answer, target_names = ['yes', 'no']))","sub_path":"ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340936150","text":"# MIT License\n#\n# Copyright (c) 2017 BingZhang Hu\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this 
software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport scipy.io as sio\nimport random as random\nimport numpy as np\nimport os\n\n\nclass FileReader():\n def __init__(self, name,data_dir, data_info, contain_val=False, val_data_dir='', val_list='', reproducible=True):\n\n if reproducible:\n np.random.seed(112358)\n random.seed(112358)\n # training data\n self.data_info = sio.loadmat(data_info)\n self.prefix = data_dir\n if name=='CACD':\n self.age = np.squeeze(self.data_info['celebrityImageData']['age'][0][0])\n self.total_images = len(self.age)\n self.identity = np.squeeze(self.data_info['celebrityImageData']['identity'][0][0])\n self.nof_identity = len(np.unique(self.identity))\n self.path = np.squeeze(self.data_info['celebrityImageData']['name'][0][0]).tolist()\n self.label_tsv = './data/label.tsv'\n self.sprite = './data/face.png'\n elif name=='MORPH' :\n self.age = np.squeeze(self.data_info['morph']['age'][0][0])\n self.total_images = len(self.age)\n self.path = np.squeeze(self.data_info['morph']['name'][0][0])\n self.label_tsv = './data/m_label.tsv'\n self.sprite = './data/test.png'\n else:\n print('No dataset named %s found!'% name)\n # val data\n if contain_val:\n self.val_data_dir = val_data_dir\n self.val_path = []\n self.val_size = 0\n self.current_val_idx = 0\n val_file = open(val_list)\n for i in val_file.readlines():\n self.val_path.append(os.path.join(self.val_data_dir, i.replace('\\n', '')))\n self.val_size += 1\n def __str__(self):\n return str(self.val_path)\n\n # def __str__(self):\n # return 'Data directory:\\t' + self.prefix + '\\nIdentity Num:\\t' + str(self.nof_identity)\n\n # def select_age(self, nof_age, nof_images):\n # images_and_labels=[]\n # ages_selected = random.sample(range(14,63),nof_age)\n # for i in ages_selected:\n # images_indices = np.where(self.age==i)[0]\n # # print('age:%d len:%d' % (i,len(images_indices)))\n # images_selected = random.sample(images_indices,nof_images)\n # for image in images_selected:\n # images_and_labels.append([image,i])\n # image_data = []\n # label_data = []\n # image_path = []\n # for image,label in images_and_labels:\n # image_data.append(self.read_jpeg_image(self.prefix+self.path[image][0].encode('utf-8')))\n # label_data.append(label)\n # image_path.append(self.prefix + self.path[image][0].encode('utf-8'))\n # return image_data, label_data, image_path, ages_selected\n\n def select_identity_path(self, nof_person, nof_images):\n paths = []\n labels = []\n ids_selected = random.sample(range(self.nof_identity), nof_person)\n for i in ids_selected:\n images_indices = np.where(self.identity == i + 1)[0]\n if 
len(images_indices) >= nof_images:\n images_selected = random.sample(list(images_indices), nof_images)\n else:\n images_selected = images_indices\n for image in images_selected:\n try:\n paths.append(os.path.join(self.prefix, self.path[image][0].encode('utf-8')))\n except:\n paths.append(os.path.join(self.prefix, self.path[image][0]))\n labels.append(i)\n return np.asarray(paths), np.asarray(labels)\n\n def select_age_path(self,nof_age,nof_images):\n paths = []\n labels = []\n ages_selected = random.sample(range(np.min(self.age),np.max(self.age)+1), nof_age)\n for i in ages_selected:\n images_indices = np.where(self.age == i)[0]\n if len(images_indices) >= nof_images:\n images_selected = random.sample(list(images_indices), nof_images)\n else:\n images_selected = images_indices\n for image in images_selected:\n try:\n # paths.append(os.path.join(self.prefix, self.path[image][0].encode('utf-8').replace('jpg','png')))\n paths.append(os.path.join(self.prefix, self.path[image][0].encode('utf-8')))\n except:\n # paths.append(os.path.join(self.prefix, self.path[image][0].replace('jpg','png')))\n paths.append(os.path.join(self.prefix, self.path[image][0]))\n labels.append(i)\n return np.asarray(paths), np.asarray(labels)\n\n # def select_identity(self, nof_person, nof_images):\n # images_and_labels = []\n # # ids_selected \\in [0,1999]\n # ids_selected = random.sample(range(self.nof_identity), nof_person)\n # for i in ids_selected:\n # # here we select id with 'i+1' as the index of identity in cele.mat starts from 1\n # images_indices = np.where(self.identity == i + 1)[0]\n # # print('id:%d len:%d' % (i + 1, len(images_indices)))\n # images_selected = random.sample(list(images_indices), nof_images)\n # for image in images_selected:\n # images_and_labels.append([image, i])\n # image_data = []\n # label_data = []\n # image_path = []\n # for image, label in images_and_labels:\n # image_data.append(self.read_jpeg_image(self.prefix + self.path[image][0].encode('utf-8')))\n # label_data.append(label)\n # image_path.append(self.prefix + self.path[image][0].encode('utf-8'))\n # return image_data, label_data, image_path, ids_selected\n\n # def select_quartet(self,nof_person, nof_images):\n # images_and_labels = []\n # ages = []\n # ids_selected = random.sample(xrange(self.nof_identity), nof_person)\n # for i in ids_selected:\n # ages.append(self.age[np.where(self.identity==i+1)[0]])\n # return ages\n\n # def read_triplet(self, image_path, label, triplet, i, len):\n # triplet_image = []\n # triplet_label = []\n # for idx in xrange(i, i + len):\n # anchor = self.read_jpeg_image(image_path[triplet[idx][0]])\n # pos = self.read_jpeg_image(image_path[triplet[idx][1]])\n # neg = self.read_jpeg_image(image_path[triplet[idx][2]])\n # triplet_image.append([anchor, pos, neg])\n # triplet_label.append([label[triplet[idx][0]], label[triplet[idx][1]], label[triplet[idx][2]]])\n # return triplet_image, triplet_label\n\n # def get_next_batch(self,batch_size):\n # img_data = []\n # label = []\n # for i in range(batch_size):\n # if self.current_index', cmd, ')')\n return_code = subprocess.call(cmd, shell=True)\n\n elif os.name == 'nt': # running Windows -- must use pywin32 to ask for elevation\n showCmd = win32con.SW_SHOWNORMAL\n try:\n params = quote(*cmdLine[1:])\n except IndexError:\n params = \"\"\n try:\n cmd = quote(cmdLine[0])\n except IndexError:\n cmd = \"_No_command_was_supplied_\"\n lpVerb = 'runas' # causes UAC elevation prompt.\n print()\n if wait:\n print(\"This window will be waiting while a child window is 
run as an Administrator...\")\n print(\"(Running command-->{} {})\".format(cmd, params))\n procInfo = ShellExecuteEx(nShow=showCmd,\n fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,\n lpVerb=lpVerb,\n lpParameters=params,\n lpFile=cmd)\n if wait:\n procHandle = procInfo['hProcess']\n if procHandle is None:\n print(\"Windows Process Handle is Null. RunAsAdmin did not create a child process.\")\n return_code = 89 # Windows ERROR_NO_PROC_SLOTS\n else:\n win32event.WaitForSingleObject(procHandle, win32event.INFINITE)\n return_code = win32process.GetExitCodeProcess(procHandle)\n # print(\"Process handle %s returned code %s\" % (procHandle, return_code))\n procHandle.Close()\n print(\"(Now Returned from waiting...)\")\n else:\n return_code = None # asked not to wait for completion\n else:\n raise RuntimeError(\"Unsupported operating system for this module: {}\".format(os.name))\n return return_code\n\n\ndef get_context(flag=ELEVATION_FLAG):\n '''\n parse and return json dictionary from the --_context= argument.\n :return: dic\n '''\n for arg in sys.argv:\n if arg.startswith(flag):\n try:\n ctx = arg.split('=')[1]\n if not ctx.startswith('{'):\n ctx = '{' + ctx\n if not ctx.endswith('}'):\n ctx += '}'\n ret = json.loads(ctx)\n return ret\n except (IndexError, json.JSONDecodeError) as e:\n print(\"Decode Error in {}=>{}\".format(flag, e))\n print(\"sys.argv-->\", sys.argv)\n return {}\n return {}\n\n\ndef set_env_variables_permanently_win(key_value_pairs, whole_machine = False):\n \"\"\"\n Similar to os.environ[var_name] = var_value for all pairs provided, but instead of setting the variables in the\n current process, sets the environment variables permanently at the os MACHINE level.\n NOTE: process must be \"elevated\" before making this call. Use \"sudo\" first.\n\n Original Recipe from http://code.activestate.com/recipes/416087/\n :param key_value_pairs: a dictionary of variable name+value to set\n :param whole_machine: if True the env variables will be set at the MACHINE (HKLM) level.\n If False it will be done at USER level (HKCU)\n :return:\n \"\"\"\n if not isinstance(key_value_pairs, dict):\n raise ValueError('{!r} must be {}'.format(key_value_pairs, dict))\n if os.name != 'nt':\n raise ModuleNotFoundError('Attempting Windows operation on non-Windows')\n\n subkey = r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment' if whole_machine else r'Environment'\n\n with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE if whole_machine else winreg.HKEY_CURRENT_USER,\n subkey, 0, winreg.KEY_ALL_ACCESS) as key:\n for name, value in key_value_pairs.items():\n print(' setting environment variable -->', name, '=', value)\n try:\n present, value_type = winreg.QueryValueEx(key, name)\n except OSError:\n present = ''\n value_type = winreg.REG_SZ\n print('old value was {} = {}'.format(name, present))\n\n if name.upper() in ['PATH', 'PATHEXT']:\n if value.upper() in present.split(';'): # these two keys will always be present and contain \";\"\n print('Value {} already in {}'.format(value, present))\n continue\n else:\n print('\"{}\" will not be entirely changed. 
\"{}\" will be appended at the end.'.format(\n name, value))\n value = '{};{}'.format(present, value)\n if value:\n print(\"Setting ENVIRONMENT VARIABLE '{}' to '{}'\".format(name, value))\n winreg.SetValueEx(key, name, 0, value_type, value)\n else:\n print(\"Deleting ENV VARIABLE '{}'\".format(name))\n try:\n winreg.DeleteValue(key, name)\n except FileNotFoundError:\n pass # ignore if already deleted\n\n # tell all the world that a change has been made\n win32gui.SendMessageTimeout(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment',\n win32con.SMTO_ABORTIFHUNG, 1000)\n if has_context():\n input('Hit to continue . . .')\n\n\ndef test(command=None):\n try:\n if isinstance(command, str):\n command = command.split()\n if \"--test\" in command:\n command.remove(\"--test\")\n except TypeError:\n pass\n if not isUserAdmin():\n print(\"You're not an admin. You are running PID={} with command-->{}\".format(os.getpid(), command))\n if command is not None:\n return_code = runAsAdmin(command[1:])\n else:\n print(\"You ARE an admin. You are running PID={} with command-->{}\".format(os.getpid(), command))\n if command is not None and len(command) > 1:\n # noinspection PyUnresolvedReferences\n return_code = subprocess.call(quote(*command[1:]), shell=True)\n else:\n return_code = 0\n time.sleep(2)\n input('Press Enter to exit.')\n return return_code\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1 or sys.argv[1] in [\"--help\", \"-h\", \"su\", \"/?\", \"/help\"]:\n print('''usage:\n sudo # will run with elevated priviledges\n sudo --pause # will keep the command screen open until you hit a key\n sudo salt-xxx . . . # will call a command from C:\\Salt\\salt-xxx and then pause\n sudo --set-user-env={'arg1':'val1','arg2':'val2'} # adds values to the user's PERMANENT environment vars\n sudo --set-system-env='arg1':'val1','arg2':'val2' # adds values to the system's PERMANENT environment vars\n sudo --hosts # will open your /etc/hosts file for editing (at the weird Windows location)\n sudo --install-sudo-command # create a runnable copy of itself in C:\\Windows\n sudo bash # starts an Administrator Linux-subsystem-for-Windows window\n sudo cmd # starts an Administrator command window\n ''')\n elif sys.argv[1] == \"--version\":\n print('sudo version', VERSION)\n elif sys.argv[1] == \"--test\":\n print('......testing.......')\n test(sys.argv)\n elif sys.argv[1] == \"--hosts\":\n print('....... NEXT, a useful example ... 
editing the \"etc/hosts\" file ........')\n if os.name == 'nt':\n call = [\"notepad\", r\"C:\\Windows\\System32\\drivers\\etc\\hosts\"]\n else:\n call = ['nano', '/etc/hosts']\n runAsAdmin(call)\n elif sys.argv[1] == \"--install-sudo-command\" and os.name == 'nt':\n WINDOWS_PATH = r'C:\\Windows\\sudo.py'\n print('Installing \"sudo\" command...')\n if isUserAdmin():\n shutil.copy2(__file__, WINDOWS_PATH)\n shutil.copy2(os.path.dirname(os.path.abspath(__file__)) + r'\\argv_quote.py',\n os.path.dirname(WINDOWS_PATH) + r'\\argv_quote.py')\n shutil.copy2(os.path.dirname(os.path.abspath(__file__)) + r'\\pause_after.bat',\n os.path.dirname(WINDOWS_PATH) + r'\\pause_after.bat')\n set_env_variables_permanently_win({'PATHEXT': '.PY'}, whole_machine=True)\n else:\n runAsAdmin([os.path.abspath(__file__), '--install-sudo-command'], python_shell=True)\n elif any([arg.startswith(\"--set-system-env\") for arg in sys.argv]) and os.name == 'nt':\n ctx = get_context(\"--set-system-env\")\n set_env_variables_permanently_win(ctx, whole_machine=True)\n elif any([arg.startswith(\"--set-user-env\") for arg in sys.argv]) and os.name == 'nt':\n ctx = get_context(\"--set-user-env\")\n set_env_variables_permanently_win(ctx, whole_machine=False)\n else:\n if sys.argv[1].startswith('salt-'): # make \"sudo salt-call\" actually work without being in PATH\n sys.argv[1] = os.path.join('c:\\\\salt', sys.argv[1]) # call .bat file from c:\\salt\n sys.argv.insert(1, '--pause')\n\n if sys.argv[1] == '--pause':\n sys.argv[1] = 'pause_after'\n\n runAsAdmin(sys.argv[1:])\n","sub_path":"configure_machine/helpers/sudo.py","file_name":"sudo.py","file_ext":"py","file_size_in_byte":12659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634475428","text":"import copy\nfrom environment import Environment as e \nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport progressbar\nnp.random.seed(0)\n\nclass DeepQLearningAgent(object):\n def __init__(self, discount, alpha, T, rho):\n self.discount = discount\n self.env = None\n self.alpha = alpha\n self.T = T\n self.rho = rho\n self.learning_rate = 0.001 \n self.initial_exploration_rate = 0.995\n self.min_exploration_rate = 0.1\n self.states_visited = np.zeros((self.T+1, self.T+1))\n\n # initialize state mapping and states\n self.state_mapping = {}\n self.states = []\n count = 0\n for a in range(T+1):\n for h in range(T+1):\n self.state_mapping[(a, h)] = count\n self.states.append((a, h))\n count += 1\n \n # deep q\n self.current_model = self.initializeModel()\n self.target_model = copy.deepcopy(self.current_model)\n self.memories = []\n self.training_memory_count = 50\n \n def initializeModel(self):\n model = Sequential()\n model.add(Dense(24, input_dim=2, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(3, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n return model\n \n def chooseAction(self, current_state):\n legal_actions = self.env.getLegalActions()\n state_index = self.state_mapping[current_state]\n # explore\n current_explore_rate = np.power(self.initial_exploration_rate, self.states_visited[current_state])\n if current_explore_rate < self.min_exploration_rate:\n current_explore_rate = self.min_exploration_rate\n if np.random.uniform() < current_explore_rate:\n return np.random.choice(legal_actions)\n \n # exploit\n current_action = -1\n 
current_value = float(\"-inf\")\n action_values = self.current_model.predict(np.reshape(np.asarray(current_state), (1,2)))\n for action in legal_actions:\n new_value = action_values[0][action]\n if new_value > current_value:\n current_action = action\n current_value = new_value\n return current_action\n \n def evalReward(self, reward):\n return (1 - self.rho) * reward[0] - self.rho * reward[1]\n\n def syncModels(self):\n self.target_model = self.current_model\n\n def runTrial(self, iterations):\n self.env = e(self.alpha, self.T)\n self.initializeModel()\n bar = progressbar.ProgressBar()\n for i in bar(range(iterations)):\n current_state_tuple = self.env.current_state.getTupleRepresentation()\n self.states_visited[current_state_tuple] += 1\n action = self.chooseAction(current_state_tuple)\n new_state, reward = self.env.takeAction(action)\n reward_value = self.evalReward(reward)\n \n # creating a new memory\n memory = dict({\n 'current_state' : current_state_tuple,\n 'action' : action,\n 'reward' : reward_value,\n 'new_state' : new_state.getTupleRepresentation()\n })\n self.memories.append(memory)\n\n # training network\n if len(self.memories) > self.training_memory_count:\n self.trainNeuralNet()\n if len(self.memories) > 3000:\n self.memories.pop(0)\n if i % 10 == 0:\n self.syncModels()\n \n def trainNeuralNet(self):\n memory_subset = np.random.choice(self.memories, self.training_memory_count, replace=False)\n training_data, training_target = [], []\n for memory in memory_subset:\n print(memory)\n total_reward = memory['reward']\n total_reward += self.discount * max(self.target_model.predict(self.prepareInput(memory['new_state']))[0])\n target = self.target_model.predict(self.prepareInput(memory['current_state']))\n print(target)\n # this modifies the prediction to have new value for the action taken\n target[0][memory['action']] = total_reward\n training_data.append(memory['current_state'])\n training_target.append(target)\n print(target)\n\n # fiting model --- this is the neural net training \n self.current_model.fit(\n np.squeeze(np.asarray(training_data)), \n np.squeeze(np.asarray(training_target)), \n epochs=1, \n verbose=False)\n \n def prepareInput(self, state):\n return np.reshape(np.asarray(state), (1,2))\n \n def extractPolicy(self):\n policy = []\n for state in self.states:\n a, h = state\n # any action is legal\n if a > h:\n print('all legal')\n print(self.current_model.predict(self.prepareInput(state)))\n max_action = np.argmax(self.current_model.predict(self.prepareInput(state))[0])\n else:\n print('no override')\n print(self.current_model.predict(self.prepareInput(state)))\n arg_sorted = np.argsort(self.current_model.predict(self.prepareInput(state))[0])\n # override not legal\n if arg_sorted[0] == 1:\n max_action = arg_sorted[1]\n else:\n max_action = arg_sorted[0]\n policy.append(max_action)\n return policy\n\n def processPolicy(self, policy):\n results = ''\n print(policy)\n for a in range(9):\n results += '{} & '.format(a)\n for h in range(9):\n state_index = self.state_mapping[(a, h)]\n action = policy[state_index]\n assert(action in [0, 1, 2])\n if action == 0:\n results += 'a'\n elif action == 1:\n results += 'o'\n else:\n results += 'w'\n results += ' & '\n results = results[:-2]\n results += '\\\\\\\\ \\n'\n print(results)\n \n def plotStatesVisited(self):\n f, ax = plt.subplots(figsize=(10,10))\n im = ax.imshow(self.states_visited, cmap='hot', interpolation='nearest')\n f.colorbar(im)\n plt.savefig('states_visited.png')\n plt.show()\n \n def 
plotLogStatesVisited(self):\n f, ax = plt.subplots(figsize=(10,10))\n im = ax.imshow(np.log(self.states_visited+1), cmap='hot', interpolation='nearest')\n f.colorbar(im)\n plt.savefig('log_states_visited.png')\n plt.show()\n\ndef main():\n qlagent = DeepQLearningAgent(discount=1, alpha=1/3, T=9 , rho=0.33657073974609375)\n qlagent.runTrial(iterations=int(1000))\n qlagent.processPolicy(qlagent.extractPolicy())\n qlagent.plotStatesVisited()\n qlagent.plotLogStatesVisited()\n\nif __name__ == \"__main__\":\n main()","sub_path":"proof_of_work/deep_q/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236010973","text":"from typing import List, Union, Tuple\n\nimport pymysql\n\nfrom dao.dao import get_connect\n\n\nclass User:\n UNCHECKED_STATUS = 'unchecked'\n FETCHING_STATUS = 'fetching'\n ACTIVE_STATUS = 'active'\n\n def __init__(self) -> None:\n self.id = 0\n self.name = ''\n self.account = ''\n self.motto = ''\n self.solved_num = 0\n self.status = self.UNCHECKED_STATUS\n\n def update(self) -> None:\n \"\"\"\n 更新用户\n \"\"\"\n sql = '''UPDATE users SET `name`=%s,account=%s,motto=%s,solved_num=%s,`status`=%s WHERE id=%s'''\n connect = get_connect()\n with connect.cursor() as cursor:\n cursor.execute(sql, (self.name, self.account, self.motto, self.solved_num, self.status, self.id))\n connect.commit()\n\n def confirm(self) -> None:\n \"\"\"\n 确认用户\n \"\"\"\n sql = '''UPDATE users SET `status`='fetching' WHERE id=%s'''\n connect = get_connect()\n with connect.cursor() as cursor:\n cursor.execute(sql, (self.id,))\n connect.commit()\n\n def remove(self) -> None:\n \"\"\"\n 删除用户\n \"\"\"\n sql = '''DELETE FROM users WHERE id=%s'''\n connect = get_connect()\n with connect.cursor() as cursor:\n cursor.execute(sql, (self.id,))\n connect.commit()\n\n\ndef exist_user(account: str) -> bool:\n '''\n 判断账号是否已经被占用\n :param account:\n :return: 被占用返回True,否则返回False\n '''\n sql = '''SELECT 1 FROM `users` WHERE account = %s LIMIT 1'''\n connect = get_connect()\n with connect.cursor() as cursor:\n cursor.execute(sql, (account,))\n if cursor.fetchone():\n return True\n return False\n\n\ndef create_user(name: str, account: str, motto: str) -> User:\n \"\"\"\n 创建用户\n :param name: 姓名\n :param account: 账号\n :param motto: 格言\n :return: 成功返回User\n \"\"\"\n user = User()\n user.name = name\n user.account = account\n user.motto = motto\n # sql = '''SELECT 1 FROM `users` WHERE account = %s LIMIT 1'''\n connect = get_connect()\n # with connect.cursor() as cursor:\n # cursor.execute(sql, (user.account,))\n # if cursor.fetchone():\n # return False\n sql = '''INSERT INTO users(`name`,account,motto,solved_num,`status`) VALUES(%s,%s,%s,%s,%s)'''\n\n with connect.cursor() as cursor:\n cursor.execute(sql, (user.name, user.account, user.motto, user.solved_num, user.status))\n connect.commit()\n user.id = cursor.lastrowid\n return user\n\n\ndef get_fetching_list() -> List[User]:\n \"\"\"\n 所有等待获取的用户列表\n :return:\n \"\"\"\n sql = '''SELECT id,`name`,account,motto,solved_num,`status` FROM `users` WHERE `status`!= 'unchecked' '''\n connect = get_connect()\n with connect.cursor() as cursor:\n cursor.execute(sql)\n rows = cursor.fetchall()\n user_list = []\n for row in rows:\n user = User()\n (user.id, user.name, user.account, user.motto, user.solved_num, user.status) = row\n user_list.append(user)\n\n return user_list\n\n\ndef get_rank() -> Tuple[tuple]:\n \"\"\"\n 获取排行榜\n :return:\n \"\"\"\n sql = '''SELECT 
users.`name`, users.account, users.motto, users.solved_num, users.`status`, users.id FROM users ORDER BY solved_num DESC '''\n connect = get_connect()\n with connect.cursor(cursor=pymysql.cursors.DictCursor) as cursor:\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n","sub_path":"dao/userDao.py","file_name":"userDao.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"138837504","text":"__author__ = 'Ibis'\n\nimport sqlite3\nimport statistics\n\ndef insertData(table, date, vc, vv):\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n # Insert a row of data\n str = 'INSERT INTO table VALUES (?,?,?,?,?)'\n str = str.replace('table', \"'\" + table + \"'\")\n c.execute(str, (date, vc, vv))\n\n conn.commit()\n conn.close()\n return\n\ndef insertMany(table, data):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'INSERT INTO table VALUES (?,?,?,?,?)'\n str = str.replace('table', \"'\" + table + \"'\")\n c.executemany(str, data)\n\n conn.commit()\n conn.close()\n\n return\n\n'''def printData(table, date):\n conn = sqlite3.connect('titulos.db')\n c = conn.cursor()\n\n # Do this instead\n c.execute(\"SELECT * FROM 'LFT 210104' WHERE Date=?\", (date,))\n print (c.fetchone())\n\n # We can also close the connection if we are done with it.\n # Just be sure any changes have been committed or they will be lost.\n conn.close()\n\n return'''\n\ndef createTable(table):\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = '''CREATE TABLE titulo (\n 'Date'\tTEXT NOT NULL UNIQUE,\n 'Taxa Compra' NUMERIC,\n 'Taxa Venda' NUMERIC,\n 'Compra'\tNUMERIC,\n 'Venda'\tNUMERIC,\n PRIMARY KEY(Date))'''\n\n str = str.replace('titulo', \"'\" + table + \"'\")\n\n try:\n # Create table\n c.execute(str)\n\n # Save (commit) the changes\n conn.commit()\n\n succeed = 1\n except:\n succeed = 0\n\n conn.close()\n\n return succeed\n\ndef risco(table, inicialDate, finalDate):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'select * from titulo where Date\\\n between inicialDate and finalDate\\\n ORDER BY Date'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('inicialDate', \"'\" + inicialDate + \"'\")\\\n .replace('finalDate', \"'\" + finalDate + \"'\")\n c.execute(str)\n dados = c.fetchall()\n\n conn.close()\n\n return\n\ndef getLastDate(table):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'select max(Date) from titulo'\n str = str.replace('titulo', \"'\" + table + \"'\")\n\n c.execute(str)\n\n a=c.fetchall()\n\n lastDate = ''.join(a[0])\n\n # if len(a)>0:\n # lastDate = ''.join(a[0])\n # else:\n # lastDate = '0001/01/01'\n\n conn.close()\n\n return lastDate\n\ndef getData(table, date):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'select * from titulo\\\n where Date >= date\\\n ORDER BY Date'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('date', \"'\" + date + \"'\")\n c.execute(str)\n\n dados = c.fetchall()\n\n conn.close()\n\n return dados\n\ndef getPreçoVenda(table, date):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'select Date, Venda from titulo\\\n where Date >= date\\\n ORDER BY Date'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('date', \"'\" + date + \"'\")\n c.execute(str)\n\n dados = c.fetchall()\n\n conn.close()\n\n return dados\n\n# Pega o preço de compra entre duas datas\ndef getCompra(table, inicio, 
fim):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'SELECT Compra FROM titulo\\\n WHERE (Date BETWEEN dateinic AND datefim)\\\n ORDER BY Date'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('dateinic', \"'\" + inicio + \"'\").replace('datefim', \"'\" + fim + \"'\")\n c.execute(str)\n\n dados = c.fetchall()\n\n conn.close()\n\n compra = [float(i[0]) for i in dados]\n\n return compra\n\n# Pega o preço de venda entre duas datas\ndef getVenda(table, inicio, fim):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'SELECT Venda FROM titulo\\\n WHERE (Date BETWEEN dateinic AND datefim)\\\n ORDER BY Date'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('dateinic', \"'\" + inicio + \"'\").replace('datefim', \"'\" + fim + \"'\")\n c.execute(str)\n\n dados = c.fetchall()\n\n conn.close()\n\n venda = [float(i[0]) for i in dados]\n\n return venda\n\n# Pega o preço de venda em uma data\ndef getVendaUnico(table, date):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'SELECT Venda FROM titulo\\\n WHERE Date = date'\n\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('date', \"'\" + date + \"'\")\n c.execute(str)\n\n dado = c.fetchone()\n\n conn.close()\n\n return float(dado)\n\n# Pega a lista de datas entre as duas datas\ndef getDates(table, inicio, fim):\n\n conn = sqlite3.connect('Titulos.db')\n c = conn.cursor()\n\n str = 'SELECT Date FROM titulo\\\n WHERE (Date BETWEEN dateinic AND datefim)'\n str = str.replace('titulo', \"'\" + table + \"'\")\\\n .replace('dateinic', \"'\" + inicio + \"'\").replace('datefim', \"'\" + fim + \"'\")\n c.execute(str)\n\n dates = c.fetchall()\n\n conn.close()\n\n return dates","sub_path":"database2.py","file_name":"database2.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585470872","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom photo.models import Photo\n\n# Create your models here.\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n followers = models.ManyToManyField(User, related_name='user_followers')\n following = models.ManyToManyField(User, related_name='user_following')\n PICOIN = models.PositiveIntegerField(default=0, verbose_name='PICOIN')\n\n def __str__(self):\n return self.user.email\n \n@receiver(post_save, sender=User)\ndef create_or_update_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n instance.profile.save()\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\nclass ProfilePicoInfoLog(models.Model):\n profile = models.ForeignKey(Profile, on_delete=models.CASCADE, verbose_name='PROFILE')\n donator = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name='DONATOR', null=True)\n PICOIN = models.IntegerField(default=0, verbose_name='PICOIN')\n where = models.ForeignKey(Photo, on_delete=models.PROTECT, verbose_name='WHERE', null=True)\n donate_dt = models.DateTimeField('Donation Datetime', auto_now_add=True, null=True)\n\n class Meta:\n ordering = ('-donate_dt', )\n\n def __str__(self):\n return '%s %d' %(self.donator.username, self.PICOIN)\n\nclass PhotoicoInfoLog(models.Model):\n photo = models.ForeignKey(Photo, 
on_delete=models.CASCADE, verbose_name='PHOTO')\n donator = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name='DONATOR', null=True)\n PICOIN = models.PositiveIntegerField(default=0, verbose_name='PICOIN')\n donate_dt = models.DateTimeField('Donation Datetime', auto_now_add=True, null=True)\n\n class Meta:\n ordering = ('-donate_dt', )\n \n def __str__(self):\n return '%s %d' %(self.donator.username, self.PICOIN)","sub_path":"PICO_PROJECT/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72953912","text":"operators = {\r\n 'megacom': [\r\n '0558',\r\n '0551',\r\n '0555',\r\n '0557',\r\n '0553',\r\n '0996',\r\n '0550',\r\n '0559',\r\n ],\r\n 'beeline': [\r\n '0770',\r\n '0777',\r\n '0778',\r\n '0776',\r\n '0771',\r\n '0222'\r\n ],\r\n\r\n 'Ошка': [\r\n '0700',\r\n '0999',\r\n '0701',\r\n '0702'\r\n ],\r\n\r\n 'gorod': [\r\n '03222',\r\n '03231',\r\n '03200',\r\n '03230',\r\n '03211',\r\n ]\r\n\r\n}\r\n\r\n\r\ndef check_phone_number(nomer):\r\n if len(nomer) == 10 and nomer[0] == '0':\r\n print('Номер телефона правильный')\r\n code = nomer[0:4]\r\n print(nomer)\r\n if code in operators['Ошка']:\r\n print('Ваш оператор Ошка')\r\n elif code in operators['megacom']:\r\n print('Ваш оператор Megacom')\r\n elif code in operators['beeline']:\r\n print('Ваш оператор Beeline')\r\n code = nomer[0:5]\r\n if code in operators['gorod']:\r\n print('Городской номер')\r\n else:\r\n print('Вы ввели неправильный номер')\r\n\r\n\r\nphone = input('Введите номер телефона: ')\r\nphone2 = input('Введите номер телефона 1: ')\r\ncheck_phone_number(phone)\r\ncheck_phone_number(phone2)\r\n\r\n","sub_path":"python/day18/def_2.py","file_name":"def_2.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83104190","text":"#!/usr/bin/python3\n\"\"\"\nDefines a custom node model\n\"\"\"\n\nfrom hashlib import sha1\nimport hmac, random, codecs, time\nfrom urllib.parse import quote\nimport base64\nimport os\nimport models\nfrom models.base import BaseNode, Base\nfrom sqlalchemy import Column, String, Integer, ForeignKey\nimport json\nimport requests\nfrom models.auth import Auth\n\nauth = Auth()\n\nclass CustomNode(BaseNode, Base):\n \"\"\"\n This model stores the needed data for the nodes\n \"\"\"\n __tablename__ = 'custom_nodes'\n user_id = Column(String(60), ForeignKey('users.id'), nullable=False)\n name = Column(String(60))\n work_type = Column(String(20), default='request') # Request or process\n api_url = Column(String(100), default='') # Format 'GET https://example.com\n api_endpoint = Column(String(100), default='') # Format 'users/{id}/nodes' this depends on self.data\n string = Column(String(200))\n headers = Column(String(500), default='{}') # Format { key1:value1, key2:value2 }\n innodes = Column(String(2000), default='[]') # Format [ id1, id2, id3 ]\n # Query data will store a JSON string\n data = Column(String(2000), default='{}') # Format { key1:value1, key2:value2 }\n outnodes = Column(String(2000), default='[]') # Format [ id1, id2, id3 ]\n analisis_mode = Column(String(20), default='JSON') # scrapping, JSON, comparision (for the triggers)\n # work_type == process and analisis_mode == 'comparision'\n # analisis_params format should be\n # [{value1:, value2:, cond:}]\n # and the Response will be Boolean True or false\n analisis_params = Column(String(2000), 
default='[]') # format [{ key: 'field', stop_val: '>' , path: ''}]\n trigger = Column(String(5)) # True or False\n timeout = Column(Integer, default=0)\n inner_connections = Column(String(2000), default='') # Format {outnodes}\n color = Column(String(16), default='#9bfa18') # Default node color\n board_id = Column(String(60), default='')\n # content = Column(String(16), default='', nullable=True) # this will store the inode request response\n\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n def __str__(self):\n return json.dumps(self.__dict__)\n\n\n def request(self, data):\n \"\"\"\n Makes a request using the model data and a \n \"\"\"\n print('------------------------------')\n print('\\tRequest by', self.name)\n print('------------------------------')\n\n if len(self.api_url) <= 0:\n print('No URL defined')\n return (None, None)\n protocol, url = self.api_url.split(' ')\n headers = {}\n response = None\n url += self.parse_endpoint()\n if self.headers != '{}':\n headers = json.loads(self.headers)\n # print(headers)\n print(headers, type(headers), '\\n', protocol, url, '\\n', data, type(data))\n try:\n if protocol == 'GET':\n response = requests.get(url, params=data, headers=headers)\n elif protocol == 'POST':\n response = requests.post(url, params=data, headers=headers)\n print('\\033[34m*************Response*********')\n #print('*{:<30}*'.format(str(response.content)))\n print('*{:<30}*'.format(str(response.json())))\n #print('*{:<30}*\\033[0m'.format(str(response.reason)))\n print('*******************************\\033[0m')\n except Exception as e:\n # print('An error has ocurred while requesting, maybe there are not internet connection')\n print('\\033[31mRequest Error\\033[0m', e)\n if response.status_code == 200:\n try:\n print(self.name, json.dumps(response.json(), indent=2))\n response = response.json()\n except Exception as e:\n print(e)\n response = response.content\n else:\n response = {'error': response.reason, 'message': response.json()['errors'][0]['message'], 'code': response.status_code}\n # print(response.json())\n # print(response.json())\n return response, response\n\n\n def processResponse(self, response):\n \"\"\"\n Process the received result from request to the formated output fields defined by\n analisis_mode: scrapping, parse json\n analisis_params: {key:, stop_val:, path}\n \"\"\"\n resp = {}\n # print(type(self.analisis_params) == dict)\n if type(self.analisis_params) != dict:\n params = json.loads(self.analisis_params)\n else:\n params = self.analisis_params\n print('\\033[33mprocessing:\\033[0m ', self.name)\n # print(self.analisis_mode)\n if self.analisis_mode == 'comparision':\n print('## Comparission mode ##')\n # print('processing input', params, response)\n cond = None\n val1 = None\n val2 = None\n for par in params:\n if 'value1' in par:\n val1 = par['value1']\n if 'value2' in par:\n val2 = par['value2']\n if 'cond' in par:\n cond = par['cond']\n print(val1, cond, val2)\n res = eval(val1 + ' ' + cond + ' ' + val2)\n print(res)\n if res == None:\n return False\n return res\n if self.analisis_mode == 'gen_signature':\n print('==========Gen Signature=============')\n\n r = response\n print(r)\n method, url = r['url'].split(' ')\n data = json.loads(self.data)\n sign = auth.gen_sig(data['key1'], data['key2'], r['data'], url, method)\n print(sign)\n print('====================================')\n return sign\n # this process extracts the data from the reponse object\n if len(params) > 0:\n for param in params:\n # 
print(json.dumps(param, indent=2))\n paths = param['path'].split('/')\n print('paths: ', paths)\n obj = response\n last = len(paths) - 1\n for i, path in enumerate(paths):\n try:\n index = int(path)\n obj = obj[index]\n except:\n if path in obj:\n obj = obj[path]\n if last == i:\n resp[param['key']] = obj\n if self.analisis_mode == 'replace':\n parsed = self.parse_string(self.api_endpoint, resp)\n print('\\033[36mparsed string:\\033[0m\\n\\t', parsed,)\n return parsed\n return resp\n else:\n return response\n\n\n def run_node_task(self, params):\n \"\"\"\n If response is an empty dict, makes the request with the stored data,\n if the task is called from another node process it will use the data comming that node\n \"\"\"\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('\\t\\tRun', self.name, 'task\\nParams:\\n', params)\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n data = {}\n inn_resp = {}\n complete = None\n rand = ''\n time = '' # tag for the nounce generated values\n if len(json.loads(self.innodes)) > 0:\n innodes = json.loads(self.innodes)\n node = models.storage.get(CustomNode, innodes[0])\n if node.analisis_mode == 'gen_signature':\n heads = {}\n heads['data'] = json.loads(self.headers)\n for h in heads['data'].keys():\n if heads['data'][h] == 'random':\n rand = h\n heads['data'][h] = auth.gen_nonce()\n elif heads['data'][h] == 'time':\n time = h\n heads['data'][h] = auth.get_time()\n heads['url'] = self.api_url\n self.data = json.dumps({'status': params})\n heads['data']['status'] = params\n print('incoming params to', node.name, '\\n', heads)\n data, inn_resp = node.run_node_task(heads)\n headers = json.loads(self.headers)\n if rand in headers:\n headers[rand] = heads['data'][rand]\n if time in headers:\n headers[time] = heads['data'][time]\n headers['status'] = params\n headers['oauth_signature'] = data\n self.headers = json.dumps(headers)\n else:\n data, inn_resp = node.run_node_task(params)\n \n # print(data)\n # Handles the request if the worker is request\n # take in count the headers\n # and the behavior give by the innodes-data response\n if self.work_type == 'request':\n print('.......................')\n print('Request from', self.name)\n print('data:', data, '\\nparams:', params)\n headers = {}\n tmp_headers = self.headers\n if self.string == 'auth':\n headers['Authorization'] = auth.gen_header(json.loads(self.headers))\n print('headers: ', self.headers)\n print('___result_headers___')\n print(headers)\n self.headers = json.dumps(headers)\n print('.......................')\n data, inn_resp = self.request(self.parse_data(data))\n self.headers = tmp_headers\n # search signature an delete it from model, also restart nounce 'random' tag\n headers = json.loads(self.headers)\n if 'oauth_signature' in headers:\n del headers['oauth_signature']\n if rand in headers:\n headers[rand] = 'random'\n if time in headers:\n headers[time] = 'time'\n self.headers = json.dumps(headers)\n self.save()\n print(data)\n # Set this variable to be able to call multiple innodes\n # print('data before process\\n\\033[35m', data, complete, '\\033[0m')\n # The data here is filtered by the analisis_params parameters\n if len(params) > 0:\n data = self.parse_data(params)\n data = self.processResponse(data)\n # ==================================\n # Now ask if this node has outnodes execute them\n # This needs a way to handle efectively the recursion\n # sucessive nodes are processed in queu system\n # the returned data for each node determines the flow\n if 
len(json.loads(self.outnodes)) > 0:\n outnodes = json.loads(self.outnodes)\n print(self.name, inn_resp)\n for nod in outnodes:\n node = models.storage.get(CustomNode, nod)\n if node.analisis_mode == 'comparision':\n parms = json.loads(node.analisis_params)\n # print(type(data), data, dir(data), data.get(list(data.keys())[0]))\n # clean the value1 variable from the existing dict\n # ==================================\n for i, par in enumerate(parms):\n if 'value1' in par:\n del parms[i]\n # ==================================\n # append the value1 from the first key in the actual data content\n parms.append({'value1': data.get(list(data.keys())[0])})\n node.analisis_params = json.dumps(parms)\n node.save()\n print(node.analisis_params)\n data, comp = node.run_node_task(data)\n print(node.name + ' response:', data)\n if data is False:\n break\n else:\n # print(inn_resp)\n if len(inn_resp) <= 0:\n inn_resp = data\n print('outnode', node.name, data, inn_resp)\n data = node.run_node_task(inn_resp)[0]\n if type(data) == dict:\n data = self.parse_data(data)\n # print('after processing\\n\\033[34m', data, complete, '\\033[0m')\n # print(data, complete)\n print('\\033[32mresponse from', self.name, '\\033[0m')\n print('\\t', data)\n return data, inn_resp\n\n\n def colors(self):\n \"\"\"\n returns a list of color\n \"\"\"\n return ['#f32e9c', '#932989', '#9bfa18', '#ef912f', '#f9463a']\n\n\n def parse_data(self, params):\n \"\"\"\n Parse the params to take the extends the data object\n \"\"\"\n data = json.loads(self.data)\n if type(params) == dict:\n for key in params.keys():\n data[key] = params[key]\n return data # add also the incoming params\n\n\n def parse_string(self, format_string, data):\n \"\"\"\n Search patterns and replace values in the endpoint\n \"\"\"\n keys = [ '{' + k + '}' for k in data.keys()]\n # print(format, data)\n resp = format_string\n for i, k in enumerate(keys):\n resp = resp.replace(k, data[k[1:-1]])\n # print(format_string)\n # print('parsed response', resp)\n return resp\n\n\n def parse_endpoint(self):\n \"\"\"\n Search patterns and replace values in the endpoint\n \"\"\"\n keys = [ '{' + k + '}' for k in self.parse_data({}).keys()]\n resp = self.api_endpoint\n for k in keys:\n resp = resp.replace(k, self.parse_data({})[k[1:-1]])\n return resp\n","sub_path":"web/models/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":13901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172592046","text":"#!python3\n\"\"\"\n##### Task 2\nCreate a function called largest.\nThe input is a list.\nThe return value is the largest value in the list\n(2 points)\n\"\"\"\n\ndef largest(List):\n List.sort()\n L=List[-1]\n return L\nprint(largest([3,1,4,7,13,9]))\nprint(largest([5,1,12.3]))","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142441633","text":"import pandas as pd\n\nminority_classes = ['Minería', 'Comercio Exterior', 'Gerencia', 'Comunicación', 'Seguros',\n 'Construcción', 'Legales', 'Diseño', 'Educación', 'Ingeniería']\n\naugmented_columns = ['back_en', 'back_te', 'back_zh', 'back_ro', 'back_ar', 'back_ja',\n 'back_jv', 'back_ko', 'back_vi', 'back_tr', 'back_yo']\n\nAUGMENTED_DATA_PATH = './Data/Class augmented/'\nDATA_PATH = './Data/Data_For_Backtranslation.csv'\n\ndf = pd.read_csv(DATA_PATH)\n\nfor min_class in minority_classes:\n path = AUGMENTED_DATA_PATH + 
'{}.xlsx'.format(min_class)\n min_df = pd.read_excel(path)\n classes = []\n texts = []\n for col in augmented_columns:\n for text in min_df[col]:\n texts.append(text)\n for element in min_df['clase']:\n classes.append(element)\n augmented_df = pd.DataFrame()\n augmented_df['texto'] = texts\n augmented_df['clase'] = classes\n df = df.append(augmented_df, ignore_index=True)\n\nprint(df['clase'].value_counts())\ndf.to_csv('./Data/Augmented_data.csv', index=False) \n \n","sub_path":"Code/Data_augmentation.py","file_name":"Data_augmentation.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19298822","text":"from django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom posthog.utils import compact_number\n\nfrom .models import OrganizationBilling, Plan\n\n\n@admin.register(OrganizationBilling)\nclass OrganizationBillingAdmin(admin.ModelAdmin):\n search_fields = (\n \"organization__name\",\n \"organization__members__email\",\n \"stripe_customer_id\",\n \"stripe_checkout_session\",\n \"stripe_subscription_item_id\",\n \"stripe_subscription_id\",\n )\n list_display = (\n \"get_organization_name\",\n \"stripe_customer_id\",\n \"stripe_subscription_id\",\n \"should_setup_billing\",\n \"billing_period_ends\",\n \"plan\",\n )\n readonly_fields = [\"stripe\", \"billing_docs\", \"is_billing_active\", \"event_allocation\"]\n fields = (\n \"organization\",\n \"stripe\",\n \"stripe_customer_id\",\n \"stripe_subscription_id\",\n \"stripe_checkout_session\",\n \"plan\",\n \"should_setup_billing\",\n \"billing_period_ends\",\n \"is_billing_active\",\n \"event_allocation\",\n \"billing_docs\",\n )\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.order_by(\"should_setup_billing\")\n\n def get_organization_name(self, obj):\n return obj.organization.name\n\n def event_allocation(self, instance: OrganizationBilling) -> str:\n return \"Unlimited\" if not instance.event_allocation else compact_number(instance.event_allocation)\n\n def stripe(self, instance: OrganizationBilling) -> str:\n if not instance.stripe_customer_id:\n return \"Customer is not registered on Stripe\"\n\n return mark_safe(\n \"View customer on \"\n f''\n \"Stripe →\",\n )\n\n def billing_docs(self, *args, **kwargs) -> str:\n return mark_safe(\n \"When changing this object, please remember to read \"\n ''\n \"our internal docs →\",\n )\n\n\n@admin.register(Plan)\nclass PlanAdmin(admin.ModelAdmin):\n list_display = (\n \"key\",\n \"name\",\n \"price_id\",\n \"is_active\",\n \"self_serve\",\n \"event_allowance\",\n \"price_string\",\n )\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.order_by(\"key\")\n","sub_path":"multi_tenancy/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644509883","text":"import sys\nn,m = map(int,raw_input().strip().split())\nmap_ = {}\nlst = []\nfor _ in range(n):\n\tx,y = map(int,raw_input().strip().split())\n\tlst.append([x,y])\n\tmap_[x] = map_.setdefault(x,0) + 1\n\ndef find_max(map_):\n\tmax_ = 0\n\tmax_k = None\n\tfor k,v in map_.items():\n\t\tif v >= max_:\n\t\t\tmax_ = v\n\t\t\tmax_k = k\n\treturn max_k,max_\n\ndef find_ind(max_,lst):\n\tmin1 = sys.maxint\n\tind1 = 0\n\tmin2 = sys.maxint\n\tind2 = 0\n\tfor i in range(len(lst)):\n\t\tif lst[i][1] < min1 and lst[i][0] != 1:\n\t\t\tmin1 = 
lst[i][1]\n\t\t\tind1 = i\n\t\tif lst[i][1] < min2 and lst[i][1] == max_:\n\t\t\tmin2 = lst[i][1]\n\t\t\tind2 = i\n\treturn ind1,ind2\n\ndef dfs(cost,map_,lst):\n\tmax_k,max_ = find_max(map_) \n\tif max_k == 1:\n\t\treturn cost\n\telse:\n\t\tind1,ind2 = find_ind(max_,lst)\n\t\tmap_[1] = map_.setdefault(1,0) + 1\n\n\t\tmap_[lst[ind1][0]] -= 1\n\t\ttmp = lst[ind1][1]\n\t\tlst[ind1][1] = sys.maxint\n\t\tx1 = dfs(cost + tmp,map_,lst)\n\t\tx2 = sys.maxint\n\t\tif ind1 != ind2:\n\t\t\tmap_[lst[ind1][0]] += 1\n\t\t\tlst[ind1][1] = tmp\n\t\t\tmap_[lst[ind2][0]] -= 1\n\t\t\ttmp = lst[ind2][1]\n\t\t\tlst[ind2][1] = sys.maxint\n\t\t\tx2 = dfs(cost + tmp,map_,lst)\n\t\treturn min(x1,x2)\n\nprint(dfs(0,map_,lst))\n\n","sub_path":"niuke/148.py","file_name":"148.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214270638","text":"# Create your views here.\n\"\"\"This module provides the views for CMU\"\"\"\n\nimport datetime\nimport logging\nimport os\nimport json\n\nfrom backend.models import ClientWarehouse\n\nfrom cmu.app_settings import RESULTS_PER_PAGE\nfrom cmu.app_settings import PICKUP_LOCATION_IRRELEVENT\nfrom cmu.forms import ClientManifestForm, UploadManifestForm\nfrom cmu.forms import UploadLogFilterForm\nfrom cmu.models import UploadLog\nfrom cmu.tasks import process_upload\n\nfrom dvutils.utils import check_and_deque\n\nfrom django.contrib.auth.decorators import user_passes_test, login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django_mongokit import connection\nfrom dvutils.utils import get_connection_db\nfrom package.models import find_by_wbn\nfrom package.utils import upload_file_to_s3\nfrom cmu.app_settings import EXPIRY_TIME\n\nLOGGER = logging.getLogger(__name__)\n\n\n@login_required\ndef requires_pickup_location(request):\n '''\n Function which returns a boolean given package type\n to determine if client location is required for the same\n '''\n\n # Request package_type from request parameters\n package_type = request.GET.get('package_type', None)\n\n # Request id against package_type to support legacy\n # HierarchicalSelect\n if not package_type:\n package_type = request.GET.get('id', None)\n\n response = True\n if package_type in PICKUP_LOCATION_IRRELEVENT:\n response = False\n\n return HttpResponse(\n json.dumps(response), mimetype='application/json')\n\n\n@user_passes_test(lambda u: u.has_perm('package.can_apply_manual_cmu_upload'))\ndef manual_upload(request, waybill):\n if check_and_deque('cmu.tasks.process_upload', waybill):\n process_upload(waybill)\n return HttpResponseRedirect(\n reverse('show_manifest', args=[waybill]))\n\n\n@user_passes_test(lambda u: u.has_perm('package.can_add_package'))\ndef queue_uploader(request):\n user = request.user\n is_client = False\n\n try:\n profile = request.user.userprofile\n except UserProfile.DoesNotExist:\n profile = None\n\n if profile and profile.user_type == 'CL':\n is_client = True\n\n if request.method == 'POST':\n if is_client:\n form = ClientManifestForm(\n profile.client.name, request.POST, request.FILES)\n else:\n form = UploadManifestForm(request.POST, request.FILES)\n\n if form.is_valid():\n upload_file = request.FILES['upload_file']\n upload_log = connection.UploadLog()\n with open('/tmp/manifest-{0}-{1}'.format(\n user.username, 
datetime.datetime.now().strftime('%Y%m%d-%H%M')), 'wb+') as f:\n f.write(upload_file.read())\n s3_url = upload_file_to_s3(filename=f.name, expires=EXPIRY_TIME)\n upload_log.upl.url = s3_url\n upload_log.save()\n f.close()\n\n if is_client:\n client = profile.client.name\n upload_log.upl.au_sch = True\n upload_log.upl.au_dsp = True\n upload_log.upl.pd = datetime.datetime.now()\n else:\n client = form.cleaned_data['client']\n upload_log.upl.au_sch = True\n upload_log.upl.au_dsp = True\n upload_log.upl.pd = datetime.datetime.now()\n upload_log['rcd'] = form.cleaned_data['manifest_received_datetime']\n upload_log.upl.cl = client\n upload_log.upl.pt = form.cleaned_data['package_type']\n upload_log.upl.pl = form.cleaned_data['client_location']\n upload_log.upl.u = request.user.username\n upload_log.upl.ext = os.path.splitext('%s' % upload_file)[1]\n\n if (\n 'incoming_center' in form.cleaned_data) and (\n form.cleaned_data['incoming_center']):\n upload_log.upl.oc = form.cleaned_data['incoming_center']\n else:\n upload_log.upl.oc = u'{}'.format(ClientWarehouse.objects.get(\n name=upload_log.upl.pl).get_incoming_center())\n upload_log.save()\n process_upload.delay(upload_log.wbn)\n return HttpResponseRedirect(\n reverse('show_manifest', args=[upload_log.wbn]))\n else:\n form = UploadManifestForm(\n initial={'pickup_date_time': datetime.datetime.today()})\n if is_client:\n form = ClientManifestForm(profile.client)\n return render_to_response(\n 'cmu_upload.html', {\n 'form': form,\n 'is_client': is_client,\n }, context_instance=RequestContext(request))\n\n\n@login_required\ndef show_manifest(request, wbn):\n \"\"\"Render the results of a manifest upload\"\"\"\n upload = connection.UploadLog.one({'wbn': u'%s' % wbn})\n if upload:\n return render_to_response(\n 'cmu_results.html', {\n 'upload': upload,\n }, context_instance=RequestContext(request))\n else:\n raise Http404\n\n\ndef download_manifest(request, wbn):\n \"\"\"Download the file uploaded or the specified upload log number\"\"\"\n upload = connection.UploadLog.one({'wbn': u'%s' % wbn})\n if upload:\n if upload.upl.url:\n s3_url = upload.upl.url\n return HttpResponseRedirect(s3_url)\n else:\n raise Http404\n\n\n@user_passes_test(lambda u: u.has_perm('package.can_add_package'))\ndef list_uploads(request):\n \"\"\"List paginated uploads\"\"\"\n # for CMU uploads type is None\n filters = {}\n\n client = request.GET.get('client', '')\n if client:\n filters['upl.cl'] = client\n\n status = request.GET.get('status', 'Success')\n if status:\n filters['s'] = status\n\n form = UploadLogFilterForm(initial={'status': status})\n\n from bson import ObjectId\n from datetime import datetime, timedelta\n db = get_connection_db(UploadLog)\n\n d = datetime.now() + timedelta(days=-2)\n _id = ObjectId.from_datetime(d)\n filters['_id'] = {'$gte': _id}\n #Limit response- wbn, s, cd, ud, upl.cl,upl.u, fail, dup, suc, c.tot\n limit_response = {\"wbn\": 1, \"s\": 1, \"cd\": 1, \"ud\": 1,\n \"upl.cl\": 1, \"upl.u\": 1, \"fail\": 1,\n \"dup\": 1, \"suc\": 1, \"c.tot\": 1, \"rcd\": 1}\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n last = True\n if page == 1:\n last = False\n skip = page * RESULTS_PER_PAGE\n if not db:\n upload_list = connection.UploadLog.find(filters, limit_response).sort('_id', -1).\\\n skip(skip).limit(RESULTS_PER_PAGE)\n else:\n upload_list = db.find(filters, limit_response).sort('_id', -1).\\\n skip(skip).limit(RESULTS_PER_PAGE)\n\n return render_to_response('cmu_list.html', {\n 'uploads': upload_list, 'last': 
last,\n 'form': form, 'page': page, 'client': client,\n 'status': status},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef show_bulk_manifest(request, wbn):\n \"\"\"Render the results of a manifest upload\"\"\"\n upload = connection.BulkUploadLog.one({'wbn': u'%s' % wbn})\n if upload:\n return render_to_response(\n 'package/bulk_uploadlog_details.html', {\n 'upload': upload,\n }, context_instance=RequestContext(request))\n else:\n raise Http404\n\n\n@login_required\ndef download_bulk_manifest(request, wbn):\n \"\"\"Download the file uploaded or the specified upload log number\"\"\"\n upload = connection.BulkUploadLog.one({'wbn': u'%s' % wbn})\n if upload:\n if upload.upl.url:\n s3_url = upload.upl.url\n return HttpResponseRedirect(s3_url)\n else:\n file_read = upload.fs.get_last_version('Bulk').read()\n response = HttpResponse(file_read, mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=download.csv'\n return response\n else:\n raise Http404\n\n\n@user_passes_test(lambda u: u.has_perm('package.can_apply_manual_bulk_update'))\ndef manual_bulk(request, waybill):\n if check_and_deque('package.tasks.bulk_upload', waybill):\n # to prevent cyclic import\n from package.tasks import bulk_upload\n bulk_upload(waybill)\n return HttpResponseRedirect(\n reverse('show_bulk_manifest', args=[waybill]))\n\n\n@user_passes_test(lambda u: u.has_perm('package.can_edit_package'))\ndef show_package_url(request, waybill):\n package = find_by_wbn(waybill)\n if package and package.get('upl'):\n return HttpResponseRedirect(\n reverse('show_manifest', args=[package.get('upl')]))\n raise Http404\n","sub_path":"cmu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84832213","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n#\n# Copyright (c) 2020 Jordi Mas i Hernandez \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n\nfrom __future__ import print_function\nfrom flask import Flask, request, Response\nfrom flask_cors import CORS, cross_origin\nimport json\nimport datetime\nfrom ctranslate import CTranslate\nimport pyonmttok\nfrom threading import Thread\nfrom texttokenizer import TextTokenizer\nfrom checksourcelanguage import CheckSourceLanguage\nfrom usage import Usage\nfrom batchfiles import *\nimport os\nimport uuid\n\napp = Flask(__name__)\nCORS(app)\n\nTOKENIZER_MODELS = '/srv/models/tokenizer'\nENG_CAT_MODEL = '/srv/models/eng-cat'\nCAT_ENG_MODEL = '/srv/models/cat-eng'\nUPLOAD_FOLDER = '/srv/data/files/'\nSAVED_TEXTS = '/srv/data/saved/'\n\nopenNMT_engcat = CTranslate(f\"{ENG_CAT_MODEL}\")\nopenNMT_engcat.tokenizer_source = pyonmttok.Tokenizer(mode=\"none\", sp_model_path=f\"{TOKENIZER_MODELS}/en_m.model\")\nopenNMT_engcat.tokenizer_target = pyonmttok.Tokenizer(mode=\"none\", sp_model_path=f\"{TOKENIZER_MODELS}/ca_m.model\")\n\nopenNMT_cateng = CTranslate(f\"{CAT_ENG_MODEL}\")\nopenNMT_cateng.tokenizer_source = pyonmttok.Tokenizer(mode=\"none\", sp_model_path=f\"{TOKENIZER_MODELS}/ca_m.model\")\nopenNMT_cateng.tokenizer_target = pyonmttok.Tokenizer(mode=\"none\", sp_model_path=f\"{TOKENIZER_MODELS}/en_m.model\")\n\n\ndef translate_thread(sentence, openNMT, i, results):\n if sentence.strip() == '':\n results[i] = ''\n else:\n results[i] = openNMT.translate(sentence)\n# print(\"{0} - {1} -> {2}\".format(i, sentence, results[i]))\n\ndef _launch_translate_threads(openNMT, text, sentences, translate):\n num_sentences = len(sentences)\n threads = []\n results = [\"\" for x in range(num_sentences)]\n for i in range(num_sentences):\n if translate[i] is False:\n continue\n \n process = Thread(target=translate_thread, args=[sentences[i], openNMT, i, results])\n process.start()\n threads.append(process)\n\n for process in threads:\n process.join()\n\n return results\n\n\n\n\n@cross_origin(origin='*',headers=['Content-Type','Authorization'])\n@app.route('/translate/', methods=['POST'])\ndef translate_api():\n start_time = datetime.datetime.now()\n text = request.json['text']\n languages = request.json['languages']\n savetext = 'savetext' in request.json and request.json['savetext'] == True\n\n if languages == 'eng-cat':\n openNMT = openNMT_engcat\n language = 'English'\n else:\n openNMT = openNMT_cateng\n language = 'Catalan'\n\n tokenizer = TextTokenizer()\n sentences, translate = tokenizer.tokenize(text, language)\n\n results = _launch_translate_threads(openNMT, text, sentences, translate)\n translated = tokenizer.sentence_from_tokens(sentences, translate, results)\n\n if savetext:\n saved_filename = os.path.join(SAVED_TEXTS, \"source.txt\")\n with open(saved_filename, \"a\") as text_file:\n t = text.replace('\\n', '')\n text_file.write(f'{languages}\\t{t}\\n')\n\n sourcelang = CheckSourceLanguage(SAVED_TEXTS, text, translated, languages)\n\n time_used = datetime.datetime.now() - start_time\n words = len(text.split(' '))\n usage = Usage()\n usage.log(languages, words, time_used)\n result = {}\n result['text'] = text\n result['translated'] = translated\n result['time'] = str(time_used)\n\n if sourcelang.is_wrong():\n result['source_language_wrong'] = True\n\n return json_answer(result)\n\ndef _get_processed_files(date):\n 
try:\n database.open()\n cnt = batchfiles = BatchFile.select().where(BatchFile.done ==1 and\\\n (BatchFile.date.year == date.year and\\\n BatchFile.date.month == date.month and\\\n BatchFile.date.day == date.day)).count()\n database.close()\n except:\n cnt = 0\n\n return cnt\n\n\n@app.route('/savedtexts/', methods=['GET'])\ndef savedtexts():\n saved_filename = os.path.join(SAVED_TEXTS, \"source.txt\")\n with open(saved_filename, \"r\") as text_file:\n return Response(text_file.read(), mimetype='text/plain')\n\n@app.route('/stats/', methods=['GET'])\ndef stats():\n requested = request.args.get('date')\n date_requested = datetime.datetime.strptime(requested, '%Y-%m-%d')\n usage = Usage()\n result = usage.get_stats(date_requested)\n\n cnt = _get_processed_files(date_requested)\n result[\"files\"] = cnt\n return json_answer(result)\n\n\n@app.route('/version/', methods=['GET'])\ndef version_api():\n\n with open(f\"{ENG_CAT_MODEL}/model_description.txt\", \"r\") as th_description:\n lines = th_description.read().splitlines()\n\n with open(f\"{CAT_ENG_MODEL}/model_description.txt\", \"r\") as th_description:\n lines_cat_eng = th_description.read().splitlines()\n\n lines += lines_cat_eng\n\n result = {}\n result['version'] = lines\n return json_answer(result)\n\ndef _allowed_file(filename):\n ALLOWED_EXTENSIONS = 'txt'\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef save_file_to_process(filename, email, model_name):\n database.open()\n db_entry = BatchFile()\n db_entry.filename = filename\n db_entry.email = email\n db_entry.model = model_name\n db_entry.save()\n \n database.close()\n\n\n@cross_origin(origin='*',headers=['Content-Type','Authorization'])\n@app.route('/translate_file/', methods=['POST'])\ndef upload_file():\n print(\"**Start\")\n file = request.files['file']\n email = request.values['email']\n model_name = request.values['model_name']\n \n if file.filename == '':\n result = {}\n result['error'] = \"No s'ha especificat el fitxer\"\n return json_answer(result, 404)\n\n if email == '':\n result = {}\n result['error'] = \"No s'ha especificat el correu\"\n return json_answer(result, 404)\n\n if file and _allowed_file(file.filename):\n filename = uuid.uuid4().hex;\n fullname = os.path.join(UPLOAD_FOLDER, filename)\n file.save(fullname)\n\n save_file_to_process(fullname, email, model_name)\n print(\"Saved file {0}\".format(fullname))\n result = []\n return json_answer(result)\n\n result['error'] = \"Error desconegut\"\n return json_answer(result, 500)\n\n\ndef json_answer(data, status = 200):\n json_data = json.dumps(data, indent=4, separators=(',', ': '))\n resp = Response(json_data, mimetype='application/json', status = status)\n resp.headers['Access-Control-Allow-Origin'] = '*'\n return resp\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"serving/translate-service/translate-service.py","file_name":"translate-service.py","file_ext":"py","file_size_in_byte":7258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131220936","text":"#! 
/usr/bin/env python\nimport sys\nimport re\nimport struct\nfrom ctypes import c_int\n\ndef bindigits(n, bits):\n s = bin(n & int(\"1\"*bits, 2))[2:]\n return (\"{0:0>%s}\" % (bits)).format(s)\n\ndef asr(num, shift):\n return ((num >> shift) | (0xffffffff << 32-shift)) & (0xffffffff)\n\nclass EmulationState:\n def __init__(self):\n self.instruction_memory = []\n self.pc = 0\n self.r = {}\n self.mem = {}\n self.N = 0\n self.Z = 0\n self.C = 0\n self.V = 0\n for i in range(32):\n self.r[i] = c_int(0)\n\n def load_byte(self, addr):\n if addr in self.mem:\n return self.mem[addr]\n else:\n self.mem[addr] = 0\n return 0\n\n def load(self, addr):\n val = c_int((self.load_byte(addr) << 24) | (self.load_byte(addr+1) << 16) |\n (self.load_byte(addr+2) << 8) | (self.load_byte(addr+3)))\n return val\n \n def store(self, addr, x):\n self.mem[addr] = (x.value & 0xff000000) >> 24\n self.mem[addr+1] = (x.value & 0x00ff0000) >> 16\n self.mem[addr+2] = (x.value & 0x0000ff00) >> 8\n self.mem[addr+3] = (x.value & 0x000000ff)\n\n\n def set_state(self, x, N, Z, C, V):\n if(N):\n self.N = (1 if x.value < 0 else 0)\n if(Z):\n self.Z = (1 if x.value == 0 else 0)\n if(C):\n self.C = 0 # TODO\n #print(str(hex(value)) + \" N\" + str(self.N)+ \" Z\" + str(self.Z)+ \" C\" + str(self.C)+ \" V\" + str(self.V))\n\nclass Instruction:\n def __init__(self, source_line):\n self.source = source_line.upper().strip().split()\n self.op = self.source[0]\n self.opcode = Instruction.OPCODE[self.op]\n self.source_args = \"\".join(self.source[1:])\n self.args = self.opcode[1].findall(self.source_args)\n\n def NOP(self, s):\n pass\n\n def ADD(self, s):\n result = c_int(s.r[self.SA()].value + s.r[self.SB()].value)\n s.set_state(result, True, True, True, True)\n s.r[self.DR()] = result\n if(result.value < s.r[self.SA()].value):\n s.V = 1\n \n def SUB(self, s):\n result = c_int(s.r[self.SA()].value - s.r[self.SB()].value)\n s.set_state(result, True, True, True, True)\n s.r[self.DR()] = result\n if(result.value > s.r[self.SA()].value):\n s.V = 1\n\n def AND(self, s):\n s.r[self.DR()] = c_int(s.r[self.SA()].value & s.r[self.SB()].value)\n s.set_state(s.r[self.DR()], True, True, False, False)\n\n def ORRI(self, s):\n s.r[self.DR()] = c_int(s.r[self.SA()].value | self.IMM())\n s.set_state(s.r[self.DR()], True, True, False, False)\n\n def LSL(self, s):\n result = c_int(s.r[self.SA()].value << self.IMM()) \n s.set_state(result, True, True, True, False)\n s.r[self.DR()] = result\n\n def ASR(self, s):\n s.r[self.DR()] = c_int(asr(s.r[self.SA()].value , self.IMM()))\n s.set_state(s.r[self.DR()], True, True, True, True)\n #TODO what is an overflow here?\n\n def B(self, s):\n s.pc += int(self.args[0][0])\n\n def BEQ(self, s):\n if s.Z == 1:\n s.pc += int(self.args[0][0])\n\n def BLT(self, s):\n if s.N != s.V:\n s.pc += int(self.args[0][0])\n\n def LDR(self, s):\n s.r[self.DR()] = s.load(s.r[self.SA()].value + s.r[self.SB()].value);\n\n def STR(self, s):\n s.store(s.r[self.SA()].value+s.r[self.SB()].value, s.r[self.DR()])\n\n def DR(self):\n return int(self.args[0][0])\n\n def SA(self):\n return int(self.args[0][1])\n \n def SB(self):\n return int(self.args[0][2])\n \n def IMM(self):\n return int(self.args[0][2])\n\n OPCODE = {\n 'NOP' : (\"000000\", re.compile(r'()()$'), (26,), NOP),\n 'ADD' : (\"010000\", re.compile(r'R(\\d+),R(\\d+),R(\\d+)$'), (5, 5, 5), ADD),\n 'SUB' : (\"010001\", re.compile(r'R(\\d+),R(\\d+),R(\\d+)$'), (5, 5, 5), SUB),\n 'AND' : (\"010010\", re.compile(r'R(\\d+),R(\\d+),R(\\d+)$'), (5, 5, 5), AND),\n 'ORRI': (\"110011\", 
re.compile(r'R(\\d+),R(\\d+),#([-\\+]?\\d+)$'), (5, 5, 16), ORRI),\n 'LSL' : (\"101101\", re.compile(r'R(\\d+),R(\\d+),#(\\d+)$'), (5, 5, 16), LSL),\n 'ASR' : (\"101111\", re.compile(r'R(\\d+),R(\\d+),#(\\d+)$'), (5, 5, 16), ASR),\n 'B' : (\"100000\", re.compile(r'([-\\+]?\\d+)()$'), (26,), B),\n 'B.EQ': (\"100001\", re.compile(r'([-\\+]?\\d+)()$'), (26,), BEQ),\n 'B.LT': (\"100011\", re.compile(r'([-\\+]?\\d+)()$'), (26,), BLT),\n 'LDR' : (\"011100\", re.compile(r'R(\\d+),\\[R(\\d+)[\\+|,]R(\\d+)\\]$'), (5, 5, 5), LDR),\n 'STR' : (\"011101\", re.compile(r'R(\\d+),\\[R(\\d+)[\\+|,]R(\\d+)\\]$'), (5, 5, 5), STR),\n }\n \n def get_binary_instr(self):\n string = \"\\\"\" + self.opcode[0] + \"\\\"\"\n\n length = 6 # 6 is opcode size\n for i, arg in enumerate(self.args[0]):\n if len(arg) == 0:\n break\n arg_len = self.opcode[2][i]\n length += arg_len\n string += \" & \\\"\"\n string += bindigits(int(arg), arg_len)\n string += \"\\\"\"\n\n if length < 32:\n string += \" & \\\"\" + format(0, \"0>\"+str(32-length)+\"b\") + \"\\\"\" \n return string\n \n def get_source_instr(self):\n return self.op + \" \" + \"\".join(self.source[1:])\n\n def execute(self, state):\n self.opcode[3](self, state)\n\ndef main():\n source_path = sys.argv[1]\n source = []\n with open(source_path) as f:\n source = f.readlines()\n \n if len(sys.argv) < 3:\n for i, line in enumerate(source):\n instr = Instruction(line)\n print(\"\\t-- \" + instr.get_source_instr())\n print(\"\\t\" + str(i) + \" => \" + instr.get_binary_instr() + \",\")\n return\n else:\n state = EmulationState()\n for i, line in enumerate(source):\n instr = Instruction(line)\n state.instruction_memory.append(instr)\n while(state.pc < len(state.instruction_memory)):\n state.instruction_memory[state.pc].execute(state)\n state.pc += 1\n for i in state.r:\n if(state.r[i].value):\n print(\"R\" + str(i) + \" = \" + str(hex(state.r[i].value)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"assembler.py","file_name":"assembler.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548070571","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 28 11:11:18 2017\n\n@author: janleppa\n\"\"\"\n\nfrom fisherEstimator import fisherEstimator, parCorr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\n\ndef createData(n, corr = 0.1):\n cov = np.array([[1, corr,], [corr, 1]])\n mean = [0,0]\n \n \n X = np.random.multivariate_normal(mean, cov, n)\n return(X)\n #print(np.corrcoef(X.T))\n \ndef createData2(n, C):\n \n X = np.random.multivariate_normal([0,0,0,0], C, n)\n \n #print(np.abs(parCorr(X[:,[0]],X[:,[3]],X[:,[1,2]])-(-IC[0,3])))\n \n return(X)\n \n# Z-transformed sample correlations should (approximately) follow a normal distribution with mean given by the transformed population\n# correlation and sd standard deviation 1/sqrt(N - 3). 
Plots the histogram and the pdf of normal distribution \ndef correlationTest(n,bins = 100, ntests = 10000, corr = 0):\n    \n    \n    \n    meanZ = np.arctanh(corr)\n    sdZ = 1/np.sqrt((n - 3))\n    \n    X = []\n\n    for ii in range(0,ntests):\n        D = createData(n,corr)\n        sampleCorr = np.corrcoef(D.T)[0,1]\n        X.append(np.arctanh(sampleCorr))\n        \n    \n    \n    minX = np.min(X) - np.std(X)\n    maxX = np.max(X) + np.std(X)\n    \n    grid = np.linspace(minX,maxX, 1000)\n    \n    \n    fig = plt.figure(figsize=(8,8))\n    ax = fig.add_subplot(111)\n    \n    histt = ax.hist(X,bins = bins, normed = True)\n    h = ax.plot(grid, norm.pdf(grid, loc = meanZ, scale = sdZ), lw=2)\n    \n    plt.show()\n\n# same for partial correlation \ndef pcTest(n,bins = 100, ntests = 10000, x = 0, y = 1):\n    \n    IC = np.array([[1, 0, 0, 0.9], [0,1,0.2,0],[0,0.2,1,0.1],[0.9,0,0.1,1]])\n    C = np.linalg.inv(IC)\n    \n    z = set(range(0,C.shape[1]))\n    z.remove(x)\n    z.remove(y)\n    \n    print(\"True partial correlation: \", -IC[x,y])\n    meanZ = np.arctanh(-IC[x,y])\n    sdZ = 1/np.sqrt((n - 3 - 2))\n    \n    X = []\n\n    for ii in range(0,ntests):\n        D = createData2(n,C)\n        samplePC = parCorr(D[:,[x]],D[:,[y]],D[:,list(z)])\n        \n        X.append(np.arctanh(samplePC))\n        \n    \n    \n    minX = np.min(X) - np.std(X)\n    maxX = np.max(X) + np.std(X)\n    \n    grid = np.linspace(minX,maxX, 1000)\n    \n    \n    fig = plt.figure(figsize=(8,8))\n    ax = fig.add_subplot(111)\n    \n    histt = ax.hist(X,bins = bins, normed = True)\n    h = ax.plot(grid, norm.pdf(grid, loc = meanZ, scale = sdZ), lw=2)\n    \n    plt.show()\n\n# Check that the p-values used in fisherEstimator make sense\ndef correlationTest2(n, corr = 0, samples = 10000):\n    \n    D = createData(n,corr)\n    sampleCorr = np.corrcoef(D.T)[0,1]\n    \n    ff = fisherEstimator()\n    \n    z = ff._fisherZ(sampleCorr)\n    indep, pValue = ff._fisherTest(D[:,0],D[:,1])\n    \n    \n    count = 0\n    # sample data from the population distribution and compute and count correlations whose absolute value exceeds the observed one\n    for ii in range(0,samples):\n        D = createData(n,corr)\n        sampleCorri = np.corrcoef(D.T)[0,1]\n        zi = ff._fisherZ(sampleCorri)\n        if(np.abs(zi) >= np.abs(z)):\n            count += 1\n    \n    print(\"True correlation: \", corr)\n    print(\"Observed: \", sampleCorr)    \n    print(\"P-value: \", pValue)\n    print(\"proportion of transformed corrs. exceeding the observed: \", count/samples)\n    \n# same for partial correlation. 
x and y refer to variables (the columns of data matrix, from 0 to 3)\ndef pcTest2(n, x = 0, y = 1, samples = 10000):\n \n # inverse of covariance\n IC = np.array([[1, 0, 0, 0.9], [0,1,0.2,0],[0,0.2,1,0.1],[0.9,0,0.1,1]])\n \n # covariance matrix\n C = np.linalg.inv(IC)\n \n z = set(range(0,C.shape[1]))\n z.remove(x)\n z.remove(y)\n \n D = createData2(n,C)\n \n samplePC = parCorr(D[:,[x]],D[:,[y]],D[:,list(z)])\n \n ff = fisherEstimator()\n \n Z = ff._fisherZ(samplePC)\n indep, pValue = ff._fisherTest(D[:,[x]],D[:,[y]],D[:,list(z)])\n \n \n count = 0\n \n # sample data from the population distirbution and compute and count transformed pcs whose absolute value exceeds the observed one\n for ii in range(0,samples):\n D = createData2(n,C)\n samplePCi = parCorr(D[:,[x]],D[:,[y]],D[:,list(z)])\n Zi = ff._fisherZ(samplePCi)\n if(np.abs(Zi) >= np.abs(Z)):\n count += 1\n \n print(\"True pc: \", -IC[x,y])\n print(\"Observed: \", samplePC)\n print(\"P-value: \", pValue)\n print(\"proportion of transformed pc exceeding the observed: \", count/samples)\n ","sub_path":"testPC_FisherInd.py","file_name":"testPC_FisherInd.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409672409","text":"\"\"\"\n This tutorial introduces denoising auto-encoders (dA) using Theano.\n\n Denoising autoencoders are the building blocks for SdA.\n They are based on auto-encoders as the ones used in Bengio et al. 2007.\n An autoencoder takes an input x and first maps it to a hidden representation\n y = f_{\\theta}(x) = s(Wx+b), parameterized by \\theta={W,b}. The resulting\n latent representation y is then mapped back to a \"reconstructed\" vector\n z \\in [0,1]^d in input space z = g_{\\theta'}(y) = s(W'y + b'). The weight\n matrix W' can optionally be constrained such that W' = W^T, in which case\n the autoencoder is said to have tied weights. The network is trained such\n that to minimize the reconstruction error (the error between x and z).\n\n For the denosing autoencoder, during training, first x is corrupted into\n \\tilde{x}, where \\tilde{x} is a partially destroyed version of x by means\n of a stochastic mapping. Afterwards y is computed as before (using\n \\tilde{x}), y = s(W\\tilde{x} + b) and z as s(W'y + b'). The reconstruction\n error is now measured between z and the uncorrupted input x, which is\n computed as the cross-entropy :\n - \\sum_{k=1}^d[ x_k \\log z_k + (1-x_k) \\log( 1-z_k)]\n\n\n References :\n - P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and\n Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,\n 2008\n - Y. Bengio, P. Lamblin, D. Popovici, H. 
Larochelle: Greedy Layer-Wise\n Training of Deep Networks, Advances in Neural Information Processing\n Systems 19, 2007\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport timeit\n\nimport numpy\nimport pickle\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom theano.tensor.nlinalg import matrix_inverse\nfrom random import shuffle\n#import matplotlib.pyplot as plt\n\ntry:\n import PIL.Image as Image\nexcept ImportError:\n import Image\n\n\ndef open_file(filename):\n with open(filename+'.pickle', 'rb') as handle:\n b = pickle.load(handle)\n return b\n\ndef save_obj(a,filename):\n with open(filename+'.pickle', 'wb') as handle:\n pickle.dump(a, handle)\n\nclass HiddenLayerNoInput(object):\n\n def __init__(self, numpy_rng, n_in, n_out,theano_rng, dropout_rate, W=None, b=None,b_prime=None, activation=None):\n\n if activation is None:\n self.activation = T.nnet.sigmoid\n else:\n self.activation = activation\n\n self.n_out = n_out\n self.p = dropout_rate\n self.theano_rng=theano_rng\n # Initialize W\n if W is None:\n W_values = numpy.asarray(\n numpy_rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_out)),\n high=numpy.sqrt(6. / (n_in + n_out)),\n size=(n_in, n_out)\n ),\n dtype=theano.config.floatX\n )\n if activation == T.nnet.sigmoid:\n W_values *= 4\n\n W = theano.shared(value=W_values, name='W', borrow=True)\n\n # 'b' biased vector initialization at vector of zeros\n if b is None:\n b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)\n b = theano.shared(value=b_values, name='b', borrow=True)\n\n if b_prime is None:\n b_prime_values = numpy.zeros((n_in,), dtype=theano.config.floatX)\n b_prime = theano.shared(value=b_prime_values, name='b_prime', borrow=True)\n\n # Affect variables built to the self variables of the layer\n self.W = W\n self.b = b\n self.b_prime = b_prime\n # self.b_prime = b_prime\n\n self.vW = theano.shared(value=numpy.zeros_like(W_values), name='vW', borrow=True)\n self.vb = theano.shared(value=numpy.zeros_like(b_values), name='vb', borrow=True)\n self.vb_prime = theano.shared(value=numpy.zeros_like(b_prime_values), name='vb_prime', borrow=True)\n\n # parameters of the layer\n self.params = [self.W, self.b, self.b_prime]\n\n # parameter of momentum of params of the layers\n self.vparams = [self.vW,self.vb,self.vb_prime]\n\n def mask_dropout(self):\n \"\"\"p is the probablity of dropping a unit\n \"\"\"\n # p=1-p because 1's indicate keep and p is prob of dropping\n mask = self.theano_rng.binomial(n=1, p=1-self.p, size=(1,self.n_out))\n # The cast is important because\n # int * float32 = float64 which pulls things off the gpu\n return T.cast(mask, theano.config.floatX)\n\nclass CollaborativeVector(object):\n def __init__(self,\n numpy_rng,\n theano_rng,\n n_users,\n alpha,\n lambda_u,\n lambda_v,\n xb=None,\n xb_update=None,\n Ri=None,\n Rj=None):\n\n self.Ri = Ri\n self.Rj = Rj\n self.Ci = Ri + alpha*T.ones_like(Ri)\n self.Cj = Rj + alpha*T.ones_like(Rj)\n self.xb_update = xb_update\n\n self.lambda_u = lambda_u\n self.lambda_v = lambda_v\n\n u_values = numpy.asarray(\n numpy_rng.normal(\n loc=0,\n scale=1.0/lambda_u,\n size=(xb.shape[1].eval(), n_users)\n ),\n dtype=theano.config.floatX\n )\n self.u = theano.shared(value=u_values, name='u', borrow=True)\n v_values = xb.T.eval() + numpy.asarray(\n numpy_rng.normal(\n loc=0,\n scale=1.0/self.lambda_v,\n size=(xb.T.shape.eval())\n ),\n dtype=theano.config.floatX\n )\n self.v = theano.shared(value=v_values, name='v', borrow=True)\n\n 
self.u_stock = theano.shared(value=numpy.dot(u_values,u_values.T), name='u_stock', borrow=True)\n self.v_stock = theano.shared(value=numpy.dot(v_values,v_values.T), name='v_stock', borrow=True)\n\n def update_ui(self):\n inv = matrix_inverse(self.v_stock+T.dot(T.dot(self.v,T.diag(self.Ri)),self.v.T)+\\\n self.lambda_u*T.identity_like(self.v_stock))\n return T.dot(T.dot(T.dot(inv,self.v),T.diag(self.Ci)),self.Ri.T)\n\n def update_vj(self):\n inv = matrix_inverse(self.u_stock+T.dot(T.dot(self.u,T.diag(self.Rj)),self.u.T)\\\n +self.lambda_v*T.identity_like(self.u_stock))\n coef = T.dot(T.dot(self.u,T.diag(self.Cj)),self.Rj.T) + self.xb_update.T\n return T.dot(inv,coef)\n\nclass SDAE(object):\n\n def __init__(\n self,\n numpy_rng,\n theano_rng,\n hidden_layers_size,\n activation,\n dropout_rate,\n lambda_w,\n v_update,\n lambda_n,\n lambda_v,\n input=None,\n ):\n self.activation = activation\n self.theano_rng = theano_rng\n self.n_layers = len(hidden_layers_size) - 1 \n self.hidden_layers_size = hidden_layers_size\n self.sigmoid_layers = []\n self.params = []\n self.vparams = []\n self.masks = []\n self.L2_sqr = []\n self.v_update = v_update\n\n self.lambda_n = lambda_n\n self.lambda_v = lambda_v\n # self.lambda_u = lambda_u\n self.lambda_w = lambda_w\n # self.batch = batch\n\n assert self.n_layers > 0\n\n ###################\n # Encoding layers #\n ###################\n for i in range(self.n_layers):\n # construct the sigmoidal layer\n\n # the size of the input is either the number of hidden units of\n # the layer below or the input size if we are on the first layer\n input_size = hidden_layers_size[i]\n output_size = hidden_layers_size[i+1]\n\n # the input to this layer is either the activation of the hidden\n # layer below or the input of the SdA if you are on the first\n # layer\n\n sigmoid_layer = HiddenLayerNoInput(numpy_rng=numpy_rng,\n n_in=input_size,\n n_out=output_size,\n activation=activation,\n dropout_rate=dropout_rate,\n theano_rng=theano_rng)\n\n self.sigmoid_layers.append(sigmoid_layer)\n self.params.extend(sigmoid_layer.params)\n self.vparams.extend(sigmoid_layer.vparams)\n self.masks.append(sigmoid_layer.mask_dropout())\n self.L2_sqr.append((sigmoid_layer.W ** 2).sum() +(sigmoid_layer.b ** 2).sum())\n\n\n # if no input is given, generate a variable representing the input\n if input is None:\n # we use a matrix because we expect a minibatch of several\n # examples, each example being a row\n self.x = T.matrix(name='input')\n else:\n self.x = input\n\n\n def get_corrupted_input(self, input, corruption_level):\n return self.theano_rng.binomial(size=input.shape, n=1,\n p=1 - corruption_level,\n dtype=theano.config.floatX) * input\n\n\n def get_hidden_values(self,x,dropout):\n \"\"\" Compute the values of the bottleneck layer \"\"\"\n # for i in range(self.n_layer)\n for i in range(self.n_layers):\n if i==0:\n z = self.activation(T.dot(x, self.sigmoid_layers[i].W) + self.sigmoid_layers[i].b)\n if dropout:\n z = z * T.dot(T.ones((z.shape[0],1),dtype=theano.config.floatX),self.masks[i])\n else:\n z = self.activation(T.dot(z, self.sigmoid_layers[i].W) + self.sigmoid_layers[i].b)\n if dropout:\n z = z * T.dot(T.ones((z.shape[0],1),dtype=theano.config.floatX),self.masks[i])\n return z\n\n def get_reconstructed_input(self,x,dropout): \n \"\"\"Computes the reconstructed input given the values of the\n bottleneck layer\n \"\"\"\n for i in reversed(range(self.n_layers)):\n # for i in numpy.arange(self.n_layers/2,self.n_layers):\n if i==(self.n_layers-1):\n z = 
self.activation(T.dot(x,self.sigmoid_layers[i].W.T) + self.sigmoid_layers[i].b_prime)\n if i!=0 and dropout :\n z = z * T.dot(T.ones((z.shape[0],1),dtype=theano.config.floatX),self.masks[i].T)\n elif i==0:\n z = self.activation(T.dot(z,self.sigmoid_layers[i].W.T) + self.sigmoid_layers[i].b_prime)\n z = z + self.theano_rng.normal(avg=T.zeros_like(z),std=(1.0/self.lambda_n)*T.ones_like(z),\\\n size=(z.shape),dtype=theano.config.floatX)\n else:\n z = self.activation(T.dot(z,self.sigmoid_layers[i].W.T) + self.sigmoid_layers[i].b_prime)\n if dropout:\n z = z * T.dot(T.ones((z.shape[0],1),dtype=theano.config.floatX),self.masks[i].T)\n return z\n\n\n\n def get_cost_updates(self, corruption_level, learning_rate,momentum):\n \"\"\" This function computes the cost and the updates for one trainng\n step of the dA \"\"\"\n # variables needed to compte cost function\n tilde_x = self.get_corrupted_input(self.x, corruption_level)\n y = self.get_hidden_values(tilde_x,dropout=False)\n z = self.get_reconstructed_input(y,dropout=False)\n # note : we sum over the size of a datapoint; if we are using\n # minibatches, L will be a vector, with one entry per\n # example in minibatch\n\n\n # L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)\n # k = T.mean(- T.sum(y* T.log(self.v_update.T) + (1 - y) * T.log(1 - self.v_update.T), axis=1))\n\n L = self.lambda_n*T.sum((self.x-z)**2) + self.lambda_v*T.sum((y-self.v_update.T)**2)\n # note : L is now a vector, where each element is the\n # cross-entropy cost of the reconstruction of the\n # corresponding example of the minibatch. We need to\n # compute the average of all these to get the cost of\n # the minibatch\n cost = T.mean(L) + self.lambda_w*T.sum(self.L2_sqr)\n\n # compute the gradients of the cost of the `dA` with respect\n # to its parameters\n gparams = T.grad(cost, self.params)\n # generate the list of updates\n # updates = [\n # (param, param - learning_rate * gparam)\n # for param, gparam in zip(self.params, gparams)\n # ]\n\n # return (cost, updates)\n\n updates = []\n for param, gparam in zip(self.params,gparams):\n param_update = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)\n # Each parameter is updated by taking a step in the direction of the gradient.\n # However, we also \"mix in\" the previous step according to the given momentum value.\n # Note that when updating param_update, we are using its old value and also the new gradient step.\n updates.append((param, param - learning_rate*param_update))\n # Note that we don't need to derive backpropagation to compute updates - just use T.grad!\n updates.append((param_update, momentum*param_update + (1. 
- momentum)*T.grad(cost, param)))\n\n return (cost, updates)\n\ndef test_dA(initial_learning_rate=0.1, initial_momentum=0.9, training_epochs=100,\n dataset='mnist.pkl.gz', lambda_w = 0.005, lambda_n=1000, design=[1542,500,40],\n alpha=1,lambda_u=100,lambda_v=10, batch_size=5, output_folder='dA_plots'):\n\n \"\"\"\n This demo is tested on MNIST\n\n :type learning_rate: float\n :param learning_rate: learning rate used for training the DeNosing\n AutoEncoder\n\n :type training_epochs: int\n :param training_epochs: number of epochs used for training\n\n :type dataset: string\n :param dataset: path to the picked dataset\n\n \"\"\"\n\n\n learning_rate = theano.shared(numpy.cast[theano.config.floatX](initial_learning_rate))\n momentum = theano.shared(numpy.cast[theano.config.floatX](initial_momentum), name='momentum')\n # datasets = load_data(dataset)\n # train_set_x, train_set_y = datasets[0]\n\n xc_values = numpy.float32(open_file(\"../xc_1000\"))\n xc_values[xc_values<0.01] = 0\n train_set_x = theano.shared(value=xc_values[:200,:], name='xc_train', borrow=True)\n valid_set_x = theano.shared(value=xc_values[200:300,:], name='xc_valid', borrow=True)\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n\n n_items = 200\n n_users = 50\n R = open_file(\"R_1000\")\n R = R.todense()[:n_users,:n_items].view(dtype=numpy.ndarray)\n R = numpy.float32(R)\n\n tuple_nonzero = zip(numpy.nonzero(R)[0],numpy.nonzero(R)[1])\n shuffle(tuple_nonzero)\n a = int(round(2.0/3*(len(tuple_nonzero)),0))\n train_tuple = tuple_nonzero[:a]\n valid_tuple = tuple_nonzero[a:]\n\n R_train_value = numpy.zeros(R.shape)\n R_valid_value = numpy.zeros(R.shape)\n\n for tupl in train_tuple:\n R_train_value[tupl] = R[tupl]\n \n for tupl in valid_tuple:\n R_valid_value[tupl] = R[tupl]\n # Load Data R ranking matrix, x content matrix\n R_train = theano.shared(value=numpy.float32(R_train_value), name='R_train', borrow=True)\n R_valid = theano.shared(value=numpy.float32(R_valid_value), name='R_train', borrow=True)\n\n\n # allocate symbolic variables for the data\n index = T.lscalar()\n x = T.matrix('x')\n v_update = T.matrix('v_update')\n\n rng = numpy.random.RandomState(123)\n theano_rng = RandomStreams(rng.randint(2 ** 30))\n\n da = SDAE(\n numpy_rng=rng,\n theano_rng=theano_rng,\n hidden_layers_size=design,\n activation=T.nnet.sigmoid,\n dropout_rate=0.1,\n lambda_w=lambda_w,\n v_update=v_update,\n lambda_n=lambda_n,\n lambda_v=lambda_v,\n input=x\n )\n\n x0 = da.get_corrupted_input(train_set_x,0.3)\n xb_value = da.get_hidden_values(x0,dropout=False).eval()\n xb = theano.shared(value=xb_value,name='xb',borrow=True)\n\n xb_update = T.vector('xb_update')\n Ri = T.vector('Ri')\n Rj = T.vector('Rj')\n i = T.lscalar('i')\n\n cv = CollaborativeVector(\n numpy_rng=rng,\n theano_rng=theano_rng,\n n_users=n_users,\n xb=xb,\n xb_update=xb_update,\n Ri=Ri,\n Rj=Rj,\n alpha=alpha,\n lambda_u=lambda_u,\n lambda_v=lambda_v)\n\n # print(xb.eval())\n # print(cv.v.eval())\n # print(da.get_reconstructed_input(xb,dropout=False).eval())\n\n cost, updates = da.get_cost_updates(\n corruption_level=0.3,\n learning_rate=learning_rate,\n momentum=momentum\n )\n\n train_da = theano.function(\n [index],\n cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n v_update: cv.v[:,T.arange(index * batch_size,(index + 1) * batch_size)]\n }\n )\n\n new_u = cv.update_ui()\n up_u = theano.function(\n [i],\n new_u,\n givens={\n 
Ri:R_train[i,:]\n }\n )\n\n v_prime = cv.update_vj()\n up_v = theano.function(\n [i],\n v_prime,\n givens={\n Rj:R_train[:,i],\n xb_update:xb[i,:]\n }\n )\n\n train_score = []\n valid_score = []\n start_time = timeit.default_timer()\n # go through training epochs\n for epoch in range(training_epochs):\n # go through trainng set\n c= []\n for batch_index in range(n_train_batches):\n c.append(train_da(batch_index))\n # print('Training epoch %d, cost ' % epoch, numpy.sum(c))\n\n xb_value = da.get_hidden_values(x0,dropout=False).eval()\n xb.set_value(xb_value)\n\n new_u=[]\n new_v=[]\n for i in range(min(n_users,n_items)):\n new_u.append(up_u(i))\n new_v.append(up_v(i))\n if n_users>n_items:\n for i in numpy.arange(n_items,n_users):\n new_u.append(up_u(i))\n else:\n for i in numpy.arange(n_users,n_items):\n new_v.append(up_v(i))\n\n new_u = numpy.vstack(new_u).T\n cv.u.set_value(new_u)\n cv.u_stock.set_value(numpy.dot(new_u,new_u.T))\n\n new_v = numpy.vstack(new_v).T\n cv.v.set_value(new_v)\n cv.v_stock.set_value(numpy.dot(new_v,new_v.T))\n\n new_learning_rate = learning_rate.get_value() * 0.985\n learning_rate.set_value(numpy.cast[theano.config.floatX](new_learning_rate))\n\n if momentum.get_value() < 0.99:\n new_momentum = 1. - (1. - momentum.get_value()) * 0.98\n momentum.set_value(numpy.cast[theano.config.floatX](new_momentum))\n\n R_estime = numpy.dot(new_u.T,new_v)\n train_score.append(numpy.mean(((R_train_value - R_estime)[R_train_value>0])**2))\n valid_score.append(numpy.mean(((R_valid_value - R_estime)[R_valid_value>0])**2))\n\n train_score_da = get_global_score_dropout(da,train_set_x,dropout=False)\n valid_score_da = get_global_score_dropout(da,valid_set_x,dropout=False)\n\n # plt.figure()\n # plt.plot(train_score,label='train')\n # plt.plot(valid_score,label='valid')\n # plt.legend()\n # plt.show()\n\n return [train_score[-1],valid_score[-1],train_score_da,valid_score_da,alpha,lambda_v,lambda_u,lambda_n]\n\n # train_score = get_global_score_dropout(da,train_set_x,dropout=False)\n # valid_score = get_global_score_dropout(da,valid_set_x,dropout=False)\n\n # end_time = timeit.default_timer()\n # training_time = (end_time - start_time)\n # print(('The 30% corruption code for file ' +\n # os.path.split(__file__)[1] +\n # ' ran for %.2fm' % (training_time / 60.)), file=sys.stderr)\n\n # plt.figure()\n # plt.plot(score,label='no momentum')\n # plt.plot(score_momentum,label='momentum')\n # plt.legend()\n # plt.show()\n # return [design,train_score, valid_score]\n\ndef get_global_score(da,x):\n tilde_x = da.get_corrupted_input(x,0.3)\n y = da.get_hidden_values(tilde_x)\n z = da.get_reconstructed_input(y)\n #return - T.mean(T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1)).eval()\n\ndef get_global_score_dropout(da,x,dropout):\n tilde_x = da.get_corrupted_input(x,0.3)\n y = da.get_hidden_values(tilde_x,dropout=dropout)\n z = da.get_reconstructed_input(y,dropout=dropout)\n #return - T.mean(T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1)).eval()\n return T.mean(T.sum((x-z)**2)).eval()\n\n\n\nif __name__ == '__main__':\n # result = []\n # designs = [[1542,50],[1542,100],[1542,500],[1542,1000],[1542,100,75],[1542,500,100]]\n # n_latents= [5,10,20,30,40,50]\n # # designs = [[1542,50]]\n # # n_latents= [5,10,20]\n # initial_learning_rates = [0.001,0.01,0.1,1,1]\n start_time = timeit.default_timer()\n result = []\n alphas = [0.1,1,10,100]\n lambda_ns = [0.01,0.1,1,10,100]\n lambda_vs = [0.01,0.1,1,10,100]\n lambda_us = [0.01,0.1,1,10,100]\n\n # alphas = [0.01]\n # lambda_ns = [0.01]\n # 
lambda_vs = [0.01]\n # lambda_us = [0.01]\n\n for alpha in alphas:\n for lambda_n in lambda_ns:\n for lambda_v in lambda_vs:\n for lambda_u in lambda_us:\n result.append(test_dA(alpha=alpha,lambda_n=lambda_n,lambda_v=lambda_v,lambda_u=lambda_u))\n # for n_latent in n_latents:\n # for design in designs:\n # design= numpy.hstack([design,n_latent])\n # result.append(test_dA(lambda_w=0.005,batch_size=5,design=design))\n result = numpy.vstack(result)\n save_obj(result,\"result_global\")\n\n # end_time = timeit.default_timer()\n # training_time = (end_time - start_time)\n # print(' ran for %.2fm' % (training_time / 60.))","sub_path":"implementation/cdl_tied.py","file_name":"cdl_tied.py","file_ext":"py","file_size_in_byte":21726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401900945","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException \nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\n\nfrom bot import instagram_bot\nfrom config import config, xpaths, post_tracker as pt\nfrom time import sleep\nfrom utils import random_sleep\nfrom bot import instagram_bot\nfrom config import config, post_tracker as pt, xpaths, post_keywords, location_keywords\nfrom utils import random_sleep, convert_to_json\nfrom geojson import Point, Feature\nfrom dataclasses import asdict\nimport json\n\nclass find_potentials(instagram_bot):\n def __init__ (self, driver, ig_parser, post_filter):\n super().__init__(driver)\n self.ig_parser = ig_parser\n self.post_filter = post_filter\n\n def collect_accounts(self, button_xpath, count_xpath):\n matched_accounts = []\n all_accounts = []\n d = self.driver\n p = self.ig_parser\n pf = self.post_filter\n actions = ActionChains(d) \n press_tab = actions.send_keys(Keys.TAB)\n\n self.open_link('https://www.instagram.com/tamuvsa/?hl=en')\n random_sleep()\n\n accounts = self.click_button(xpaths.followers_button, \"followers\")\n num = int(self.save_number(count_xpath)) # save number follow(er/ing)\n press_tab.perform() \n\n while len(accounts) < num - 10:\n try:\n profiles_in_view = self.save_links(xpaths.profile_link, '.com/')\n [all_accounts.append(x) for x in profiles_in_view if x not in all_accounts] \n for _ in range(len(profiles_in_view) * 3):\n press_tab.perform()\n sleep(0.5)\n except NoSuchElementException:\n continue\n print(len(all_accounts))\n random_sleep() \n\n for acc in enumerate(all_accounts):\n self.open_link(acc)\n profile = p.profile_parser(acc)\n filtered_profile = pf.filter_profile(acc, config, post_keywords)\n\n\n if filtered_profile.matches_keyword == True:\n matched_accounts.append(acc.bio)\n\n with open(\"matched_profiles.json\", \"w\") as outfile: \n json.dump(matched_accounts, outfile, indent = 4)\n","sub_path":"find_potentials.py","file_name":"find_potentials.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218479801","text":"from functools import reduce\n\nn = int(input())\nspatial_info = [list(map(int, list(input()))) for _ in range(n)]\n\ndef get_space(spatial_info: list, size: int, row: int, col: int):\n space = spatial_info[row: row+size]\n space = list(map(lambda x: x[col:col+size], space))\n return space\n \ndef is_empty(space: list) -> int:\n if isinstance(space, int):\n return True if space == 1 else False\n else:\n if all(list(map(lambda z: reduce(lambda x, y: x*y, z), 
space))):\n return True\n else:\n return False\n\ntotal_cases = 0\nnum_cases_dict = {}\n\nfor size in range(1, n+1):\n num_cases = 0\n num_move = n - size + 1\n\n for row in range(num_move):\n for col in range(num_move):\n if size >= 2:\n space = get_space(spatial_info, size, row, col)\n else:\n space = spatial_info[row][col]\n if is_empty(space):\n num_cases += 1\n num_cases_dict[size] = num_cases\n total_cases += num_cases\n\nprint(f'total: {total_cases}')\nfor key, value in num_cases_dict.items():\n if value > 0:\n print(f'size[{key}]: {value}')\n","sub_path":"scofe2021/todayhome.py","file_name":"todayhome.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137175160","text":"class Stack:\r\n def __init__(self):\r\n self.s=[]\r\n def push(self,val):\r\n self.s.append(val)\r\n def pop(self):\r\n if len(self.s)==0:\r\n return -1\r\n return self.s.pop()\r\n def display(self):\r\n return self.s\r\n\r\nob=Stack()\r\nwhile True:\r\n ch=int(input())\r\n if ch==1:\r\n ob.push(int(input()))\r\n elif ch==2:\r\n print(ob.pop())\r\n elif ch==3:\r\n print(ob.display())\r\n else:\r\n break\r\n","sub_path":"Stacks/Stack_Array.py","file_name":"Stack_Array.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238786296","text":"from collections import OrderedDict\n\nclass FirstUnique:\n\n def __init__(self, nums: List[int]):\n self._unique = OrderedDict()\n self._non_unique = set()\n for n in nums:\n self.add(n)\n\n def showFirstUnique(self) -> int:\n if len(self._unique) == 0:\n return -1\n else:\n # Return first element of the\n # OrderedDict (the earliest)\n for x in self._unique:\n return x\n\n def add(self, value: int) -> None:\n if not self.is_seen_before(value):\n self._unique[value] = True\n elif value in self._unique:\n del self._unique[value]\n self._non_unique.add(value)\n \n def is_seen_before(self, value: int) -> bool:\n return value in self._unique or value in self._non_unique;\n\n\n# Your FirstUnique object will be instantiated and called as such:\n# obj = FirstUnique(nums)\n# param_1 = obj.showFirstUnique()\n# obj.add(value)\n","sub_path":"Practice-2020/InterviewLeetcode/Python/first_unique_number.py","file_name":"first_unique_number.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"47678938","text":"\"\"\"\nпростой настраиваемый компонент окна списка с прокруткой\n\"\"\"\n\nfrom tkinter import *\n\n\nclass ScrolledList(Frame):\n def __init__(self, options, parent=None, **kwargs):\n Frame.__init__(self, parent, **kwargs)\n self.pack(expand=YES, fill=BOTH) # сделать растягиваемым\n self.make_widgets(options)\n\n def handle_list(self, event):\n # index = self.listbox.curselection() # при двойном щелчке на списке\n # label = self.listbox.get(index) # извлечь выбранный текст\n # self.run_command(label) # и вызвать действие или get(ACTIVE)\n for ix in self.listbox.curselection():\n self.run_command(self.listbox.get(ix))\n\n def make_widgets(self, options):\n s_bar = Scrollbar(self)\n s_list = Listbox(self)\n s_bar.config(command=s_list.yview) # связать s_bar и s_list\n s_list.config(yscrollcommand=s_bar.set) # сдвиг одного = сдвиг другого\n s_bar.pack(side=RIGHT, fill=Y) # первым добавлен – посл. 
обрезан\n s_list.pack(side=LEFT, expand=YES, fill=BOTH) # список обрезается первым\n for label in options: # или enumerate(options)\n s_list.insert(END, label) # добавить в виджет списка\n s_list.config(selectmode=EXTENDED, setgrid=1) # режимы выбора, измен. разм.\n s_list.bind(\"\", self.handle_list) # установить обр-к события\n s_list.bind(\"\", self.handle_list) # установить обр-к события\n self.listbox = s_list\n\n def run_command(self, selection):\n print(\"You selected: <{}>\".format(selection))\n\n\nif __name__ == \"__main__\":\n options = map(lambda x: \"Lumberjack-{}\".format(x), range(20))\n ScrolledList(options).mainloop()\n","sub_path":"dev/GUI/Tour/scrolledlist.py","file_name":"scrolledlist.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"542530549","text":"\"\"\" \n@author: Frank\n@file: testflask.py \n@time: 18-4-17 上午10:15 \n\"\"\"\n\nfrom flask import Flask, current_app\n\napp = Flask(__name__)\n# 应用上下文 对象 Flask\n# 请求上下文 对象 Request\n# Flask APPContext\n# Request RequestContext\nctx = app.app_context()\nctx.push()\na = current_app\nd = current_app.config[\"DEBUG\"]\nctx.pop()\n# 离线引用 单元测试\n\n","sub_path":"testflask/testflask.py","file_name":"testflask.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"173594870","text":"import PyPDF2\nimport os\n\nif(os.path.isdir(\"temp\")==False):\n os.mkdir(\"temp\")\n\ntxtpath=\"\"\npdfpath=\"\"\n\npdfpath=input(\"Enter the name of your pdf file - please use backlash when typing in directory path: \")\ntxtpath=input(\"Enter the name of your txt file - please use backlash when typing in directory path: \")\n\nBASEDIR = os.path.realpath(\"temp\")\nprint(BASEDIR)\nif(len(txtpath)==0):\n txtpath = os.path.join(BASEDIR,os.path.basename(os.path.normpath(pdfpath)).replace(\".pdf\",\"\")+\".txt\")\npdfobj = open(pdfpath, 'rb')\n\npdfread = PyPDF2.PdfFileReader(pdfobj)\n\nx=pdfread.numPages\n\nfor i in range(x):\n pageObj = pdfread.getPage(i)\n with open(txtpath, 'a+')as f:\n f.write((pageObj.extractText()))\n print(pageObj.extractText())\n\npageObj.close()","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113272139","text":"from keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, LSTM\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport numpy as np\r\nimport pandas as pd\r\n#importação para gráficos:\r\nimport matplotlib.pyplot as plt\r\n\r\nbase = pd.read_csv('petr4-treinamento.csv')\r\n\r\n#excluindo valores nulos:\r\nbase = base.dropna()\r\n\r\nbase_treinamento = base.iloc[:, 1:2].values\r\n\r\n#normalização dos dados: \r\n#deixando dados entre 0 e 1\r\nnormalizador = MinMaxScaler(feature_range=(0,1))\r\nbase_treinamento_normalizada = normalizador.fit_transform(base_treinamento)\r\n\r\n#previsores -> ultimos 90 valores\r\nprevisores = []\r\npreco_real = []\r\n\r\n#onde começa até onde termina\r\nfor i in range (90, 1242):\r\n previsores.append(base_treinamento_normalizada[i-90: i,0])\r\n preco_real.append(base_treinamento_normalizada[i, 0])\r\n\r\n\r\n#passando para o formato numpy\r\nprevisores, preco_real=np.array(previsores), np.array(preco_real)\r\n\r\nprevisores = np.reshape(previsores, (previsores.shape[0], previsores.shape[1], 1))\r\n #tamanho total #90\r\n \r\n#CRIANDO 
\r\nregressor = Sequential()\r\nregressor.add(LSTM(units=100, return_sequences=True, input_shape = (previsores.shape[1], 1)))\r\n# number of neurons\r\n# return_sequences indicates whether more layers follow and the sequence must be passed along\r\n# 1 -> only one predictor attribute\r\nregressor.add(Dropout(0.5))\r\n\r\nregressor.add(LSTM(units=50, return_sequences=True))\r\nregressor.add(Dropout(0.5))\r\n\r\nregressor.add(LSTM(units=50))\r\nregressor.add(Dropout(0.5))\r\n\r\nregressor.add(Dense(units=1, activation='sigmoid'))\r\n\r\nregressor.compile(optimizer='rmsprop', loss='mean_squared_error',\r\n metrics=['mean_absolute_error'])\r\n\r\nhistory = regressor.fit(previsores, preco_real, epochs=100, batch_size=32)\r\n\r\nbase_teste = pd.read_csv('petr4-teste.csv')\r\npreco_real_teste = base_teste.iloc[:,1:2].values\r\n# took only the first column\r\n\r\nbase_completa = pd.concat((base['Open'], base_teste['Open']), axis=0)\r\n# concatenate the original data with the test set, to grab the previous 90 values\r\n\r\nentradas = base_completa[len(base_completa)-len(base_teste)-90:].values\r\n\r\nentradas = entradas.reshape(-1,1)\r\nentradas = normalizador.transform(entradas)\r\n\r\nX_teste = []\r\nfor i in range (90, 112):\r\n X_teste.append(entradas[i-90:i,0])\r\n \r\nX_teste = np.array(X_teste)\r\nX_teste = np.reshape(X_teste, (X_teste.shape[0], X_teste.shape[1], 1))\r\n\r\nprevisoes = regressor.predict(X_teste)\r\n# denormalize to make the values easier to read\r\nprevisoes = normalizador.inverse_transform(previsoes)\r\n\r\n# means\r\nprevisoes.mean()\r\npreco_real_teste.mean()\r\n\r\n# PLOT COMPARING THE TEST RESULTS\r\nplt.figure(figsize = (15,5))\r\nplt.plot(preco_real_teste, color='red', label='Preco Real')\r\nplt.plot(previsoes, color='blue', label='Previsoes')\r\nplt.title('Previsão')\r\nplt.xlabel('Tempo')\r\nplt.ylabel('Valor')\r\nplt.legend()\r\nplt.show()\r\n\r\n######################################################################\r\n\r\nfrom sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error, r2_score, explained_variance_score, max_error\r\n\r\n\r\nevs = explained_variance_score(preco_real_teste, previsoes)\r\nprint(evs) # 1 is best\r\n\r\nr2 = r2_score(preco_real_teste, previsoes)\r\nprint(r2) # 1 is best\r\n\r\nmse = mean_squared_error(preco_real_teste, previsoes)\r\nprint(mse) # 0 is best\r\n\r\nmae = mean_absolute_error(preco_real_teste, previsoes)\r\nprint(mae) # 0 is best\r\n\r\n#####################################################################\r\npreco_real_treino = []\r\nfor i in range (90, 1242):\r\n preco_real_treino.append(base_treinamento[i, 0])\r\n\r\nprevisoes_treino = regressor.predict(previsores)\r\nprevisoes_treino = normalizador.inverse_transform(previsoes_treino)\r\n\r\n# PLOT COMPARING THE TRAINING RESULTS\r\nplt.figure(figsize = (15,5))\r\nplt.plot(preco_real_treino, color='red', label='Preco Real')\r\nplt.plot(previsoes_treino, color='blue', label='Previsoes')\r\nplt.title('Previsão')\r\nplt.xlabel('Tempo')\r\nplt.ylabel('Valor')\r\nplt.legend()\r\nplt.show()","sub_path":"RNN/pet4.py","file_name":"pet4.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441206635","text":"from Database import Database\nfrom PlayersParser import PlayersParser\nfrom SbcParser import SbcParser\n\n\ndef load_players():\n players_parser = PlayersParser()\n\n database.remove_players()\n\n players_data = players_parser.get_players()\n database.insert_player(players_data)\n\n print(database.count_players())\n 
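# dump the count and the loaded rows so a run can be sanity-checked from the console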
print(database.get_players())\n\ndef load_challenges():\n sbc_parser = SbcParser()\n database.remove_challenges()\n\n sbc_data = sbc_parser.get_sbs()\n database.insert_challenge(sbc_data)\n\n print(database.count_challenges())\n print(database.get_challenges())\n\n\nif __name__ == \"__main__\":\n\n database = Database()\n\n print(\"Type 1 to load players\")\n print(\"Type 2 to load challenges\")\n print(\"Type 3 to load both\")\n choice = input()\n if choice.isdigit():\n choice = int(choice)\n else:\n choice = -1\n if choice == 1:\n load_players()\n elif choice == 2:\n load_challenges()\n elif choice == 3:\n load_players()\n load_challenges()\n else:\n print(\"Didn't understand your choice\")\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"531346793","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http.response import HttpResponseRedirect, HttpResponseRedirectBase\nfrom django import forms\nimport ipaddress\nimport logging\n\ndef file_to_list():\n op = ''\n f1 = open ('allcustreport.log', 'r')\n for line in f1:\n op += line\n f1.close()\n op_html = ''\n mip_sid_cport_vsize_etc = []\n op_list = op.splitlines()\n for i in op_list:\n i = i.split()\n if i !=[]:\n try:\n ipaddress.ip_address(i[0])\n mip_sid_cport_vsize_etc.append([i[0],i[1],i[2],i[3],i[4],i[5],i[6],i[7],i[8],i[9],i[10],i[11],i[12],i[13],i[14],i[15],i[16],i[17]])\n except:\n continue\n return mip_sid_cport_vsize_etc\n\n\ndef populate_sets():\n mip_sid_cport_vsize_etc = file_to_list()\n city_set = set()\n state_set = set()\n country_set = set()\n continent_set = set()\n version_set = set()\n for [a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16,a17] in mip_sid_cport_vsize_etc:\n city_set.add(a11)\n state_set.add(a12)\n country_set.add(a13)\n continent_set.add(a14)\n version_set.add(a9)\n\n # print(city_set, state_set,country_set,continent_set,version_set)\n version_dict = {i:i for i in version_set}\n version_list = list(version_dict.items()) + [('','----')]\n\n city_dict = {i:i for i in city_set}\n city_list = list(city_dict.items())+ [('','----')]\n\n state_dict = {i:i for i in state_set}\n state_list = list(state_dict.items())+ [('','----')]\n\n country_dict = {i:i for i in country_set}\n country_list = list(country_dict.items())+ [('','----')]\n\n continent_dict = {i:i for i in continent_set}\n continent_list = list(continent_dict.items())+ [('','----')]\n return(version_list, city_list, state_list, country_list, continent_list)\n\nclass FindSportForm(forms.Form):\n (version_list, city_list, state_list, country_list, continent_list) = populate_sets()\n customer_id = forms.IntegerField(required=False)\n machine_ip = forms.GenericIPAddressField(required=False)\n version = forms.ChoiceField(required=False, choices = version_list)\n region = forms.IntegerField(required=False)\n city = forms.ChoiceField(required=False, choices = city_list)\n state = forms.ChoiceField(required=False, choices = state_list)\n country = forms.ChoiceField(required=False, choices = country_list)\n continent = forms.ChoiceField(required=False, choices = continent_list)\n \ndef findsport(request):\n fmt = '%(asctime)s %(levelname)s %(name)s %(lineno)s %(message)s'\n logging.basicConfig(level='DEBUG', format= fmt, filename='/Library/Frameworks/Python.framework/Versions/3.4/bin/django_jay/findsport.log')\n logger = logging.getLogger('sport')\n if 
request.method == 'POST':\n mip_sid_cport_vsize_etc = file_to_list()\n d = FindSportForm(request.POST)\n if d.is_valid():\n cd = d.cleaned_data\n # print(cd)\n searched_mip_sid_cport_vsize_etc = mip_sid_cport_vsize_etc\n if cd['customer_id'] is not None:\n cid = format(cd['customer_id'], ',')\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[3] == cid]\n if cd['region'] is not None:\n reg = format(cd['region'], ',')\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[10] == reg]\n if cd['machine_ip'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[0] == cd['machine_ip']]\n if cd['version'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[9] == cd['version']]\n if cd['city'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[11] == cd['city']]\n if cd['state'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[12] == cd['state']]\n if cd['country'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[13] == cd['country']]\n if cd['continent'] != '':\n searched_mip_sid_cport_vsize_etc = [i for i in searched_mip_sid_cport_vsize_etc if i[14] == cd['continent']]\n logger.info(cd['customer_id'])\n logger.info(searched_mip_sid_cport_vsize_etc)\n count = 1\n op_html = ''\n for a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16,a17 in searched_mip_sid_cport_vsize_etc:\n # NOTE: the row markup was stripped from this copy of the source; the <tr>/<td> tags below are assumed\n op_html += '<tr><td>'+str(count)+'</td><td>'+a0+'</td><td>'+a1+'</td><td>'+a2+'</td><td>'+a3+'</td><td>'+a5+'-'+a6+'-'+a7+'</td><td>'+a9+'</td><td>'+a10+'</td><td>'+a11+'</td><td>'+a12+'</td><td>'+a13+'</td><td>'+a14+'</td><td>'+a15+'</td><td>'+a16+'</td><td>'+a17+'</td></tr>'\n count += 1\n context = {\n \"op\" : op_html\n }\n logger.info(op_html)\n return render(request, 'findsport_result.html', context)\n else:\n context = {\n \"form\" : d\n }\n return render(request, 'findsport_form.html', context)\n else:\n d = FindSportForm()\n context = {\n \"form\" : d,\n }\n return render(request, 'findsport_form.html', context)\n","sub_path":"j_app/findsport_view.py","file_name":"findsport_view.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498676465","text":"from nio.metadata.properties.holder import PropertyHolder\nfrom nio.metadata.properties.object import ObjectProperty\nfrom nio.metadata.properties.string import StringProperty\nfrom nio.metadata.properties.timedelta import TimeDeltaProperty\nfrom nio.common.block.base import Block\nfrom nio.common.discovery import Discoverable, DiscoverableType\nfrom nio.modules.threading import spawn\nfrom nio.metadata.properties.bool import BoolProperty\nfrom nio.common.signal.base import Signal\n\nfrom .mixins.profiling.profile_tools import ProfileData\nfrom .mixins.profiling.profiling_mixin import get_profile_data_object\n\n\nfrom time import process_time, time, sleep\nfrom ast import literal_eval\nimport datetime\nnow = datetime.datetime.now\n\n\nclass Options(PropertyHolder):\n interval_only = BoolProperty(default=True, title=\"Intervals Only\")\n\n\nclass FormatOutput(PropertyHolder):\n format_output = BoolProperty(default=False, title=\"Format cProfile Data?\")\n delimiter = StringProperty(default=\"\\t\", title=\"delimiter\")\n\n\n@Discoverable(DiscoverableType.block)\nclass Profile(Block):\n signal_name = StringProperty(default='profile', title='Name')\n interval = TimeDeltaProperty(title='Interval', default={'seconds': 1})\n options = 
ObjectProperty(Options)\n format = ObjectProperty(FormatOutput)\n\n def start(self):\n \"\"\" Start simulating in a new thread.\n Overridden from the Block interface.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n super().start()\n self._kill = False\n self.profile_data_object = None\n self._interval = self.interval.days * 24 * 60 * 60 + self.interval.seconds + self.interval.microseconds * 1e-6\n try:\n self.format._delimiter = literal_eval(self.format.delimiter)\n except SyntaxError:\n self.format._delimiter = literal_eval(\"'''\" + self.format.delimiter + \"'''\")\n\n spawn(self.profile)\n\n def profile(self, *args, **kwargs):\n # pull in local variables\n service_name = \"SERVICE\"\n tnow = now\n ptime = process_time\n\n cpu_start = ptime()\n clock_start = tnow()\n\n last = ProfileData(None, service_name, tnow(), 0, 0)\n last_profiles = {}\n\n while not self._kill:\n start = time()\n timestamp = tnow()\n runtime = ptime() - cpu_start\n clock_time = (timestamp - clock_start).total_seconds()\n\n data = ProfileData(None, service_name, timestamp, clock_time, runtime)\n\n if self.profile_data_object is None:\n self.profile_data_object = get_profile_data_object()\n\n if self.profile_data_object is not None:\n with self.profile_data_object as profiles:\n if last_profiles:\n out_profiles = (profiles[key] - last_profiles[key]\n if key in last_profiles else profiles[key]\n for key in profiles.keys())\n # only care about ones who's interval is > 0\n # out_profiles = (p for p in out_profiles if p.interval > 0)\n else:\n out_profiles = profiles.values()\n else:\n out_profiles = None\n\n self.output_profiled_data(data - last, out_profiles)\n last = data\n if out_profiles:\n last_profiles = profiles\n\n try:\n sleep(self._interval - (time() - start))\n except ValueError:\n pass\n\n def output_profiled_data(self, data, profiles):\n out = data.get_dict()\n if self.options.interval_only:\n out.pop(\"cpu_time\")\n out.pop(\"clock_time\")\n\n signals = [Signal({self.signal_name: out})]\n\n if profiles:\n if self.format.format_output:\n if type(profiles) is not tuple:\n profiles = tuple(profiles)\n for value in profiles:\n print(\"\\n\\n\")\n print(value.format(strip=True, delimiter=self.format._delimiter))\n\n signals.extend(Signal({self.signal_name: p.get_dict()}) for p in profiles)\n\n self.notify_signals(signals)\n\n def stop(self):\n self._kill = True\n super().stop()\n\n","sub_path":"profile_block.py","file_name":"profile_block.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396897189","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n\nimport argparse\nimport asyncio\nimport logging\nimport logging.handlers\nimport os\nimport sys\n\nimport tornado.ioloop\n\nfrom . import conf\nfrom . import registry\nfrom . import route\nfrom . import server\nfrom . 
import utils\n\n\ndef main():\n parser = argparse.ArgumentParser(\n prog=\"turbo-tunnel\", description=\"TurboTunnel cmdline tool.\"\n )\n parser.add_argument(\"-c\", \"--config\", help=\"config yaml file path\")\n parser.add_argument(\"-l\", \"--listen\", help=\"listen url\")\n parser.add_argument(\"-t\", \"--tunnel\", action=\"append\", help=\"tunnel url\")\n parser.add_argument(\n \"--log-level\",\n help=\"log level, default is info\",\n choices=(\"debug\", \"info\", \"warn\", \"error\"),\n default=\"info\",\n )\n parser.add_argument(\"--log-file\", help=\"log file save path\")\n parser.add_argument(\"--retry\", type=int, help=\"retry connect count\", default=0)\n parser.add_argument(\n \"--auto-reload\",\n help=\"auto reload config file\",\n action=\"store_true\",\n default=False,\n )\n parser.add_argument(\n \"-d\", \"--daemon\", help=\"run as daemon\", action=\"store_true\", default=False\n )\n parser.add_argument(\"-p\", \"--plugin\", help=\"load plugin\", action=\"append\")\n\n args = sys.argv[1:]\n if not args:\n parser.print_help()\n return 0\n\n args = parser.parse_args(args)\n\n if args.plugin:\n for plugin in args.plugin:\n for module in (\"turbo_tunnel.plugins.%s\" % plugin, plugin):\n try:\n __import__(module)\n except ImportError:\n pass\n else:\n break\n else:\n utils.logger.error(\"Load plugin %s failed\" % plugin)\n\n tunnel_server = None\n if args.config:\n if not os.path.exists(args.config):\n print(\"Config file %s not exist\" % args.config, file=sys.stderr)\n return -1\n config = conf.TunnelConfiguration(args.config, args.auto_reload)\n router = route.TunnelRouter(config)\n tunnel_server = server.TunnelServer(config.listen_url, router)\n elif args.listen:\n tunnel = args.tunnel\n if not tunnel:\n tunnel = [\"tcp://\"]\n tunnel_server = server.TunnelServer(args.listen, tunnel)\n else:\n print(\"Argument --listen not specified\", file=sys.stderr)\n return -1\n\n log_file = None\n if args.log_file:\n log_file = os.path.abspath(args.log_file)\n\n if sys.platform != \"win32\" and args.daemon:\n import daemon\n\n daemon.DaemonContext(stderr=open(\"error.txt\", \"w\")).open()\n elif args.daemon:\n utils.win32_daemon()\n return 0\n\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\"[%(asctime)s][%(levelname)s]%(message)s\")\n handler.setFormatter(formatter)\n\n if args.log_level == \"debug\":\n utils.logger.setLevel(logging.DEBUG)\n elif args.log_level == \"info\":\n utils.logger.setLevel(logging.INFO)\n elif args.log_level == \"warn\":\n utils.logger.setLevel(logging.WARN)\n elif args.log_level == \"error\":\n utils.logger.setLevel(logging.ERROR)\n\n utils.logger.propagate = 0\n utils.logger.addHandler(handler)\n\n if log_file:\n handler = logging.handlers.RotatingFileHandler(\n log_file, maxBytes=10 * 1024 * 1024, backupCount=4\n )\n formatter = logging.Formatter(\n \"[%(asctime)s][%(levelname)s][%(filename)s][%(lineno)d]%(message)s\"\n )\n handler.setFormatter(formatter)\n utils.logger.addHandler(handler)\n\n if args.retry:\n server.TunnelServer.retry_count = args.retry\n\n if sys.platform == 'win32' and sys.version_info[1] >= 8:\n # on Windows, the default asyncio event loop is ProactorEventLoop from python3.8\n loop = asyncio.SelectorEventLoop()\n asyncio.set_event_loop(loop)\n\n tunnel_server.start()\n try:\n tornado.ioloop.IOLoop.current().start()\n except KeyboardInterrupt:\n registry.plugin_registry.notify(\"unload\")\n tasks = utils.AsyncTaskManager().running_tasks\n for task in tasks:\n print(\"Task %s can't auto exit\" % task, 
file=sys.stderr)\n print(\"Process exit warmly.\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"turbo_tunnel/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142175844","text":"#!/usr/bin/env python\n\"\"\"\n_FileBased_\n\nFile based splitting algorithm that will chop a fileset into\na set of jobs based on file boundaries\n\"\"\"\n\nfrom WMCore.JobSplitting.JobFactory import JobFactory\nfrom WMCore.WMBS.File import File\n\n\nclass FileBased(JobFactory):\n\n def algorithm(self, *args, **kwargs):\n \"\"\"\n _algorithm_\n\n Split up all the available files such that each job will process a\n maximum of 'files_per_job'. If the 'files_per_job' parameters is not\n passed in jobs will process a maximum of 10 files.\n \"\"\"\n\n filesPerJob = int(kwargs.get(\"files_per_job\", 10))\n jobsPerGroup = int(kwargs.get(\"jobs_per_group\", 0))\n totalFiles = int(kwargs.get(\"total_files\", 0))\n runBoundaries = kwargs.get(\"respect_run_boundaries\", False)\n getParents = kwargs.get(\"include_parents\", False)\n filesInJob = 0\n listOfFiles = []\n timePerEvent, sizePerEvent, memoryRequirement = \\\n self.getPerformanceParameters(kwargs.get('performance', {}))\n\n #Get a dictionary of sites, files\n locationDict = self.sortByLocation()\n\n ## Make a list with all the files in the locationDict.\n files = []\n for filesPerLocSet in locationDict.values():\n for file in filesPerLocSet:\n files.append(file)\n ## Here we can apply a lumi-mask and remove files \n ## that are left with 0 lumis to process.\n ## Sort the list of files by LFN.\n if len(files) != 0:\n files = sorted(files, key = lambda f: f['lfn'])\n ## Keep only the first totalFiles files and remove\n ## the other files from the locationDict.\n if totalFiles > 0 and totalFiles < len(files):\n removedFiles = files[totalFiles:]\n files = files[:totalFiles]\n for file in removedFiles:\n for locSet in locationDict.keys():\n if file in locationDict[locSet]:\n locationDict[locSet].remove(file)\n\n for locSet in locationDict.keys():\n #Now we have all the files in a certain location set\n fileList = locationDict[locSet]\n filesInJob = 0\n jobsInGroup = 0\n self.newGroup()\n if len(fileList) == 0:\n continue\n jobRun = None\n for f in fileList:\n if getParents:\n parentLFNs = self.findParent(lfn = f['lfn'])\n for lfn in parentLFNs:\n parent = File(lfn = lfn)\n f['parents'].add(parent)\n fileRun = f.get('minrun', None)\n if filesInJob == 0 or filesInJob == filesPerJob or (runBoundaries and fileRun != jobRun):\n if jobsPerGroup:\n if jobsInGroup > jobsPerGroup:\n self.newGroup()\n jobsInGroup = 0\n\n self.newJob(name = self.getJobName())\n self.currentJob.addResourceEstimates(memory = memoryRequirement)\n\n filesInJob = 0\n jobsInGroup += 1\n jobRun = fileRun\n\n self.currentJob.addFile(f)\n filesInJob += 1\n fileTime = f['events'] * timePerEvent\n fileSize = f['events'] * sizePerEvent\n self.currentJob.addResourceEstimates(jobTime = fileTime,\n disk = fileSize)\n listOfFiles.append(f)\n\n return\n","sub_path":"src/python/WMCore/JobSplitting/FileBased.py","file_name":"FileBased.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400101000","text":"import re\n\nfrom codegen.ast import *\nfrom codegen.iegenlib import IEGenLib\nfrom codegen.setlib import Var\nfrom codegen.helpers import VarHelper\n\ntry:\n 
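# the clang bindings are optional; the _clangon flag below records whether the import succeeded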
import clang.cindex as clindex\n from clang.cindex import CursorKind\n _clangon = True\nexcept:\n _clangon = False\n\ntry:\n import islpy as isl\n _islon = True\nexcept:\n print(\"WARN: islpy package unavailable.\")\n _islon = False\n\nclass ASTFactory(object):\n @staticmethod\n def getNode(kind, value='', level=0, start=(0,0), end=(0,0), type=None):\n node = None\n if kind == CursorKind.TRANSLATION_UNIT:\n node = Program()\n elif kind == CursorKind.USING_DIRECTIVE:\n node = UsingDir()\n elif kind == CursorKind.NAMESPACE_REF:\n node = NamespaceRef()\n elif kind == CursorKind.NAMESPACE:\n node = Namespace()\n elif kind == CursorKind.FUNCTION_DECL:\n node = FunctionDecl()\n elif kind == CursorKind.PARM_DECL:\n node = ParamDecl()\n elif kind == CursorKind.COMPOUND_STMT:\n node = CompoundStmt()\n elif kind == CursorKind.DECL_STMT:\n node = DeclStmt()\n elif kind == CursorKind.VAR_DECL:\n node = VarDecl()\n elif kind == CursorKind.TYPE_REF:\n node = TypeRef()\n elif kind == CursorKind.IF_STMT:\n node = IfStmt()\n elif kind == CursorKind.FOR_STMT:\n node = ForStmt()\n elif kind == CursorKind.RETURN_STMT:\n node = ReturnStmt()\n elif kind == CursorKind.NULL_STMT:\n node = NullStmt()\n elif kind == CursorKind.LABEL_STMT:\n node = LabelStmt()\n elif kind == CursorKind.FLOATING_LITERAL:\n node = FloatLiteral()\n elif kind == CursorKind.INTEGER_LITERAL:\n node = IntLiteral()\n elif kind == CursorKind.STRING_LITERAL:\n node = StringLiteral()\n elif kind == CursorKind.UNARY_OPERATOR:\n node = UnaryOper()\n elif kind == CursorKind.BINARY_OPERATOR:\n node = BinaryOper()\n elif kind == CursorKind.COMPOUND_ASSIGNMENT_OPERATOR:\n node = CompoundAssignOper()\n elif kind == CursorKind.DECL_REF_EXPR:\n node = DeclRefExpr()\n elif kind == CursorKind.STRUCT_DECL:\n node = StructDecl()\n elif kind == CursorKind.CLASS_DECL:\n node = ClassDecl()\n elif kind == CursorKind.FIELD_DECL:\n node = FieldDecl()\n elif kind == CursorKind.TYPEDEF_DECL:\n node = TypedefDecl()\n elif kind == CursorKind.CXX_ACCESS_SPEC_DECL:\n node = AccessSpecDecl()\n elif kind == CursorKind.UNEXPOSED_EXPR:\n node = None #UnexposedExpr()\n elif kind == CursorKind.ARRAY_SUBSCRIPT_EXPR:\n node = ArraySubscriptExpr()\n elif kind == CursorKind.PAREN_EXPR:\n node = ParenExpr()\n elif kind == CursorKind.CALL_EXPR:\n node = CallExpr()\n elif kind == CursorKind.GNU_NULL_EXPR:\n node = NullExpr()\n elif kind == CursorKind.CONSTRUCTOR:\n node = Constructor()\n elif kind == CursorKind.DESTRUCTOR:\n node = Destructor()\n elif kind == CursorKind.CXX_METHOD:\n node = Method()\n elif kind == CursorKind.MEMBER_REF_EXPR:\n node = MemberRefExpr()\n else:\n #raise TypeError(\"Invalid type '%s'.\" % str(kind))\n print(\"Invalid type '%s'.\" % str(kind))\n\n if node is not None:\n node.value = value\n node.level = level\n node.start = start\n node.end = end\n node.type = type\n\n return node\n\nclass SetFactory(object):\n def __init__(self):\n pass # Nada yet\n\n def set(self, setstr, name=''):\n pass\n\n def map(self, mapstr, name=''):\n pass\n\n def codegen(self, schedule):\n pass\n\nclass ISLFactory(SetFactory):\n _instance = None\n\n def __init__(self, ctx=None):\n super().__init__()\n if _islon and ctx is None:\n ctx = isl.DEFAULT_CONTEXT\n self._context = ctx\n\n @classmethod\n def instance(cls):\n if ISLFactory._instance is None:\n ISLFactory._instance = ISLFactory()\n return ISLFactory._instance\n\n @staticmethod\n def validate(expression, name=''):\n # Ensure name is present if applicable\n pos = expression.find('{')\n stmt = 
expression[pos+1:expression.find('[')].strip()\n if len(name) > 0 and len(stmt) < 1:\n stmt = name\n if pos == 0:\n lhs = expression[0]\n else:\n lhs = expression[0:pos]\n expression = lhs + stmt + expression[pos+1:]\n\n pos = expression.find(':')\n consts = []\n operands = re.split('&&|and', expression[pos + 1:].rstrip(';').rstrip('}').strip())\n for operand in operands:\n constraint = Var.from_expr(operand)\n if re.match('[A-Za-z_]+', constraint.lower):\n consts.append(constraint.lower)\n if re.match('[A-Za-z_]+', constraint.upper):\n consts.append(constraint.upper)\n if len(consts) > 0:\n expression = '[%s] -> %s' % (','.join(consts), expression)\n return expression\n\n def replaceUFs(self, code, symtable):\n for symname in sorted(symtable.keys()):\n symbol = symtable[symname]\n if len(symbol) > 0:\n lpos = code.find('%s(' % symname)\n while lpos >= 0:\n rpos = code.find(')', lpos + 1)\n args = VarHelper.replaceChars(code[lpos+len(symname)+1:rpos])\n newname = '%s_%s_' % (symname, args)\n if len(symbol.sets[0]) < 1:\n symbol.sets[0] = args # Set the iterator\n bpos = code.find(']')\n npos = code.find(newname)\n if bpos > 0 and (npos < 0 or npos > bpos):\n code = code[0:bpos] + ',' + newname + code[bpos:lpos] + newname + code[rpos + 1:]\n else:\n code = code[0:lpos] + newname + code[rpos+1:]\n lpos = code.find('%s(' % symname)\n return code\n\n def restoreUFs(self, code, functions, iters=()):\n for function in functions:\n funcname = function.name\n pattern = r\"%s_\" % funcname\n lpos = code.find(pattern)\n while lpos >= 0:\n mpos = lpos + len(funcname) + 1\n rpos = code.find('_', mpos + 1)\n lhs = code[0:lpos]\n rhs = code[rpos+1:]\n mid = code[mpos:rpos]\n arg = VarHelper.restoreChars(mid)\n items = re.split(r'[\\+\\-\\*\\/\\\\\\%\\&\\|]+', arg)\n for i in range(len(items)):\n item = items[i]\n if item in iters:\n arg = arg.replace(item, 'c%d' % iters.index(item))\n newname = '%s[%s]' % (funcname, arg)\n code = lhs + newname + rhs\n lpos = code.find(pattern)\n return code\n\n def set(self, domain, name=''):\n if domain[0] != '[':\n domain = ISLFactory.validate(domain, name)\n if '(' in domain:\n domain = self.replaceUFs(domain)\n\n try:\n return isl.Set(domain, self._context)\n except Exception as ex:\n print(\"ERROR: Set '%s' caused error '%s'\" % (domain, str(ex)))\n raise ex\n\n def map(self, mapstr, name=''):\n try:\n return isl.Map(mapstr, self._context)\n except Exception as ex:\n print(\"Map '%s' caused error '%s'\" % (mapstr, str(ex)))\n raise ex\n\n def codegen(self, sched):\n bld = isl.AstBuild.from_context(isl.Set(\"{:}\"))\n mp = isl.Map.from_domain_and_range(sched, sched)\n mp = isl.Map.identity(mp.get_space())\n mp = isl.Map.from_domain(sched)\n ast = bld.ast_from_schedule(mp)\n ptr = isl.Printer.to_str(isl.DEFAULT_CONTEXT)\n ptr = ptr.set_output_format(isl.format.C)\n ptr.flush()\n ptr = ptr.print_ast_node(ast)\n\n # Restore uninterpreted functions...\n iters = sched.split('[')[1].split(']')[0].replace(' ', '').replace('0', 'z').split(',')\n code = self.restoreUFs(ptr.get_str(), iters)\n return code\n\n\nclass IEGenFactory(SetFactory):\n _instance = None\n\n def __init__(self):\n super().__init__()\n self._iegen = IEGenLib()\n self._constants = {}\n\n @classmethod\n def instance(cls):\n if IEGenFactory._instance is None:\n IEGenFactory._instance = IEGenFactory()\n return IEGenFactory._instance\n\n @property\n def constants(self):\n return self._constants.values()\n\n @constants.setter\n def constants(self, constants):\n for const in constants:\n 
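# index each constant by name; restoreConsts() later substitutes literal values back to these symbolic names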
self._constants[const.name] = const\n\n def set(self, setstr, name=''):\n return self._iegen.add(setstr, name)\n\n def map(self, mapstr, name=''):\n return self._iegen.add(mapstr, name)\n\n def codegen(self, schedule, statements=()):\n # This is where Omega comes in!\n omcode = self._iegen.to_omega(schedule.name)\n astcode = ''\n\n omcalc = '/usr/local/bin/omegacalc'\n if os.path.isfile(omcalc):\n from tools.system import run\n (out, err) = run(omcalc, omcode)\n if len(err) > 0:\n raise RuntimeError('Omega+ codegen failed with error: \\'%s\\'' % err)\n lines = list(filter(lambda line: '>>>' not in line, out.split(\"\\n\")))\n astcode = \"\\n\".join(lines)\n else: # Hardcode for now...\n if schedule.name == 'spmv':\n astcode = \"\"\"for(t1 = 0; t1 <= N_R-1; t1++) {\n for(t2 = index(t1); t2 <= index1(t1)-1; t2++) {\n s0(t1,t2);\n }\n}\"\"\"\n elif schedule.name == 'Icomp':\n astcode = \"\"\"for(t1 = 0; t1 <= intFloor(N_R-1,8); t1++) {\n for(t2 = 8*t1; t2 <= min(N_R-1,8*t1+7); t2++) {\n for(t3 = index(t1,t2); t3 <= index1(t1,t2)-1; t3++) {\n t4=col(t1,t2,t3);\n s0(t1,t2,t3,t4);\n }\n }\n}\"\"\"\n elif schedule.name == 'Iexec':\n astcode = \"\"\"for(t1 = 0; t1 <= intFloor(N_R-1,8); t1++) {\n for(t2 = b_index(t1); t2 <= b_index1(t1)-1; t2++) {\n for(t3 = 8*t1; t3 <= min(N_R-1,8*t1+7); t3++) {\n for(t4 = 8*b_col(t1,t2); t4 <= min(8*b_col(t1,t2)+7,N_C-1); t4++) {\n s0(t1,t2,t3,t4);\n }\n }\n }\n}\"\"\"\n\n # Reformat the code to look like ISCC output\n return self.reformat(schedule, astcode, statements)\n\n def reformat(self, schedule, code, statements=()):\n # Replace Omega statements (s0,...,sN) with macro names (e.g., spmv)\n for i in range(len(statements)):\n code = code.replace('s%d' % i, statements[i])\n\n # Replace built-in functions (e.g., min, max, etc.)\n builtins = {'intFloor': 'floord'}\n for builtin in builtins:\n code = code.replace(builtin, builtins[builtin])\n\n # Update iterators\n ieset = self._iegen[schedule.name]\n iters = ieset.iterators\n\n itype = 'itype'\n for i in range(len(iters)):\n olditer = 't%d' % (i+1)\n newiter = iters[i]\n pos = code.find(olditer)\n if pos >= 0:\n lhs = code[0:pos]\n rhs = code[pos+len(olditer):]\n code = lhs + itype + ' ' + newiter + rhs\n code = re.sub(r\"\\b%s\\b\" % olditer, newiter, code)\n\n # Replace uninterpreted functions...\n code = self.restoreUFs(code)\n\n # Finally, replace constants...\n code = self.restoreConsts(code)\n\n return code\n\n def restoreConsts(self, code):\n for const in self.constants:\n if len(const.value) > 0:\n code = re.sub(r\"\\b%s\\b\" % const.value, const.name, code)\n if re.search(r\"^[0-9]+$\", const.value):\n valm1 = str(int(const.value)-1)\n code = re.sub(r\"\\b%s\\b\" % valm1, '(%s-1)' % const.name, code)\n return code\n\n def restoreUFs(self, code):\n ufuncs = self._iegen.ufuncs\n for ufunc in ufuncs:\n ufname = ufunc['name']\n\n pos = code.find('%s(' % ufname)\n if pos >= 0:\n pos += len(ufname) + 1\n args = code[pos:code.find(')', pos + 1)]\n oldcall = '%s(%s)' % (ufname, args)\n arglist = args.split(',')\n\n renamed = len(ufunc['oldname']) > 0\n newargs = ufunc['arity'] > ufunc['oldarity'] and len(arglist) != ufunc['oldarity']\n\n if renamed:\n newcall = ufunc['oldname']\n else:\n newcall = ufname\n newcall += '['\n\n if renamed or newargs:\n if newargs:\n for i in range(ufunc['oldarity']):\n oldarg = ufunc['oldargs'][-i]\n if oldarg in ufunc['args']:\n ndx = ufunc['args'].index(oldarg)\n try:\n arg = arglist[ndx]\n except Exception as ex:\n stop=1\n newcall += arg + ','\n newcall = newcall.rstrip(',')\n 
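# keep only the trailing arguments that correspond to the uninterpreted function's original arity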
arglist = arglist[len(arglist) - ufunc['oldarity']:]\n\n if renamed:\n for i in range(len(arglist)):\n arglist[i] = ufunc['oldargs'][i].replace(ufunc['args'][i], arglist[i])\n newcall += ','.join(arglist)\n else:\n newcall += args\n newcall += ']'\n code = code.replace(oldcall, newcall)\n return code\n","sub_path":"lib/pdfg-ir/scripts/codegen/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":13995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59781520","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.sql.expression import literal\nfrom sqlalchemy import Unicode\nfrom tornado.web import HTTPError\n\nimport geo\n\nfrom base import BaseHandler, authenticated\nfrom note import BaseNoteHandler\n\nfrom model import Address, Note, Org, Orgtag, Event, \\\n org_address, event_address\n\n\n\nclass BaseAddressHandler(BaseHandler):\n def _get_address(self, address_id_string, options=None):\n address_id = int(address_id_string)\n \n query = self.orm.query(Address).\\\n filter_by(address_id=address_id)\n\n if options:\n query = query \\\n .options(*options)\n\n if not self.current_user:\n query = query \\\n .filter_by(public=True)\n\n try:\n address = query.one()\n except NoResultFound:\n raise HTTPError(404, \"%d: No such address\" % address_id)\n\n return address\n\n def _get_arguments(self):\n is_json = self.content_type(\"application/json\")\n postal = self.get_argument(\"postal\", json=is_json)\n source = self.get_argument(\"source\", json=is_json)\n lookup = self.get_argument(\"lookup\", None, json=is_json)\n manual_longitude = self.get_argument_float(\"manual_longitude\", None, json=is_json)\n manual_latitude = self.get_argument_float(\"manual_latitude\", None, json=is_json)\n public = self.get_argument_public(\"public\", json=is_json)\n return (postal, source, lookup, manual_longitude, manual_latitude, public)\n\n\n\nclass AddressHandler(BaseAddressHandler):\n def get(self, address_id_string):\n note_search = self.get_argument(\"note_search\", None)\n note_order = self.get_argument_order(\"note_order\", None)\n\n public = bool(self.current_user)\n\n if self.deep_visible():\n options = (\n joinedload(\"org_list\"),\n joinedload(\"event_list\"),\n joinedload(\"note_list\"),\n )\n else:\n options = (\n joinedload(\"org_list_public\"),\n joinedload(\"event_list_public\"),\n joinedload(\"note_list_public\"),\n )\n\n address = self._get_address(address_id_string, options=options)\n\n if self.deep_visible():\n org_list=address.org_list\n event_list=address.event_list\n note_list=address.note_list\n else:\n org_list=address.org_list_public\n event_list=address.event_list_public\n note_list=address.note_list_public\n\n org_list = [org.obj(public=public) for org in org_list]\n event_list = [event.obj(public=public) for event in event_list]\n note_list = [note.obj(public=public) for note in note_list]\n\n obj = address.obj(\n public=public,\n org_obj_list=org_list,\n event_obj_list=event_list,\n note_obj_list=note_list,\n )\n\n if self.accept_type(\"json\"):\n self.write_json(obj)\n else:\n self.render(\n 'address.html',\n obj=obj,\n note_search=note_search,\n note_order=note_order,\n entity_list=\"entity_list\",\n )\n\n @authenticated\n def put(self, address_id_string):\n address = self._get_address(address_id_string)\n\n postal, source, lookup, manual_longitude, manual_latitude, \\\n 
public = \\\n BaseAddressHandler._get_arguments(self)\n\n if address.postal == postal and \\\n address.source == source and \\\n address.lookup == lookup and \\\n address.manual_longitude == manual_longitude and \\\n address.manual_latitude == manual_latitude and \\\n address.public == public:\n self.redirect(self.next or address.url)\n return\n \n address.postal = postal\n address.source = source\n address.lookup = lookup\n address.manual_longitude = manual_longitude\n address.manual_latitude = manual_latitude\n address.public = public\n address.moderation_user = self.current_user\n\n address.geocode()\n self.orm.commit()\n self.redirect(self.next or address.url)\n\n\n\nclass AddressListHandler(BaseAddressHandler):\n def get(self):\n key = \"address:%s\" % [\"public\", \"all\"][self.deep_visible()]\n\n value = self.cache.get(key)\n if value:\n self.write(value)\n return\n\n address_list = self.orm.query(\n Address.address_id,\n func.coalesce(Address.latitude, Address.manual_latitude),\n func.coalesce(Address.longitude, Address.manual_longitude),\n ).filter(func.coalesce(\n Address.latitude, Address.manual_latitude,\n Address.longitude, Address.manual_longitude\n ) != None);\n\n org_list = address_list \\\n .join((org_address,\n Address.address_id == org_address.c.address_id)) \\\n .join((Org, Org.org_id == org_address.c.org_id)) \\\n .add_columns(Org.org_id, Org.name, literal(\"org\"))\n\n event_list = address_list \\\n .join((event_address,\n Address.address_id == event_address.c.address_id)) \\\n .join((Event, Event.event_id == event_address.c.event_id)) \\\n .add_columns(Event.event_id, Event.name, literal(\"event\"))\n\n today = datetime.datetime.now().date()\n event_list = event_list.filter(Event.start_date >= today)\n\n if not self.deep_visible():\n org_list = org_list.filter(Org.public==True)\n event_list = event_list.filter(Event.public==True)\n \n address_list = org_list.union(event_list)\n\n obj_list = []\n for result in address_list.all():\n obj_list.append(dict(zip(\n [\"address_id\", \"latitude\", \"longitude\", \"entity_id\", \"name\", \"entity\"],\n result)))\n\n value = self.dump_json(obj_list)\n self.cache.set(key, value)\n\n self.write(value)\n\n\n\nclass AddressLookupHandler(BaseAddressHandler):\n def get(self):\n is_json = self.content_type(\"application/json\")\n postal = self.get_argument(\"postal\", json=is_json)\n lookup = self.get_argument(\"lookup\", None, json=is_json)\n\n address = Address(postal, None, lookup)\n address.geocode()\n\n self.write_json(address.obj(public=bool(self.current_user)))\n\n\n\nclass AddressNoteListHandler(BaseAddressHandler, BaseNoteHandler):\n @authenticated\n def post(self, address_id_string):\n address = self._get_address(address_id_string)\n\n text, source, public = BaseNoteHandler._get_arguments(self)\n\n note = Note(text, source,\n moderation_user=self.current_user,\n public=public,\n )\n address.note_list.append(note)\n self.orm.commit()\n self.redirect(self.next or address.url)\n\n def get(self, address_id_string):\n address = self._get_address(address_id_string)\n self.next = address.url\n self.render(\n 'note.html',\n entity=address\n )\n\n\n\nclass AddressNoteHandler(BaseAddressHandler, BaseNoteHandler):\n @authenticated\n def put(self, address_id_string, note_id_string):\n address = self._get_address(address_id_string)\n note = self._get_note(note_id_string)\n if note not in address.note_list:\n address.note_list.append(note)\n self.orm.commit()\n self.redirect(self.next or address.url)\n\n @authenticated\n def delete(self, 
address_id_string, note_id_string):\n address = self._get_address(address_id_string)\n note = self._get_note(note_id_string)\n if note in address.note_list:\n address.note_list.remove(note)\n self.orm.commit()\n self.redirect(self.next or address.url)\n","sub_path":"handle/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495740032","text":"from __future__ import unicode_literals\n\nimport re\n\nfrom django import template\nfrom django.contrib.auth.models import User\nfrom django.template.context import RequestContext\nfrom django.utils.safestring import mark_safe\nfrom djblets.util.templatetags.djblets_js import json_dumps\n\nfrom reviewboard import get_version_string\nfrom reviewboard.admin.forms.change_form import ChangeFormFieldset\nfrom reviewboard.hostingsvcs.models import HostingServiceAccount\nfrom reviewboard.notifications.models import WebHookTarget\nfrom reviewboard.oauth.models import Application\nfrom reviewboard.reviews.models import DefaultReviewer, Group\nfrom reviewboard.scmtools.models import Repository\nfrom reviewboard.site.urlresolvers import local_site_reverse\n\n\nregister = template.Library()\n\n\n@register.inclusion_tag('admin/subnav_item.html', takes_context=True)\ndef admin_subnav(context, url_name, name, icon=\"\"):\n \"\"\"Return an
<li> containing a link to the desired setting tab.\"\"\"\n request = context.get('request')\n url = local_site_reverse(url_name, request=request)\n\n return RequestContext(\n request, {\n 'url': url,\n 'name': name,\n 'current': request is not None and url == request.path,\n 'icon': icon,\n })\n\n\n@register.inclusion_tag('admin/sidebar.html', takes_context=True)\ndef admin_sidebar(context):\n \"\"\"Render the admin sidebar.\n\n This includes the configuration links and setting indicators.\n \"\"\"\n request = context.get('request')\n\n request_context = {\n 'count_users': User.objects.count(),\n 'count_review_groups': Group.objects.count(),\n 'count_default_reviewers': DefaultReviewer.objects.count(),\n 'count_oauth_applications': Application.objects.count(),\n 'count_repository': Repository.objects.accessible(\n request.user, visible_only=False).count(),\n 'count_webhooks': WebHookTarget.objects.count(),\n 'count_hosting_accounts': HostingServiceAccount.objects.count(),\n 'version': get_version_string(),\n }\n\n return RequestContext(request, request_context)\n\n\n@register.simple_tag()\ndef process_result_headers(result_headers):\n \"\"\"Process a Django ChangeList's result headers to aid in rendering.\n\n This will provide better information for our template so that we can\n more effectively render a datagrid.\n\n Args:\n result_headers (list of dict):\n The result headers to modify.\n \"\"\"\n class_attrib_re = re.compile(r'\\s*class=\"([^\"]+)\"')\n\n for header in result_headers:\n m = class_attrib_re.match(header['class_attrib'])\n\n if m:\n class_value = m.groups(1)[0]\n else:\n class_value = ''\n\n if class_value != 'action-checkbox-column':\n class_value = 'has-label %s' % class_value\n\n header['class_attrib'] = \\\n mark_safe(' class=\"datagrid-header %s\"' % class_value)\n\n if header['sortable'] and header['sort_priority'] > 0:\n if header['ascending']:\n sort_order = 'asc'\n else:\n sort_order = 'desc'\n\n if header['sort_priority'] == 1:\n sort_priority = 'primary'\n else:\n sort_priority = 'secondary'\n\n header['sort_icon'] = 'datagrid-icon-sort-%s-%s' % (\n sort_order, sort_priority)\n\n return ''\n\n\n@register.simple_tag(takes_context=True)\ndef changelist_js_model_attrs(context):\n \"\"\"Return serialized JSON attributes for the RB.Admin.ChangeListPage model.\n\n These will all be passed to the :js:class:`RB.Admin.ChangeListPage`\n constructor.\n\n Args:\n context (django.template.Context):\n The context for the page.\n\n Returns:\n django.utils.safestring.SafeText:\n A string containing the JSON attributes for the page model.\n \"\"\"\n action_form = context.get('action_form')\n cl = context['cl']\n\n model_data = {\n 'modelName': cl.opts.verbose_name,\n 'modelNamePlural': cl.opts.verbose_name_plural,\n }\n\n if action_form is not None:\n action_choices = action_form.fields['action'].choices\n\n model_data['actions'] = [\n {\n 'id': action_id,\n 'label': action_label,\n }\n for action_id, action_label in action_choices\n if action_id\n ]\n\n return json_dumps(model_data)\n\n\n@register.filter\ndef change_form_fieldsets(admin_form):\n \"\"\"Iterate through all fieldsets in an administration change form.\n\n This will provide each field as a\n :py:class:`~reviewboard.admin.forms.change_form.ChangeFormFieldset`.\n\n Args:\n admin_form (django.contrib.admin.helpers.AdminForm):\n The administration form.\n\n Yields:\n reviewboard.admin.forms.change_form.ChangeFormFieldset:\n Each fieldset in the form.\n \"\"\"\n form = admin_form.form\n readonly_fields = 
admin_form.readonly_fields\n model_admin = admin_form.model_admin\n\n for name, options in admin_form.fieldsets:\n yield ChangeFormFieldset(form=form,\n name=name,\n readonly_fields=readonly_fields,\n model_admin=model_admin,\n **options)\n","sub_path":"reviewboard/admin/templatetags/rbadmintags.py","file_name":"rbadmintags.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588903060","text":"import models.form_models.analytic_form_model as a_f_m\nimport models.form_models.table_data_model as t_d_m\nimport datetime\nimport modules.chart_builders.static_chart_builder as s_c_b\nimport models.processing_settings.processing_settings as p_s\n\n# convert project log\ndef convert_project_analysis_log(log):\n model = a_f_m.AForm()\n model.add_row(\"Лог выполнения проекта\")\n row = model.get_last_row()\n table_data = t_d_m.TableData()\n table_data.init_headers([\n t_d_m.HeaderModel('Дата', 'date'),\n t_d_m.HeaderModel('Сообщение', 'message')\n ])\n table_data.init_model(log)\n row.add_cell(table_data)\n return model\n pass\n\n\n# convert project document\ndef convert_project_documents(document):\n try:\n if (document.defined == False):\n return None\n\n model = a_f_m.AForm()\n model.add_row(\"Документ - \" + document.original_file_name)\n model.init_document_info(document.type, document.column_count, document.original_file_name,\n document.documentPeriod, document.companyName, document.documentNumber,\n document.accountNumber, document.startPeriod, document.endPeriod, document.a_period,document.version)\n row = model.get_last_row()\n\n table_data = t_d_m.TableData()\n headers = []\n for h in document.current_header:\n if (h.name==\"AccountName\"):\n headers.append(t_d_m.HeaderModel(\"Тип счета\", h.name))\n else:\n headers.append(t_d_m.HeaderModel(h.pattern, h.name))\n\n # additional headers\n headers.append(t_d_m.HeaderModel(\"Тип данных\", \"TypeName\"))\n table_data.init_headers(headers)\n\n rows = []\n\n if (str(document.type.value).startswith('1')):\n for r in document.transactionDocument.transactions:\n r.analyticsCredit = '\\n'.join(str(e) for e in r.analyticsCredit)\n r.analyticsDebet = '\\n'.join(str(e) for e in r.analyticsDebet)\n r.document = '\\n'.join(str(e) for e in r.document)\n date_str = \"\"\n try:\n dt_obj = datetime.datetime(r.period.year, r.period.month, r.period.day)\n date_str = dt_obj.strftime('%d.%m.%Y')\n r.period = date_str\n except Exception as e:\n continue\n # tt =datetime.datetime.strptime(str(r.period), '%d.%m.%Y')\n if (r.valueCredit == \"\"):\n r.valueCredit = 0\n if (r.valueDebet == \"\"):\n r.valueDebet = 0\n\n r.valueDebet = '{:0,.2f}'.format(round(r.valueDebet, 2))\n r.valueCredit = '{:0,.2f}'.format(round(r.valueCredit, 2))\n\n r.style = \"text-xs-center\"\n # r.type_name = r.type_name\n rows.append(r)\n\n elif (str(document.type.value).startswith('2')):\n for r in document.osvDocument.rows:\n r.style = \"text-xs-center\"\n r.startPeriodBalanceDebet = '{:0,.2f}'.format(round(r.startPeriodBalanceDebet, 2))\n r.startPeriodBalanceCredit = '{:0,.2f}'.format(round(r.startPeriodBalanceCredit, 2))\n r.endPeriodBalanceDebet = '{:0,.2f}'.format(round(r.endPeriodBalanceDebet, 2))\n r.endPeriodBalanceCredit = '{:0,.2f}'.format(round(r.endPeriodBalanceCredit, 2))\n r.periodTransactionsDebet = '{:0,.2f}'.format(round(r.periodTransactionsDebet, 2))\n r.periodTransactionsCredit = '{:0,.2f}'.format(round(r.periodTransactionsCredit, 2))\n # r.type_name = r.type_name\n 
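# balances were formatted above; collect the finished row for the table model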
rows.append(r)\n\n table_data.init_model(rows)\n row.add_cell(table_data)\n return model\n\n except Exception as e:\n print(\"Error converting to document \" + str(e))\n return None\n\n\n# Common construction helpers\ndef init_table(row, headers, report):\n table_data = t_d_m.TableData()\n header_models = []\n for h in headers:\n header_models.append(t_d_m.HeaderModel(h[0], h[1]))\n\n table_data.init_headers(header_models)\n\n table_data.init_model(report.details)\n row.add_cell(table_data)\n\n\ndef init_chart(row, column_names, c_values, report, isJoin=False):\n for c in c_values:\n values = []\n\n for detail in report.details:\n f_val = getattr(detail, c[0])\n s_val = getattr(detail, c[1])\n if s_val < 0:\n continue\n v = [f_val, s_val]\n values.append(v)\n\n if isJoin:\n # collapse duplicate labels by summing their values\n u_list = []\n t_list = []\n for v in values:\n if v[0] not in u_list:\n u_list.append(v[0])\n\n for u_l in u_list:\n tt = sum([f[1] for f in values if f[0] == u_l])\n t_list.append([u_l, tt])\n values = t_list\n\n s_c_b.generate_balance_pie_charts(row, 'PieChart',\n c[2],\n 300,\n c[2], \"\", False, column_names, values)\n\n ##############################\n\n\n#######################\n\n\n\ndef convert_general_details(report):\n model = a_f_m.AForm()\n model.add_row(\"Графики\")\n row = model.get_last_row()\n\n # generate the pie charts\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['name', 'amount', 'Сумма задолженности (из графика исключены отрицательные значения)']\n # , ['name', 'percent_total_debt', '% от общей задолженности']\n ]\n # ,['name','percent_total','% от активов']\n init_chart(row, column_names, c_values, report)\n\n # generate the pie charts\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['status', 'amount', 'Статус']]\n\n init_chart(row, column_names, c_values, report, True)\n\n model.add_row(\"Данные\")\n row = model.get_last_row()\n\n table_headers = [\n ['Наименование', 'name'],\n ['Сумма задолженности', 'amount'],\n [\"% от общей задолженности\", \"percent_total_debt\"],\n [\"% от активов\", \"percent_total\"],\n [\"Статус\", \"status\"]\n ]\n init_table(row, table_headers, report)\n return model\n\n\ndef convert_receivable_details(report):\n model = a_f_m.AForm()\n model.add_row(\"Графики\")\n row = model.get_last_row()\n\n # generate the pie charts\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['name', 'amount', 'Сумма задолженности (из графика исключены отрицательные значения)']\n # , ['name', 'percent_total_debt', '% от общей задолженности']\n ]\n # ['name', 'percent_total', '% от активов']\n init_chart(row, column_names, c_values, report)\n\n # model.add_row(\"Графики периодов\")\n # row = model.get_last_row()\n\n # generate the pie charts\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['formed', 'amount', 'Период']]\n\n init_chart(row, column_names, c_values, report, True)\n\n model.add_row(\"Данные\")\n row = model.get_last_row()\n\n table_headers = [\n ['Наименование', 'name'],\n ['Сумма задолженности', 'amount'],\n [\"% от общей задолженности\", \"percent_total_debt\"],\n [\"% от активов\", \"percent_total\"],\n [\"Образована\", \"formed\"]\n ]\n init_table(row, table_headers, report)\n return model\n\n\ndef convert_product_details(report):\n model = a_f_m.AForm()\n model.add_row(\"Графики\")\n row = model.get_last_row()\n\n column_names = 
[['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['name', 'amount', 'Сумма задолженности (из графика исключены отрицательные значения)']\n # , ['name', 'percent', '% от задолженности']\n ]\n # ['name', 'turnover', 'Оборачиваемость']\n\n init_chart(row, column_names, c_values, report)\n\n # model.add_row(\"Графики ликвидности\")\n # row = model.get_last_row()\n\n column_names = [['string', 'Наименование'], ['number', 'Показатели']]\n c_values = [['type_name', 'amount', 'Ликвидность']]\n\n init_chart(row, column_names, c_values, report, True)\n\n model.add_row(\"Данные\")\n row = model.get_last_row()\n\n table_headers = [\n ['Наименование', 'name'],\n ['Сумма задолженности', 'amount'],\n [\"Процент задолженности\", \"percent\"],\n [\"Ликвидность\", 'type_name'],\n [\"Оборачиваемость\", \"turnover\"],\n [\"Общая оборачиваемость\", \"total_turnover\"]\n\n ]\n init_table(row, table_headers, report)\n return model\n\n\ndef convert_total_product_details(report):\n pass\n\n\ndef convert_balance_details(report):\n try:\n if len(report.details) > 0:\n r_type = type(report.details[0])\n import models\n r_d = r_type is models.analysis_models.balance_details_model.ReceivablesDetail\n g_d = r_type is models.analysis_models.balance_details_model.GeneralDetail\n p_d = r_type is models.analysis_models.balance_details_model.ProductDetail\n t_p_d = r_type is models.analysis_models.balance_details_model.TotalProductDetail\n\n if r_d:\n return convert_receivable_details(report)\n\n if g_d:\n return convert_general_details(report)\n\n if p_d:\n return convert_product_details(report)\n\n if t_p_d:\n return convert_total_product_details(report)\n return None\n except Exception as e:\n return None\n","sub_path":"modules/converters/form_converter.py","file_name":"form_converter.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297768868","text":"import os\nimport json\nimport sys\nimport time\nimport uuid\nimport collections\n\nfrom nltk.corpus import stopwords\n\nCOMMON_WORDS = set(stopwords.words('english'))\nDATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'data')\nOUTPUT_DIRECTORY = os.path.join(os.path.dirname(__file__), 'output')\n\ndef save_file(filename, data):\n random_str = uuid.uuid4().hex\n outfile = '%s_%s.txt'%(filename.split('.')[0], random_str)\n with open(os.path.join(OUTPUT_DIRECTORY, outfile), 'w') as outfile:\n outfile.write(data)\n\ndef get_word_counts(filename):\n proc = os.getpid()\n print('Processed %s with process id: %s'%(filename, proc))\n wordcount = collections.Counter()\n # get counts\n with open(os.path.join(DATA_DIRECTORY, filename), 'r', encoding='utf8') as f:\n for line in f:\n wordcount.update(line.split())\n for word in COMMON_WORDS:\n del wordcount[word]\n # save file\n save_file(filename, json.dumps(dict(wordcount.most_common(20))))\n # simulate long-running task\n time.sleep(2)\n\nif __name__ == '__main__':\n get_word_counts(sys.argv[1])","sub_path":"Asynchronous-Task-Queue/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532911024","text":"from moviepy.editor import *\nimport cv2\nimport numpy as np\nfrom moviepy.audio.fx import all\nfrom moviepy.video.fx.all import crop\nfrom matplotlib import pyplot as plt\nimport os\nimport glob\nfrom natsort import natsorted\n\nframe_dir = 
'./frames/'\nleft_frame_dir = './leftframes/'\nright_frame_dir = './rightframes/'\nd3eye_frame_dir = './3deyed/'\n\n\ndef get_all_file(path):\n for root, dirs, files in os.walk(path):\n return files\n\n# resize an image to the given dimensions\ndef resize_image(image, width, height):\n dim = (width, height)\n resize_image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\n return resize_image\n\ndef jpg2png(img, op):\n r_channel, g_channel, b_channel = cv2.split(img)\n\n img_RGBA = np.insert(\n img,\n 3, # position in the pixel value [ r, g, b, a <- index [3] ]\n op, # or 1 if you're going for a float data type as you want the alpha to be fully white otherwise the entire image will be transparent.\n axis=2, # this is the depth where you are inserting this alpha channel into\n )\n# print(cv2.split(img))\n return img_RGBA\n# scp zhz1.mp4 ubuntu@139.155.179.142:/home/ubuntu/superbrain/bletest/public\n# width2d,height2d = 1954,1080\n\nif __name__ == '__main__':\n frame_result = './frames/'\n\n filename = 'pngMask.png'\n width2d,height2d = 1958,1080\n\n frame_dir = './frames/'\n d3eye_dir = './3deyed/'\n sbs_dir = './sbs/'\n gif_name = 'frame_'\n fps = 30\n\n # read all files from directory\n files = get_all_file(frame_dir)\n counter = 0\n\n width3d,height3d = int(width2d/2),height2d\n maskname = 'pngMask.png'\n mask = cv2.imread(maskname,cv2.IMREAD_UNCHANGED)\n\n for filename in files:\n print('filename is :',frame_dir + filename)\n img_RGBA = cv2.imread(frame_dir + filename,cv2.IMREAD_UNCHANGED)\n height, width, channels = img_RGBA.shape\n # print('filename shape is :',img_RGBA.shape)\n\n img_3DRGBA = np.zeros((height3d, width3d, 4), dtype=np.uint8)\n img_3DRGBA[:,0:width3d-1] = img_RGBA[:,0:width3d-1] # img1\n # img2[:,0:newWidth-1] = img_RGBA[:,lfoffset:newWidth+lfoffset-1] # img2 is offset of img1\n # read the mask and merge it with the image\n img_RGBA = resize_image(img_3DRGBA,width2d,height2d)\n # img_RGBA = cv2.addWeighted(img_RGBA, 1, mask, 1, 0)\n img_RGBA = cv2.bitwise_or(img_RGBA, mask, mask = None)\n\n offset = width\n lfoffset = 8 # offset between the left and right images\n newWidth = int(offset-lfoffset) # new width of image\n\n img1 = np.zeros((height, width, 4), dtype=np.uint8)\n img2 = np.zeros((height, width, 4), dtype=np.uint8)\n\n img1[:,0:newWidth-1] = img_RGBA[:,0:newWidth-1] # img1\n img2[:,0:newWidth-1] = img_RGBA[:,lfoffset:newWidth+lfoffset-1] # img2 is offset of img1\n\n # img3d = img1+img2\n img3d = cv2.bitwise_and(img1, img2, mask = None)\n cv2.imwrite(sbs_dir + filename.replace(\"jpg\",\"png\") , img3d)\n # print('preprocess done! 
', filename.replace(\"jpg\",\"png\"))","sub_path":"dilireba/makevideo-3d.py","file_name":"makevideo-3d.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240003276","text":"import json\n\nimport pytest\n\nfrom lumigo_tracer.parsers.http_data_classes import HttpRequest\nfrom lumigo_tracer.parsers.parser import (\n ServerlessAWSParser,\n Parser,\n get_parser,\n ApiGatewayV2Parser,\n DynamoParser,\n)\n\n\ndef test_serverless_aws_parser_fallback_doesnt_change():\n url = \"https://kvpuorrsqb.execute-api.us-west-2.amazonaws.com\"\n headers = {\"nothing\": \"relevant\"}\n serverless_parser = ServerlessAWSParser().parse_response(url, 200, headers=headers, body=b\"\")\n root_parser = Parser().parse_response(url, 200, headers=headers, body=b\"\")\n serverless_parser.pop(\"ended\")\n root_parser.pop(\"ended\")\n assert serverless_parser == root_parser\n\n\ndef test_get_parser_check_headers():\n url = \"api.rti.dev.toyota.com\"\n headers = {\"x-amzn-requestid\": \"1234\"}\n assert get_parser(url, headers) == ServerlessAWSParser\n\n\ndef test_get_parser_apigw():\n url = \"https://ne3kjv28fh.execute-api.us-west-2.amazonaws.com/doriaviram\"\n assert get_parser(url, {}) == ApiGatewayV2Parser\n\n\ndef test_apigw_parse_response():\n parser = ApiGatewayV2Parser()\n headers = {\"apigw-requestid\": \"LY_66j0dPHcESCg=\"}\n\n result = parser.parse_response(\"dummy\", 200, headers, body=b\"\")\n\n assert result[\"info\"] == {\n \"messageId\": \"LY_66j0dPHcESCg=\",\n \"httpInfo\": {\n \"host\": \"dummy\",\n \"response\": {\n \"headers\": '{\"apigw-requestid\": \"LY_66j0dPHcESCg=\"}',\n \"body\": \"\",\n \"statusCode\": 200,\n },\n },\n }\n\n\ndef test_apigw_parse_response_with_aws_request_id():\n parser = ApiGatewayV2Parser()\n headers = {\n \"apigw-requestid\": \"LY_66j0dPHcESCg=\",\n \"x-amzn-requestid\": \"x-amzn-requestid_LY_66j0dPHcESCg=\",\n }\n\n result = parser.parse_response(\"dummy\", 200, headers, body=b\"\")\n\n assert result[\"info\"] == {\n \"messageId\": \"x-amzn-requestid_LY_66j0dPHcESCg=\",\n \"httpInfo\": {\n \"host\": \"dummy\",\n \"response\": {\n \"headers\": '{\"apigw-requestid\": \"LY_66j0dPHcESCg=\", \"x-amzn-requestid\": \"x-amzn-requestid_LY_66j0dPHcESCg=\"}',\n \"body\": \"\",\n \"statusCode\": 200,\n },\n },\n }\n\n\n@pytest.mark.parametrize(\n \"method, body, message_id\",\n [\n (\"GetItem\", {\"TableName\": \"resourceName\"}, None),\n (\n \"PutItem\",\n {\"TableName\": \"resourceName\", \"Item\": {\"key\": {\"S\": \"value\"}}},\n \"1ad3dccc8064a706957c2c06ce3796bb\",\n ),\n (\n \"DeleteItem\",\n {\"TableName\": \"resourceName\", \"Key\": {\"key\": {\"S\": \"value\"}}},\n \"1ad3dccc8064a706957c2c06ce3796bb\",\n ),\n (\n \"UpdateItem\",\n {\"TableName\": \"resourceName\", \"Key\": {\"key\": {\"S\": \"value\"}}},\n \"1ad3dccc8064a706957c2c06ce3796bb\",\n ),\n (\n \"BatchWriteItem\",\n {\"RequestItems\": {\"resourceName\": [{\"PutRequest\": {\"Item\": {\"key\": {\"S\": \"value\"}}}}]}},\n \"1ad3dccc8064a706957c2c06ce3796bb\",\n ),\n (\n \"BatchWriteItem\",\n {\n \"RequestItems\": {\n \"resourceName\": [{\"DeleteRequest\": {\"Key\": {\"key\": {\"S\": \"value\"}}}}]\n }\n },\n \"1ad3dccc8064a706957c2c06ce3796bb\",\n ),\n ],\n)\ndef test_dynamodb_parser_happy_flow(method, body, message_id):\n parser = DynamoParser()\n params = HttpRequest(\n host=\"\",\n method=\"POST\",\n uri=\"\",\n headers={\"x-amz-target\": f\"DynamoDB_20120810.{method}\"},\n body=json.dumps(body),\n )\n response = 
parser.parse_request(params)\n assert response[\"info\"][\"resourceName\"] == \"resourceName\"\n assert response[\"info\"][\"dynamodbMethod\"] == method\n assert response[\"info\"][\"messageId\"] == message_id\n\n\ndef test_dynamodb_parser_sad_flow():\n parser = DynamoParser()\n params = HttpRequest(\n host=\"\",\n method=\"POST\",\n uri=\"\",\n headers={\"x-amz-target\": \"DynamoDB_20120810.GetItem\"},\n body=\"not a json\",\n )\n response = parser.parse_request(params)\n assert response[\"info\"][\"resourceName\"] is None\n assert response[\"info\"][\"dynamodbMethod\"] == \"GetItem\"\n assert response[\"info\"][\"messageId\"] is None\n\n\ndef test_dynamodb_parser_sad_flow_unsupported_query():\n parser = DynamoParser()\n params = HttpRequest(\n host=\"\",\n method=\"POST\",\n uri=\"\",\n headers={\"x-amz-target\": \"DynamoDB_20120810.BatchWriteItem\"},\n body='{\"RequestItems\": {}}',\n )\n with pytest.raises(Exception):\n parser.parse_request(params)\n","sub_path":"src/test/unit/parsers/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331292822","text":"import json\nimport pickle\nfrom pyjarowinkler import distance\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer #词型还原\nfrom nltk.corpus import stopwords\nimport re\nimport string\n\n\nclass TextToVec:\n def __init__(self):\n super().__init__()\n self.my_stopwords = set(stopwords.words('english'))\n self.num_pattern = re.compile(r'\\d+')\n self.remove_punctuation = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n self.stemmer = PorterStemmer()\n self.lemmatizer = WordNetLemmatizer()\n self.word2vec_dict = load_pickle('../data/glove.word2vec.dict.pkl')\n\n def clean_text(self, str_info):\n str_lower = str_info.lower().strip()\n result = str_lower.translate(self.remove_punctuation)\n result = self.num_pattern.sub('', result)\n tokens = word_tokenize(result)\n result = [word for word in tokens if word not in self.my_stopwords]\n result = [self.lemmatizer.lemmatize(word) for word in result]\n # print(result)\n return result\n\n def get_vec(self, str_info):\n result = self.clean_text(str_info)\n data = []\n for word in result:\n data.append(self.word2vec_dict.get(word, np.zeros(300)).tolist())\n if len(data) == 0:\n data = np.zeros(300)\n else:\n data = np.mean(np.array(data), axis=0)\n return data\n\n\ndef load_json(path):\n with open(path, 'r') as f:\n return json.load(f)\n\n\ndef load_pickle(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\ndef save_json(data, path):\n with open(path, 'w') as f:\n json.dump(data, f, indent=4)\n\n\ndef save_pickle(data, path):\n with open(path, 'wb') as f:\n pickle.dump(data, f)\n\n\ndef clean_name(name):\n if name is None:\n return \"\"\n x = [k.strip() for k in name.lower().strip().replace(\".\", \"\").replace(\"-\", \" \").replace(\"_\", ' ').split()]\n full_name = '_'.join(x)\n return full_name\n\ndef get_name_index(target_name, name_list):\n scores = []\n for name in name_list:\n if name == '':\n scores.append(0)\n continue\n score = distance.get_jaro_distance(target_name, name, winkler=True, scaling=0.1)\n target_component = set(target_name.split('_'))\n name_component = set(name.split('_'))\n add_score = len(target_component & name_component) / len(target_component | name_component)\n score = 
score + add_score\n scores.append(score)\n # print('-'*50)\n # index = np.argsort(scores)\n # print(target_name)\n # print(np.array(name_list)[index][-5:])\n return np.argmax(scores)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"232410902","text":"# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE file in the project root for\n# full license information.\nimport os\nfrom opencensus.ext.azure import metrics_exporter\nfrom opencensus.stats import aggregation as aggregation_module\nfrom opencensus.stats import measure as measure_module\nfrom opencensus.stats import stats as stats_module\nfrom opencensus.stats import view as view_module\nfrom opencensus.tags import tag_map as tag_map_module\nimport azure_monitor\n\nstats = stats_module.stats\nview_manager = stats.view_manager\nstats_recorder = stats.stats_recorder\n\napp_insights_connection_string = os.environ[\"THIEF_APP_INSIGHTS_CONNECTION_STRING\"]\n\n\nclass MetricsReporter(object):\n def __init__(self):\n self.exporter = metrics_exporter.new_metrics_exporter(\n connection_string=app_insights_connection_string\n )\n self.exporter.add_telemetry_processor(azure_monitor.telemetry_processor_callback)\n view_manager.register_exporter(self.exporter)\n self.mmap = stats_recorder.new_measurement_map()\n self.tmap = tag_map_module.TagMap()\n\n def add_integer_measurement(self, python_name, metric_name, description, units):\n new_measure = measure_module.MeasureInt(metric_name, description, units)\n new_view = view_module.View(\n metric_name, description, [], new_measure, aggregation_module.LastValueAggregation()\n )\n view_manager.register_view(new_view)\n\n def new_setter(value):\n self.mmap.measure_int_put(new_measure, value)\n\n setattr(self, \"set_{}\".format(python_name), new_setter)\n\n def add_float_measurement(self, python_name, metric_name, description, units):\n new_measure = measure_module.MeasureFloat(metric_name, description, units)\n new_view = view_module.View(\n metric_name, description, [], new_measure, aggregation_module.LastValueAggregation()\n )\n view_manager.register_view(new_view)\n\n def new_setter(value):\n self.mmap.measure_float_put(new_measure, value)\n\n setattr(self, \"set_{}\".format(python_name), new_setter)\n\n def record(self):\n self.mmap.record(self.tmap)\n","sub_path":"python/device/azure_monitor_metrics.py","file_name":"azure_monitor_metrics.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434916545","text":"def get_sheets():\n return databaker.load('resource/table-a02.xls', table_name='seasonally adjusted')\n\ndef per_sheet(sheet):\n sheet.filter(\"MGSL\").assert_one().shift(DOWN).fill(RIGHT).fill(DOWN).filter(is_number).is_obs()\n\n gender = sheet.col('A').one_of(['Male', 'Female', 'All Persons'])\n gender.is_header('gender', UP)\n\n sheet.col('A').fill(DOWN).regex(\"...-... 
(?:19|20)\\d\\d\").is_header('times', LEFT, strict=True)\n\n sheet.regex(\"All aged .*\").is_header('ages', UP)\n\n indicator = sheet.filter(\"Total economically active\").fill(LEFT).fill(RIGHT)\n indicator.is_header('indicator', UP, strict=True)\n\n \"uk\".is_header('geography')\n\n","sub_path":"example-recipes/language-variants/004-is-header.py","file_name":"004-is-header.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326835702","text":"from bson import ObjectId\nfrom flask import Blueprint, request, jsonify\nfrom setting import MDB, RET\n\nuser_bp = Blueprint('user_bp', __name__)\n\n# 注册\n@user_bp.route('/reg', methods=['POST'])\ndef reg():\n user_info = request.form.to_dict()\n user_info['avatar'] = 'baba.jpg' if user_info.get(\"gender\") == \"2\" else \"mama.jpg\"\n user_info['bind_toys'] = []\n user_info['friend_list'] = []\n print(user_info)\n MDB.Users.insert_one(user_info)\n return jsonify(RET)\n\n# 登录\n@user_bp.route('/login', methods=['POST'])\ndef login():\n result = MDB.Users.find_one({'username': request.form.get('username')})\n if result:\n if request.form.get('password') == result.get('password'):\n result['_id'] = str(result.get('_id'))\n # 此处我用了nickname而非username,感觉用username有点突兀。\n RET['MSG'] = f\"欢迎{result.get('nickname')}\"\n RET['DATA'] = result\n return jsonify(RET)\n else:\n RET['CODE'] = 1\n RET['MSG'] = '用户名或者密码错误!请重新输入!'\n return jsonify(RET)\n else:\n RET['CODE'] = 1\n RET['MSG'] = '用户名或者密码错误!请重新输入!'\n return jsonify(RET)\n\n# 自动登录\n@user_bp.route('/auto_login', methods=['POST'])\ndef auto_login():\n user_id = request.form.get('_id')\n user_info = MDB.Users.find_one({'_id': ObjectId(user_id)})\n user_info['_id'] = user_id\n RET['MSG'] = f\"欢迎{user_info.get('nickname')}回来\"\n RET['DATA'] = user_info\n return jsonify(RET)\n\n","sub_path":"AIToyAdmin/Views/userviews.py","file_name":"userviews.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506335333","text":"import MEA\nimport SDGB\nimport STWGB\nimport SN\n\ndef build(problem):\n figures = problem.getFigures()\n A = figures.get(\"A\")\n B = figures.get(\"B\")\n C = figures.get(\"C\")\n\n A_B = MEA.computeDelta(A, B, False)\n A_C = MEA.computeDelta(A, C, False)\n\n aStructure = SDGB.build(A)\n\n a2bTransformGraph = STWGB.buildWithAllRules(B, A_B, A_C);\n a2cTransformGraph = STWGB.buildWithAllRules(C, A_B, A_C);\n return [SN(aStructure, a2bTransformGraph), SN(aStructure, a2cTransformGraph)]\n","sub_path":"omscs/kbai-7637/project2/rpm/KB.py","file_name":"KB.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"68580126","text":"from django import forms\nfrom djangolettings.models import BookingEnquiry\n\n\nclass BookingFormClient(forms.ModelForm):\n class Meta:\n model = BookingEnquiry\n fields = (\n 'name',\n 'address',\n 'invoicing_address',\n 'contact_name',\n 'telephone_number',\n 'email_address'\n )\n widgets = {\n 'address': forms.Textarea(attrs={'rows': 4}),\n 'invoicing_address': forms.Textarea(attrs={'rows': 4}),\n }\n\n\nclass BookingFormEvent(forms.ModelForm):\n class Meta:\n model = BookingEnquiry\n fields = (\n 'date_of_event',\n 'event_title',\n 'purchase_order_number',\n 'purpose_of_event',\n 'access_time',\n 'start_time',\n 'end_time',\n 'number_of_attendees',\n )\n\n\nclass 
BookingFormRooms(forms.ModelForm):\n class Meta:\n model = BookingEnquiry\n fields = (\n 'rooms',\n 'whole_building',\n )\n widgets = {\n 'rooms': forms.CheckboxSelectMultiple\n }\n\n def clean(self):\n cleaned_data = super(BookingFormRooms, self).clean()\n rooms = cleaned_data.get('rooms')\n whole_building = cleaned_data.get('whole_building')\n\n num_set = 0\n if rooms:\n num_set += 1\n if whole_building:\n num_set += 1\n\n if num_set != 1:\n self._errors['rooms'] = ['Required']\n self._errors['whole_building'] = ['Required']\n raise forms.ValidationError(\n 'You must either register an interest in booking '\n 'the whole building, or register an interest in '\n 'booking individual rooms.'\n )\n\n return cleaned_data\n\n\nclass BookingFormMessage(forms.ModelForm):\n class Meta:\n model = BookingEnquiry\n fields = ('message', )\n widgets = {\n 'message': forms.Textarea(attrs={'rows': 4}),\n }\n","sub_path":"djangolettings/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"636220176","text":"import cv2\nimport numpy as np\nfrom model.inference import gaussian_blur, get_max_preds, taylor\nfrom model.unet import get_pose_net\nimport torch\n\n\ndef convert_img(img):\n \"\"\"\n pytorch 的训练方式是通道优先\n 将 HWC 转为 CWH\n \"\"\"\n\n # H W C ---> C H W ----> C W H\n img = torch.from_numpy(img).float()\n if len(img.shape) == 2:\n img = img.unsqueeze(0)\n elif len(img.shape) == 3:\n img = img.transpose(0, 2).transpose(1, 2)\n\n return img\n\n\n# -----------定义图片预处理--------------------------\ndef load_and_convert_image(ori_img, input_imag_size=(256, 512), convert=True):\n img = cv2.resize(ori_img, input_imag_size)\n\n ori_img = np.array(ori_img)\n ori_img = torch.from_numpy(ori_img).float()\n img = np.array(img)\n if convert:\n img = convert_img(img)\n img = img - img.mean()\n img /= img.std()\n img /= img.max()\n\n return img.unsqueeze(dim=0), ori_img.unsqueeze(dim=0) # chw: channel height width\n\n\ndef get_pred(hm):\n coords, maxvals = get_max_preds(hm)\n\n # post-processing\n hm = gaussian_blur(hm, 11)\n hm = np.maximum(hm, 1e-10)\n hm = np.log(hm)\n for n in range(coords.shape[0]):\n for p in range(coords.shape[1]):\n coords[n, p] = taylor(hm[n][p], coords[n][p])\n\n preds = coords.copy()\n return preds, maxvals\n\n\n# --------------------模型预测-----------------------------\n# 使用模型对指定图片文件路径完成图像分类,返回值为预测的种类名称\ndef predict_image(model, input_map, ori_img):\n output = model(input_map)\n out_shape = output.shape\n pred, _ = get_pred(output.detach().numpy())\n\n ori_w = ori_img.shape[2]\n ori_h = ori_img.shape[1]\n out_w = out_shape[3]\n out_h = out_shape[2]\n\n batch_size = output.shape[0]\n norm = np.array([ori_w / out_w, ori_h / out_h]).reshape(batch_size, 1, 2)\n pred = pred * norm\n return pred\n\n\ndef load_model(model_path='static/model_best.pth'):\n model = get_pose_net(37)\n model.load_state_dict(torch.load(model_path, map_location='cpu'), strict=False)\n return model\n\n\nmodel = load_model()\n\n\ndef auto_get_points(img):\n input_map, ori_img = load_and_convert_image(img)\n\n # 得到关键点\n result = predict_image(model, input_map, ori_img)\n\n return result[0]\n","sub_path":"model/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"129601569","text":"import os\nfrom collections import OrderedDict, defaultdict\n\nfrom conans.errors import 
ConanException\nfrom conans.model.env_info import EnvValues, unquote\nfrom conans.model.info import ConanInfo\nfrom conans.model.options import OptionsValues\nfrom conans.model.profile import Profile\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.scope import Scopes\nfrom conans.paths import CONANINFO\nfrom conans.util.config_parser import ConfigParser\nfrom conans.util.files import load, mkdir\n\n\nclass ProfileParser(object):\n\n def __init__(self, text):\n self.vars = OrderedDict() # Order matters, if user declares F=1 and then FOO=12,\n # and in profile MYVAR=$FOO, it will\n self.includes = []\n self.profile_text = \"\"\n\n for counter, line in enumerate(text.splitlines()):\n if not line.strip() or line.strip().startswith(\"#\"):\n continue\n elif line.strip().startswith(\"[\"):\n self.profile_text = \"\\n\".join(text.splitlines()[counter:])\n break\n elif line.strip().startswith(\"include(\"):\n include = line.split(\"include(\", 1)[1]\n if not include.endswith(\")\"):\n raise ConanException(\"Invalid include statement\")\n include = include[:-1]\n self.includes.append(include)\n else:\n name, value = line.split(\"=\", 1)\n name = name.strip()\n if \" \" in name:\n raise ConanException(\"The names of the variables cannot contain spaces\")\n value = unquote(value)\n self.vars[name] = value\n\n def apply_vars(self, repl_vars):\n self.vars = self._apply_in_vars(repl_vars)\n self.includes = self._apply_in_includes(repl_vars)\n self.profile_text = self._apply_in_profile_text(repl_vars)\n\n def _apply_in_vars(self, repl_vars):\n tmp_vars = OrderedDict()\n for key, value in self.vars.items():\n for repl_key, repl_value in repl_vars.items():\n key = key.replace(\"$%s\" % repl_key, repl_value)\n value = value.replace(\"$%s\" % repl_key, repl_value)\n tmp_vars[key] = value\n return tmp_vars\n\n def _apply_in_includes(self, repl_vars):\n tmp_includes = []\n for include in self.includes:\n for repl_key, repl_value in repl_vars.items():\n include = include.replace(\"$%s\" % repl_key, repl_value)\n tmp_includes.append(include)\n return tmp_includes\n\n def _apply_in_profile_text(self, repl_vars):\n tmp_text = self.profile_text\n for repl_key, repl_value in repl_vars.items():\n tmp_text = tmp_text.replace(\"$%s\" % repl_key, repl_value)\n return tmp_text\n\n\ndef read_conaninfo_profile(current_path):\n conan_info_path = os.path.join(current_path, CONANINFO)\n if not os.path.exists(conan_info_path):\n return None\n existing_info = ConanInfo.load_file(conan_info_path)\n profile = Profile()\n profile.settings = OrderedDict(existing_info.full_settings.as_list())\n profile.options = existing_info.full_options\n profile.scopes = existing_info.scope\n profile.env_values = existing_info.env_values\n return profile\n\n\ndef get_profile_path(profile_name, default_folder, cwd, exists=True):\n def valid_path(profile_path):\n if exists and not os.path.isfile(profile_path):\n raise ConanException(\"Profile not found: %s\" % profile_path)\n return profile_path\n\n if os.path.isabs(profile_name):\n return valid_path(profile_name)\n\n if profile_name[:2] in (\"./\", \".\\\\\"): # local\n profile_path = os.path.abspath(os.path.join(cwd, profile_name))\n return valid_path(profile_path)\n\n if not os.path.exists(default_folder):\n mkdir(default_folder)\n profile_path = os.path.join(default_folder, profile_name)\n if exists:\n if not os.path.isfile(profile_path):\n profile_path = os.path.abspath(os.path.join(cwd, profile_name))\n if not os.path.isfile(profile_path):\n raise ConanException(\"Profile 
not found: %s\" % profile_name)\n return profile_path\n\n\ndef read_profile(profile_name, cwd, default_folder):\n \"\"\" Will look for \"profile_name\" in disk if profile_name is absolute path,\n in current folder if path is relative or in the default folder otherwise.\n return: a Profile object\n \"\"\"\n if not profile_name:\n return None, None\n\n profile_path = get_profile_path(profile_name, default_folder, cwd)\n text = load(profile_path)\n\n try:\n return _load_profile(text, profile_path, default_folder)\n except ConanException as exc:\n raise ConanException(\"Error reading '%s' profile: %s\" % (profile_name, exc))\n\n\ndef _load_profile(text, profile_path, default_folder):\n \"\"\" Parse and return a Profile object from a text config like representation.\n cwd is needed to be able to load the includes\n \"\"\"\n\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None\n profile_parser = ProfileParser(text)\n inherited_vars = profile_parser.vars\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile, declared_vars = read_profile(include, cwd, default_folder)\n inherited_profile.update(profile)\n inherited_vars.update(declared_vars)\n\n # Apply the automatic PROFILE_DIR variable\n if cwd:\n inherited_vars[\"PROFILE_DIR\"] = os.path.abspath(cwd)\n # Allows PYTHONPATH=$PROFILE_DIR/pythontools\n\n # Replace the variables from parents in the current profile\n profile_parser.apply_vars(inherited_vars)\n\n # Current profile before update with parents (but parent variables already applied)\n doc = ConfigParser(profile_parser.profile_text,\n allowed_fields=[\"build_requires\", \"settings\", \"env\",\n \"scopes\", \"options\"])\n\n # Merge the inherited profile with the readed from current profile\n _apply_inner_profile(doc, inherited_profile)\n\n # Return the intherited vars to apply them in the parent profile if exists\n inherited_vars.update(profile_parser.vars)\n return inherited_profile, inherited_vars\n\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))\n\n\ndef _load_single_build_require(profile, line):\n\n tokens = line.split(\":\", 1)\n if len(tokens) == 1:\n pattern, req_list = \"*\", line\n else:\n pattern, req_list = tokens\n req_list = [ConanFileReference.loads(r.strip()) for r in req_list.split(\",\")]\n profile.build_requires.setdefault(pattern, []).extend(req_list)\n\n\ndef _apply_inner_profile(doc, base_profile):\n \"\"\"\n\n :param doc: ConfigParser object from the current profile (excluding includes and vars,\n and with values already replaced)\n :param base_profile: Profile inherited, it's used as a base profile to modify it.\n :return: None\n \"\"\"\n\n def get_package_name_value(item):\n \"\"\"Parse items like package:name=value or name=value\"\"\"\n package_name = None\n if \":\" in item:\n tmp = item.split(\":\", 1)\n package_name, item = tmp\n\n name, value = item.split(\"=\", 1)\n name = name.strip()\n value = unquote(value)\n return package_name, name, value\n\n for setting in doc.settings.splitlines():\n setting = setting.strip()\n if setting and not setting.startswith(\"#\"):\n if \"=\" not in setting:\n raise ConanException(\"Invalid setting line '%s'\" % setting)\n package_name, name, value = get_package_name_value(setting)\n if package_name:\n base_profile.package_settings[package_name][name] = 
value\n else:\n base_profile.settings[name] = value\n\n if doc.build_requires:\n # FIXME CHECKS OF DUPLICATED?\n for req in doc.build_requires.splitlines():\n _load_single_build_require(base_profile, req)\n\n if doc.scopes:\n base_profile.update_scopes(Scopes.from_list(doc.scopes.splitlines()))\n\n if doc.options:\n base_profile.options.update(OptionsValues.loads(doc.options))\n\n base_profile.env_values.update(EnvValues.loads(doc.env))\n\n\ndef profile_from_args(profile, settings, options, env, scope, cwd, client_cache):\n \"\"\" Return a Profile object, as the result of merging a potentially existing Profile\n file and the args command-line arguments\n \"\"\"\n if profile is None:\n file_profile = client_cache.default_profile\n else:\n file_profile, _ = read_profile(profile, cwd, client_cache.profiles_path)\n args_profile = _profile_parse_args(settings, options, env, scope)\n\n if file_profile:\n file_profile.update(args_profile)\n return file_profile\n else:\n return args_profile\n\n\ndef _profile_parse_args(settings, options, envs, scopes):\n \"\"\" return a Profile object result of parsing raw data\n \"\"\"\n def _get_tuples_list_from_extender_arg(items):\n if not items:\n return []\n # Validate the pairs\n for item in items:\n chunks = item.split(\"=\", 1)\n if len(chunks) != 2:\n raise ConanException(\"Invalid input '%s', use 'name=value'\" % item)\n return [(item[0], item[1]) for item in [item.split(\"=\", 1) for item in items]]\n\n def _get_simple_and_package_tuples(items):\n \"\"\"Parse items like \"thing:item=value or item2=value2 and returns a tuple list for\n the simple items (name, value) and a dict for the package items\n {package: [(item, value)...)], ...}\n \"\"\"\n simple_items = []\n package_items = defaultdict(list)\n tuples = _get_tuples_list_from_extender_arg(items)\n for name, value in tuples:\n if \":\" in name: # Scoped items\n tmp = name.split(\":\", 1)\n ref_name = tmp[0]\n name = tmp[1]\n package_items[ref_name].append((name, value))\n else:\n simple_items.append((name, value))\n return simple_items, package_items\n\n def _get_env_values(env, package_env):\n env_values = EnvValues()\n for name, value in env:\n env_values.add(name, EnvValues.load_value(value))\n for package, data in package_env.items():\n for name, value in data:\n env_values.add(name, EnvValues.load_value(value), package)\n return env_values\n\n result = Profile()\n options = _get_tuples_list_from_extender_arg(options)\n result.options = OptionsValues(options)\n env, package_env = _get_simple_and_package_tuples(envs)\n env_values = _get_env_values(env, package_env)\n result.env_values = env_values\n settings, package_settings = _get_simple_and_package_tuples(settings)\n result.settings = OrderedDict(settings)\n for pkg, values in package_settings.items():\n result.package_settings[pkg] = OrderedDict(values)\n result.scopes = Scopes.from_list(scopes) if scopes else Scopes()\n return result\n","sub_path":"conans/client/profile_loader.py","file_name":"profile_loader.py","file_ext":"py","file_size_in_byte":11383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379845666","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('v1', '0061_make_info_unit_headings_linkable'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='eventpage',\n name='live_stream_url',\n 
field=models.URLField(blank=True, help_text=b'Format: https://www.ustream.tv/embed/video_id or https://www.youtube.com/embed/video_id.', verbose_name=b'URL', validators=[django.core.validators.RegexValidator(regex=b'^https?:\\\\/\\\\/www\\\\.(ustream\\\\.tv|youtube\\\\.com)\\\\/embed\\\\/.*$')]),\n ),\n ]\n","sub_path":"cfgov/v1/migrations/0062_modifying_video_player.py","file_name":"0062_modifying_video_player.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287998804","text":"import asyncio\nimport random\n\nimport discord\n\nfrom . import GameInterface, GameChannel, game_manager\n\n\nclass RussianRouletteGame(GameInterface):\n TITLE = '러시안룰렛'\n MIN_USER = 2\n MAX_USER = 6\n\n def __init__(self):\n GameInterface.__init__(self)\n self.COMMANDS = {'발사': self.shoot, '종료': self.end_game}\n self.busy = False\n self.gun = []\n self.user_list = []\n self.username_list = []\n self.user_index = 0\n\n async def start(self):\n self.busy = True\n self.on_member_changed()\n await self.show_next_turn()\n self.busy = False\n\n def on_member_changed(self):\n self.gun = [False for x in range(12)]\n self.gun[random.randint(0, 11)] = True\n self.user_index = 0\n self.username_list = list(self.users.keys())\n self.user_list = list(self.users.values())\n\n async def show_next_turn(self):\n await self.game_channel.channel.send(f'>>> 다음 차례는 <@{self.user_list[self.user_index].id}> 입니다.\\nㄱ발사 명령어로 시작하세요.')\n\n async def shoot(self, channel: GameChannel, query: list, msg: discord.Message):\n if self.busy or self != channel.running_game:\n return\n self.busy = True\n if msg.author.id != self.user_list[self.user_index].id:\n self.busy = False\n return\n await asyncio.sleep(random.randint(1, 3))\n if self.gun[0]:\n await self.game_channel.channel.send(f'<:tang:709200132922671106>')\n await self.game_channel.channel.send(f'>>> <@{self.user_list[self.user_index].id}> 가 죽었습니다.')\n\n del self.users[self.username_list[self.user_index]]\n self.on_member_changed()\n\n if len(self.user_list) == 1:\n await self.game_channel.channel.send(f'>>> 승자는 <@{self.user_list[0].id}> 입니다.\\nㄱ발사 명령어로 시작하세요.')\n self.busy = False\n await self.end_game(channel, query, msg)\n return\n await self.show_next_turn()\n else:\n await self.game_channel.channel.send(f'<:chulkuk:709200132381474890>')\n self.gun = self.gun[1:]\n self.user_index = (self.user_index + 1) % len(self.users)\n await self.show_next_turn()\n self.busy = False\n\n async def end_game(self, channel: GameChannel, query: list, msg: discord.Message):\n if self.busy or self != channel.running_game:\n return\n self.busy = True\n self.users = {}\n channel.running_game = None\n self.busy = False\n await msg.channel.send(f'>>> {self.TITLE}을 종료했습니다.')\n\n\ngame_manager.games['러시안룰렛'] = RussianRouletteGame\n","sub_path":"game/russian_roulette.py","file_name":"russian_roulette.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224308641","text":"\"\"\"\nDoduo/Doduo.matcher\n\nCompiles pattern blueprints from `Doduo/configs/` and handles\nall query matching requests.\n\"\"\"\n\nfrom yaml import load, YAMLError\n\nfrom Doduo.query import parse_query\nfrom Doduo.template import Template\nfrom Doduo.config import ROOT_DIR\nfrom Doduo import ConfigException, InvalidUsage\n\n\nclass Matcher:\n \"\"\"\n Singleton class instantiated once by the Flask server.\n Loads in all relevant blueprints from the 
provided config file\n and stores them in Matcher.templates. The Matcher class\n provides a match(...) function to query against the compiled\n templates.\n \"\"\"\n\n def __init__(self, config_file=None):\n \"\"\"\n Args:\n config_file (str): Config file to locate template blueprints in.\n If None, no templates will be loaded.\n Returns:\n None\n \"\"\"\n # By default, allow Matcher to be created with no preloaded templates.\n if config_file is None:\n self.templates = {}\n return\n\n # Attempt to load in and parse the Yaml config.\n try:\n with open(ROOT_DIR + \"configs/\" + config_file, \"r\") as f:\n self.config = load(f)\n except (YAMLError, FileNotFoundError) as exc:\n raise ConfigException(\n \"Config file '{}' not found or invalid YAML.\".format(\n config_file\n )\n )\n\n # Build a map in self.templates from template_id's to\n # to lists (lists of compiled patterns).\n self.templates = {}\n for template_id, patterns in self.config.items():\n self.templates[template_id] = [\n self.build_template(blueprint)\n for blueprint in patterns[\"patterns\"]\n ]\n\n def build_template(self, blueprint):\n \"\"\"\n Build a template's blueprint into Template class tree.\n\n Args:\n blueprint (dict): deep parsed dict/list/str object parsed\n from a YAML config.\n Returns:\n A deep Template tree composing the compiled blueprint.\n \"\"\"\n template_args = dict(blueprint)\n\n # Compile the blueprints for each of the children.\n children = []\n if \"children\" in template_args:\n children = [\n self.build_template(child)\n for child in template_args[\"children\"]\n ]\n template_args.pop(\"children\")\n\n # Attempt to build the Template object for this level of the blueprint,\n # providing the compiled children to include in the tree's child list.\n try:\n new_template = Template(children=children, **template_args)\n except TypeError:\n raise ConfigException(\"Invalid config option provided.\")\n\n return new_template\n\n def match(self, query, template_ids):\n \"\"\"\n Match against query against potential templates.\n Args:\n query (str): string to query against our templates.\n template_ids (list): list of strings specifying which templates\n to search through. If set to None, the\n search space will be all known templates.\n Returns:\n Generator of dictionaries. Each dictionary corresponds\n to results for a single sentence.\n \"\"\"\n\n # Recursively attempt to match a pattern against each location\n # in the tree.\n def __match(parsed, pattern):\n slots = []\n result = pattern.match(parsed)\n if result is not False:\n slots += result\n for child in parsed.children:\n result = __match(child, pattern)\n if result is not False:\n slots += result\n return slots\n\n # Parse the list of template_ids. If template_ids is set,\n # limit our search space to those IDs. 
Otherwise, search\n # across all templates.\n if template_ids is None:\n templates = list(self.templates.items())\n else:\n templates = []\n for id in template_ids:\n try:\n templates.append((id, self.templates[id]))\n except KeyError:\n raise ConfigException(\n \"Provided template ID {} not in config.\".format(id)\n )\n\n # Parse the user query and catch any invalid `query` values.\n try:\n parsed_query = parse_query(query)\n except (TypeError, ValueError):\n raise InvalidUsage(\"Invalid query body.\")\n\n for parsed, sentence in parsed_query:\n # Include metadata for this sentence's payload.\n results = {}\n results[\"__sentence__\"] = sentence\n results[\"__alternatives__\"] = {}\n\n for template_id, patterns in templates:\n # Build up a list of possible results\n # across each pattern for the given template.\n slots = []\n for t in patterns:\n match_results = __match(parsed, t)\n if match_results is not False:\n slots += match_results\n\n # Split list of possible results into\n # \"main\" result and a set of alternatives.\n if slots:\n results[template_id] = slots.pop(0)\n if slots:\n results[\"__alternatives__\"][template_id] = slots\n\n yield results\n","sub_path":"Doduo/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63961343","text":"from glob import glob\nimport numpy as np\nimport pandas as pd\nimport re\nfrom os.path import basename\nfrom scipy.stats import mannwhitneyu, rankdata\nfrom itertools import permutations, combinations\n\nDATA_DIR = 'data/quality_indicators'\n\nalgo_names = {'nsgaii': '\\\\nsga', 'pesa2': '\\\\pesa', 'spea2': '\\\\spea'}\n\nclass Experiment:\n def __init__(self, file):\n self.file = file\n self.values = np.loadtxt(file, delimiter = '\\n\\n', dtype=float)\n m = re.match(r'qi__(?P[^-]+)-(?P.+)-bytime-(?P
<li>{label}</li>') for label in names]\nresult = render(html('''\n      <ul>\n {items}\n </ul>\n'''))\n# '<ul><li>World</li><li>Universe</li></ul>'\n# end-before\nexpected = '<ul><li>World</li><li>Universe</li></ul>
      '\n","sub_path":"examples/usage/loopingA.py","file_name":"loopingA.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321193426","text":"from brave.overlays.text import TextOverlay\nfrom brave.overlays.effect import EffectOverlay\nfrom brave.overlays.clock import ClockOverlay\nfrom brave.abstract_collection import AbstractCollection\nfrom gi.repository import Gst\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger('brave.overlays')\n\n\nclass OverlayCollection(AbstractCollection):\n '''\n This is the collection of all created overlays.\n An overlay can be text or clock.\n '''\n\n def add(self, **args):\n args['id'] = self.get_new_id()\n\n if args['type'] == 'text':\n overlay = TextOverlay(**args, collection=self)\n elif args['type'] == 'effect':\n overlay = EffectOverlay(**args, collection=self)\n elif args['type'] == 'clock':\n overlay = ClockOverlay(**args, collection=self)\n else:\n raise Exception(f\"Invalid overlay type '{str(args['type'])}'\")\n\n self._items[args['id']] = overlay\n return overlay\n\n def ensure_overlays_are_correctly_connected(self, mixer):\n '''\n Ensure the provided mixer's pipeline contains the correct overlay elements.\n '''\n def _connect_overlays_once_blocked(*_):\n '''\n Called once the video is blocked, this arranges all the overlays in the pipeline.\n '''\n\n def getSortValue(overlay):\n return overlay.getSortValue()\n\n overlays = sorted(list(filter(lambda o: o.visible and o.mixer() == mixer, self._items.values())),\n key=getSortValue)\n\n if len(overlays) == 0:\n if not _link_if_not_already_linked(mixer.video_mixer_output_queue,\n mixer.end_capsfilter):\n mixer.logger.warn('Unable to connect from video mixer output queue to me')\n else:\n # The first should be linked to from the video mixer\n if not _link_if_not_already_linked(mixer.video_mixer_output_queue,\n overlays[0].element):\n overlays[0].logger.warn('Unable to connect from video mixer to me')\n\n # Connect the middle ones together:\n for n in range(len(overlays) - 1):\n if not _link_if_not_already_linked(overlays[n].element, overlays[n + 1].element):\n overlays[n].logger.warn('Unable to connect to the next overlay ' + str(overlays[n + 1]))\n\n # The last should be linked to the video mixer tee\n logger.debug('Now linking overlay %s to the video mixer tee' % overlays[-1].id)\n if not _link_if_not_already_linked(overlays[-1].element, mixer.end_capsfilter):\n overlays[-1].logger.warn('Unable to connect to the video mixer tee')\n\n #  Unblock everything\n for overlay in overlays:\n overlay.ensure_src_pad_not_blocked()\n\n logger.debug('Completed update of overlays, now unblocking')\n return Gst.PadProbeReturn.REMOVE\n\n # We block the video so that we can make changes to a live pipeline\n # As described at https://gstreamer.freedesktop.org/documentation/design/ ...\n # ... 
probes.html#dynamically-switching-an-element-in-a-playing-pipeline\n if mixer.get_state() in [Gst.State.PLAYING, Gst.State.PAUSED]:\n logger.debug('Overlays need updating, blocking pipeline temporarily')\n self.video_mixer_queue_src_pad = mixer.video_mixer_output_queue.get_static_pad('src')\n self.video_mixer_queue_src_pad_block_probe = self.video_mixer_queue_src_pad.add_probe(\n Gst.PadProbeType.BLOCK_DOWNSTREAM, _connect_overlays_once_blocked)\n else:\n _connect_overlays_once_blocked()\n\n\ndef _link_if_not_already_linked(element1, element2):\n\n # First, make sure element1 isn't linked to anything (except perhaps element2)\n element1_check = ensure_pad_not_linked(element1, 'src', correct_linked_element=element2)\n if not element1_check['success']:\n return False\n if element1_check['already_linked']:\n return True\n\n # Second, make sure element2 isn't linked to anything\n element2_check = ensure_pad_not_linked(element2, 'sink')\n if not element2_check['success']:\n return False\n\n # Finally, do the link\n logger.debug('Linking %s to %s' % (element1.get_name(), element2.get_name()))\n if not element1.link(element2):\n logger.warn('Cannot link %s to %s' % (element1.get_name(), element2.get_name()))\n return False\n return True\n\n\ndef ensure_pad_not_linked(element, pad_name, correct_linked_element=None):\n '''\n Given an element and pad name, ensures it is not linked to anything.\n Optionally, if correct_linked_element is provided, that element is permitted to be linked.\n '''\n pad = element.get_static_pad(pad_name)\n\n # TODO can we handle this nicer:\n if not pad and pad_name == 'sink':\n pad = element.get_static_pad('video_sink')\n\n if not pad:\n logger.warn('Cannot get %s pad of element %s to confirm it is not linked' % (pad_name, element.get_name()))\n return {'success': False}\n\n if not pad.is_linked():\n return {'success': True, 'already_linked': False}\n\n peer = pad.get_peer()\n if not peer:\n logger.warn('Cannot unlink %s of %s, no peer pad' % (pad_name, element.get_name()))\n return {'success': False}\n\n if correct_linked_element:\n peer_element = peer.get_parent_element()\n if peer_element == correct_linked_element:\n logger.debug('Already linked %s and %s, nothing to do' %\n (element.get_name(), correct_linked_element.get_name()))\n return {'success': True, 'already_linked': True}\n\n if pad_name == 'sink':\n unlink_response = peer.unlink(pad)\n else:\n unlink_response = pad.unlink(peer)\n if unlink_response:\n logger.debug('Unlinked %s pad of %s from %s' %\n (pad_name, element.get_name(), peer.get_parent_element().get_name()))\n return {'success': True, 'already_linked': False}\n\n logger.warn('Unable to unlink %s pad of %s' % (pad_name, element.get_name()))\n return {'success': False}\n","sub_path":"brave/overlays/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4429587","text":"from .geom import geom\n\nclass geom_abline(geom):\n\n DEFAULT_AES = {'color': 'black', 'linetype': 'solid',\n 'alpha': None, 'size': 1.0, 'x': None,\n 'y': None}\n REQUIRED_AES = {'slope', 'intercept'}\n DEFAULT_PARAMS = {}\n\n _aes_renames = {'linetype': 'linestyle', 'size': 'linewidth'}\n\n def plot(self, ax, data, _aes):\n params = self._get_plot_args(data, _aes)\n variables = _aes.data\n\n slope = self.params.get('slope', 1)\n intercept = self.params.get('intercept', 0)\n _aes['slope'] = slope\n _aes['intercept'] = intercept\n\n x = ax.get_xticks()\n y = 
ax.get_xticks() * slope + intercept\n # don't need the original params from the aesthetics\n del params['x']\n del params['y']\n if 'slope' in params:\n del params['slope']\n if 'intercept' in params:\n del params['intercept']\n ax.plot(x, y, **params)\n","sub_path":"ggplot/geoms/geom_abline.py","file_name":"geom_abline.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409837318","text":"def exp(x):\n\tdef topow(y):\n\t\treturn pow(y,x)\n\treturn topow\n\nsquare = exp(2)\ncube = exp(3)\n\n[print(n, 'square =', square(n)) for n in range(2,10)]\n[print(n, 'cube =', cube(n)) for n in range(2,10)]\n\nfrom time import time, sleep\ndef make_timer():\n\tlast_called = None\n\n\tdef elapsed():\n\t\tnonlocal last_called\n\t\tnow = time()\n\t\tif last_called is None:\n\t\t\tlast_called = now\n\t\t\treturn None\n\t\tresult = now - last_called\n\t\tlast_called = now\n\t\treturn result\n\n\treturn elapsed\n\nt1 = make_timer()\nprint(t1())\nsleep(2)\nprint(t1())\nsleep(5)\nprint(t1())\n\nt2 = make_timer()\nprint(t2())\nsleep(10)\nprint(t2())\nsleep(15)\nprint(t2())\n","sub_path":"beyond-the-basics/nested_functions/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110944370","text":"from ..source import URLSource\nfrom ..package import Package\nfrom ..util import target_arch\n\n\nclass NCurses(Package):\n version = '6.1-20180804'\n source = URLSource(f'https://invisible-mirror.net/archives/ncurses/current/ncurses-{version}.tgz', sig_suffix='.asc')\n validpgpkeys = ['C52048C0C0748FEE227D47A2702353E0F7E48EDB']\n\n def prepare(self):\n self.run_with_env([\n './configure',\n '--prefix=/usr',\n f'--host={target_arch().ANDROID_TARGET}',\n '--without-ada',\n '--enable-widec',\n '--without-shared',\n '--with-normal',\n '--without-debug',\n '--without-cxx-binding',\n '--enable-warnings',\n '--disable-stripping',\n ])\n\n def build(self):\n self.run(['make'])\n self.run(['make', 'install', f'DESTDIR={self.destdir()}'])\n","sub_path":"pybuild/packages/ncurses.py","file_name":"ncurses.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99980131","text":"\r\nprint(\"\\n\\n********** QUESTION: CALCULATE AVERAGE TURN AROUND TIME WITH FIRST COME FIRST SERVE (FCFS) **********\\n\\n\")\r\n\r\nprint(\"\\n\\n********** QUESTION: CALCULATE AVERAGE TURN AROUND TIME WITH SHORTEST JOB FIRST (SJF) **********\\n\\n\")\r\n\r\nimport threading\r\nimport time\r\nfrom queue import Queue\r\nprint_lock=threading.Lock()\r\n\r\ndef threads(both):\r\n time.sleep(0.002)\r\n with print_lock:\r\n print(threading.current_thread().name,both)\r\n\r\ndef threader():\r\n fcfs=q.get()\r\n sjf=q.get()\r\n threads(fcfs)\r\n q.task_done()\r\n threads(sjf)\r\n q.task_done()\r\n \r\nq=Queue()\r\nt=threading.Thread(target=threader)\r\nt.killer=True\r\nt.start()\r\nstart=time.time()\r\n\r\nata=[]\r\nbta=[]\r\ncta=[]\r\ncta1=[]\r\ntata=[]\r\ntata1=[]\r\n\r\ndef Cloning(li1):\r\n li_copy = li1[:]\r\n return li_copy\r\nn=int(input(\"enter no of processes\\t\"))\r\nprint(\"\\n\\n**********ENTER ARRIVAL AND BURST TIME**********\\n\\n\")\r\nfor i in range(n):\r\n at=int(input(\"enter arival time \\t\"))\r\n ata.append(at)\r\n ata1=Cloning(ata)\r\n ata2=Cloning(ata)\r\n for j in range(i,i+1):\r\n bt=int(input(\"enter burst time \\t\"))\r\n 
bta.append(bt)\r\n bta1=Cloning(bta)\r\n bta2=Cloning(bta)\r\n\r\n\r\ndef fcfs():\r\n print(\"\\n\\n**********COMPLETION TIME FOR FCFS**********\\n\\n\")\r\n for u in range(n):\r\n cta.append(0)\r\n q=min(ata1)\r\n mi = ata1.index(min(ata1))\r\n s=q+bta1[mi]\r\n cta[mi]=s\r\n ata1[mi]= float('inf')\r\n bta1[mi]= float('inf')\r\n l=len(ata1)\r\n while l-1:\r\n q1=min(ata1)\r\n mi1= ata1.index(min(ata1))\r\n s=s+bta1[mi1]\r\n cta[mi1]=s\r\n ata1[mi1]= float('inf')\r\n bta1[mi1]= float('inf')\r\n l-=1\r\n for t in range(n):\r\n print(\"completion time is\\t\",cta[t] )\r\n \r\n print(\"\\n\\n**********TURN AROUND TIME FOR FCFS**********\\n\\n\") \r\n for l in range(n):\r\n tat=cta[l]-ata[l]\r\n print(\"turn around time \\t\",tat)\r\n tata.append(tat)\r\n print(\"\\n\\n**********AVERAGE TURN AROUND TIME FOR FCFS**********\\n\\n\")\r\n atat=0\r\n for m in range(n):\r\n atat=atat+tata[m]\r\n print(\"\\naverage turn around time with FCFS\\t\",atat/n)\r\n\r\ndef sjf():\r\n print(\"\\n\\n**********COMPLETION TIME FOR SJF**********\\n\\n\")\r\n for u in range(n):\r\n cta1.append(0)\r\n q2=min(ata2)\r\n mi2 = ata2.index(min(ata2))\r\n s1=q2+bta2[mi2]\r\n cta1[mi2]=s1\r\n ata2[mi2]= float('inf')\r\n bta2[mi2]= float('inf')\r\n l=len(ata2)\r\n while l-1:\r\n q3=min(bta2)\r\n mi3= bta2.index(min(bta2))\r\n if set([q3 for q3 in bta2 if bta2.count(q3) > 1]):\r\n for ii in range(len(bta2)): \r\n if bta2[ii]==q3:\r\n if (ata2[ii]/', views.ExtendedUser.as_view(), name='extendeduser'),\n path('', views.user_site, name='user_site'),\n # path('user/', views.user_site, name='user_site'),\n # path('user/change_extended_user_info/', views.change_extended_user_info)\n\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517786312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 6 12:45:18 2019\n\n@author: venkatakarthikvadlamudi\n\"\"\"\n\n# import numpy, pandas and time\nimport numpy as np\nimport pandas as pd\n\n# visual libraries\nfrom matplotlib import pyplot as plt\nplt.style.use('ggplot')\n\n# sklearn libraries\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error \nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import svm\nfrom sklearn.linear_model import LinearRegression\n\n# Read the data in the CSV file using pandas\n# Change the path to the .csv file accordingly!\ndataframe = pd.read_csv('train.csv')\ntestframe = pd.read_csv('test.csv')\n# Filling NaN with mean in training dataframe\ndataframe .isnull().any().sum()\ndataframe.fillna(-0.2,inplace=True)\n# Filling NaN with mean in testing dataframe\ntestframe .isnull().any().sum()\ntestframe.fillna(testframe.mean(),inplace=True)\n# Creating a dataframe to store target of training dataframe\ntarget = pd.DataFrame(dataframe['target'])\n# Creating a dataframe to store id of training dataframe\nunique_id = pd.DataFrame(dataframe['id'])\n# Creating a dataframe to store span of training dataframe\nspan = dataframe['span']\n# Extracting only features by dropping target and id from training dataframe\nfeatures = dataframe.drop(['target','id'], axis = 1)\nfeatures_array = features.values\n# Extracting only target of training dataframe\ntarget_array = target.values\nX_train = features_array\ny_train = target_array\n# Extracting 
only features by dropping id from testing dataframe\nX_test = testframe.drop(['id'], axis = 1)\nX_test = X_test.values\n\n# Standard scaler for scaling the data\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Defining classifer, here we have used RBF kernel. We could change the values of C and the kernel function and see the results\nclassifier = svm.SVC(C=4,kernel='sigmoid',gamma = \"scale\")\n#classifier = LinearRegression()\n# Encoding labels (this is required as the code was cribbing otherwise. There seems to be string data somewhere)\nlab_enc = LabelEncoder()\ny_train = y_train.ravel()\ny_train = lab_enc.fit_transform(y_train)\nprint(\"TRAINING START\")\n# Fitting the curve (learning)\nclassifier.fit(X_train,y_train)\n# Predicting the target (testing)\nprint(\"TESTING START\")\npredicted = classifier.predict(X_test)\npredicted = -pd.DataFrame(lab_enc.fit_transform(predicted))\n# Scaling the predicted target\nmmsc = MinMaxScaler(feature_range=(-1, 1))\npredicted = mmsc.fit_transform(predicted)\npredicted = mmsc.transform(predicted)\noutput=np.array(predicted)\npd.DataFrame(output).to_csv(\"file5.csv\")\nprint(\"Done\")\n# Check the values of predicted which are the predicted target values. We cannot validate the predicted target values.","sub_path":"forex_2.py","file_name":"forex_2.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406404805","text":"from Tkinter import *\nimport itertools\nimport os\nimport binascii\nimport mnemonic\nimport random\n\n\nclass App:\n def __init__(self, master):\n frame = Frame(master)\n frame.pack()\n self.inputa_label = Label(frame, text=\"Input a list of bip39 words with spaces between them:\")\n self.inputa_label.pack()\n self.entry_seed = Entry(frame, bd=3, width=80)\n self.entry_seed.pack()\n self.button_shuffle = Button(frame, text=\"Shuffle words\", command=self.mix_words)\n self.button_shuffle.pack()\n self.result_description = Label(frame, text=\"In this order, your words are a valid bip39 seed:\")\n self.result_description.pack()\n self.result_label = Label(frame, text=\"\")\n self.result_label.pack()\n self.slogan = Button(frame,\n text=\"Force valid bip39 order\",\n command=self.main_validate)\n self.slogan.pack()\n self.button_quit = Button(frame,\n text=\"QUIT\", fg=\"red\",\n command=frame.quit)\n\n self.button_quit.pack()\n\n def main_validate(self):\n \"\"\"Checks for a valid bip39 seed from the given list of words.\"\"\"\n self.result_label['text'] = ''\n seed_input = []\n seed_input = self.entry_seed.get().split()\n m = mnemonic.Mnemonic('english')\n for subset in itertools.permutations(seed_input, len(seed_input)):\n if len(subset) == len(seed_input):\n if m.check(' '.join(subset)):\n if subset != seed_input:\n self.result_label['text'] = ' '.join(subset)\n else:\n self.result_label['text'] = \"There was a problem with the words you gave, maybe they are not on the bip39 word list or the number of words does not work.\"\n break # found a valid one, stop looking.\n\n def mix_words(self):\n \"\"\"Shuffles the words in the entry field.\"\"\"\n seed_input = []\n seed_input = self.entry_seed.get().split()\n # print(seed_input)\n shuffled = random.sample(seed_input, len(seed_input))\n # print(shuffled)\n self.entry_seed.delete(0, END)\n self.entry_seed.insert(0, \" \".join(shuffled))\n\n\nroot = Tk()\nroot.title(\"Force39\")\napp = 
App(root)\nroot.mainloop()\n","sub_path":"force39.py","file_name":"force39.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401916892","text":"import json\nimport math\nimport os\nfrom scipy import optimize\n\nimport numpy as np\nimport torch\n\nfrom src.lif import lif_compute, spike_binary, id_synaptic_waveform, \\\n gaussian_kernel\nfrom src.ou_process import ouprocess_gaussian\n\n\nclass Layer:\n \n def __init__(self, num_neurons, std_noise=25.0):\n self.NUM_NEURONS = num_neurons\n self.tau_V = 10\n self.R = 1 # MOhm\n self.EL = -70.0\n self.V_th = -40.0\n self.std_noise = std_noise\n \n self.W = np.zeros((self.NUM_NEURONS, 1))\n self.train_input = None\n self.train_exp_output = None\n\n self.v_E = 0.0\n self.v_ave = -67.0\n\n self.tau_rise = 0.5\n self.tau_fall = 5.0\n self.syn_kernel_len = 50.0\n\n self._ETA = None\n\n def spike(self, i_inj, dt, t_stop, int_noise_regen=True):\n tt = np.arange(0.0, t_stop, dt)\n V = np.zeros((tt.shape[0], self.NUM_NEURONS)) # Membrane potential per neuron\n\n # Additive noise to individual neurons\n if self._ETA is None \\\n or int_noise_regen is True\\\n or self._ETA.shape != V.shape:\n self._ETA, _ = ouprocess_gaussian(5.0, dt, t_stop, self.NUM_NEURONS)\n\n F_binary = np.zeros((tt.shape[0], self.NUM_NEURONS))\n # avg_firing_rate = np.zeros(self.NUM_NEURONS)\n\n I_total = self.std_noise*self._ETA + i_inj\n\n V = lif_compute(I_total, self.R, self.tau_V, self.V_th, dt)\n F_binary = spike_binary(V)\n\n syn_waveform = id_synaptic_waveform(\n dt,\n self.syn_kernel_len,\n self.tau_rise,\n self.tau_fall)\n syn_wave_len = syn_waveform.shape[0]\n\n F_synaptic = np.zeros(F_binary.shape)\n # TODO: VECTORIZE\n for neuron in range(0, self.NUM_NEURONS):\n fr_fast = np.convolve(F_binary[:,neuron], syn_waveform)\n F_synaptic[:, neuron] = fr_fast[:-syn_wave_len+1]\n \n return V, F_binary, F_synaptic\n\n def output(self, i_inj, dt, t_stop, int_noise_regen=True):\n V, F_binary, F_synaptic = self.spike(i_inj, dt, t_stop, int_noise_regen=True)\n t_steps = F_binary.shape[0]\n\n ind_neur = np.arange(0, self.NUM_NEURONS)\n Phi = F_synaptic[:t_steps, ind_neur]\n X2 = -1.0*self.v_ave*np.ones((t_steps,ind_neur.shape[0])) + self.v_E\n\n A = np.multiply(Phi, X2)\n out = np.dot(A, self.W)\n\n return out, V, F_binary, F_synaptic\n\n def firing_rate(self, F_binary, dt, t_stop, grad=False):\n torch.set_grad_enabled(grad)\n\n tt = np.arange(0.0, t_stop, dt)\n\n gauss_kernel = gaussian_kernel(dt, 25.0)\n gauss_kernel_len = gauss_kernel.shape[0]\n\n # CONVOLUTION EXPLAINED\n # kernel:\n # out_channels = NUM_NEURONS\n # in_channels / groups = 1\n # kernel_size = gauss_kernel_len\n # input:\n # minibatch = 1\n # in_channels = NUM_NEURONS\n # input_size = len(tt)\n # => groups = NUM_NEURONS (do SAME convolution SEPARATELY over each neuron spike train)\n\n # inst_firing_rate = torch.zeros(F_binary.shape, device=self._device)\n gauss_kernel_tensor = torch.as_tensor(gauss_kernel).repeat(\n self.NUM_NEURONS, 1, 1)\n pad = math.ceil(gauss_kernel_len / 2.0)\n\n convolved_spikes = torch.nn.functional.conv1d(\n torch.as_tensor(F_binary).t()[None, :, :],\n gauss_kernel_tensor,\n groups=self.NUM_NEURONS,\n padding=pad)\n\n inst_firing_rate = convolved_spikes[0, :, :tt.shape[0]].t()\n\n return inst_firing_rate\n\n def train(self, i_inj, exp_output, dt, t_stop):\n _, _, _, F_synaptic = self.output(i_inj, dt, t_stop)\n\n t_steps = exp_output.shape[0]\n \n ind_neur = np.arange(0, self.NUM_NEURONS)\n Phi 
= F_synaptic[:t_steps, ind_neur]\n X2 = -1.0*self.v_ave*np.ones((t_steps,ind_neur.shape[0])) + self.v_E\n\n A = np.multiply(Phi, X2)\n self.W, residuals, rank, s = np.linalg.lstsq(A, exp_output)\n # print(self.W.shape)\n # self.W, residuals = optimize.nnls(A, exp_output.flatten())\n # self.W = self.W[:, None]\n self.train_input = i_inj\n self.train_exp_output = exp_output\n \n def as_dict(self):\n props_dict = {}\n props_dict['NUM_NEURONS'] = self.NUM_NEURONS\n props_dict['tau_V'] = self.tau_V\n props_dict['R'] = self.R\n props_dict['EL'] = self.EL\n props_dict['V_th'] = self.V_th\n props_dict['std_noise'] = self.std_noise\n\n props_dict['v_E'] = self.v_E\n props_dict['v_ave'] = self.v_ave\n\n props_dict['tau_rise'] = self.tau_rise\n props_dict['tau_fall'] = self.tau_fall\n props_dict['syn_kernel_len'] = self.syn_kernel_len\n\n return props_dict\n\n @classmethod\n def from_dict(cls, in_dict: dict) -> 'Layer':\n NUM_NEURONS = in_dict['NUM_NEURONS']\n tau_V = in_dict['tau_V']\n R = in_dict['R']\n EL = in_dict['EL']\n V_th = in_dict['V_th']\n std_noise = in_dict['std_noise']\n\n v_E = in_dict['v_E']\n v_ave = in_dict['v_ave']\n\n tau_rise = in_dict['tau_rise']\n tau_fall = in_dict['tau_fall']\n syn_kernel_len = in_dict['syn_kernel_len']\n\n layer = cls(NUM_NEURONS, std_noise)\n layer.tau_V = tau_V\n layer.R = R\n layer.EL = EL\n layer.V_th = V_th\n layer.std_noise = std_noise\n\n layer.v_E = v_E\n layer.v_ave = v_ave\n\n layer.tau_rise = tau_rise\n layer.tau_fall = tau_fall\n layer.syn_kernel_len = syn_kernel_len\n\n return layer\n\n def save(self, path, layer_name):\n LAYER_ATTRS_JSON = layer_name + \"_attrs.json\"\n LAYER_WEIGHTS_NPZ = layer_name + \"_weights.npz\"\n\n with open(os.path.join(path, LAYER_ATTRS_JSON), 'w') as outfile:\n json.dump(self.as_dict(), outfile)\n\n np.savez(open(os.path.join(path, LAYER_WEIGHTS_NPZ), 'wb'),\n W=self.W,\n train_input=self.train_input,\n train_exp_output=self.train_exp_output)\n\n @classmethod\n def load(cls, path: str, layer_name: str) -> 'Layer':\n in_dict = {}\n LAYER_ATTRS_JSON = layer_name + \"_attrs.json\"\n LAYER_WEIGHTS_NPZ = layer_name + \"_weights.npz\"\n\n with open(os.path.join(path, LAYER_ATTRS_JSON), 'r') as infile:\n in_dict = json.load(infile)\n layer = cls.from_dict(in_dict)\n\n data = np.load(open(os.path.join(path, LAYER_WEIGHTS_NPZ), 'rb'))\n layer.W = data['W']\n layer.train_input = data['train_input']\n layer.train_exp_output = data['train_exp_output']\n\n return layer\n\nclass PropogationNetwork(Layer):\n\n def __init__(self, depth, num_neurons, std_noise=25.0):\n super().__init__(num_neurons, std_noise)\n\n self.depth = depth\n\n def output(self, i_inj, dt, t_stop, int_noise_regen=True):\n out = i_inj\n V = None\n F_binary = None\n F_synaptic = None\n\n list_outs = []\n list_V = []\n list_F_binary = []\n list_F_synaptic = []\n\n for layer_num in range(self.depth):\n out, V, F_binary, F_synaptic =\\\n super().output(out, dt, t_stop, int_noise_regen=True)\n \n list_outs.append([out])\n list_V.append([V])\n list_F_binary.append([F_binary])\n list_F_synaptic.append([F_synaptic])\n \n return list_outs, list_V, list_F_binary, list_F_synaptic\n\n def as_dict(self):\n props_dict = super().as_dict()\n props_dict['depth'] = self.depth\n\n return props_dict\n\n @classmethod\n def from_layer(cls, layer: Layer, depth: int) -> 'PropogationNetwork':\n prop_ntwrk = PropogationNetwork(depth, layer.NUM_NEURONS, layer.std_noise)\n\n prop_ntwrk.tau_V = layer.tau_V\n prop_ntwrk.R = layer.R\n prop_ntwrk.EL = layer.EL\n prop_ntwrk.V_th = layer.V_th\n 
\n        prop_ntwrk.W = layer.W\n        prop_ntwrk.train_input = layer.train_input\n        prop_ntwrk.train_exp_output = layer.train_exp_output\n\n        prop_ntwrk.v_E = layer.v_E\n        prop_ntwrk.v_ave = layer.v_ave\n\n        prop_ntwrk.tau_rise = layer.tau_rise\n        prop_ntwrk.tau_fall = layer.tau_fall\n        prop_ntwrk.syn_kernel_len = layer.syn_kernel_len\n        \n        prop_ntwrk._ETA = layer._ETA\n\n        return prop_ntwrk\n\n    @classmethod\n    def from_dict(cls, in_dict: dict) -> 'PropogationNetwork':\n        NUM_NEURONS = in_dict['NUM_NEURONS']\n        depth = in_dict['depth']\n        tau_V = in_dict['tau_V']\n        R = in_dict['R']\n        EL = in_dict['EL']\n        V_th = in_dict['V_th']\n        std_noise = in_dict['std_noise']\n\n        v_E = in_dict['v_E']\n        v_ave = in_dict['v_ave']\n\n        tau_rise = in_dict['tau_rise']\n        tau_fall = in_dict['tau_fall']\n        syn_kernel_len = in_dict['syn_kernel_len']\n\n        network = cls(depth, NUM_NEURONS, std_noise)\n        network.tau_V = tau_V\n        network.R = R\n        network.EL = EL\n        network.V_th = V_th\n        network.std_noise = std_noise\n\n        network.v_E = v_E\n        network.v_ave = v_ave\n\n        network.tau_rise = tau_rise\n        network.tau_fall = tau_fall\n        network.syn_kernel_len = syn_kernel_len\n\n        return network\n\n    @classmethod\n    def load(cls, path: str, layer_name: str) -> 'PropogationNetwork':\n        return super().load(path, layer_name)\n\nclass _FullyConnectedLayer(Layer):\n\n    def __init__(self, num_neurons, std_noise=25.0):\n        super().__init__(num_neurons, std_noise=std_noise)\n        self.W = np.zeros((self.NUM_NEURONS, self.NUM_NEURONS))\n\nclass FullyConnectedLayerApprox(_FullyConnectedLayer):\n\n    @classmethod\n    def from_layer(cls, layer: Layer) -> 'FullyConnectedLayerApprox':\n        prop_ntwrk = FullyConnectedLayerApprox(layer.NUM_NEURONS, layer.std_noise)\n\n        prop_ntwrk.tau_V = layer.tau_V\n        prop_ntwrk.R = layer.R\n        prop_ntwrk.EL = layer.EL\n        prop_ntwrk.V_th = layer.V_th\n        \n        prop_ntwrk.W = np.random.normal(np.mean(layer.W), np.std(layer.W), prop_ntwrk.W.shape)\n        prop_ntwrk.train_input = layer.train_input\n        prop_ntwrk.train_exp_output = layer.train_exp_output\n\n        prop_ntwrk.v_E = layer.v_E\n        prop_ntwrk.v_ave = layer.v_ave\n\n        prop_ntwrk.tau_rise = layer.tau_rise\n        prop_ntwrk.tau_fall = layer.tau_fall\n        prop_ntwrk.syn_kernel_len = layer.syn_kernel_len\n        \n        prop_ntwrk._ETA = layer._ETA\n\n        return prop_ntwrk\n\nclass FullyConnectedLayer(_FullyConnectedLayer):\n\n    def train(self, i_inj, exp_output, dt, t_stop):\n        _, _, _, F_synaptic = self.output(i_inj, dt, t_stop)\n\n        t_steps = exp_output.shape[0]\n        \n        ind_neur = np.arange(0, self.NUM_NEURONS)\n        Phi = F_synaptic[:t_steps, ind_neur]\n        X2 = -1.0*self.v_ave*np.ones((t_steps,ind_neur.shape[0])) + self.v_E\n\n        A = np.multiply(Phi, X2)\n        self.W, residuals, rank, s = np.linalg.lstsq(A, exp_output)\n        self.train_input = i_inj\n        self.train_exp_output = exp_output","sub_path":"src/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":11112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"105989294","text":"import math\r\n\r\nclass Piece:\r\n    def __init__(self):\r\n        self.image=None\r\n        self.xCoord=None\r\n        self.yCoord=None\r\n        self.owner=None\r\n    \r\n\r\n    def setImage(self,img):\r\n        self.image=img\r\n\r\n    def setOwner(self,player):\r\n        self.owner=player\r\n    \r\n    def setCoords(self,x,y):\r\n        self.xCoord=x\r\n        self.yCoord=y\r\n\r\n    def manhatDist(self,x,y): # gives the manhattan distance from the piece to the given coordinates\r\n        xdiff = abs(self.xCoord-x)\r\n        ydiff = abs(self.yCoord-y)\r\n        return xdiff+ydiff\r\n\r\nclass Pawn(Piece):\r\n\r\n    pieceType='Pawn'\r\n\r\n    def __init__(self):\r\n        self.hasMoved=0\r\n        self.lastMoveSize=0\r\n\r\n    def 
isLegalMove(self,x,y,piece):\r\n        if(x<0 or y<0 or x>7 or y>7): #if move is out of bounds return false\r\n            return False\r\n        elif(self.owner=='BLACK'): # set rules for movement of black pawn\r\n            if(self.hasMoved==0 and x==self.xCoord and (y==(self.yCoord+1) or y==(self.yCoord+2)) and piece==None):#on its first move, a pawn can move 1 or 2 spaces forward onto an empty square\r\n                self.hasMoved=1\r\n                self.lastMoveSize=y-self.yCoord\r\n                return True\r\n            elif(x==self.xCoord and y==(self.yCoord+1) and piece==None): #pawn can move one space forward\r\n                self.lastMoveSize=1\r\n                self.hasMoved=1\r\n                return True\r\n            elif(y==(self.yCoord+1) and (x==(self.xCoord-1) or x==(self.xCoord+1)) and piece!=None and piece.owner=='WHITE'):#pawn can take pieces forward and diagonal from it\r\n                self.hasMoved=1\r\n                self.lastMoveSize=1\r\n                return True\r\n            elif(y==(self.yCoord+1) and (x==(self.xCoord-1) or x==(self.xCoord+1)) and piece!=None and piece.pieceType=='Pawn' and piece.lastMoveSize==2 and piece.owner=='WHITE'): # en passant rule\r\n                self.lastMoveSize=1\r\n                self.hasMoved=1\r\n                return True\r\n            else:\r\n                return False\r\n        else: #piece is a white pawn same logic as above but y direction is reversed\r\n            if(self.hasMoved==0 and x==self.xCoord and (y==(self.yCoord-1) or y==(self.yCoord-2)) and piece==None):\r\n                self.hasMoved=1\r\n                self.lastMoveSize=self.yCoord-y\r\n                return True\r\n            elif(x==self.xCoord and y==(self.yCoord-1) and piece==None):\r\n                self.lastMoveSize=1\r\n                self.hasMoved=1\r\n                return True\r\n            elif(y==(self.yCoord-1) and (x==(self.xCoord-1) or x==(self.xCoord+1)) and piece!=None and piece.owner=='BLACK'):\r\n                self.lastMoveSize=1\r\n                self.hasMoved=1\r\n                return True\r\n            elif(y==(self.yCoord-1) and (x==(self.xCoord-1) or x==(self.xCoord+1)) and piece!=None and piece.pieceType=='Pawn' and piece.lastMoveSize==2 and piece.owner=='BLACK'):\r\n                self.lastMoveSize=1\r\n                self.hasMoved=1\r\n                return True\r\n            else:\r\n                return False\r\n\r\nclass Rook(Piece):\r\n    pieceType='Rook'\r\n    \r\n\r\n    def isLegalMove(self,x,y,piece):\r\n        if(x>7 or y>7 or x<0 or y<0): #makes sure click is in bounds\r\n            return False\r\n        elif(piece==None or piece.owner!=self.owner): # if the space is empty or occupied by an opposing piece\r\n            if((x==self.xCoord and y!=self.yCoord) or (x!=self.xCoord and y==self.yCoord)): #if the move only changes one coordinate\r\n                return True\r\n        return False\r\n\r\nclass Horse(Piece):\r\n    pieceType='Horse'\r\n\r\n\r\n    def isLegalMove(self,x,y,piece): # makes sure click is in bounds\r\n        if(x>7 or y>7 or x<0 or y<0):\r\n            return False\r\n        elif(piece==None or piece.owner!=self.owner): # if space is occupied by enemy or nothing\r\n            if(self.manhatDist(x,y)==3 and x!=self.xCoord and y!=self.yCoord): # knights must move such that the manhattan distance of the move is 3\r\n                return True\r\n        return False\r\n\r\nclass Bishop(Piece):\r\n    pieceType='Bishop'\r\n\r\n\r\n    def isLegalMove(self,x,y,piece): \r\n        if(x>7 or y>7 or x<0 or y<0):\r\n            return False\r\n        elif(piece==None or piece.owner!=self.owner):\r\n            if(self.manhatDist(x,y)%2==0 and self.manhatDist(x,y)>0 and x!=self.xCoord and y!=self.yCoord and (abs(self.xCoord-x)==abs(self.yCoord-y))): #the manhattan distance must be a multiple of 2, both coordinates must change and in the same amount\r\n                return True\r\n        return False\r\n\r\nclass Queen(Piece):\r\n    pieceType='Queen'\r\n\r\n\r\n    def isLegalMove(self,x,y,piece):\r\n        if(x>7 or y>7 or x<0 or y<0):\r\n            return False\r\n        elif(piece==None or piece.owner!=self.owner):\r\n            if((x==self.xCoord and y!=self.yCoord) or (x!=self.xCoord and y==self.yCoord)): # if the move is purely horizontal or vertical like a rook\r\n                return True\r\n            elif(self.manhatDist(x,y)%2==0 and self.manhatDist(x,y)>0 and x!=self.xCoord and y!=self.yCoord and (abs(self.xCoord-x)==abs(self.yCoord-y))): # if move is diagonal like a bishop\r\n                return True\r\n        return False\r\n\r\nclass King(Piece):\r\n    
pieceType='King'\n\n\n def isLegalMove(self,x,y,piece):\n if(x>7 or y>7 or x<0 or y<0):\n return False\n elif(piece==None or piece.owner!=self.owner):\n if(((x==self.xCoord and y!=self.yCoord) or (x!=self.xCoord and y==self.yCoord)) and self.manhatDist(x,y)==1): # if move is horizontal or vertical and only 1 square away\n return True\n elif(self.manhatDist(x,y)%2==0 and self.manhatDist(x,y)==2 and x!=self.xCoord and y!=self.yCoord):# if move is diagonal and 2 squares away\n return True\n return False\n \n","sub_path":"piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23648860","text":"import random\nimport sys\nimport os\n\n\ndef quicksort(l, left, right):\n if left >= right:\n return\n i, j = left, right\n pivot = l[random.randint(left, right)]\n\n while i <= j:\n while l[i] < pivot: i += 1\n while l[j] > pivot: j -= 1\n if i <= j:\n l[i], l[j] = l[j], l[i]\n i, j = i + 1, j - 1\n quicksort(l, left, j)\n quicksort(l, i, right)\n\n\ndefault = \"CheckNumbs.txt\"\nif len(sys.argv) > 1:\n k = sys.argv[1]\n if os.path.exists(k):\n default = k\na = []\nwith open(default, \"r\") as inf:\n for x in inf:\n x = x.strip().split()\n for y in x:\n a.append(int(y))\nquicksort(a, 0, len(a)-1)\nprint(a)","sub_path":"Lab1_2.py","file_name":"Lab1_2.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423777237","text":"from flask import (\n Blueprint, render_template, request, g, session\n)\n\nbp = Blueprint('welcome', __name__, url_prefix='/')\n\n@bp.before_app_request\ndef load_previous_save():\n save_game = session.get('save_game')\n\n if save_game is None:\n g.save_game = None\n else:\n g.save_game = save_game\n\n@bp.route('/')\ndef welcome():\n return render_template('/welcome.html')\n","sub_path":"sheol-adventure/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48744707","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/cgcloud_Crypto/PublicKey/_slowmath.py\n# Compiled at: 2016-11-22 15:21:45\n__doc__ = 'Pure Python implementation of the RSA-related portions of Crypto.PublicKey._fastmath.'\n__revision__ = '$Id$'\n__all__ = [\n 'rsa_construct']\nimport sys\nif sys.version_info[0] == 2 and sys.version_info[1] == 1:\n from cgcloud_Crypto.Util.py21compat import *\nfrom cgcloud_Crypto.Util.number import inverse\n\nclass error(Exception):\n pass\n\n\nclass _RSAKey(object):\n\n def has_private(self):\n return hasattr(self, 'd')\n\n\ndef rsa_construct(n, e, d=None, p=None, q=None, u=None):\n \"\"\"Construct an RSAKey object\"\"\"\n assert isinstance(n, long)\n assert isinstance(e, long)\n assert isinstance(d, (long, type(None)))\n assert isinstance(p, (long, type(None)))\n assert isinstance(q, (long, type(None)))\n assert isinstance(u, (long, type(None)))\n obj = _RSAKey()\n obj.n = n\n obj.e = e\n if d is None:\n return obj\n else:\n obj.d = d\n if p is not None and q is not None:\n obj.p = p\n obj.q = q\n else:\n assert False\n if u is not None:\n obj.u = u\n else:\n obj.u = inverse(obj.p, obj.q)\n return 
obj","sub_path":"pycfiles/cgdat-2.4.2-py3-none-any/_slowmath.py","file_name":"_slowmath.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87345417","text":"from tkinter import *\nfrom list import*\nimport webbrowser\ndef immediately(event,data):\n \n if(event.widget==data.lb):\n index = data.lb.curselection()\n name = data.lb.get(index)\n list = data.linksDict.items()\n print(index,name)\n for element in list:\n if element[0]==name:\n data.htmlLink = element[1]\n webbrowser.open(data.htmlLink, new=2)\n \n \n print(data.htmlLink)\n\ndef init(data,master,linkslist,linksAndText,numberOfLinks):\n data.lb = Listbox(master,height=20,font=\"Myriad\",width=45)\n data.links = linkslist\n data.linksDict = linksAndText\n data.frontEndLinks = list(linksAndText.keys())\n data.label1 = Label(master,text = \"Node Names Matching Search Query\",font = (\"Myriad\",18))\n data.label2 = Label(master,text = \"Links Matching Search Query\",font = (\"Myriad\",18))\n data.label1.grid(row =0,column=0)\n data.label2.grid(row=4,column=0)\n data.lb.grid(row=5,column=0,rowspan=3)\n data.htmlLink=\"\"\n \n\n fillList(data.frontEndLinks,data.lb,data.links,numberOfLinks)\ndef mousePressed(event, data):\n if event.widget==(data.lb):\n \n data.lb.bind('<>', immediately(event,data))\n \ndef redrawAll(canvas, data):\n pass\n \n\n\ndef run1(master,linkslist,dict1,numberOfLinks,width=600, height=400):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill='white', width=0)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n \n \n \n # Set up data and call init\n class Struct(object): pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100 # milliseconds\n init(data,master,linkslist,dict1,numberOfLinks)\n # create the root and the canvas\n\n screen_width = int(master.winfo_screenwidth())\n screen_height = int(master.winfo_screenheight())\n #quitButton = Button(master,text=\"See WikiLinks For this Node\",command=print(\"hi\"))\n\n # placing the button on my window\n #quitButton.pack(ipadx=0, ipady=0)\n \n \n \n canvas = Canvas(master, width=data.width, height=data.height)\n canvas.grid()\n # set up events\n master.bind(\"\", lambda event:\n mousePressedWrapper(event, canvas, data))\n master.bind(\"\", lambda event:\n keyPressedWrapper(event, canvas, data))\n #timerFiredWrapper(canvas, data)\n \n # and launch the app\n \n #master.mainloop() # blocks until window is closed\n print(\"bye!\")\n\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"508085570","text":"\"\"\"\nVarious utilities and helpers.\n\"\"\"\nimport datetime as dt\nimport functools\nimport inspect\nimport json\nimport logging\nimport os\nimport platform\nimport re\nimport warnings\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Any, Union, Tuple, Callable\n\nimport requests\nfrom deprecated import deprecated\n\nlogger = logging.getLogger(__name__)\n\n\nclass Rfc3339:\n \"\"\"\n Formatter for dates according to RFC-3339.\n\n Parses date(time)-like input and formats according to RFC-3339. 
Some examples:\n\n    >>> rfc3339.date(\"2020:03:17\")\n    \"2020-03-17\"\n    >>> rfc3339.date(2020, 3, 17)\n    \"2020-03-17\"\n    >>> rfc3339.datetime(\"2020/03/17/12/34/56\")\n    \"2020-03-17T12:34:56Z\"\n    >>> rfc3339.datetime([2020, 3, 17, 12, 34, 56])\n    \"2020-03-17T12:34:56Z\"\n    >>> rfc3339.datetime(2020, 3, 17)\n    \"2020-03-17T00:00:00Z\"\n    >>> rfc3339.datetime(datetime(2020, 3, 17, 12, 34, 56))\n    \"2020-03-17T12:34:56Z\"\n\n    Or just normalize (automatically preserve date/datetime resolution):\n\n    >>> rfc3339.normalize(\"2020/03/17\")\n    \"2020-03-17\"\n    >>> rfc3339.normalize(\"2020-03-17-12-34-56\")\n    \"2020-03-17T12:34:56Z\"\n\n    Also see https://tools.ietf.org/html/rfc3339#section-5.6\n    \"\"\"\n    # TODO: currently we hard code timezone 'Z' for simplicity. Add real time zone support?\n    _FMT_DATE = '%Y-%m-%d'\n    _FMT_TIME = '%H:%M:%SZ'\n    _FMT_DATETIME = _FMT_DATE + \"T\" + _FMT_TIME\n\n    _regex_datetime = re.compile(r\"\"\"\n        ^(?P<year>\d{4})[:/_-](?P<month>\d{2})[:/_-](?P<day>\d{2})[T :/_-]?\n        (?:(?P<hour>\d{2})[:/_-](?P<minute>\d{2})(?:[:/_-](?P<second>\d{2}))?)?\"\"\", re.VERBOSE)\n\n    def __init__(self, propagate_none: bool = False):\n        self._propagate_none = propagate_none\n\n    def datetime(self, x: Any, *args) -> Union[str, None]:\n        \"\"\"\n        Format given date(time)-like object as RFC-3339 datetime string.\n        \"\"\"\n        if args:\n            return self.datetime((x,) + args)\n        elif isinstance(x, dt.datetime):\n            return self._format_datetime(x)\n        elif isinstance(x, dt.date):\n            return self._format_datetime(dt.datetime.combine(x, dt.time()))\n        elif isinstance(x, str):\n            return self._format_datetime(dt.datetime(*self._parse_datetime(x)))\n        elif isinstance(x, (tuple, list)):\n            return self._format_datetime(dt.datetime(*(int(v) for v in x)))\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    def date(self, x: Any, *args) -> Union[str, None]:\n        \"\"\"\n        Format given date-like object as RFC-3339 date string.\n        \"\"\"\n        if args:\n            return self.date((x,) + args)\n        elif isinstance(x, (dt.date, dt.datetime)):\n            return self._format_date(x)\n        elif isinstance(x, str):\n            return self._format_date(dt.datetime(*self._parse_datetime(x)))\n        elif isinstance(x, (tuple, list)):\n            return self._format_date(dt.datetime(*(int(v) for v in x)))\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    def normalize(self, x: Any, *args) -> Union[str, None]:\n        \"\"\"\n        Format given date(time)-like object as RFC-3339 date or date-time string depending on given resolution\n\n        >>> rfc3339.normalize(\"2020/03/17\")\n        \"2020-03-17\"\n        >>> rfc3339.normalize(\"2020/03/17/12/34/56\")\n        \"2020-03-17T12:34:56Z\"\n        \"\"\"\n        if args:\n            return self.normalize((x,) + args)\n        elif isinstance(x, dt.datetime):\n            return self.datetime(x)\n        elif isinstance(x, dt.date):\n            return self.date(x)\n        elif isinstance(x, str):\n            x = self._parse_datetime(x)\n            return self.date(x) if len(x) <= 3 else self.datetime(x)\n        elif isinstance(x, (tuple, list)):\n            return self.date(x) if len(x) <= 3 else self.datetime(x)\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    def parse_date(self, x: Union[str, None]) -> Union[dt.date, None]:\n        \"\"\"Parse given string as RFC3339 date.\"\"\"\n        if isinstance(x, str):\n            return dt.datetime.strptime(x, '%Y-%m-%d').date()\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    def parse_datetime(self, x: Union[str, None]) -> Union[dt.datetime, None]:\n        \"\"\"Parse given string as RFC3339 date-time.\"\"\"\n        if isinstance(x, str):\n            return dt.datetime.strptime(x, 
'%Y-%m-%dT%H:%M:%SZ')\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    def parse_date_or_datetime(self, x: Union[str, None]) -> Union[dt.date, dt.datetime, None]:\n        \"\"\"Parse given string as RFC3339 date or date-time.\"\"\"\n        if isinstance(x, str):\n            if len(x) > 10:\n                return self.parse_datetime(x)\n            else:\n                return self.parse_date(x)\n        elif x is None and self._propagate_none:\n            return None\n        raise ValueError(x)\n\n    @classmethod\n    def _format_datetime(cls, d: dt.datetime) -> str:\n        \"\"\"Format given datetime as RFC-3339 date-time string.\"\"\"\n        assert d.tzinfo is None, \"timezone handling not supported (TODO)\"\n        return d.strftime(cls._FMT_DATETIME)\n\n    @classmethod\n    def _format_date(cls, d: dt.date) -> str:\n        \"\"\"Format given date as RFC-3339 date string.\"\"\"\n        return d.strftime(cls._FMT_DATE)\n\n    @classmethod\n    def _parse_datetime(cls, s: str) -> Tuple[int]:\n        \"\"\"Try to parse string to a date(time) tuple\"\"\"\n        try:\n            return tuple(int(v) for v in cls._regex_datetime.match(s).groups() if v is not None)\n        except Exception:\n            raise ValueError(\"Can not parse as date: {s}\".format(s=s))\n\n\n# Default RFC3339 date-time formatter\nrfc3339 = Rfc3339()\n\n\n@deprecated(\"Use `rfc3339.normalize`, `rfc3339.date` or `rfc3339.datetime` instead\")\ndef date_to_rfc3339(d: Any) -> str:\n    \"\"\"\n    Convert date-like object to a RFC 3339 formatted date string\n\n    see https://tools.ietf.org/html/rfc3339#section-5.6\n    \"\"\"\n    return rfc3339.normalize(d)\n\n\ndef dict_no_none(*args, **kwargs):\n    \"\"\"\n    Helper to build a dict containing given key-value pairs where the value is not None.\n    \"\"\"\n    return {\n        k: v\n        for k, v in dict(*args, **kwargs).items()\n        if v is not None\n    }\n\n\ndef first_not_none(*args):\n    \"\"\"Return first item from given arguments that is not None.\"\"\"\n    for item in args:\n        if item is not None:\n            return item\n    raise ValueError(\"No not-None values given.\")\n\n\ndef ensure_dir(path: Union[str, Path]) -> Path:\n    \"\"\"Create directory if it doesn't exist.\"\"\"\n    path = Path(path)\n    if not path.exists():\n        path.mkdir(parents=True, exist_ok=True)\n    assert path.is_dir()\n    return path\n\n\ndef ensure_list(x):\n    \"\"\"Convert given data structure to a list.\"\"\"\n    try:\n        return list(x)\n    except TypeError:\n        return [x]\n\n\ndef get_temporal_extent(*args,\n                        start_date: Union[str, dt.datetime, dt.date] = None,\n                        end_date: Union[str, dt.datetime, dt.date] = None,\n                        extent: Union[list, tuple] = None,\n                        convertor=rfc3339.normalize\n                        ) -> Tuple[Union[str, None], Union[str, None]]:\n    \"\"\"\n    Helper to derive a date extent from various call forms:\n\n    >>> get_temporal_extent(\"2019-01-01\")\n    (\"2019-01-01\", None)\n    >>> get_temporal_extent(\"2019-01-01\", \"2019-05-15\")\n    (\"2019-01-01\", \"2019-05-15\")\n    >>> get_temporal_extent([\"2019-01-01\", \"2019-05-15\"])\n    (\"2019-01-01\", \"2019-05-15\")\n    >>> get_temporal_extent(start_date=\"2019-01-01\", end_date=\"2019-05-15\")\n    (\"2019-01-01\", \"2019-05-15\")\n    >>> get_temporal_extent(extent=[\"2019-01-01\", \"2019-05-15\"])\n    (\"2019-01-01\", \"2019-05-15\")\n    \"\"\"\n    if args:\n        assert start_date is None and end_date is None and extent is None\n        if len(args) == 2:\n            start_date, end_date = args\n        elif len(args) == 1:\n            arg = args[0]\n            if isinstance(arg, (list, tuple)):\n                start_date, end_date = arg\n            else:\n                start_date, end_date = arg, None\n        else:\n            raise ValueError('Unable to handle {a!r} as a date range'.format(a=args))\n    elif extent:\n        assert start_date is None and end_date 
is None\n start_date, end_date = extent\n return convertor(start_date) if start_date else None, convertor(end_date) if end_date else None\n\n\nclass TimingLogger:\n \"\"\"\n Context manager for quick and easy logging of start time, end time and elapsed time of some block of code\n\n Usage example:\n\n >>> with TimingLogger(\"Doing batch job\"):\n ... do_batch_job()\n\n At start of the code block the current time will be logged\n and at end of the code block the end time and elapsed time will be logged.\n\n Can also be used as a function/method decorator, for example:\n\n >>> @TimingLogger(\"Calculation going on\")\n ... def add(x, y):\n ... return x + y\n \"\"\"\n\n # Function that returns current datetime (overridable for unit tests)\n _now = dt.datetime.now\n\n def __init__(self, title: str = \"Timing\", logger: Union[logging.Logger, str, Callable] = logger):\n \"\"\"\n :param title: the title to use in the logging\n :param logger: how the timing should be logged.\n Can be specified as a logging.Logger object (in which case the INFO log level will be used),\n as a string (name of the logging.Logger object to construct),\n or as callable (e.g. to use the `print` function, or the `.debug` method of an existing logger)\n \"\"\"\n self.title = title\n if isinstance(logger, str):\n logger = logging.getLogger(logger)\n if isinstance(logger, logging.Logger):\n self._log = logger.info\n elif callable(logger):\n self._log = logger\n else:\n raise ValueError(\"Invalid logger {l!r}\".format(l=logger))\n\n self.start_time = self.end_time = self.elapsed = None\n\n def __enter__(self):\n self.start_time = self._now()\n self._log(\"{t}: start {s}\".format(t=self.title, s=self.start_time))\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.end_time = self._now()\n self.elapsed = self.end_time - self.start_time\n self._log(\"{t}: {s} {e}, elapsed {d}\".format(\n t=self.title,\n s=\"fail\" if exc_type else \"end\",\n e=self.end_time, d=self.elapsed\n ))\n\n def __call__(self, f: Callable):\n \"\"\"\n Use TimingLogger as function/method decorator\n \"\"\"\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n with self:\n return f(*args, **kwargs)\n\n return wrapper\n\n\nclass DeepKeyError(LookupError):\n def __init__(self, key, keys):\n super(DeepKeyError, self).__init__(\"{k!r} (from deep key {s!r})\".format(k=key, s=keys))\n\n\n# Sentinel object for `default` argument of `deep_get`\n_deep_get_default_undefined = object()\n\n\ndef deep_get(data: dict, *keys, default=_deep_get_default_undefined):\n \"\"\"\n Get value deeply from nested dictionaries/lists/tuples\n\n :param data: nested data structure of dicts, lists, tuples\n :param keys: sequence of keys/indexes to traverse\n :param default: default value when a key is missing.\n By default a DeepKeyError will be raised.\n :return:\n \"\"\"\n for key in keys:\n if isinstance(data, dict) and key in data:\n data = data[key]\n elif isinstance(data, (list, tuple)) and isinstance(key, int) and 0 <= key < len(data):\n data = data[key]\n else:\n if default is _deep_get_default_undefined:\n raise DeepKeyError(key, keys)\n else:\n return default\n return data\n\n\ndef deep_set(data: dict, *keys, value):\n \"\"\"\n Set a value deeply in nested dictionary\n\n :param data: nested data structure of dicts, lists, tuples\n :param keys: sequence of keys/indexes to traverse\n :param value: value to set\n \"\"\"\n if len(keys) == 1:\n data[keys[0]] = value\n elif len(keys) > 1:\n if isinstance(data, dict):\n deep_set(data.setdefault(keys[0], OrderedDict()), 
*keys[1:], value=value)\n        elif isinstance(data, (list, tuple)):\n            deep_set(data[keys[0]], *keys[1:], value=value)\n        else:\n            raise ValueError(data)\n    else:\n        raise ValueError(\"No keys given\")\n\n\ndef load_json(path: Union[Path, str]) -> dict:\n    with Path(path).open(\"r\", encoding=\"utf-8\") as f:\n        return json.load(f)\n\n\ndef load_json_resource(src: Union[str, Path]) -> dict:\n    \"\"\"\n    Helper to load some kind of JSON resource:\n    - raw JSON string\n    - path to JSON file\n    - URL to JSON resource\n    \"\"\"\n    if isinstance(src, str) and src.strip().startswith(\"{\"):\n        # Assume source is a raw JSON string\n        return json.loads(src)\n    elif isinstance(src, str) and re.match(r\"^https?://\", src, flags=re.I):\n        # URL to remote JSON resource\n        return requests.get(src).json()\n    elif isinstance(src, Path) or (isinstance(src, str) and src.endswith(\".json\")):\n        # Assume source is a local JSON file path\n        return load_json(src)\n    raise ValueError(src)\n\n\nDEFAULT_APP_NAME = \"openeo-python-client\"\n\n\ndef _get_user_dir(\n        app_name=DEFAULT_APP_NAME,\n        xdg_env_var='XDG_CONFIG_HOME',\n        win_env_var='APPDATA',\n        fallback='~/.config',\n        win_fallback='~\\\\AppData\\\\Roaming',\n        macos_fallback='~/Library/Preferences',\n        auto_create=True,\n) -> Path:\n    \"\"\"\n    Get platform specific config/data/cache folder\n    \"\"\"\n    # Platform specific root locations (from highest priority to lowest)\n    env = os.environ\n    if platform.system() == 'Windows':\n        roots = [env.get(win_env_var), win_fallback, fallback]\n    elif platform.system() == 'Darwin':\n        roots = [env.get(xdg_env_var), macos_fallback, fallback]\n    else:\n        # Assume unix\n        roots = [env.get(xdg_env_var), fallback]\n\n    # Filter out None's, expand user prefix and append app name\n    dirs = [Path(r).expanduser() / app_name for r in roots if r]\n    # Prepend with OPENEO_CONFIG_HOME if set.\n    if env.get(\"OPENEO_CONFIG_HOME\"):\n        dirs.insert(0, Path(env.get(\"OPENEO_CONFIG_HOME\")))\n\n    # Use highest prio dir that already exists.\n    for p in dirs:\n        if p.exists() and p.is_dir():\n            return p\n\n    # No existing dir: create highest prio one (if possible)\n    if auto_create:\n        for p in dirs:\n            try:\n                p.mkdir(parents=True)\n                logger.info(\"Created user dir for {a!r}: {p}\".format(a=app_name, p=p))\n                return p\n            except OSError:\n                pass\n\n    raise Exception(\"Failed to find user dir for {a!r}. 
Tried: {p!r}\".format(a=app_name, p=dirs))\n\n\ndef get_user_config_dir(app_name=DEFAULT_APP_NAME, auto_create=True) -> Path:\n \"\"\"\n Get platform specific config folder\n \"\"\"\n return _get_user_dir(\n app_name=app_name,\n xdg_env_var='XDG_CONFIG_HOME', win_env_var='APPDATA',\n fallback='~/.config', win_fallback='~\\\\AppData\\\\Roaming', macos_fallback='~/Library/Preferences',\n auto_create=auto_create\n )\n\n\ndef get_user_data_dir(app_name=DEFAULT_APP_NAME, auto_create=True) -> Path:\n \"\"\"\n Get platform specific data folder\n \"\"\"\n return _get_user_dir(\n app_name=app_name,\n xdg_env_var='XDG_DATA_HOME', win_env_var='APPDATA',\n fallback='~/.local/share', win_fallback='~\\\\AppData\\\\Roaming', macos_fallback='~/Library',\n auto_create=auto_create\n )\n\n\ndef legacy_alias(orig: Callable, name: str, action=\"always\", category=DeprecationWarning):\n \"\"\"\n Create legacy alias of given function/method/classmethod/staticmethod\n\n :param orig: function/method to create legacy alias for\n :param name: name of the alias\n :return:\n \"\"\"\n post_process = None\n if isinstance(orig, classmethod):\n post_process = classmethod\n orig = orig.__func__\n kind = \"class method\"\n elif isinstance(orig, staticmethod):\n post_process = staticmethod\n orig = orig.__func__\n kind = \"static method\"\n elif inspect.ismethod(orig) or \"self\" in inspect.signature(orig).parameters:\n kind = \"method\"\n elif inspect.isfunction(orig):\n kind = \"function\"\n else:\n raise ValueError(orig)\n\n msg = \"Call to deprecated {k} `{n}`, use `{o}` instead.\".format(k=kind, n=name, o=orig.__name__)\n\n @functools.wraps(orig)\n def wrapper(*args, **kwargs):\n # This is based on warning handling/throwing implemented in `deprecated` package\n with warnings.catch_warnings():\n warnings.simplefilter(action, category)\n warnings.warn(msg, category=category, stacklevel=2)\n\n return orig(*args, **kwargs)\n\n # TODO: make this more Sphinx aware\n wrapper.__doc__ = \"Use of this legacy {k} is deprecated, use :py:{r}:`.{o}` instead.\".format(\n k=kind, r=\"meth\" if \"method\" in kind else \"func\", o=orig.__name__\n )\n\n if post_process:\n wrapper = post_process(wrapper)\n return wrapper\n","sub_path":"openeo/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":17450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241494691","text":"#Tagging and chunking\n\nimport nltk\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\n\n#Just obtaining texts\ntrain_text=state_union.raw(\"2005-GWBush.txt\") \nsample_text=state_union.raw(\"2006-GWBush.txt\")\n\n#Separating text by sentences\ncustom_sent_tokenizer=PunktSentenceTokenizer(sample_text)\ntokenized=custom_sent_tokenizer.tokenize(sample_text)\n\n#Recorre cada oración\nfor i in tokenized:\n words=nltk.word_tokenize(i)#Tokenize by word\n tagged=nltk.pos_tag(words)#Tag each word ***(Funciona para español???)***\n\n #Manually Chunking \n #chunkGram=r\"\"\"Chunk: {<.*>+}#Chunk everything \n # }+{\"\"\"#Chink (NOT chunk)\n #chunkParser=nltk.RegexpParser(chunkGram)\n #chunked=chunkParser.parse(tagged)\n \n #NLTK name entity chunking\n chunked=nltk.ne_chunk(tagged,binary=True)\n print(chunked)\n chunked.draw()\n\n\"\"\"\nCC coordinating conjunction\nCD cardinal digit\nDT determiner\nEX existential there (like: “there is” … think of it like “there exists”)\nFW foreign word\nIN preposition/subordinating conjunction\nJJ adjective ‘big’\nJJR adjective, comparative 
‘bigger’\nJJS adjective, superlative ‘biggest’\nLS list marker 1)\nMD modal could, will\nNN noun, singular ‘desk’\nNNS noun plural ‘desks’\nNNP proper noun, singular ‘Harrison’\nNNPS proper noun, plural ‘Americans’\nPDT predeterminer ‘all the kids’\nPOS possessive ending parent’s\nPRP personal pronoun I, he, she\nPRP$ possessive pronoun my, his, hers\nRB adverb very, silently,\nRBR adverb, comparative better\nRBS adverb, superlative best\nRP particle give up\nTO, to go ‘to’ the store.\nUH interjection, errrrrrrrm\nVB verb, base form take\nVBD verb, past tense took\nVBG verb, gerund/present participle taking\nVBN verb, past participle taken\nVBP verb, sing. present, non-3d take\nVBZ verb, 3rd person sing. present takes\nWDT wh-determiner which\nWP wh-pronoun who, what\nWP$ possessive wh-pronoun whose\nWRB wh-abverb where, when\n\"\"\"","sub_path":"pruebas2.py","file_name":"pruebas2.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310113021","text":"from __future__ import annotations\n\nimport typing\n\n\nclass ListNode:\n def __init__(self, val: int = 0, next: ListNode = None):\n self.val = val\n self.next = next\n\n\ndef merge2lists(l1: ListNode, l2: ListNode) -> ListNode:\n cur = None\n first = None\n while l1 or l2:\n if (l1 and l2 and l1.val <= l2.val) or (l1 and not l2):\n if cur:\n cur.next = l1\n else:\n first = l1\n cur = l1\n l1 = l1.next\n else:\n if cur:\n cur.next = l2\n else:\n first = l2\n cur = l2\n l2 = l2.next\n\n return first\n\n\ndef merge_k_lists(lists: typing.List[ListNode]) -> ListNode:\n last = len(lists) - 1\n while last > 0:\n first = 0\n while first < last:\n lists[first] = merge2lists(lists[first], lists[last])\n first += 1\n last -= 1\n\n return lists[0] if len(lists) > 0 else None\n","sub_path":"merge_k_sorted_lists/divide_and_conquer.py","file_name":"divide_and_conquer.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82171379","text":"from torch.optim.lr_scheduler import _LRScheduler, ExponentialLR\n\n\nclass CustomScheduler(_LRScheduler):\n timestep: int = 0\n\n def __init__(self, optimizer, gamma, warmup=None):\n self.optimizer = optimizer\n self.after_warmup = ExponentialLR(optimizer, gamma=gamma)\n self.initial_lrs = [p_group['lr'] for p_group in self.optimizer.param_groups]\n self.warmup = 0 if warmup is None else warmup\n super(CustomScheduler, self).__init__(optimizer)\n\n def get_lr(self):\n return [self.timestep * group_init_lr / self.warmup for group_init_lr in self.initial_lrs] \\\n if self.timestep < self.warmup else self.after_warmup.get_lr()\n\n def step(self, epoch=None):\n if self.timestep < self.warmup:\n self.timestep += 1\n super(CustomScheduler, self).step(epoch)\n else:\n self.after_warmup.step(epoch)\n\n\nclass NoamScheduler(_LRScheduler):\n\n def __init__(self, optimizer, warmup):\n assert warmup > 0\n self.optimizer = optimizer\n self.initial_lrs = [p_group['lr'] for p_group in self.optimizer.param_groups]\n self.warmup = warmup\n self.timestep = 0\n super(NoamScheduler, self).__init__(optimizer)\n\n def get_lr(self):\n noam_lr = self.get_noam_lr()\n return [group_init_lr * noam_lr for group_init_lr in self.initial_lrs]\n\n def get_noam_lr(self):\n return min(self.timestep ** -0.5, self.timestep * self.warmup ** -1.5)\n\n def step(self, epoch=None):\n self.timestep += 1\n super(NoamScheduler, self).step(epoch)\n\n\nif __name__ == 
\"__main__\":\n \"\"\"\n some test't\n \"\"\"\n\n import torch.nn as nn\n from pytorch_transformers.optimization import AdamW\n\n wrmp = 5\n net = nn.Sequential(\n nn.Linear(10, 10),\n nn.Linear(10, 10),\n )\n opt = AdamW([\n {'params': [p for n, p in net.named_parameters() if n.startswith('0')], 'lr': 0.05},\n {'params': [p for n, p in net.named_parameters() if n.startswith('1')], 'lr': 0.0001}\n ])\n\n scheduler = NoamScheduler(opt, warmup=wrmp)\n\n for i in range(15):\n print(f\"epoch: {i + 1}\")\n print(*[param['lr'] for param in opt.param_groups], sep=' | ', end='\\n\\n')\n scheduler.step()\n","sub_path":"pysrc/review/train/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16917628","text":"# Them cac thu vien neu can\r\nimport numpy as np\r\nimport math\r\n\r\n#my functions\r\ndef LuuFile(path, data):\r\n file = open(path, 'a', encoding = 'utf-8')\r\n file.writelines(data)\r\n file.writelines('\\n')\r\n file.close()\r\n\r\ndef DocFile(path):\r\n arrSo = []\r\n file = open(path, 'r', encoding= 'utf-8')\r\n for line in file:\r\n data = line.strip()\r\n arr = [int(x) for x in data.split(' ')]\r\n arrSo.append(arr)\r\n file.close()\r\n return arrSo\r\n\r\n#ham tinh loi nhuan\r\ndef profit(position, order):\r\n #doanh thu\r\n Tr = 5 + order[3] + order[4] * 2\r\n Tc = math.sqrt((position[0] - order[1])**2 + (position[1] - order[2])**2) * 0.5 + 10\r\n return Tr - Tc\r\n\r\ndef assign(file_input, file_output):\r\n # read input\r\n arr = DocFile(file_input)\r\n x0, y0 = arr[0]\r\n num_shippers = arr[1][1]\r\n num_orders = arr[1][0]\r\n idx = np.array(range(num_orders))[np.newaxis].T\r\n list_orders = np.hstack((idx, np.array(arr[2:])))\r\n\r\n #tao array chua id va loi nhuan hien tai\r\n dtype = [('id', int), ('profit', float), ('x', int), ('y', int)]\r\n values = [(i, 0, x0, y0) for i in range(num_shippers)]\r\n current_profit = np.array(values, dtype=dtype)\r\n #tao list chua order hien tai\r\n append_order = [[] for i in range(num_shippers)]\r\n\r\n\r\n # run algorithm\r\n while len(list_orders) != 0:\r\n #tinh mean loi nhuan cua tat ca shipper hien co\r\n mean = sum([i[1] for i in current_profit]) / num_shippers\r\n print('mean-----------------------')\r\n print(mean)\r\n #chon idx shiper co abs profit nho nhat\r\n abs_current_profit = current_profit.copy()\r\n for i in range(num_shippers):\r\n abs_current_profit[i] = (abs_current_profit[i][0], abs(abs_current_profit[i][1] - mean), abs_current_profit[i][2], abs_current_profit[i][3])\r\n idx = np.sort(abs_current_profit, kind='heapsort', order='profit')[-1][0]\r\n print('abs_current_profit-----------------------')\r\n print(abs_current_profit)\r\n print('id shipper chose-----------------------')\r\n print(np.sort(abs_current_profit, kind='heapsort', order='profit'))\r\n print(idx)\r\n dtype = [('id', int), ('abs_profit', float)]\r\n values = [(list_orders[i,0], abs(current_profit[idx][1] + profit(list(current_profit[idx])[2:], list_orders[i]) - mean)) for i in range(len(list_orders))]\r\n print('values-----------------------')\r\n print(values)\r\n all_order_profit = np.array(values, dtype=dtype)\r\n print('all_order_profit-----------------------')\r\n print(all_order_profit)\r\n #chon id don hang co chi phi den do dat gan bang 0\r\n all_order_profit = np.sort(all_order_profit, kind='heapsort', order='abs_profit')\r\n print('all_order_profit-----------------------')\r\n print(all_order_profit)\r\n 
print('all_order_profit-----------------------')\r\n print(all_order_profit[0][0])\r\n id_arr = list(list_orders[:,0])\r\n print('id_arr-----------------------')\r\n print(id_arr)\r\n id_order = [i for i in range(len(id_arr)) if id_arr[i] == all_order_profit[0][0]][0]\r\n print('id_order-----------------------')\r\n print(id_order)\r\n # print(current_profit[0][0])\r\n # print(current_profit[0][1])\r\n # print(list(current_profit[0])[2:])\r\n # print()\r\n current_profit[idx] = (current_profit[idx][0], current_profit[idx][1] + profit(list(current_profit[idx])[2:], list_orders[id_order]), list_orders[id_order,1], list_orders[id_order,2])\r\n\r\n #append new order append_order\r\n append_order[current_profit[idx][0]].append(all_order_profit[0][0])\r\n print('append_order------------------')\r\n print(append_order)\r\n\r\n #update list_orders\r\n list_orders = np.delete(list_orders, [id_order * 5 + i for i in range(5)])\r\n list_orders = list_orders.reshape(len(list_orders) // 5, 5)\r\n print('list_orders------------------')\r\n print(list_orders)\r\n # current_profit = np.sort(current_profit, kind='heapsort', order='profit')\r\n print('**********************************')\r\n print(current_profit)\r\n print(append_order)\r\n print('**********************************')\r\n\r\n\r\n # write output\r\n # print(append_order)\r\n\r\n\r\nassign('input.txt', 'output.txt')\r\n","sub_path":"btl2/BTL2/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156491014","text":"from peak.api import *\n\nimport os, sys, weakref\nfrom time import sleep\n\n\nclass DDEConnectionError(Exception):\n \"\"\"Problem connecting to a DDE Server\"\"\"\n\n\nclass ServerManager(object):\n\n \"\"\"This ensures that 'Shutdown()' gets called when the server is GC'd\"\"\"\n\n def __init__(self,name,logger=logs.AbstractLogger(levelName='EMERG')):\n import win32ui, dde\n server = self.server = dde.CreateServer()\n self.name = name\n self.logger = logger\n server.Create(name)\n\n def __call__(self, serviceName, topicName):\n import dde\n conn = dde.CreateConversation(self.server)\n\n self.logger.debug(\"%s: attempting DDE connection to (%s,%s)\",\n self.name, serviceName, topicName\n )\n\n conn.ConnectTo(serviceName, topicName)\n return conn\n\n def __del__(self):\n if self.server is not None:\n self.logger.debug(\"%s: shutting down DDE server\", self.name)\n self.server.Shutdown()\n self.server = None\n\n close = __del__\n\n\nclass ddeURL(naming.URL.Base):\n\n \"\"\"PEAK Win32 DDE URL\n\n Example::\n\n \"win32.dde:service::topic;file=c:\\\\foo;retries=5;sleep=5\"\n\n Syntax is 'service::topic' followed by semicolon-separated\n parameters, which may be 'file' to designate a file to be launched\n if the initial connection attempt is unsuccessful, 'retries' to\n indicate how many retries should occur if the initial attempt is\n unsuccessful, and 'sleep' to set the number of seconds to wait between\n retry attempts.\n\n These parameters are all available as attributes of the same names,\n including 'service' and 'topic'.\"\"\"\n\n supportedSchemes = 'win32.dde',\n defaultFactory = 'peak.storage.DDE.DDEConnection'\n\n class service(naming.URL.RequiredField):\n pass\n\n class topic(naming.URL.RequiredField):\n pass\n\n class file(naming.URL.Field):\n pass\n\n class retries(naming.URL.IntField):\n defaultValue = 10\n\n class sleep(naming.URL.IntField):\n defaultValue = 1\n\n\n\n\n\n\n syntax = 
naming.URL.Sequence(\n service, '::', topic,\n naming.URL.Set(\n naming.URL.Sequence(';file=',file),\n naming.URL.Sequence(';retries=',retries),\n naming.URL.Sequence(';sleep=',sleep),\n ),\n )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass DDEConnection(storage.ManagedConnection):\n\n \"\"\"Managed DDE connection\"\"\"\n\n protocols.advise(\n instancesProvide = [storage.IDDEConnection],\n )\n\n serviceName = binding.Obtain(\"address/service\")\n topicName = binding.Obtain(\"address/topic\")\n launchFile = binding.Obtain(\"address/file\", default=None)\n\n retries = binding.Obtain(\"address/retries\", default=10)\n sleepFor = binding.Obtain(\"address/sleep\", default=1)\n\n logger = binding.Obtain('logger:dde')\n\n def ddeServer(self):\n return ServerManager(\n str(binding.getComponentPath(self)),\n # weakref to the logger so that the ServerManager isn't part of\n # a cycle with us (if our logger refers to us)\n logger=weakref.proxy(self.logger)\n )\n\n ddeServer = binding.Make(ddeServer)\n\n def __call__(self, requestStr):\n \"\"\"Issue a DDE request (requestStr -> responseStr)\"\"\"\n return self.connection.Request(requestStr)\n\n def execute(self, commandStr):\n \"\"\"Execute a DDE command\"\"\"\n return self.connection.Exec(commandStr)\n\n def poke(self, commandStr, data=None):\n \"\"\"DDE Poke of command string and optional data buffer\"\"\"\n return self.connection.Poke(commandStr, data)\n\n\n\n def _open(self):\n\n attemptedLaunch = False\n\n for i in range(self.retries+1):\n\n try:\n conn = self.ddeServer(self.serviceName, self.topicName)\n except:\n t,v,tb = sys.exc_info()\n if (t,v) != ('error','ConnectTo failed'):\n del t,v,tb,conn\n raise\n else:\n return conn\n\n if attemptedLaunch:\n sleep(self.sleepFor)\n else:\n if self.launchFile:\n self.logger.debug(\"%s: launching %s\",self,self.launchFile)\n os.startfile(self.launchFile)\n\n attemptedLaunch = True\n\n\n else:\n raise DDEConnectionError(\n \"ConnectTo failed\", self.serviceName, self.topicName\n )\n\n def _close(self):\n self.ddeServer.close()\n del self.ddeServer # force shutdown\n\n\n\n\n\n\n\n","sub_path":"PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/storage/DDE.py","file_name":"DDE.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244368404","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i','--in',dest = 'in_', help = 'input file name, required')\nparser.add_argument('-f', dest = \"fasta_\", help = 'fasta file name, required')\nparser.add_argument('-o','--out',dest = 'out_', default = 'output.txt', help = 'output file name, optional (default: output.txt)', required = False)\n\nargs = parser.parse_args()\nin_file_name = args.in_\nout_file_name = args.out_\nfasta_file_name = args.fasta_\n\ninput_file = open(in_file_name)\nfasta_file = open(fasta_file_name)\noutput_file = open(out_file_name, \"w\")\n\n\ndef dict_():\n\n\tdict_ = {}\n\n\tfor line in fasta_file:\n\t\tif line.startswith(\">\"):\n\t\t\tline_ = line\n\t\t\tdict_[line] = []\n\n\t\tif not line.startswith(\">\"):\n\t\t\tdict_[line_].append(line)\n\n\treturn dict_\n\ndef wrap(word):\n\tj = 0\n\tnew = ''\n\twhile j < len(word):\n\t\tnew += word[j:j + 60] + \"\\n\"\n\t\tj += 60\n\n\treturn new\n\n\ndef search_ (dict_):\n\n\tfor line in input_file:\n\t\tline_ = line.split()\n\t\tname = line_[0]\n\n\t\tfor key, value in dict_.items():\n\t\t\tif name in key:\n\t\t\t\tseq = 
str(value)\n\t\t\t\tseq = seq.strip(\"[\").strip(\"]\").replace(\",\",\"\").replace(\" \",\"\").replace(\"\\\\\",\"\").replace(\"n\",\"\").replace(\"\\'\",\"\")\n\t\t\t\tseq_ = wrap(seq)\n\t\t\t\toutput_file.write(key + seq_ )\n\n\n\ndict_ = dict_()\nsearch_(dict_)\n","sub_path":"mainfasta.py","file_name":"mainfasta.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"267498771","text":"from __future__ import annotations\r\n\r\nfrom typing import Optional, Tuple, TYPE_CHECKING\r\n\r\nimport colors\r\nimport exceptions\r\nimport math\r\nimport copy\r\nimport entity_factories\r\n\r\nfrom random import randint\r\nfrom entity import Actor, Entity, Item, Gold, Shop\r\n\r\nif TYPE_CHECKING:\r\n from engine import Engine\r\n from entity import Actor, Entity, Item, Gold, Shop\r\n \r\ndef check_turn_advance(engine: Engine, entity: Actor):\r\n if entity == engine.player:\r\n engine.current_turn += 1\r\n \r\n remove_summons = []\r\n for npc in engine.game_map.actors:\r\n npc_buffs = list(npc.battler.current_buffs)\r\n for buff in npc_buffs:\r\n buff_turn = npc.battler.current_buffs[buff][0]\r\n if buff_turn < engine.current_turn:\r\n if buff == \"Player Summoned\":\r\n remove_summons.append(npc)\r\n else:\r\n engine.message_log.add_message(f\"{npc.name}'s {buff} fades.\", colors.red)\r\n del npc.battler.current_buffs[buff]\r\n\r\n for removes in remove_summons:\r\n engine.message_log.add_message(f\"{removes.name} vanishes in a puff of smoke.\", colors.red)\r\n engine.game_map.entities.remove(removes)\r\n \r\n\r\ndef get_size_damage_diff(num_damage_dice: int , size_damage_dice: int, entity: Actor):\r\n main_damage_list = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [1, 8], [2, 6], [3, 6], [4, 6],\r\n [6, 6], [8, 6]]\r\n twodfour_damage_list = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [2, 4], [2, 6], [3, 6], [4, 6],\r\n [6, 6], [8, 6]]\r\n onedten_damage_list = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [1, 8], [1, 10], [2, 8], [3, 8],\r\n [4, 8], [6, 8]]\r\n onedtwelve_damage_list = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [1, 8], [1, 10], [1, 12], [3, 6],\r\n [4, 6], [6, 6], [8, 6]]\r\n twodsix_damage_list = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 6], [1, 8], [1, 10], [2, 6], [3, 6],\r\n [4, 6], [6, 6], [8, 6]]\r\n \r\n if (num_damage_dice, size_damage_dice) == (2, 4):\r\n working_list = twodfour_damage_list\r\n elif (num_damage_dice, size_damage_dice) == (1, 10):\r\n working_list = onedten_damage_list\r\n elif (num_damage_dice, size_damage_dice) == (1, 12):\r\n working_list = onedtwelve_damage_list\r\n elif (num_damage_dice, size_damage_dice) == (2, 6):\r\n working_list = twodsix_damage_list\r\n else:\r\n working_list = main_damage_list\r\n\r\n current_position = working_list.index([num_damage_dice, size_damage_dice])\r\n\r\n if entity.battler.size == \"Medium\":\r\n pass\r\n elif entity.battler.size == \"Small\":\r\n current_position -= 1\r\n elif entity.battler.size == \"Tiny\":\r\n current_position -= 2\r\n elif entity.battler.size == \"Large\":\r\n current_position += 1\r\n\r\n return working_list[current_position]\r\n \r\n\r\nclass Action:\r\n def __init__(self, entity: Actor, penalty: int = 0) -> None:\r\n super().__init__()\r\n self.entity = entity\r\n self.penalty = penalty\r\n\r\n @property\r\n def engine(self) -> Engine:\r\n \"\"\"Return the engine this action belongs to.\"\"\"\r\n return self.entity.gamemap.engine\r\n\r\n def perform(self) -> None:\r\n \"\"\"Perform this action with the objects needed 
to determine its scope.\r\n\r\n `self.engine` is the scope this action is being performed in.\r\n\r\n `self.entity` is the object performing the action.\r\n\r\n This method must be overridden by Action subclasses.\r\n \"\"\"\r\n raise NotImplementedError()\r\n\r\nclass ToggleCombatModeAction(Action):\r\n \"\"\"Toggles between Melee and Ranged. Quick Draw makes it free action.\"\"\"\r\n \"\"\"Otherwise takes turn to do so.\"\"\"\r\n\r\n def __init__(self, entity: Actor):\r\n super().__init__(entity)\r\n\r\n def perform(self) -> None:\r\n inventory = self.entity.inventory\r\n\r\n if self.entity.battler.combat_mode == \"Ranged\":\r\n self.entity.battler.combat_mode = \"Melee\"\r\n if \"Quick Draw\" in self.entity.battler.combat_feats:\r\n raise exceptions.Impossible(\"You quickly shift to melee.\")\r\n else:\r\n self.engine.message_log.add_message(f\"Now in Melee mode.\")\r\n check_turn_advance(self.engine, self.entity)\r\n return\r\n elif self.entity.battler.combat_mode == \"Melee\":\r\n if self.entity.equipment.ranged == None:\r\n self.engine.message_log.add_message(f\"You have no ranged weapon\")\r\n else:\r\n self.entity.battler.combat_mode = \"Ranged\"\r\n if \"Quick Draw\" in self.entity.battler.combat_feats:\r\n raise exceptions.Impossible(\"You quickly shift to your ranged weapon.\")\r\n else:\r\n self.engine.message_log.add_message(f\"Now in ranged mode.\")\r\n check_turn_advance(self.engine, self.entity)\r\n return\r\n else:\r\n raise exceptions.Impossible(\"Error: Combat mode not melee or ranged found.\")\r\n\r\nclass PickupAction(Action):\r\n \"\"\"Pickup an item and add it to the inventory, if there is room for it.\"\"\"\r\n\r\n def __init__(self, entity: Actor):\r\n super().__init__(entity)\r\n\r\n def perform(self) -> None:\r\n actor_location_x = self.entity.x\r\n actor_location_y = self.entity.y\r\n inventory = self.entity.inventory\r\n\r\n for item in self.engine.game_map.items:\r\n if actor_location_x == item.x and actor_location_y == item.y:\r\n if item.can_stack == True:\r\n for i in range(len(self.entity.inventory.items)):\r\n if item.name == self.entity.inventory.items[i].name:\r\n self.entity.inventory.items[i].number_in_stack += 1\r\n self.engine.message_log.add_message(f\"You have one more {item.name}!\")\r\n self.engine.game_map.entities.remove(item)\r\n return\r\n\r\n if len(inventory.items) >= inventory.capacity:\r\n raise exceptions.Impossible(\"Your inventory is full.\")\r\n\r\n self.engine.game_map.entities.remove(item)\r\n item.parent = self.entity.inventory\r\n inventory.items.append(item)\r\n\r\n self.engine.message_log.add_message(f\"You picked up the {item.name}!\")\r\n check_turn_advance(self.engine, self.entity)\r\n return\r\n raise exceptions.Impossible(\"There is nothing here to pick up.\")\r\n\r\nclass RangedAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n penalty: int = 0,\r\n ranged_attack_list = [],\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n self.penalty = penalty\r\n self.ranged_attack_list = ranged_attack_list\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n \"\"\"Fire bow or use thrown weapon.\"\"\"\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n if self.entity == target:\r\n raise exceptions.Impossible(\"You can't target yourself.\")\r\n\r\n damage = 0\r\n dam_string = 
\"shoots\"\r\n crit_needs = 20\r\n crit_mult = 2\r\n\r\n if \"Point Blank Shot\" in self.entity.battler.combat_feats and self.entity.distance(target.x, target.y) <= 6:\r\n pb = 1\r\n else:\r\n pb = 0\r\n\r\n if self.entity.equipment.ranged != None:\r\n dam_string = self.entity.equipment.ranged.equippable.damage_name\r\n num_damage_dice = self.entity.equipment.ranged.equippable.weapon_num_dice\r\n size_damage_dice = self.entity.equipment.ranged.equippable.weapon_size_dice\r\n num_damage_dice, size_damage_dice = get_size_damage_diff(num_damage_dice, size_damage_dice, self.entity)\r\n crit_needs = self.entity.equipment.ranged.equippable.crit_needs\r\n crit_mult = self.entity.equipment.ranged.equippable.crit_mult\r\n elif len(self.ranged_attack_list) == 0:\r\n raise exceptions.Impossible(\"You have no ranged weapon.\")\r\n else:\r\n dam_string = self.ranged_attack_list[1]\r\n num_damage_dice = self.ranged_attack_list[2]\r\n size_damage_dice = self.ranged_attack_list[3]\r\n\r\n for i in range(num_damage_dice):\r\n damage = damage + randint(1, size_damage_dice)\r\n\r\n damage = damage + self.entity.battler.ranged_to_damage + pb #half damage on secondary attacks?\r\n attack_desc = f\"{self.entity.name.capitalize()} {dam_string} {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n\r\n (damage, attack_desc) = HandleAttack(self.entity.battler.ranged_to_hit, target.battler.current_ac, damage, attack_desc, crit_needs, crit_mult)\r\n self.engine.message_log.add_message(attack_desc, attack_color)\r\n target.battler.hp -= damage\r\n\r\nclass FullAttackRangedAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n penalty: int = 0,\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n self.penalty = penalty\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n if self.entity.battler.combat_mode != \"Ranged\":\r\n if \"Quick Draw\" in self.entity.battler.combat_feats:\r\n self.engine.message_log.add_message(f\"{self.entity.name} quickly switches to a ranged weapon.\")\r\n self.entity.battler.combat_mode = \"Ranged\"\r\n elif self.entity.equipment.ranged != None:\r\n self.engine.message_log.add_message(f\"{self.entity.name} struggles to change to a ranged weapon.\")\r\n self.entity.battler.combat_mode = \"Ranged\"\r\n check_turn_advance(self.engine, self.entity)\r\n return RangedAction(self.entity, self.target_xy).perform()\r\n elif len(self.entity.battler.full_ranged_attack) > 0:\r\n self.engine.message_log.add_message(f\"{self.entity.name} struggles to change to a ranged weapon.\")\r\n self.entity.battler.combat_mode = \"Ranged\"\r\n check_turn_advance(self.engine, self.entity)\r\n return RangedAction(self.entity, self.target_xy, 0, self.entity.battler.full_ranged_attack[0]).perform()\r\n #Doesn't handle secondary attack in first slot of full ranged attack, ignores -5 hit penalty, fix\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n if self.entity == target:\r\n raise exceptions.Impossible(\"You can't target yourself.\")\r\n \r\n if self.entity.equipment.ranged != None:\r\n if self.entity.battler.bab < 6: #BAB can be 0, still 1 attack\r\n if \"Rapid Shot\" not in self.entity.battler.combat_feats:\r\n check_turn_advance(self.engine, 
self.entity)\r\n                    return RangedAction(self.entity, self.target_xy).perform()\r\n                else:\r\n                    RangedAction(self.entity, self.target_xy).perform()\r\n                    if target.battler.is_dead == True:\r\n                        target = FindNewTarget(self.engine, target, ranged = True)\r\n                        if target == None:\r\n                            check_turn_advance(self.engine, self.entity)\r\n                            return None\r\n                        self.engine.message_log.add_message(f\"{self.entity.name} retargets to {target.name}.\", colors.player_atk)\r\n                        self.target_xy = (target.x, target.y)\r\n                    check_turn_advance(self.engine, self.entity)\r\n                    return RangedAction(self.entity, self.target_xy).perform()\r\n            else: #Iteratives\r\n                bab_counter = self.entity.battler.bab\r\n                if \"Rapid Shot\" in self.entity.battler.combat_feats:\r\n                    RangedAction(self.entity, self.target_xy).perform()\r\n                while bab_counter > 0:\r\n                    if target.battler.is_dead == True:\r\n                        target = FindNewTarget(self.engine, target, ranged = True)\r\n                        if target == None:\r\n                            check_turn_advance(self.engine, self.entity)\r\n                            return None\r\n                        self.engine.message_log.add_message(f\"{self.entity.name} retargets to {target.name}.\", colors.player_atk)\r\n                        self.target_xy = (target.x, target.y)\r\n                    RangedAction(self.entity, self.target_xy, self.penalty).perform()\r\n                    bab_counter -= 5\r\n                    self.penalty += 5\r\n                check_turn_advance(self.engine, self.entity)\r\n                return None\r\n        elif len(self.entity.battler.full_ranged_attack) == 0: #No weapons, no full attack\r\n            raise exceptions.Impossible(\"You have no ranged weapon.\")\r\n\r\n        else:\r\n            for i in range(len(self.entity.battler.full_ranged_attack)):\r\n                if target.battler.is_dead == True:\r\n                    check_turn_advance(self.engine, self.entity)\r\n                    return None\r\n                if self.entity.battler.full_ranged_attack[i][0] == 'P': #Primary attack\r\n                    RangedAction(self.entity, self.target_xy, 0, self.entity.battler.full_ranged_attack[i]).perform()\r\n                elif self.entity.battler.full_ranged_attack[i][0] == 'S': #Secondary attack\r\n                    RangedAction(self.entity, self.target_xy, 5, self.entity.battler.full_ranged_attack[i]).perform()\r\n            check_turn_advance(self.engine, self.entity)\r\n            return None\r\n\r\ndef FindNewTarget(engine: Engine, target, ranged: bool=True):\r\n    for newtarget in engine.game_map.actors:\r\n        if newtarget == target or newtarget == engine.player:\r\n            continue\r\n        if engine.game_map.visible[newtarget.x, newtarget.y]:\r\n            if ranged == True:\r\n                return newtarget\r\n            else:\r\n                if InMeleeReach(engine.player, newtarget):\r\n                    return newtarget\r\n    return None\r\n\r\ndef InMeleeReach(attacker, target):\r\n    melee_reach = 1.5 # Could be awhile until complete here, when reach implemented\r\n    Xdist = attacker.x - target.x\r\n    Ydist = attacker.y - target.y\r\n    if math.sqrt(Xdist**2 + Ydist**2) < melee_reach:\r\n        return True\r\n    else:\r\n        return False\r\n\r\nclass DragonBreathAction(Action):\r\n    def __init__(\r\n        self,\r\n        entity: Actor,\r\n        target_xy: Tuple[int, int] = (0, 0),\r\n    ):\r\n        super().__init__(entity)\r\n        self.target_xy = target_xy\r\n\r\n    @property\r\n    def target_actor(self) -> Optional[Actor]:\r\n        return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n    def perform(self) -> None:\r\n        target = self.target_actor\r\n        choice = ''\r\n        damage = 0\r\n        if not target:\r\n            raise exceptions.Impossible(\"Nothing to attack.\")\r\n        breath_weapons = ['Cone Fire Breath']\r\n        for breath in breath_weapons:\r\n            if breath in self.entity.battler.spells:\r\n                choice = breath # doesn't handle multiple breath weapon options\r\n        if choice == '':\r\n            
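# Descriptive note (added): this branch is reached only when none of the entries in breath_weapons is registered in this actor's battler.spells.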
raise exception.Impossible(\"You found no breath attack\")\r\n else:\r\n if choice == 'Cone Fire Breath':\r\n num_dice = self.entity.battler.spells[choice][0]\r\n for i in range(num_dice):\r\n damage = damage + randint(1, self.entity.battler.spells[choice][1])\r\n attack_desc = f\"{self.entity.name.capitalize()} breathes fire on {target.name}\"\r\n \r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n if make_save(breath_dc(self.entity), target, \"Reflex\"):\r\n damage = int(damage/2)\r\n attack_desc += f\" save made\"\r\n else:\r\n attack_desc += f\" save failed\"\r\n if damage > 0:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} for {damage} hit points.\", attack_color\r\n )\r\n else:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} but does no damage.\", attack_color\r\n )\r\n target.battler.take_damage(damage)\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n \r\ndef breath_dc(entity):\r\n save_needed = 10\r\n save_needed += (int(entity.battler.current_con/2) + int(entity.level.current_level/2))\r\n return save_needed\r\n\r\ndef make_save(target_dc, target, save_type: str = \"Reflex\"):\r\n roll = randint(1,20)\r\n if roll == 1:\r\n roll = -10 # 1 auto-fail is just very bad roll now.\r\n if save_type == \"Reflex\":\r\n save_bonus = target.battler.reflex_save\r\n elif save_type == \"Fortitude\":\r\n save_bonus = target.battler.fort_save\r\n elif save_type == \"Will\":\r\n save_bonus = target.battler.will_save\r\n else:\r\n raise exceptions.Impossible(\"Save is not Fort, Reflex, or Will. Undefined.\")\r\n return True\r\n if (save_bonus + roll) >= target_dc or roll == 20: # natural 20 always saves\r\n return True\r\n else:\r\n return False\r\n\r\nclass SummonMonsterAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n chosen_spell: str = \"\",\r\n mana_cost: int = 0,\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n self.chosen_spell = chosen_spell\r\n self.mana_cost = mana_cost\r\n\r\n def perform(self) -> None:\r\n cost = self.mana_cost\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n if self.chosen_spell == \"Summon Monster 1\":\r\n Entity.spawn_summon(entity_factories.celestial_badger, self.engine.game_map,\r\n self.target_xy[0], self.target_xy[1], self.entity.level.current_level)\r\n elif self.chosen_spell == \"Summon Monster 2\":\r\n Entity.spawn_summon(entity_factories.celestial_riding_dog, self.engine.game_map,\r\n self.target_xy[0], self.target_xy[1], self.entity.level.current_level)\r\n elif self.chosen_spell == \"Summon Monster 3\":\r\n Entity.spawn_summon(entity_factories.celestial_dire_badger, self.engine.game_map,\r\n self.target_xy[0], self.target_xy[1], self.entity.level.current_level)\r\n else:\r\n raise exceptions.Impossible(f\"Summon spells not found: {chosen_spell}\")\r\n self.entity.battler.use_mana(cost)\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastMagicMissileAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to 
attack.\")\r\n\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n cost = self.entity.battler.spells[\"Magic Missile\"]\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n num_dice = int((self.entity.level.current_level + 1) / 2)\r\n num_dice = min(num_dice, 5)\r\n damage = 0\r\n for i in range(num_dice):\r\n damage = damage + randint(1, 4) + 1\r\n attack_desc = f\"{self.entity.name.capitalize()} zaps {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n if damage > 0:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} for {damage} hit points.\", attack_color\r\n )\r\n else:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} but does no damage.\", attack_color\r\n )\r\n target.battler.take_damage(damage)\r\n self.entity.battler.use_mana(cost)\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass ItemAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n item: Item,\r\n target_xy: Optional[Tuple[int, int]] = None\r\n ):\r\n super().__init__(entity)\r\n self.item = item\r\n if not target_xy:\r\n target_xy = entity.x, entity.y\r\n self.target_xy = target_xy\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n \"\"\"Return the actor at this actions destination.\"\"\"\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n \"\"\"Invoke the items ability, this action will be given to provide context.\"\"\"\r\n check_turn_advance(self.engine, self.entity)\r\n if self.item.consumable:\r\n self.item.consumable.activate(self)\r\n\r\nclass DropItem(ItemAction):\r\n def perform(self) -> None:\r\n self.entity.inventory.drop(self.item)\r\n check_turn_advance(self.engine, self.entity)\r\n\r\nclass EquipItem(ItemAction):\r\n def perform(self) -> None:\r\n \"\"\"Equip toggle Item\"\"\"\r\n if self.item.equippable:\r\n self.item.equippable.activate(self.engine.player)\r\n check_turn_advance(self.engine, self.entity)\r\n \r\n\r\nclass WaitAction(Action):\r\n def __init__(self, entity: Actor):\r\n super().__init__(entity)\r\n \r\n def perform(self) -> None:\r\n if self.entity.battler.mana < self.entity.battler.max_mana:\r\n self.entity.battler.mana += 1\r\n if self.entity.battler.hp < self.entity.battler.max_hp:\r\n self.entity.battler.hp += 1\r\n check_turn_advance(self.engine, self.entity)\r\n pass\r\n\r\nclass TakeStairsAction(Action):\r\n def perform(self) -> None:\r\n \"\"\"\r\n Take the stairs, if any exist at the entity's location.\r\n \"\"\"\r\n stairs_found = False\r\n for entity in self.engine.game_map.entities:\r\n if (entity.stairs and self.entity.x == entity.x and self.entity.y == entity.y):\r\n stairs_found = True\r\n found_stairs = entity\r\n if stairs_found == True:\r\n self.engine.last_level = self.engine.game_map.dungeon_level\r\n self.engine.game_world.generate_floor(found_stairs.stairs)\r\n self.engine.message_log.add_message(\r\n \"You take the stairs.\", colors.descend\r\n )\r\n check_turn_advance(self.engine, self.entity)\r\n else:\r\n raise exceptions.Impossible(\"There are no stairs here.\")\r\n\r\nclass ActionWithDirection(Action):\r\n def __init__(self, entity: Actor, dx: int, dy: int, penalty: int = 0,\r\n attack_list = []):\r\n super().__init__(entity)\r\n\r\n self.dx = dx\r\n 
self.dy = dy\r\n self.penalty = penalty\r\n self.attack_list = attack_list\r\n\r\n @property\r\n def dest_xy(self) -> Tuple[int, int]:\r\n \"\"\"Returns this actions destination.\"\"\"\r\n return self.entity.x + self.dx, self.entity.y + self.dy\r\n\r\n @property\r\n def blocking_entity(self) -> Optional[Entity]:\r\n \"\"\"Return the blocking entity at this actions destination..\"\"\"\r\n return self.engine.game_map.get_blocking_entity_at_location(*self.dest_xy)\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n \"\"\"Return the actor at this actions destination.\"\"\"\r\n return self.engine.game_map.get_actor_at_location(*self.dest_xy)\r\n\r\n @property\r\n def target_shop(self) -> Optional[Shop]:\r\n \"\"\"Return the shop at this actions destination.\"\"\"\r\n return self.engine.game_map\r\n\r\n def perform(self) -> None:\r\n raise NotImplementedError()\r\n\r\ndef reflex_save_half(damage, save_dc, save_bonus):\r\n if damage <= 0:\r\n return 0\r\n else:\r\n roll = randint(1,20)\r\n if roll == 1:\r\n roll = -10\r\n if roll + save_bonus >= save_dc or roll == 20:\r\n half_damage = max(1, int(damage/2))\r\n return half_damage\r\n else:\r\n return damage\r\n \r\ndef not_in_cone(caster_x, caster_y, pos_x, pos_y, enemy_x, enemy_y, radius):\r\n distance = math.sqrt((caster_x - enemy_x)**2 + (caster_y - enemy_y)**2)\r\n if distance > radius + 0.5: # adjustment for edge of cone\r\n return True\r\n elif pos_x > caster_x: # mouse/cursor east of caster\r\n if enemy_x < caster_x:\r\n return True\r\n elif pos_y > caster_y: # mouse/cursor south of caster\r\n if enemy_x > caster_x and enemy_y > caster_y:\r\n return False\r\n else:\r\n return True\r\n elif pos_y < caster_y: # mouse/cursor north of caster\r\n if enemy_x > caster_x and enemy_y < caster_y:\r\n return False\r\n else:\r\n return True\r\n else: # mouse/cursor directly east of caster\r\n if enemy_x > caster_x and abs(enemy_y - caster_y) < abs(enemy_x - caster_x):\r\n return False\r\n else:\r\n return True\r\n elif pos_x < caster_x: # mouse/cursor west of caster\r\n if enemy_x > caster_x:\r\n return True\r\n elif pos_y > caster_y: # mouse/cursor south of caster\r\n if enemy_x < caster_x and enemy_y > caster_y:\r\n return False\r\n else:\r\n return True\r\n elif pos_y < caster_y: # mouse/cursor north of caster\r\n if enemy_x < caster_x and enemy_y < caster_y:\r\n return False\r\n else:\r\n return True\r\n else: # mouse/cursor directly west of caster\r\n if enemy_x < caster_x and abs(enemy_y - caster_y) < abs(enemy_x - caster_x):\r\n return False\r\n else:\r\n return True\r\n elif pos_y < caster_y: # mouse/cursor directly north of caster\r\n if enemy_y < caster_y and abs(enemy_y - caster_y) > abs(enemy_x - caster_x):\r\n return False\r\n else:\r\n return True\r\n elif pos_y > caster_y: # mouse/cursor directly south of caster\r\n if enemy_y > caster_y and abs(enemy_y - caster_y) > abs(enemy_x - caster_x):\r\n return False\r\n else:\r\n return True\r\n else: # cursor on caster, should not happen. 
No one is in cone\r\n return True\r\n\r\nclass BurningHandsAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n spell_name: str = \"\",\r\n mana: int = 1,\r\n radius: int = 3,\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n self.spell_name = spell_name\r\n self.mana = mana\r\n self.radius = radius\r\n\r\n def perform(self) -> None:\r\n if self.engine.player.x == self.target_xy[0] and self.engine.player.y == self.target_xy[1]:\r\n raise exceptions.Impossible(\"Can't target cone on yourself.\")\r\n\r\n damage = 0\r\n\r\n cost = self.entity.battler.spells[self.spell_name]\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n num_dice = self.entity.level.current_level\r\n num_dice = min(num_dice, 5)\r\n for i in range(num_dice):\r\n damage = damage + randint(1, 4)\r\n attack_desc = f\"{self.entity.name.capitalize()} sprays fire.\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n else:\r\n attack_color = colors.enemy_atk\r\n\r\n enemies_hit_dam = []\r\n for enemy in self.engine.game_map.actors:\r\n distance = math.sqrt((self.entity.x - enemy.x)**2 + (self.entity.y - enemy.y)**2)\r\n if distance > self.radius:\r\n continue\r\n elif self.entity == enemy:\r\n continue\r\n elif not_in_cone(self.entity.x, self.entity.y, self.target_xy[0], self.target_xy[1], enemy.x, enemy.y, self.radius):\r\n #print(f\"Not in cone: {enemy.name} xy: {enemy.x}, {enemy.y}, caster: {self.entity.x}, {self.entity.y}, target: {self.target_xy[0]}, {self.target_xy[1]}\")\r\n continue\r\n else:\r\n save_dc = int((self.entity.battler.current_int - 10)/2) + 10 + 1\r\n enemy_damage = reflex_save_half(damage, save_dc, enemy.battler.reflex_save) # per-enemy result; don't overwrite the rolled base damage\r\n attack_desc += f\" {enemy.name} takes {enemy_damage} damage.\"\r\n enemies_hit_dam.append((enemy, enemy_damage))\r\n for enemy, damage in enemies_hit_dam:\r\n enemy.battler.take_damage(damage)\r\n if enemies_hit_dam == []:\r\n attack_desc += \" but no one is hit.\"\r\n self.engine.message_log.add_message(attack_desc, attack_color)\r\n self.entity.battler.use_mana(cost)\r\n\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastSelfBuffAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n buff_spell: str = \"\",\r\n target_xy: Optional[Tuple[int, int]] = None\r\n ):\r\n super().__init__(entity)\r\n self.buff_spell = buff_spell\r\n if not target_xy:\r\n target_xy = entity.x, entity.y\r\n self.target_xy = target_xy\r\n self.entity = entity\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n \"\"\"Return the actor at this action's destination.\"\"\"\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n caster_level = self.entity.level.current_level\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to buff.\")\r\n \r\n if self.buff_spell == \"\":\r\n raise exceptions.Impossible(\"No buff spell found: error.\")\r\n if self.buff_spell in (\"Mage Armor\", \"Shield\", \"Magic Weapon\", \"Alter Self\"):\r\n if self.entity.battler.spells[self.buff_spell] > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You don't have enough mana for that.\")\r\n if self.entity.battler.spells[self.buff_spell] > self.entity.level.current_level:\r\n raise exceptions.Impossible(\"You are not high enough level to cast that.\")\r\n if self.buff_spell in 
target.battler.current_buffs:\r\n self.engine.message_log.add_message(\r\n f\"{self.entity.name} refreshes {self.buff_spell}\", colors.white\r\n )\r\n else:\r\n self.engine.message_log.add_message(\r\n f\"{self.entity.name} gains {self.buff_spell}\", colors.white\r\n )\r\n if self.buff_spell == \"Mage Armor\":\r\n self.entity.battler.current_buffs[\"Mage Armor\"] = [self.engine.current_turn + caster_level * 10 * 60, 0]\r\n elif self.buff_spell in (\"Shield\", \"Magic Weapon\"):\r\n self.entity.battler.current_buffs[self.buff_spell] = [self.engine.current_turn + caster_level * 10, 0]\r\n elif self.buff_spell == \"Alter Self\": # (\"Alter Self\") is a plain string, not a tuple, so 'in' would do a substring test\r\n self.entity.battler.current_buffs[self.buff_spell] = [self.engine.current_turn + caster_level * 10 * 10, 0]\r\n else:\r\n raise exceptions.Impossible(\"That is not a buff spell: error.\")\r\n\r\n self.entity.battler.use_mana(self.entity.battler.spells[self.buff_spell])\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastSelfHealAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n heal_spell: str = \"\",\r\n target_xy: Optional[Tuple[int, int]] = None\r\n ):\r\n super().__init__(entity)\r\n self.heal_spell = heal_spell\r\n if not target_xy:\r\n target_xy = entity.x, entity.y\r\n self.target_xy = target_xy\r\n self.entity = entity\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n \"\"\"Return the actor at this action's destination.\"\"\"\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n caster_level = self.entity.level.current_level\r\n dice = 1\r\n level_cap = 5\r\n healing = 0\r\n if not target:\r\n raise exceptions.Impossible(\"Target not found.\")\r\n \r\n if self.heal_spell == \"\":\r\n raise exceptions.Impossible(\"No healing spell found: error.\")\r\n if self.heal_spell in (\"Cure Light Wounds\", \"Cure Moderate Wounds\", \"Cure Serious Wounds\", \"Cure Critical Wounds\"):\r\n if self.entity.battler.spells[self.heal_spell] > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You don't have enough mana for that.\")\r\n if self.entity.battler.spells[self.heal_spell] > self.entity.level.current_level:\r\n raise exceptions.Impossible(\"You are not high enough level to cast that.\")\r\n if self.entity.battler.hp >= self.entity.battler.max_hp:\r\n raise exceptions.Impossible(\"You do not need healing.\")\r\n if self.heal_spell == \"Cure Light Wounds\":\r\n dice = 1\r\n level_cap = 5\r\n elif self.heal_spell == \"Cure Moderate Wounds\":\r\n dice = 2\r\n level_cap = 10\r\n elif self.heal_spell == \"Cure Serious Wounds\":\r\n dice = 3\r\n level_cap = 15\r\n elif self.heal_spell == \"Cure Critical Wounds\":\r\n dice = 4\r\n level_cap = 20\r\n for all_dice in range(dice):\r\n healing += randint(1,8)\r\n healing += min(caster_level, level_cap) # caster level bonus is capped per spell\r\n healing = min(healing, (self.entity.battler.max_hp - self.entity.battler.hp))\r\n self.entity.battler.heal(healing)\r\n else:\r\n raise exceptions.Impossible(\"That is not a heal spell: error.\")\r\n self.engine.message_log.add_message(\r\n f\"{self.entity.name} heals {healing} hp.\", colors.white\r\n )\r\n self.entity.battler.use_mana(self.entity.battler.spells[self.heal_spell])\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastShockingGraspAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n 
):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n maximum_range = 1.43\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n distance = self.entity.distance(target.x, target.y)\r\n if distance > maximum_range:\r\n raise exceptions.Impossible(\"That target is too far (Melee only).\")\r\n\r\n damage = 0\r\n\r\n cost = self.entity.battler.spells[\"Shocking Grasp\"]\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n num_dice = self.entity.level.current_level\r\n num_dice = min(num_dice, 5)\r\n for i in range(num_dice):\r\n damage = damage + randint(1, 6)\r\n attack_desc = f\"{self.entity.name.capitalize()} shocks {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n\r\n (damage, attack_desc) = HandleAttack(self.entity.battler.melee_to_hit, target.battler.current_touch_ac, damage, attack_desc, 20, 2)\r\n self.engine.message_log.add_message(attack_desc, attack_color)\r\n target.battler.take_damage(damage)\r\n self.entity.battler.use_mana(cost)\r\n\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastScorchingRayAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n\r\n @property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n maximum_range = 5 + self.entity.level.current_level / 2\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n distance = self.entity.distance(target.x, target.y)\r\n if distance > maximum_range:\r\n raise exceptions.Impossible(\"That target is too far: \" + str(int(maximum_range)) + \" squares.\")\r\n cost = self.entity.battler.spells[\"Scorching Ray\"]\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n if self.entity.level.current_level >= 11:\r\n num_dice = 12\r\n elif self.entity.level.current_level >= 7:\r\n num_dice = 8\r\n else:\r\n num_dice = 4\r\n damage = 0\r\n for i in range(num_dice):\r\n damage += randint(1, 6)\r\n attack_desc = f\"{self.entity.name.capitalize()} scorches {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n\r\n (damage, attack_desc) = HandleAttack(self.entity.battler.ranged_to_hit, target.battler.current_touch_ac, damage, attack_desc, 20, 2)\r\n self.engine.message_log.add_message(attack_desc, attack_color)\r\n target.battler.take_damage(damage)\r\n self.entity.battler.use_mana(cost)\r\n\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass CastRayOfEnfeeblementAction(Action):\r\n def __init__(\r\n self,\r\n entity: Actor,\r\n target_xy: Tuple[int, int] = (0, 0),\r\n ):\r\n super().__init__(entity)\r\n self.target_xy = target_xy\r\n\r\n 
@property\r\n def target_actor(self) -> Optional[Actor]:\r\n return self.engine.game_map.get_actor_at_location(*self.target_xy)\r\n\r\n def perform(self) -> None:\r\n maximum_range = 5 + self.entity.level.current_level / 2\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n distance = self.entity.distance(target.x, target.y)\r\n if distance > maximum_range:\r\n raise exceptions.Impossible(\"That target is too far: \" + str(int(maximum_range)) + \" squares.\")\r\n\r\n str_damage = 0\r\n\r\n cost = self.entity.battler.spells[\"Ray of Enfeeblement\"]\r\n if cost > self.entity.battler.mana:\r\n raise exceptions.Impossible(\"You do not have enough mana for that.\")\r\n str_damage = randint(1, 6) + min(5, self.entity.level.current_level)\r\n attack_desc = f\"{self.entity.name.capitalize()} shoots a grey beam at {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n self.engine.last_target = target\r\n else:\r\n attack_color = colors.enemy_atk\r\n if (self.entity.battler.ranged_to_hit + randint(1, 20)) >= target.battler.current_touch_ac: # single ray; plain Action has no iterative penalty attribute\r\n if str_damage > 0:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} lowering str by {str_damage}.\", attack_color\r\n )\r\n target.battler.current_buffs[\"Ray of Enfeeblement\"] = [self.engine.current_turn + 10 * self.entity.level.current_level, str_damage]\r\n else:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} but does no damage.\", attack_color\r\n )\r\n else:\r\n self.engine.message_log.add_message(\r\n f\"{attack_desc} and misses.\", attack_color\r\n )\r\n self.entity.battler.use_mana(cost)\r\n\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass MeleeAction(ActionWithDirection):\r\n\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n\r\n damage = 0\r\n dam_string = \"punches\"\r\n crit_needs = 20\r\n crit_mult = 2\r\n\r\n if self.entity.equipment.main_hand == None:\r\n if len(self.attack_list) == 0:\r\n num_damage_dice = self.entity.battler.unarmed_num_dice\r\n for i in range(num_damage_dice):\r\n damage = damage + randint(1, self.entity.battler.unarmed_size_dice)\r\n else: \r\n for i in range(self.attack_list[2]):\r\n damage = damage + randint(1, self.attack_list[3])\r\n else:\r\n num_damage_dice = self.entity.equipment.main_hand.equippable.weapon_num_dice\r\n size_damage_dice = self.entity.equipment.main_hand.equippable.weapon_size_dice\r\n num_damage_dice, size_damage_dice = get_size_damage_diff(num_damage_dice, size_damage_dice, self.entity)\r\n crit_needs = self.entity.equipment.main_hand.equippable.crit_needs\r\n crit_mult = self.entity.equipment.main_hand.equippable.crit_mult\r\n for i in range(num_damage_dice):\r\n damage = damage + randint(1, size_damage_dice)\r\n dam_string = self.entity.equipment.main_hand.equippable.damage_name\r\n\r\n if len(self.attack_list) == 0:\r\n damage = damage + self.entity.battler.melee_to_damage\r\n else:\r\n dam_string = self.attack_list[1]\r\n if self.attack_list[0] == 'P':\r\n damage = damage + self.entity.battler.melee_to_damage\r\n elif self.attack_list[0] == '2H':\r\n damage = damage + int(1.5 * self.entity.battler.melee_to_damage) #This could fail in edge cases, like weapon specialization. 
Work for str and power attack though\r\n else: #should be 'S'\r\n damage = damage + int(self.entity.battler.melee_to_damage / 2)\r\n \r\n attack_desc = f\"{self.entity.name.capitalize()} {dam_string} {target.name}\"\r\n if self.entity is self.engine.player:\r\n attack_color = colors.player_atk\r\n else:\r\n attack_color = colors.enemy_atk\r\n\r\n (damage, attack_desc) = HandleAttack(self.entity.battler.melee_to_hit, target.battler.current_ac, damage, attack_desc, crit_needs, crit_mult)\r\n self.engine.message_log.add_message(attack_desc, attack_color)\r\n target.battler.hp -= damage\r\n\r\n\r\ndef HandleAttack(attack_to_hit, target_ac, damage, attack_desc, crit_needs = 20, crit_mult = 2):\r\n first_roll = randint(1, 20)\r\n if first_roll == 20:\r\n first_roll = 30\r\n if damage < 1: damage = 1 #penalties don't reduce damage below 1. DR can, though\r\n if first_roll >= crit_needs and attack_to_hit + first_roll >= target_ac: #critical threat\r\n if randint(1, 20) + attack_to_hit >= target_ac: #confirmed crit 20 does not guarantee this time\r\n damage = damage * crit_mult\r\n attack_desc += f\" *Crit* for {damage} hit points.\"\r\n else:\r\n attack_desc += f\" for {damage} hit points.\"\r\n elif first_roll + attack_to_hit >= target_ac:\r\n attack_desc += f\" for {damage} hit points.\"\r\n else:\r\n attack_desc += f\" and misses.\"\r\n damage = 0\r\n return (damage, attack_desc)\r\n\r\nclass FullAttackMeleeAction(ActionWithDirection):\r\n def perform(self) -> None:\r\n target = self.target_actor\r\n if not target:\r\n raise exceptions.Impossible(\"Nothing to attack.\")\r\n\r\n if self.entity.battler.combat_mode == \"Ranged\":\r\n if self.entity.equipment.main_hand == None:\r\n self.entity.battler.combat_mode = \"Melee\"\r\n self.engine.message_log.add_message(f\"{self.entity.name} enters melee mode.\")\r\n elif \"Quick Draw\" in self.entity.battler.combat_feats:\r\n self.entity.battler.combat_mode = \"Melee\"\r\n self.engine.message_log.add_message(f\"{self.entity.name} quickly shifts to melee mode.\")\r\n elif self.entity.equipment.main_hand !=None:\r\n self.entity.battler.combat_mode = \"Melee\"\r\n self.engine.message_log.add_message(f\"{self.entity.name} clumsily grabs its melee weapon.\")\r\n check_turn_advance(self.engine, self.entity)\r\n return MeleeAction(self.entity, self.dx, self.dy).perform()\r\n else:\r\n raise exceptions.Impossible(\"This shouldn't be possible.\")\r\n\r\n if self.entity.equipment.main_hand != None:\r\n if self.entity.battler.bab < 6: #BAB can be 0, still 1 attack\r\n check_turn_advance(self.engine, self.entity)\r\n return MeleeAction(self.entity, self.dx, self.dy).perform()\r\n else:\r\n iterative_penalty = 0\r\n bab_counter = self.entity.battler.bab #Doesn't handle BAB>20, but can't happen currently\r\n while bab_counter > 0:\r\n if target.battler.is_dead == True:\r\n target = FindNewTarget(self.engine, target, ranged = False)\r\n if target == None:\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n self.engine.message_log.add_message(f\"{self.entity.name} retargets to {target.name}.\", colors.player_atk)\r\n self.dx = target.x - self.entity.x\r\n self.dy = target.y - self.entity.y\r\n MeleeAction(self.entity, self.dx, self.dy, iterative_penalty).perform()\r\n bab_counter -= 5\r\n iterative_penalty += 5\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\n elif len(self.entity.battler.full_attack) == 0: #No weapons, no full attack\r\n check_turn_advance(self.engine, self.entity)\r\n return MeleeAction(self.entity, self.dx, 
self.dy).perform()\r\n\r\n else:\r\n for i in range(len(self.entity.battler.full_attack)):\r\n if target.battler.is_dead == True:\r\n target = FindNewTarget(self.engine, target, ranged = False)\r\n if target == None:\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n self.engine.message_log.add_message(f\"{self.entity.name} retargets to {target.name}.\", colors.player_atk)\r\n self.dx = target.x - self.entity.x\r\n self.dy = target.y - self.entity.y\r\n if self.entity.battler.full_attack[i][0] == 'P': #Primary attack\r\n MeleeAction(self.entity, self.dx, self.dy, 0, self.entity.battler.full_attack[i]).perform()\r\n elif self.entity.battler.full_attack[i][0] == 'S': #Secondary attack\r\n MeleeAction(self.entity, self.dx, self.dy, 5, self.entity.battler.full_attack[i]).perform()\r\n elif self.entity.battler.full_attack[i][0] == '2H': #\"Two-hand\": hard hitting single attack (crocodile bite)\r\n MeleeAction(self.entity, self.dx, self.dy, 0, self.entity.battler.full_attack[i]).perform()\r\n check_turn_advance(self.engine, self.entity)\r\n return None\r\n\r\nclass MovementAction(ActionWithDirection):\r\n def perform(self) -> None:\r\n dest_x, dest_y = self.dest_xy\r\n\r\n if not self.engine.game_map.in_bounds(dest_x, dest_y):\r\n # Destination is out of bounds.\r\n raise exceptions.Impossible(\"That way is blocked.\")\r\n if not self.engine.game_map.tiles[\"walkable\"][dest_x, dest_y]:\r\n # Destination is blocked by a tile.\r\n raise exceptions.Impossible(\"That way is blocked.\")\r\n if self.engine.game_map.get_blocking_entity_at_location(dest_x, dest_y):\r\n # Destination is blocked by a tile.\r\n raise exceptions.Impossible(\"That way is blocked.\")\r\n\r\n for gold in self.engine.game_map.gold_piles:\r\n if dest_x == gold.x and dest_y == gold.y:\r\n self.engine.message_log.add_message(f\"{self.entity.name} picks up {gold.number} gold pieces.\")\r\n self.entity.battler.gold += gold.number\r\n self.engine.game_map.entities.remove(gold)\r\n self.entity.move(self.dx, self.dy)\r\n return\r\n\r\n if self.entity == self.engine.player:\r\n log_items = True\r\n ground_message = \"\"\r\n for item_maybe in self.engine.game_map.entities:\r\n if dest_x == item_maybe.x and dest_y == item_maybe.y:\r\n if item_maybe.blocks_movement == True:\r\n log_items = False\r\n else:\r\n ground_message = ground_message + f\"{item_maybe.name} is here.\\n\"\r\n if log_items == True and len(ground_message) > 0:\r\n self.engine.message_log.add_message(ground_message)\r\n\r\n check_turn_advance(self.engine, self.entity)\r\n self.entity.move(self.dx, self.dy)\r\n \r\n\r\nclass BumpAction(ActionWithDirection):\r\n def perform(self) -> None:\r\n if self.target_actor:\r\n return FullAttackMeleeAction(self.entity, self.dx, self.dy).perform()\r\n else:\r\n return MovementAction(self.entity, self.dx, self.dy).perform()\r\n\r\nclass LongRestAction(Action):\r\n def __init__(self,\r\n entity: Actor,\r\n duration: int = 0) -> None:\r\n self.entity = entity\r\n self.duration = duration\r\n\r\n def perform(self) -> None:\r\n player = self.entity\r\n enemy_spotted = \"Derp?\"\r\n rest_check = False\r\n max_rest = 0\r\n if self.duration == 0: #Rest until full hps/mana\r\n while rest_check == False and max_rest < 500: #increase with slower regen\r\n for enemy in self.engine.game_map.actors:\r\n if enemy == self.engine.player:\r\n continue\r\n if self.engine.game_map.visible[enemy.x][enemy.y] == True:\r\n rest_check = True\r\n enemy_spotted = enemy\r\n if rest_check == True:\r\n raise 
exceptions.Impossible(f\"{enemy_spotted.name} spotted.\")\r\n break\r\n else:\r\n check_turn_advance(self.engine, self.entity) #need better regen plan\r\n player.battler.hp = min(player.battler.hp + 1, player.battler.max_hp)\r\n player.battler.mana = min(player.battler.mana + 1, player.battler.max_mana)\r\n max_rest += 1\r\n self.engine.update_fov()\r\n if player.battler.hp >= player.battler.max_hp and player.battler.mana >= player.battler.max_mana:\r\n rest_check = True\r\n self.engine.message_log.add_message(f\"{player.name} is fully rested.\", colors.white)\r\n else: #Fixed number of turns\r\n while rest_check == False and max_rest < self.duration:\r\n for enemy in self.engine.game_map.actors:\r\n if enemy == self.engine.player:\r\n continue\r\n if self.engine.game_map.visible[enemy.x][enemy.y] == True:\r\n rest_check = True\r\n enemy_spotted = enemy\r\n if rest_check == True:\r\n raise exceptions.Impossible(f\"{enemy_spotted.name} spotted.\")\r\n break\r\n else:\r\n check_turn_advance(self.engine, self.entity) #need better regen plan\r\n player.battler.hp = min(player.battler.hp + 1, player.battler.max_hp)\r\n player.battler.mana = min(player.battler.mana + 1, player.battler.max_mana)\r\n max_rest += 1\r\n self.engine.update_fov()\r\n self.engine.message_log.add_message(f\"{player.name} stops resting.\", colors.white)\r\n \r\n\r\nclass FastMoveAction(ActionWithDirection):\r\n def perform(self) -> None:\r\n dest_x, dest_y = self.dest_xy\r\n movecheck = False\r\n max_moves = 1\r\n enemy_spotted = \"Derp?\"\r\n while movecheck == False and max_moves < 100:\r\n for enemy in self.engine.game_map.actors:\r\n if enemy == self.engine.player:\r\n continue\r\n if self.engine.game_map.visible[enemy.x][enemy.y] == True:\r\n movecheck = True\r\n enemy_spotted = enemy\r\n destin_x = self.engine.player.x + self.dx\r\n destin_y = self.engine.player.y + self.dy\r\n if not self.engine.game_map.in_bounds(destin_x, destin_y):\r\n # Destination is out of bounds.\r\n raise exceptions.Impossible(\"That way is blocked. (out of map)\")\r\n break\r\n if not self.engine.game_map.tiles[\"walkable\"][destin_x, destin_y]:\r\n # Destination is blocked by a tile.\r\n break\r\n raise exceptions.Impossible(\"That way is blocked. (tile)\")\r\n if self.engine.game_map.get_blocking_entity_at_location(destin_x, destin_y):\r\n # Destination is blocked by a blocking entity.\r\n raise exceptions.Impossible(\"That way is blocked. 
(entity)\")\r\n break\r\n\r\n if self.entity == self.engine.player:\r\n log_items = True\r\n ground_message = \"\"\r\n for item_maybe in self.engine.game_map.entities:\r\n if destin_x == item_maybe.x and destin_y == item_maybe.y:\r\n if item_maybe.blocks_movement == True:\r\n log_items = False\r\n else:\r\n ground_message = ground_message + f\"{item_maybe.name} is here.\\n\"\r\n if log_items == True and len(ground_message) > 0:\r\n self.engine.message_log.add_message(ground_message)\r\n\r\n if movecheck == True:\r\n raise exceptions.Impossible(f\"{enemy_spotted.name} spotted.\")\r\n break\r\n else:\r\n check_turn_advance(self.engine, self.entity)\r\n self.entity.move(self.dx, self.dy)\r\n max_moves += 1\r\n self.engine.update_fov()\r\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":56349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125208287","text":"import json\nimport time\n\nimport boto3\nimport botocore\nfrom botocore.config import Config\nfrom botocore.exceptions import ClientError\n\nconfig = Config(retries={'max_attempts': 10, 'mode': 'standard'})\n\n\nclass Deleter:\n def __init__(self, profile=None):\n if profile:\n self._session = boto3.Session(profile_name=profile)\n else:\n self._session = boto3.Session()\n self._iam = self._session.client('iam', config=config)\n\n def delete_policy(self, policy_arn):\n try:\n self._iam.delete_policy(PolicyArn=policy_arn)\n print(f'Deleted policy {policy_arn}')\n except botocore.exceptions.ClientError as error:\n print(f'Error deleting policy: {error.response[\"Error\"][\"Message\"]} - Skipping')\n\n def delete_role(self, role_name):\n # Detach policies\n more_results = True\n marker = None\n while more_results:\n if marker:\n res = self._iam.list_attached_role_policies(RoleName=role_name, Marker=marker)\n else:\n res = self._iam.list_attached_role_policies(RoleName=role_name)\n for policy in res['AttachedPolicies']:\n try:\n self._iam.detach_role_policy(RoleName=role_name, PolicyArn=policy['PolicyArn'])\n except botocore.exceptions.ClientError as error:\n print(f'Error detaching role policy: {error.response[\"Error\"][\"Message\"]} - Skipping')\n more_results = res.get('IsTruncated', False)\n if more_results:\n marker = res['Marker']\n\n # Delete Inline policies\n more_results = True\n marker = None\n while more_results:\n if marker:\n res = self._iam.list_role_policies(RoleName=role_name, Marker=marker)\n else:\n res = self._iam.list_role_policies(RoleName=role_name)\n for policy_name in res['PolicyNames']:\n try:\n self._iam.delete_role_policy(RoleName=role_name, PolicyName=policy_name)\n except botocore.exceptions.ClientError as error:\n print(f'Error deleting inline role policy: {error.response[\"Error\"][\"Message\"]} - Skipping')\n more_results = res.get('IsTruncated', False)\n if more_results:\n marker = res['Marker']\n\n # Remove from Instance Profiles\n more_results = True\n marker = None\n while more_results:\n if marker:\n res = self._iam.list_instance_profiles_for_role(RoleName=role_name, Marker=marker)\n else:\n res = self._iam.list_instance_profiles_for_role(RoleName=role_name)\n for instance_profile in res['InstanceProfiles']:\n try:\n self._iam.remove_role_from_instance_profile(RoleName=role_name, InstanceProfileName=instance_profile['InstanceProfileName'])\n except botocore.exceptions.ClientError as error:\n print(f'Error removing role from instance profile: {error.response[\"Error\"][\"Message\"]} - Skipping')\n more_results = 
res.get('IsTruncated', False)\n if more_results:\n marker = res['Marker']\n\n # Delete the Role\n try:\n self._iam.delete_role(RoleName=role_name)\n print(f'Deleted role {role_name}')\n except botocore.exceptions.ClientError as error:\n print(f'Error deleting role: {error.response[\"Error\"][\"Message\"]} - Skipping')\n","sub_path":"airiam/models/Deleter.py","file_name":"Deleter.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"378655539","text":"from Entity import Entity\nimport pygame\n\n\nclass Laser(Entity):\n def __init__(self, pos, heading):\n Entity.__init__(self, 0, pos)\n self.lifespan = 120\n self.angle = heading\n self.setForwardVel(self.C)\n\n def update(self):\n Entity.update(self)\n self.lifespan-=1\n if self.lifespan<=0:\n self.markedForDeletion = True\n\n\n def render(self, screen):\n pygame.draw.line(screen, (255, 255, 255),\n (int(self.oldPosX), int(self.oldPosY)), (int(self.posX), int(self.posY)))\n","sub_path":"asteroids/Laser.py","file_name":"Laser.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"57894588","text":"\"\"\"\nAUTHOR : Robin Singh\nIMPLEMENTATION OF A SINGLY LINKED LIST\n\n\"\"\"\n\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass Linked_list:\n def __init__(self):\n self.start = None\n\n def insert_start(self,ele):\n new_node = Node(ele)\n if self.start == None:\n self.start=new_node\n else:\n new_node.next = self.start\n self.start = new_node\n\n def insert_end(self,ele):\n new_node = Node(ele)\n if self.start==None:\n self.start=new_node\n else:\n temp = self.start\n while temp.next != None:\n temp=temp.next\n temp.next = new_node\n\n def delete(self):\n if self.start ==None:\n print(\"Empty\")\n else:\n temp = self.start\n self.start = self.start.next\n return temp.data\n\n def delete_end(self):\n if self.start == None:\n print(\"list has no elements\")\n return\n if self.start.next == None: # single node: the head is also the tail\n print(\"deleted ele\",self.start.data)\n self.start = None\n return\n n = self.start\n while n.next.next != None:\n n = n.next\n print(\"deleted ele\",n.next.data)\n n.next = None\n\n\n\n\n def display_list(self):\n if self.start == None:\n return True\n else:\n temp = self.start\n while temp:\n print(temp.data,end=\" \")\n temp = temp.next\n\n def size(self):\n curr = self.start\n count = 0\n while curr:\n count+=1\n curr = curr.next\n return count\n\n\n\n\n\nif __name__ == '__main__':\n a = Linked_list()\n while(1):\n print(\"\\n1.Insert At Start\\t2.Insert At End\\t3.Delete Start\\t4.Delete End\\t5.Display\\t6.Size\\t7.Exit\")\n ch = int(input(\"Enter your choice\"))\n\n if ch == 7:\n break\n\n elif ch == 1:\n ele = int(input(\"Enter your number to be inserted at start\"))\n a.insert_start(ele)\n a.display_list()\n\n\n elif ch == 2:\n ele = int(input(\"Enter your number to be inserted at end\"))\n a.insert_end(ele)\n a.display_list()\n\n\n\n elif ch == 3:\n ele2 = a.delete()\n print(\"deleted element is \",ele2)\n\n\n elif ch == 5:\n if (a.display_list()):\n print(\"print\")\n\n elif ch == 6:\n c=a.size()\n print(\"size of linked list is \",c)\n\n elif ch == 4:\n a.delete_end()\n\n\n else:\n print(\"Invalid Option\")\n\n\n\n\n\n\n","sub_path":"Data Structures/Linked List/Singly Linked list.py","file_name":"Singly Linked list.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"44393520","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sept 11 13:23:10 2020\n@author: paper2code\n\"\"\"\nimport os\nimport json\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nimport click\n\nimport tqdm\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nfrom flask import Flask, jsonify, request\n\nfrom haystack import Finder\nfrom haystack.indexing.cleaning import clean_wiki_text\nfrom haystack.indexing.utils import convert_files_to_dicts, fetch_archive_from_http\nfrom haystack.reader.farm import FARMReader\nfrom haystack.reader.transformers import TransformersReader\nfrom haystack.utils import print_answers\n\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.retriever.sparse import ElasticsearchRetriever\n\ndocument_store = ElasticsearchDocumentStore(host=\"elasticsearch\", username=\"\", password=\"\", index=\"arxiv-qa\")\n\ndef filter_answers(results: dict, details: str = \"all\"):\n answers = results[\"answers\"]\n if details != \"all\":\n if details == \"minimal\":\n keys_to_keep = set([\"answer\", \"context\"])\n elif details == \"medium\":\n keys_to_keep = set([\"answer\", \"context\", \"score\"])\n else:\n keys_to_keep = answers.keys()\n\n # filter the results\n filtered_answers = []\n for ans in answers:\n filtered_answers.append({k: ans[k] for k in keys_to_keep})\n return filtered_answers\n else:\n return results\n\ndef train_model(input_file='../data/arxiv-metadata-oai.json'):\n print(\"training the model...\")\n data = []\n with tqdm.tqdm(total=os.path.getsize(input_file)) as pbar:\n with open(input_file, 'r') as f:\n for line in f:\n pbar.update(len(line))\n data.append(json.loads(line))\n data = pd.DataFrame(data)\n document_store.write_documents(data[['title', 'abstract']].rename(columns={'title':'name','abstract':'text'}).to_dict(orient='records'))\n\nretriever = ElasticsearchRetriever(document_store=document_store)\nreader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=True, context_window_size=500)\nfinder = Finder(reader, retriever)\n\napp = Flask(__name__)\n\n@app.route('/query')\ndef query():\n question = request.args.get('question')\n prediction = finder.get_answers(question=question, top_k_retriever=20, top_k_reader=1)\n result = filter_answers(prediction, details=\"minimal\")\n app.logger.info('question: %s', question)\n app.logger.info('result: %s', result)\n return jsonify(result)\n\n@click.command()\n@click.option(\"--host\", default=\"0.0.0.0\", help=\"Server host.\")\n@click.option(\"--port\", default=\"5006\", help=\"Server port.\")\n@click.option(\"--train\", default=False, is_flag=True, help=\"Train the model.\")\ndef service(host, port, train):\n \"\"\"Run the paper2code arXiv-QA server.\"\"\"\n if train:\n train_model()\n handler = RotatingFileHandler('../logs/arxiv-qa.log', maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.run(host=host, port=port)\n\nif __name__ == '__main__':\n service()\n","sub_path":"services/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230864363","text":"import os\r\nimport json\r\nimport yaml\r\nimport asyncio\r\nfrom copy import deepcopy\r\nimport base64\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport paho.mqtt.publish as publish\r\nimport paho.mqtt.subscribe as subscribe\r\n\r\nfrom 
ObjectDetector import ObjectDetector\r\n\r\ndet = ObjectDetector()\r\n\r\n# consts\r\nconfig_path = 'obj-det-mapper.yaml'\r\n\r\n# configs\r\ndevice_id = os.environ['DEVICE_ID']\r\n\r\nwith open(config_path, 'r') as config_file:\r\n config = yaml.full_load(config_file)\r\n \r\n mqtt_broker_host = config['mqtt_broker_host']\r\n mqtt_broker_port = config['mqtt_broker_port']\r\n sync_interval = config['sync_interval']\r\n\r\n# deep copy the following templates and fill actual values\r\n# to create request body in sync_twin()\r\n\r\nDeviceStateTemplate = {\r\n 'state': '{}'\r\n}\r\n\r\nTwinUpdateTemplate = {\r\n 'twin': {\r\n 'payload': {\r\n 'actual': {\r\n 'value': '{}',\r\n 'metadata': {\r\n 'timestamp': 0\r\n }\r\n },\r\n 'metadata': {\r\n 'type': 'Updated'\r\n }\r\n },\r\n }\r\n}\r\n\r\ndevice_prefix = '$hw/events/device/'\r\nstate_update_suffix = '/state/update'\r\ntwin_update_suffix = '/twin/update'\r\ntwin_get_suffix = '/twin/get'\r\ntwin_result_get_suffix = '/twin/get/result'\r\n\r\ntwin_result_future = asyncio.Future()\r\nconnect_future = asyncio.Future()\r\n\r\n# mqtt\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected with result code \"+str(rc))\r\n # client.subscribe(\"$SYS/#\")\r\n\r\n client.subscribe('{}{}{}'.format(device_prefix, device_id, twin_result_get_suffix))\r\n client.message_callback_add(\r\n '{}{}{}'.format(device_prefix, device_id, twin_result_get_suffix),\r\n on_result\r\n )\r\n\r\n # connect_future.set_result('connected')\r\n\r\n# a default message handler for unmatched message\r\ndef on_message(client, userdata, msg):\r\n print(msg.topic+' '+str(msg.payload))\r\n\r\n# result message handler\r\ndef on_result(client, userdata, msg):\r\n print(msg.payload)\r\n\r\n global twin_result_future\r\n twin_result_future.set_result(json.loads(msg.payload))\r\n\r\nclient = mqtt.Client()\r\nclient.on_connect = on_connect\r\nclient.on_message = on_message\r\n\r\nclient.connect(mqtt_broker_host, mqtt_broker_port, 60)\r\nclient.loop_start()\r\n\r\n# work flow\r\n\r\ndef update_device_state(state):\r\n print('update_device_state()')\r\n\r\n device_state = deepcopy(DeviceStateTemplate)\r\n device_state['state'] = device_state['state'].format(state)\r\n\r\n msg_info = client.publish(\r\n '{}{}{}'.format(device_prefix, device_id, state_update_suffix),\r\n payload=json.dumps(device_state)\r\n )\r\n\r\n msg_info.wait_for_publish()\r\n\r\nasync def sync_twin():\r\n print('sync_twin()')\r\n\r\n twin_update_body = deepcopy(TwinUpdateTemplate)\r\n\r\n # do actual work\r\n objs = det.detect()\r\n result_dict = {\r\n 'count': objs.shape[2]\r\n }\r\n result_json = json.dumps(result_dict)\r\n\r\n twin_update_body['twin']['payload']['actual']['value'] = '{}'.format(str(base64.b64encode(result_json.encode('utf-8')), 'utf-8'))\r\n twin_update_body['twin']['payload']['metadata']['type'] = 'Updated'\r\n\r\n print(device_id, twin_update_body)\r\n msg_info = client.publish(\r\n '{}{}{}'.format(device_prefix, device_id, twin_update_suffix),\r\n payload=json.dumps(twin_update_body)\r\n )\r\n msg_info.wait_for_publish()\r\n\r\n# main\r\nasync def main():\r\n # connected = await connect_future\r\n\r\n update_device_state('online')\r\n\r\n while True:\r\n await sync_twin()\r\n await asyncio.sleep(sync_interval)\r\n\r\nif __name__ == '__main__':\r\n 
asyncio.run(main())\r\n","sub_path":"openvino-demo/files/Object-Detect-Mapper.py","file_name":"Object-Detect-Mapper.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"209167945","text":"#!/usr/bin/env python3 -tt\nimport getopt\nimport collections\nimport sys\nimport os\nimport logging\nfrom colorama import Fore\nfrom prettytable import PrettyTable\n\nlogging.basicConfig(filename='procenv.log', filemode='a+', level=logging.INFO)\nlogging.info(\"procenv.py started\")\n\n\ndef get_env():\n logging.info(\"Calling function get_env() in procenv.py\")\n dic = collections.OrderedDict()\n dic[\"CWD\"] = os.getcwd()\n dic[\"Home\"] = os.environ['HOME']\n dic[\"PWD\"] = os.environ['PWD']\n dic[\"Path\"] = os.environ['PATH']\n dic[\"Shell\"] = os.environ['SHELL']\n dic[\"User\"] = os.environ['USER']\n dic[\"UID\"] = str(os.getuid())\n dic[\"GID\"] = str(os.getgid())\n dic[\"VEnv\"] = os.environ['VIRTUAL_ENV']\n return dic\n\n\ndef get_namePid(pid):\n logging.info(\"Calling function get_namePid() in procenv.py\")\n with open('/proc/' + pid + '/comm') as f:\n for line in f:\n res = line.strip()\n return res\n\n\ndef get_statusPid(pid):\n logging.info(\"Calling function get_statusPid() in procenv.py\")\n with open('/proc/' + pid + '/stat') as f:\n for line in f:\n res = line.strip()\n return res.split(\" \")[2]\n\n\ndef get_ppid(pid):\n logging.info(\"Calling function get_ppid() in procenv.py\")\n with open('/proc/' + pid + '/stat') as f:\n for line in f:\n res = line.strip()\n return res.split(\" \")[3]\n\n\ndef get_tree():\n logging.info(\"Calling function get_tree() in procenv.py\")\n dic = collections.OrderedDict()\n pid = str(os.getpid())\n\n i = 0\n while pid != '0':\n dic[i] = pid\n pid = get_ppid(pid)\n i = i + 1\n return dic\n\nif __name__ == '__main__':\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'ep', ['env', 'pidtree'])\n\n except getopt.GetoptError:\n logging.error(\"Invalid argument\")\n sys.exit(1)\n\n for opt, arg in opts:\n if (opt in ('-e', '--env')):\n dic = get_env()\n for key, value in dic.items():\n print(key + ': \\t' + Fore.RED + value + Fore.RESET)\n\n elif (opt in ('-p', '--pidtree')):\n dic = get_tree()\n x = PrettyTable([Fore.RED + \"PID\", \"Name\", \"Status\" + Fore.RESET])\n x.align[Fore.RED + \"PID\"] = \"l\"\n x.align[\"Name\"] = \"l\"\n for key, value in dic.items():\n x.add_row([value, get_namePid(str(value)),\n get_statusPid(str(value))])\n print(x)\n","sub_path":"L2/procenv.py","file_name":"procenv.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529116173","text":"import os\nimport re\nimport heapq\nfrom collections import Counter\nimport unidecode\nimport math\nimport numpy as np\nimport flask\nfrom flask import Flask, request, render_template, url_for, abort, redirect, flash, g, abort\nimport MarkovText.MarkovText as Mark\nfrom . import models\nfrom . 
import app\nfrom PIL import Image\nfrom wordcloud import WordCloud\n\n\nfrom bokeh.plotting import figure, output_file, show, save\nfrom bokeh.charts import Bar\n# prepare some data, a Pandas GroupBy object in this case\nfrom bokeh.models import HoverTool\nfrom bokeh.charts.attributes import ColorAttr, CatAttr\nfrom bokeh.resources import CDN\nfrom bokeh.resources import INLINE\nfrom bokeh.util.string import encode_utf8\nfrom bokeh.embed import components\n\n\n\ncurrent_dir = os.path.dirname(__file__)\nstatic_dir = os.path.join(current_dir,'static')\nm = Mark.Markov()\n\ndef create_word_cloud():\n mask = np.array(Image.open(os.path.join(current_dir,'static','donald_trump_hair.png')))\n wc = WordCloud(background_color=\"white\", max_words=2000, mask=mask)\n wc_text = \"\"\n # add to the markov dict\n for filename in os.listdir(os.path.join(current_dir,'static')):\n if filename.endswith(\".txt\"):\n file_path = os.path.join(static_dir,filename)\n with open(file_path,\"r\") as f:\n t = f.read()\n # also want to create cloud\n wc_text = wc_text + unidecode.unidecode(t).replace('“','\"').replace('”','\"')\n\n wc.generate(wc_text)\n wc.to_file(os.path.join(static_dir,'trump_cloud.png'))\n\ndef create_bar_chart():\n wc_text = \"\"\n # add to the markov dict\n wc = WordCloud(background_color=\"white\", max_words=2000)\n for filename in os.listdir(static_dir):\n if filename.endswith(\".txt\"):\n file_path = os.path.join(static_dir,filename)\n with open(file_path,\"r\") as f:\n t = f.read()\n wc_text = wc_text + unidecode.unidecode(t).replace('“','\"').replace('”','\"')\n\n words_list = wc.process_text(wc_text.lower())\n total_words = sum(word[1] for word in words_list)\n top_words = sorted(heapq.nlargest(10, words_list, key=lambda x: x[1]), key=lambda x: x[1], reverse=True)\n percents = [word[1]/total_words for word in top_words]\n data = {'Word': [word[0] for word in top_words], 'percent': percents}\n\n p = Bar(data, label=CatAttr(columns=['Word'], sort=False), values='percent', tools='hover', responsive=True)\n hover = p.select(dict(type=HoverTool))\n hover.tooltips = [('Word',' $x')]\n p.toolbar_location = None\n\n # specify how to output the plot(s)\n #out = os.path.join(static_dir,'chart.html')\n #output_file(out)\n #plot = file_html(p, CDN, \"plot\")\n #save(p)\n app.plot = p\n\ndef generate_markov_dict():\n for filename in os.listdir(static_dir):\n if filename.endswith(\".txt\"):\n file_path = os.path.join(static_dir,filename)\n with open(file_path,\"r\") as f:\n t = unidecode.unidecode(f.read()).replace('“','').replace('”','').replace('\"','')\n m.add_to_dict(t)\n\n\n\n@app.before_first_request\ndef init_request():\n generate_markov_dict()\n p = create_bar_chart()\n create_word_cloud()\n\n@app.route('/')\ndef home():\n text = m.create_sentences(10)\n return render_template('home.html', text=text)\n\n@app.route('/speak/')\ndef speak(num=5):\n try:\n text = m.create_sentences(int(num))\n return render_template('home.html', text=text)\n except:\n abort(404)\n\n@app.route('/data')\ndef data():\n\n js_resources = INLINE.render_js()\n css_resources = INLINE.render_css()\n script, div = components(app.plot, INLINE)\n\n cloud_image = os.path.join(current_dir,'generated','trump_cloud.png')\n html = render_template(\n 'data.html',\n plot_script=script,\n plot_div=div,\n js_resources=js_resources,\n css_resources=css_resources,\n cloud_image=cloud_image)\n\n\n\n return 
html\n\n","sub_path":"TrumpSpeak/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573953692","text":"import requests\n\nchars = [\n 'villager'\n]\n\ndef fetch_character_img():\n\n for i in range(len(chars)):\n stringy = 'https://www.proguides.com/api/v2/game-resources/super-smash-bros-ultimate/static-data/characters/by-key/{0}'.format(chars[i])\n img_data = requests.get(stringy).content\n with open('data_'+chars[i]+'.json', 'wb') as handler:\n handler.write(img_data)\n\n\nfetch_character_img()","sub_path":"get_char_data.py","file_name":"get_char_data.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627381827","text":"import numpy as np\n# import glob, os\nimport pandas as pd\n# import numpy as np\n# import cv2 as cv\nimport keras\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Dense, Flatten, Dropout\nfrom keras.layers import Conv2D, MaxPooling2D,AveragePooling2D,GaussianNoise\nfrom keras.layers import LeakyReLU\nfrom keras.models import Sequential\nfrom keras import regularizers\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Input\nfrom keras import optimizers\n# from keras.applications.vgg19 import VGG19\n# from sklearn.model_selection import train_test_split\n\n\npath = 'aligned'\npath1 = 'val'\n# erects = pd.read_csv('bb_landmark/loose_bb_train.csv')\n# blah = np.asarray(erects)\n# num_epoch = 10\n###########################\n#Define model\n###########################\n\n# model = InceptionV3(weights = None, include_top = True, input_shape = (100, 80, 1), classes = 2)\n\n# model = VGG19(\n# \tinclude_top=True,\n# \tweights=None,\n# \tinput_tensor=None,\n# \tinput_shape=(100, 80, 1),\n# \tpooling=None, classes=500\n# \t)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(3, (4,5), strides=(1,1), padding='same', input_shape=(100,80,3)))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(32, (1,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(32, (3,1), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(64, (1,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(64, (3,1), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(64, (3,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(128, (1,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(128, (3,1), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(128, (3,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(256, (1,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(256, (3,1), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(256, (3,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Conv2D(512, (1,3), strides=(1,1), 
padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(512, (3,1), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Conv2D(512, (3,3), strides=(1,1), padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))\nmodel.add(BatchNormalization())\n\nmodel.add(Flatten())\nmodel.add(Dense(2048))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(1024, kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(Dropout(0.4))\n\nmodel.add(Dense(500, activation='softmax'))\n# model.summary()\n\n##########################\nmodel.compile(\n\toptimizer = optimizers.adam(lr = 1e-4),\n\tloss = 'categorical_crossentropy',\n\tmetrics = ['accuracy']\n\t)\n\nmodel.summary()\n###########################\n# Train = np.load(\"label.npy\")\n# # print Train.shape\n# Test = np.load(\"test_labels.npy\")\n# print Test.shape\n\n# data = ImageDataGenerator()\n# train_data = data.flow_from_directory(\n# \tdirectory=path,\n# \tcolor_mode='grayscale',\n# \ttarget_size=(100,80),\n# \tbatch_size=400\n# \t)\n\n\ntrain_data = ImageDataGenerator(\n    rescale=1./255,\n    shear_range=0.2,\n    zoom_range=0.2,\n    horizontal_flip=True)\n\ntest_data = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_data.flow_from_directory(\n    path,\n    target_size=(100,80),\n\tbatch_size=138\n\t)\n\nvalidation_generator = test_data.flow_from_directory(\n    path1,\n    target_size=(100,80),\n\tbatch_size=300\n    )\n\nhistory = model.fit_generator(\n\t\t\ttrain_generator,\n\t\t\tsteps_per_epoch=1000,\n\t\t\tepochs=35,\n\t\t\tshuffle=True,\n\t\t\tverbose=1,\n\t\t\tvalidation_data=validation_generator,\n\t\t\tvalidation_steps=111\n\t\t\t)\n\nmodel.save('corrected_my_net_LeakyReLu_35.h5')\n\n# res = model.evaluate_generator(\n#     validation_generator,\n#     max_queue_size=30,\n#     verbose=1\n#     )\n\n# print res\n\n# 'history' (lowercase) is the History object returned by fit_generator above\nnp.save(\"mynet_Acc_LeReLu_35.npy\", history.history['acc'])\nnp.save(\"mynet_Acc.val_LeReLu_35.npy\", history.history['val_acc'])\nnp.save(\"mynet_Loss_LeReLu_35.npy\", history.history['loss'])\nnp.save(\"mynet_Loss.val_LeReLu_35.npy\", history.history['val_loss'])\n\n# model.save('VGG19.h5')\n","sub_path":"Face_recognition/corrected_my_net_LeakyReLu_35.py","file_name":"corrected_my_net_LeakyReLu_35.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"543073774","text":"\"\"\"Once upon a time there was a snail Bob who liked flowers very much.\r\nHe has built an enormous flower garden which can be represented as matrix whose width and height is 10^9 cells.\r\nEach row is indexed from 1 to 10^9 from top to bottom, and each column is indexed from 1 to 10^9 from left to right.\r\nEach cell has coordinates: (column-index, row-index).\r\n\r\nSnail Bob has been planting flowers in such a way that in cell with coordinates (x, y) he has planted min(x, y) flowers.\r\nAdditionally he has built a bee hive in each cell whose number of flowers is a multiple of 5.\r\n\r\nYour task is to determine the number of bee hives inside a rectangle with upper-left corner in (x1, y1) and lower-right corner in (x2, y2).\r\n\r\nInput\r\nThe only line of input consists of 4 space separated integers: x1, y1, x2, y2.\r\nIn 50% of test cases: 1 <= x1, y1, x2, y2 <= 10^3\r\nIn other 50% of test cases: 1 <= x1, y1, x2, y2 <= 10^9.\r\n\r\nOutput\r\nNumber of bee hives inside the described rectangle.\r\n\r\nExample\r\n\r\nInput\r\n1 1 15 
4\r\n\r\nOutput\r\n0\r\n\r\nInput\r\n5 5 10 10\r\n\r\nOutput\r\n12\r\n\r\n\"\"\"\r\n\r\n\r\nimport fileinput\r\n\r\ndef processLine(inputLine):\r\n x1,y1,x2,y2=inputLine\r\n def wrap():\r\n for x in range(x1, x2+1):\r\n for y in range(y1, y2+1):\r\n yield min(x,y)\r\n y=0\r\n for x in wrap():\r\n if x%5==0:\r\n y+=1\r\n return y\r\n\r\n\r\nfor line in fileinput.input():\r\n inputLine = line.split(' ',4)\r\n list_of_numbers = []\r\n for el in inputLine:\r\n try:\r\n list_of_numbers.append(int(el))\r\n except ValueError:\r\n pass\r\n inputLine=list_of_numbers[0:4]\r\n print(processLine(inputLine))\r\n ","sub_path":"hackathon1.py","file_name":"hackathon1.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520666219","text":"#!/usr/bin/env python3\n\"\"\"Install Khan's python2.\"\"\"\n\nimport argparse\nimport platform\nimport re\nimport subprocess\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--force\", help=\"Force install of Khan's python2\", action=\"store_true\"\n)\nargs = parser.parse_args()\n\nwhich = subprocess.run([\"which\", \"python2\"], capture_output=True, text=True)\nis_installed = (\n which.returncode == 0 and which.stdout.strip() != \"/usr/bin/python2\"\n)\nif is_installed:\n print(\"Already running a non-system python2.\")\n\nif args.force or not is_installed:\n action = \"reinstall\" if is_installed else \"install\"\n print(\"Installing python2 from khan/repo. This may take a few minutes.\")\n if platform.uname().machine == \"arm64\":\n brew_runner = [\"arch\", \"-x86_64\", \"/usr/local/bin/brew\"]\n else:\n brew_runner = [\"brew\"]\n subprocess.run(brew_runner + [action, \"khan/repo/python@2\"], check=True)\n\n# Get version of pip2\npip2_version = \"\"\npip2_version_str = subprocess.run(\n [\"pip2\", \"--version\"], capture_output=True, text=True\n)\nif pip2_version_str:\n match = re.match(r\"\\w+ (\\d+)\", pip2_version_str.stdout)\n if match:\n pip2_version = match.group(1)\n\nif pip2_version and pip2_version > \"19\":\n print(\"Reverting pip2 from version: \" + pip2_version_str.stdout.strip())\n subprocess.run([\"pip2\", \"install\", \"pip<20\", \"-U\"], check=True)\n\n# Simple diagnostics\nsubprocess.run([\"pip2\", \"--version\"])\nprint(\"which python2: \" + which.stdout.strip())\n","sub_path":"bin/install-mac-python2.py","file_name":"install-mac-python2.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650739682","text":"# -*- coding: utf-8 -*-\r\n#\r\n\r\nimport os\r\nimport shutil\r\n\r\n#############################################\r\n#创建指定目录\r\ndef create_path(path=None):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n return True\r\n\r\n#删除指定目录下的文件以及文件夹\r\ndef delete_path(path_list=None):\r\n path_list = path_list if isinstance(path_list, (list, tuple)) else [path_list]\r\n for path in path_list:\r\n if os.path.exists(path):\r\n if os.path.isfile(path):\r\n os.remove(path)\r\n elif os.path.isdir(path):\r\n shutil.rmtree(path, True)\r\n return True","sub_path":"lib/core/os_file.py","file_name":"os_file.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424881963","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef distance(X, M):\n # calculate the euclidean distance between numpy arrays X and M\n (m, n) = X.shape\n d = 
np.zeros(m)\n    for i in range(m):\n        for j in range(n):\n            d[i] = d[i] + np.square(X[i, j] - M[i, j])\n    d = np.sqrt(d)  # square root of the summed squares, so this is a true Euclidean distance\n    d = list(d)\n    return d\n\n\ndef findClosestCentres(X, M):\n    # finds the centre in M closest to each point in X\n    (k, n) = M.shape  # k is number of centres\n    (m, n) = X.shape  # m is number of data points\n    C = list()\n    for j in range(k):\n        C.append(list())\n    for j in range(m):\n        d = []\n        for i in range(k):\n            dis = np.linalg.norm(X[j] - M[i])\n            d.append(dis)\n        a = np.argmin(d)\n        C[a].append(j)\n    return C\n\n\ndef updateCentres(X, C):\n    # updates the centres to be the average of the points closest to it.\n    k = len(C)  # k is number of centres\n    (m, n) = X.shape  # n is number of features\n    M = np.zeros((k, n))\n    for i in range(k):\n        c = 0\n        for j in C[i]:\n            M[i] = np.add(M[i], X[j])\n            c = c + 1\n        M[i] = M[i] / c  # assumes every cluster is non-empty\n    return M\n\n\ndef plotData(X, C, M):\n    # plot the data, coloured according to which centre is closest, and also plot the centres themselves\n    fig, ax = plt.subplots(figsize=(12, 8))\n    ax.scatter(X[C[0], 0], X[C[0], 1], c='c', marker='o')\n    ax.scatter(X[C[1], 0], X[C[1], 1], c='b', marker='o')\n    ax.scatter(X[C[2], 0], X[C[2], 1], c='g', marker='o')\n    # plot centres\n    ax.scatter(M[:, 0], M[:, 1], c='r', marker='x', s=100, label='centres')\n    ax.set_xlabel('x1')\n    ax.set_ylabel('x2')\n    ax.legend()\n    fig.savefig('graph.png')\n\n\ndef main():\n    print('testing the distance function ...')\n    print(distance(np.array([[1, 2], [3, 4]]), np.array([[1, 2], [1, 2]])))\n\n    print('testing the findClosestCentres function ...')\n    print(findClosestCentres(np.array([[1, 2], [3, 4], [0.9, 1.8]]), np.array([[1, 2], [2.5, 3.5]])))\n\n    print('testing the updateCentres function ...')\n    print(updateCentres(np.array([[1, 2], [3, 4], [0.9, 1.8]]), [[0, 2], [1]]))\n\n    print('loading test data ...')\n    X = np.loadtxt('data.txt')\n    [m, n] = X.shape\n    iters = 10\n    k = 3\n    print('initialising centres ...')\n    init_points = np.random.choice(m, k, replace=False)\n    M = X[init_points, :]  # initialise centres randomly\n    print('running k-means algorithm ...')\n    for i in range(iters):\n        C = findClosestCentres(X, M)\n        M = updateCentres(X, C)\n    print('plotting output')\n\n    plotData(X, C, M)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"15045653","text":"WIN_CONDITIONS = [ [0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6] ]  # all eight winning lines, both diagonals included\n\nPLAYERS = ['X','O']\n\n\n\nclass TicTacToe:\n\n    def __init__(self):\n        self.board = [None,None,None,None,None,None,None,None,None]\n        self.last_player = 'O'\n        self.remaining_win_conditions = list(WIN_CONDITIONS)  # copy, so removals never mutate the module-level list\n\n    def resolve_turn(self):\n        self.print_board()\n\n        if self.possible_winner() and self.winner():\n            print('%s has won the game, congratulations!' % self.last_player)\n        elif len(self.remaining_win_conditions) == 0:\n            if None in self.board:\n                print('Tie! There is no path to victory')\n            else:\n                print('Tie! The board is full')\n\n        x = \"test\"\n        return x\n
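\n    # (added) example round, assuming alternating play that starts with X:\n    #   game = TicTacToe()\n    #   for square in (0, 3, 1, 4, 2):  # X fills the top row and wins\n    #       game.move(square)\n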
\n    def move(self, square):\n\n        if self.last_player == PLAYERS[1]:\n            player = PLAYERS[0]\n        else:\n            player = PLAYERS[1]\n        # bounds must be checked before the board is indexed; valid squares are 0-8\n        if square > 8 or square < 0:\n            raise ValueError('Square outside bounds')\n        if self.board[square] is not None:\n            raise ValueError('Not a valid square')\n        self.board[square] = player\n        self.last_player = player\n        self.resolve_turn()\n\n\n    def possible_winner(self):\n        if self.board.count(PLAYERS[0]) > 2 or self.board.count(PLAYERS[1]) > 2:\n            return True\n        else:\n            return False\n\n    def winner(self):\n        # iterate over a copy: calling remove() during iteration would otherwise skip lines\n        for win in list(self.remaining_win_conditions):\n            values = []\n            for square in win:\n                values.append(self.board[square])\n            if values.count(self.last_player) == 3:\n                return True\n            elif values.count(PLAYERS[0]) > 1 and values.count(PLAYERS[1]) > 1:\n                self.remaining_win_conditions.remove(win)\n\n\n    def print_board(self):\n        i = 0\n        squares = []\n\n        for square in self.board:\n            if square is None:\n                squares.append(str(i))\n            else:\n                squares.append(square)\n            i += 1\n\n        print('%s | %s | %s\\n----------\\n%s | %s | %s\\n----------\\n%s | %s | %s' % tuple(squares))\n\n\ncall = TicTacToe()\ncall.resolve_turn()\n\n\n# import unittest\n# import TicTacToe\n# class testTicTacToe(unittest.TestCase):\n#\n#     def test_print_board(self):\n#         self.assertEqual(print_board('%s | %s | %s\\n----------\\n%s | %s | %s\\n----------\\n%s | %s | %s'),\"'%s | %s | %s\\n----------\\n%s | %s | %s\\n----------\\n%s | %s | %s'\")\n#\n#\n# unittest.main()","sub_path":"Unittest/project_6/python_qa_exercise.py","file_name":"python_qa_exercise.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"225882655","text":"import sys\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import normalize\nfrom scipy.io import loadmat\nfrom scipy import spatial\nfrom scipy import stats\nsys.path.insert(1, '../PySPaRTAN/')\nfrom SPaRTAN import SPaRTAN\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset_D\", help=\"name of the dataset D which will be passed in\", type=str, default=\"Dpbmc\")\nparser.add_argument(\"--dataset_P\", help=\"name of the dataset P which will be passed in\", type=str, default=\"Ppbmc5kn_CD8\")\nparser.add_argument(\"--dataset_Y\", help=\"name of the dataset Y which will be passed in\", type=str, default=\"Ypbmc5kn_CD8\")\nparser.add_argument(\"--spectrumA\", help=\"spectrum parameter A\", type=float, default=1.0)\nparser.add_argument(\"--spectrumB\", help=\"spectrum parameter B\", type=float, default=0.7)\nparser.add_argument(\"--lamda\", help=\"regularization parameter lambda\", type=float, default=0.001)\nparser.add_argument(\"--rsL2\", help=\"L2 regularization parameter\", type=float, default=0.001)\n\nparser.add_argument(\"--normalization\", help=\"type of normalization, no normalization if empty\", type=str, default=\"l2\")\nparser.add_argument(\"--input_dir\", help=\"directory of input files\", type=str, default=\"../data/inputs\")\nparser.add_argument(\"--output_dir\", help=\"directory of output files\", type=str, default=\"../data/outputs\")\nparser.add_argument('--fold', help=\"number of folds for cross-validation; no cross-validation (default/specified parameters are used) if set to 0\", type=int, default=0)\n\nargs = parser.parse_args()\n\n\n\nif not os.path.exists(args.output_dir):\n    os.makedirs(args.output_dir)\n\n\nD_ori = pd.read_csv(os.path.join(args.input_dir, args.dataset_D+'.csv'), index_col=0)\n
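# (added note) P and Y are loaded the same way; index_col=0 keeps the first CSV column as row labels\nP_ori = 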
pd.read_csv(os.path.join(args.input_dir, args.dataset_P+'.csv'), index_col=0)\nY_ori = pd.read_csv(os.path.join(args.input_dir, args.dataset_Y+'.csv'), index_col=0)\n\nTF_name = list(D_ori.columns)\ncell_name = list(Y_ori.columns)\ngene_name = list(Y_ori.index)\nprotein_name = list(P_ori.columns)\n\nD_mat = D_ori.values\nP_mat = P_ori.values\nY_mat = Y_ori.values\n\n# normalize the dataset\nif args.normalization != \"\":\n D = normalize(D_mat, norm=args.normalization, axis=0)\n Y = normalize(Y_mat, norm=args.normalization, axis=0)\n P = normalize(P_mat, norm=args.normalization, axis=1)\n\n# create the object of SPaRTAN\nreg = SPaRTAN()\n\n\n\n# cross-validate to determine hyperparameters\nfold = args.fold\nif fold != 0: #using cross validation to determine the optimal parameters\n \n lamdas = [0.001, 0.01, 0.1, 0.2, 0.3]\n rsL2s = [0, 0.001, 0.01]\n spectrumAs = [1]\n spectrumBs = [0.5, 0.6, 0.7]\n\n lenlamdas = len(lamdas)\n lenrsL2s = len(rsL2s)\n lenspAs = len(spectrumAs)\n lenspBs = len(spectrumBs)\n \n corr_all_spearman = np.zeros((lenspAs, lenspBs, lenlamdas, lenrsL2s))\n for a in range(0, lenspAs):\n for b in range(0, lenspBs):\n for l in range(0, lenlamdas):\n for r in range(0, lenrsL2s):\n print(\n \"cross validating spectrumA={}, spectrumB={}, lambda={}, rsL2={}\".format(\n spectrumAs[a], spectrumBs[b], lamdas[l], rsL2s[r]\n )\n )\n sum_corr_spearman = 0\n \n kf = KFold(n_splits=fold)\n for train_index, test_index in kf.split(P_mat):\n \n # split the data into train and test set\n P_train, P_test = P_mat[train_index, :], P_mat[test_index, :]\n Y_train, Y_test = Y_mat[:, train_index], Y_mat[:, test_index]\n \n # normalize the train and test set\n Y_train = normalize(Y_train, axis=0)\n Y_test = normalize(Y_test, axis=0)\n \n P_train = normalize(P_train, axis=1)\n P_test = normalize(P_test, axis=1)\n \n # train the model\n reg.fit(\n D,\n P_train,\n Y_train,\n lamda=lamdas[l],\n rsL2=rsL2s[r],\n spectrumA=spectrumAs[a],\n spectrumB=spectrumBs[b],\n )\n \n # get predicted value Y_pred on P_test\n Y_pred = reg.predict(P_test)\n \n # get the correlation bewteen Y_pred and Y_test\n corr_spearman = reg.get_corr(Y_pred, Y_test)\n \n sum_corr_spearman = sum_corr_spearman + corr_spearman\n \n corr_all_spearman[a, b, l, r] = sum_corr_spearman / fold\n \n \n # retrive the best parameters\n max_a, max_b, max_l, max_r = np.unravel_index(\n corr_all_spearman.argmax(), corr_all_spearman.shape\n )\n \n lamda_best = lamdas[max_l]\n rsL2_best = rsL2s[max_r]\n spectrumA_best = spectrumAs[max_a]\n spectrumB_best = spectrumBs[max_b]\n \n print(\"lamda_best={}, rsL2_best={}, spectrumA_best={},spectrumB_best={}\".format(lamda_best,rsL2_best,spectrumA_best,spectrumB_best))\n \n \nelse: #fold ==0: using default/specified paramters\n \n lamda_best=args.lamda\n rsL2_best=args.rsL2\n spectrumA_best=args.spectrumA\n spectrumB_best=args.spectrumB\n\n\n# re-train the model\nreg.fit(D, P, Y, lamda_best, rsL2_best, spectrumA_best, spectrumB_best)\n\n# retrieve W, projD, projP\nW = reg.get_W()\nprojD = reg.get_projD()\nprojP = reg.get_projP()\n\n\ndf_W = pd.DataFrame(data=W, index=TF_name, columns=protein_name)\ndf_projP = pd.DataFrame(data=projP, index=cell_name, columns=protein_name)\ndf_projD = pd.DataFrame(data=projD, index=TF_name, columns=cell_name)\n\noutfile_W = os.path.join(args.output_dir, \"W.csv\")\noutfile_projP = os.path.join(args.output_dir, \"projP.csv\")\noutfile_projD = os.path.join(args.output_dir, 
\"projD.csv\")\n\ndf_W.to_csv(outfile_W)\ndf_projP.to_csv(outfile_projP)\ndf_projD.to_csv(outfile_projD)\n\n# with open(outfile_W, \"w\") as outfile:\n # np.savetxt(outfile, W, fmt=\"%-7.4f\")\n\n# with open(outfile_projP, \"w\") as outfile:\n # np.savetxt(outfile, projP, fmt=\"%-7.4f\")\n\n# with open(outfile_projD, \"w\") as outfile:\n # np.savetxt(outfile, projD, fmt=\"%-7.4f\")\n\nprint(\"Process finished successfully!\")\n","sub_path":"PySPaRTAN_preRelease/tests/SPaRTAN_run.py","file_name":"SPaRTAN_run.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192706076","text":"#!/usr/bin/env python\nimport spotipy\nimport spotipy.util as util\n\n\nSCOPES = [\n 'user-library-read',\n 'playlist-read-private',\n 'playlist-modify-private',\n 'playlist-modify-public'\n]\n\n\nclass Spotify(object):\n\n def __init__(self, username):\n self.counter = 0\n self.username = username\n self.authorize(username)\n\n def authorize(self, username):\n scope = \" \".join(SCOPES)\n self.token = util.prompt_for_user_token(username, scope)\n self.sp = None\n\n if self.token is None:\n sys.stderr.write(\"Can't get token for {0}\\n\".format(username))\n sys.exit(1)\n\n self.sp = spotipy.Spotify(auth=self.token)\n self.sp.trace = False\n self.user_id = self.sp.me()['id']\n\n def re_authorize(self):\n self.counter += 1\n if self.counter % 5 == 0:\n self.authorize(self.username)\n\n def search(self, artist, track):\n query = 'artist:{0} track:{1}'.format(artist, track)\n result = self.sp.search(q=query, type='track')\n if 'tracks' in result.keys():\n tracks = result['tracks']\n if 'items' in tracks.keys():\n return tracks['items']\n\n def get_playlists(self):\n result = self.sp.user_playlists(self.user_id)\n if 'items' in result.keys():\n return result['items']\n\n def get_playlist(self, id):\n result = self.sp.user_playlists(self.user_id, id)\n if 'items' in result.keys():\n return result['items']\n\n def find_playlist(self, name):\n playlists = self.get_playlists()\n for playlist in playlists:\n if playlist['name'] == name:\n return playlist['id']\n\n def add_track(self, playlist_name, artist, track, album):\n result = False\n\n playlist_id = self.find_playlist(playlist_name)\n if playlist_id is None:\n return\n\n search_results = self.search(artist, track)\n if len(search_results) == 0:\n return result\n\n track_id = search_results[0]['id']\n self.sp.user_playlist_add_tracks(self.user_id, playlist_id, [track_id])\n result = True\n\n return result\n","sub_path":"somafm-listener/spotify/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520853753","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n return list(map(int, input().split()))\n\n\ndef read_ints():\n return map(int, input().split())\n\n\ndef main():\n N, K = read_ints()\n A = read_int_list()\n # l, r = 0, 0\n l = 0\n s = 0\n ans = 0\n for r in range(N):\n s += A[r]\n if s >= K:\n ans += N - r\n while l <= r:\n s -= A[l]\n l += 1\n if s >= K:\n ans += N - r\n else:\n break\n\n print(ans)\n\n\nmain()\n","sub_path":"abc/abc130d.py","file_name":"abc130d.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189002442","text":"import numpy as np\nfrom collections import defaultdict\nimport 
itertools\n\nfrom utils import plotting\nfrom utils.policy import make_epsilon_greedy_policy\n\ndef q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):\n \"\"\"\n Q-Learning algorithm: Off-policy TD control. Finds the optimal greedy policy\n while following an epsilon-greedy policy\n\n Args:\n env: OpenAI environment.\n num_episodes: Number of episodes to run for.\n discount_factor: Lambda time discount factor.\n alpha: TD learning rate.\n epsilon: Chance the sample a random action. Float betwen 0 and 1.\n\n Returns:\n A tuple (Q, episode_lengths).\n Q is the optimal action-value function, a dictionary mapping state -> action values.\n stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n \"\"\"\n\n # The final action-value function.\n # A nested dictionary that maps state -> (action -> action-value).\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n\n # keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)\n\n for i_episode in range(num_episodes):\n current_state = env.reset()\n # keep track number of time-step per episode only for plotting\n for t in itertools.count():\n # choose the action based on epsilon greedy policy\n action_probs = policy(current_state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_state, reward, done, _ = env.step(action)\n\n # sse the greedy action to evaluate Q, not the one we actually follow\n greedy_next_action = Q[next_state].argmax()\n # evaluate Q using estimated action value of (next_state, greedy_next_action)\n td_target = reward + discount_factor * Q[next_state][greedy_next_action]\n td_error = td_target - Q[current_state][action]\n Q[current_state][action] += alpha * td_error\n\n # improve epsilon greedy policy using new evaluate Q\n policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)\n\n # update statistics\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n\n if done:\n break\n else:\n current_state = next_state\n\n return Q, stats\n","sub_path":"reinforcement/q_learning.py","file_name":"q_learning.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554335965","text":"import torch\nimport torch.nn as nn\n\nclass autoencodernet(nn.Module):\n def __init__(self):\n super(autoencodernet, self).__init__()\n self.compress1 = nn.Conv2d(in_channels = 3, out_channels = 3, kernel_size = 4, stride = 4, padding = 212)\n self.nlinear1 = nn.ReLU()\n self.decompress = nn.ConvTranspose2d(in_channels = 3, out_channels = 3, kernel_size = 90, stride = 2)\n self.nlinear2 = nn.ReLU()\n\n def forward(self, x):\n x = self.compress1(x)\n x = self.nlinear1(x)\n y = self.decompress(x)\n y = self.nlinear2(y)\n return x,y\n\n\ninput = torch.randn([1,3,600,600])\nmodel = autoencodernet()\n\n\n\n#torch.save(model, 'ae.pth')\nop = model(input)\nprint(type(op))\nprint(op[0].shape)\nprint(op[1].shape)\n\n\n#for name, parameters in model.named_parameters():\n# print(name, parameters.numel())\n\n\n\n","sub_path":"papercodes/autoencoder/autoencoder_pytorch.py","file_name":"autoencoder_pytorch.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"404602934","text":"import 
os\nos.chdir('..')\n\nimport sys\nsys.path.append(\".\")\n\nif os.path.exists('pywikibot.lwp'):\n os.remove('pywikibot.lwp') #pepega\n\nimport pywikibot\n\ndry_run = True\n\nnamespace = 'File' # 0 for main\nprefix_from = 'HMS Neptune'\nprefix_to = 'Neptune'\nsuffix = '.ogg'\nmessage = 'Standardize collab ship names to Ship (Collab Name)'\n\nnoredirect = (namespace != 0) # suppress redirects if namespace is not main\nprint('noredirect', noredirect)\n\n\"\"\"\nnamespace = 'User'\nprefix_from = 'Evil4Zerggin/Move'\nprefix_to = 'Evil4Zerggin/Moved'\nmessage = 'Testing move bot'\n\"\"\"\n\nsite = pywikibot.Site('azurlane') # The site we want to run our bot on\nsite.login()\n\nfor page in site.allpages(prefix=prefix_from, namespace=namespace):\n if site.page_isredirect(page): continue\n title = page.title()\n if suffix and title[-len(suffix):] != suffix: continue\n if namespace:\n namespace_and_prefix_length = len(namespace) + 1 + len(prefix_from)\n newtitle = namespace + ':' + prefix_to + title[namespace_and_prefix_length:]\n else:\n newtitle = prefix_to + title[len(prefix_from):]\n print('%s -> %s' % (title, newtitle))\n if pywikibot.Page(site, newtitle).exists():\n print('Page already exists, skipping')\n continue\n if not dry_run:\n site.movepage(page, newtitle, message, noredirect=noredirect)\n","sub_path":"wiki_scripts/move_pages.py","file_name":"move_pages.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"608811190","text":"from app import app\nfrom flask import render_template, request, jsonify\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom .bnf_requests import hugo_sample_req, generic, getAuteurs, getBooksDetail, getAuthorsDetail, getAuthorsBooks, getRelatedAuthors\n\n# Page principale avec la recherche générique\n@app.route(\"/\")\ndef root():\n return render_template(\"search.html\")\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n auteurs = []\n if request.method == \"POST\":\n query = request.form.get(\"query\")\n auteurs = getAuteurs(query)\n return render_template(\"search.html\", results=auteurs)\n\n# Page de détails d'un auteur\n@app.route(\"/author/\")\ndef author(name):\n name = name.replace(\"_\", \" \")\n dateBirth = \"\"\n if \"dBirth\" in request.args:\n dateBirth = request.args[\"dBirth\"]\n results = getAuthorsDetail(name, dateBirth)\n results.append(getAuthorsBooks(name))\n if len(results) == 0:\n results = [{}]\n relatedAuthors = getRelatedAuthors(name) \n print(relatedAuthors)\n return render_template(\"author.html\", details=results[0], name=name, relatedAuthors = relatedAuthors)\n\n# Page de détails d'un livre\n@app.route(\"/book\")\ndef book():\n name = \"harry potter\"\n name = name.replace('_', ' ')\n results = getBooksDetail(name)\n print(results)\n if len(results) == 0:\n results = [{}]\n return render_template(\"book.html\", details=results[0], name=name)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102929067","text":"import importlib\nimport marshal\nimport os\nimport py_compile\n\nfrom .python.modules import Module\n\n\ndef transpile(sourcefile, namespace, outdir=None):\n print(\"Compiling %s ...\" % sourcefile)\n py_compile.compile(sourcefile)\n\n transpiler = Transpiler(namespace)\n transpiler.transpile(sourcefile)\n transpiler.write(outdir)\n\n\nclass Transpiler:\n def 
__init__(self, namespace):\n self.namespace = namespace\n self.classfiles = []\n\n def write(self, outdir):\n # Create directory tree to store classfile\n if outdir:\n basedir = [outdir]\n else:\n basedir = []\n\n for namespace, class_name, javaclassfile in self.classfiles:\n dirname = os.path.join(*(basedir + namespace.split('.')))\n try:\n os.makedirs(dirname)\n except FileExistsError:\n pass\n\n classfilename = os.path.join(dirname, '%s.class' % class_name)\n\n print(\"Writing %s ...\" % classfilename)\n with open(classfilename, 'wb') as out:\n javaclassfile.write(out)\n print(\"Done.\")\n\n def transpile(self, sourcefile):\n with open(importlib.util.cache_from_source(sourcefile), 'rb') as compiled:\n # Read off the magic from the start of the PYC file.\n compiled.read(12)\n\n # Decompile the code object.\n code = marshal.load(compiled)\n\n module = Module(self.namespace, sourcefile)\n\n # Extract commands from the code block\n module.extract(code)\n\n # Transpile the module code, adding any classfiles generated\n # to the list to be exported.\n self.classfiles.extend(module.transpile())\n","sub_path":"voc/transpiler.py","file_name":"transpiler.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"461646398","text":"from __future__ import print_function, division\r\nfrom builtins import range\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom grid_world import standard_grid, negative_grid, ACTION_SPACE\r\nfrom grid_world_print import print_values, print_policy\r\n\r\ndef rand_action(a, eps=0.2):\r\n p = np.random.random()\r\n if p < (1 - eps):\r\n return a\r\n else:\r\n return np.random.choice(ACTION_SPACE)\r\n \r\n \r\n \r\ndef max_of(d):\r\n max_val = float('-inf')\r\n max_key = None\r\n for k, v in d.items():\r\n if v > max_val:\r\n max_val = v\r\n max_key = k\r\n return max_key, max_val\r\n\r\n\r\nclass Model:\r\n def __init__(self):\r\n self.theta = np.random.randn(25) / np.sqrt(25)\r\n def sa2x(self, s, a):\r\n return np.array([\r\n s[0] - 1 if a == 'U' else 0,\r\n s[1] - 1.5 if a == 'U' else 0,\r\n (s[0]*s[1] - 3)/3 if a == 'U' else 0,\r\n (s[0]*s[0] - 2)/2 if a == 'U' else 0,\r\n (s[1]*s[1] - 4.5)/4.5 if a == 'U' else 0,\r\n 1 if a == 'U' else 0,\r\n s[0] - 1 if a == 'D' else 0,\r\n s[1] - 1.5 if a == 'D' else 0,\r\n (s[0]*s[1] - 3)/3 if a == 'D' else 0,\r\n (s[0]*s[0] - 2)/2 if a == 'D' else 0,\r\n (s[1]*s[1] - 4.5)/4.5 if a == 'D' else 0,\r\n 1 if a == 'D' else 0,\r\n s[0] - 1 if a == 'L' else 0,\r\n s[1] - 1.5 if a == 'L' else 0,\r\n (s[0]*s[1] - 3)/3 if a == 'L' else 0,\r\n (s[0]*s[0] - 2)/2 if a == 'L' else 0,\r\n (s[1]*s[1] - 4.5)/4.5 if a == 'L' else 0,\r\n 1 if a == 'L' else 0,\r\n s[0] - 1 if a == 'R' else 0,\r\n s[1] - 1.5 if a == 'R' else 0,\r\n (s[0]*s[1] - 3)/3 if a == 'R' else 0,\r\n (s[0]*s[0] - 2)/2 if a == 'R' else 0,\r\n (s[1]*s[1] - 4.5)/4.5 if a == 'R' else 0,\r\n 1 if a == 'R' else 0,\r\n 1\r\n ])\r\n \r\n def predict(self, s, a):\r\n x = self.sa2x(s, a)\r\n return self.theta.dot(x)\r\n\r\n def grad(self, s, a):\r\n return self.sa2x(s, a)\r\n \r\n \r\ndef getQs(model, s):\r\n Qs = {}\r\n for a in ACTION_SPACE:\r\n q = model.predict(s,a)\r\n Qs[a] = q\r\n return Qs\r\n\r\nif __name__ == '__main__':\r\n grid = negative_grid(step_cost=-0.1)\r\n idx = 0\r\n print_values(grid.rewards, grid)\r\n print('\\n\\n')\r\n \r\n SA2IDX = {}\r\n for s in grid.all_states():\r\n SA2IDX[s] = {}\r\n for a in ACTION_SPACE:\r\n SA2IDX[s][a] = idx\r\n idx = idx + 1\r\n 
\r\n    model = Model()\r\n    t = 1.0\r\n    tt = 1.0\r\n    deltas = []\r\n\r\n    for it in range(20000):\r\n        if it%100 == 0:\r\n            t = t + 0.01\r\n            tt = tt + 0.01\r\n        if it%2000 == 0:\r\n            print(it)\r\n\r\n        alpha = 0.1/tt\r\n\r\n        s = (2,0)\r\n        grid.set_state(s)\r\n\r\n        Qs = getQs(model, s)\r\n\r\n        a = max_of(Qs)[0]\r\n        a = rand_action(a, eps=0.5/t)\r\n        biggest_change = 0\r\n        while not grid.game_over():\r\n            r = grid.move(a)\r\n            s_next = grid.current_state()\r\n\r\n            old_theta = model.theta.copy()\r\n            if grid.is_terminal(s_next):\r\n                model.theta += alpha * (r - model.predict(s,a)) * model.grad(s,a)\r\n            else:\r\n                Qs_next = getQs(model, s_next)\r\n                a_next = max_of(Qs_next)[0]\r\n                a_next = rand_action(a_next, eps=0.5/t)\r\n                model.theta += alpha * (r + 0.9*model.predict(s_next, a_next) - model.predict(s,a)) * model.grad(s,a)\r\n\r\n            s = s_next\r\n            a = a_next\r\n            biggest_change = max(biggest_change, np.abs(model.theta - old_theta).sum())\r\n\r\n        deltas.append(biggest_change)\r\n\r\n    plt.plot(deltas)\r\n    plt.show()\r\n\r\n    V = {}\r\n    policy = {}\r\n    for s in grid.actions.keys():\r\n        Qs = getQs(model, s)\r\n        a, q = max_of(Qs)\r\n        policy[s] = a\r\n        V[s] = q\r\n\r\n    print(\"values:\")\r\n    print_values(V, grid)\r\n    print(\"policy:\")\r\n    print_policy(policy, grid)","sub_path":"sarsa_approx.py","file_name":"sarsa_approx.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"70796489","text":"import numpy as np\nimport sympy as sym\nimport matplotlib.pyplot as plt\n\ng = 9.81 # gravitational acceleration\nv0 = 10 # initial speed\nalpha = 40 # launch angle in degrees\nh = 3 # initial height\ntime = np.linspace(0, 2, 20)\n\nt = sym.symbols('t')\ndef fx(t):\n    return v0*t*sym.cos(np.deg2rad(alpha))\ndef fy(t):\n    return h + v0*t*sym.sin(np.deg2rad(alpha)) - g*(t**2)/2\ndef dfy(tim):\n    return sym.diff(fy(tim), t)\n\nfxl = sym.lambdify(t, fx(t), modules=['numpy'])\nfyl = sym.lambdify(t, fy(t), modules=['numpy'])\nfydl = sym.lambdify(t, dfy(t), modules=['numpy'])\n\ny_random = []\nfor i in time:\n    y_random.append(fy(t).subs({t: i}) + np.random.randint(11)/5 -1)\ny_random = np.asarray(y_random)\n\n# least-squares quadratic approximation of the noisy data\na, b, c = np.polyfit(time, y_random, deg=2)\n\n# root of fy (time of landing, y = 0)\nfrom scipy import optimize\nroot1 = optimize.newton(fy, 1.5)\n#print('optimize.newton', root1)\n\n# root of the derivative (time of the apex)\ntest = False\nif(test):\n    root2 = optimize.newton(dfy, 0.58)\nelse:\n    def fyd(t):\n        return v0*sym.sin(np.deg2rad(alpha)) - g*t\n    root2 = optimize.newton(fyd, 0.5)\n#print('optimize.newton', root2)\n\n# plot\nplt.plot(fxl(time), fyl(time), 'b', label='$funkcija\\\\ (poševni\\\\ met)$', linewidth=2)\nplt.plot(fxl(time), y_random, 'ro', label='$deviacija$')\nplt.plot(fxl(time), a*time**2 + b*time + c, 'g', label='$aproksimacija$', linewidth=3)\n#plt.plot(fxl(time), fydl(time), 'r', label='$odvod\\\\ funkcije$')\n\nplt.xlabel('$dolžina/m$')\nplt.ylabel('$višina/m$')\nplt.axhline(y=0)\n#plt.axvline(x=fx(root1)) # maximum range (y=0)\n#plt.axvline(x=fx(root2))\n#plt.axhline(y=fy(root2)) # maximum height\nplt.legend(loc=\"best\")\nplt.grid()\nplt.show()\n\n
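# (added) quick cross-check, assuming the analytic apex time t = v0*sin(alpha)/g\n# implied by dfy = v0*sin(alpha) - g*t\nprint('newton apex root:', root2, 'analytic apex:', v0*np.sin(np.deg2rad(alpha))/g)\n\n# second figure: trajectory plus its derivative, with guide lines through the roots\n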
plt.plot(fxl(time), fyl(time), 'b', label='$funkcija\\\\ (poševni\\\\ met)$', linewidth=2)\n#plt.plot(fxl(time), y_random, 'ro', label='$deviacija$')\n#plt.plot(fxl(time), a*time**2 + b*time + c, 'g', label='$aproksimacija$', linewidth=3)\nplt.plot(fxl(time), fydl(time), 'r', label='$odvod\\\\ funkcije$')\n\nplt.xlabel('$dolžina/m$')\nplt.ylabel('$višina/m$')\nplt.axhline(y=0)\nplt.axvline(x=fx(root1), color='k') # maximum range (y=0)\nplt.axvline(x=fx(root2), color='k')\nplt.axhline(y=fy(root2), color='k') # maximum height\nplt.legend(loc=\"best\")\nplt.grid()\nplt.show()\n\n","sub_path":"scipy/seminar-1.1.py","file_name":"seminar-1.1.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"316857649","text":"import pygame,sys\r\npygame.init()\r\nscreen=pygame.display.set_mode([640,480])\r\nscreen.fill([255,255,255])\r\ntheball=pygame.image.load('th.png')\r\nx=50\r\ny=50\r\nscreen.blit(theball,[x,y])\r\npygame.display.flip()\r\nfor looper in range(1,100):\r\n    pygame.time.delay(20)\r\n    pygame.draw.rect(screen,[255,255,255],[x,y,90,90],0)\r\n    x=x+5\r\n    screen.blit(theball,[x,y])\r\n    pygame.display.flip()\r\nrunny=True\r\nwhile runny:\r\n    for event in pygame.event.get():\r\n        if event.type==pygame.QUIT:\r\n            runny=False\r\npygame.quit()","sub_path":"16-13.py","file_name":"16-13.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"338716331","text":"#! /usr/bin/env python3\n\nclass Solution(object):\n    def multiply(self, num1, num2):\n        \"\"\"\n        :type num1: str\n        :type num2: str\n        :rtype: str\n        \"\"\"\n        number1 = int(num1)\n        number2 = int(num2)\n\n        return str(number1 * number2)\n\n\n\n# Test cases\nif __name__=='__main__':\n    sol = Solution()\n\n    product = sol.multiply('2', '3')\n    try:\n        assert( product == str(6) )\n    except AssertionError as err:\n        print('product does not equal 6.')\n\n    product = sol.multiply('123', '456')\n    try:\n        assert( product == str(56088) )\n    except AssertionError as err:\n        print('product does not equal 56088')","sub_path":"scripts/Array/43_Multiply_string/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"130510165","text":"def main():\n    n = int(input())\n    a = list(map(int, input().split()))\n    a.sort()\n    a.reverse()\n\n    ans = 0\n    for i, x in enumerate(a):\n        ans = max(ans, i + x + 1)\n\n    print(ans + 1)\n\nmain()\n","sub_path":"kattis/plantingtrees.py","file_name":"plantingtrees.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"390809255","text":"#Yiming shao, 2018/04/12\n\n'''Extract datetimes from log entries and calculate the time\n    between the first and last shutdown events'''\nfrom datetime import datetime\nimport os\nimport urllib.request\n\nSHUTDOWN_EVENT = 'Shutdown initiated'\n\n# prep: read in the logfile\nlogfile = os.path.join('/tmp', 'log')\nurllib.request.urlretrieve('http://bit.ly/2AKSIbf', logfile)\n\nwith open(logfile) as f:\n    loglines = f.readlines()\n\n\n# for you to code:\n\ndef convert_to_datetime(line):\n    # collect every digit in the line; the first 14 form YYYYMMDDHHMMSS\n    my_ary = [None]*1000\n    i = 0\n    for character in line:\n        if character.isdigit():\n            my_ary[i]=character\n            i+=1\n    my_year = int(my_ary[0]+my_ary[1]+my_ary[2]+my_ary[3])\n    my_month = int(my_ary[4]+my_ary[5])\n    my_day = int(my_ary[6]+my_ary[7])\n    my_hour = int(my_ary[8]+my_ary[9])\n    my_min = int(my_ary[10]+my_ary[11])\n    my_sec = int(my_ary[12]+my_ary[13])\n    return datetime(my_year,my_month,my_day,my_hour,my_min,my_sec)\n\n\n\ndef time_between_shutdowns(loglines):\n    i = 0\n    for events in loglines:\n        if SHUTDOWN_EVENT in events:\n
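            # (added note) the first matching line sets firstdate; every later match refreshes lastdate\n            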
if i==0:\n firstdate = convert_to_datetime(events)\n i = 1\n if i==1:\n lastdate = convert_to_datetime(events)\n return (lastdate - firstdate)\n\n\n","sub_path":"day1-3/logtimes.py","file_name":"logtimes.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377307820","text":"import unittest\nfrom piscola.extinction_correction import calculate_ebv, redden, deredden\nimport numpy as np\n\nclass TestPiscola(unittest.TestCase):\n \n def test_mw_ebv(self):\n \n ra, dec = 0.0, 180.0\n ebv = calculate_ebv(ra, dec)\n \n self.assertTrue(ebv > 0.0)\n \n def test_reddening(self):\n \n ra, dec = 0.0, 180.0\n wave, flux = np.array([4000.0]), np.array([1000.0])\n \n for reddening_law in ['fitzpatrick99', 'ccm89']:\n redden_flux = redden(wave, flux, ra, dec, reddening_law=reddening_law)\n self.assertTrue(redden_flux[0] < flux[0])\n \n def test_dereddening(self):\n \n ra, dec = 0.0, 180.0\n wave, flux = np.array([4000.0]), np.array([1000.0])\n \n for reddening_law in ['fitzpatrick99', 'ccm89']:\n deredden_flux = deredden(wave, flux, ra, dec, reddening_law=reddening_law)\n self.assertTrue(deredden_flux[0] > flux[0])\n \nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_extinction.py","file_name":"test_extinction.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95531124","text":"#!/usr/bin/env python\n#\n# Copyright 2013 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Adds all the lib directories needed by bdconfig.\n\nAssumes the current directory is a sibling of library directories under a\nshared parent lib/ directory. 
 Python should be invoked with -S to suppress\nimporting the site module to avoid conflicting versions of libraries.\n\"\"\"\n\nimport os\nimport sys\n\n\ndef InitializeSysPath():\n    lib_dir = os.path.dirname(os.path.dirname(os.path.dirname(\n        os.path.realpath(__file__))))\n    libs = [os.path.join(lib_dir, lib) for lib in os.listdir(lib_dir)]\n\n    # Removes entries from libs that are already on the path.\n    libs = list(set(libs) - set(sys.path))\n\n    sys.path = libs + sys.path\n","sub_path":"tools/bdconfig/lib/google_hadoop/bdconfig_lib/path_initializer.py","file_name":"path_initializer.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"559689477","text":"# coding: utf-8\n\"\"\"\n    WeChat bot\n\"\"\"\nimport logging\nimport sys\nimport os\n\nimport itchat\nimport models\nfrom wxbot.bot_manager import BotManager\nfrom models.robot import Robot\nfrom models.topic import Topic\n\nlogging.basicConfig(format='%(asctime)s\\t%(filename)s:%(lineno)d\\t%(levelname)5s\\t%(message)s', level=logging.DEBUG)\n\n\ndef main(bot_id, bot_wx_account, topic_id):\n    # Check that bot_id matches this WeChat account, then store the process id in the database\n\n    logging.debug('bot starting with bot_id:{0}, bot_wx_account:{1}'.format(bot_id, bot_wx_account))\n\n    robot_model = Robot.get_model_filter(Robot.Id == bot_id, Robot.WxAccount == bot_wx_account)\n    if robot_model is None:\n        logging.error('bot id:{0} bot_wx_account:{1} not created'.format(bot_id, bot_wx_account))\n        return\n\n    topic_model = Topic.get_model_by_id(topic_id)\n    if topic_model is None or topic_model.UserId != robot_model.UserId:\n        logging.error('bot[{0}]: topic[{1}] does not belong to you'.format(bot_id, topic_id))\n        return\n\n    robot_model.TopicId = topic_id\n\n    if robot_model.get_status() != Robot.STATUS_STOPPED:\n        logging.error('robot is already running')\n        return\n\n    root_path = os.path.dirname(os.path.abspath(__file__))\n    logging.debug(root_path)\n    storage_dir = '{0}/data/{1}.pkl'.format(root_path, bot_wx_account)\n\n    BotManager.set_bot(robot_model, root_path, storage_dir)\n\n    robot_model.start()\n    robot_model.Pid = os.getpid()\n    Robot.save([robot_model])\n    logging.info('wxbot {0} {1} {2} {3} starting'.format(bot_id, bot_wx_account, robot_model.Pid, robot_model.TopicId))\n\n    itchat.auto_login(hotReload=True, statusStorageDir=storage_dir, qrCallback=BotManager.qr_callback,\n                      loginCallback=BotManager.login_callback, exitCallback=BotManager.exit_callback,\n                      initCallback=BotManager.init_message)\n    itchat.run(blockThread=True, schedule=BotManager.schedule, exitCallback=BotManager.exit_callback)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) < 4:\n        logging.debug('usage: \\n {0} bot_id bot_wx_account topic_id'.format(sys.argv[0]))\n        sys.exit(1)\n\n    bot_id = sys.argv[1]\n    bot_wx_account = sys.argv[2]\n    topic_id = sys.argv[3]\n\n    models._base.metadata.create_all(models.db.engine)\n\n    main(bot_id, bot_wx_account, topic_id)\n","sub_path":"wx_bot.py","file_name":"wx_bot.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"645583655","text":"import torch\nfrom collections import OrderedDict\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom .random_variables import RandomVariable\nfrom .lazy import LazyVariable\n\n\nclass Module(nn.Module):\n    def __init__(self):\n        super(Module, self).__init__()\n        self._bounds = OrderedDict()\n        self.conditioning = False\n\n    def forward(self, *inputs, **kwargs):\n
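        # (added note) Module is abstract at this level; concrete modules are expected to override forward\n        raise 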
NotImplementedError\n\n def __call__(self, *inputs, **kwargs):\n if self.training and not self.conditioning and not hasattr(self, 'train_inputs'):\n raise RuntimeError('Cannot run module in training mode before calling `condition`!')\n\n if self.conditioning:\n self.train_inputs = inputs\n\n for input in inputs:\n if not(isinstance(input, RandomVariable) or isinstance(input, Variable)):\n raise RuntimeError('Input must be a RandomVariable or Variable, was a %s' %\n input.__class__.__name__)\n outputs = self.forward(*inputs, **kwargs)\n if isinstance(outputs, Variable) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):\n return outputs\n\n for output in outputs:\n if not (isinstance(output, RandomVariable) or\n isinstance(output, Variable) or\n isinstance(output, LazyVariable)):\n raise RuntimeError('Output must be a RandomVariable, Variable, or LazyVariable. Was a %s' %\n input.__class__.__name__)\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n\n def register_parameter(self, name, param, bounds, prior=None):\n \"\"\"\n Adds a parameter to the module.\n The parameter can be accessed as an attribute using given name.\n\n name (str): name of parameter\n param (torch.nn.Parameter): parameter\n bounds (2-tuple of float or Tensor): lower and upper bounds for parameter\n prior (RandomVariable): prior for parameter (default: None)\n \"\"\"\n if '_parameters' not in self.__dict__:\n raise AttributeError(\n \"cannot assign parameter before Module.__init__() call\")\n super(Module, self).register_parameter(name, param)\n kwargs = {}\n kwargs[name] = bounds\n self.set_bounds(**kwargs)\n\n def initialize(self, **kwargs):\n \"\"\"\n Set a value for a parameter\n\n kwargs: (param_name, value) - parameter to initialize\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n for name, val in kwargs.items():\n if name not in self._parameters:\n raise AttributeError('Unknown parameter %s for %s' % (name, self.__class__.__name__))\n if torch.is_tensor(val):\n self.__getattr__(name).data.copy_(val)\n elif isinstance(val, float) or isinstance(val, int):\n self.__getattr__(name).data.fill_(val)\n else:\n raise AttributeError('Type %s not valid to initialize parameter %s' % (type(val), name))\n\n # Ensure initializion is within bounds\n param = self._parameters[name]\n lower_bound, upper_bound = self._bounds[name]\n lower_mask = param.data < lower_bound\n if any(lower_mask.view(-1)):\n raise AttributeError('Parameter %s exceeds lower bound' % name)\n upper_mask = param.data > upper_bound\n if any(upper_mask.view(-1)):\n raise AttributeError('Parameter %s exceeds upper bound' % name)\n return self\n\n def set_bounds(self, **kwargs):\n \"\"\"\n Set bounds for a parameter\n\n kwargs: (param_name, value) - parameter to initialize\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n for name, bounds in kwargs.items():\n if name not in self._parameters:\n raise AttributeError('Unknown parameter %s for %s' % (name, self.__class__.__name__))\n param = self._parameters[name]\n # Set bounds\n lower_bound, upper_bound = bounds\n if torch.is_tensor(lower_bound) and torch.is_tensor(upper_bound):\n if lower_bound.size() != upper_bound.size() or \\\n lower_bound.size() != param.size():\n raise AttributeError('Lower bound, upper bound, and param should have the same size')\n elif not (isinstance(lower_bound, int) or isinstance(lower_bound, float)) or \\\n not (isinstance(upper_bound, int) or isinstance(upper_bound, float)):\n raise 
AttributeError('Unsupported argument types for parameter %s' % name)\n\n if name not in self._bounds:\n self._bounds[name] = [None, None]\n self._bounds[name][0] = lower_bound\n self._bounds[name][1] = upper_bound\n return self\n\n def bound_for(self, name):\n \"\"\"\n Get bounds for parameter\n\n name (str): parameter name\n \"\"\"\n if '.' in name:\n module, name = name.split('.', 1)\n if module in self._modules:\n return self.__getattr__(module).bound_for(name)\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no module %s' % (name, type(self).__name__, module))\n else:\n if name in self._parameters:\n return self._bounds[name]\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no parameter %s' % (name, type(self).__name__, module))\n\n def named_parameter_bounds(self):\n \"\"\"\n Returns an iterator over module parameters bounds, yielding both the\n name of the parameter as well as the parameter bound itself\n \"\"\"\n for name, _ in self.named_parameters():\n yield name, self.bound_for(name)\n\n def parameter_bounds(self):\n \"\"\"\n Returns an iterator over module parameters bounds.\n This is typically passed to an optimizer.\n \"\"\"\n for name, bound in self.named_parameter_bounds():\n yield bound\n\n def _set_conditioning_flag(self, mode=True):\n self.conditioning = mode\n for module in self.children():\n module._set_conditioning_flag(mode)\n\n def condition(self, train_inputs, train_target=None, **kwargs):\n \"\"\"\n Conditions the model on data. After conditioning, the model functions\n in posterior mode rather than prior mode.\n\n train_inputs: (Variables or tuple of Variables) inputs to condition on\n train_target: (Variable) target to condition on\n \"\"\"\n if (isinstance(train_inputs, Variable) or isinstance(train_inputs, LazyVariable) or\n isinstance(train_inputs, RandomVariable)):\n train_inputs = train_inputs,\n if not (isinstance(train_target, Variable) or isinstance(train_target, LazyVariable) or\n isinstance(train_target, RandomVariable)):\n raise RuntimeError('train_target must be a Variable or LazyVariable or RandomVariable')\n\n training_mode = self.training\n self.train()\n self._set_conditioning_flag(True)\n self(*train_inputs)\n self.train(training_mode)\n self._set_conditioning_flag(False)\n\n self.train_target = train_target\n\n return self\n\n @property\n def posterior(self):\n \"\"\"\n Returns if the model is in posterior mode (are we conditioning on data?)\n \"\"\"\n return hasattr(self, 'train_inputs') and not self.training\n\n @property\n def has_grid(self):\n return hasattr(self, 'grid')\n\n @property\n def needs_grid(self):\n return False\n\n def _set_interpolation_grid(self, grid, inducing_points, grid_size, grid_bounds):\n if self.needs_grid:\n self.grid_size = grid_size\n self.grid_bounds = grid_bounds\n self.register_buffer('grid', grid)\n self.register_buffer('inducing_points', inducing_points)\n\n for module in self.children():\n module._set_interpolation_grid(grid, inducing_points, grid_size, grid_bounds)\n\n def initialize_interpolation_grid(self, grid_size, grid_bounds):\n grid = torch.zeros(len(grid_bounds), grid_size)\n for i in range(len(grid_bounds)):\n grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)\n grid[i] = torch.linspace(grid_bounds[i][0] - grid_diff,\n grid_bounds[i][1] + grid_diff,\n grid_size)\n\n inducing_points = torch.zeros(int(pow(grid_size, len(grid_bounds))), len(grid_bounds))\n prev_points = None\n for i in range(len(grid_bounds)):\n for j in range(grid_size):\n 
inducing_points[j * grid_size ** i:(j + 1) * grid_size ** i, i].fill_(grid[i, j])\n if prev_points is not None:\n inducing_points[j * grid_size ** i:(j + 1) * grid_size ** i, :i].copy_(prev_points)\n prev_points = inducing_points[:grid_size ** (i + 1), :(i + 1)]\n\n self._set_interpolation_grid(grid, inducing_points, grid_size, grid_bounds)\n return self\n\n def __getattr__(self, name):\n if '_parameters' in self.__dict__:\n _parameters = self.__dict__['_parameters']\n if name in _parameters:\n param = _parameters[name]\n # Ensure parameter is within bounds\n lower_bound, upper_bound = self._bounds[name]\n lower_mask = param.data < lower_bound\n if any(lower_mask.view(-1)):\n if torch.is_tensor(lower_bound):\n param.data.masked_scatter_(lower_mask, lower_bound[lower_mask])\n else:\n param.data.masked_fill_(lower_mask, lower_bound)\n upper_mask = param.data > upper_bound\n if any(upper_mask.view(-1)):\n if torch.is_tensor(upper_bound):\n param.data.masked_scatter_(upper_mask, upper_bound[upper_mask])\n else:\n param.data.masked_fill_(upper_mask, upper_bound)\n return param\n if '_buffers' in self.__dict__:\n _buffers = self.__dict__['_buffers']\n if name in _buffers:\n return _buffers[name]\n if '_modules' in self.__dict__:\n modules = self.__dict__['_modules']\n if name in modules:\n return modules[name]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, name))\n\n def __setattr__(self, name, value):\n if isinstance(value, nn.Parameter):\n raise RuntimeError(\"Please assign torch.nn.Parameters using\"\n \"gpytorch.module.register_parameters()\")\n else:\n super(Module, self).__setattr__(name, value)\n","sub_path":"gpytorch/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":11127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584233838","text":"\"\"\"Departures over trailing days\"\"\"\nimport datetime\nfrom collections import OrderedDict\n\nfrom pandas.io.sql import read_sql\nfrom pyiem.network import Table as NetworkTable\nfrom pyiem.util import get_autoplot_context, get_dbconn\n\nPDICT = OrderedDict([\n ('precip', 'Precipitation [inch]'),\n ('avgt', 'Daily Average Temperature [F]'),\n ('high', 'Daily High Temperature [F]'),\n ('low', 'Daily Low Temperature [F]')])\n\n\ndef get_description():\n \"\"\" Return a dict describing how to call this plotter \"\"\"\n desc = dict()\n desc['data'] = True\n desc['cache'] = 86400\n desc['description'] = \"\"\"This plot presents the trailing X number of days\n temperature or precipitation departure from long term average.\n \"\"\"\n today = datetime.date.today()\n sts = today - datetime.timedelta(days=720)\n desc['arguments'] = [\n dict(type='station', name='station', default='IA0200',\n label='Select Station:', network='IACLIMATE'),\n dict(type='int', name='p1', default=31, label='First Period of Days'),\n dict(type='int', name='p2', default=91,\n label='Second Period of Days'),\n dict(type='int', name='p3', default=365,\n label='Third Period of Days'),\n dict(type='date', name='sdate', default=sts.strftime(\"%Y/%m/%d\"),\n min='1893/01/01',\n label='Start Date of Plot'),\n dict(type='date', name='edate', default=today.strftime(\"%Y/%m/%d\"),\n min='1893/01/01',\n label='End Date of Plot'),\n dict(type='select', name='pvar', default='precip', options=PDICT,\n label='Which variable to plot?'),\n ]\n return desc\n\n\ndef plotter(fdict):\n \"\"\" Go \"\"\"\n import matplotlib\n matplotlib.use('agg')\n import matplotlib.pyplot as plt\n 
import matplotlib.dates as mdates\n ctx = get_autoplot_context(fdict, get_description())\n station = ctx['station']\n network = ctx['network']\n nt = NetworkTable(network)\n p1 = ctx['p1']\n p2 = ctx['p2']\n p3 = ctx['p3']\n pvar = ctx['pvar']\n sts = ctx['sdate']\n ets = ctx['edate']\n bts = sts - datetime.timedelta(days=max([p1, p2, p3]))\n\n pgconn = get_dbconn('coop')\n\n table = \"alldata_%s\" % (station[:2], )\n df = read_sql(\"\"\"\n WITH obs as (\n SELECT day,\n high - avg(high) OVER (PARTITION by sday) as high_diff,\n low - avg(low) OVER (PARTITION by sday) as low_diff,\n ((high+low)/2.) -\n avg((high+low)/2.) OVER (PARTITION by sday) as avgt_diff,\n precip - avg(precip) OVER (PARTITION by sday) as precip_diff\n from \"\"\" + table + \"\"\"\n WHERE station = %s ORDER by day ASC),\n lags as (\n SELECT day,\n avg(high_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p1_high_diff,\n avg(high_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p2_high_diff,\n avg(high_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p3_high_diff,\n avg(low_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p1_low_diff,\n avg(low_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p2_low_diff,\n avg(low_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p3_low_diff,\n avg(avgt_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p1_avgt_diff,\n avg(avgt_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p2_avgt_diff,\n avg(avgt_diff) OVER (ORDER by day ASC ROWS %s PRECEDING) as p3_avgt_diff,\n sum(precip_diff)\n OVER (ORDER by day ASC ROWS %s PRECEDING) as p1_precip_diff,\n sum(precip_diff)\n OVER (ORDER by day ASC ROWS %s PRECEDING) as p2_precip_diff,\n sum(precip_diff)\n OVER (ORDER by day ASC ROWS %s PRECEDING) as p3_precip_diff\n from obs WHERE day >= %s and day <= %s)\n\n SELECT * from lags where day >= %s and day <= %s ORDER by day ASC\n \"\"\", pgconn, params=(station, p1, p2, p3, p1, p2, p3, p1, p2, p3,\n p1, p2, p3, bts, ets, sts, ets), index_col='day')\n\n (fig, ax) = plt.subplots(1, 1, figsize=(8, 6))\n\n ax.plot(df.index.values, df['p1_'+pvar+'_diff'], lw=2,\n label='%s Day' % (p1, ))\n ax.plot(df.index.values, df['p2_'+pvar+'_diff'], lw=2,\n label='%s Day' % (p2, ))\n ax.plot(df.index.values, df['p3_'+pvar+'_diff'], lw=2,\n label='%s Day' % (p3, ))\n ax.set_title((\"[%s] %s\\nTrailing %s, %s, %s Day Departures\"\n ) % (station, nt.sts[station]['name'], p1, p2, p3))\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\\n%Y'))\n ax.set_ylabel(PDICT.get(pvar))\n ax.grid(True)\n ax.legend(ncol=3, fontsize=12, loc='best')\n ax.text(1, -0.12, \"%s to %s\" % (sts.strftime(\"%-d %b %Y\"),\n ets.strftime(\"%-d %b %Y\")), va='bottom',\n ha='right', fontsize=12, transform=ax.transAxes)\n\n return fig, df\n\n\nif __name__ == '__main__':\n plotter(dict())\n","sub_path":"htdocs/plotting/auto/scripts100/p142.py","file_name":"p142.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187436556","text":"from django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\nimport logging\nfrom importlib import import_module\n\nlogger = logging.getLogger(__name__)\n\n\ndef index(request, template=None):\n \"\"\"Render the Identity home page.\"\"\"\n conf = {'urls': settings.CORE_URLS,\n 'session_timeout': settings.SESSION_TIMEOUT_DEFAULT_SECONDS}\n\n return render(request, 'idbase/index.html', conf)\n\n\ndef login(request):\n \"\"\"This view gets 
SSO-protected and redirects to next.\"\"\"\n if request.user.is_authenticated():\n logger.info('User %s logged in' % (request.user.username))\n if not request.user.username.endswith('@washington.edu'):\n # Non-uw possibility when using a federated idp for recovery.\n return _login_error(request)\n if (request.user.get_full_name() is None and\n hasattr(settings, 'GET_FULL_NAME_FUNCTION')):\n mod, func = settings.GET_FULL_NAME_FUNCTION.rsplit('.', 1)\n module = import_module(mod)\n full_name_function = getattr(module, func)\n request.user.set_full_name(full_name_function(request))\n return _safe_redirect(request)\n else:\n # This can happen if a user gets past weblogin but comes in with\n # no attributes, which indicates a problem upstream.\n return _login_error(request)\n\n\ndef logout(request):\n response = _safe_redirect(request)\n logger.debug('Logging out {}'.format(request.user.username))\n for cookie in request.COOKIES.keys():\n response.delete_cookie(cookie)\n return response\n\n\ndef _safe_redirect(request):\n \"\"\"Return a redirect response to the 'next' parameter if safe.\"\"\"\n redirect_target = request.GET.get('next', '/')\n return redirect(redirect_target if is_safe_url(redirect_target) else '/')\n\n\ndef _login_error(request):\n context = {}\n if not request.user.is_authenticated():\n logger.error('No REMOTE_USER variable set')\n else:\n logger.error('incorrect idp!!, REMOTE_USER={}'.format(\n request.user.username))\n context['non_uw_user'] = True\n\n # end of the road.\n request.session.flush()\n return render(request, 'idbase/login-error.html', status=401,\n context=context)\n","sub_path":"idbase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2708964","text":"import numpy as np\nimport random as rnd\nL=5 #size of atrium\nd = 0.05 #probability of dysfunction\nv = 0.2 # probability of transverse connection down\n\ndef CreateAtrium(L,v,d):\n \"\"\"Creats the atrium. Atrium[i,j] gives a site (row,column). \n Atrium[i,j[0] gives phase. \n Atrium[i,j][1] dysfunctionality (False = dysfunctional), \n Atrium[i,j[2] gives neightbouring sites\"\"\" \n Atrium = np.ndarray((L,L),dtype = list)\n for i in range(L):\n for j in range(L):\n y = rnd.uniform(0,1)\n if d > y:\n Atrium[i,j] = [4, False, []]\n if d <= y:\n Atrium[i,j] = [4, True,[]]\n for i in range(L):\n for j in range(L):\n if j == 0:\n Atrium[i,j][2].extend([(i,j+1)])\n if j == L-1:\n Atrium[i,j][2].extend([(i,j-1)])\n if j>0 and j>= 8\n ans += 1\n print(ans)\n\n\ndef work2(n):\n a = [0] * (n + 1)\n for _ in range(n):\n num, name = input().split(' ')\n num = int(num)\n a[num] = name\n m = int(input())\n for _ in range(m):\n q = int(input())\n ans = 0\n for j in range(q):\n x, y = input().split(' ')\n x = int(x)\n y = int(y)\n for k in range(min(len(a[x]), len(a[y]))):\n if a[x][k] != a[y][k]:\n break\n ans += k\n print(ans)\n\n\n# -- author: lijw --\nif __name__ == '__main__':\n n = int(input())\n work2(n)\n","sub_path":"43. 
Magicstar/043.py","file_name":"043.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465564507","text":"from __future__ import print_function\n\ndef print_n_log(log, string):\n print(string, end=\"\")\n if log is not None:\n log.write(string)\n\n\ndef refresh_log(log, model_path):\n if log is not None:\n log.close()\n log = open(model_path + '/log.txt', \"a\")\n return log","sub_path":"utilities/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189319972","text":"from openslide import OpenSlide\nimport os\nfrom typeguard import typechecked\nfrom typing import Optional\nfrom syntax._utils import misc\n\n@typechecked\nclass Slide(OpenSlide):\n \"\"\"The summary line for a class docstring should fit on one line.\n\n If the class has public attributes, they may be documented here\n in an ``Attributes`` section and follow the same formatting as a\n function's ``Args`` section. Alternatively, attributes may be documented\n inline with the attribute's declaration (see __init__ method below).\n\n Properties created with the ``@property`` decorator should be documented\n in the property's getter method.\n\n Attributes:\n attr1 (str): Description of `attr1`.\n attr2 (:obj:`int`, optional): Description of `attr2`.\n\n \"\"\"\n\n def __init__(self,\n slide_path: str,\n level0: Optional[int] = None,\n verbose: Optional[bool] = False):\n \"\"\"\n\n Args:\n slide_path: Path to the WSI readable by openslide.\n level0: The 'magnification' at level 0. If 'infer' we attempt to get from metadata.\n verbose:\n \"\"\"\n super(Slide, self).__init__(slide_path)\n self.verbose = verbose\n\n # Get slide id for reference\n self.ID = os.path.splitext(os.path.basename(slide_path))[0]\n\n # Add level0 magnification.\n if level0 == None:\n try:\n self.level0 = float(self.properties['openslide.objective-power'])\n if self.verbose:\n print('Level 0 found @ {}X'.format(self.level0))\n except:\n raise Exception('Slide does not have property objective-power.')\n else:\n self.level0 = float(level0)\n\n # Compute level magnifications.\n self._magnification_list = [self.level0 / downsample for downsample in self.level_downsamples]\n\n def get_tile(self, w: int, h: int, magnification: int, size: int):\n \"\"\"\n Get a tile.\n If required magnification not available will use a higher magnification and resize.\n Args:\n w: Width coordinate in level 0 frame.\n h: Height coordinate in level 0 frame.\n magnification: Desired magnification.\n size: Desired tile size (square tile).\n\n Returns:\n\n \"\"\"\n assert self.level0 >= magnification, 'Magnification not available.'\n\n higher_mags = [self.magnifications[i] >= magnification for i in range(len(self.magnifications))]\n extraction_level = misc.index_last_non_zero(higher_mags)\n extraction_mag = self.magnifications[extraction_level]\n extraction_size = int(size * extraction_mag / magnification)\n\n # Make sure it's RGB (not e.g. 
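The magnification fallback in `get_tile` above reads from the closest level whose magnification is at or above the requested one, then shrinks the tile. A worked example with illustrative numbers, assuming `misc.index_last_non_zero` returns the index of the last truthy entry:

# level0 = 40X with level_downsamples (1, 4, 16) gives magnifications [40, 10, 2.5]
magnification, size = 20, 256
mags = [40, 10, 2.5]
extraction_level = max(i for i, m in enumerate(mags) if m >= magnification)  # 0
extraction_size = int(size * mags[extraction_level] / magnification)         # 512
print(extraction_level, extraction_size)
# i.e. read a 512x512 region at 40X, then thumbnail it down to 256x256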
RGBA).\n tile = self.read_region((w, h), extraction_level, (extraction_size, extraction_size)).convert('RGB')\n if extraction_size != size:\n tile.thumbnail((size, size)) # Resize inplace.\n return tile\n\n @property\n def magnifications(self):\n return self._magnification_list","sub_path":"syntax/slide/slide.py","file_name":"slide.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107799803","text":"import datetime\n\n\nclass Song():\n MAX_RATING = 5\n MIN_RATING = 1\n\n def __init__(self, title, artist, album, rate, length, bitrate):\n self.title = title\n self.artist = artist\n self.album = album\n self.rate = rate\n self.length = length\n self.bitrate = bitrate\n\n def rate_song(self, rate):\n if rate < self.MIN_RATING or rate > self.MAX_RATING:\n error_message = \"rate must be between %s and %s\" % (\n self.MIN_RATING, self.MAX_RATING)\n raise ValueError(error_message)\n self.rate = rate\n\n def str(self):\n return str(self)\n\n def __str__(self):\n time = str(datetime.timedelta(seconds=self.length))\n return \"{} {} - {}\".format(self.artist, self.title, time)\n","sub_path":"week1-2nd/Mplayer/Song.py","file_name":"Song.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228774812","text":"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport copy\nimport itertools\nfrom functools import partial\nfrom logging import FileHandler\nfrom typing import Any, Dict, List, Optional\n\nfrom lisa import schema\nfrom lisa.action import Action\nfrom lisa.testsuite import TestResult, TestStatus\nfrom lisa.util import BaseClassMixin, constants, run_in_threads\nfrom lisa.util.logger import create_file_handler, get_logger, remove_handler\nfrom lisa.util.subclasses import Factory\n\n\ndef parse_testcase_filters(raw_filters: List[Any]) -> List[schema.BaseTestCaseFilter]:\n if raw_filters:\n filters: List[schema.BaseTestCaseFilter] = []\n factory = Factory[schema.BaseTestCaseFilter](schema.BaseTestCaseFilter)\n for raw_filter in raw_filters:\n if constants.TYPE not in raw_filter:\n raw_filter[constants.TYPE] = constants.TESTCASE_TYPE_LISA\n filter = factory.create_runbook(raw_filter)\n filters.append(filter)\n else:\n filters = [schema.TestCase(name=\"test\", criteria=schema.Criteria(area=\"demo\"))]\n return filters\n\n\nclass BaseRunner(BaseClassMixin):\n \"\"\"\n Base runner of other runners.\n \"\"\"\n\n def __init__(self, runbook: schema.Runbook) -> None:\n super().__init__()\n self._runbook = runbook\n\n self._log = get_logger(self.type_name())\n self._log_handler: Optional[FileHandler] = None\n self.canceled = False\n\n def run(self, id_: str) -> List[TestResult]:\n # do not put this logic to __init__, since the mkdir takes time.\n if self.type_name() == constants.TESTCASE_TYPE_LISA:\n # default lisa runner doesn't need separated handler.\n self._working_folder = constants.RUN_LOCAL_PATH\n else:\n # create separated folder and log for each runner.\n runner_path_name = f\"{self.type_name()}_runner\"\n self._working_folder = constants.RUN_LOCAL_PATH / runner_path_name\n self._log_file_name = str(self._working_folder / f\"{runner_path_name}.log\")\n self._working_folder.mkdir(parents=True, exist_ok=True)\n self._log_handler = create_file_handler(self._log_file_name, self._log)\n return self._run(id_)\n\n def _run(self, id_: str) -> List[TestResult]:\n raise 
NotImplementedError()\n\n def close(self) -> None:\n if self._log_handler:\n remove_handler(self._log_handler)\n\n\nclass RootRunner(Action):\n \"\"\"\n The entry runner, which starts other runners.\n \"\"\"\n\n def __init__(self, runbook: schema.Runbook) -> None:\n super().__init__()\n self.exit_code: int = 0\n\n self._runbook = runbook\n self._log = get_logger(\"RootRunner\")\n self._runners: List[BaseRunner] = []\n\n async def start(self) -> None:\n await super().start()\n\n self._initialize_runners()\n raw_results = self._start_run()\n\n test_results = list(itertools.chain(*raw_results))\n self._output_results(test_results)\n\n # pass failed count to exit code\n self.exit_code = sum(1 for x in test_results if x.status == TestStatus.FAILED)\n\n async def stop(self) -> None:\n await super().stop()\n # TODO: to be implemented\n\n async def close(self) -> None:\n await super().close()\n\n def _completed_callback(self, future: Any) -> None:\n \"\"\"\n exit sub tests, once received cancellation message from executor.\n \"\"\"\n # future is False, if it's called explicitly by run_in_threads.\n if not future or future.cancelled() or future.exception():\n self._log.debug(f\"set cancel signal on future: {future}\")\n for runner in self._runners:\n runner.canceled = True\n\n def _initialize_runners(self) -> None:\n # group filters by runner type\n runner_filters: Dict[str, List[schema.BaseTestCaseFilter]] = {}\n for raw_filter in self._runbook.testcase_raw:\n # by default run all filtered cases unless 'enable' is specified as false\n filter = schema.BaseTestCaseFilter.schema().load(raw_filter) # type:ignore\n if filter.enable:\n raw_filters: List[schema.BaseTestCaseFilter] = runner_filters.get(\n filter.type, []\n )\n if not raw_filters:\n runner_filters[filter.type] = raw_filters\n raw_filters.append(raw_filter)\n else:\n self._log.debug(f\"Skip disabled filter: {raw_filter}.\")\n\n # initialize runners\n factory = Factory[BaseRunner](BaseRunner)\n for runner_name, raw_filters in runner_filters.items():\n self._log.debug(\n f\"create runner {runner_name} with {len(raw_filters)} filter(s).\"\n )\n\n runbook = copy.copy(self._runbook)\n # keep filters to current runner's only.\n runbook.testcase = parse_testcase_filters(raw_filters)\n runner = factory.create_by_type_name(type_name=runner_name, runbook=runbook)\n\n self._runners.append(runner)\n\n def _output_results(self, test_results: List[TestResult]) -> None:\n self._log.info(\"________________________________________\")\n result_count_dict: Dict[TestStatus, int] = dict()\n for test_result in test_results:\n self._log.info(\n f\"{test_result.runtime_data.metadata.full_name:>50}: \"\n f\"{test_result.status.name:<8} {test_result.message}\"\n )\n result_count = result_count_dict.get(test_result.status, 0)\n result_count += 1\n result_count_dict[test_result.status] = result_count\n\n self._log.info(\"test result summary\")\n self._log.info(f\" TOTAL : {len(test_results)}\")\n for key in TestStatus:\n count = result_count_dict.get(key, 0)\n if key == TestStatus.ATTEMPTED and count == 0:\n # attempted is confusing, if user don't know it.\n # so hide it, if there is no attempted cases.\n continue\n self._log.info(f\" {key.name:<9}: {count}\")\n\n def _start_run(self) -> List[List[TestResult]]:\n raw_results: List[List[TestResult]] = []\n # in case all of runners are disabled\n if self._runners:\n try:\n raw_results = run_in_threads(\n [\n partial(runner.run, id_=runner.type_name())\n for runner in self._runners\n ],\n 
completed_callback=self._completed_callback,\n log=self._log,\n )\n finally:\n for runner in self._runners:\n runner.close()\n return raw_results\n","sub_path":"lisa/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"580589526","text":"\"\"\"\nCopyright (c) 2018 SPARKL Limited. All Rights Reserved.\nAuthor Miklos Duma.\n\nTest cases for Lib SMS SPARKL mix in examples repo.\n\"\"\"\nimport pytest\n\n# IMPORT_DIR is the default folder in the SPARKL user tree\n# your test files are imported to for testing\n# setup method in conftest creates it for the tests and then deletes it\nfrom tests.conftest import (IMPORT_DIR, OPERATION, INPUT_FIELDS,\n EXP_RESPONSE, run_tests, read_from_config)\n\n# Environment variables used by test.\nTWILIO_SID = read_from_config('twilio_sid')\nTWILIO_PWD = read_from_config('twilio_pass')\nTWILIO_NUMBER = read_from_config('twilio_number')\nTEST_NUMBER = read_from_config('test_number')\n\n# List of file paths to one or more SPARKL mixes your test needs.\n# The setup method uses the path(s) to import your configuration(s)\nFILE_PATHS = ['Library/lib_sms/sms_lib.xml']\n\n# The path to the root folder of your mix in the SPARKL user tree.\nUSER_TREE_PATH = '{}/lib.sms'.format(IMPORT_DIR)\n\n# Message sent in SMS by one of the tests.\nTEST_MESSAGE = 'Lib sms test succesful!'\n\n# The path to all tested operations in the user tree.\nSOLICIT_OP = '{}/Mix/SendMessageSecure'.format(USER_TREE_PATH)\nSET_STATE_OP = '{}/Mix/Impl/SetAPIKeys'.format(USER_TREE_PATH)\n\n# Responses/replies sent by SPARKL.\nOK_RESP = 'Ok'\n\n# Input fields used by the operations.\nACC_SID_FLD = 'acc_sid'\nAUTH_FLD = 'auth_token'\nFROM_FLD = 'from'\nMSG_FLD = 'message'\nTO_FLD = 'to'\n\n##########################################################################\n# Test data.\n#\n# Each set of data is used to call the parametrised test once.\n# A set comprises:\n# - OPERATION:\n# The name of the operation to call\n# - EXP_RESPONSE:\n# The expected response/reply\n# - INPUT_FIELDS (optional):\n# The input fields and their values, if any\n# - OUTPUT_FIELDS (optional):\n# One or more output fields with their expected value\n# - CHECK_FUN (optional):\n# A function that makes extra assertions on the output values\n# - STOP_OR_NOT (optional):\n# A flag to indicate all running services must be stopped\n# before the test is run\n##########################################################################\nTEST_DATA = [\n\n # Tests SetAPIKeys operation. Expects Ok reply.\n {\n OPERATION: SET_STATE_OP,\n INPUT_FIELDS: [(ACC_SID_FLD, TWILIO_SID),\n (AUTH_FLD, TWILIO_PWD),\n (FROM_FLD, TWILIO_NUMBER)],\n EXP_RESPONSE: OK_RESP},\n\n # Tests SendMessageSecure solicit. Expects Ok response.\n {\n OPERATION: SOLICIT_OP,\n INPUT_FIELDS: [(ACC_SID_FLD, TWILIO_SID),\n (AUTH_FLD, TWILIO_PWD),\n (FROM_FLD, TWILIO_NUMBER),\n (MSG_FLD, TEST_MESSAGE),\n (TO_FLD, TEST_NUMBER)],\n EXP_RESPONSE: OK_RESP}]\n\n\n@pytest.mark.parametrize('test_data', TEST_DATA)\ndef test_lib_sms(test_data, setup_method):\n \"\"\"\n Calls each set of data in TEST_DATA. 
The function also uses:\n - setup_method:\n A basic setup method that imports the needed configuration(s)\n and yields the SPARKL alias used in the session\n \"\"\"\n alias = setup_method\n run_tests(alias, **test_data)\n","sub_path":"tests/with_auth/test_lib_sms.py","file_name":"test_lib_sms.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272917576","text":"from scipy import optimize # if you remove this, everything at Utah breaks. seriously.\nimport numpy as np\nimport torch\nfrom astra import log\nfrom astra.utils import flatten, expand_path, dict_to_list, logarithmic_tqdm\nfrom astra.base import TaskInstance, Parameter\nfrom astra.database.astradb import database, ApogeeNetOutput\nfrom astropy.nddata import StdDevUncertainty\nfrom astropy import units as u\n\nfrom astra.tools.spectrum import SpectrumList\nfrom astra.tools.spectrum.utils import spectrum_overlaps\nfrom astra.contrib.apogeenet.model import Model\nfrom astra.contrib.apogeenet.utils import get_metadata, create_bitmask\n\n\nclass StellarParameters(TaskInstance):\n\n \"\"\"\n Estimate stellar parameters for APOGEE spectra given a pre-trained neural network.\n\n :param model_path:\n A model path.\n\n \"\"\"\n\n model_path = Parameter(\n default=\"$MWM_ASTRA/component_data/APOGEENet/model.pt\", bundled=True\n )\n num_uncertainty_draws = Parameter(default=100)\n large_error = Parameter(default=1e10)\n\n data_slice = Parameter(default=[0, 1]) # only affects apStar data products\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def execute(self):\n \"\"\"Execute the task.\"\"\"\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n log.info(f\"Executing on {device}\")\n\n model = Model(expand_path(self.model_path), device)\n\n all_results = []\n # Since this could be a long process, we update the progress bar only ~100 times on ~pseudo\n # uniform steps in log iterations so that we don't fill the log file with crap.\n with logarithmic_tqdm(total=len(self.context[\"tasks\"]), miniters=100) as pb:\n for i, (task, data_products, parameters) in enumerate(self.iterable()):\n\n # Input data products could be apStar, mwmVisit, or mwmStar files.\n # If they are mwmVisit/mwmStar files then those could include BOSS spectra.\n # Since we don't know yet, just load as SpectrumList.\n results = dict(snr=[], source_id=[], parent_data_product_id=[])\n flux, e_flux, meta = ([], [], [])\n for spectrum in SpectrumList.read(\n data_products[0].path, data_slice=parameters.get(\"data_slice\", None)\n ):\n # Check for APOGEE spectral range\n if not spectrum_overlaps(spectrum, 16_500 * u.Angstrom):\n continue\n\n N, P = np.atleast_2d(spectrum.flux).shape\n flux.append(np.nan_to_num(spectrum.flux.value).astype(np.float32))\n e_flux.append(\n np.nan_to_num(\n spectrum.uncertainty.represent_as(StdDevUncertainty).array\n ).astype(np.float32)\n )\n meta_dict, metadata_norm = get_metadata(spectrum)\n meta.append(np.tile(metadata_norm, N).reshape((N, -1)))\n results[\"snr\"].extend(spectrum.meta[\"SNR\"])\n results[\"source_id\"].extend([spectrum.meta.get(\"CAT_ID\", None)] * N)\n parent_data_product_ids = spectrum.meta.get(\"DATA_PRODUCT_ID\", None)\n if parent_data_product_ids is None or len(parent_data_product_ids) == 0:\n parent_data_product_ids = [data_products[0].id] * N\n results[\"parent_data_product_id\"].extend(parent_data_product_ids)\n\n assert len(results[\"snr\"]) == 
len(results[\"source_id\"])\n assert len(results[\"snr\"]) == len(results[\"parent_data_product_id\"])\n\n if len(flux) == 0:\n log.warning(\n f\"No infrared spectra found in {data_products[0]}: {data_products[0].path} -- skipping!\"\n )\n continue\n\n flux, e_flux, meta = [\n np.vstack(ea) for ea in (flux, e_flux, meta)\n ]\n median_error = 5 * np.median(e_flux, axis=1)\n for j, value in enumerate(median_error):\n bad_pixel = (e_flux[j] == parameters[\"large_error\"]) | (\n e_flux[j] >= value\n )\n e_flux[j][bad_pixel] = value\n\n N, P = flux.shape\n flux = flux.reshape((N, 1, P))\n e_flux = e_flux.reshape((N, 1, P))\n\n if N == 1:\n flux = torch.from_numpy(flux).to(device)\n e_flux = torch.from_numpy(e_flux).to(device)\n meta = torch.from_numpy(meta).to(device)\n\n with torch.set_grad_enabled(False):\n predictions = model.predict_spectra(flux, meta)\n if device != \"cpu\":\n predictions = predictions.cpu().data.numpy()\n\n # Replace infinites with non-finite.\n predictions[~np.isfinite(predictions)] = np.nan\n\n inputs = (\n torch.randn(\n (parameters[\"num_uncertainty_draws\"], N, 1, P), device=device\n )\n * e_flux\n + flux\n )\n inputs = inputs.reshape((parameters[\"num_uncertainty_draws\"] * N, 1, P))\n\n meta_draws = meta.repeat(parameters[\"num_uncertainty_draws\"], 1)\n with torch.set_grad_enabled(False):\n draws = model.predict_spectra(inputs, meta_draws)\n if device != \"cpu\":\n draws = draws.cpu().data.numpy()\n\n draws = draws.reshape((parameters[\"num_uncertainty_draws\"], N, -1))\n\n\n else:\n predictions = []\n with torch.set_grad_enabled(False):\n for j in range(N):\n prediction = model.predict_spectra(\n torch.from_numpy(flux[[j]]).to(device),\n torch.from_numpy(meta[[j]]).to(device),\n )\n if device != \"cpu\":\n prediction = prediction.cpu().data.numpy()\n predictions.append(prediction)\n predictions = np.array(predictions).reshape((N, -1))\n predictions[~np.isfinite(predictions)] = np.nan\n\n draws = []\n with torch.set_grad_enabled(False):\n for j in range(N):\n inputs = (\n torch.randn(\n (parameters[\"num_uncertainty_draws\"], 1, P), device=device\n )\n * torch.from_numpy(e_flux[[j]]).to(device)\n + torch.from_numpy(flux[[j]]).to(device)\n )\n meta_draws = torch.from_numpy(meta[[j]]).to(device).repeat(\n parameters[\"num_uncertainty_draws\"], 1\n )\n draw = model.predict_spectra(inputs, meta_draws)\n if device != \"cpu\":\n draw = draw.cpu().data.numpy()\n draws.append(draw)\n\n draws = np.array(draws).reshape((parameters[\"num_uncertainty_draws\"], N, -1))\n\n # un-log10-ify the draws before calculating summary statistics\n predictions[:, 1] = 10 ** predictions[:, 1]\n draws[:, :, 1] = 10 ** draws[:, :, 1]\n\n median_draw_predictions = np.nanmedian(draws, axis=0)\n std_draw_predictions = np.nanstd(draws, axis=0)\n\n logg_median, teff_median, fe_h_median = median_draw_predictions.T\n logg_std, teff_std, fe_h_std = std_draw_predictions.T\n\n logg, teff, fe_h = predictions.T\n\n bitmask_flag = create_bitmask(\n predictions,\n meta_dict,\n median_draw_predictions=median_draw_predictions,\n std_draw_predictions=std_draw_predictions,\n )\n\n results.update({\n \"teff\": teff,\n \"logg\": logg,\n \"fe_h\": fe_h,\n \"e_teff\": teff_std,\n \"e_logg\": logg_std,\n \"e_fe_h\": fe_h_std,\n \"teff_sample_median\": teff_median,\n \"logg_sample_median\": logg_median,\n \"fe_h_sample_median\": fe_h_median,\n \"bitmask_flag\": bitmask_flag,\n })\n\n results_list = dict_to_list(results)\n\n # Create or update rows.\n with database.atomic():\n 
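The draw loop above propagates flux uncertainty by Monte Carlo: it perturbs the input as `randn * e_flux + flux`, re-predicts, and summarises the draws with `nanmedian`/`nanstd`. A toy numpy sketch of the same pattern (the `predict` stand-in is hypothetical, not the APOGEENet model):

import numpy as np

rng = np.random.default_rng(0)
predict = lambda x: np.array([x.sum(), (x ** 2).sum()])  # hypothetical model

flux = np.ones(10)
e_flux = 0.1 * np.ones(10)
draws = np.array([
    predict(flux + e_flux * rng.standard_normal(flux.shape))
    for _ in range(100)
])
print(np.nanmedian(draws, axis=0))  # central estimate per output
print(np.nanstd(draws, axis=0))     # 1-sigma spread per output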
task.create_or_update_outputs(ApogeeNetOutput, results_list)\n\n pb.update()\n all_results.append(results_list)\n\n return all_results\n","sub_path":"python/astra/contrib/apogeenet/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"393613111","text":"\n\n#calss header\nclass _RANDOM():\n\tdef __init__(self,): \n\t\tself.name = \"RANDOM\"\n\t\tself.definitions = [u'happening, done, or chosen by chance rather than according to a plan: ', u'strange or unusual: ', u'unknown and unexpected in a particular situation: ', u'by chance, or without being chosen intentionally: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_random.py","file_name":"_random.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75025200","text":"#!/usr/bin/env python3\n# Require Python 3.8\n\nfrom urllib.request import urlopen\nfrom urllib.error import URLError, HTTPError\n\nimport xml.etree.ElementTree as ET\n\nfrom datadog_checks.base.checks import AgentCheck\n\n\ndef get_latest_version(versions):\n '''\n get_latest_version takes a list of Jenkins versions\n then return the latest one.\n It sorts separatelly each Jenkins versions components\n which follow the pattern X.Y.Z where:\n - X.Y is a weekly version\n - X.Y.Z is a stable version\n So we retrieve the latest X component version.\n Then we look for the latest valid Y version considering X.\n Finally we look for the latest Z version considering X.Y\n '''\n\n results = []\n # for i in range(3) limit the sort to the first 3 components\n for i in range(3):\n solutions = []\n for version in versions:\n values = version.split('.')\n\n str_results = [str(x) for x in results]\n\n if (len(results) < len(values) and\n str_results[0:len(results)] == values[0:len(results)]):\n try:\n if int(values[i]) not in solutions:\n solutions.append(int(values[i]))\n except Exception:\n print(\"Ignoring version {}\".format(values[i]))\n\n if not solutions:\n break\n\n results.append(int(sorted(solutions)[-1]))\n\n str_results = [str(x) for x in results]\n return '.'.join(str_results)\n\n\ndef is_exist(distribution, url):\n '''\n is_exist test if a specific artifact exist on the destination\n '''\n try:\n return_code = urlopen(url).code\n if return_code != 200:\n print('rc should be 200 but is {}'.format(return_code))\n return 1\n except HTTPError as err:\n if err.code == 404:\n print('{} package not found on {}'.format(distribution, url))\n else:\n print('Something went wrong with url {} for {} package: {}'\n .format(url, distribution, err))\n return 0\n\n\nclass PackageCanBeDownloaded(AgentCheck):\n '''\n PackageCanBeDownloaded tests that the latest jenkins packages\n can be downloaded\n '''\n metadataUrl = 'https://repo.jenkins-ci.org/releases/org/jenkins-ci/main/jenkins-war/maven-metadata.xml'\n\n def get_latest_weekly_version(self):\n '''\n getLatestWeeklyVersion retrieves the latest weekly version\n '''\n try:\n url = self.metadataUrl\n tree = ET.parse(urlopen(url))\n root = tree.getroot()\n return root.find('versioning/latest').text\n\n except URLError as err:\n self.warning(\"Something 
went wrong while retrieving weekly version: {}\"\n .format(err))\n\n def get_latest_stable_version(self):\n '''\n getLatestStableVersion retrieves the latest stable version\n '''\n try:\n stable_version = []\n url = self.metadataUrl\n tree = ET.parse(urlopen(url))\n root = tree.getroot()\n\n versions = root.findall('versioning/versions/version')\n\n for version in versions:\n if len(version.text.split('.')) == 3:\n stable_version.append(version.text)\n\n return get_latest_version(stable_version)\n\n except URLError as err:\n self.warning(\"Something went wrong while retrieving stable version: {}\"\n .format(err))\n\n def check(self, instance):\n '''\n check defines the datadog custom check\n '''\n\n weekly_version = self.get_latest_weekly_version()\n stable_version = self.get_latest_stable_version()\n\n hostname = \"get.jenkins.io\"\n\n endpoints = {\n 'debian': 'https://{}/debian/jenkins_{}_all.deb'\n .format(hostname, weekly_version),\n 'redhat': 'https://{}/redhat/jenkins-{}-1.1.noarch.rpm'\n .format(hostname, weekly_version),\n 'opensuse': 'https://{}/opensuse/jenkins-{}-1.2.noarch.rpm'\n .format(hostname, weekly_version),\n 'windows': 'https://{}/windows/{}/jenkins.msi'\n .format(hostname, weekly_version),\n 'war': 'https://{}/war/{}/jenkins.war'\n .format(hostname, weekly_version),\n 'debian-stable': 'https://{}/debian-stable/jenkins_{}_all.deb'\n .format(hostname, stable_version),\n 'redhat-stable': 'https://{}/redhat-stable/jenkins-{}-1.1.noarch.rpm'\n .format(hostname, stable_version),\n 'windows-stable': 'https://{}/windows-stable/{}/jenkins.msi'\n .format(hostname, stable_version),\n 'opensuse-stable': 'https://{}/opensuse-stable/jenkins-{}-1.2.noarch.rpm'\n .format(hostname, stable_version),\n 'war-stable': 'https://{}/war-stable/{}/jenkins.war'\n .format(hostname, stable_version),\n }\n\n metric = 'jenkins.package.available'\n package = instance['package']\n self.warning(\"PackageAvailable: {}\".format(package))\n tags = [\n \"package:\" + package,\n ]\n\n if endpoints.__contains__(package):\n self.gauge(metric, is_exist(package, endpoints[package]), tags)\n else:\n self.warning(\"PackageCanDownload: Package {} is not supported\"\n .format(package))\n\n\nif __name__ == \"__main__\":\n '''\n Only there for testing purposes\n '''\n p = PackageCanBeDownloaded\n\n weekly_version = p.get_latest_weekly_version(p)\n stable_version = p.get_latest_stable_version(p)\n hostname = \"get.jenkins.io\"\n\n endpoints = {\n 'debian': 'https://{}/debian/jenkins_{}_all.deb'\n .format(hostname, weekly_version),\n 'redhat': 'https://{}/redhat/jenkins-{}-1.1.noarch.rpm'\n .format(hostname, weekly_version),\n 'opensuse': 'https://{}/opensuse/jenkins-{}-1.2.noarch.rpm'\n .format(hostname, weekly_version),\n 'war': 'https://{}/war/{}/jenkins.war'\n .format(hostname, weekly_version),\n 'windows': 'https://{}/windows/{}/jenkins.msi'\n .format(hostname, weekly_version),\n 'debian-stable': 'https://{}/debian-stable/jenkins_{}_all.deb'\n .format(hostname, stable_version),\n 'redhat-stable': 'https://{}/redhat-stable/jenkins-{}-1.1.noarch.rpm'\n .format(hostname, stable_version),\n 'windows-stable': 'https://{}/windows-stable/{}/jenkins.msi'\n .format(hostname, stable_version),\n 'opensuse-stable': 'https://{}/opensuse-stable/jenkins-{}-1.2.noarch.rpm'\n .format(hostname, stable_version),\n 'war-stable': 'https://{}/war-stable/{}/jenkins.war'\n .format(hostname, stable_version),\n }\n\n print(\"Latest weekly version: {}\".format(weekly_version))\n print(\"Latest stable version: 
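The three passes of `get_latest_version` above can be traced by hand; a usage sketch on made-up version strings (not real Jenkins releases):

stable = ['2.249.1', '2.249.3', '2.263.1', '2.263.4']
# pass 1: largest first component               -> 2
# pass 2: largest second component under '2'    -> 263
# pass 3: largest third component under '2.263' -> 4
print(get_latest_version(stable))  # '2.263.4'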
{}\".format(stable_version))\n\n packages = [\n \"debian\", \"debian-stable\",\n \"redhat\", \"redhat-stable\",\n \"opensuse\", \"opensuse-stable\",\n \"war\", \"war-stable\",\n \"windows\", \"windows-stable\",\n ]\n\n for package in packages:\n if not is_exist(package, endpoints[package]):\n print(\"Latest version for package {} unailable from {}\"\n .format(package, endpoints[package]))\n else:\n print(\"Latest version for package {} available from {}\"\n .format(package, endpoints[package]))\n","sub_path":"checks.d/packageCanBeDownloaded.py","file_name":"packageCanBeDownloaded.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128519833","text":" # -*- coding:utf-8 -*-\n# You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed,\n# the only constraint stopping you from robbing each of them is that adjacent houses have security system\n# connected and it will automatically contact the police if two adjacent houses were broken into on the same night.\n# Given a list of non-negative integers representing the amount of money of each house,\n# determine the maximum amount of money you can rob tonight without alerting the police.\n#\n# Example 1:\n#\n# Input: [1,2,3,1]\n# Output: 4\n# Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n# Total amount you can rob = 1 + 3 = 4.\n# Example 2:\n#\n# Input: [2,7,9,3,1]\n# Output: 12\n# Explanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).\n# Total amount you can rob = 2 + 9 + 1 = 12.\n# 每一位都保持着当前最大(通过累加或者直接是当前位)\nclass Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n for i in range(len(nums)):\n if i == 1:\n nums[i] = max(nums[0], nums[1])\n if i > 1:\n nums[i] = max(nums[i] + nums[i - 2], nums[i - 1])\n return max(nums)\n\n\n# You are a professional robber planning to rob houses along a street. Each house has a certain amount of\n # money stashed.\n # All houses at this place are arranged in a circle.\n # That means the first house is the neighbor of the last one.\n # Meanwhile, adjacent houses have security system connected and\n # it will automatically contact the police if two adjacent houses were broken into on the same night.\n # Given a list of non-negative integers representing the amount of money of each house,\n # determine the maximum amount of money you can rob tonight without alerting the police.\n#\n# Example 1:\n#\n# Input: [2,3,2]\n# Output: 3\n# Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2),\n# because they are adjacent houses.\n# Example 2:\n#\n# Input: [1,2,3,1]\n# Output: 4\n# Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n# Total amount you can rob = 1 + 3 = 4.\n\ndef rob(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n if n == 0: return 0\n # 因为首尾不能连接,element个数少于4时,所以直接返回最大的\n if n < 4: return max(nums)\n\n # 除了最后一个,最大的\n first, second = 0, 0\n # first+i 算上当前点的最大,second前一个点的最大。nums[i] 表示前i个可达最大和\n for i in nums[:-1]: first, second = second, max(first + i, second) # 等价于 nums[i] = max(nums[i]+nums[i-2],nums[i-1])\n result = second\n\n # 除了第一个最大的\n first, second = 0, 0\n for i in nums[1:]: first, second = second, max(first + i, second)\n return max(result, second)\n\n\n# The thief has found himself a new place for his thievery again. 
There is only one entrance\n# to this area, called the \"root.\" Besides the root, each house has one and only one parent house.\n# After a tour, the smart thief realized that \"all houses in this place forms a binary tree\".\n# It will automatically contact the police if two directly-linked houses were broken into on the same night.\n# Determine the maximum amount of money the thief can rob tonight without alerting the police.\n#\n# Example 1:\n#\n# Input: [3,2,3,null,3,null,1]\n#\n# 3\n# / \\\n# 2 3\n# \\ \\\n# 3 1\n#\n# Output: 7\n# Explanation: Maximum amount of money the thief can rob = 3 + 3 + 1 = 7.\n# Example 2:\n#\n# Input: [3,4,5,1,3,null,1]\n#\n# 3\n# / \\\n# 4 5\n# / \\ \\\n# 1 3 1\n#\n# Output: 9\n# Explanation: Maximum amount of money the thief can rob = 4 + 5 = 9.\n\n\ndef rob(self, root):\n def dfs(node):\n # The key point is return two parameters with DFS. For each node:\n # its either you rob it or not; if you rob it,\n # you can't rob its left or right.\n # return (subtree max money if not rob this node, subtree max money)\n if not node: return 0, 0\n max_l_ignore, max_l = dfs(node.left)\n max_r_ignore, max_r = dfs(node.right)\n # max_l + max_r 相当于ignore 当前节点时的最大值,max(max_l + max_r, node.val + max_l_ignore + max_r_ignore) 相当于一直保留最大值\n return max_l + max_r, max(max_l + max_r, node.val + max_l_ignore + max_r_ignore)\n\n return dfs(root)[1]\n","sub_path":"Basic_Algorithm/highlyFrequentQuestion/Robber.py","file_name":"Robber.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177877801","text":"import typing\nfrom datetime import datetime as dt, timedelta\nimport collections\n\nimport discord\nfrom discord.ext import tasks, commands\n\nfrom cogs import utils\n\n\nclass UserMessageHandler(utils.Cog):\n\n def __init__(self, bot:utils.CustomBot):\n super().__init__(bot)\n the_start_of_time = lambda: dt(2000, 1, 1, 0, 0)\n self.last_message: typing.Dict[discord.Member, dt] = collections.defaultdict(the_start_of_time)\n self.cached_for_saving: typing.List[discord.Message] = list()\n self.user_message_databaser.start()\n\n def cog_unload(self):\n \"\"\"Stop the databaser loop very gently so it stores everything in cache first\"\"\"\n\n self.user_message_databaser.stop()\n\n @tasks.loop(minutes=1)\n async def user_message_databaser(self):\n \"\"\"Saves all messages stored in self.cached_for_saving to db\"\"\"\n\n # Only save messages if there _were_ any\n if len(self.cached_for_saving) == 0:\n self.log_handler.info(f\"Storing 0 cached messages in database\")\n return\n\n # Get the messages we want to save\n currently_saving = self.cached_for_saving.copy() # Make a copy to fend off the race conditions\n for m in currently_saving:\n try:\n self.cached_for_saving.remove(m)\n except ValueError:\n pass\n\n # Sort them into a nice easy tuple\n records = [(i.id, i.author.id, i.guild.id) for i in currently_saving if i.author.bot is False]\n\n # Copy the records into the db\n self.log_handler.info(f\"Storing {len(records)} cached messages in database\")\n async with self.bot.database() as db:\n await db.conn.copy_records_to_table(\n 'user_messages',\n columns=('message_id', 'user_id', 'guild_id'),\n records=records\n )\n\n @utils.Cog.listener(\"on_message\")\n async def user_message_cacher(self, message:discord.Message):\n \"\"\"Listens for a user sending a message, and then saves that message as a point\n into the db should their last message be long enough ago\"\"\"\n\n # Filter out DMs\n if 
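The first solution above folds the answer into `nums` with the recurrence best[i] = max(best[i-2] + nums[i], best[i-1]). A constant-space variant of the same DP, with the second example traced in comments (a sketch, not the original author's code):

def rob(nums):
    prev, best = 0, 0  # best two houses back, best so far
    for x in nums:
        prev, best = best, max(prev + x, best)
    return best

# Trace for [2, 7, 9, 3, 1]:
#   x=2 -> max(0 + 2, 0)   = 2
#   x=7 -> max(0 + 7, 2)   = 7
#   x=9 -> max(2 + 9, 7)   = 11
#   x=3 -> max(7 + 3, 11)  = 11
#   x=1 -> max(11 + 1, 11) = 12
print(rob([2, 7, 9, 3, 1]))  # 12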
message.guild is None:\n return\n\n # Make sure it's in the time we want\n last_message_from_user = self.last_message[message.author]\n if last_message_from_user < dt.utcnow() - timedelta(minutes=1):\n self.last_message[message.author] = message.created_at\n else:\n return\n\n # Cache to be saved\n self.cached_for_saving.append(message)\n\n # Cache for internal use\n utils.CachedMessage(\n user_id=message.author.id,\n guild_id=message.guild.id,\n message_id=message.id\n )\n\n @commands.command()\n async def getpoints(self, ctx:utils.Context, user:typing.Optional[discord.User], *attrs):\n\n attributes = {i.split('=')[0]: int(i.split('=')[1]) for i in attrs}\n attributes = attributes or {\"days\": 1}\n user = user or ctx.author\n data = utils.CachedMessage.get_messages(user, ctx.guild, **attributes)\n await ctx.send(len(data))\n\n @commands.command()\n async def leaderboard(self, ctx:utils.Context, *attrs):\n\n attributes = {i.split('=')[0]: int(i.split('=')[1]) for i in attrs}\n attributes = attributes or {\"days\": 1}\n all_keys_for_guild = [i for i in utils.CachedMessage.all_messages.keys() if i[1] == ctx.guild.id]\n all_data_for_guild = {}\n for key in all_keys_for_guild:\n all_data_for_guild[key[0]] = len(utils.CachedMessage.get_messages(key[0], ctx.guild, **attributes))\n ordered_user_ids = sorted(all_data_for_guild.keys(), key=lambda k: all_data_for_guild[k], reverse=True)\n filtered_list = [i for i in ordered_user_ids if ctx.guild.get_member(i)]\n await ctx.send('\\n'.join([f\"**{self.bot.get_user(i)!s}** - {all_data_for_guild[i]}\" for i in filtered_list[:10]]))\n\n\ndef setup(bot:utils.CustomBot):\n x = UserMessageHandler(bot)\n bot.add_cog(x)\n","sub_path":"cogs/user_message_handler.py","file_name":"user_message_handler.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396181462","text":"#!/usr/bin/python3\n\nimport sys\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk\n\nglobal_color = Gdk.RGBA(red=.50, green=.50, blue=.50, alpha=1.0).to_color()\nglobal_alpha = 65535\n\nclass AppWindow(Gtk.ApplicationWindow):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.set_border_width(10)\n self.set_size_request(200, 100)\n modal = Gtk.Button.new_with_label(\"Modal\")\n nonmodal = Gtk.Button.new_with_label(\"Non-Modal\")\n modal.connect(\"clicked\", self.on_run_color_selection_dialog, \n self, True)\n nonmodal.connect(\"clicked\", self.on_run_color_selection_dialog, \n self, False)\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)\n hbox.pack_start(modal, False, False, 5)\n hbox.pack_start(nonmodal, False, False, 5)\n self.add(hbox)\n\n def on_dialog_response(self, dialog, result):\n if result == Gtk.ResponseType.OK:\n colorsel = dialog.get_color_selection()\n alpha = colorsel.get_current_alpha()\n color = colorsel.get_current_color()\n print(color.to_string())\n global_color = color\n global_alpha = alpha\n dialog.destroy()\n\n def on_run_color_selection_dialog(self, button, window, domodal):\n if domodal:\n title = (\"Choose Color -- Modal\")\n else:\n title = (\"Choose Color -- Non-Modal\")\n dialog = Gtk.ColorSelectionDialog(title=title, parent=window, modal=domodal)\n colorsel = dialog.get_color_selection()\n colorsel.set_has_opacity_control(True)\n colorsel.set_current_color(global_color)\n dialog.connect(\"response\", self.on_dialog_response)\n dialog.show_all()\n\nclass 
Application(Gtk.Application):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, application_id=\"org.example.myapp\",\n **kwargs)\n self.window = None\n\n def do_activate(self):\n if not self.window:\n self.window = AppWindow(application=self, title=\"Color Selection Dialog\")\n self.window.show_all()\n self.window.present()\n\nif __name__ == \"__main__\":\n app = Application()\n app.run(sys.argv)\n","sub_path":"book/06-Dialogs/ColorSelectionDialog.py","file_name":"ColorSelectionDialog.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561346139","text":"import json\nimport logging\nimport webapp2\n\nfrom cache_controller import CacheController\nfrom datetime import datetime\nfrom model_choosie_post import ChoosiePost\nfrom model_user import User\nfrom utils import Utils\n\nclass FeedHandler(webapp2.RequestHandler):\n def get(self):\n user_fb_uid = self.request.get('fb_uid', default_value=None)\n choosie_posts, cursor = FeedHandler.get_feed_and_cursor(self.request.get('cursor'),\n self.request.get('limit'),\n self.request.get('timestamp'),\n user_fb_uid)\n choosie_posts_json = Utils.items_to_json(choosie_posts)\n self.response.out.write(json.dumps({'feed': choosie_posts_json,\n 'cursor': cursor,\n 'timestamp': datetime.utcnow().isoformat()}))\n \n @staticmethod\n def get_feed_and_cursor(cursor, limit = 10, timestamp = None, fb_uid = None):\n if not limit:\n limit = 10\n limit = int(limit)\n logging.info('Retrieving %d posts from db' % limit)\n posts = ChoosiePost.all()\n if fb_uid is not None:\n posts.filter(\"user_fb_id =\", fb_uid)\n if cursor:\n posts.with_cursor(cursor)\n if timestamp:\n created_after = Utils.parse_iso_format_datetime(timestamp)\n posts.filter('created_at >', created_after)\n posts.order(\"-created_at\")\n posts_result = []\n for post in posts.run(limit=limit):\n posts_result.append(post)\n new_cursor = posts.cursor()\n CacheController.set_multi_models(posts_result)\n return (posts_result, new_cursor)\n \n","sub_path":"choosie-server/feed_handler.py","file_name":"feed_handler.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"47041315","text":"import base64\nimport contextlib\nimport asyncio\nimport glob\nimport tempfile\nimport threading\n\nfrom django.conf import settings\nfrom hfc.fabric import Client\nfrom hfc.fabric.peer import Peer\nfrom hfc.fabric.user import create_user\nfrom hfc.fabric.orderer import Orderer\nfrom hfc.util.keyvaluestore import FileKeyValueStore\nfrom hfc.fabric.block_decoder import decode_fabric_MSP_config, decode_fabric_peers_info, decode_fabric_endpoints\n\nuser = None\nuser_lock = threading.Lock()\n\n\ndef ledger_grpc_options(hostname):\n return {\n 'grpc.max_send_message_length': settings.LEDGER_GRPC_MAX_SEND_MESSAGE_LENGTH,\n 'grpc.max_receive_message_length': settings.LEDGER_GRPC_MAX_RECEIVE_MESSAGE_LENGTH,\n 'grpc.keepalive_timeout_ms': settings.LEDGER_GRPC_KEEPALIVE_TIMEOUT_MS,\n 'grpc.http2.max_pings_without_data': settings.LEDGER_GRPC_HTTP2_MAX_PINGS_WITHOUT_DATA,\n 'grpc.keepalive_permit_without_calls': settings.LEDGER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS,\n 'grpc.ssl_target_name_override': hostname\n }\n\n\n@contextlib.contextmanager\ndef get_hfc(channel_name):\n loop, client, user = _get_hfc(channel_name)\n try:\n yield (loop, client, user)\n finally:\n loop.run_until_complete(\n client.close_grpc_channels()\n )\n del 
client\n loop.close()\n\n\ndef _get_hfc(channel_name):\n global user\n\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n if not user:\n with user_lock:\n # Only call `create_user` once in the lifetime of the application.\n # Calling `create_user` twice breaks thread-safety (bug in fabric-sdk-py)\n user = create_user(\n name=settings.LEDGER_USER_NAME,\n org=settings.ORG_NAME,\n state_store=FileKeyValueStore(settings.LEDGER_CLIENT_STATE_STORE),\n msp_id=settings.LEDGER_MSP_ID,\n key_path=glob.glob(settings.LEDGER_CLIENT_KEY_PATH)[0],\n cert_path=settings.LEDGER_CLIENT_CERT_PATH\n )\n\n client = Client()\n\n # Add peer from backend ledger config file\n peer = Peer(name=settings.LEDGER_PEER_NAME)\n peer.init_with_bundle({\n 'url': f'{settings.LEDGER_PEER_HOST}:{settings.LEDGER_PEER_PORT}',\n 'grpcOptions': ledger_grpc_options(settings.LEDGER_PEER_HOST),\n 'tlsCACerts': {'path': settings.LEDGER_PEER_TLS_CA_CERTS},\n 'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},\n 'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT},\n })\n client._peers[settings.LEDGER_PEER_NAME] = peer\n\n # Check peer has joined channel\n\n response = loop.run_until_complete(\n client.query_channels(\n requestor=user,\n peers=[peer],\n decode=True\n )\n )\n\n channels = [ch.channel_id for ch in response.channels]\n\n if channel_name not in channels:\n raise Exception(f'Peer has not joined channel: {channel_name}')\n\n channel = client.new_channel(channel_name)\n\n # This part is commented because `query_committed_chaincodes` is not implemented in\n # the last version of fabric-sdk-py\n\n # chaincode_name = settings.LEDGER_CHANNELS[channel_name]['chaincode']['name']\n\n # /!\\ New chaincode lifecycle.\n\n # Check chaincode is committed in the channel\n # responses = loop.run_until_complete(\n # client.query_committed_chaincodes(\n # requestor=user,\n # channel_name=channel_name,\n # peers=[peer],\n # decode=True\n # )\n # )\n # chaincodes = [cc.name\n # for resp in responses\n # for cc in resp.chaincode_definitions]\n # if chaincode_name not in chaincodes:\n # raise Exception(f'Chaincode : {chaincode_name}'\n # f' is not committed in the channel : {channel_name}')\n\n # Discover orderers and peers from channel discovery\n results = loop.run_until_complete(\n channel._discovery(\n user,\n peer,\n config=True,\n local=False,\n interests=[{'chaincodes': [{'name': \"_lifecycle\"}]}]\n )\n )\n\n results = _deserialize_discovery(results)\n\n _validate_channels(channel_name, results)\n _update_client_with_discovery(client, results)\n\n return loop, client, user\n\n\ndef _validate_channels(channel_name, discovery_results):\n\n channel = settings.LEDGER_CHANNELS[channel_name]\n\n # Ensure solo channels have 1 member at most\n if channel_name.startswith('solo-') or channel['restricted']:\n channel_members = list(discovery_results['config']['msps'].keys())\n num_members = len(channel_members) - 1 # remove orderer\n if (num_members > 1):\n raise Exception(f'Restricted channel {channel_name} should have at most 1 member, but has '\n f'{num_members}')\n\n\ndef _update_client_with_discovery(client, discovery_results):\n\n # Get all msp tls root cert files\n tls_root_certs = {}\n\n for mspid, msp_info in discovery_results['config']['msps'].items():\n tls_root_certs[mspid] = base64.decodebytes(\n msp_info['tls_root_certs'].pop().encode()\n )\n\n # Load one peer per msp for endorsing transaction\n for msp in discovery_results['members']:\n if not len(msp):\n continue\n\n peer_info = msp[0]\n\n if 
peer_info['mspid'] != settings.LEDGER_MSP_ID:\n peer = Peer(name=peer_info['mspid'])\n\n with tempfile.NamedTemporaryFile() as tls_root_cert:\n tls_root_cert.write(tls_root_certs[peer_info['mspid']])\n tls_root_cert.flush()\n\n url = peer_info['endpoint']\n peer.init_with_bundle({\n 'url': url,\n 'grpcOptions': ledger_grpc_options(peer_info['endpoint'].split(':')[0]),\n 'tlsCACerts': {'path': tls_root_cert.name},\n 'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},\n 'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT}\n })\n\n client._peers[peer_info['mspid']] = peer\n\n # Load one orderer for broadcasting transaction\n orderer_mspid, orderer_info = list(discovery_results['config']['orderers'].items())[0]\n\n orderer = Orderer(name=orderer_mspid)\n\n with tempfile.NamedTemporaryFile() as tls_root_cert:\n tls_root_cert.write(tls_root_certs[orderer_mspid])\n tls_root_cert.flush()\n\n # Need loop\n orderer.init_with_bundle({\n 'url': f\"{orderer_info[0]['host']}:{orderer_info[0]['port']}\",\n 'grpcOptions': ledger_grpc_options(orderer_info[0]['host']),\n 'tlsCACerts': {'path': tls_root_cert.name},\n 'clientKey': {'path': settings.LEDGER_PEER_TLS_CLIENT_KEY},\n 'clientCert': {'path': settings.LEDGER_PEER_TLS_CLIENT_CERT}\n })\n\n client._orderers[orderer_mspid] = orderer\n\n\ndef _deserialize_discovery(response):\n results = {\n 'config': None,\n 'members': [],\n 'cc_query_res': None\n }\n\n for res in response.results:\n if res.config_result and res.config_result.msps and res.config_result.orderers:\n results['config'] = _deserialize_config(res.config_result)\n\n if res.members:\n results['members'].extend(_deserialize_members(res.members))\n\n if res.cc_query_res and res.cc_query_res.content:\n results['cc_query_res'] = _deserialize_cc_query_res(res.cc_query_res)\n\n return results\n\n\ndef _deserialize_config(config_result):\n\n results = {'msps': {},\n 'orderers': {}}\n\n for mspid in config_result.msps:\n results['msps'][mspid] = decode_fabric_MSP_config(\n config_result.msps[mspid].SerializeToString()\n )\n\n for mspid in config_result.orderers:\n results['orderers'][mspid] = decode_fabric_endpoints(\n config_result.orderers[mspid].endpoint\n )\n\n return results\n\n\ndef _deserialize_members(members):\n peers = []\n\n for mspid in members.peers_by_org:\n peer = decode_fabric_peers_info(\n members.peers_by_org[mspid].peers\n )\n peers.append(peer)\n\n return peers\n\n\ndef _deserialize_cc_query_res(cc_query_res):\n cc_queries = []\n\n for cc_query_content in cc_query_res.content:\n cc_query = {\n 'chaincode': cc_query_content.chaincode,\n 'endorsers_by_groups': {},\n 'layouts': []\n }\n\n for group in cc_query_content.endorsers_by_groups:\n peers = decode_fabric_peers_info(\n cc_query_content.endorsers_by_groups[group].peers\n )\n\n cc_query['endorsers_by_groups'][group] = peers\n\n for layout_content in cc_query_content.layouts:\n layout = {\n 'quantities_by_group': {\n group: int(layout_content.quantities_by_group[group])\n for group in layout_content.quantities_by_group\n }\n }\n cc_query['layouts'].append(layout)\n\n cc_queries.append(cc_query)\n\n return cc_queries\n","sub_path":"backend/substrapp/ledger/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171521449","text":"# Resolve the problem!!\nimport re\n\ndef run():\n # Start coding here\n with open('encoded.txt', 'r', encoding='utf-8') as f:\n text = f.read()\n 
secret_message = re.findall('[a-z]', text)\n        print(''.join(secret_message))\n    \nif __name__ == '__main__':\n    run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231218888","text":"\"\"\"Opener of the ISIC 2018 dataset\"\"\"\nimport os\nimport csv\nimport numpy as np\nfrom PIL import Image\n\n\nPREFIX_X = \"IMG_\"\nSUFFIX_X = \".jpg\"\nPREFIX_Y = \"LABEL_\"\nSUFFIX_Y = \".csv\"\nPRED_FILE = \"pred.csv\"\n\ndef check_existing_files(folder, files):\n    \"\"\"check if files from a list of files are located in folder\"\"\"\n    for fname in files:\n        if not os.path.isfile(os.path.join(folder, fname)):\n            raise FileNotFoundError(\"non existing file %s in folder %s\" %\n                                    (fname, folder))\n\n\ndef get_files(folder):\n    \"\"\"return list of features and label files given a folder location (with\n    the same order)\"\"\"\n    # get list of features files and create associated list of label files\n    X_files = [f for f in os.listdir(folder) if '.jpg' in f]\n    y_files = [f.replace(PREFIX_X, PREFIX_Y).replace(SUFFIX_X, SUFFIX_Y) for f in X_files]\n    # check label files exist\n    try:\n        check_existing_files(folder, y_files)\n    except FileNotFoundError as e:\n        print(str(e))\n        y_files = None\n    return X_files, y_files\n\n\ndef get_X(folder):\n    \"\"\"Format and return the ISIC features data as np arrays.\"\"\"\n    print('Finding features files...')\n    X_files, _ = get_files(folder)\n    print('Loading features...')\n    X = []\n    for f in X_files:\n        image = Image.open(os.path.join(folder, f))\n        X.append(np.array(image))\n    return np.array(X)\n\n\ndef get_y(folder):\n    \"\"\"Format and return the ISIC labels as np arrays.\"\"\"\n    print('Finding label files...')\n    _, y_files = get_files(folder)\n    print('Loading labels...')\n    y = []\n    for f in y_files:\n        with open(os.path.join(folder, f)) as open_f:\n            str_y = open_f.readline().split(',')\n            y.append([float(yy) for yy in str_y])\n    return np.array(y)\n\n\ndef save_pred(y_pred, folder):\n    \"\"\"Save prediction in PRED_FILE in folder\"\"\"\n    with open(os.path.join(folder, PRED_FILE), \"w\") as f:\n        writer = csv.writer(f)\n        writer.writerows(y_pred)\n\n\ndef get_pred(folder):\n    \"\"\"Get predictions which were saved using the save_pred function\"\"\"\n    with open(os.path.join(folder, PRED_FILE), \"r\") as f:\n        pred_iter = csv.reader(f)\n        pred = [y for y in pred_iter]\n    return np.array(pred, copy=False)","sub_path":"substrabac/fixtures/chunantes/datasets/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener.py","file_name":"opener.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"192395516","text":"class Vertex():\n    \"\"\" \n    Usage: the graph is kept as a table of Vertex objects,\n    tab_of_v[x] being vertex x\n    \"\"\"\n    def __init__(self):\n        self.parrent=-1\n        self.color=-1\n        self.tab_index_incident=None\n        self.visited=False\n        self.entry=-1\n        self.process=-1\ndef BuildFromInc(tab):\n    \"\"\" \n    Builds the vertex table from an adjacency list given as a table\n    Adjacency list tab, e.g.\n    tab=[[2,3],[1,5],...]\n    \"\"\"\n    tab_of_v=[None]*len(tab)\n    for i in range(len(tab)):\n        V=Vertex()\n        V.tab_index_incident=tab[i]\n        tab_of_v[i]=V\n    return tab_of_v\n\ndef DFS_Usual_Visit(u_index,time,tab_of_v):\n    time+=1\n    tab_of_v[u_index].visited=True\n    tab_of_v[u_index].entry=time\n    for v_ind in tab_of_v[u_index].tab_index_incident:\n        if not tab_of_v[v_ind].visited:\n            tab_of_v[v_ind].parrent=u_index\n            DFS_Usual_Visit(v_ind,time,tab_of_v)\n    time+=1\n    tab_of_v[u_index].process=time\n\ndef DFS(tab_of_v):\n    time=0\n    for v_ind in range(len(tab_of_v)):\n        if not tab_of_v[v_ind].visited:\n            DFS_Usual_Visit(v_ind,time,tab_of_v)\n    \"\"\" Writing out the parents tab \"\"\"\n    tab=[0]*len(tab_of_v)\n    for i in range(len(tab_of_v)):\n        tab[i]=tab_of_v[i].parrent\n    return tab\ntab_v=BuildFromInc([[1,4],[2],[3],[],[]])\nprint(DFS(tab_v))\n\ndef Topologic_Sort_DFS_Visited(u_index,time,tab_of_v,Topologic_Sort):\n    time+=1\n    tab_of_v[u_index].visited=True\n    tab_of_v[u_index].entry=time\n    for v_ind in tab_of_v[u_index].tab_index_incident:\n        if not tab_of_v[v_ind].visited:\n            tab_of_v[v_ind].parrent=u_index\n            Topologic_Sort_DFS_Visited(v_ind,time,tab_of_v,Topologic_Sort)\n    time+=1\n    tab_of_v[u_index].process=time\n    Topologic_Sort.append(u_index)\n\ndef Topologic_Sort_DFS(tab_of_v):\n    time=0\n    Topologic_Sort=[]\n    for v_ind in range(len(tab_of_v)):\n        if not tab_of_v[v_ind].visited:\n            Topologic_Sort_DFS_Visited(v_ind,time,tab_of_v,Topologic_Sort)\n    Topologic_Sort.reverse()\n    return Topologic_Sort\n\n\ntab_v=BuildFromInc([[],[2],[3],[4],[5],[]])\nprint(Topologic_Sort_DFS(tab_v))\n\n","sub_path":"Graphs/DFS_usual/DFS_and_Topologic_new.py","file_name":"DFS_and_Topologic_new.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"5385629","text":"#urls.py\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', views.index, name='index'),\n    path('analyze', views.analyze, name='analyze'),\n    path('contact',views.contact,name='contact'),\n    path('about',views.about,name='about'),\n    path('register',views.register,name='register'),\n    path(\"login\",views.login,name=\"login\"),\n    path(\"logout\",views.logout,name=\"logout\"),\n]","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86049980","text":"import os\nimport contextlib2\n\nfrom pycocotools.coco import COCO\n\nfrom object_detection.dataset_tools import tf_record_creation_util\nfrom object_detection.utils import dataset_util\n\nimport tensorflow.compat.v1 as tf\n\nflags = tf.app.flags\nflags.DEFINE_string('data_dir', 'learn/coco', 'Root directory to raw Microsoft COCO dataset.')\nflags.DEFINE_string('output_dir', 'learn/coco/subset', 'Path to output TFRecord')\nflags.DEFINE_list('sup_cats', 'person,vehicle,outdoor', 'Super categories to be included')\nflags.DEFINE_integer('max_examples_per_cat', 5000, 'Max number of examples')\nFLAGS = flags.FLAGS\n\ndef load_coco_dection_dataset(imgsDir, annFile, supCats):\n    \"\"\"Load data from dataset by pycocotools.\n    Args:\n        imgs_dir: directories of COCO images (train2017 | val2017)\n        ann_file: file path of COCO annotations file (instances_train2017.json | instances_val2017.json)\n    Return:\n        coco_data: list of dictionary format information of each image\n    \"\"\"\n\n    coco = COCO(annFile)\n\n    catIds = coco.getCatIds(supNms=supCats)\n\n    # Because `img_ids = coco.getImgIds(catIds=catIds)` won't work ...\n    imgIds = []\n    for c in catIds:\n        cImgs = coco.getImgIds(catIds=c)\n        cImgs = cImgs[0:FLAGS.max_examples_per_cat] if len(cImgs) > FLAGS.max_examples_per_cat else cImgs\n        imgIds.extend(cImgs)\n\n    imgIds = set(imgIds) # Remove duplicates\n\n    cocoData = []\n    nimgs = len(imgIds)\n\n    for index, imgId in 
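For the two demo graphs above, the expected prints can be checked by hand. Note also that `time` is a plain int passed by value, so the entry/process stamps never accumulate across the recursive calls; the parent table and the ordering are unaffected. A quick sanity check, assuming the functions above:

assert DFS(BuildFromInc([[1,4],[2],[3],[],[]])) == [-1, 0, 1, 2, 0]
# vertex 0 finishes first, then the chain 1 -> 2 -> 3 -> 4 -> 5; reversing the
# finish order gives an order that respects every edge:
assert Topologic_Sort_DFS(BuildFromInc([[],[2],[3],[4],[5],[]])) == [1, 2, 3, 4, 5, 0]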
enumerate(imgIds):\n if index % 100 == 0:\n print(\"Reading images: %d / %d \" % (index, nimgs))\n\n imgInfo = {}\n bboxes = []\n labels = []\n\n imgDetail = coco.loadImgs(imgId)[0]\n imgHeight = imgDetail['height']\n imgWidth = imgDetail['width']\n\n annIds = coco.getAnnIds(imgIds=imgId, catIds=catIds)\n anns = coco.loadAnns(annIds)\n\n for ann in anns:\n catId = ann['category_id']\n if not catId in catIds:\n continue\n\n bboxes_data = [\n ann['bbox'][0]/float(imgWidth),\n ann['bbox'][1]/float(imgHeight),\n ann['bbox'][2]/float(imgWidth),\n ann['bbox'][3]/float(imgHeight)\n ]\n\n bboxes.append(bboxes_data)\n labels.append(catId)\n\n imgPath = os.path.join(imgsDir, imgDetail['file_name'])\n imgBytes = tf.gfile.FastGFile(imgPath, 'rb').read()\n\n imgInfo['pixel_data'] = imgBytes\n imgInfo['height'] = imgHeight\n imgInfo['width'] = imgWidth\n imgInfo['bboxes'] = bboxes\n imgInfo['labels'] = labels\n\n cocoData.append(imgInfo)\n \n return cocoData\n\ndef dict_to_coco_example(imgData):\n \"\"\"Convert python dictionary of one image to tf.Example proto.\n Args:\n img_data: infomation of one image\n Returns:\n example: The converted tf.Example\n \"\"\"\n\n bboxes = imgData['bboxes']\n xmin, xmax, ymin, ymax = [], [], [], []\n\n for bbox in bboxes:\n xmin.append(bbox[0])\n xmax.append(bbox[0] + bbox[2])\n ymin.append(bbox[1])\n ymax.append(bbox[1] + bbox[3])\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(imgData['height']),\n 'image/width': dataset_util.int64_feature(imgData['width']),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/label': dataset_util.int64_list_feature(imgData['labels']),\n 'image/encoded': dataset_util.bytes_feature(imgData['pixel_data']),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf-8')),\n }))\n return example\n\n\ndef create_tf_record_from_coco_annotations(imgsDir, annFile, supCats, outputDir, numShards):\n\n with contextlib2.ExitStack() as tfRecordCloseStack, tf.gfile.GFile(annFile, 'r') as fid:\n outputTfRecords = tf_record_creation_util.open_sharded_output_tfrecords(tfRecordCloseStack, outputDir, numShards)\n\n cocoData = load_coco_dection_dataset(imgsDir, annFile, supCats)\n totalImgs = len(cocoData)\n\n for idx, imgData in enumerate(cocoData):\n if idx % 100 == 0:\n print(\"Converting images: %d / %d\" % (idx, totalImgs))\n\n tfExample = dict_to_coco_example(imgData)\n shardIdx = idx % numShards\n\n if tfExample:\n outputTfRecords[shardIdx].write(tfExample.SerializeToString())\n\ndef main(_):\n if not tf.gfile.IsDirectory(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n train_output_path = os.path.join(FLAGS.output_dir, 'coco_train.record')\n val_output_path = os.path.join(FLAGS.output_dir, 'coco_val.record')\n\n print(\">>>> Converting COCO train dataset to TF record <<<<\")\n train_img_dir = os.path.join(FLAGS.data_dir, 'train2017')\n train_ann_file = os.path.join(FLAGS.data_dir, 'annotations', 'instances_train2017.json')\n create_tf_record_from_coco_annotations(train_img_dir, train_ann_file, FLAGS.sup_cats, train_output_path, 100)\n\n print(\">>>> Converting COCO validation dataset to TF record <<<<\")\n val_img_dir = os.path.join(FLAGS.data_dir, 'val2017')\n val_ann_file = os.path.join(FLAGS.data_dir, 'annotations', 
'instances_val2017.json')\n create_tf_record_from_coco_annotations(val_img_dir, val_ann_file, FLAGS.sup_cats, val_output_path, 50)\n\n\nif __name__ == \"__main__\":\n tf.app.run()","sub_path":"docker/object_detection/scripts/create_coco_subset_tf_record.py","file_name":"create_coco_subset_tf_record.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423108871","text":"\n\nimport struct\nimport sys\nif len(sys.argv) != 3:\n exit(1)\n\nfile = sys.argv[1]\noutput_file = sys.argv[2]\n\n\ndef WHA(string):\n mask = 0x3fffffff\n outHash = 0\n for byte in string:\n byte = ord(byte)\n intermediate_value = ((byte ^ 0xcc) << 24) | \\\n ((byte ^ 0x33) << 16) | \\\n ((byte ^ 0xaa) << 8) | \\\n (byte ^ 0x55)\n outHash = (outHash & mask) + (intermediate_value & mask)\n return hex(outHash)\n\n\n\nwith open(file, 'rb') as f:\n file_data = f.read()\n\noutput = WHA(file_data)[2:]\n\nwith open(output_file, 'w') as f:\n f.write(output)\n\n# calculate the collision string\n# with open('sol_3.1.3.2.txt', 'r') as f:\n# file_data = f.read()\n\n# output = WHA(file_data)[2:]\n# print output \n\n\n\n","sub_path":"mp3/sol_3.1.3.2.py","file_name":"sol_3.1.3.2.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194621591","text":"from __future__ import annotations\n\nfrom typing import Dict\n\nfrom tools37.colors import BLACK, WHITE\nfrom tools37.console import Console as BaseConsole\nfrom .Entry import Entry\n\n__all__ = [\"Console\"]\n\n\nclass Console(BaseConsole):\n \"\"\"\n Console implementation to display the log lines in a fancy way\n \"\"\"\n\n def __init__(self, width: int = 100, styles: Dict[str, Console.Style] = None):\n if styles is None:\n styles = {}\n\n if '_time' not in styles:\n styles['_time'] = Console.Style(bg=BLACK, fg='#AAAAFF')\n\n if '_code' not in styles:\n styles['_code'] = Console.Style(bg=BLACK, fg='#2222FF')\n\n if '_content' not in styles:\n styles['_content'] = Console.Style(bg=BLACK, fg=WHITE)\n\n self.styles = styles\n super().__init__(26, max([len(key) for key in self.styles.keys() if not key.startswith('_')], default=0), width)\n\n def print(self, entry: Entry):\n \"\"\"Print the specified Entry.\"\"\"\n style = self.styles.get(entry.code)\n\n self.display(\n objects=[\n entry.at.isoformat(),\n entry.code,\n entry.content\n ],\n styles=[\n self.styles['_time'],\n style or self.styles['_code'],\n style or self.styles['_content']\n ]\n )\n","sub_path":"venv/Lib/site-packages/tools37/logger/Console.py","file_name":"Console.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"204942447","text":"# -*- coding: UTF-8 -*-\nimport os\nimport zipfile\nimport json\nimport requests as requests\n\n\ndef unzip_file(zipfilename, unziptodir):\n if not os.path.exists(unziptodir):\n os.makedirs(unziptodir, 0o777)\n zfobj = zipfile.ZipFile(zipfilename)\n for name in zfobj.namelist():\n name = name.replace('\\\\', '/')\n if name.endswith('/'):\n os.mkdir(os.path.join(unziptodir, name))\n else:\n ext_filename = os.path.join(unziptodir, name)\n ext_dir = os.path.dirname(ext_filename)\n if not os.path.exists(ext_dir):\n os.mkdir(ext_dir, 0o777)\n outfile = open(ext_filename, 'wb')\n outfile.write(zfobj.read(name))\n outfile.close()\n\n\ndef sendRequest(url, querystring):\n headers = {\n 'cache-control': \"no-cache\"\n 
}\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n if not response.text:\n return None\n dat = json.loads(response.text)\n return dat\n\n\n# 格式化json输出\ndef get_pretty_print(json_object):\n return json.dumps(json_object, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)\n\n\nif __name__ == '__main__':\n js = sendRequest(\"http://img.jiaoyuxueli.cn/api/?act=act2&t=1&sn=f9be311e65d81a9ad8150a60844bb94c\", {})\n if js != None:\n pass\n print(js)\n id = js['id']\n fromfile = js['from']\n todir = js['to']\n unzip_file(fromfile, todir)\n # 删除文件\n # os.remove(fromfile)\n sendRequest(\"http://img.jiaoyuxueli.cn/api/?act=act2up&t=1&sn=f9be311e65d81a9ad8150a60844bb94c\", {'id': id})\n","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"306476039","text":"import os\nimport config\nfrom flask import (Flask, render_template, flash, redirect, url_for, request, session, escape, Blueprint, abort)\nfrom flask_login import (login_user, logout_user, login_required, current_user, LoginManager)\nfrom flask_wtf import CSRFProtect\n\n\nfrom models.base_model import db\n\nweb_dir = os.path.join(os.path.dirname(\n\tos.path.abspath(__file__)), 'instagram_web')\n\napp = Flask('NEXTAGRAM', root_path=web_dir)\ncsrf = CSRFProtect(app)\n\nif os.getenv('FLASK_ENV') == 'production':\n\tapp.config.from_object(\"config.ProductionConfig\")\nelse:\n\tapp.config.from_object(\"config.DevelopmentConfig\")\n\n\n@app.before_request\ndef before_request():\n\t# db.close()\n\tdb.connect()\n\n@app.after_request\ndef after_request(response):\n\tdb.close()\n\treturn response\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76715017","text":"\"\"\" Module to decouple cg code from Housekeeper code \"\"\"\nimport datetime as dt\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Set, Tuple\n\nfrom cg.constants import SequencingFileTag\nfrom cg.exc import HousekeeperBundleVersionMissingError, HousekeeperFileMissingError\nfrom sqlalchemy.orm import Query\n\nfrom housekeeper.include import checksum as hk_checksum\nfrom housekeeper.include import include_version\nfrom housekeeper.store import Store, models\nfrom housekeeper.store.models import Archive, Bundle, File, Version\n\nLOG = logging.getLogger(__name__)\n\n\nclass HousekeeperAPI:\n \"\"\"API to decouple cg code from Housekeeper\"\"\"\n\n def __init__(self, config: dict) -> None:\n self._store = Store(config[\"housekeeper\"][\"database\"], config[\"housekeeper\"][\"root\"])\n self.root_dir: str = config[\"housekeeper\"][\"root\"]\n\n def __getattr__(self, name):\n LOG.warning(\"Called undefined %s on %s, please wrap\", name, self.__class__.__name__)\n return getattr(self._store, name)\n\n def new_bundle(self, name: str, created_at: dt.datetime = None) -> Bundle:\n \"\"\"Create a new file bundle.\"\"\"\n return self._store.new_bundle(name, created_at)\n\n def add_bundle(self, bundle_data) -> Tuple[Bundle, Version]:\n \"\"\"Build a new bundle version of files.\"\"\"\n return self._store.add_bundle(bundle_data)\n\n def bundle(self, name: str) -> Bundle:\n \"\"\"Fetch a bundle.\"\"\"\n return self._store.get_bundle_by_name(bundle_name=name)\n\n def bundles(self) -> List[Bundle]:\n \"\"\"Fetch bundles.\"\"\"\n return 
self._store.bundles()\n\n def create_new_bundle_and_version(self, name: str) -> Bundle:\n \"\"\"Create new bundle with version.\"\"\"\n new_bundle: Bundle = self.new_bundle(name=name)\n self.add_commit(new_bundle)\n new_version: Version = self.new_version(created_at=new_bundle.created_at)\n new_bundle.versions.append(new_version)\n self.commit()\n LOG.info(f\"New bundle created with name {new_bundle.name}\")\n return new_bundle\n\n def set_to_archive(self, file: File, value: bool) -> None:\n \"\"\"Sets the 'to_archive' field of a file.\"\"\"\n file.to_archive: bool = value\n self.commit()\n\n def new_file(\n self, path: str, checksum: str = None, to_archive: bool = False, tags: list = None\n ) -> File:\n \"\"\"Create a new file.\"\"\"\n if tags is None:\n tags = []\n return self._store.new_file(path, checksum, to_archive, tags)\n\n def get_file(self, file_id: int) -> Optional[File]:\n \"\"\"Get a file based on file id.\"\"\"\n LOG.info(f\"Return file: {file_id}\")\n file_obj: File = self._store.get_file_by_id(file_id=file_id)\n if not file_obj:\n LOG.info(\"file not found\")\n return None\n return file_obj\n\n def delete_file(self, file_id: int) -> Optional[File]:\n \"\"\"Delete a file both from database and disk (if included).\"\"\"\n file_obj: File = self.get_file(file_id)\n if not file_obj:\n LOG.info(f\"Could not find file {file_id}\")\n return\n\n if file_obj.is_included and Path(file_obj.full_path).exists():\n LOG.info(f\"Deleting file {file_obj.full_path} from disc\")\n Path(file_obj.full_path).unlink()\n\n LOG.info(f\"Deleting file {file_id} from housekeeper\")\n self._store.session.delete(file_obj)\n self.commit()\n\n return file_obj\n\n def add_file(\n self, path: str, version_obj: Version, tags: list, to_archive: bool = False\n ) -> File:\n \"\"\"Add a file to the database.\"\"\"\n if isinstance(tags, str):\n tags: List[str] = [tags]\n for tag_name in tags:\n if not self.get_tag(tag_name):\n self.add_tag(tag_name)\n\n new_file: File = self.new_file(\n path=str(Path(path).absolute()),\n to_archive=to_archive,\n tags=[self.get_tag(tag_name) for tag_name in tags],\n )\n\n new_file.version: Version = version_obj\n return new_file\n\n def files(\n self,\n *,\n bundle: str = None,\n tags: Set[str] = None,\n version: int = None,\n path: str = None,\n ) -> Query:\n \"\"\"Fetch files.\"\"\"\n return self._store.get_files(\n bundle_name=bundle, tag_names=tags, version_id=version, file_path=path\n )\n\n @staticmethod\n def get_files_from_version(version: Version, tags: Set[str]) -> Optional[List[File]]:\n \"\"\"Return a list of files associated with the given version and tags.\"\"\"\n LOG.debug(f\"Getting files from version with tags {tags}\")\n files: List[File] = []\n for file in list(version.files):\n file_tags = {tag.name for tag in file.tags}\n if tags.issubset(file_tags):\n LOG.debug(f\"Found file {file}\")\n files.append(file)\n if not files:\n LOG.warning(f\"Could not find any files matching the tags {tags}\")\n return files\n\n @staticmethod\n def get_file_from_version(version: Version, tags: Set[str]) -> Optional[File]:\n \"\"\"Return the first file matching the given tags.\"\"\"\n files: List[File] = HousekeeperAPI.get_files_from_version(version=version, tags=tags)\n return files[0] if files else None\n\n @staticmethod\n def get_latest_file_from_version(version: Version, tags: Set[str]) -> Optional[File]:\n \"\"\"Return the latest file from Housekeeper given its version and tags.\"\"\"\n files: List[File] = HousekeeperAPI.get_files_from_version(version=version, tags=tags)\n 
return sorted(files, key=lambda file_obj: file_obj.id)[-1] if files else None\n\n def rollback(self):\n \"\"\"Wrap method in Housekeeper Store.\"\"\"\n return self._store.session.rollback()\n\n def session_no_autoflush(self):\n \"\"\"Wrap property in Housekeeper Store.\"\"\"\n return self._store.session.no_autoflush\n\n def get_files(\n self, bundle: str, tags: Optional[list] = None, version: Optional[int] = None\n ) -> Query:\n \"\"\"Get all the files in housekeeper, optionally filtered by bundle and/or tags and/or\n version.\n \"\"\"\n return self._store.get_files(bundle_name=bundle, tag_names=tags, version_id=version)\n\n def get_latest_file(\n self, bundle: str, tags: Optional[list] = None, version: Optional[int] = None\n ) -> Optional[File]:\n \"\"\"Return latest file from Housekeeper, filtered by bundle and/or tags and/or version.\"\"\"\n files: Query = self._store.get_files(bundle_name=bundle, tag_names=tags, version_id=version)\n return files.order_by(File.id.desc()).first()\n\n def check_bundle_files(\n self,\n bundle_name: str,\n file_paths: List[Path],\n last_version: Version,\n tags: Optional[list] = None,\n ) -> List[Path]:\n \"\"\"Checks if any of the files in the provided list are already added to the provided\n bundle. Returns a list of files that have not been added.\"\"\"\n for file in self.get_files(bundle=bundle_name, tags=tags, version=last_version.id):\n if Path(file.path) in file_paths:\n file_paths.remove(Path(file.path))\n LOG.info(\n f\"Path {file.path} is already linked to bundle {bundle_name} in housekeeper\"\n )\n return file_paths\n\n @staticmethod\n def get_included_path(root_dir: Path, version_obj: Version, file_obj: File) -> Path:\n \"\"\"Generate the path to a file that should be included.\n If the version dir does not exist, create a new version dir in root dir.\n \"\"\"\n version_root_dir: Path = Path(root_dir, version_obj.relative_root_dir)\n version_root_dir.mkdir(parents=True, exist_ok=True)\n LOG.info(\"Created new bundle version dir: %s\", version_root_dir)\n return Path(version_root_dir, Path(file_obj.path).name)\n\n def include_file(self, file_obj: File, version_obj: Version) -> File:\n \"\"\"Call the include version function to import related assets.\"\"\"\n global_root_dir: Path = Path(self.get_root_dir())\n\n new_path: Path = self.get_included_path(\n root_dir=global_root_dir, version_obj=version_obj, file_obj=file_obj\n )\n if file_obj.to_archive:\n # calculate sha1 checksum if file is to be archived\n file_obj.checksum = HousekeeperAPI.checksum(file_obj.path)\n if new_path.exists():\n LOG.warning(\n f\"Another file with identical included file path: {new_path} already exist. 
Skip linking of: {file_obj.path}\"\n )\n file_obj.path = str(new_path).replace(f\"{global_root_dir}/\", \"\", 1)\n return file_obj\n # hardlink file to the internal structure\n os.link(file_obj.path, new_path)\n LOG.info(f\"Linked file: {file_obj.path} -> {new_path}\")\n file_obj.path = str(new_path).replace(f\"{global_root_dir}/\", \"\", 1)\n return file_obj\n\n def new_version(self, created_at: dt.datetime, expires_at: dt.datetime = None) -> Version:\n \"\"\"Create a new bundle version.\"\"\"\n return self._store.new_version(created_at, expires_at)\n\n def version(self, bundle: str, date: dt.datetime) -> Version:\n \"\"\"Fetch a version.\"\"\"\n LOG.debug(f\"Return version: {date}, from {bundle}\")\n return self._store.get_version_by_date_and_bundle_name(\n bundle_name=bundle, version_date=date\n )\n\n def last_version(self, bundle: str) -> Version:\n \"\"\"Gets the latest version of a bundle.\"\"\"\n LOG.debug(f\"Fetch latest version from bundle {bundle}\")\n return (\n self._store._get_query(table=Version)\n .join(Version.bundle)\n .filter(Bundle.name == bundle)\n .order_by(models.Version.created_at.desc())\n .first()\n )\n\n def get_all_non_archived_spring_files(self) -> List[File]:\n \"\"\"Return all spring files which are not marked as archived in Housekeeper.\"\"\"\n return self._store.get_all_non_archived_files(tag_names=[SequencingFileTag.SPRING])\n\n def get_latest_bundle_version(self, bundle_name: str) -> Optional[Version]:\n \"\"\"Get the latest version of a Housekeeper bundle.\"\"\"\n last_version: Version = self.last_version(bundle_name)\n if not last_version:\n LOG.warning(f\"No bundle found for {bundle_name} in Housekeeper\")\n return None\n LOG.debug(f\"Found Housekeeper version object for {bundle_name}: {repr(last_version)}\")\n return last_version\n\n def get_create_version(self, bundle_name: str) -> Version:\n \"\"\"Returns the latest version of a bundle if it exists. 
If not creates a bundle and\n returns its version.\"\"\"\n last_version: Version = self.last_version(bundle=bundle_name)\n if not last_version:\n LOG.info(f\"Creating bundle for sample {bundle_name} in housekeeper\")\n bundle_result: Tuple[Bundle, Version] = self.add_bundle(\n bundle_data={\n \"name\": bundle_name,\n \"created_at\": dt.datetime.now(),\n \"expires_at\": None,\n \"files\": [],\n }\n )\n last_version: Version = bundle_result[1]\n return last_version\n\n def new_tag(self, name: str, category: str = None):\n \"\"\"Create a new tag.\"\"\"\n return self._store.new_tag(name, category)\n\n def add_tag(self, name: str, category: str = None) -> models.Tag:\n \"\"\"Add a tag to the database.\"\"\"\n tag_obj = self._store.new_tag(name, category)\n self.add_commit(tag_obj)\n return tag_obj\n\n def get_tag(self, name: str) -> models.Tag:\n \"\"\"Fetch a tag.\"\"\"\n return self._store.get_tag(name)\n\n @staticmethod\n def get_tag_names_from_file(file: File) -> List[str]:\n \"\"\"Fetch tag names for a file.\"\"\"\n return [tag.name for tag in file.tags]\n\n def include(self, version_obj: Version):\n \"\"\"Call the include version function to import related assets.\"\"\"\n include_version(self.get_root_dir(), version_obj)\n version_obj.included_at = dt.datetime.now()\n\n def add_commit(self, obj):\n \"\"\"Wrap method in Housekeeper Store.\"\"\"\n self._store.session.add(obj)\n return self._store.session.commit()\n\n def commit(self):\n \"\"\"Wrap method in Housekeeper Store.\"\"\"\n return self._store.session.commit()\n\n def get_root_dir(self) -> str:\n \"\"\"Returns the root dir of Housekeeper.\"\"\"\n return self.root_dir\n\n @staticmethod\n def checksum(path) -> str:\n \"\"\"Calculate the checksum.\"\"\"\n return hk_checksum(path)\n\n def initialise_db(self):\n \"\"\"Create all tables in the store.\"\"\"\n self._store.create_all()\n\n def destroy_db(self):\n \"\"\"Drop all tables in the store.\"\"\"\n self._store.drop_all()\n\n def add_and_include_file_to_latest_version(\n self, bundle_name: str, file: Path, tags: list\n ) -> None:\n \"\"\"Adds and includes a file in the latest version of a bundle.\"\"\"\n version: Version = self.last_version(bundle_name)\n if not version:\n LOG.warning(f\"Bundle: {bundle_name} not found in Housekeeper\")\n raise HousekeeperBundleVersionMissingError\n hk_file: File = self.add_file(version_obj=version, tags=tags, path=str(file.absolute()))\n self.include_file(version_obj=version, file_obj=hk_file)\n self.commit()\n\n def include_files_to_latest_version(self, bundle_name: str) -> None:\n \"\"\"Include all files in the latest version on a bundle.\"\"\"\n bundle_version: Version = self.get_latest_bundle_version(bundle_name=bundle_name)\n if not bundle_version:\n return None\n if bundle_version.included_at:\n LOG.info(\n f\"Bundle: {bundle_name}, version: {bundle_version} already included at {bundle_version.included_at}\"\n )\n return\n for hk_file in bundle_version.files:\n if not hk_file.is_included:\n try:\n self.include_file(version_obj=bundle_version, file_obj=hk_file)\n except FileExistsError as error:\n LOG.error(error)\n continue\n LOG.warning(\n f\"File is already included in Housekeeper for bundle: {bundle_name}, version: {bundle_version}\"\n )\n bundle_version.included_at = dt.datetime.now()\n self.commit()\n\n def get_file_from_latest_version(self, bundle_name: str, tags: Set[str]) -> Optional[File]:\n \"\"\"Return a file in the latest version of a bundle.\"\"\"\n version: Version = self.last_version(bundle=bundle_name)\n if not version:\n 
LOG.warning(f\"Bundle: {bundle_name} not found in Housekeeper\")\n raise HousekeeperBundleVersionMissingError\n return self.files(version=version.id, tags=tags).first()\n\n def get_files_from_latest_version(self, bundle_name: str, tags: List[str]) -> Query:\n \"\"\"Return files in the latest version of a bundle.\n\n Raises HousekeeperBundleVersionMissingError:\n - When no version was found for the given bundle\n \"\"\"\n version: Version = self.last_version(bundle=bundle_name)\n if not version:\n LOG.warning(f\"Bundle: {bundle_name} not found in Housekeeper\")\n raise HousekeeperBundleVersionMissingError\n return self.files(version=version.id, tags=tags)\n\n def is_fastq_or_spring_in_all_bundles(self, bundle_names: List[str]) -> bool:\n \"\"\"Return whether or not all FASTQ/SPRING files are included for the given bundles.\"\"\"\n sequencing_files_in_hk: Dict[str, bool] = {}\n if not bundle_names:\n return False\n for bundle_name in bundle_names:\n sequencing_files_in_hk[bundle_name] = False\n for tag in [SequencingFileTag.FASTQ, SequencingFileTag.SPRING_METADATA]:\n sample_file_in_hk: List[bool] = []\n hk_files: Optional[List[File]] = self.get_files_from_latest_version(\n bundle_name=bundle_name, tags=[tag]\n )\n sample_file_in_hk += [True for hk_file in hk_files if hk_file.is_included]\n if sample_file_in_hk:\n break\n sequencing_files_in_hk[bundle_name] = (\n all(sample_file_in_hk) if sample_file_in_hk else False\n )\n return all(sequencing_files_in_hk.values())\n\n def get_non_archived_files(self, bundle_name: str, tags: Optional[list] = None) -> List[File]:\n \"\"\"Returns all files from given bundle, with given tag, which have not been archived.\"\"\"\n return self._store.get_non_archived_files(bundle_name=bundle_name, tags=tags)\n\n def get_archived_files(self, bundle_name: str, tags: Optional[list] = None) -> List[File]:\n \"\"\"Returns all files from given bundle, with given tag, which have been archived.\"\"\"\n return self._store.get_archived_files(bundle_name=bundle_name, tags=tags)\n\n def add_archives(self, files: List[Path], archive_task_id: int) -> None:\n \"\"\"Creates an archive object for the given files, and adds the archive task id to them.\"\"\"\n for file in files:\n archived_file: Optional[File] = self._store.get_files(file_path=file.as_posix()).first()\n if not archived_file:\n raise HousekeeperFileMissingError(f\"No file in housekeeper with the path {file}\")\n archive: Archive = self._store.create_archive(\n archived_file.id, archiving_task_id=archive_task_id\n )\n self._store.session.add(archive)\n self.commit()\n\n def is_fastq_or_spring_on_disk_in_all_bundles(self, bundle_names: List[str]) -> bool:\n \"\"\"Return whether or not all FASTQ/SPRING files are on disk for the given bundles.\"\"\"\n sequencing_files_on_disk: Dict[str, bool] = {}\n if not bundle_names:\n return False\n for bundle_name in bundle_names:\n sequencing_files_on_disk[bundle_name] = False\n for tag in [SequencingFileTag.FASTQ, SequencingFileTag.SPRING_METADATA]:\n sample_file_on_disk: List[bool] = []\n hk_files: Optional[List[File]] = self.get_files_from_latest_version(\n bundle_name=bundle_name, tags=[tag]\n )\n sample_file_on_disk += [\n True for hk_file in hk_files if Path(hk_file.full_path).exists()\n ]\n if sample_file_on_disk:\n break\n sequencing_files_on_disk[bundle_name] = (\n all(sample_file_on_disk) if sample_file_on_disk else False\n )\n return all(sequencing_files_on_disk.values())\n\n def get_non_archived_spring_path_and_bundle_name(self) -> List[Tuple[str, str]]:\n \"\"\"Return 
a list of bundles with corresponding file paths for all non-archived SPRING\n files.\"\"\"\n return [\n (file.version.bundle.name, file.path)\n for file in self.get_all_non_archived_spring_files()\n ]\n","sub_path":"cg/apps/housekeeper/hk.py","file_name":"hk.py","file_ext":"py","file_size_in_byte":19215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447825216","text":"from pygments import highlight\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments.formatters.latex import LatexFormatter\nfrom pygments.lexers.agile import PythonLexer\nimport inspect\nimport json\nimport sys\n\npy_lexer = PythonLexer()\nh_fm = HtmlFormatter(lineanchors='l')\nl_fm = LatexFormatter()\n\nmodule_names = [\n \"garlicsim\",\n \"garlicsim_lib\",\n \"garlicsim_lib.simpacks\",\n \"garlicsim_lib.simpacks.prisoner\",\n \"garlicsim_lib.simpacks.prisoner.players\"\n]\n\nfor m in module_names:\n __import__(m)\n\nmodules = [sys.modules[m] for m in module_names]\n\ndef get_modules(m):\n return [x for x_name, x in inspect.getmembers(m) if inspect.ismodule(x)]\n\ndef get_classes(m):\n return [x for x_name, x in inspect.getmembers(m) if inspect.isclass(x)]\n\ndef get_functions(m):\n return [x for x_name, x in inspect.getmembers(m) if (inspect.ismethod(x) or inspect.isfunction(x))]\n\n\ncode = {}\nfor m in modules:\n for c in get_classes(m):\n qualified_name = c.__module__ + \".\" + c.__name__\n code[qualified_name] = {}\n code[qualified_name]['doc'] = m.__doc__\n for f in get_functions(c):\n try:\n source = inspect.getsource(f)\n latex_source = highlight(source, py_lexer, l_fm)\n html_source = highlight(source, py_lexer, h_fm)\n\n code[qualified_name][f.__name__] = {}\n code[qualified_name][f.__name__]['source'] = source\n code[qualified_name][f.__name__]['html'] = html_source\n code[qualified_name][f.__name__]['latex'] = latex_source\n except IOError:\n pass\n\n\nf = open(\"dexy--source.json\", \"w\")\njson.dump(code, f)\nf.close()\n\n","sub_path":"docs/examples/garlicsim/garlicsim-source.py","file_name":"garlicsim-source.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286852865","text":"import numpy as np\nimport itertools\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef tanh_deriv(x):\n return 1.0 - np.tanh(x) ** 2\n\n\ndef logistic(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef logistic_derivative(x):\n return logistic(x) * (1 - logistic(x))\n \n \ndef add_ones(samples):\n\t# adds column of ones to the end\n\tsamples = np.array(samples)\n\ttemp = np.ones([samples.shape[0], samples.shape[1] + 1])\n\ttemp[:,0:-1] = samples\n\tsamples = temp\n\treturn samples\n\n\ndef add_zeros(samples):\n\t# adds column of ones to the end\n\tsamples = np.array(samples)\n\ttemp = np.zeros([samples.shape[0], samples.shape[1] + 1])\n\ttemp[:,0:-1] = samples\n\tsamples = temp\n\treturn samples\n\n\ndef add_column(samples, col=1):\n\t# adds column of ones to the end\n\tsamples = np.array(samples)\n\ttemp = np.empty([samples.shape[0], samples.shape[1] + 1])\n\ttemp.fill(col)\n\ttemp[:,0:-1] = samples\n\tsamples = temp\n\treturn samples\n \n \ndef categorize(features, ignore_nans=True):\n # returns new array where every unique feature is labeled\n uniq = np.unique(features)\n if ignore_nans and np.nan in uniq:\n uniq = np.delete(list(uniq).index(np.nan))\n uniq = list(uniq)\n labeled_features = []\n for cat in features:\n if ignore_nans and np.isnan(cat):\n 
labeled_features.append(cat)\n else:\n labeled_features.append(uniq.index(cat))\n return np.array(labeled_features)\n \n \ndef rescale(samples, low=-1, high=1):\n # returns rescaled column from -1 to 1\n\tmins = np.min(samples, axis=0)\n\tmaxs = np.max(samples, axis=0)\n\tfs = np.asfarray(samples)\n\trng = maxs - mins\n\treturn high - (((high - low) * (maxs - fs)) / rng)\n\n\ndef scale_back(scaled, original, low=-1, high=1):\n\tmins = np.min(original, axis=0)\n\tmaxs = np.max(original, axis=0)\n\trng = maxs - mins\n\treturn maxs - (high - scaled)*rng / (high-low)\n\n \ndef standardize(samples):\n\tmean = np.mean(samples, axis=0)\n\tstd = np.std(samples, axis=0)\n\treturn (samples - mean) / std\n\t\n\t\ndef stand_back(samples, original):\n\tmean = np.mean(original, axis=0)\n\tstd = np.std(original, axis=0)\t\n\treturn samples * std + mean\n\t\n\ndef getstats(samples):\n\tmins = np.min(samples, axis=0)\n\tmaxs = np.max(samples, axis=0)\n\tscaled_x = rescale(samples)\n\tmean = np.mean(scaled_x, axis=0)\n\tstd = np.std(scaled_x, axis=0)\n\tstats = {'min':mins,\n\t\t\t'max':maxs,\n\t\t\t'mean':mean,\n\t\t\t'std':std}\n\treturn stats\n\t\n\t\ndef delnans_any(s, ax=1):\n\tif ax is 1:\n\t\treturn s[~np.isnan(s).any(axis=1)]\n\telif ax is 0:\n\t\treturn s[:, ~np.isnan(s).any(axis=0)]\n\t\n\t\ndef delnans_all(s, ax=1):\n\tif ax is 1:\n\t\treturn s[~np.isnan(s).all(axis=1)]\n\telif ax is 0:\n\t\treturn s[:, ~np.isnan(s).all(axis=0)]\n\t\n\n#raises order of a sample set by n, returns new columns\ndef raise_order(samples, n):\n\tif n==1:\n\t\treturn samples\n\telif n<1:\n\t\traise AttributeError('n must be greater or equal to 1')\n\telse:\n\t\ts = samples.copy()\n\t\t# make iterations array\n\t\titer = []\n\t\tfor i in range(2, n+1):\n\t\t\titer += list(itertools.combinations_with_replacement(range(s.shape[1]),i))\n\t\t# for every combination\n\t\tfor el in iter:\n\t\t\t# for every element in combination\n\t\t\tf = None\n\t\t\tfor i in el:\n\t\t\t\tif f is None:\n\t\t\t\t\tf = samples[:,i].copy()\n\t\t\t\telse:\n\t\t\t\t\tf *= samples[:,i]\n\t\t\ts = np.concatenate([s,np.atleast_2d(f).T], axis=1)\n\treturn s\n\t\t\n\ndef array_to_float(data, tonans=True):\n\tresult = np.zeros((data.shape[0], data.shape[1]))\n\tfor i in range(data.shape[0]):\n\t\tfor j in range(data.shape[1]):\n\t\t\ttry: \n\t\t\t\tresult[i,j] = float(data[i,j])\n\t\t\texcept ValueError:\n\t\t\t\tif tonans or result[i,j] is '':\n\t\t\t\t\tresult[i,j] = np.nan\n\t\t\t\telse:\n\t\t\t\t\tresult[i,j] = data[i,j]\n\treturn result","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63303146","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Credit, Client\nfrom django.contrib.auth.models import User\nfrom .forms import CreditForm, CreditAlertForm\n\n@login_required\ndef financial_index(request):\n return redirect('financial_history')\n\n@login_required\ndef financial_history(request, template_name='dashboard/financial_history.html'):\n\tdata = {}\n\tcredit_list = Credit.objects.filter(client__user=request.user).order_by('-date')\n\tdata['object_list'] = credit_list\n\tdata['url_create'] = 'financial_credit'\t\n\tdata['title'] = 'Credit History'\n\treturn render(request, template_name, data)\n\n@login_required\ndef financial_credit(request, 
template_name='dashboard/client_edit.html'):\n\tdata = {}\t\n\tform = CreditForm(request.POST or None)\n\tif form.is_valid():\n\t\tcredit = form.save(commit=False)\n\t\tcredit.client = Client.objects.filter(user=request.user)[0]\n\t\tcredit.save()\n\t\treturn redirect('financial_index')\n\tdata['form'] = form\t\n\tdata['Title'] = 'Add Credit'\t\n\treturn render(request, template_name, data)\n\n@login_required\ndef financial_alert(request, template_name='dashboard/financial_alert.html'):\n\tdata = {}\n\tclient = get_object_or_404(Client, user=request.user)\n\tform = CreditAlertForm(request.POST or None, instance=client)\n\tif form.is_valid():\n\t\tform.save()\n\t\treturn redirect('server_index')\n\tdata['form'] = form\t\n\tdata['Title'] = 'Set Credt Alert'\t\n\treturn render(request, template_name, data)","sub_path":"web/dashboard/views_financial.py","file_name":"views_financial.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126201167","text":"import time\nfrom random import randint\nfrom dbUtilities import createTables, logTireWithDatatime\n\ncreateTables()\n\n\ndef spoofSampleDataArray():\n\n def spoofSQLiteDatetimeString():\n datetimestr = \"2016-01-12 \" + str(randint(0, 23)) + ':' + str(randint(0, 59)) + ':' + str(randint(0, 59))\n return datetimestr\n\n def spoofUnixTimestamp():\n timestamp = time.time() - (time.time() % 86400)\n timestamp += randint(0, 86399)\n return int(timestamp)\n\n def spoofUID():\n UID = hex(randint(268435456, 4294967295)).split('x')[1]\n return UID\n\n practiceData = []\n\n for i in xrange(1000):\n datetimestr = spoofSQLiteDatetimeString()\n for j in xrange(4):\n practiceData.append({\"UID\": spoofUID(), \"datetime\": datetimestr})\n return practiceData\n\n\ndef logSampleData():\n\n for tire in spoofSampleDataArray():\n logTireWithDatatime(tire[\"UID\"], tire[\"datetime\"])\n return\n\nlogSampleData()\n","sub_path":"generateFakeData.py","file_name":"generateFakeData.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609720525","text":"#this file doesnt exist by default I have to create this url file\n\nfrom django.urls import path\nfrom . 
import views\n\n# app_name sets the URL namespace used from inside templates\napp_name = 'mkgif'\n\nurlpatterns = [\n    # index points to the index view\n    path('', views.index, name='index'),\n    # <int:pk> is a path converter: capture an integer from the URL and pass it to the view as pk\n    path('details/<int:pk>/', views.details, name='details'),\n    path('delete/<int:pk>/', views.delete_animation, name='delete_animation'),\n]\n","sub_path":"mkgif/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"30111636","text":"import pygame.camera\n#import picamera\n#import asciiNbr\n#import asciiTxt\n\n# backslash remove space before & after multiline\n\n#camera = picamera.PiCamera()\nwidth = 640\nheight = 480\npygame.init()\npygame.camera.init()\ncam = pygame.camera.Camera(\"/dev/video0\", (width, height))\ncam.start()\n#setup window\nwindowSurfaceObj = pygame.display.set_mode((width,height),1,16)\npygame.display.set_caption('Camera')\n\n#take pic\nimage = cam.get_image()\ncam.stop()  # stop() must be called; the bare attribute access did nothing\n\n#save it\npygame.image.save(windowSurfaceObj,'picture2.jpg')\n\n","sub_path":"camT01.py","file_name":"camT01.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"263730206","text":"#!/usr/bin/env python3\n\n#%% Python Vector Functions\n\n# Return True if all elements of the vector are True\nall([1, 1, 1, 1, 1])\nall([1, 1, 0, 1, 1])\n\n# Return True if at least one element is True\nany([1, 0, 0, 0, 0])\n\n\n\nmax([1, 3, 5, 7, 9])\nmin([2, 4, 6, 8, 10])\n\n\n\n# Generate numbers from 0 to 9\nlist(range(10))\n\n# Generate numbers from 1 to 5\nlist(range(1,6))\n\n# Generate numbers from 1 to 9 in steps of 2\nlist(range(1, 10, 2))\n\n\n\n\n#%% pandas Package\n\nimport os\nos.getcwd()\n\n\nimport pandas as pd\n\nDF = pd.read_csv('pyData\\\\mtcars.csv')\n\n# pandas - Data Frame\nDF.head()\nDF[0:6]\n\nDF.tail()\nDF[-5:]\n\n\n\n\nDF.columns\nDF.describe()\n\n\nDF.mean()\nDF.groupby(['cyl']).mean()\nDF.groupby(['cyl'])['hp'].mean()\n\n\n\nDF['hp']\nDF.hp\n\nDF['hp'].mean()\nDF.hp.sum()\nDF.hp.mean()\nDF.hp.var()\nDF.hp.std()\nDF.hp.min()\nDF.hp.max()\n\n\nDF.hp[30]\nDF.hp[0:6]\n\n\n\n\n\n\n# pandas - Series\ntype(DF.hp)\nDF_S1 = DF.hp\nDF_S1\n\nDF_S1.index = DF.model\nDF_S1\nDF_S1['Camaro Z28']\nDF_S1['Camaro Z28':'Ferrari Dino']\n\n\nprint(DF_S1 * 2)\n\n\n\n\n\n\n\n\n#%% numpy Package\n\nimport numpy as np\n\nnp.sum(DF.hp)\nnp.mean(DF.hp)\nnp.var(DF.hp)\nnp.std(DF.hp)\nnp.min(DF.hp)\nnp.max(DF.hp)\n\n\n\nV0 = np.array([[5, 3, 1], [2, 4, 6]])\nV0\n\nlist(range(12))\nV1 = np.array(range(12))\nV1\n\nV1.reshape((4, 3))\nV1.reshape((3, 4))\nV1.reshape((3, 2, 2))\n\n\nnp.zeros((3, 4))\nnp.full((3, 4), 9)\nnp.eye(4)\n\n\n# numpy - ndarray\nV2 = np.array(list(range(51, 60)))\nV2\nV3 = np.array(list(range(101, 110)))\nV3\n\nV2 = V2.reshape(3, 3)\nV2\nV3 = V3.reshape(3, 3)\nV3\n\nV2 + V3\nV2 - V3\nV2 * V3\nV2 / V3\n\nV2.dot(V3)\n\n\n\n#%% matplotlib Package\n\nimport matplotlib.pyplot as plt\nplt.plot([1, 2, 3], [177, 183, 165])\n\n\nDF = pd.read_csv('Documents\\\\pyData\\\\mtcars.csv')\nDF.plot()\nDF.hp.plot()\nDF[['disp', 'hp']].plot()\n\n\nDF[['disp', 'hp']].plot(kind = 'bar')\nDF[['disp', 'hp']].plot.bar()\nDF[['disp', 'hp']].plot.bar(stacked = True)\nDF[['disp', 'hp']].plot.barh(stacked = True)\n\n\nDF.hp.plot.hist()\nDF.hp.plot.hist(bins = 20)\nDF[['disp', 'hp']].plot.hist(alpha = 0.5)\n\n\nDF[['disp', 'hp']].plot.box()\nDF[['disp', 'hp']].boxplot()\nDF[['disp', 'hp',
'am']].boxplot(by = 'am')\n\n\nDF[['disp', 'hp']].plot.scatter(x = 'hp', y = 'disp')\nDF[['disp', 'hp']].plot.scatter(x = 'hp', y = 'disp', s = 100, alpha = 0.5)\n\n\n\nfrom pandas.plotting import scatter_matrix\nscatter_matrix(DF, alpha = 0.5, figsize = (12, 12), diagonal = 'kde')\n\n\n\n\n\n\n\n\n","sub_path":"[003]_PNM.py","file_name":"[003]_PNM.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76949399","text":"#!/usr/bin/env python\n\n###############################################################\n# < next few lines under version control, D O N O T E D I T >\n# $Date: 2018-03-29 10:12:00 -0400 (Thu, 29 Mar 2018) $\n# $Revision: 100014 $\n# $Author: Barry.Baker@noaa.gov $\n# $Id: nemsio2nc4.py 100014 2018-03-29 14:12:00Z Barry.Baker@noaa.gov $\n###############################################################\n\n__author__ = 'Patrick Campbell'\n__email__ = 'Patrick.C.Campbell@noaa.gov'\n__license__ = 'GPL'\n\nimport os\nimport subprocess\nimport sys\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\n\nimport cartopy.crs as ccrs\nimport dask\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport sys\nimport monet\nfrom monet.util.tools import calc_8hr_rolling_max,calc_24hr_ave,get_relhum\n\nsns.set_context('notebook')\n\nplt.ioff()\n'''\nSimple utility to make box-whisker plots\n'''\n\n\ndef make_24hr_regulatory(df,col=None):\n \"\"\" Make 24-hour averages \"\"\"\n return calc_24hr_ave(df,col)\n\ndef make_8hr_regulatory(df,col=None):\n \"\"\" Make 8-hour rolling average daily \"\"\"\n return calc_8hr_rolling_max(df,col,window=8)\n\n\ndef chdir(fname):\n dir_path = os.path.dirname(os.path.realpath(fname))\n os.chdir(dir_path)\n return os.path.basename(fname)\n\n\ndef load_paired_data(fname):\n return pd.read_hdf(fname)\n\n\ndef make_plots(df, variable, obs_variable, startdate, enddate, vmin, vmax, ylog, out_name):\n \n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n print('Creating Plot:', obs_variable, 'for period:', startdate, 'to ', enddate )\n print(\n \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") \n \n make_boxplot_epa(df, out_name, startdate, enddate, vmin, vmax, ylog, col1=obs_variable, col2=variable)\n\n\ndef make_boxplot_epa(\n df,\n savename,\n startdate,\n enddate,\n vmin,\n vmax,\n ylog,\n col1='OZONE',\n col2='O3'\n):\n from monet.util.tools import get_epa_region_df as epard\n from monet.plots import savefig\n import seaborn as sns\n df = epard(df)\n dfa = df[[col1, 'EPA_ACRO']]\n dfm = df[[col2, 'EPA_ACRO']]\n dfa['Legend'] = 'AIRNOW'\n dfm['Legend'] = 'CMAQ'\n dfa.rename({col1: col1}, axis=1, inplace=True)\n dfm.rename({col2: col1}, axis=1, inplace=True)\n dfn = pd.concat([dfa, dfm], ignore_index=True)\n f, ax = plt.subplots(figsize=(10, 5))\n sns.boxplot(ax=ax, x='EPA_ACRO', y=col1, hue='Legend', data=dfn)\n sns.despine()\n if vmin != None and vmax != None:\n plt.ylim(vmin, vmax)\n if ylog == True:\n plt.yscale('log')\n plt.legend(loc=2)\n plt.tight_layout(pad=0)\n name = \"{}.bp.jpg\".format(savename)\n monet.plots.savefig(name, dpi=100, loc=3, decorate=True)\n plt.close()\n\n\ndef get_df_region(obj, region):\n from monet.util.tools import get_epa_region_df as epard\n if region.lower() == 'domain':\n obj['EPA_ACRO'] = 'domain'\n return obj\n else:\n obj = epard(region)\n return obj.loc[obj.EPA_ACRO == region.upper()]\n\n\nif __name__ 
== '__main__':\n\n    parser = ArgumentParser(\n        description='Make Box-Whisker Plots for each time step in files',\n        formatter_class=ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument(\n        '-p',\n        '--paired_data',\n        help='paired data input file names',\n        type=str,\n        required=True)\n    parser.add_argument(\n        '-s', '--species', nargs='+', help='Species', required=False, default=['OZONE'])\n    parser.add_argument(\n        '-n',\n        '--output_name',\n        help='Box-whisker plot output base name',\n        type=str,\n        required=False,\n        default='CMAQ_AIRNOW')\n    parser.add_argument(\n        '-r',\n        '--regulatory',\n        help='boolean set to True for 8-hrmax or 24-ave NAAQS regulatory calcs',\n        type=bool,\n        required=False,\n        default=False)\n    parser.add_argument(\n        '-sd',\n        '--startdate',\n        help='Startdate for bias plot statistics over a period YYYY-MM-DD HH:MM:SS',\n        type=str,\n        required=False,\n        default=None)\n    parser.add_argument(\n        '-ed',\n        '--enddate',\n        help='Enddate for bias plot statistics over a period YYYY-MM-DD HH:MM:SS',\n        type=str,\n        required=False,\n        default=None)\n    parser.add_argument(\n        '-e',\n        '--epa_region',\n        help='EPA Region ACRONYM',\n        required=False,\n        default='domain')\n    parser.add_argument(\n        '-miny', '--miny_scale', help='Set static min y-scale', type=float, required=False, default=None)\n    parser.add_argument(\n        '-maxy', '--maxy_scale', help='Set static max y-scale', type=float, required=False, default=None)\n    parser.add_argument(\n        '-ylog', '--ylog_scale', help='Set log y-scale', type=bool, required=False, default=False)\n    args = parser.parse_args()\n\n    paired_data = args.paired_data\n    species = args.species\n    out_name = args.output_name\n    startdate = args.startdate\n    enddate = args.enddate\n    reg = args.regulatory\n    region = args.epa_region\n    vmin = args.miny_scale\n    vmax = args.maxy_scale\n    ylog = args.ylog_scale\n\n    #load the paired dataframe\n    df = load_paired_data(paired_data)\n    mapping_table = {'OZONE':'O3', 'PM2.5':'PM25_TOT', 'PM10':'PMC_TOT', 'CO':'CO_new', 'NO':'NO_new', 'NO2':'NO2_new', 'SO2':'SO2_new','NOX':'NOX_new','NOY':'NOY_new','TEMP':'TEMP2','WS':'WSPD10','WD':'WDIR10','SRAD':'GSW','BARPR':'PRSFC','PRECIP':'RT','RHUM':'Q2'}\n    sub_map = {i: mapping_table[i] for i in species if i in mapping_table}\n\n    #Loop through species\n    for jj in species:\n        df_replace = df.replace(0.0,np.nan) #Replace all exact 0.0 values with nan\n        df_drop=df_replace.dropna(subset=[jj,sub_map.get(jj)]) #Drops all rows with obs species = NaN\n\n#Converts OZONE, PM10, or PM2.5 dataframe to NAAQS regulatory values\n        if jj == 'OZONE' and reg is True:\n            df2 = make_8hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n        elif jj == 'PM2.5' and reg is True:\n            df2 = make_24hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n        elif jj == 'PM10' and reg is True:\n            df2 = make_24hr_regulatory(df_drop,[jj,sub_map.get(jj)]).rename(index=str,columns={jj+'_y':jj,sub_map.get(jj)+'_y':sub_map.get(jj)})\n        else:\n            df2=df_drop\n#Convert airnow met variable if necessary:\n        if jj == 'WS':\n            df2.loc[:,'WS']=df2.loc[:,'WS']*0.514 #convert obs knots-->m/s\n            df2.query('WS > 0.2',inplace=True) #Filter out calm WS obs (< 0.2 m/s), should not be trusted--creates artificially larger positive model bias\n        elif jj == 'BARPR':\n            df2.loc[:,'PRSFC']=df2.loc[:,'PRSFC']*0.01 #convert model Pascals-->millibars\n        elif jj == 'PRECIP':\n            df2.loc[:,'PRECIP']=df2.loc[:,'PRECIP']*0.1 #convert obs mm-->cm\n        elif jj == 'TEMP':\n            
df2.loc[:,'TEMP2'] = df2.loc[:,'TEMP2']-273.16 #convert model K-->C\n elif jj == 'RHUM':\n #convert model mixing ratio to relative humidity\n df2.loc[:,'Q2'] = get_relhum(df2.loc[:,'TEMP2'],df2.loc[:,'PRSFC'],df2.loc[:,'Q2'])\n elif jj == 'CO':\n df2.loc[:,'CO']=df2.loc[:,'CO']*1000.0 #convert obs ppm-->ppb\n else:\n df2=df2\n#subset for period, or use output frequency\n if startdate != None and enddate != None:\n\n mask = (df2['time'] >= startdate) & (df2['time'] <= enddate)\n dfnew =df2.loc[mask]\n import datetime\n startdatename_obj = datetime.datetime.strptime(startdate, '%Y-%m-%d %H:%M:%S')\n enddatename_obj = datetime.datetime.strptime(enddate, '%Y-%m-%d %H:%M:%S')\n startdatename = str(datetime.datetime.strftime(startdatename_obj,'%Y-%m-%d_%H'))\n enddatename = str(datetime.datetime.strftime(enddatename_obj,'%Y-%m-%d_%H'))\n outname = \"{}.{}.{}.{}.{}\".format(out_name, region, jj, startdatename, enddatename)\n if reg is True:\n outname = \"{}.{}.{}.{}.{}.{}\".format(out_name, region, jj,startdatename, enddatename,'reg')\n if jj == 'PM2.5':\n outname = outname.replace('PM2.5','PM2P5')\n if region == 'domain':\n outname = outname.replace('domain','5X')\n else:\n dfnew = df2\n outname = \"{}.{}.{}\".format(out_name,region, jj)\n if reg is True:\n outname = \"{}.{}.{}.{}\".format(out_name,region, jj, 'reg')\n if jj == 'PM2.5':\n outname = outname.replace('PM2.5','PM2P5')\n if region == 'domain':\n outname = outname.replace('domain','5X')\n\n dfnew_drop=dfnew.dropna(subset=[jj,sub_map.get(jj)])\n\n initial_datetime = dfnew_drop.time.min()\n # make the plots\n\n make_plots(dfnew_drop, sub_map.get(jj), jj, startdate, enddate, vmin, vmax, ylog, outname)\n","sub_path":"06.verify_box_plots.py","file_name":"06.verify_box_plots.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"550874436","text":"import json\nfrom flask import Blueprint, request\nfrom flask_security import auth_token_required, roles_accepted\nfrom server.flaskserver import running_context\nimport core.case.database as case_database\nfrom server import forms\n\n\nevents_page = Blueprint('events_page', __name__)\n\n@events_page.route('/', methods=['POST'])\n@auth_token_required\n@roles_accepted(*running_context.user_roles['/cases'])\ndef edit_event_note(event_id):\n form = forms.EditEventForm(request.form)\n if form.validate():\n if form.note.data:\n valid_event_id = case_database.case_db.session.query(case_database.Event) \\\n .filter(case_database.Event.id == event_id).all()\n if valid_event_id:\n case_database.case_db.edit_event_note(event_id, form.note.data)\n return json.dumps(case_database.case_db.event_as_json(event_id))\n else:\n return json.dumps({\"status\": \"invalid event\"})\n else:\n return json.dumps({\"status\": \"Invalid form\"})\n\n","sub_path":"server/blueprints/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648811612","text":"# coding=utf-8\n\"\"\"\nCreated on 2015-4-22\n\n@author: Jay\n\"\"\"\nfrom utils.route import route\nfrom utils.network.http import HttpRpcHandler\nfrom service_mgr.logic.web import setting as web_setting\nfrom service_mgr.logic.web import require_login\nfrom utils.wapper.web import web_adaptor\nfrom utils.web import cookie\nfrom utils.web.verify_code import render_verify_code\n\n@route(r'/', name='login_html') # 首页\nclass LoginHandle(HttpRpcHandler):\n 
@web_adaptor(use_http_render=False)\n    def get(self):\n        self.render('login.html')\n    \n    @web_adaptor(use_http_render=False)\n    def post(self, username, password, verify_code):\n        if not cookie.check_cookie(self, \"service_mgr_verify_code\", verify_code):\n            self.reponse_msg(\"Incorrect verification code!!!\")\n            return\n        user = \"test\"\n        if user:\n            session_id = self.get_secure_cookie('sid')\n            self._sessions_[session_id] = user\n            self.redirect('/index')\n        else:\n            self.write('Login failed')\n            self.finish()\n\n\n@route(r'/index', name='index') # home page\nclass IndexHandle(HttpRpcHandler):\n    @require_login\n    @web_adaptor(use_http_render=False)\n    def get(self, *args, **kwargs):\n        user = \"server\"\n        render_dict = {'user_name': user, 'sub_frames': web_setting.menu}\n        self.render('index.html', **render_dict)\n\n@route(r'/verify_code', name='verify_code') # verification code\nclass VerifyCodeHandle(HttpRpcHandler):\n    @web_adaptor(use_http_render=False)\n    def get(self, *args, **kwargs):\n        render_verify_code(self, cookie_name=\"service_mgr_verify_code\")\n\n@route(r'/logout', name='logout') # log out\nclass LogoutHandle(HttpRpcHandler):  # renamed: was a duplicate definition of LoginHandle\n    @web_adaptor(use_http_render=False)\n    def get(self):\n        self.redirect('/')\n\n","sub_path":"server/workspace/common_server/service_mgr/logic/web/handler/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"137775926","text":"__author__ = 'nlsegerl'\n\n\n\"\"\"\nTest existential filter functionality on a frame that uses a key/value format to store string data.\n\"\"\"\n\ntest_frame_in_name = 'existential_filter_itest_in'\ntest_frame_out_name = 'existential_filter_itest_out'\n\nimport trustedanalytics as ta\nimport existential_filter\n\nta.connect()\nta.drop([test_frame_in_name, test_frame_out_name])\n\n# just two fields\nkey = 'key'\nwriter = 'writer'\n\nin_schema = [(key, str), (writer, str)]\n\n# key values\nkey1 = 'key1'\nkey2 = 'key2'\nkey3 = 'key3'\n\n# data values\ncamus = 'camus'\nsartre = 'sartre'\nkant = 'kant'\npythagoras = 'pythagoras'\n\nin_values = [ [key1, camus],\n              [key2, pythagoras],\n              [key1, kant],\n              [key2, kant],\n              [key3, sartre]]\n\n\nin_frame = ta.Frame(name = test_frame_in_name)\n\nin_frame.append(ta.UploadRows(in_values, in_schema))\n\nframe_names = ta.get_frame_names() # taking inventory of existing frame names to check for frame leak\nout_frame = existential_filter.filter(ta, in_frame = in_frame, out_name = test_frame_out_name, key = key,\n                                      predicate= lambda row: row[writer] in {camus, sartre})\n\n\nassert(in_frame.row_count == 5)\nassert(out_frame.schema == in_frame.schema)\nassert(out_frame.name == test_frame_out_name)\n\nassert(out_frame.row_count ==3)\n\nframe_contents = out_frame.take(3)\nassert([key1, camus] in frame_contents)\nassert([key1, kant] in frame_contents)\nassert([key3, sartre] in frame_contents)\n\n\n# check for frame leak\nframe_names.append(unicode(test_frame_out_name))\nassert(set(ta.get_frame_names()) == set(frame_names))\n","sub_path":"python-client/existential_filter_itest.py","file_name":"existential_filter_itest.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"510095883","text":"import argparse\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport PIL.Image as Image\nimport torch\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn import decomposition\nfrom scipy.sparse import csr_matrix\nimport torchvision\nimport torch.nn as nn\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\nfrom torch.autograd import Variable\nimport copy\n\nfrom sklearn.datasets import fetch_openml\nfrom IPython import display\n\ndevice='cuda:0' if torch.cuda.is_available() else 'cpu'\n\ntorch.manual_seed(0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--unlabelled_datapath', type=str, default = None)\nparser.add_argument('--supervised_datapath', type=str, default = None)\nparser.add_argument('--supervised_labels', type=str, default = None)\nparser.add_argument('--target_datapath', type=str, default = None)  # was missing: the script reads args.target_datapath below\n\nparser.add_argument('--output_labels', type=str, default = None)\nparser.add_argument('--output_classifier', type=str, default = None)\n\nargs=parser.parse_args()\n\n# if not os.path.exists(args.savedir):\n# \tos.makedirs(args.savedir)\n\n\n\n\n# *******************************************************LOADING DATA******************************************************\nX = np.load(args.unlabelled_datapath)\n\n# oneshot_data=np.load(path+'sample_images.npy')\noneshot_data=np.load(args.supervised_datapath)\n\nprint('shape of oneshot_data', oneshot_data.shape)\n\n#applying minibatch kmeans\nX = -1*((X)/255. -1.) #for making it a sparse matrix\n# X = (X)/255.\nprint('shape of X', X.shape)\nX=X.reshape((-1,28*28)) #shape 640k, 784\nx_oneshot = -1*(oneshot_data.reshape((-1, 28*28))/(255.) -1.) #shape 10, 784\n# x_oneshot = oneshot_data.reshape((-1, 28*28))/(255.) 
#shape 10, 784\n\n# X = np.concatenate((X, x_oneshot))\nx_oneshot_target = x_oneshot #from 0th class to 8th class, 9th dropped as its no where in the images i THINK\n# x_oneshot_target = x_oneshot[:-1] #from 0th class to 8th class, 9th dropped as its no where in the images i THINK\n\n\nprint('shape of X', X.shape)\nprint('shape of x_oneshot', x_oneshot.shape)\nprint('shape of x_oneshot_target', x_oneshot_target.shape)\n\nprint('X \\n', X)\nprint('x_oneshot \\n', x_oneshot)\nprint('x_oneshot_target \\n', x_oneshot_target)\n\nX = X.reshape(-1, 1, 28, 28)\nprint(X.shape)\n\nclass CustomTensorDataset_pair(Dataset):\n \"\"\"TensorDataset with support of transforms.\n \"\"\"\n def __init__(self, tensors, transform=None):\n assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)\n self.tensors = tensors\n self.transform = transform\n\n def __getitem__(self, index):\n\n x = self.tensors[0][index]\n # print(x.shape)\n if self.transform:\n x = self.transform(x)\n\n y = self.tensors[1][index]\n\n return x, y\n\n def __len__(self):\n return self.tensors[0].size(0)\n\n\n# we have supervised data (10) and unsuper vised data (1280000) which is X\n# apply transformations on X \n# X can be first shuffled \nshuffler = np.random.permutation(X.shape[0])\nX = X[shuffler]\nX = torch.tensor(X)\n# X = X[:9000]\nX = X[:18000]\nprint('shape of X now after sampling for making final unsup data = ', X.shape)\n\n\n#now sequentially select batches of X and apply transformations\n# select transformations\n# t0 = transforms.RandomApply()\nt1 = transforms.RandomRotation(20)\n# t2 = transforms.RandomCrop((28, 28), padding = 4)\nt2 = transforms.RandomCrop((28, 28))\n\nt3 = transforms.RandomPerspective()\ntrans = transforms.Compose([transforms.ToPILImage(), t1, t2, t3, transforms.ToTensor()])\n\nunsup_dataset = CustomTensorDataset_pair(tensors = (X.float(), X), transform=trans)\nunsup_train_loader = torch.utils.data.DataLoader(unsup_dataset, batch_size=180)\n\n\n\n\n#making supervised dataset ---- unsupervised is already made above\nsup_onsht_data = torch.tensor(x_oneshot_target.reshape(-1, 1, 28, 28))\n# sup_onsht_labels = torch.tensor([i for i in range(9)])\nsup_onsht_labels = torch.tensor(np.load(args.supervised_labels))\n\nshuffler = np.random.permutation(sup_onsht_data.shape[0])\nsup_onsht_data = sup_onsht_data[shuffler]\nsup_onsht_labels = sup_onsht_labels[shuffler]\n\nprint(sup_onsht_labels, sup_onsht_labels.shape)\nprint('supervised datashape = ', sup_onsht_data.shape)\n\n\n\n# sup_dataset = CustomTensorDataset(tensors = sup_onsht_data)\nnum_batches = len(unsup_train_loader)\n# sup_data = torch.cat([sup_onsht_data for i in range(num_batches)], dim = 0)\n# sup_labels = torch.cat([sup_onsht_labels for i in range(num_batches)], dim = 0)\n\nsup_data = sup_onsht_data\nsup_labels = sup_onsht_labels\n\nprint(sup_data.shape)\n\nsup_dataset = CustomTensorDataset_pair(tensors = (sup_data.float(), sup_labels), transform=trans)\n# sup_dataset = CustomTensorDataset_pair(tensors = (sup_data, sup_labels))\nsup_train_loader = torch.utils.data.DataLoader(sup_dataset, batch_size = 90, shuffle = False)\n\nprint(len(sup_train_loader))\n\n\n\nprint('sup and unsup trainloader shape = ', len(sup_train_loader), len(unsup_train_loader))\n\n\nX_target=np.load(args.target_datapath)\nX = X_target\nX = -1*((X)/255. -1.) 
#for making it a sparse matrix\n\nprint('x ki shape', X.shape)\nX=X.reshape((-1,28*28)) #shape 640k, 784\n\nprint('Xtarget shape', X)\n\nbatchsize = 128\ntarget_loader = DataLoader(X.reshape(-1, 1, 28, 28), batch_size=batchsize, shuffle=False)\n\n\ndef predict(model, device, test_loader, use_cuda):\n model.eval()\n predictions = []\n with torch.no_grad():\n for data in test_loader:\n data = data.to(device)\n output = model(data.float())\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n predictions.extend(pred.tolist())\n\n # print(predictions)\n\n return np.array(predictions)\n\n\ndef is_set_correct(array):\n # print(array)\n # print(set(array))\n if len(set(array)) >= 8:\n return True\n return False\n\n\ndef clustering_accuracy(labels):\n #labels are of shape (totalsmall images in all sudoku which is divisible by 64,)\n labels = labels.reshape((labels.shape[0]//64, -1))\n labels = labels.reshape((-1, 8, 8))\n # print(labels.shape)\n # print(labels[0])\n # print(labels[10000])\n\n subatomic_correct = 0\n\n correct = 0\n total = 0\n #now we have labels of correct shape\n final_bool_arr = np.array([True for i in range(labels.shape[0])])\n for i in range(8):\n k = i * 2 if i<4 else (i-4) * 2\n j= (i // 4) * 4\n # print(k, j)\n # if(np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, :, i])) == True or np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, i, :])) == True or np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, k:k+2, j:j+4].reshape(-1, 8))) !=True ):\n # correct+=1\n # total+=1\n\n arr1 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, :, i])\n arr2 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, i, :])\n arr3 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, k:k+2, j:j+4].reshape(-1, 8))\n arr = arr1*arr2*arr3\n # arr = arr1*arr2\n assert(arr.shape[0] == labels.shape[0] and len(arr.shape) == 1)\n final_bool_arr *= arr\n subatomic_correct += arr1.sum() + arr2.sum() + arr3.sum()\n # subatomic_correct += arr1.sum() + arr2.sum()\n\n return final_bool_arr.sum()/final_bool_arr.shape[0], subatomic_correct/(3*8*labels.shape[0])\n\n\n# classifier network\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, 5, padding = 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(400, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 9)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, (2, 2))\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, (2, 2))\n x = x.view(-1, np.prod(x.size()[1:]))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n return x\n\n\nmodel = LeNet().to(device)\n\n\ntest_batch_size=1000\nepochs=25\nlr=0.1\ngamma=0.987\nno_cuda=False\nseed=1\nlog_interval=100\nsave_model=False\nuse_cuda = not no_cuda and torch.cuda.is_available()\ntorch.manual_seed(seed)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\noptimizer = optim.Adam(model.parameters(), lr=0.0002)\nscheduler = StepLR(optimizer, step_size=1, gamma=gamma)\n\n\n\nfor epoch in range(epochs):\n model.train()\n acc = 0\n for batch_idx, (Y, X) in enumerate(zip(unsup_train_loader, sup_train_loader)):\n\n (Xtrans, Xnotrans)= Y\n (Xsup, labels) = X\n\n Xtrans, Xnotrans, Xsup, labels = Xtrans.to(device), Xnotrans.to(device), Xsup.to(device), labels.to(device)\n optimizer.zero_grad()\n\n # print(Xtrans.shape, Xnotrans.shape, Xsup.shape, 
labels.shape)\n\n softmax = nn.Softmax(dim=1)\n temp_model = copy.deepcopy(model).eval()\n\n sup_out = model(Xsup.float())\n with torch.no_grad():\n unsup_notrans_out = softmax(temp_model(Xnotrans.float()))\n unsup_trans_out = softmax(model(Xtrans.float()))\n\n loss_sup = nn.CrossEntropyLoss()\n loss_unsup = nn.BCELoss()\n\n l2unsup = loss_unsup(unsup_trans_out, unsup_notrans_out)\n l1sup = loss_sup(sup_out, labels.long())\n total_loss = (l2unsup+ 10*l1sup)\n\n acc += (torch.argmax(sup_out, dim=1).long() == labels.long()).sum().item()/(labels.shape[0])\n\n total_loss.backward()\n optimizer.step()\n\n\n print('epoch = {}, loss1sup = {}, loss2usup = {}, acc = {}'.format(epoch, l1sup.item(), l2unsup.item(), acc/(batch_idx+1)))\n\n if(epoch% 5 == 0):\n target_labels = predict(model, device, target_loader, True)\n print(clustering_accuracy(target_labels))\n\n\ntorch.save(model, args.output_classifier)\n\n# for classification of the whole dataset after training \n\nX = np.load(args.unlabelled_datapath)\n\nX = -1*((X)/255. -1.) #for making it a sparse matrix\nprint('x ki shape', X.shape)\nX=X.reshape((-1,28*28)) #shape 640k, 784\n\n\nmodel.eval()\n# targetset = TensorDataset(X[40000:] ,data_Y[40000:])\nbatchsize = 128\ndata_loader = DataLoader(X.reshape(-1, 1, 28, 28), batch_size=batchsize, shuffle=False)\n\ndef predict(model, device, test_loader, use_cuda):\n model.eval()\n predictions = []\n with torch.no_grad():\n for data in test_loader:\n data = data.to(device)\n output = model(data.float())\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n predictions.extend(pred.tolist())\n\n # print(predictions)\n\n return np.array(predictions)\n\n\n\ndata_labels = predict(model, device, data_loader, True)\ndata_labels.shape\n\n#save labels of query and target\nnp.save(args.output_labels, data_labels)\n\n\n\n\n","sub_path":"uda.py","file_name":"uda.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"592808690","text":"from lambda_app.reddit_data_model import User, Thread, Comment, DB\nfrom flask import Flask, request\nimport random\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'\nDB.init_app(app)\n\n\n@app.route('/')\ndef landing():\n user = User()\n user.id = random.randint(0, 999999999)\n user.name = 'paul'\n DB.session.add(user)\n DB.session.commit()\n users = User.query.all()\n return 'hello world, users registered:' + ', '.join([u.name for u in users])\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"lambda_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487626496","text":"class Solution(object):\n # Op1: Bottom-up DP\n def maxCoins(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums = [1] + nums + [1]\n n = len(nums)\n dp = [[0] * n for _ in range(n)]\n\n for gap in range(2, n):\n for i in range(0, n - gap):\n j = i + gap\n for k in range(i + 1, j):\n dp[i][j] = max(dp[i][j], nums[i] * nums[k] * nums[j] +\n dp[i][k] + dp[k][j])\n return dp[0][n - 1]\n\n # Op2: Top-down DP\n def maxCoins2(self, nums):\n nums = [1] + nums + [1]\n n = len(nums)\n dp = [[0] * n for _ in range(n)]\n\n def calculate(i, j):\n if dp[i][j] or j == i + 1:\n return dp[i][j]\n coins = 0\n for k in range(i + 1, j):\n coins = max(coins, nums[i] * nums[k] * nums[j] +\n calculate(i, k) + 
calculate(k, j))\n dp[i][j] = coins\n return coins\n\n return calculate(0, n - 1)\n\n\nnums = [3, 1, 5, 8]\nprint(Solution().maxCoins(nums))\nprint(Solution().maxCoins2(nums))\n","sub_path":"python/312 Burst Balloons.py","file_name":"312 Burst Balloons.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162085897","text":"from sklearn.datasets import load_iris\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import confusion_matrix\niris=load_iris()\nX=iris.data\ny=iris.target\ndef fn(s):\n global X,y\n mlp=MLPClassifier(hidden_layer_sizes=100,activation=s)\n #print(mlp.coefs_)\n #coefs throws error__python 2 problem\n mlp.fit(X,y)\n print(mlp.predict([[3,5,4,2]]))\n p=mlp.predict(X)\n print(confusion_matrix(y,p))\n\ndef main():\n i=7\n while(i!=0):\n i=input('Enter 1-tanh,2-relu,0-exit')\n if(i==1):\n ss='tanh'\n fn(ss)\n elif(i==2):\n ss='relu'\n fn(ss)\n\nmain()\n \n\n\n","sub_path":"day14/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34976609","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n pip_services_container.info.ContainerInfoFactory\r\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n \r\n Container info factory implementation\r\n \r\n :copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.\r\n :license: MIT, see LICENSE for more details.\r\n\"\"\"\r\n\r\nfrom .ContainerInfo import ContainerInfo\r\n\r\nfrom pip_services_commons.refer import Descriptor\r\nfrom pip_services_commons.build import Factory\r\n\r\nContainerInfoFactoryDescriptor = Descriptor(\r\n \"pip-services-container\", \"factory\", \"container-info\", \"default\", \"1.0\"\r\n)\r\n\r\nContainerInfoDescriptor = Descriptor(\r\n \"pip-services-container\", \"container-info\", \"default\", \"default\", \"1.0\"\r\n)\r\n\r\nclass ContainerInfoFactory(Factory):\r\n\r\n def __init__(self):\r\n self.register_as_type(ContainerInfoDescriptor, ContainerInfo)\r\n","sub_path":"pip_services_container/info/ContainerInfoFactory.py","file_name":"ContainerInfoFactory.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540444254","text":"#!/usr/bin/python\n# Solved by Bogdan Trif @ Completed on Sun, 12 Mar 2017, 11:25\n#The Euler Project https://projecteuler.net\n'''\n Idempotents - Problem 407\n\nIf we calculate a**2 mod 6 for 0 ≤ a ≤ 5 we get: 0,1,4,3,4,1.\n\nThe largest value of a such that : a**2 ≡ a (mod 6) is = 4.\nLet's call M(n) the largest value of a < n such that : a**2 ≡ a (mod n).\nSo M(6) = 4.\n\nFind ∑M(n) for 1 ≤ n ≤ 10**7.\n\n\n'''\nimport time, gmpy2, zzz\nfrom math import log, gcd\nfrom itertools import combinations\nfrom functools import reduce\nfrom operator import mul\n\ndef primesieve(n): ### o(^_^)o FASTEST o(^_^)o ### Highly Efficient !!!\n import numpy as np\n \"\"\"return array of primes 2<=p<=n\"\"\"\n sieve=np.ones(n+1,dtype=bool)\n for i in range(2, int((n+1)**0.5+1)):\n if sieve[i]:\n sieve[2*i::i]=False\n return np.nonzero(sieve)[0][2:]\n\ndef get_factors(n): ### o(^_^)o FASTEST o(^_^)o ###\n ''' Decompose a factor in its prime factors. This function uses the pyprimes module. 
THE FASTEST '''\n from pyprimes import factorise\n return [val for sublist in [[i[0]]*i[1] for i in factorise(n)] for val in sublist]\n\ndef egcd(a, b): #Extended Euclidian Algorithm\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\n\n\ndef modinv(a, m): # Modular Inverse\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m\n\n\ndef sieve_factorization(n): # Sieve Factorization and Totient Sieve at once\n ''':Description: Sieve Factorization and Totient Sieve at once\n The factorization is done only with the largest prime powers\n This is needed for the Idempotents Euler Problem 407 '''\n\n from collections import defaultdict\n F = defaultdict(list)\n T = list(range(n+1))\n for p in range(2, n+1):\n if p not in F :\n T[p] = p-1\n for i in range(p+p, n+1, p ) :\n j, k = i, 1\n while j % p == 0 :\n j //= p\n k *= p\n F[i].append(k)\n T[i] = T[i] * (p-1)//p\n\n return F, T\n\n\n\n# OBSERVATION : If modulo n is a prime or a prime ^ some_power ==> a = 1 . Examples :\n# 8, 16, 32, 64, 9 , 27 , 81, 25, 125 + all the primes\n\n\n# ==== GENERAL IDEAS ===========\n# So our plan, in pseudocode, is:\n#\n# 1. Factorize all the integers up to 10**7 by sieving.\n# 2. For each integer n:\n# 3. - if n is a prime or prime power, then a=1;\n# 4. - otherwise n is a product of two or more distinct primes, so:\n# 5. for each way to factorize n=uv with u, v coprime:\n# 6. - find w=u**(−1) (mod v)\n# 7. a is the maximum of uw for all such uv.\n#\n#\n# Now at step 6 we have to find the multiplicative inverse of u modulo v.\n# There are a couple of algorithms for finding the modular multiplicative inverse:\n# normally we would use the extended Euclidean algorithm, but here the method using Euler's theorem\n# is attractive because we know that u is coprime to v.\n# So u**(−1) = u**(φ(v)−1) (mod v) where φ is Euler's totient function.\n\n# ==== Wed, 30 Jan 2013, 05:40, Bo, USA\n# http://math.stackexchange.com/questions/264290/division-into-xx-1/264307#264307\n# This post at Stack Exchange Mathematics seems to have been written for this problem, judging by the dates.\n#\n# It gives a nice explanation of the solution...\n# a^2 = a (mod n)\n# a * (a - 1) = 0 (mod n)\n# Let n = d * e with a = 0 (mod d) , (a - 1) = 0 (mod e) , and gcd(d, e) = 1\n# Let a = d * w and then substitute (d * w - 1) = 0 (mod e)\n# Re-write as d * w = 1 (mod e) , or w = modinv(d, e)\n# Then a = d * modinv(d, e)\n#\n# So, check all coprime d and e with d * e = n to see which one gives the largest a.\n#\n# I used a sieve to compute the divisors of n , storing only divisors >= sqrt(n) and skipping 1 .\n# I also realized we can use the results of the extended Euclidean algorithm for GCD three times - once to check gcd(d, e) = 1 ,\n# once for modinv(d, e) , and once for modinv(e, d). 
This is faster than calling GCD once and modinv twice.\n#\n# 104 seconds in Pypy (with hand-written egcd) or 195 seconds in Python 3.3 (with GMPY's gcdext).\n\n\n\n\n\n# @2017-01-22 , 22:45\n# http://www.math.stonybrook.edu/~moira/mat331-spr10/papers/1978%20McLeanGroups%20in%20Modular%20Arithmetic.pdf\n# https://en.wikipedia.org/wiki/Idempotent_(ring_theory)#Types_of_ring_idempotents\n# http://www.mersenneforum.org/showthread.php?t=21717\n# https://crypto.stanford.edu/pbc/notes/numbertheory/exp.html\n\n\n\n\n\n\n\nprint('\\n--------------------------TESTS------------------------------')\nt1 = time.time()\n\n# print('\\nmath log function :\\t', log(11**5, 11))\n# print('math log function :\\t', log(11**5, 11) %1 == 0 )\n\n\ndef brute_force_testing(lim):\n Mod = {}\n Sm = 0\n for n in range(2, lim+1 ) :\n if gmpy2.is_prime(n) : Sm += 1\n else :\n gf = get_factors(n)\n if len(set(gf)) == 1 : Sm +=1\n else :\n for a in range(n-1, 0, -1):\n if pow(a, 2, n ) == a%n :\n print(str(n)+'. M('+str(n)+')= a=', a , ' a%n=', a%n ,' mod : ' , pow(a, 2, n),' a*(a-1))%n=' , (a*(a-1))%n , ' n=',get_factors(n) , ' a=', get_factors(a) ,' gcd=',gcd(n,a) )\n Sm += a\n Mod[n] = a\n # if a not in Mod : Mod[a] = [n]\n # else : Mod[a].append(n)\n break\n print('-----------')\n\n return Mod, print('\\nAnswer : \\t', Sm ) # Answer : \tFirst 1000 : 314035\n # print('\\nMod :', len(Mod), Mod)\n\n# brute_force_testing(lim = 10**3)\nBF = brute_force_testing(10**3)\n\n# print('\\n-----Interesting test. See the regularity of 1*2, 2*3, 3*4, 4*5 ... ----------------')\n# n=155\n# for i in range(n-1,0,-1):\n# print(i, (i*(i-1))%n ,' ', n-i, n-i+1 )\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\n# import gmpy2, sympy\n# # M(90)= a= 81 a%n= 81 mod : 81 a*(a-1))%n= 0 n= [2, 9, 5]\n# sympy.totient(9)\n# u, v = 15, 8\n# w = modinv(u, v )\n# w , u**(sympy.totient(v)-1) % v, w*u\n\n\nprint('\\n================ My FIRST SOLUTION, 18 min ===============\\n')\nt1 = time.time()\n\ndef idempotents (lim) :\n\n F, T = sieve_factorization( lim )\n S = 0\n for n in range(2, lim+1) :\n if n not in F or len(F[n]) <2 :\n b = 1\n S+=1\n continue\n\n # C = [] !!!! REALLY BAD PRACTICE. Don't do this kind of CODE AGAIN !!!!!! Watch bellow the good one !\n # for q in range(1, len(F[n) ) :\n # C.extend( [ reduce(mul, j) for j in list(combinations(F[n], q) ) ] )\n # U = [ u for u in C ]\n # V = [ n//u for u in C ]\n # a = max([ max( u*(u**(T[v] -1)%v) , v*(v**(T[u]-1)%u) ) for u, v in zip(U, V)] )\n # S+=a\n def a(n):\n max_a = 0\n for q in range(1, len(F[n]) ) :\n for j in combinations(F[n], q ):\n u = reduce(mul, j)\n v = n// u\n a = u*pow(u, T[v]-1, v)\n if a > max_a : max_a = a\n return max_a\n\n b = a(n)\n S += a(n)\n\n # print(str(n)+'. 
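', b , ' ' )\n    # Worked example (illustrative, hand-checked) of what a(n) computes via the coprime splits:\n    #   n = 6 has largest prime-power parts F[6] = [2, 3], with totients T[3] = 2 and T[2] = 1,\n    #   so the coprime pairs are (u, v) = (2, 3) and (3, 2):\n    #     pow(2, T[3]-1, 3) = 2 = modinv(2, 3)  ->  a = 2*2 = 4\n    #     pow(3, T[2]-1, 2) = 1 = modinv(3, 2)  ->  a = 3*1 = 3\n    #   max_a = 4, matching M(6) = 4 from the problem statement.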
\n    # if BF[0][n] != a :\n    #     print(str(n), ' difference -> REAL: ', BF[0][n], ' algo:', a(n) )\n\n        if n%10**5 == 0 : print(str(n))\n\n    return print('\\nAnswer : \\t ' , S )           # Answer : \t 39782849136421\n\n\nidempotents(10**3)           # Completed in : 18.657367 min\n\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)/60,6), 'min\\n\\n')\n\n\nprint('\\n===============OTHER SOLUTIONS FROM THE EULER FORUM ==============')\nprint('\\n--------------------------SOLUTION 1, NICE, 3 min --------------------------')\nt1 = time.time()\n\n# ==== Wed, 30 Jan 2013, 05:40, Bo, USA\n# Same method as quoted in full above: a = d * modinv(d, e) over all coprime splits d * e = n,\n# with gmpy2's gcdext supplying the gcd test and both modular inverses from a single call.\n\n\nfrom array import array\nfrom gmpy2 import gcdext\n\ndef solution_1():\n    N = pow(10, 7) + 1\n    A = [array('I', []) for i in range(N)]\n    for n in range(2, N):\n        n_squared = n * n\n        for m in range(n + n, N, n):\n            if n_squared <= m:\n                A[m].append(n)\n\n    answer = 0\n    for n in range(2, N):\n        M_n = 1\n        for d in A[n]:\n            e = n // d\n            g, x, y = gcdext(d, e)\n            if g == 1:\n                a_1 = d * (x % e)\n                a_2 = e * (y % d)\n                a = a_1 if a_1 >= a_2 else a_2\n                M_n = M_n if M_n >= a else a\n        answer += M_n\n\n    return print(answer)\n\n# solution_1()\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\nprint('\\n--------------------------SOLUTION 2, --------------------------')\nt1 = time.time()\n\ndef solution_2() :\n    from itertools import combinations\n    from functools import reduce\n\n    N = 10**7 + 1\n    answer = 0\n\n    euler = [1 for i in range(N)]\n    facts = [[] for i in range(N)]\n\n    for i in range(2, N):\n        if euler[i] == 1:\n\n            for j in range(i, N, i):\n                euler[j] *= i - 1\n                facts[j] += [i]\n\n            h = i*i\n            while h < N:\n                for k in range(h, N, h):\n                    euler[k] *= i\n                    facts[k][-1] *= i\n                h *= i\n\n        f = facts[i]\n        if len(f) == 1:\n            answer += 1\n            continue\n\n        m = 0\n        for j in range(1, len(f)):\n            for c in combinations(f, j):\n                u = reduce(lambda x,y:x*y, c)\n                v = i // u\n                w = u * pow(u, euler[v] - 1, v)\n                if w > m: m = w\n\n        answer += m\n\n    return print(answer)\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\nprint('\\n--------------------------SOLUTION 3, 7 min --------------------------')\nt1 = time.time()\n\n# ==== Sun, 23 Dec 2012, 17:28, Marcus Stuhr, USA\n# Recall that based on the functional representation of the remainder operation, a**2 ≡ a (mod n) is equivalent to\n# a=a**2−⌊a**2/n⌋*n, which can be rearranged to a(a−1)/n=⌊a**2/n⌋ .\n# This means n is a factor of a(a−1) where 0≤a<n.\n\n        if not (len(self.config['projects']) > 0 and len(self.config['projects']) <= 16):\n            
logging.error(\"bad number of projects - must be > 0 and <= 16\")\n exit(1)\n\n # if --project is given, skip others\n for project_dir in self.config['projects']:\n project = Project(args, project_dir, self.config)\n if self.args.project is not None:\n if self.args.project != project.id:\n continue\n self.projects.append(project)\n \n # create duplicate projects, only works with --project for a single project\n if args.duplicate:\n # requires project id\n if self.args.project is None:\n logging.error(\"provide the project ID to duplicate with --project\")\n\n # make the copies\n for i in range(args.duplicate):\n dup_project = copy.deepcopy(self.projects[0])\n dup_project.id += i + 1\n dup_project.config['caravel_test']['instance_name'] += str(dup_project.id)\n self.projects.append(dup_project)\n\n # assert ids are unique\n ids = [project.id for project in self.projects]\n if len(ids) != len(set(ids)):\n logging.error(\"not all project ids are unique: %s\" % ids)\n exit(1)\n \n def run_tests(self):\n for project in self.projects:\n project.run_tests()\n\n def copy_gds(self):\n for project in self.projects:\n src = os.path.join(project.directory, project.gds_filename)\n dst = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macros', 'gds', os.path.basename(project.gds_filename))\n logging.info(\"copying %s to %s\" % (src, dst))\n shutil.copyfile(src, dst)\n\n src = os.path.join(project.directory, project.lef_filename)\n dst = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macros', 'lef', os.path.basename(project.lef_filename))\n logging.info(\"copying %s to %s\" % (src, dst))\n shutil.copyfile(src, dst)\n\n def create_openlane_config(self):\n num_macros = len(self.projects)\n logging.info(\"create macro config for user_project_wrapper with %d projects\" % num_macros)\n\n width = 2920\n height = 3520\n\n macro_w = 300\n macro_h = 300\n\n h_space = (width - (4 * macro_w)) / 5\n v_space = (height - (4 * macro_h)) / 5\n\n obs_border = 30\n\n macro_inst_file = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macro.cfg')\n includes_file = os.path.join(self.config['caravel']['rtl_dir'], 'user_project_includes.v')\n\n logging.info(\"creating instantiation %s\" % macro_inst_file)\n logging.info(\"creating includes %s\" % includes_file)\n\n macro_inst_fh = open(macro_inst_file, 'w') \n includes_fh = open(includes_file, 'w')\n macro_verilog = \"\"\n\n for column in range(4):\n for row in range(4):\n macro_count = row + column*4\n\n if macro_count >= num_macros:\n continue\n \n module_name = self.projects[macro_count].config['caravel_test']['module_name']\n instance_name = self.projects[macro_count].config['caravel_test']['instance_name']\n proj_id = self.projects[macro_count].id\n\n y = v_space + (v_space + macro_h) * row\n x = h_space + (h_space + macro_w) * column\n macro_inst_fh.write(\"%s %d %d N\\n\" % (instance_name, x, y))\n\n macro_verilog += instantiate_module(module_name, instance_name, proj_id, self.config['wrapper']['instance'])\n\n\n user_project_wrapper_path = os.path.join(self.config['caravel']['rtl_dir'], \"user_project_wrapper.v\")\n add_instance_to_upw(macro_verilog, user_project_wrapper_path, self.config['wrapper']['upw_template'])\n\n paths = []\n for project in self.projects:\n\n # copy project to caravel rtl\n # couldn't get yosys to read include file to work unless the files are below Caravel root directory\n project.copy_project_to_caravel_rtl()\n\n # create include file \n 
includes_fh.write(\"// %s\\n\" % project)\n for path in project.get_module_source_paths(absolute=False):\n path = os.path.join(os.path.basename(project.directory), path)\n includes_fh.write('`include \"%s\"\\n' % path)\n\n # copy the local config.tcl file \n src = 'config.tcl'\n dst = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'config.tcl')\n shutil.copyfile(src, dst)\n\n \"\"\"\n * generate an index.md with a section for each project\n - title, author, description, link, picture\n * could also create the info.yaml file for efabless\n * tile all images for final image\n \"\"\"\n def generate_docs(self):\n fh = open(\"index.md\", 'w')\n fh.write(\"# Multi Project Index\\n\\n\")\n fh.write(\"This index was made with [multi project tools](https://github.com/mattvenn/multi_project_tools)\\n\\n\")\n try_mkdir(self.config[\"docs\"][\"pic_dir\"], self.args.force_delete)\n for project in self.projects:\n conf = project.config[\"project\"]\n # copy pic\n pic_src = os.path.join(project.directory, conf[\"picture\"])\n pic_dst = os.path.join(self.config[\"docs\"][\"pic_dir\"], os.path.basename(conf[\"picture\"]))\n shutil.copyfile(pic_src, pic_dst)\n\n fh.write(\"## %s\\n\\n\" % conf[\"title\"])\n fh.write(\"* Author: %s\\n\" % conf[\"author\"])\n fh.write(\"* Github: [%s](%s)\\n\" % (conf[\"github\"], conf[\"github\"]))\n fh.write(\"* Description: %s\\n\\n\" % conf[\"description\"])\n fh.write(\"![%s](%s)\\n\\n\" % (conf[\"title\"], pic_dst))\n\n logging.info(\"wrote index.md\")\n","sub_path":"collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560539205","text":"\"\"\"\n produces a more generalised diagnostic over multiple runs using multiple\n numbers of agents for arc_ukf.py only. \n This produces a chloropleth style map showing the grand mean error\n over both time and agents for various fixed numbers of agents \n and proportions observed.\n\n\nimport data from arc with following in bash terminal\nscp medrclaa@arc3.leeds.ac.uk:/nobackup/medrclaa/dust/Projects/ABM_DA/experiments/ukf_experiments/ukf_results/agg* /home/rob/dust/Projects/ABM_DA/experiments/ukf_experiments/ukf_results/.\nchange to relevant directories\n\"\"\"\nimport pickle\nimport sys\nsys.path.append(\"../../stationsim\")\nsys.path.append(\"../..\")\n\nfrom stationsim.ukf import plots\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.cm as cm\nimport matplotlib.patheffects as pe\n\nimport glob\nimport seaborn as sns\nimport pandas as pd\n\n#plt.rcParams.update({'font.size':20})\n\n#%%\n#plt.style.use(\"dark_background\")\n \n\ndef grand_depickle_ukf_agg_data_parser(instance):\n \"\"\"PUll data from aggregate ukf class into nice arrays\n Parameters\n ------\n instance : class\n \n Returns\n ------\n b,c,d,nan_array : array_like\n `b` UKF predictions every sample rate time steps. All other elements nan\n `c` Full UKF predictions. Say we assimilate every 5 time points then 4 \n are just ABM forecasts and the 5th are assimilated values. 
Useful\n for animations\n `d` Full true agent positions for comparison\n `nan_array` which elements are nan good for accurate plots/error metrics`\n \"\"\"\n \n sample_rate = instance.sample_rate\n \n nan_array = np.ones(shape=(max([len(agent.history_locations) for \n agent in instance.base_model.agents]),\n 2*instance.pop_total))*np.nan\n for i in range(instance.pop_total):\n agent = instance.base_model.agents[i]\n array = np.array(agent.history_locations)\n array[array==None] ==np.nan\n nan_array[:len(agent.history_locations),2*i:(2*i)+2] = array\n \n nan_array = ~np.isnan(nan_array)\n \n b2 = np.vstack(instance.ukf_histories)\n d = np.vstack(instance.truths)\n\n \n b= np.zeros((d.shape[0],b2.shape[1]))*np.nan\n \n for j in range(int(b.shape[0]//sample_rate)):\n b[j*sample_rate,:] = b2[j,:]\n \n if sample_rate>1:\n c= np.vstack(instance.agg_ukf_preds)\n\n return b,c,d,nan_array\n else:\n return b,d,nan_array \n \ndef l2_parser(instance):\n \"\"\"gets real and UKF predicted data. Measures L2 distances between them\n \n Parameters\n ------\n instance : class\n \n Returns\n ------\n distance_obs : array_like\n `distance_obs` numpy array of distance between agents true positions \n and their respective UKF predictions\n\n \"\"\" \n \n if instance.filter_params[\"sample_rate\"]==1:\n preds,truth,nan_array = grand_depickle_ukf_agg_data_parser(instance)\n else:\n preds,full_preds,truth,nan_array = grand_depickle_ukf_agg_data_parser(instance)\n full_preds[~nan_array]=np.nan #make empty values to prevent mean skewing in diagnostic plots\n\n truth[~nan_array]=np.nan #make empty values to prevent mean skewing in diagnostic plots\n preds[~nan_array]=np.nan #make empty values to prevent mean skewing in diagnostic plots\n plts = plots(instance,\"ukf_results/\")\n distances_obs,oindex,agent_means,t_mean_obs = plts.L2s(truth,preds)\n\n \n return distances_obs\n\n\ndef grand_L2_matrix(n,bin_size,source): \n \"\"\"produces grand median matrix for all 30 ABM runs for choropleth plot\n \n Parameters\n ------\n n,bin_size : float\n population `n` and square size `bin_size`\n \n source : string\n `source` file for data\n \n Returns\n ------\n L2 : array_like\n `L2` matrix of grand medians. each row is a pop each column is a bin size\n \n\n \"\"\" \n \"empty frames\"\n L2 = np.ones((len(n),len(bin_size)))*np.nan\n \n \"cycle over pairs of number of agents and proportions. 
taking grand (mean of medians) L2 mean for each pair\"\n    for i,num in enumerate(n):\n        \n        files={}\n        for j in bin_size: \n            files[j] = glob.glob(source + f\"/agg_ukf_agents_{num}_bin_{j}-*\")\n\n        for k,_ in enumerate(files.keys()):\n            L2_2=[]\n            for file in files[_]:\n                f = open(file,\"rb\")\n                uagg = pickle.load(f)\n                f.close()\n                distances = l2_parser(uagg)#\n                \"grand agent means\"\n                \"grand agent medians\"\n                L2_2.append(np.nanmedian(distances,axis=0))\n\n            L2[i,k]=np.nanmean(np.hstack(L2_2))\n    \n    return L2\n    \ndef grand_L2_plot(data,n,bin_size,save):\n    \"\"\"plots the grand median matrix for all 30 ABM runs as a choropleth\n    \n    Parameters\n    ------\n    data : array_like\n        L2 `data` matrix from grand_L2_matrix\n    n,bin_size : float\n        population `n` and square size `bin_size`\n    save : bool\n        `save` plot?\n\n    \"\"\" \n    \"rotate frame 90 degrees so population on x axis\"\n    data = np.rot90(data,k=1) \n\n    \"initiate plot\"\n    f,ax=plt.subplots(figsize=(8,8))\n    \"colourmap\"\n    cmap = cm.viridis\n    \"set nans for 0 agents unobserved to white (not black because black text)\"\n    cmap.set_bad(\"white\") \n    \n    \" mask needed to get bad white squares in imshow\"\n    data2 = np.ma.masked_where(np.isnan(data),data)\n    \"rotate again so imshow right way up (origin bottom left i.e. lower)\"\n    data2=np.flip(data2,axis=0) \n    im=ax.imshow(data2,interpolation=\"nearest\",cmap=cmap,origin=\"lower\")\n    \n    \"labelling\"\n    ax.set_xticks(np.arange(len(n)))\n    ax.set_yticks(np.arange(len(bin_size)))\n    ax.set_xticklabels(n)\n    ax.set_yticklabels(bin_size)\n    ax.set_xticks(np.arange(-.5,len(n),1),minor=True)\n    ax.set_yticks(np.arange(-.5,len(bin_size),1),minor=True)\n    ax.grid(which=\"minor\",color=\"k\",linestyle=\"-\",linewidth=2)\n    ax.set_xlabel(\"Number of Agents\")\n    ax.set_ylabel(\"Aggregate Grid Square Size\")\n    #plt.title(\"Grand L2s Over Varying Agents and Percentage Observed\")\n\n\n    \"text on top of squares for clarity\"\n    data = np.flip(data,axis=0)\n    for i in range(data.shape[0]):\n        for j in range(data.shape[1]):\n            plt.text(j,i,str(data[i,j].round(2)),ha=\"center\",va=\"center\",color=\"w\",\n                     path_effects=[pe.Stroke(linewidth = 0.7,foreground='k')])\n    \n    \"colourbar alignment and labelling\"\n    divider = make_axes_locatable(ax)\n    cax = divider.append_axes(\"right\",size=\"5%\",pad=0.05)\n    cbar=plt.colorbar(im,cax,cax)\n    cbar.set_label(\"Grand Mean L2 Error\")\n    \n    \"further labelling and saving\"\n    cbar.set_label(\"Aggregate Median L2s\")\n    ax.set_ylabel(\"Aggregate Grid Square Width\")\n    if save:\n        plt.savefig(\"Aggregate_Grand_L2s.pdf\")\n\n    \ndef boxplot_parser(n,bin_size,source):\n    \"\"\"similar to grand_L2_matrix but creates a pandas frame for sns.catplot to read\n    \n    .. deprecated:: \n        use median boxplots this is dumb\n    \n    Parameters\n    ------\n    n,bin_size : float\n        population `n` and square size `bin_size`\n    \n    source : str\n        `source` file for data\n    Returns\n    ------\n    L2 : array_like\n        `L2` matrix of grand medians. Produces data frame with columns for pop,\n        bin_size, and median. This version gives a median for every AGENT rather\n        than a grand median for each run (I.E 30x12xpopulations rows vs 30x12 rows)\n    \"\"\" \n    L2 = {}\n    for i in n:\n        files={}\n        for j in bin_size: \n            files[j] = glob.glob(source +f\"/agg_ukf_agents_{i}_bin_{j}-*\")\n        \"sub dictionary for each bin size\" \n        L2[i] = {} \n        for _ in files.keys():\n            L2_2=[]\n            for file in files[_]:\n                f = open(file,\"rb\")\n                u = pickle.load(f)\n                f.close()\n                distances = l2_parser(u)#\n                \n                L2_2.append(np.nanmedian(np.nanmean(distances,axis=0)))\n\n            L2[i][_] = np.hstack(L2_2)\n    \n    \"stack dictionaries into dataframe with corresponding n and bin_size next to each agent error\"\n    sub_frames = []\n\n    for i in n:\n        for j in bin_size:\n            L2s = L2[i][j]\n            sub_frames.append(pd.DataFrame([[i]*len(L2s),[j]*len(L2s),L2s]).T)\n\n    \"stack into grand frames and label columns\"\n    frame = pd.concat(sub_frames)\n    frame.columns = [\"n\",\"square width\",\"L2 agent errors\"]\n\n    return frame\n\ndef boxplot_plots(n,bin_size,frame,separate,save): \n    \"\"\"produces per-agent L2 error boxplots for all 30 ABM runs\n    \n    ..deprecated:: \n        use medians below\n    Parameters\n    ------\n    frame : array_like\n        L2 `frame` from boxplot_parser\n    n,bin_size : float\n        population `n` and square size `bin_size`\n    save,separate : bool\n        `save` plot?\n        `separate` box plots by population or have one big catplot?\n\n    \"\"\" \n    \"separate the plots by pop\"\n    if separate:\n        for i in n:\n            f_name = f\"Aggregate_boxplot_{i}.pdf\"\n            y_name = \"L2 agent errors\"\n            n_subframe = frame.loc[frame[\"n\"]==str(i)]\n\n            f = plt.figure()\n            sns.boxplot(x=\"square width\",y=y_name,data=n_subframe)\n            if save:\n                f.savefig(f_name)\n    else:\n        \"or one big catplot\"\n        f_name = f\"Aggregate_boxplot.pdf\"\n        y_name = \"L2 agent errors\"\n\n        f = plt.figure()\n        sns.catplot(x=\"square width\",y=y_name,col=\"n\",kind=\"box\", data=frame)\n        plt.tight_layout()\n        if save:\n            plt.savefig(f_name)\n    \ndef boxplot_medians(n,bin_size,source):\n    \"\"\"similar to grand_L2_matrix but creates a pandas frame for sns.catplot to read\n    \n    \n    Parameters\n    ------\n    n,bin_size : float\n        population `n` and square size `bin_size`\n    \n    Returns\n    ------\n    L2 : array_like\n        `L2` matrix of grand medians. Produces data frame with columns for pop,\n        bin_size, and median. This version gives a grand (mean of) median for\n        each run (I.E 30x12 rows total.). This is independent of population size\n        and statistically makes a lot more sense vs the other boxplot.\n    \n\n    \"\"\" \n    L2 = {}\n    \n    \"cycle over pairs of number of agents and proportions. 
taking grand (mean of medians) L2 mean for each pair\"\n    for i,num in enumerate(n):\n        \n        files={}\n        for j in bin_size: \n            files[j] = glob.glob(source+f\"/agg_ukf_agents_{num}_bin_{j}-*\")\n\n        for k,_ in enumerate(files.keys()):\n            L2_2=[]\n            for file in files[_]:\n                f = open(file,\"rb\")\n                uagg = pickle.load(f)\n                f.close()\n                distances = l2_parser(uagg)#\n                #\"grand agent means\"\n                #L2_2.append(np.nanmean(distances,axis=0))\n                \"grand agent medians\"\n                L2_2.append(np.nanmedian(np.nanmean(distances,axis=0)))\n            \n            L2[num,_]=L2_2\n\n    L2_frame = pd.DataFrame(columns =[\"n\",\"square width\",\"grand_median\"]) \n    \n    for i in n:\n        for j in bin_size:\n            data = L2[i,j]\n            ns = [i]*len(data)\n            bins = [j]*len(data)\n            L2_2_frame = pd.DataFrame(np.array([ns,bins,data]).T,columns =[\"n\",\"square width\",\"grand_median\"])\n            L2_frame = pd.concat([L2_frame,L2_2_frame],axis=0)\n    \n    return L2_frame\n\ndef median_boxplot(L2_frame):\n    \"\"\"produces the grand median boxplot for all 30 ABM runs\n    \n    \n    Parameters\n    ------\n    L2_frame : array_like\n        grand-median `L2_frame` from boxplot_medians\n\n    \"\"\" \n    f_name = f\"Aggregate_grand_median_boxplot.pdf\"\n    y_name = \"grand_median\"\n    f = plt.figure()\n    sns.catplot(x=\"square width\",y=y_name,col=\"n\",kind=\"box\", data=L2_frame)\n    plt.tight_layout()\n    if save:\n        plt.savefig(f_name)\n    \n\n    \n    #%%\nif __name__ == \"__main__\":\n    \n    \"\"\"\n    plot1 : choropleth\n    plot2 : all agents boxplot (deprecated)\n    plot3 : grand median boxplot\n    n : list of populations\n    bin_size : list of grid square sizes\n    save : save plots?\n    \"\"\"\n    plot1 = False\n    plot2 = False\n    plot3 = True\n    n=[10,20,30]\n    bin_size = [5,10,25,50]\n    source = \"media/rob/ROB1/ukf_results\"\n    \n    save=True\n    if plot1:\n        L2 = grand_L2_matrix(n,bin_size, source)\n        grand_L2_plot(L2,n,bin_size,save)\n    if plot2:\n        frame = boxplot_parser(n, bin_size, source)\n        boxplot_plots(n,bin_size,frame,False,save) \n\n    if plot3:\n        L2_frame = boxplot_medians(n, bin_size, source)\n        median_boxplot(L2_frame)\n","sub_path":"Projects/ABM_DA/experiments/ukf_experiments/ukf_old/arc_ukf_grand_depickle.py","file_name":"arc_ukf_grand_depickle.py","file_ext":"py","file_size_in_byte":13038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"421424693","text":"# This module provides the frame-synchronisation framework needed for the game's networking\n\nimport socket\nimport queue\nimport threading\n\n\nclass GameDataSync:\n    def __init__(self):\n        # Server address\n        self.HOST = ('lutherlau.com', 7890)\n        self.BUF_SIZE = 1024\n        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        # Queues hold strings used for network communication\n        self.get_queue = queue.Queue()\n        self.put_queue = queue.Queue()\n        # Send/receive threads\n        self.get_message = None\n        self.put_message = None\n\n        # Start networking and launch the message threads\n        if self.connect():\n            self.start_sync()\n\n    # Message loop\n    def start_sync(self):\n        if self.get_message is None:\n            self.get_message = threading.Thread(target=self.recv_data, args=())\n            self.get_message.setDaemon(True)\n            self.get_message.start()\n        if self.put_message is None:\n            self.put_message = threading.Thread(target=self.send_data, args=())\n            self.put_message.setDaemon(True)\n            self.put_message.start()\n\n    # Connect to the server\n    def connect(self):\n        try:\n            self.client.connect(self.HOST)\n            return True\n        except Exception as e:\n            if '10056' in str(e):\n                return True\n            # print('error: ' + str(e))\n            return False\n\n    # Send data\n    def send_data(self):\n        while True:\n            if not self.put_queue.empty():\n                data = self.put_queue.get()\n                try:\n                    self.client.send(data.encode(encoding='utf-8'))\n                    # print('sending ' + data)\n                except Exception as e:\n                    # print('error: ' + str(e))\n                    continue\n\n                # Exit the program and release the connection (kept for now)\n                if data[0] == 'q':\n                    self.put_message = None\n                    break\n\n    # Receive data\n    def recv_data(self):\n        while True:\n            try:\n                data = self.client.recv(self.BUF_SIZE)\n                # Decode and enqueue\n                data = data.decode(encoding='utf-8')\n                if data:\n                    # Split the data into command messages\n                    data = data.split('@')\n                    for str_ in data:\n                        self.get_queue.put(str_)\n                        # ('received ' + str_)\n\n                    # Exit the program and release the connection (kept)\n                    if data[0] == 'q':\n                        self.get_message = None\n                        break\n            except Exception as e:\n                # print('error: ' + str(e))\n                # please restart game\n                self.get_queue.put('p r')\n                continue\n\n    # Close the connection (kept)\n    def close(self):\n        try:\n            # Shut down the network thread\n            self.client.close()\n        except Exception as e:\n            # print('error: ' + str(e))\n            return False\n        return True\n\n    # Send a message\n    def send(self, data):\n        self.client.send(data.encode(encoding='utf-8'))\n\n    # Receive a message\n    def recv(self):\n        return self.client.recv(self.BUF_SIZE).decode(encoding='utf-8')\n\n    # Get data\n    def get_data(self):\n        return self.put_queue.get()\n\n    # Put data on the send queue\n    def put_data(self, data):\n        self.put_queue.put(data)\n","sub_path":"Pipo_client/game_client/network_module.py","file_name":"network_module.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"631784272","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport csv\n\nlocation='F:/BetaPic/acs/raw/'\nprefix='hst_9861_'\nnumlist=['01','04','05','06','07','08','09','10']\nsuffix='_acs_wfc_f'\nwvlist=['435','606','814']\nsuffix2='w_drz.fits'\n\npairs=[(0,1),(0,2),(1,2),(0,),(2,),(1,),(0,2),(1,2)]\n\nmethod = ['lin','parab','cubic','ave']\n\nside=['LEFT','RIGHT','TOP','BOT']\n\nlx=[]\npx=[]\ncx=[]\nly=[]\npy=[]\ncy=[]\navex=[]\navey=[]\nns=[]\nws=[]\nms=[]\n\n\nif __name__=='__main__':\n    for i,n in enumerate(numlist): #Image number\n        for j in pairs[i]: #Wavelength\n\n            with np.load(location+n+'/intdata.npz') as d:\n                ints=d['ints']\n            lx.append(ints[0,0])\n            ly.append(ints[0,1])\n            px.append(ints[1,0])\n            py.append(ints[1,1])\n            cx.append(ints[2,0])\n            cy.append(ints[2,1])\n            avex.append((ints[0,0]+ints[1,0]+ints[2,0])/3.)\n            avey.append((ints[0,1]+ints[1,1]+ints[2,1])/3.)\n            ns.append(n)\n            ws.append(wvlist[j])\n\n    with open('acs_ints.csv','wb') as csvfile:\n        writer = csv.writer(csvfile)\n        writer.writerow(['Image Num.','Wavelength','Method','x','y','x-avex','y-avey'])\n        for i in range(len(lx)):\n            writer.writerow([ns[i],ws[i],method[0],lx[i],ly[i],lx[i]-avex[i],ly[i]-avey[i]])\n            writer.writerow([ns[i],ws[i],method[1],px[i],py[i],px[i]-avex[i],py[i]-avey[i]])\n            writer.writerow([ns[i],ws[i],method[2],cx[i],cy[i],cx[i]-avex[i],cy[i]-avey[i]])\n            writer.writerow([ns[i],ws[i],method[3],avex[i],avey[i],0,0])","sub_path":"image_fitting/past_original_data/intcsv_acs.py","file_name":"intcsv_acs.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"332154516","text":"import requests\r\nimport re\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nfrom dateutil import tz\r\nimport sqlite3\r\nimport config\r\n\r\ndef UTCtoCET(timestampUTC):\r\n\t#timestampUTC = datetime.strptime(timestampUTC, '%Y-%m-%d %H:%M:%S')\r\n\t# Tell the datetime object that it's in UTC time zone since \r\n\t# datetime objects are 'naive' by default\r\n\ttimestampUTC = 
timestampUTC.replace(tzinfo=from_zone)\r\n\t# Convert time zone\r\n\ttimestampCET = timestampUTC.astimezone(to_zone)\r\n\ttimestampCET = datetime.strftime(timestampCET, '%Y-%m-%d %H:%M')\r\n\treturn(timestampCET)\r\n\r\ndef savetoDB(db_data):\r\n\tdb = sqlite3.connect(config.dbfilepath)\r\n\tcur = db.cursor()\r\n\r\n\tfor row in db_data:\r\n\t\tsql_insert = (\"\"\"INSERT INTO forecastLog (datetime, cloud_cover_total, temp_gnd, u_wind_gnd, v_wind_gnd, ghi_sfc) VALUES (?,?,?,?,?,?)\"\"\",\r\n\t\t\t\t\t\t(row[0],row[1], row[2], row[3], row[4], row[5]))\r\n\t\tcur.execute(*sql_insert)\r\n\t\tdb.commit()\r\n\r\n\tdb.close()\r\n\tprint(\"DATABASE - OK!\")\r\n\r\n# --------------------------------------------------------------------\r\n# \"tcdcclm\" ... entire atmosphere total cloud cover [%]\r\n# \"tmp2m\" ... 2 m above ground temperature [k] \r\n# \"ugrd10m\" ... 10 m above ground u-component of wind [m/s] \r\n# \"vgrd10m\" ... 10 m above ground v-component of wind [m/s]\r\n# \"dswrfsfc\" ... surface downward short-wave radiation flux [w/m^2] \r\n# --------------------------------------------------------------------\r\nvariables = [\"tcdcclm\",\"tmp2m\", \"ugrd10m\", \"vgrd10m\" ,\"dswrfsfc\"]\r\n\r\n# --------------------------------------------------------------------\r\n# \"12z\" ... 1200 UTC run day n --> day n+1 00:00 - 21:00 UTC \r\n# \"18z\" ... 1800 UTC run day n --> day n+1 00:00 - 21:00 UTC\r\n# --------------------------------------------------------------------\r\nmodelrun = {\"12z\" : \"[4:11]\", \"18z\" : \"[2:9]\"}\r\n\r\n# Zwettl\r\nlat = config.lat\r\nlon = config.lon\r\n\r\nfrom_zone = tz.gettz('UTC')\r\nto_zone = tz.gettz('Europe/Vienna')\r\n\r\ntoday = datetime.today()\r\nyesterday = today - timedelta(days=1)\r\n\r\ntimestamp_base = datetime(today.year, today.month, today.day, 0, 0, 0, 0)\r\ntimestamp_list = [timestamp_base + timedelta(hours=x*3) for x in range(0, 8)]\r\ntimestamp_list = [UTCtoCET(timestamp) for timestamp in timestamp_list]\r\n\r\nurl_date = str(yesterday.year) + str(yesterday.month) + str(yesterday.day)\r\nurl_lat = str(int(360 + lat / 0.25))\r\nurl_lon = str(int(lon / 0.25))\r\n\r\n\r\nprint(\"Forecast start date: \" + url_date)\r\n\r\n\r\nout=list()\r\nout.append(timestamp_list)\r\n\r\nfor v in variables:\r\n\r\n\tprint(\"Download: \" + v)\r\n\turl = (\"http://nomads.ncep.noaa.gov:9090/dods/gfs_0p25/gfs\" + \r\n\t\t\turl_date + \"/gfs_0p25_18z.ascii?\" + v + modelrun[\"18z\"] +\r\n\t\t\t\"[\" + url_lat + \":\" + url_lat + \"]\" +\r\n\t\t\t\"[\" + url_lon + \":\" + url_lon + \"]\")\r\n\tprint(url)\r\n\tr = requests.get(url)\r\n\tdata = r.text\r\n\tif \"error\" not in data:\r\n\t\tprint(\"OK\")\r\n\telse:\r\n\t\tprint(\"ERROR - Dateset not available...using previous modelrun '12z' instead...\")\r\n\t\turl = (\"http://nomads.ncep.noaa.gov:9090/dods/gfs_0p25/gfs\" + \r\n\t\turl_date + \"/gfs_0p25_12z.ascii?\" + v + modelrun[\"12z\"] +\r\n\t\t\"[\" + url_lat + \":\" + url_lat + \"]\" +\r\n\t\t\"[\" + url_lon + \":\" + url_lon + \"]\")\r\n\t\tprint(url)\r\n\t\tr = requests.get(url)\r\n\t\tdata = r.text\r\n\r\n\tend = data.index(\"time\")\r\n\tdata = data[0:end]\r\n\tdata = str(data)\r\n\tdata = data.replace(\"\\n\",\"\")\r\n\tdata = data.replace(\" \",\"\")\r\n\tdata = re.sub(\"[\\(\\[].*?[\\)\\]]\",\"\",data)\r\n\tdatalist = data.split(\",\")\r\n\tdatalist = datalist[2:10]\r\n\tdatalist = [float(x) for x in datalist]\r\n\tprint(datalist)\r\n\tout.append(datalist)\r\n\r\ndataDB = 
zip(*out)\r\n#savetoDB(dataDB)\r\n\r\n\r\n\r\n","sub_path":"dods_GFS.py","file_name":"dods_GFS.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379867423","text":"\"\"\"\nUtility functions for getting clan 99s.\n\"\"\"\n\nimport time\nimport marshal\n\n# Return the number of days since epoch.\ndef get_days_since_epoch():\n\treturn int(time.time() / (24 * 60 * 60))\n\n# Load a marshalled data struture, if it exists and return DEFAULT otherwise.\ndef load_data(filename, default):\n\ttry:\n\t\tfile = open(filename, 'rb')\n\t\tresult = marshal.load(file)\n\t\tfile.close()\n\t\treturn result\n\texcept FileNotFoundError:\n\t\treturn default\n\n# Marshalls and writes data structures to disk.\ndef write_data(filename, struct):\n\tfile = open(filename, 'wb')\n\tmarshal.dump(struct, file)\n\tfile.close()","sub_path":"clan_99s/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519076639","text":"\"\"\"\nName: Aditya Krishna Yerraguntla Kumar\nFile: tts.py\nDescription:\n The point of this is to make geting stuff done while I am \n commuting to school. Like I would be able to listen to a \n textbook or something useful while I am driving\n\n This script uses googles tts because it contains all of the\n the stuff that I would like \n\n\"\"\"\n\n#libs that will be used\nfrom gtts import gTTS\nimport audiotsm\nimport sys\n\n\nSPEED_1 = 1.00\nSPEED_2 = 1.25\nSPEED_3 = 1.50 \nSPEED_4 = 1.75 \nSPEED_5 = 2.00\n\nif (len(sys.argv) != 3):\n print(\"useage: python3 tts.py textFile.txt speed\")\nelse:\n fileName = sys.argv[1]\n fileName = fileName.split(\".\")\n mp3FileName = fileName[0] + \".mp3\"\n speed = int(sys.argv[2])\n \n if (speed == 1):\n speed = SPEED_1\n elif (speed == 2):\n speed = SPEED_2\n elif (speed == 3):\n speed = SPEED_3\n elif (speed == 4):\n speed = SPEED_4\n elif (speed == 5):\n speed = SPEED_5\n\n fName = fileName[0] +\".\"+ fileName[1]\n\n with open(fName, \"r\") as txt:\n #print(txt.read())\n tts = gTTS(text=txt.read(), lang=\"en\")\n tts.save(mp3FileName)","sub_path":"tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"26694855","text":"import re\n\ndef input():\n return open('day_06/input.txt').read().split('\\n\\n')\n\nletters = re.compile('''[a-z]''')\ndef str_to_letters_set(s):\n return set(filter(lambda x: letters.match(x), s))\n\ndef part_1(groups):\n groups_answers = map(lambda g: str_to_letters_set(g), groups)\n groups_counts = map(len, groups_answers)\n total_count = sum(groups_counts)\n return total_count\n\ndef intersect_all(s):\n result = s[0]\n for o in s[1:]:\n # Unfortunately, internal group split produces an\n # empty set for the last entry in the input, so\n # we must protect against empty sets...\n if o:\n result.intersection_update(o)\n return result\n\ndef part_2(groups):\n groups_persons = map(lambda g: g.split('\\n'), groups)\n groups_persons_answers = map(lambda g: list(map(lambda p: str_to_letters_set(p), g)), groups_persons)\n groups_common_answers = map(lambda g: intersect_all(g), groups_persons_answers)\n groups_common_counts = map(len, groups_common_answers)\n total_common_count = sum(groups_common_counts)\n return total_common_count\n\nif __name__ == '__main__':\n print(part_1(input()))\n 
print(part_2(input()))\n","sub_path":"day_06/day_06.py","file_name":"day_06.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402692291","text":"# SPDX-FileCopyrightText: 2020 Svetlana Pantelejeva\n#\n# SPDX-License-Identifier: MIT\n\nimport dateparser\n\nfrom typing import Optional\n\nfrom components import NameComponents\nfrom utils import find_box_starting_with\n\n\ndef try_google(text_boxes, parent_logger) -> Optional[NameComponents]:\n logger = parent_logger.getChild(\"google\")\n\n is_google = any(\"Google Commerce Limited\\n\" in box for box in text_boxes)\n if is_google:\n logger.debug(\"This is a Google invoice.\")\n\n address_idx = text_boxes.index(\"Bill to\\n\")\n account_holder_name = text_boxes[address_idx + 1].strip()\n\n invoice_date_title_box = find_box_starting_with(text_boxes, \"Invoice date\\n\")\n assert invoice_date_title_box\n invoice_date_idx = text_boxes.index(invoice_date_title_box) + 1\n\n invoice_date_str = text_boxes[invoice_date_idx]\n invoice_date = dateparser.parse(invoice_date_str, languages=[\"en\"])\n assert invoice_date is not None\n\n return NameComponents(invoice_date, \"Google\", account_holder_name, \"Invoice\")\n","sub_path":"pdfrename/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"356688318","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 15 13:13:30 2021\n\n@author: guusv\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import colors\nfrom matplotlib.ticker import PercentFormatter\nfrom numpy import genfromtxt\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\n#%% load the data\ndata = genfromtxt('winequality-white.csv', delimiter=';')\ny=data[1:len(data),11]\nX=data[1:len(data),0:11]\n#%% make a histogram\nscores=[]\ncounts=[]\nfor i in range(1,11):\n scores.append(i)\n c=0\n for j in y:\n if i==j:\n c+=1\n counts.append(c)\nplt.bar(scores,counts)\nplt.xlabel('Quality scores(1-10)')\nplt.ylabel('Counts')\nplt.title('Distribution of quality scores')\nplt.show()\n#%% bin the scores into poor, mediocre and excellent \nlabels = np.empty(len(y), dtype=str)\nfor i in range(len(y)):\n if y[i]<5:\n labels[i]='poor'\n elif y[i]>7:\n labels[i]='excellent'\n else:\n labels[i]='mediocre'\nunique, counts = np.unique(labels, return_counts=True)\nprint(dict(zip(unique, counts)))\ny=labels\n#%% create train and test set\nnp.random.seed(1)\nindices = np.random.permutation(X.shape[0])\n\nidx_train, idx_test = indices[:int(len(X)*0.80)], indices[int(len(X)*0.80):]\nX_train, X_test = X[idx_train,:], X[idx_test,:]\ny_train, y_test = y[idx_train], y[idx_test]\n#%% make a new train set with proportion p mediocre samples\np=4\n[idx_e] = np.where(y_train=='e')\n[idx_m] = np.where(y_train=='m')\n[idx_p] = np.where(y_train=='p')\nm = int((len(idx_e)+len(idx_p))/2)\nidx_m=idx_m[0:int(p*m)]\nidx_train=np.concatenate((idx_e,idx_m,idx_p))\nX_train, y_train = X_train[idx_train,:], y_train[idx_train]\n\n\n#%%\nn=500\nd=10\n\n#%% train an isolation forest and random forest on top\niso_forest = IsolationForest(random_state=0, n_estimators=n).fit(X_train)\noutliers = iso_forest.predict(X_train)\n[idx_out]=(outliers<0).nonzero()\nX_out, y_out= X_train[idx_out], y_train[idx_out]\np,e = y_out =='p', 
y_out=='e'\nprint('nr detected outliers', len(y_out))\nprint('percentage outstanding wines', sum(p+e)/len(y_out))\nprint('undetected outstanding wines', 363-sum(p+e))\nout_forest = RandomForestClassifier(max_depth=d, random_state=0, n_estimators=n)\nout_forest.fit(X_out,y_out)\npred_out_train = out_forest.predict(X_out)\nprint('training score', sum(pred_out_train==y_out)/len(y_out))\n\n#test the model\noutliers = iso_forest.predict(X_test)\n[idx_out]=(outliers<0).nonzero()\nX_out, y_out= X_test[idx_out], y_test[idx_out]\npred_out_test = out_forest.predict(X_out)\nprint('test score', sum(pred_out_test==y_out)/len(y_out))\nprint(classification_report(y_out, pred_out_test))\n\n#%% train regular random forest\nn=500\nd=10\nprint(n,d)\nreg_forest = RandomForestClassifier(max_depth=d, random_state=0, n_estimators=n)\nreg_forest.fit(X_train,y_train)\npred_reg_train = reg_forest.predict(X_train)\nprint('training score', sum(pred_reg_train==y_train)/len(y_train))\n\n#test the model\npred_reg_test = reg_forest.predict(X_test)\nprint('test score', sum(pred_reg_test==y_test)/len(y_test))\nprint(classification_report(y_test, pred_reg_test))\n\n#%% results for the three experiments\n#nr of trees\nn=[10,50,100,500,1000,5000]\nx=range(len(n))\ne_p=np.array([16,16,16,17,17,16])/100\ne_r=np.array([83,88,85,88,85,85])/100\nm_p=np.array([98,98,98,98,98,98])/100\nm_r=np.array([51,53,55,54,55,53])/100\np_p=np.array([11,12,13,12,12,12])/100\np_r=np.array([89,89,89,89,89,89])/100\n\nplt.plot(x,e_p, label='precision excellent wines', color='red')\nplt.plot(x,m_p, label='precision mediocre wines', color ='purple')\nplt.plot(x,p_p, label='precision poor wines', color = 'blue' )\nplt.plot(x,e_r, label='recall excellent wines', color='red', linestyle='dashed')\nplt.plot(x,m_r, label='recall mediocre wines', color ='purple', linestyle='dashed')\nplt.plot(x,p_r, label='recall poor wines', color = 'blue',linestyle='dashed')\nplt.xticks(range(len(n)),n)\nplt.xlabel('number of trees')\nplt.ylabel('classification score')\nplt.title('Classification results for different numbers of trees')\nplt.show()\n\n#%% depth of trees\nd=[2,5,10,20,50,100]\nx=range(len(d))\ne_p=np.array([11,13,17,17,17,17])/100\ne_r=np.array([63,73,88,88,88,88])/100\nm_p=np.array([95,97,98,98,98,98])/100\nm_r=np.array([47,52,54,54,54,54])/100\np_p=np.array([10,12,12,12,12,12])/100\np_r=np.array([81,86,89,86,86,86])/100\n\nplt.plot(x,e_p, label='precision excellent wines', color='red')\nplt.plot(x,m_p, label='precision mediocre wines', color ='purple')\nplt.plot(x,p_p, label='precision poor wines', color = 'blue' )\nplt.plot(x,e_r, label='recall excellent wines', color='red', linestyle='dashed')\nplt.plot(x,m_r, label='recall mediocre wines', color ='purple', linestyle='dashed')\nplt.plot(x,p_r, label='recall poor wines', color = 'blue',linestyle='dashed')\nplt.xticks(range(len(d)),d)\nplt.xlabel('depth of trees')\nplt.ylabel('classification score')\nplt.title('Classification results for different depths of trees')\nplt.show()\n\n#%% class imbalance\n\ni=['4:1', '2:1', '1:1', '1:2', '1:4', '1:26(original)']\nx=range(len(i))\ne_p=np.array([10,11,17,23,40,100])/100\ne_r=np.array([100,100,88,61,41,5])/100\nm_p=np.array([100,99,98,97,95,93])/100\nm_r=np.array([4,18,54,81,94,100])/100\np_p=np.array([7,8,12,25,39,100])/100\np_r=np.array([95,89,89,76,49,11])/100\n\nplt.plot(x,e_p, label='precision excellent wines', color='red')\nplt.plot(x,m_p, label='precision mediocre wines', color ='purple')\nplt.plot(x,p_p, label='precision poor wines', color = 'blue' 
)\nplt.plot(x,e_r, label='recall excellent wines', color='red', linestyle='dashed')\nplt.plot(x,m_r, label='recall mediocre wines', color ='purple', linestyle='dashed')\nplt.plot(x,p_r, label='recall poor wines', color = 'blue',linestyle='dashed')\nplt.xticks(range(len(i)),i)\nplt.xlabel('proportion samples excellent/poor : mediocre')\nplt.ylabel('classification score')\nplt.title('Effect of class imbalance w.r.t. mediocre wines')\nplt.show()","sub_path":"Assignment-EnsembleLearning/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"507088457","text":"import os, shutil\n\nimport cv2 # opencv\nimport numpy as np\nimport torch\n\nfrom keras_facenet import FaceNet\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import LabelEncoder\nfrom pytorch_metric_learning.losses import AngularLoss, BaseMetricLossFunction\nfrom matplotlib import pyplot as plt\nfrom mtcnn.mtcnn import MTCNN\nfrom scipy.spatial import distance\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelBinarizer\n\nencoder = LabelBinarizer()\n\nscaller = StandardScaler()\nmodel = FaceNet().model\ndetector = MTCNN() # Works slowly TODO: rip this out\nface_cascade=cv2.CascadeClassifier('../cascade/haarcascade_frontalface_default.xml')\ndest_path = '../dataset/img_align_sorted_light'\n\ndef l2_normalize(x, axis=-1, epsilon=1e-10):\n    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n    return output\nclass CustomScaller:\n    def fit_transform(self, x:np.ndarray):\n        # Haven't settled on one yet\n        # mean = np.mean(x, keepdims=True)\n        # std = np.std(x, keepdims=True)\n        # return (x - mean) / std\n        max = np.max(x, keepdims=True)\n        min = np.min(x, keepdims=True)\n        return (x - min) / (max - min)\n\ndef norm(x, scaller=None):\n    if scaller is None:\n        scaller = CustomScaller()\n    return scaller.fit_transform(x)\n\ndef get_embeding(x:np.ndarray):\n    return model.predict(\n        norm(cv2.resize(x, (160, 160))\n             .reshape(-1,160,160,3))\n    )\n\ndef predict_embedded_eucledian_distance(test_x, train_x, train_y):\n    res = []\n    for emb_test in test_x:\n        min = 100\n        res.append(None)\n        for indx, emb_train in enumerate(train_x):\n            min_new = distance.euclidean(emb_train, emb_test)\n            if min_new < 0.75 and min > min_new:\n                min = min_new\n                res[-1] = train_y[indx]\n    return res\n\nif __name__ == \"__main__\":\n    kf = KFold(n_splits=2)\n    test_y, test_x, train_x, train_y = [[], [], [], []]\n    for clss in os.listdir(dest_path):\n        print(\"class: \" + clss, end='\\r')\n        dates = os.listdir(dest_path + '/' + clss)\n        train_index, test_index = kf.split(dates)\n        for indx in train_index[0]:\n            train_y.append(clss)\n            frame = cv2.imread(dest_path + '/' + clss + '/' + dates[indx])\n            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            train_x.append(frame)\n        for indx in test_index[0]:\n            test_y.append(clss)\n            frame = cv2.imread(dest_path + '/' + clss + '/' + dates[indx])\n            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            test_x.append(frame)\n\n    print()\n    med_val_test = 605\n    med_val_train = 588\n    hight_board = 1511\n    transfomed_label_test = [int(x) for x in test_y]\n    transfomed_label_train = [int(x) for x in train_y]\n\n\n    test_y_new = np.zeros(hight_board)\n    test_y_new[med_val_test:hight_board] = 1\n    train_y_new = np.zeros(hight_board)\n    train_y_new[med_val_train:hight_board] = 1\n\n    print(\"lets train\")
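\n    # Note on the 0.75 cut-off used by predict_embedded_eucledian_distance (illustrative\n    # arithmetic): for L2-normalised embeddings the Euclidean distance d and cosine\n    # similarity c satisfy d**2 = 2 - 2*c, so d < 0.75 accepts matches with\n    # c > 1 - 0.75**2/2 ~ 0.72.\n    # model_svc = 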
SVC(kernel='linear', probability=True).fit(train_x, transfomed_label_test)\n # test_y_predicted_new = model_svc.predict(test_x)\n\n test_x_embeded, train_x_embeded = [[], []]\n for tst in test_x:\n test_x_embeded.append(get_embeding(tst)[0])\n for trn in train_x:\n train_x_embeded.append(get_embeding(trn)[0])\n test_y_predicted_new = predict_embedded_eucledian_distance(l2_normalize(test_x_embeded),\n l2_normalize(train_x_embeded[med_val_train:hight_board]),\n train_y_new[med_val_train:hight_board])\n for indx in range(len(test_y_predicted_new)):\n if test_y_predicted_new[indx] is None:\n test_y_predicted_new[indx] = 0\n\n\n print(\"Results:\")\n print(\"Accuracy:\", end=' ')\n print(np.mean([test_y_predicted_new[indx] == test_y_new[indx] for indx in range(hight_board)]))\n\n\n print(\"True Positive:\", end=' ')\n print(np.mean([test_y_predicted_new[indx] == test_y_new[indx] for indx in range(med_val_test)]))\n print(\"True Negative:\", end=' ')\n print(np.mean([test_y_predicted_new[indx] == test_y_new[indx] for indx in range(med_val_test, hight_board)]))\n print(\"False Positive:\", end=' ')\n print(np.mean([test_y_predicted_new[indx] == 1 for indx in range(med_val_test)]))\n print(\"False Negative:\", end=' ')\n print(np.mean([test_y_predicted_new[indx] == 0 for indx in range(med_val_test, hight_board)]))\n\n\n","sub_path":"src/facenet_example.py","file_name":"facenet_example.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52619233","text":"import sys\n\nimport pygame\nfrom pygame.sprite import Group\n\nfrom Button import Button\nimport game_functions as gf\nfrom hero import Hero\nfrom settings import Settings \nfrom mouse import Mouse\nfrom map import Map\nfrom logic_map import LogicMap\nfrom clock import Clock\n\nHOME = 0\nSETTING = 1\nPLAYING = 2\nEND = 3\n\n\ndef run_game():\n\n pygame.init()\n \n ai_settings=Settings()\n ai_settings.bgm.play(-1,0)\n\n # 窗口尺寸\n screen = pygame.display.set_mode((ai_settings.screen_width , ai_settings.screen_height))\n\n # 窗口标题\n pygame.display.set_caption(\"Bubble Legend\")\n \n # 创建鼠标 \n mouse = Mouse(screen) \n \n #按钮\n gameBegin = Button('images/new_game.png','images/new_game_down.png',(ai_settings.screen_width/2, ai_settings.screen_height*9/12),150,60)\n gameReturn = Button('images/return_menu.png','images/return_menu_down.png',(ai_settings.screen_width*5/6, ai_settings.screen_height/18),150,60)\n gameSetting = Button('images/game_set.png','images/game_set_down.png',(ai_settings.screen_width/2, ai_settings.screen_height*10/12),150,60)\n \n MAP_FIRST = Button('images/preview_map1.png','images/preview_map1_down.png',(ai_settings.screen_width*4/12, ai_settings.screen_height/2),320,240)\n MAP_SECOND = Button('images/preview_map2.png','images/preview_map2_down.png',(ai_settings.screen_width*8/12, ai_settings.screen_height/2),320,240)\n \n #屏幕状态\n game_menu = HOME\n map_choose = 1\n\n # 倒计时\n CLOCK_EVENT = pygame.USEREVENT + 1\n\n while True:\n if game_menu == HOME:\n pygame.time.set_timer(CLOCK_EVENT,0) #停止倒计时\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.blit(ai_settings.bg_image, (0,0))\n gameBegin.render(screen)\n gameSetting.render(screen)\n if event.type == pygame.MOUSEBUTTONUP:\n mouse.up(event.button)\n if gameBegin.is_over():\n game_menu = PLAYING\n pygame.time.set_timer(CLOCK_EVENT,1000) #开启倒计时\n # 倒计时\n myClock = Clock()\n # 初始化新游戏\n ai_settings.bgm.stop()\n # 创建一个地图,编号\n myMap = 
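predict_embedded_eucledian_distance above loops over every test/train pair in Python; the same accept-the-nearest-neighbour-under-a-threshold rule can be vectorised. A sketch assuming 2-D NumPy arrays of embeddings; the 0.75 threshold comes from the loop above, the function and argument names are illustrative:

import numpy as np

def match_embeddings(test_emb, train_emb, train_labels, threshold=0.75):
    # Pairwise Euclidean distances, shape (n_test, n_train).
    d = np.linalg.norm(test_emb[:, None, :] - train_emb[None, :, :], axis=-1)
    nearest = d.argmin(axis=1)
    accepted = d[np.arange(len(test_emb)), nearest] < threshold
    # None means "no known identity within the threshold", as in the loop above.
    return [train_labels[j] if ok else None for j, ok in zip(nearest, accepted)]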
Map(screen,map_choose)\n myMap.music.play(-1,0)\n myMap.music.set_volume(0.2)\n # 创建一个人物,图片路径/初始位置/编号\n player1 = Hero(screen,'images/Hero1.png',90,180,1)\n player2 = Hero(screen,'images/Hero2.png',1110,660,2)\n # 创建一个存储泡泡的编组\n bubbles = Group()\n # 计时器\n frame_rate = pygame.time.Clock()\n # 创建逻辑地图\n myLogicMap = LogicMap(screen,myMap.height,myMap.width)\n \n elif gameSetting.is_over():\n game_menu = SETTING\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse.down(event.button) \n mouse.blitme()\n pygame.display.update()\n \n elif game_menu == SETTING:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n screen.blit(ai_settings.setting_bg, (0,0))\n MAP_FIRST.render(screen)\n MAP_SECOND.render(screen)\n if event.type == pygame.MOUSEBUTTONUP:\n mouse.up(event.button)\n if MAP_FIRST.is_over():\n game_menu = HOME\n map_choose = 1\n elif MAP_SECOND.is_over():\n game_menu = HOME\n map_choose = 2\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse.down(event.button)\n mouse.blitme()\n pygame.display.update()\n\n elif game_menu == PLAYING:\n pygame.key.set_repeat(10)\n game_menu = gf.check_events(screen,ai_settings,player1,player2,bubbles,mouse,gameReturn,myMap,CLOCK_EVENT,myClock)\n game_menu = gf.update_screen(ai_settings,screen,player1,player2,bubbles,mouse,frame_rate,myMap,gameReturn,myLogicMap,myClock,game_menu) \n\n elif game_menu == END:\n while True:\n myMap.blitme()\n for bubble in bubbles:\n bubble.draw_bubble()\n myMap.draw_block()\n player1.blitme()\n player2.blitme()\n myClock.draw(screen)\n mouse.blitme()\n\n\n image = pygame.image.load('images/game_end.png').convert_alpha()\n screen.blit(image,(0,0))\n if player1.blood == player2.blood:\n pingju_image = pygame.image.load('images/win.png').convert_alpha()\n screen.blit(pingju_image,(150,350))\n screen.blit(pingju_image,(800,350))\n else:\n win_image = pygame.image.load('images/win.png').convert_alpha()\n lose_image = pygame.image.load('images/lose.png').convert_alpha()\n if player1.blood > player2.blood:\n screen.blit(win_image,(130,350))\n screen.blit(lose_image,(800,350))\n else:\n screen.blit(lose_image,(130,350))\n screen.blit(win_image,(800,350))\n pygame.display.flip()\n restart = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n game_menu = HOME\n restart = True\n myMap.music.stop()\n ai_settings.bgm.play(-1,0)\n\n if restart == True:\n break\n else:\n continue\n\nif __name__ == '__main__':\n # 游戏初始化\n pygame.init() \n \n run_game()\n","sub_path":"Bubble_legend.py","file_name":"Bubble_legend.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326629774","text":"class Account:\n def __init__(self, owner, balance=0):\n self.owner = owner\n self.balance = balance\n\n def depos(self, deposit):\n self.balance += deposit\n print(f\"vous deposez ${deposit}\")\n print(\"Deposit Accepted\")\n return f\"nouveau solde {self.balance}\\n\"\n\n def withdraw(self, withdraw):\n if (withdraw < self.balance):\n print(f\"Vous retirez ${withdraw}\")\n self.balance -= withdraw\n print(\"Withdraw Accepted\")\n else:\n print(\"Withdraw Refused\")\n return f\"nouveau solde ${self.balance}\\n\"\n\n def __str__(self):\n return f\"Account owner: {self.owner}\\nAccount balance: ${self.balance}\\n\"\n\n\nacct1 = Account('Julien', 
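run_game above is a finite-state machine (HOME/SETTING/PLAYING/END) driven by pygame's event queue. Stripped of assets, buttons and timers, the control pattern reduces to the sketch below; the window size and key bindings are arbitrary choices, not taken from the game:

import sys
import pygame

HOME, PLAYING = 0, 2  # same state constants as above
pygame.init()
screen = pygame.display.set_mode((320, 240))
state = HOME
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        # A click leaves the menu; ESC returns to it.
        if state == HOME and event.type == pygame.MOUSEBUTTONUP:
            state = PLAYING
        elif state == PLAYING and event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            state = HOME
    pygame.display.update()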
100)\nprint(acct1)\nprint(acct1.withdraw(50))\nprint(acct1.depos(100))\nprint(acct1)\n","sub_path":"Tests/Complete-Python-3-Bootcamp.py","file_name":"Complete-Python-3-Bootcamp.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275287931","text":"# -*- coding: utf-8 -*-\n\n\n# Cuando se genera una nota de credito,la variable invoice_name toma \n# el valor de la nota de credito y refound_name el de refound\n\n\nfrom odoo import models, fields, api\nfrom odoo.tools.misc import find_in_path\nfrom datetime import datetime, timedelta\nimport time\nimport pytz\nfrom pytz import timezone\nfrom dateutil import tz\nimport base64\nimport lxml.html\nimport os\nimport re\nimport tempfile\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nfrom string import Template\nfrom contextlib import closing\n\nclass AccountInvoiceInherit(models.Model):\n\t\"\"\"\n\tfiscal_reference: Hace referencia a el numero de la factura en la impresion fiscal\n\tfisca_file: Campo donde se guarda el archivo en formato binario\n\tfiscal_name: Nombre que tendra el archivo a descargar [FACTI|NCTI]-HS-XXX.txt\n\tfiscal_id: Codigo unico de identificacion de la maquina fiscal\n\tfiscal_datetime: fecha y hora en que fue impresa la factura y nota credito\n\t\"\"\"\n\t_inherit = \"account.invoice\"\n\tfiscal_reference = fields.Char(string=\"No. Fiscal\")\n\tfiscal_file = fields.Binary('Fiscal Text Report File')\n\tfiscal_name = fields.Char()\n\tfiscal_id = fields.Char(string=\"Impresora Fiscal\")\n\tfiscal_datetime = fields.Char(string=\"Fecha Impresa\")\n\n\n\t@api.multi\n\tdef download_fiscal_file(self):\n\t\ttype_invoice = \"Factura\"\n\t\tcontent_file_fd, content_file_path = tempfile.mkstemp(suffix='.txt', \n\t\t\t\t\t\t\t\t\t\t\t\tprefix='report.invoice.tmp.')\n\t\tDOC_NO_FISCAL = \"DocNoFiscal\"\n\t\tinvoice_type = \"FACTURA\"\n\t\tinvoice_pay = \"CONTADO\"\n\t\tline_cont = 0\n\n\t\tfor invoice in self:\n\t\t\tfile_name = \"FACTI-HS-\" + str(invoice.id) + \".txt\"\n\t\t\tclient_name = invoice.partner_id.name or 'CONTADO'\n\t\t\t#client_ruc = self.get_ruc_from_field(invoice.partner_id.vat or '00-0000-00000')\n\t\t\tclient_ruc = invoice.partner_id.vat or '00-0000-00000'\n\t\t\tclient_dv = self.get_dv_from_field(invoice.partner_id.vat or '00')\n\t\t\tclient_dir = self.get_client_direction(invoice.partner_id)\n\t\t\tinvoice_no = invoice.number or '0'\n\t\t\tself.invoice_name = \"FACTI\" + invoice_no\n\n\t\t\t#amount_off = self.get_total_amount_off(invoice)\n\t\t\tamount_off = \"0.00\"\n\t\t\tamount_close = str(invoice.amount_total) or '0.00'\n\t\t\tamount_total = str(invoice.amount_total) or '0.00'\n\n\t\t\tsurcharge1 = \"0.00\"\t\t\t#Temporalmente\n\t\t\tsurcharge2 = \"0.00\"\t\t\t#Temporalmente\n\t\t\t\n\t\t\t#payment_chash = amount_total\t#Temporalmente\n\t\t\tpayment_chash = \"0.00\"\t\t\t#Temporalmente\n\t\t\tpayment_check = \"0.00\"\t\t\t#Temporalmente\n\t\t\t#payment_check = str(invoice.amount_total) or '0.00'\n\t\t\tpayment_ccard = \"0.00\"\t\t\t#Temporalmente\n\t\t\tpayment_dcard = \"0.00\"\t\t\t#Temporalmente\n\t\t\tpayment_cnote = \"0.00\"\t\t\t#Temporalmente\n\t\t\tpayment_other = \"0.00\"\t\t\t#Temporalmente\n\n\t\t\tdate_invoice = self.get_date_invoice(invoice.date_invoice)\n\n\t\t\tdata_stream = \"\"\n\t\t\tinvoice_refund = invoice.refund_invoice_id or ''\n\t\t\tif type(invoice_refund) is not bool:\n\t\t\t\tfor refund in invoice_refund:\n\t\t\t\t\tself.invoice_name = \"NCTI\" + invoice_no\n\t\t\t\t\tfile_name = 
\"NCTI-HS-\" + str(invoice.id) + \".txt\"\n\t\t\t\t\trefound_fiscal_id = refund.fiscal_id\n\t\t\t\t\trefound_fiscal_no = refund.fiscal_reference\n\t\t\t\t\tinvoice_type = \"NOTA DE CREDITO\"\n\t\t\t\t\ttype_invoice = \"NotaCredito\"\n\n\t\t\t\t\trefound_name = invoice.origin\n\t\t\t\t\trefound_price = invoice.amount_untaxed\n\t\t\t\t\trefound_tax = invoice.amount_tax\n\t\t\t\t\trefound_note = self.get_refound_name(invoice)\n\t\t\t\t\trefound_date = date_invoice\n\t\t\t\t\tdate_invoice = self.get_date_invoice(refund.date_invoice)\n\t\t\t\t\ttime_invoice = self.get_time_invoice(invoice.create_date)\n\t\t\t\t\t\n\t\t\t\t\t#El valor de cliente_ruc es de 15 pero se alargo a 25\n\t\t\t\t\tdata_stream = \"{}{}{}{}{}{}{}{}{}{}{}{}{}\\r\\n\".format(\n\t\t\t\t\t\t\tself.add_field_cell('1',\t\t\t\t1),\n\t\t\t\t\t\t\tself.add_field_cell(self.invoice_name,\t20),\n\t\t\t\t\t\t\tself.add_field_cell(client_name,\t\t80),\n\t\t\t\t\t\t\tself.add_field_cell(client_ruc,\t\t\t25),\n\t\t\t\t\t\t\tself.add_field_cell(client_dir,\t\t\t150),\n\t\t\t\t\t\t\tself.add_field_cell(refound_price,\t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(refound_tax, \t\t10),\n\t\t\t\t\t\t\tself.add_field_cell(refound_note,\t\t150),\n\t\t\t\t\t\t\tself.add_field_cell(refound_date,\t\t10),\n\n\t\t\t\t\t\t\tself.add_field_cell(time_invoice,\t\t5),\n\t\t\t\t\t\t\tself.add_field_cell(refound_fiscal_id,\t20),\n\t\t\t\t\t\t\tself.add_field_cell(refound_fiscal_no, \t8),\n\t\t\t\t\t\t\tself.add_field_cell(refound_name,\t\t20),\n\t\t\t\t\t\t)\n\t\t\t\n\t\t\tif type_invoice == \"Factura\":\n\t\t\t\tdata_stream = \"{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}\\r\\n\".format(\n\t\t\t\t\t\t\tself.add_field_cell(self.invoice_name,\t20),\n\t\t\t\t\t\t\tself.add_field_cell(client_name,\t\t80),\n\t\t\t\t\t\t\tself.add_field_cell(client_ruc,\t\t\t18),\n\t\t\t\t\t\t\tself.add_field_cell(client_dir,\t\t\t150),\n\n\t\t\t\t\t\t\tself.add_field_cell(amount_off, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(amount_close,\t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(amount_total,\t\t19),\n\n\t\t\t\t\t\t\tself.add_field_cell(surcharge1,\t\t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(surcharge2,\t\t\t7),\n\n\t\t\t\t\t\t\tself.add_field_cell(payment_chash, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(payment_check,\t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(payment_ccard, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(payment_dcard, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(payment_cnote, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(payment_other, \t\t19),\n\t\t\t\t\t\t\tself.add_field_cell(client_dv,\t\t\t2),\n\t\t\t\t\t\t)\n\n\t\t\twith closing(os.fdopen(content_file_fd, 'w')) as content_file:\n\t\t\t\tcontent_file.write(data_stream)\n\n\n\t\t\tfor invoice_line in invoice.invoice_line_ids:\n\t\t\t\tline = self.get_invoice_line(invoice_line)\n\t\t\t\twith open(content_file_path, 'a') as content_file2:\n\t\t\t\t\tcontent_file2.write(line)\n\n\t\t\twith open(content_file_path, 'rb') as textreport:\n\t\t\t\tinvoice.fiscal_file = base64.encodestring(textreport.read())\n\t\t\ttry:\n\t\t\t\tos.unlink(content_file_path)\n\t\t\texcept (OSError, IOError):\n\t\t\t\t_logger.error('Error when trying to remove file %s' % content_file_path)\n\n\t\t\tinvoice.fiscal_name = file_name\n\t\treturn {\n\t\t\t'type': 'ir.actions.act_url',\n\t\t\t'target': 'new',\n\t\t\t'url': '/report/text?model=account.invoice&field=fiscal_file&id=%s'%(self.id),\n\t\t}\n\n\t\n\n\tdef get_file_content(self,id):\n\t\t\"\"\"\n\t\tObtenemos el nombre del archivo que llevara el documento txt\n\t\t\"\"\"\n\t\treturn 
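The report file above is read back and stored with base64.encodestring, which was long deprecated and removed in Python 3.9; base64.encodebytes is the drop-in replacement. A standalone check of that step, with a made-up payload standing in for textreport.read():

import base64

payload = b'sample fiscal report contents'  # stand-in for the temp file bytes
encoded = base64.encodebytes(payload)       # modern spelling of encodestring
assert base64.decodebytes(encoded) == payload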
self.browse(id).fiscal_file\n\n\n\tdef get_date_invoice(self, invoice_datetime):\n\t\t\"\"\"\n\t\tObtenemos la Fecha en que fue creada la nota credito dentro del Odoo\n\t\ty luego le aplicamos la diferencia horaria para obtener la hora UTC de\n\t\tAmerica - Bogota\n\t\t\"\"\"\n\t\tif type(invoice_datetime) == str:\n\t\t\tdate_invoice = datetime.strptime(invoice_datetime, \n\t\t\t\t\t\t\t\t'%Y-%m-%d').strftime('%d/%m/%Y') or ''\n\t\t\treturn date_invoice\n\t\telse:\n\t\t\tdate_invoice = invoice_datetime.strftime('%d/%m/%Y') or ''\n\t\t\treturn date_invoice\n\n\n\tdef get_time_invoice(self, invoice_datetime):\n\t\t\"\"\"\n\t\tObtenemos la Hora en que fue creada la nota credito dentro del Odoo\n\t\ty luego le aplicamos la diferencia horaria para obtener la hora UTC de\n\t\tAmerica - Bogota\n\t\t\"\"\"\n\t\tfrom_zone = tz.gettz('UTC')\n\t\tto_zone = tz.gettz('America/Bogota')\n\t\tif type(invoice_datetime) == str:\n\t\t\tutc_time = datetime.strptime(invoice_datetime, '%Y-%m-%d %H:%M')\n\t\t\tif utc_time != \"\":\n\t\t\t\tutc_time = utc_time.replace(tzinfo=from_zone)\n\t\t\t\tlocal_time = utc_time.astimezone(to_zone)\n\t\t\t\treturn local_time.strftime('%H:%M')\n\t\t\treturn invoice_datetime\n\t\telse:\n\t\t\tutc_time = invoice_datetime\n\t\t\tutc_time = utc_time.replace(tzinfo=from_zone)\n\t\t\tlocal_time = utc_time.astimezone(to_zone)\n\t\t\treturn local_time.strftime('%H:%M')\n\n\n\tdef get_ruc_from_field(self, vat_field):\n\t\t\"\"\"\n\t\tObtenemos el ruc sin el digito verificador.\n\t\tEste metodo esta fuera de uso\n\t\t\"\"\"\n\t\ttry:\n\t\t\tif vat_field == \"\":\n\t\t\t\treturn \"00-0000-00000\"\n\t\t\tif \" DV\" in vat_field:\n\t\t\t\truc = vat_field.split(\" DV\")[0]\n\t\t\t\treturn ruc\n\t\t\telse:\n\t\t\t\treturn vat_field\n\t\texcept:\n\t\t\treturn \"00\"\n\n\t\n\tdef get_dv_from_field(self, vat_field):\n\t\t\"\"\"\n\t\tObtenemos el Digito Verificador del cliente, sino existe agrega\n\t\tpor default el valor 00\n\t\t\"\"\"\n\t\ttry:\n\t\t\tif vat_field == \"\":\n\t\t\t\treturn \"00\"\n\t\t\tif \" \" in vat_field:\n\t\t\t\tsection = vat_field.split(\" \")\n\t\t\t\tif len(section) == 2:\n\t\t\t\t\tdv = section[1]\n\t\t\t\t\treturn dv[2:]\n\t\t\t\telif len(section) == 3:\n\t\t\t\t\treturn section[2]\n\t\t\telse:\n\t\t\t\treturn vat_field\n\t\texcept:\n\t\t\treturn \"00\"\n\n\t\n\t\"\"\"\n\tdef get_total_amount_off(self, invoice):\n\t\ttotal_off = 0.0\n\t\tfor invoice_line in invoice.invoice_line_ids:\n\t\t\tif invoice_line.discount:\n\t\t\t\t#Obtenemos el total sin descuento redondeado a 2 decimales\n\t\t\t\tprice = float(invoice_line.price_unit)\n\t\t\t\tquantity = float(invoice_line.quantity or '0.00')\n\t\t\t\titem_total = quantity * price\n\t\t\t\titem_total = float('{0:.2f}'.format(item_total))\n\n\t\t\t\t#Obtenemos el total con descuento readondeado a 2 decimales\n\t\t\t\tdiscount = (float(invoice_line.discount or '0.00'))/100\n\t\t\t\tamount_off = price - (price * discount)\n\t\t\t\titem_off = quantity * amount_off\n\t\t\t\titem_off = float('{0:.2f}'.format(item_off))\n\n\t\t\t\t#Obtenemos el descunto del producto restando a total, off y\n\t\t\t\t#Luego agregamos a el descuento total del movimiento\n\t\t\t\ttotal_off = total_off + (item_total - item_off)\n\t\treturn '{0:.2f}'.format(total_off)\n\t\"\"\"\n\n\t\n\t\n\tdef get_file_name(self, id):\n\t\t\"\"\"\n\t\tRealiza una busqueda dentro del reguistro para obtener el nombre del documento\n\t\tdel archivo\n\t\t\"\"\"\n\t\treturn self.browse(id).fiscal_name\n\t\n\n\n\tdef add_field_cell(self, content, 
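get_time_invoice above converts a UTC timestamp to America/Bogota time with dateutil. A standalone check of the same pattern: Bogota is UTC-5 with no daylight saving, so 15:30 UTC should print as 10:30 (the date is arbitrary):

from datetime import datetime
from dateutil import tz

utc_time = datetime(2020, 1, 1, 15, 30, tzinfo=tz.gettz('UTC'))
local_time = utc_time.astimezone(tz.gettz('America/Bogota'))
print(local_time.strftime('%H:%M'))  # 10:30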
columnWidth):\n\t\t\"\"\"\n\t\tFormatea un campo/valor para que cumpla con el estandar del archivo\n\t\tagregando al final del mismo el tabulador para cumplir con los\n\t\trequirimientos de la maquina fiscal. Si el valor es menor a\n\t\tcolumnWidth solo agrega el tabularo. Si el valor es mayor o igual a\n\t\ttres elementos menor que el valor permitido, elimina los ultimos\n\t\ttres elementos y los reemplaza con tres puntos suspensivos.\n\t\t\"\"\"\n\t\tnew_content = \"\"\n\t\tif type(content) is not bool:\n\t\t\tnew_content = str(content)\n\t\tlength = len(new_content)\n\t\tif length > columnWidth:\n\t\t\tnew_content = new_content[:columnWidth]\n\t\tnew_content = new_content + \"\\t\"\n\t\treturn new_content\n\n\n\n\tdef get_invoice_line(self, invoice_line):\n\t\t\"\"\"\n\t\tObtenemos el movimiento de la factura una linea a la vez\n\t\t\"\"\"\n\t\tproduct_code = str(invoice_line.product_id.default_code or '')\n\t\tdescription = str(invoice_line.product_id.name)\n\t\tquantity = str(invoice_line.quantity or '')\n\t\tprice = self.get_price_item(invoice_line)\n\t\t#price = str(invoice_line.price_unit)\n\t\tuom = self.get_uom_item(invoice_line)\n\t\ttotal = str(invoice_line.price_subtotal or '')\n\t\ttaxes = self.get_tax_item(invoice_line)\n\n\t\tif description == \"False\":\t#Description jamas debe ser False\n\t\t\treturn \"\"\n\n\t\tdata_stream = \"{}{}{}{}{}{}{}{}\\r\\n\".format(\n\t\t\t\tself.add_field_cell(self.invoice_name,\t20),\n\t\t\t\tself.add_field_cell(product_code,\t\t25),\n\t\t\t\tself.add_field_cell(description,\t\t80),\n\t\t\t\tself.add_field_cell(uom,\t\t\t\t20),\n\t\t\t\tself.add_field_cell(quantity, \t\t\t19),\n\t\t\t\tself.add_field_cell(price,\t\t\t\t19),\n\t\t\t\tself.add_field_cell(taxes,\t\t\t\t10),\n\t\t\t\tself.add_field_cell(2,\t\t\t\t\t10),\n\t\t)\n\t\treturn str(data_stream)\n\n\n\n\tdef get_price_item(self, invoice):\n\t\t\"\"\"\n\t\tObtenemos el precio del producto con cuatro digitos decimales para evitar \n\t\tinconvenientes entre la factura fiscal y el detalle en odoo\n\t\t\"\"\"\n\t\ttry:\n\t\t\tsubtotal = float(invoice.price_subtotal)\n\t\t\tquantity = float(invoice.quantity)\n\t\t\t\"\"\"\n\t\t\tdiscount = (float (invoice.discount or '0.00'))/100\n\t\t\ttotal = price - (price * discount)\n\t\t\treturn '{0:.2f}'.format(total)\n\t\t\t\"\"\"\n\t\t\ttotal = subtotal / quantity\n\t\t\tstrTotal = str(total)\n\t\t\tif \".\" in strTotal:\n\t\t\t\tarrayTotal = strTotal.split(\".\")\n\t\t\t\tintSection = arrayTotal[0]\n\t\t\t\tdecimalSection = arrayTotal[1]\n\t\t\t\tif len(decimalSection) > 4:\n\t\t\t\t\tdecimalSection = decimalSection[:4]\n\t\t\t\tstrTotal = intSection + \".\" + decimalSection\n\t\t\treturn str(strTotal)\n\t\texcept:\n\t\t\treturn str(invoice.price_unit or '0.00')\n\n\n\n\n\tdef get_uom_item(self, invoice):\n\t\t\"\"\"\n\t\tObtenemos la unidad de medida del producto\n\t\t\"\"\"\n\t\tuoms = invoice.uom_id\n\t\tif len(uoms) > 0:\n\t\t\tfor uom in uoms:\n\t\t\t\treturn uom.name\n\t\t\treturn \"c/u\"\n\t\telse:\n\t\t\treturn \"c/u\"\n\n\n\t\n\tdef get_tax_item(self, invoice):\n\t\t\"\"\"\n\t\tObtenemos el primer impuesto aplicado sobre el producto\n\t\t\"\"\"\n\t\ttaxes = invoice.invoice_line_tax_ids \n\t\tif len(taxes) > 0:\n\t\t\tfor tax in taxes:\n\t\t\t\tname = tax.name.split(\" \")[1]\n\t\t\t\treturn name[:-1]\n\t\t\treturn \"0\"\n\t\telse:\n\t\t\treturn \"0\"\n\n\n\n\tdef get_client_direction(self, client):\n\t\t\"\"\"\n\t\tObtenemos la direccion del cliente con el formato requerido por la \n\t\timpresora.\n\t\t\"\"\"\n\t\tstreet = 
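add_field_cell above truncates a value to its column width and terminates the cell with a tab. A hypothetical module-level version with quick checks, keeping the boolean guard of the method (booleans become empty cells); the name field_cell is invented to avoid shadowing the method:

def field_cell(content, column_width):
    # Stringify unless boolean, truncate to the column, then tab-terminate.
    text = '' if isinstance(content, bool) else str(content)
    return text[:column_width] + '\t'

assert field_cell('CONTADO', 4) == 'CONT\t'
assert field_cell(123.45, 19) == '123.45\t'
assert field_cell(True, 10) == '\t'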
client.street\n\t\tstreet = (' ' + street) if street != False else ''\n\t\t\n\t\tstreet2 = client.street2\n\t\tstreet2 = (' ' + street2) if street2 != False else ''\n\n\t\tstate = client.state_id.name\n\t\tstate = (' ' + state) if state != False else ''\n\t\t\n\t\tzipp = client.zip\n\t\tzipp = (' ' + zipp) if zipp != False else ''\n\n\t\tcity = client.city\n\t\tcity = (' ' + city) if city != False else ''\n\t\t\n\t\tcountry = client.country_id.name\n\t\tcountry = country if country != False else ''\n\t\t\n\t\treturn country + state + city + zipp + street + street2\n\n\t\n\n\tdef get_refound_name(self, refound):\n\t\t\"\"\"\n\t\tObtenemos el motivo por el cual fue rechazada la factura, la misma\n\t\tse agregara a la nota credito\n\t\t\"\"\"\n\t\tnote = refound.name\n\t\tif(len(note) > 150):\n\t\t\tnote = note[:147] + \"...\"\n\t\treturn note ","sub_path":"hs_invoice_fiscal/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":12856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482838300","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nfrom random import randrange\n\n\n# returns a random string from the line of a file\ndef get_random_string(filename):\n\n\tlines = get_lines(filename)\n\n\tnum = randrange(len(lines))\n\n\tline = lines[num]\n\treturn line.strip('\\n')\n\n# returns an array of the lines in a file\ndef get_lines(filename):\n\n handler = open(filename, \"r\")\n contents = handler.read()\n handler.close()\n\n lines = contents.split('\\n')\n\n return lines\n","sub_path":"mockusergenerator/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620126363","text":"import numpy as np\nfrom time import *\n\ndef RS_mdp_solver(rs_Mmdp, dis_RB, discount_fac):\n\t#states, action, trans_s1, trans_p, reward, rb, re, discount_fac):\n\t#initilize the parameters\n\t#curTime = clock()\n\n\tstates = rs_Mmdp.states\n\taction = rs_Mmdp.actions\n\ttrans_s1 = rs_Mmdp.adj\n\ttrans_p = rs_Mmdp.trans\n\treward = rs_Mmdp.rewards\n\trb = rs_Mmdp.RB\n\n\t#print rb[rs_Mmdp.cur_state]\n\n\tval1 = {}\n\tval2={}\n\tpolicy = {}\n\n\tfor s in states:\n\t\tval1[s]= val2[s] = 0.0\n\t\tpolicy[s] = 'e'\n\n\t# value iteration\n\n\t#if rs_Mmdp.H<6:\n\t#print rb['s63']\n\n\tfor i in range(rs_Mmdp.H):\n\t\t#print val1['s30']\n\n\n\t\tfor s in states:\n\t\t\n\t\t\t\n\t\t\tmax_val_s=-9999\n\n\t\t\tfor a in trans_s1[s]:\n\n\t\t\t\ttmp_futu_v = 0.0\t\n\t\t\t\tfor nxtS in trans_s1[s][a]:\n\t\t\t\t\ttmp_futu_v += trans_p[s][a][nxtS] * val1[nxtS]\n\t\t\t\t\n\t\t\t\ttmpVal = reward[s][a] + dis_RB * rb[s][a] + tmp_futu_v * discount_fac\n\n\t\t\t\tif tmpVal>max_val_s:\n\t\t\t\t\tpolicy[s]=a\n\t\t\t\t\tmax_val_s=tmpVal\n\t\t\tval2[s]=max_val_s\n\t\t\n\t\tfor s in states:\n\t\t\tval1[s] = val2[s]\n\n\t#print (clock() - curTime), 'seconds'\n\n\treturn policy\n\n\t# policy iteration\n\t# noChange = 0\n\n\t# while noChange == 0:\n\n\t# \tnoChange = 1\n\n\t# \tfor s in states:\n\t# \t\ttmp_futu_v = 0.0\n\t# \t\ta = policy[s]\n\t# \t\tif a in trans_s1[s]:\n\t# \t\t\tfor nxtS in trans_s1[s][a]:\n\t# \t\t\t\ttmp_futu_v += trans_p[s][a][nxtS] * val_improv[nxtS]\n\t# \t\t\tval_eval[s] = reward[s][a] + dis_RB * rb[s][a] + tmp_futu_v\n\n\t# \tfor s in states:\n\t# \t\tQbest = val_eval[s]\n\t# \t\tfor a in trans_s1[s]:\n\t# \t\t\ttemp_fv = 0.0\n\t# \t\t\tfor nxtS in trans_s1[s][a]:\n\t# \t\t\t\ttemp_fv += 
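RS_mdp_solver above is finite-horizon value iteration over dict-keyed states. The same Bellman backup on a hypothetical two-state chain makes the recurrence easy to verify by hand; every name and number below is invented:

# Hypothetical two-state chain: 'go' flips the state; reward 1 only in s1.
states = ['s0', 's1']
adj = {'s0': {'go': ['s1']}, 's1': {'go': ['s0']}}
trans = {'s0': {'go': {'s1': 1.0}}, 's1': {'go': {'s0': 1.0}}}
rewards = {'s0': {'go': 0.0}, 's1': {'go': 1.0}}
gamma, H = 0.9, 50

val = {s: 0.0 for s in states}
for _ in range(H):
    # Backup uses the previous sweep's values, as in the solver above.
    val = {s: max(rewards[s][a]
                  + gamma * sum(p * val[s2] for s2, p in trans[s][a].items())
                  for a in adj[s])
           for s in states}
print(val)  # approaches V(s1)=1/(1-gamma**2)~5.26, V(s0)=gamma/(1-gamma**2)~4.74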
trans_p[s][a][nxtS] * val_eval[nxtS]\n\t# \t\t\tQsa = reward[s][a] + dis_RB * rb[s][a] + temp_fv\n\n\t# \t\t\tif Qsa > Qbest:\n\t# \t\t\t\tpolicy[s] = a\n\t# \t\t\t\tval_improv[s] = Qsa\n\t# \t\t\t\tnoChange = 0\n\n\t# return policy\n\n\n\n\n\t# val = [np.zeros(len(states)), np.zeros(len(states))]\n\t# cur = 0\n\t# policy=np.zeros(len(states),np.int8)\n\t\n\t# noChange=0\n\n\t# count = 0\n\t# while noChange==0:\n\n\t# \tcur = 1 - cur\n\t# \tnoChange=1\n\n\t# \tfor i in range(len(states)):\n\t# \t\ts=states[i]\n\t# \t\ta=policy[states[i]]\n\t# \t\ttemp_futu_v=0\n\t# \t\tfor j in range(len(trans_s1[s][a])):\n\t# \t\t\ttemp_futu_v+=trans_p[s][a][j]*val[1 - cur][trans_s1[s][a][j]]\n\t# \t\tval[cur][s]=reward[s][a]+rb[s][a]+re[s][a]+discount_fac*temp_futu_v\n\n\t# \t# print val[cur]\n\n\t# \tcur = 1 - cur\n\n\t# \tfor s in states:\n\t# \t\tQbest=val[1 - cur][s]\n\t# \t\tfor a in action:\n\t# \t\t\ttemp_fv=0\n\t# \t\t\tfor j in range(len(trans_s1[s][a])):\n\t# \t\t\t\ttemp_fv+=trans_p[s][a][j]*val[1 - cur][trans_s1[s][a][j]]\n\t# \t\t\tQsa=reward[s][a]+rb[s][a]+re[s][a]+discount_fac*temp_fv\n\t\t\t\t\n\t# \t\t\tif(Qsa>Qbest):\n\t# \t\t\t\t#print Qsa, Qbest\n\t# \t\t\t\tpolicy[s]=a\n\t# \t\t\t\tval[cur][s]=Qsa\n\t# \t\t\t\tnoChange=0\n\n\t# \t# print count, val[cur]\n\t# \t# print count, policy\n\n\t# \tcount += 1\n\n\t# return [policy, val[cur]]\n\t\t\t\t\t\n\t\t\t\t\n","sub_path":"large_rockSample/rSmdpSolver.py","file_name":"rSmdpSolver.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568991565","text":"#The in and not in operators are used in expressions and connect two values. \r\n#They evaluate to a Boolean value.\r\n\r\n#checks if howdy is inside the list, and outputs either True or False. 
\r\nprint('howdy' in ['hello','hi','howdy','heyas'])\r\n\r\n#you can type a list and check if certain indexes are in it.\r\nspam = ['hello','hi','howdy','heyas']\r\n#checks if cat is inside the list spam\r\nprint('cat' in spam)\r\n#checks if howdy is not inside the list spam\r\nprint('howdy' not in spam)\r\nprint('cat' not in spam)\r\n\r\n#multiple assignment trick\r\n\r\n#lets you assign multiple variables with the values in a list in one line of code\r\ncat = ['fat', 'black', 'loud']\r\nsize, color, disposition = cat\r\n\r\n#instead of \r\ncat = ['fat', 'black', 'loud']\r\nsize = cat[0]\r\ncolor = cat[1]\r\ndisposition = cat[2]\r\n\r\n#Augmented Assignment Operators\r\n\r\n\"\"\"\r\noperators like spam = spam + 1 or spam = spam - 1 are instead \r\nspam += 1 or spam -= 1\r\n\"\"\"\r\n\r\n\"\"\"\r\nlist of augmented operators +=,-=,*=,/=,%=\r\n\"\"\"\r\n\r\ndam = 54\r\ndam += 1\r\ndam -= 1\r\n\r\ntram = 'hello'\r\ntram += ' world!'\r\nprint(tram)\r\n","sub_path":"Chapter 4 Lists/Chapter 4 Lists/6InandNotOps.py","file_name":"6InandNotOps.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421556002","text":"# from Package import CodeEntrance\n# from Package import BioModel\n# from Package import TreatmentProcess\n# from Package import Tank\n# from Package import Grid\n# from Package import Calculation\n# from Package import ResultSaved\n\n\n\nimport os\nimport sys\nworkdir = os.getcwd()\nworkdir.strip('tests')\nsys.path.append(workdir.strip('tests')+'/BioModelMatrix')\nsys.path.append(workdir.strip('tests'))\nfrom BioModel import *\nfrom TreatmentProcess import *\nfrom Tank import *\nfrom Grid import *\nfrom Calculation import *\n\nimport os\nimport numpy as np\nimport numba as nb\nimport pandas as pd\nimport xlrd\nimport operator\nimport time\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\n# %matplotlib inline\n\n\nBioModel = BioModel()\n\n\n\n\n\n\n\n# import fib1\n# import testfortran\n\n# print(testfortran.__doc__ )\n# x=testfortran.addsub(4,9)\n\n\n# f2py -m testfortran -c --f90exec=\"gfortran\" --f90flags=\"-L/usr/local/OpenBLAS/lib -lopenblas\" testfortran.f90\n\n\n# import Gui\n# import subprocess\n# import SystemParameterdata1\n\n# os.system('./run.sh') \n# subprocess.call([\"clear\"])\n# os.popen('python3 Gui.py')\n# subprocess.call([\"python3 Gui.py\"])\n# subprocess.call([\"ls\", \"-l\"])\n# os.chdir(aaa)\n# os.system('cd aaa')os.path.abspath(os.path.join(os.path.dirname('1.py'),os.path.pardir)) \n# aaa=os.path.abspath(os.path.join(os.path.dirname('Build.py'),os.path.pardir)) \n# os.system('cd Python')\n# print(aaa)\n# os.system('cd Python')\n# aaa=os.path.abspath('')\n# print(aaa)\n\n\n# metafile = 'metaCalculation.py'\n\n# BioModel\t\t= bg.BioModel_EAWAG()\n# # BioModel\t\t= bg.BioModel_EAWAG_Analysis()\n# # BioModel_ERP155\t\t= bg.BioModel_Exam_Rittm_P155()\n# Calculation_1\t= Calculation.Calculation()\n\n\n# Boundary\t\t= Tank_Boundary()\n# Tank_1\t\t\t= Tank_CSTR()\n# Tank_2\t\t\t= Tank_CSTR()\n# Tank_3\t\t\t= Tank_Settlling()\n\n# Tank_1.Aerated = True\n# Tank_1.DO = 5\n\n\n# Tank_1.Reset_Hydraulics()\n# Tank_2.Reset_Hydraulics()\n# Tank_3.Reset_Hydraulics()\n# Tank_1.Name = 'terationTank1'\n# Tank_2.Name = 'terationTank2'\n# Tank_3.Name = 'tettlingTank1'\n# Tank_1.Volume = 4.86/1000.00\n# Tank_3.Volume_Outlet = 1.77/1000.00\n# Tank_3.Volume_Blanket = 0.50/1000.00\n# Tank_3.SettlingFactor = 0.9984\n\n# # Calculation_1.connection(Boundary, 
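One extension to the membership-operator notes in the lesson file above: in and not in also test substrings and dictionary keys, not just list items:

print('howdy' in 'well howdy there')  # True: substring test on strings
print('hi' in {'hi': 1, 'hey': 2})    # True: tests the dictionary's keys
print('cat' not in 'concatenate')     # False: 'cat' occurs inside the word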
Tank_1, 'Flow[0]', 'InBioComponent')\n# # Calculation_1.connection(Tank_1, Boundary, 'Flow[0]', 'CalBioComponent[0]')\n# Calculation_1.connection(Boundary, Tank_1, 'Flow[0]', 'InBioComponent')\n# Calculation_1.connection(Tank_3, Tank_1, 'Flow[1]', 'CalBioComponent[1]', SettlingZone='Blanket')\n# Calculation_1.connection(Tank_1, Boundary, '(Flow[2])', 'CalBioComponent[0]')\n# Calculation_1.connection(Tank_1, Tank_3, ['(Flow[0]+Flow[1]-Flow[2])', 'Flow[1]'], 'CalBioComponent[0]')\n# Calculation_1.connection(Tank_3, Boundary, '(Flow[0]-Flow[2])', 'CalBioComponent[2]', SettlingZone='Outlet')\n\n\n\n\n\n\n\n\n# bg.get_BioModel(metafile, [Boundary, Tank_1, Tank_3])\n# Calculation_1.get_Calculation(metafile, BioModel, [Boundary, Tank_1, Tank_3])\n# import metaCalculation as cal\n\n\n\n\n# BioStoichoimetric = bg.BioStoichoimetric\n# BioParameter = bg.BioParameter\n# BioComponent = bg.BioComponent\n# BioComposition = bg.BioComposition\n# BioTested = bg.BioTested\n# BioProcess = bg.BioProcess\n# amount_BioComponent = BioModel.amount_BioComponent\n\n\n\n\n\n# gd = Grid.Grid()\n# delta_time = 60 #s\n# gd.Grid_CalcultionTime = 1000 # 13.8d = 3s!\n# gd.Grid_TraceTimeReactor = 100\n# # Q_in = 0.0001\n# # Q_r = 0.00001\n# # Q_w = 0.000001\n\n# Q_in = 17.0/1000\n# Q_r = 23.3/1000\n# Q_w = 0.7/1000\n\n\n# Flow = [Q_in, Q_r, Q_w]\n\n# gd.get_trace(delta_time, amount_BioComponent, BioComponent['Component'])\n# Trace_BioComponent = gd.Trace_BioComponent\n\n\n# CalBioParameter = np.array(BioParameter['DefaultValue'])\n\n\n# Boundary.snapshot_BioComponent = np.array(BioComponent['InitialValue/SteadyStateResult'])\n# Tank_1.snapshot_BioComponent = np.array(BioComponent['InitialValue/SteadyStateResult'])\n# Tank_3.snapshot_BioComponent_Blanket = np.array(BioComponent['InitialValue/SteadyStateResult'])\n# Tank_3.snapshot_BioComponent_Outlet = np.array(BioComponent['InitialValue/SteadyStateResult'])\n\n\n\n\n# Boundary.snapshot_BioComponent[0] = 0 #SO\n# Boundary.snapshot_BioComponent[1] = 0.282*253.00 #SS\n# Boundary.snapshot_BioComponent[2] = 17 #SI\n# Boundary.snapshot_BioComponent[3] = 18.00 #NH4\n# Boundary.snapshot_BioComponent[4] = 4.00 #NO3\n# Boundary.snapshot_BioComponent[5] = 0.00 #N2\n# Boundary.snapshot_BioComponent[6] = 0.00 #PO4\n# Boundary.snapshot_BioComponent[7] = 244.00/61.02 #ALK\n# Boundary.snapshot_BioComponent[8] = (1.00-17.00/253.00-0.282-0.635)*253 #XI\n# Boundary.snapshot_BioComponent[9] = 0.635*253 #XS\n# Boundary.snapshot_BioComponent[10:] = 0.0 #XH..TSS\n\n# # M_component_A_in = cell2mat(System.Influent.Component(2:14,3));\n# # T_component_A = [-234.516761863764\n# # 0.344505277567974\n# # 17.0000000000057\n# # 0.746342596652999\n# # 18.6462096977270\n# # 7.36064145405248\n# # 1.72012701865026\n# # 365.296875686376\n# # 177.785149759276\n# # 961.389170173820\n# # 213.008708360878\n# # 59.5898977690354\n# # 1453.99790524856\n# # ];\n# # T_component_r_Inlet = [ -234.516761863765\n# # 0.369419266512845\n# # 17.0000000000057\n# # 0.773290842895577\n# # 18.3393880239032\n# # 7.66746312787640\n# # 1.74396772722643\n# # 365.345618702491\n# # 176.654551609129\n# # 961.632970702888\n# # 212.951662379220\n# # 59.5825743795784\n# # 1453.36511573468\n# # ];\n# # T_component_r_Blanket = [-234.516761863768\n# # 0.362456234716170\n# # 17.0000000000061\n# # 1.14151327840029\n# # 13.9435913668100\n# # 12.0632597849700\n# # 2.08425480526916\n# # 620.635735392827\n# # 284.170685418957\n# # 1635.30226971942\n# # 360.220411999380\n# # 100.998165099213\n# # 2457.40745414388\n# # ];\n# # T_component_r_Outlet = 
[ -234.516761863780\n# # 0.347647113697431\n# # 17.0000000000075\n# # 0.777081649551237\n# # 18.2916909943220\n# # 7.71516015746104\n# # 1.74764542981478\n# # 1.42790073846587\n# # 0.541191137222781\n# # 3.77522275001063\n# # 0.814758387506714\n# # 0.230455270320608\n# # 5.57078415756575\n# # ];\n\n\n# # Influent{1,1} = 'Influent';\t\tInfluent{1,2} = 'Unit'; \t\t\tInfluent{1,3} = 'Value';\t\t\t\t\t\n# # Influent{2,1} = 'COD';\t253.00\t\t\tInfluent{2,2} = 'gCOD/m3';\t\t\tInfluent{2,3} = Influent_COD;\t\t\t\n# # Influent{3,1} = 'SI';\t\t17.00\t\tInfluent{3,2} = 'gCOD/m3';\t\t\tInfluent{3,3} = Influent_SS;\t\t\n# # Influent{4,1} = 'SS';\t\t0.282*253.00\t\tInfluent{4,2} = 'gCOD/m3';\t\t\tInfluent{4,3} = Influent_SI;\n# # Influent{5,1} = 'XS';\t\t0.635*253\t\tInfluent{5,2} = 'gCOD/m3';\t\t\tInfluent{5,3} = Influent_XS;\t\n# # Influent{6,1} = 'XI';\t\t(1.00-17.00/253.00-0.282-0.635)*253\t\tInfluent{6,2} = 'gCOD/m3';\t\t\tInfluent{6,3} = Influent_XI;\t\n# # Influent{7,1} = 'NH4';\t\t18.00\t\tInfluent{7,2} = 'gN/m3';\t\t\tInfluent{7,3} = Influent_NH4;\t\n# # Influent{8,1} = 'NO3';\t\t4.00\t\tInfluent{8,2} = 'gN/m3';\t\t\tInfluent{8,3} = Influent_NO3;\n# # Influent{9,1} = 'ALK';\t\t\t 244.00/61.02\tInfluent{9,2} = 'molHCO3-/m3';\t\tInfluent{9,3} = Influent_ALK;\t\t\t\t\n\n\n# # 17.00/253.00,0.282,0.635,1.00-17.00/253.00-0.282-0.635, ...\n# # 18.00, ...\n# # 4.00, ...\n# # 244.00/61.02);\n\n\n\n\n# Boundary.get_FlowBalnceCheck(Flow)\n# Tank_1.get_FlowBalnceCheck(Flow)\n# Tank_3.get_FlowBalnceCheck(Flow)\n\n# BioModel.BioParameter_Value = np.array(BioModel.BioParameter['DefaultValue'])\n\n# ModelBalanceCheck = cal.get_CalModelBalanceCheck(BioModel)\n\n# # Tank_1.Volume = 10\n# # Tank_2.Volume = 10\n# # Volume = [Tank_1.Volume, Tank_2.Volume]\n\n# start_time = time.time()\n# TreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\n# end_time = time.time()\n# print ('run time:'+str(end_time - start_time)+'s')\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[0].iloc[comp,:].plot()\n# plt.title('Total1')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('total1.jpg',dpi=300)\n# plt.show()\n\n\n\n\n\n\n\n\n\n\n\n# metafile = 'metaCalculation_ERP155.py'\n# BioModel\t\t\t= BioModel.BioModel_Exam_Rittm_P155()\n# Boundary\t\t\t= Tank_Boundary()\n# Tank\t\t\t\t= Tank_CSTR()\n# gd \t\t\t\t\t= Grid.Grid()\n# Calculation\t\t\t= Calculation.Calculation()\n# TreatmentProcess \t= TreatmentProcess.TreatmentProcess()\n\n# Q_in \t\t\t\t= 1000\n# Volume \t\t= 2000\n\n# Flow_List \t\t\t= [Q_in]\n# Tank_List\t\t\t= [Boundary, Tank]\n\n# BioModel.get_BioModel(metafile)\n# BioModel.BioParameter_Value = np.array(BioModel.BioParameter['DefaultValue'])\n\n# Boundary.snapshot_BioComponent \t= np.array(BioModel.BioComponent['SteadyStateInfluent'])\n# snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n\n# delta_time = 1 #s\n# gd.Grid_CalcultionTime = 10000 # 13.8d = 3s!\n# gd.Grid_TraceTimeReactor = 100\n# gd.get_trace(delta_time, BioModel.amount_BioComponent, BioModel.BioComponent['Component'])\n\n# Calculation.connection(Boundary, Tank, 'Flow[0]', 'InBioComponent')\n# Calculation.connection(Tank, Boundary, 'Flow[0]', 'CalBioComponent[0]')\n# Boundary.get_FlowBalnceCheck(Flow_List)\n# get_FlowBalnceCheck(Flow_List)\n# Calculation.get_Calculation(metafile, BioModel, Tank_List)\n# import metaCalculation_ERP155 as cal\n# ModelBalanceCheck = cal.get_CalModelBalanceCheck(BioModel)\n# 
TreatmentProcess.get_System(BioModel, Tank_List, Flow_List, gd, Calculation)\n\n# start_time = time.time()\n# TreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\n# end_time = time.time()\n# print ('run time:'+str(end_time - start_time)+'s')\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[0].iloc[comp,:].plot()\n# plt.title('Total1')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('total1.jpg',dpi=300)\n# plt.show()\n\n\n\n\n\n\n\n\n# metafile = 'metaCalculation.py'\n# BioModel\t\t= BioModel.BioModel_EAWAG()\n# # BioModel\t\t= BioModel.BioModel_EAWAG_Analysis()\n# Tank\t\t\t\t= Tank_CSTR()\n# gd \t\t\t\t\t= Grid.Grid()\n# Calculation\t\t\t= Calculation.Calculation()\n# TreatmentProcess \t= TreatmentProcess.TreatmentProcess()\n\n# Name \t\t\t= 'tank1'\n# Volume \t\t= 2000\n# Aerated\t\t= False\n# # Aerated\t\t= True\n# DO \t\t\t= 7\n# Flow_List \t\t\t= [0]\n# Tank_List\t\t\t= [Tank]\n\n# BioModel.get_BioModel(metafile, Tank_List)\n# BioModel.BioParameter_Value = np.array(BioModel.BioParameter['DefaultValue'])\n# snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n\n# delta_time = 1 #s\n# gd.Grid_CalcultionTime = 10000 # 13.8d = 3s!\n# gd.Grid_TraceTimeReactor = 100\n# gd.get_trace(delta_time, BioModel.amount_BioComponent, BioModel.BioComponent['Component'])\n\n# # get_FlowBalnceCheck(Flow_List)\n# Calculation.get_Calculation(metafile, BioModel, Tank_List)\n# import metaCalculation as cal\n# ModelBalanceCheck = cal.get_CalModelBalanceCheck(BioModel)\n# TreatmentProcess.get_System(BioModel, Tank_List, Flow_List, gd, Calculation)\n\n# start_time = time.time()\n# TreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\n# end_time = time.time()\n# print ('run time:'+str(end_time - start_time)+'s')\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[0].iloc[comp,:].plot()\n# plt.title('Total1')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('total1.jpg',dpi=300)\n# plt.show()\n\n# TreatmentProcess.ResultSaved[0].iloc[0,:].plot()\n# plt.title('DO')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('DO.jpg',dpi=300)\n# plt.show()\n\n\n\n\n\n\n\n# AAO\n# metafile = 'metaCalculation.py'\n# # BioModel\t\t= BioModel.BioModel_EAWAG()\n# BioModel\t\t= BioModel.BioModel_EAWAG_Analysis()\n\n# Boundary \t\t\t= Tank_Boundary()\n# tank1\t\t\t\t= Tank_CSTR()\n# tank2\t\t\t\t= Tank_CSTR()\n# Tank3\t\t\t\t= Tank_CSTR()\n# Tank4\t\t\t\t= Tank_Settlling()\n# gd \t\t\t\t\t= Grid.Grid()\n# Calculation\t\t\t= Calculation.Calculation()\n# TreatmentProcess \t= TreatmentProcess.TreatmentProcess()\n\n# tank1.Aerated\t\t= False\n# tank2.Aerated\t\t= False\n# Tank3.Aerated\t\t= True\n# Tank4.Aerated\t\t= False\n# Tank3.DO \t\t\t= 7\n\n# Q_in \t\t\t\t= 1000\n# Q_r_1\t\t\t\t= 100\n# Q_r_2\t\t\t\t= 200\n# Q_r_3\t\t\t\t= 300\n# Q_w\t\t\t\t\t= 10\n\n# Flow_List \t\t\t= [Q_in, Q_r_1, Q_r_2, Q_r_3, Q_w]\n# Tank_List\t\t\t= [Boundary, tank1, tank2, Tank3, Tank4]\n\n# # tank2.BioCalculated = False\n\n# Boundary.Reset_Hydraulics()\n# tank1.Reset_Hydraulics()\n# tank2.Reset_Hydraulics()\n# Tank3.Reset_Hydraulics()\n# Tank4.Reset_Hydraulics()\n# tank1.Name = 'AnaTank'\n# tank2.Name = 'AnoTank'\n# Tank3.Name = 'AerationTank'\n# Tank4.Name = 'SettlingTank'\n\n# tank1.Volume \t\t= 100\n# tank2.Volume \t\t= 250\n# Tank3.Volume \t\t= 500\n# Tank4.Volume_Outlet = 150\n# 
Tank4.Volume_Blanket = 20\n# Tank4.SettlingFactor = 0.995270858\n\n# Calculation.connection(Boundary, tank1, 'Flow[0]', 'InBioComponent')\n# Calculation.connection(tank2, tank1, 'Flow[1]', 'CalBioComponent[1]')\n# Calculation.connection(tank1, tank2, '(Flow[0]+Flow[1])', 'CalBioComponent[0]')\n\n# Calculation.connection(Tank3, tank2, 'Flow[2]', 'CalBioComponent[2]')\n# Calculation.connection(tank2, Tank3, '(Flow[0]+Flow[2])', 'CalBioComponent[1]')\n\n# # Calculation.connection(Tank3, Tank4, '(Flow[0]+Flow[3])', 'CalBioComponent[2]')\n# Calculation.connection(Tank3, Tank4, ['(Flow[0]+Flow[3])', '(Flow[3]+Flow[4])'], 'CalBioComponent[2]')\n# Calculation.connection(Tank4, Tank3, 'Flow[3]', 'CalBioComponent[3]', SettlingZone='Blanket')\n\n# Calculation.connection(Tank4, Boundary, '(Flow[0]-Flow[4])', 'CalBioComponent[4]', SettlingZone='Outlet')\n# Calculation.connection(Tank4, Boundary, 'Flow[4]', 'CalBioComponent[3]', SettlingZone='Blanket')\n\n\n# BioModel.get_BioModel(metafile, Tank_List)\n# BioModel.BioParameter_Value = np.array(BioModel.BioParameter['DefaultValue'])\n\n# Boundary.snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['SteadyStateInfluent'])\n# tank1.snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n# tank2.snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n# Tank3.snapshot_BioComponent \t\t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n# Tank4.snapshot_BioComponent_Blanket = np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n# Tank4.snapshot_BioComponent_Outlet \t= np.array(BioModel.BioComponent['InitialValue/SteadyStateResult'])\n\n# delta_time = 1 #s\n# gd.Grid_CalcultionTime = 1000000# 13.8d = 3s!\n# gd.Grid_TraceTimeReactor = 100\n# gd.get_trace(delta_time, BioModel.amount_BioComponent, BioModel.BioComponent['Component'])\n\n# Boundary.get_FlowBalnceCheck(Flow_List)\n# tank1.get_FlowBalnceCheck(Flow_List)\n# tank2.get_FlowBalnceCheck(Flow_List)\n# Tank3.get_FlowBalnceCheck(Flow_List)\n# Tank4.get_FlowBalnceCheck(Flow_List)\n\n# Calculation.get_Calculation(metafile, BioModel, Tank_List)\n# import metaCalculation as cal\n# ModelBalanceCheck = cal.get_CalModelBalanceCheck(BioModel)\n# TreatmentProcess.get_System(BioModel, Tank_List, Flow_List, gd, Calculation)\n\n# start_time = time.time()\n# TreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\n# end_time = time.time()\n# print ('run time:'+str(end_time - start_time)+'s')\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[0].iloc[comp,:].plot()\n# plt.title('AnaTank')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('Anajpg',dpi=300)\n# plt.show()\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[1].iloc[comp,:].plot()\n# plt.title('AnoxicTank')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('Anoxicjpg',dpi=300)\n# plt.show()\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[2].iloc[comp,:].plot()\n# plt.title('AerationTank')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('Aerationjpg',dpi=300)\n# plt.show()\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[3].iloc[comp,:].plot()\n# plt.title('Blanket')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, 
ncol = 2) \n# plt.savefig('Blanket.jpg',dpi=300)\n# plt.show()\n\n\n# for comp in range(TreatmentProcess.BioModel.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[4].iloc[comp,:].plot()\n# plt.title('Effluent')\n# plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('Effluent.jpg',dpi=300)\n# plt.show()\n\n# # TreatmentProcess.ResultSaved[0].iloc[0,:].plot()\n# # plt.title('DO')\n# # plt.legend(BioModel.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# # plt.savefig('DO.jpg',dpi=300)\n# # plt.show()\n\n\n# bb = TreatmentProcess.ResultSaved\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#CAS\nmetafile = 'metaCalculation.py'\n\nbg\t\t\t= BioModel_ASM3()\nBoundary\t= Tank_Boundary()\ntank1\t\t= Tank_CSTR(name='tank1', volume=4.86/1000.00, Constant_DO=True, DO=7.35, BioCalculated=True)\ntank2\t\t= Tank_Settlling(name='tank2', volume_Outlet=1.77/1000.00, volume_Blanket=0.50/1000.00, Constant_DO=True, DO=0.0, BioCalculated=False,SettlingFactor=0.9984)\n\n# Boundary \t\t\t= Tank_Boundary()\n# tank1\t\t\t\t= Tank_CSTR()\n# tank2\t\t\t\t= Tank_Settlling()\ngd \t\t\t\t\t= Grid()\nCalculation\t\t\t= Calculation()\nTreatmentProcess \t= TreatmentProcess()\n\ntank1.Aerated\t\t= True\ntank2.Aerated\t\t= True\ntank1.DO \t\t\t= 7.35\ntank2.DO \t\t\t= 0\nQ_in \t\t\t\t= 17/1000\nQ_r\t\t\t\t\t= 23.3/1000\nQ_w\t\t\t\t\t= 0.7/1000\nFlow_List \t\t\t= [Q_in, Q_r, Q_w]\nTank_List\t\t\t= [Boundary, tank1, tank2]\n\ntank2.BioCalculated = True\nBoundary.Reset_Hydraulics()\ntank1.Reset_Hydraulics()\ntank2.Reset_Hydraulics()\ntank1.Name = 'AerationTank'\ntank2.Name = 'SettlingTank'\ntank1.Volume \t\t= 4.86/1000.00\ntank2.Volume_Outlet = 1.77/1000.00\ntank2.Volume_Blanket = 0.50/1000.00\ntank2.SettlingFactor = 0.9984\n\nCalculation.connection(Boundary, tank1, 'Flow[0]', 'InBioComponent')\nCalculation.connection(tank2, tank1, 'Flow[1]', 'CalBioComponent[1]', SettlingZone='Blanket')\nCalculation.connection(tank1, Boundary, 'Flow[2]', 'CalBioComponent[0]')\nCalculation.connection(tank1, tank2, ['(Flow[0]+Flow[1]-Flow[2])', 'Flow[1]'], 'CalBioComponent[0]')\nCalculation.connection(tank2, Boundary, '(Flow[0]-Flow[2])', 'CalBioComponent[2]', SettlingZone='Outlet')\nbg.get_BioModel(metafile, Tank_List)\n\n\nbg.BioParameter_Value = np.array(bg.BioParameter['CalValue'])\n\nbg.BioParameter_Value[13] = 0.65/bg.BioParameter_Value[11]\nbg.BioParameter_Value[12] = (bg.BioParameter_Value[19]*bg.BioParameter_Value[11])/(1-(1-bg.BioParameter_Value[19])*bg.BioParameter_Value[11])\nbg.BioParameter_Value[14] = (bg.BioParameter_Value[19]*bg.BioParameter_Value[13])/(1-(1-bg.BioParameter_Value[19])*bg.BioParameter_Value[13])\n\n\niTSS_XI = bg.BioParameter_Value[7] \niTSS_XS = bg.BioParameter_Value[8] \niTSS_STO = bg.BioParameter_Value[9] \niTSS_BM = bg.BioParameter_Value[10] \n\n\nS_in_average = 255\nf_si = 17/S_in_average # mgCOD/mgCOD,S_I/S_in_average\nf_ss = 0.282 # mgCOD/mgCOD,S_S/S_in_average\nf_xs = 0.635 # mgCOD/mgCOD,X_S/S_in_average\nf_xi = 1-f_si-f_ss-f_xs # mgCOD/mgCOD,X_I/S_in_average\n\nS_O2_in = 0\nS_S_in = S_in_average*f_ss\nS_I_in = S_in_average*f_si\nS_NH4_in = 18\nS_NO3_in = 4\nS_N2_in = 0\nS_ALK_in = 244/61.02 # molHCO3-/m3 => gHCO3-/m3 times [HCO3-]61.02g/mol\nX_I_in = S_in_average*f_xi\nX_S_in = S_in_average*f_xs\nX_H_in = 0\nX_STO_in = 0\nX_A_in = 0\nX_TSS_in = iTSS_XI*X_I_in+iTSS_XS*X_S_in+iTSS_BM*X_H_in+iTSS_STO*X_STO_in+iTSS_BM*X_A_in\n\n\nBoundary.snapshot_BioComponent = np.array([\nS_O2_in, \nS_S_in, \nS_I_in, \nS_NH4_in, \nS_NO3_in, \nS_N2_in, \nS_ALK_in, \nX_I_in, \nX_S_in, 
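The influent fractions above (f_si, f_ss, f_xs, f_xi) are defined so they partition the average influent COD exactly; a quick closure check with the same numbers:

S_in_average = 255
f_si = 17 / S_in_average
f_ss = 0.282
f_xs = 0.635
f_xi = 1 - f_si - f_ss - f_xs
# The four fractions must sum to one by construction.
assert abs(f_si + f_ss + f_xs + f_xi - 1.0) < 1e-12
print(f_xi * S_in_average)  # particulate inert COD in gCOD/m3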
\nX_H_in, \nX_STO_in, \nX_A_in, \nX_TSS_in\n])\n\n\n\n# tank1.snapshot_BioComponent = np.array([\n# -233.468505154948,\n# 0.407629268828981,\n# 17.0000000000006,\n# 0.286638762233892,\n# 20.3051952463343,\n# 5.80039572476926,\n# 1.56879206271974,\n# 413.578188883430,\n# 124.492447391926,\n# 847.033448144565,\n# 524.749731549206,\n# 46.9626402702960,\n# 1799.63068949416\n# ])\n\n# tank2.snapshot_BioComponent_Blanket = np.array([\n# -233.468505154948,\n# 0.356897777933088,\n# 17.0000000000006,\n# 0.534357370058898,\n# 16.4525034901770,\n# 9.65308748092665,\n# 1.86167851728991,\n# 702.683226112182,\n# 193.764014980442,\n# 1442.13459304412,\n# 890.778278753576,\n# 79.5959265833855,\n# 3046.51037191940\n# ])\n\n# tank2.snapshot_BioComponent_Outlet = np.array([\n# -233.468505154950,\n# 0.354722695905891,\n# 17.0000000000007,\n# 0.308635357023458,\n# 20.0082860924934,\n# 6.09730487861085,\n# 1.59157104476480,\n# 1.61681328050463,\n# 0.346076321021057,\n# 3.33935095191125,\n# 2.04518233768774,\n# 0.181619074493563,\n# 6.95118828462326\n# ])\n\n\ntank1.snapshot_BioComponent = np.array([\n-231.6380882555696,\n0.42384582697599715,\n17.0000000000006,\n0.2844871783631804,\n20.03728359272546,\n5.44857215764154,\n1.5877749248437547,\n403.64523377050466,\n126.62335464846498,\n828.3915927013306,\n567.439895833225,\n45.80528467895646,\n1807.7935589396134\n])\n\ntank2.snapshot_BioComponent_Blanket = np.array([\n-231.63808825557226,\n0.3777119150033844,\n17.0000000000006,\n0.48964192399950135,\n16.439419280788176,\n9.046436469579646,\n1.859419143241908,\n685.715187099764,\n198.95749890638126,\n1409.9505894539586,\n963.5196252876837,\n77.64391621608516,\n3061.1478135164894\n])\n\ntank2.snapshot_BioComponent_Outlet = np.array([\n-231.63808825558294,\n0.37821034496550376,\n17.0000000000007,\n0.2869419150091184,\n19.995169011193397,\n5.490686739176512,\n1.590958447570786,\n1.578317595368001,\n0.34957300652551077,\n3.2679591191896256,\n2.2172517340605835,\n0.17711231844233583,\n6.985590369320437\n])\n\n\ntank1.snapshot_BioComponent \t\t= np.array(bg.BioComponent['InitialValue/SteadyStateResult'])\ntank2.snapshot_BioComponent_Blanket = np.array(bg.BioComponent['InitialValue/SteadyStateResult'])\ntank2.snapshot_BioComponent_Outlet \t= np.array(bg.BioComponent['InitialValue/SteadyStateResult'])\n\n\n\n\n\n\ndelta_time = 10 #s\ngd.Grid_CalcultionTime = 1000000 # 13.8d = 3s!\ngd.Grid_TraceTimeReactor = 100\ngd.get_trace(delta_time, bg.amount_BioComponent, bg.BioComponent['Component'])\n\nBoundary.get_FlowBalnceCheck(Flow_List)\ntank1.get_FlowBalnceCheck(Flow_List)\ntank2.get_FlowBalnceCheck(Flow_List)\n\nCalculation.get_Calculation(metafile, BioModel, Tank_List)\nimport metaCalculation as cal\nModelBalanceCheck = cal.get_CalModelBalanceCheck(BioModel)\nTreatmentProcess.get_System(BioModel, Tank_List, Flow_List, gd, Calculation)\n\nstart_time = time.time()\nTreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\nend_time = time.time()\nprint ('run time:'+str(end_time - start_time)+'s')\n\n\n\nplottarget = TreatmentProcess.ResultSaved[0]\naa = pd.DataFrame(np.dot(cal.get_CalBioTested(bg.BioParameter_Value), plottarget), index = bg.BioTested.index, columns = plottarget.columns)\nfor comp in range(TreatmentProcess.bg.amount_BioTested):\n\taa.iloc[comp,:].plot()\nplt.title('CAS_AerationTank')\nplt.legend(aa.index, loc = 0, ncol = 2) \nplt.savefig('CAS_Aerationjpg',dpi=300)\nplt.show()\n\nplottarget = TreatmentProcess.ResultSaved[2]\naa = pd.DataFrame(np.dot(cal.get_CalBioTested(bg.BioParameter_Value), plottarget), index 
= bg.BioTested.index, columns = plottarget.columns)\nfor comp in range(TreatmentProcess.bg.amount_BioTested):\n\taa.iloc[comp,:].plot()\nplt.title('CAS_Outlet')\nplt.legend(aa.index, loc = 0, ncol = 2) \nplt.savefig('CAS_Outlet.jpg',dpi=300)\nplt.show()\n\n# for comp in range(TreatmentProcess.bg.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[2].iloc[comp,:].plot()\n# plt.title('CAS_Outlet')\n# plt.legend(bg.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('CAS_Outlet.jpg',dpi=300)\n# plt.show()\n\nbb = TreatmentProcess.ResultSaved\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# metafile = 'metaCalculation_ERP236.py'\n# BioModel\t\t\t= bg.BioModel_Exam_Rittm_P236()\n# tank1\t\t\t\t= Tank_CSTR()\n# gd \t\t\t\t\t= Grid.Grid()\n# Calculation\t\t\t= Calculation.Calculation()\n# TreatmentProcess \t= TreatmentProcess.TreatmentProcess()\n\n# tank1.Name \t\t\t= 'tank1'\n# tank1.Aerated\t\t= False\n# Q_in \t\t\t\t= 0\n# Flow_List \t\t\t= [Q_in]\n# Tank_List\t\t\t= [tank1]\n\n# tank1.Reset_Hydraulics()\n# tank1.Name = 'terationTank1'\n# tank1.Volume \t\t= 390\n\n# bg.get_BioModel(metafile, Tank_List)\n# bg.BioParameter_Value = np.array(bg.BioParameter['DefaultValue'])\n\n# tank1.snapshot_BioComponent \t\t= np.array(bg.BioComponent['InitialValue/SteadyStateResult'])\n# tank1.snapshot_BioComponent[1]=10\n# # tank1.snapshot_BioComponent[1]=1\n\n# delta_time = 1 #s\n# gd.Grid_CalcultionTime = 86400 # 13.8d = 3s!\n# gd.Grid_TraceTimeReactor = 100\n# gd.get_trace(delta_time, bg.amount_BioComponent, bg.BioComponent['Component'])\n\n\n# Calculation.get_Calculation(metafile, BioModel, Tank_List)\n# import metaCalculation_ERP236 as cal\n# TreatmentProcess.get_System(BioModel, Tank_List, Flow_List, gd, Calculation)\n\n# start_time = time.time()\n# TreatmentProcess.ResultSaved = cal.calc(TreatmentProcess)\n# end_time = time.time()\n# print ('run time:'+str(end_time - start_time)+'s')\n\n# for comp in range(TreatmentProcess.bg.amount_BioComponent):\n# \tTreatmentProcess.ResultSaved[0].iloc[comp,:].plot()\n# plt.title('Total1')\n# plt.legend(bg.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# plt.savefig('total1.jpg',dpi=300)\n# plt.show()\n\n# # TreatmentProcess.ResultSaved[0].iloc[0,:].plot()\n# # plt.title('DO')\n# # plt.legend(bg.BioStoichoimetric.columns, loc = 0, ncol = 2) \n# # plt.savefig('DO.jpg',dpi=300)\n# # plt.show()\n\n\n# bb = TreatmentProcess.ResultSaved\n\n\n\n\n\n\n\n\n\n\n\n\n# for comp in range(amount_BioComponent):\n# \tTrace_BioComponent[0].iloc[comp,:].plot()\n# plt.title('Total1')\n# plt.legend(bg.BioTested.index, loc = 0, ncol = 2) \n# plt.savefig('total1.jpg',dpi=300)\n# plt.show()\n\n# Trace_BioComponent[0].iloc[10,:].plot()\n# plt.scatter(Trace_BioComponent[0].columns,Trace_BioComponent[0].iloc[10,:])\n# plt.title('X_H1')\n# plt.savefig('X_H1.jpg',dpi=300)\n# plt.show()\n\n\n# Trace_BioComponent[0].iloc[16,:].plot()\n# plt.scatter(Trace_BioComponent[0].columns,Trace_BioComponent[0].iloc[10,:])\n# plt.title('TSS_Aeration')\n# plt.savefig('TSS_Aeration.jpg',dpi=300)\n# plt.show()\n\n# aa = pd.DataFrame(np.dot(cal.get_CalBioTested(CalBioParameter), Trace_BioComponent[0]), index = bg.BioTested.index, columns = Trace_BioComponent[0].columns)\n# aa.iloc[0,:].plot()\n# aa.iloc[1,:].plot()\n# aa.iloc[4,:].plot()\n# aa.iloc[5,:].plot()\n# aa.iloc[10,:].plot()\n\n# plt.title('Aeration',fontsize=18)\n# plt.xlabel('Time (min)',fontsize=18)\n# plt.ylabel('mg/L',fontsize=18)\n# plt.legend(['TCOD', 'SCOD', 'NH4', 'NO3', 'TSS'], loc = 0, ncol = 2) \n# # plt.xlim(0,60)\n# plt.ylim(0,2000)\n# 
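The plots above turn raw model states into measured quantities by left-multiplying the result table with the matrix returned by get_CalBioTested. The same operation on a hypothetical 2x3 observation matrix and a toy trajectory (all values below are invented):

import numpy as np
import pandas as pd

obs = np.array([[1.0, 1.0, 0.0],   # e.g. a COD-like sum of two states
                [0.0, 0.6, 0.9]])  # e.g. a TSS-like weighted sum
states = pd.DataFrame(np.arange(15.0).reshape(3, 5))  # 3 components, 5 times
measured = pd.DataFrame(np.dot(obs, states.values),
                        index=['TCOD', 'TSS'], columns=states.columns)
print(measured)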
plt.savefig('total_Aeration.jpg',dpi=300)\n# plt.show()\n\n\n# aa = pd.DataFrame(np.dot(cal.get_CalBioTested(CalBioParameter), Trace_BioComponent[2]), index = bg.BioTested.index, columns = Trace_BioComponent[0].columns)\n# aa.iloc[0,:].plot()\n# aa.iloc[1,:].plot()\n# aa.iloc[4,:].plot()\n# aa.iloc[5,:].plot()\n# aa.iloc[10,:].plot()\n\n# plt.title('Settling Outlet',fontsize=18)\n# plt.xlabel('Time (min)',fontsize=18)\n# plt.ylabel('mg/L',fontsize=18)\n# plt.legend(['TCOD', 'SCOD', 'NH4', 'NO3', 'TSS'], loc = 0, ncol = 2) \n\n# # plt.xlim(0,60)\n# plt.ylim(0,20)\n# plt.savefig('total_Settling_Outlet.jpg',dpi=300)\n# plt.show()\n\n\n\n\n# for comp in range(amount_BioComponent):\n# \tTrace_BioComponent[1].iloc[comp,:].plot()\n# plt.title('Total2')\n# plt.savefig('total2.jpg',dpi=300)\n# plt.show()\n\n\n# Trace_BioComponent[1].iloc[10,:].plot()\n# plt.scatter(Trace_BioComponent[1].columns,Trace_BioComponent[1].iloc[10,:])\n# plt.title('X_H2')\n# plt.savefig('X_H2.jpg',dpi=300)\n# plt.show()\n\n\n# aa = pd.DataFrame(np.dot(cal.get_CalBioTested(CalBioParameter), Trace_BioComponent[0]), index = bg.BioTested.index, columns = Trace_BioComponent[0].columns)\n# for comp in range(bg.amount_BioTested):\n# \taa.iloc[comp,:].plot()\n# plt.title('Total2',fontsize=18)\n# plt.xlabel('Time (min)',fontsize=18)\n# plt.ylabel('mg/L',fontsize=18)\n# plt.legend(bg.BioTested.index, loc = 0, ncol = 2) \n\n# plt.savefig('total2.jpg',dpi=300)\n# plt.show()\n\n\n\n\n\n\n\n","sub_path":"sludge/tests/main_CAS_old.py","file_name":"main_CAS_old.py","file_ext":"py","file_size_in_byte":29794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534738010","text":"import re\n\ndef message(pryme, message, source, target):\n\tif pryme.nick in message:\n\t\tif \"join\" in message:\n\t\t\tchannel = parseChannel(message)\n\t\t\tpryme.debug(\"Joining channel: \"+channel)\n\t\t\tpryme.server.join(channel)\n\t\tif \"part\" in message:\n\t\t\tchannel = parseChannel(message)\n\t\t\tif not channel:\n\t\t\t\tchannel = target\n\t\t\tpryme.debug(\"Leaving channel: \"+channel)\n\t\t\tpryme.server.part(channel)\n\ndef parseChannel(message):\n\tchannel = 0\n\ttry:\n\t\tp = re.compile('[#&][^\\x07\\x2C\\s]{0,200}')\n\t\tm = p.search(message)\n\t\tchannel = m.group()\n\tfinally:\n\t\treturn channel\n","sub_path":"joiner.py","file_name":"joiner.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553232536","text":"\r\nfrom enum import Enum\r\nfrom enum import IntEnum\r\nimport random\r\nmyList=([\"Range\" ,\"Melee\" ,\"Mage\" ])\r\nkone_choice = (random.choice(myList))\r\nprint([kone_choice])\r\nYeet = {\"Yeet\": 10}\r\nSkeet = {\"Skeet\": 20}\r\nJkeet = {\"Jkeet\": 30}\r\npeep = {\"health\": 100}\r\nplayer = {\"2\"} #Range\r\ntts = {\"3\"} #Mage\r\npps = {\"4\"} #Melee\r\nmyListt=([pps, player, tts ])\r\nchoice = (random.choice(myListt))\r\nprint (choice)\r\nhyokkays_lista = []\r\npartial_deck = []\r\nMina_hyok = []\r\nKone_hyok = []\r\n # choice = [\"pps\"]\r\n# peep[\"health\"] = peep[\"health\"] - Yeet[\"Yeet\"] \r\n\r\nif kone_choice == 'Range':\r\n peep[\"health\"] = peep[\"health\"] - Yeet[\"Yeet\"] \r\nelif kone_choice == 'Melee':\r\n peep[\"health\"] = peep[\"health\"] - Skeet[\"Skeet\"] \r\nelif kone_choice == 'Mage':\r\n peep[\"health\"] = peep[\"health\"] - Jkeet[\"Jkeet\"] \r\n\r\n #elif stroke == par - 2:\r\n# text = 'Eagle!'\r\n # elif stroke == par - 1:\r\n # text = 'Birdie!'\r\n # 
elif stroke == par:\r\n # text = 'Par'\r\n# elif stroke == par + 1:\r\n # text = 'Bogey :('\r\n # elif stroke == par + 2:\r\n # text = 'Double Bogey :('\r\n # elif stroke == par + 3:\r\n # text = 'Triple Bogey :('\r\n # else:\r\n # text = '+ ' + str(stroke - par) + ' :('\r\n#player_choice = input()\r\n #Commands reduce the opponent's health points according to the chosen attack\r\n#if player_choice == \"1\":\r\n \r\n # if choice == [player\"2\"]:\r\n # print(\"You won\")\r\n # peep[\"health\"] = peep[\"health\"] - Yeet[\"Yeet\"] \r\n\r\n # else:\r\n # print(\"You lost\")\r\n #If the player's health reaches 0, the computer wins\r\n\r\n\r\nclass Card(IntEnum):\r\n Melee = 10\r\n Range = 10\r\n Mage = 10\r\n\r\nclass Suit(Enum):\r\n Melee = 'Melee'\r\n Range = 'Range'\r\n Mage = 'Mage'\r\n\r\nclass PlayingCard:\r\n def __init__(self, card_value, card_suit):\r\n self.card = card_value\r\n self.suit = card_suit\r\n #Function that builds the deck\r\ndef create_deck():\r\n for suit in Suit:\r\n for card in Card:\r\n hyokkays_lista.append(PlayingCard(Card(card), Suit(suit)))\r\n return hyokkays_lista\r\n#for i in range(0, len(hyokkays_lista)):\r\n # print(\"Card:\", hyokkays_lista[i].card)\r\n # print(\"Suit: \", hyokkays_lista[i].suit)\r\n #Draws a single random card\r\ndef draw_card(deck):\r\n rand_card = random.randint(0, len(deck) - 1)\r\n return deck.pop(rand_card)\r\n# Deal two players for the game of war\r\ndef deal_war():\r\n while(len(partial_deck) > 0):\r\n Mina_hyok.append(draw_card(partial_deck))\r\n Kone_hyok.append(draw_card(partial_deck))\r\n\r\ncreate_deck()\r\npartial_deck = list(hyokkays_lista)\r\n\r\ntest_card = draw_card(partial_deck)\r\nprint(\"You drew a: \", test_card.suit)\r\ndeal_war()\r\n #if kone_choice == ['Mage']:\r\n # print(\"hell yeah\")\r\n #else:\r\n # print(\"YEEEH\")\r\n \r\nfor i in range(0, len(Mina_hyok)):\r\n if (Mina_hyok[i].card > Kone_hyok[i].card):\r\n print(\"You won the game \", Mina_hyok[i].card)\r\n print(\"The computer lost \", Kone_hyok[i].card)\r\n elif (Mina_hyok[i].card < Kone_hyok[i].card):\r\n print(\"The computer won \", Kone_hyok[i].card)\r\n print(\"You lost the game \", Mina_hyok[i].card)\r\n \r\n else:\r\n print(\"WARRRRRR\")\r\n\r\nprint(peep[\"health\"])\r\nprint(Yeet[\"Yeet\"])\r\n\r\n#player = {\"name\": \"Kimi\", \"Melee\" : 10, \"Mage\" : 10, \"Range\" : 10, \"heal\" : r1, \"health\": 100 \r\n\r\n","sub_path":"var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"25736203","text":"import os\nimport pdb\nimport matplotlib\nimport numpy as np\nmatplotlib.use('Agg')\nimport scipy.misc as sc\nfrom acarpog.util import *\nfrom matplotlib import pyplot as pt\n#-------------------------------------------------------------------------\n# Functions\n#-------------------------------------------------------------------------\ndef mean_epoch(val,ep_bound):\n \"\"\"\n Function mean_epoch\n Function implemented to get the Mean Loss Error for a network.\n Universidad de los Andes.\n Felipe Torres Figueroa\n f.torres11@uniandes.edu.co\n\n \"\"\"\n tot_iter = val.shape[0];\n vec_norm = np.arange(0,tot_iter,ep_bound);\n vec = np.arange(1,tot_iter//ep_bound + 1);vec_ep = []\n for x in range(len(vec_norm)):\n if x == 0:\n vec_ep.append(np.mean(val[vec_norm[x]:vec_norm[x]+\n ep_bound]));\n else:\n vec_ep.append(np.mean(val[vec_norm[x]+1:vec_norm[x]+\n ep_bound+1]));\n return vec,np.asarray(vec_ep)\n#-------------------------------------------------------------------------\ndef 
gen_diagnostics(log_dir,log_file):\n \"\"\"\n Function gen_diagnostics\n Function implemented to generate the learning curve for a\n convolutional neural network trained in caffe. \n Universidad de los Andes.\n Felipe Torres Figueroa\n f.torres11@uniandes.edu.co\n\n \"\"\"\n\n op = open(os.path.join(log_dir,log_file),'r');\n lines = op.readlines();\n head = lines[0].split();\n if head[2] == 'TestAccuracy':\n phase = 'Test';\n ac_lab = 'TestLoss'\n col = 'r';\n else:\n phase = 'Train';\n ac_lab = 'Training Loss'\n col = 'b';\n iters = []; secs = []; acc = []; lr = [];\n for x in range(len(lines)-1):\n if x is not 0:\n sp = lines[x].split();\n iters.append(sp[0]);\n secs.append(sp[1]);\n acc.append(sp[2]);\n if phase == 'Train':\n if x < len(lines):\n lr.append(sp[3]);\n iters = np.asarray(iters);secs = np.asarray(secs);\n acc = np.asarray(acc);\n iters = iters.astype(float);secs = secs.astype(float);\n acc = acc.astype(float);\n if phase == 'Train':\n it_ep = int(input(\"Enter the Iters per Epoch value: \"));\n iters,acc = mean_epoch(acc,it_ep);\n else:\n iters = np.arange(1,acc.shape[0] +1)\n # Plot Iter vs Loss\n fig, p1 = pt.subplots()\n p1.plot(iters,acc,color = col);\n p1.set_ylabel(ac_lab);p1.set_xlabel('Epoch');\n p1.set_title(ac_lab+' vs Epoch');\n p1.grid()\n pt.savefig(os.path.join(log_dir,phase+'.png'));\n\n#-------------------------------------------------------------------------\ndef pred_bin(umb,lans,lab):\n \"\"\"\n Function pred_bin(umb,lans,lab)\n Function implemented to predict if an image belongs to one class in a\n binary approach.\n Universidad de los Andes.\n Felipe Torres Figueroa\n f.torres11@uniandes.edu.co\n\n \"\"\"\n\n lans = lans.tolist();\n lans = lans[0];\n if lab<=umb:\n res = 0;\n else:\n res = 1;\n pos = np.where(lans == np.amax(lans));\n pos = pos[0];pos = pos[0];\n if pos == res:\n return 1;\n else:\n return 0;\n\n#-------------------------------------------------------------------------\ndef pyt_lcurv(e_lst,tr_dat,te_dat,s_dir=os.getcwd()):\n tr_lo = map(float,tr_dat[0]);tr_ac = map(float,tr_dat[1]);\n te_lo = map(float,te_dat[0]);te_ac = map(float,te_dat[1]);\n e_lst = map(float,e_lst);\n f1, (ax1,ax2) = pt.subplots(1, 2,sharey=False,sharex=False)\n ax1.set_title('Training and Test Losses')\n ax1.plot(e_lst,tr_lo, 'bo-',label=\"Train\")\n ax1.plot(e_lst,te_lo, 'ro-',label=\"Test\")\n ax1.legend(loc=2)\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_yscale('linear')\n ax1.grid()\n\n ax2.set_title('Training and Test Accuracy')\n ax2.plot(e_lst,tr_ac, 'bo-',label=\"Train\")\n ax2.plot(e_lst,te_ac, 'ro-',label=\"Test\")\n ax2.legend(loc=2)\n ax2.set_ylabel('Accuracy')\n ax2.set_xlabel('Epochs')\n ax2.set_yscale('linear')\n ax2.grid()\n\n pt.savefig(os.path.join(s_dir,'net_train.pdf'))\n pt.close(f1)\n#-------------------------------------------------------------------------\ndef colour_scheduler(iter_state):\n v_cols = ['ro-','go-','bo-','co-','yo-','ko-']\n v_ans = ['bin-acc','annually-acc','monthly-acc']\n return v_cols[iter_state],v_ans[iter_state]\n\n#-------------------------------------------------------------------------\ndef organize_list(arr):\n arr_srt = np.zeros((len(arr[0]),len(arr)))\n for x in range(len(arr)):\n for y in range(len(arr[0])):\n arr_srt[y,x] = arr[x][y]\n return arr_srt\n#-------------------------------------------------------------------------\ndef mloss_curv(e_lst,tr_dat,te_dat,m_acc_tr,m_acc_te,s_dir = os.getcwd()):\n tr_lo = map(float,tr_dat[0]);tr_ac = map(float,tr_dat[1]);\n te_lo = map(float,te_dat[0]);te_ac 
= map(float,te_dat[1]);\n e_lst = map(float,e_lst);\n f1, (ax1,ax2) = pt.subplots(1, 2,sharey=False,sharex=False)\n ax1.set_title('Training and Test Losses')\n ax1.plot(e_lst,tr_lo, 'bo-',label=\"Train\")\n ax1.plot(e_lst,te_lo, 'ro-',label=\"Test\") \n ax1.legend(loc=2)\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Loss')\n ax1.set_yscale('linear')\n ax1.grid()\n\n ax2.set_title('Training and Testing Mean Accuracies')\n ax2.plot(e_lst,tr_ac, 'bo-',label=\"Train\")\n ax2.plot(e_lst,te_ac, 'ro-',label=\"Test\");\n ax2.legend(loc=2)\n ax2.set_xlabel('Epochs')\n ax2.set_yscale('linear')\n ax2.set_ylabel('Accuracy')\n ax2.grid()\n \n pt.savefig(os.path.join(s_dir,'net_train.pdf'))\n pt.close(f1)\n\n f2, (ax3,ax4) = pt.subplots(1,2,sharey=False,sharex=False)\n ax3.set_title('Training Individual Accuracies')\n tr_accs = organize_list(m_acc_tr)\n for x in range(len(m_acc_tr[0])):\n [col,lab] = colour_scheduler(x)\n ax3.plot(e_lst,tr_accs[x], col,label = lab)\n ax3.legend(loc=2)\n ax3.set_xlabel('Epochs')\n ax3.set_ylabel('Accuracy')\n ax3.set_yscale('linear')\n ax3.grid()\n \n ax4.set_title('Testing Individual Accuracies')\n te_accs = organize_list(m_acc_te)\n for x in range(len(m_acc_te[0])):\n [col,lab] = colour_scheduler(x)\n ax4.plot(e_lst,te_accs[x], col,label = lab)\n ax4.legend(loc=2)\n ax4.set_xlabel('Epochs')\n ax4.set_ylabel('Accuracy')\n ax4.set_yscale('linear')\n ax4.grid()\n \n pt.savefig(os.path.join(s_dir,'net_accs.pdf'))\n pt.close(f2)\n#-------------------------------------------------------------------------\ndef ac_cur(c_lab,c_pos,nam,plaz):\n lm_ag, lm_ac = corr_sor(c_lab,c_pos)\n t_avm = np.mean(lm_ac)\n f1,p1 = pt.subplots()\n p1.plot(lm_ag,lm_ac, color= 'r')\n p1.set_title(nam+' average Accuracy '+\n \"{0:.2f}\".format(t_avm)+ ' % Validation Set')\n p1.grid();p1.set_xlabel('Class IDs'); p1.set_ylabel('Accuracy')\n pt.savefig(os.path.join(plaz,nam))\n pt.close(f1)\n\n#-------------------------------------------------------------------------\ndef reg_res(vec_clas,vec_loss,nam,plaz):\n n_subj = np.sum(vec_clas)\n sum_ls = np.sum(vec_loss)\n MSE = float(sum_ls)/float(n_subj)\n op = open(os.path.join(plaz,nam),'w')\n op.write('MSE Loss: '+\"{:.8f}\".format(MSE))\n op.close()\n\n","sub_path":"diagnostics/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"554388236","text":"import requests\n\ndef compareImages(file_sourcename, filedestination_name, api_key):\n\n r = requests.post(\n \"https://api.deepai.org/api/image-similarity\",\n files={\n 'image1': open(file_sourcename, 'rb'),\n 'image2': open(filedestination_name, 'rb'),\n },\n headers={'api-key': api_key}\n )\n return r.json()\n\n","sub_path":"Utility/AIImageVerification.py","file_name":"AIImageVerification.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"239302120","text":"from collections import OrderedDict\nfrom typing import Any\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation\nfrom django.db import models\nfrom django.db.models.fields import files\nfrom django.db.models.query import QuerySet\nfrom rest_framework import serializers\nfrom rest_framework.utils.serializer_helpers import ReturnDict\nimport sys\n\n\nclass NestedObject(object):\n def __init__(self,\n field_name: str,\n serializer_class: Any,\n 
key_field_name=None,\n m2m_field=None,\n many=False,\n is_o2o=False,\n is_required=False,\n is_parent_dependent=False,\n filter_name=None,\n filter_method=None) -> None:\n self.field_name = field_name\n self.__serializer_class = serializer_class\n self.key_field_name = key_field_name\n self.m2m_field = m2m_field\n self.is_parent_dependent = is_parent_dependent\n self.is_o2o = is_o2o\n self.is_required = is_required\n self.many = many\n self.filter_name = filter_name\n self.filter_method = filter_method\n self.received_data = None\n\n @property\n def serializer_class(self) -> type:\n sc = self.__serializer_class\n if isinstance(sc, type):\n return sc\n _package, _class = sc.rsplit('.', 1)\n return getattr(sys.modules[_package], _class)\n\n @property\n def model(self):\n return self.serializer_class.Meta.model\n\n def update_or_create_instances(self, key='id', parent=None) -> list:\n def update_or_create_instance(data, parent) -> models.Model:\n def create_model(data) -> models.Model:\n def append_filter_if_required():\n name = self.field_name\n if self.filter_method:\n data[self.filter_name] = self.filter_method(name)\n\n try:\n append_filter_if_required()\n serializer = self.serializer_class(data=data)\n serializer.is_valid(raise_exception=True)\n instance = self.serializer_class().create(\n serializer.validated_data)\n except IntegrityError as e:\n raise SuspiciousOperation(f\"Creating {self.model} failed. {e}\")\n return instance\n\n def update_model(data) -> models.Model:\n instance = self.instance_of_pk(data[key])\n try:\n serializer = self.serializer_class(instance=instance,\n data=data)\n serializer.is_valid(raise_exception=True)\n instance = serializer.update(serializer.instance,\n serializer.validated_data)\n except IntegrityError as e:\n raise SuspiciousOperation(\n f\"Updating {self.model} failed. {e}\")\n except Exception as e:\n raise SuspiciousOperation(f\"Updating {self.model} failed. {e}\")\n return instance\n\n if parent:\n data[self.key_field_name] = parent.pk\n return update_model(data) if key in data else create_model(data)\n\n instances = []\n for data in self.received_data:\n if isinstance(data, dict):\n instances.append(update_or_create_instance(data, parent))\n else:\n instances.append(self.instance_of_pk(pk=data))\n return instances\n\n def convert_nested_object_ids_to_dicts(self) -> list:\n def serialize_model(data) -> dict:\n instance = data if isinstance(data, models.Model) \\\n else self.instance_of_pk(pk=data)\n if instance:\n return self.serializer_class(instance).data\n return None\n\n resulted_dicts = []\n for data in self.received_data:\n resulted_dicts.append(serialize_model(data))\n return resulted_dicts\n\n def remove_missing_nested_objects(self, instance, key='id') -> None:\n def delete_objects():\n objects = getattr(instance, self.field_name)\n if type(objects) is not QuerySet:\n objects = self.model.objects\n objects = objects.exclude(pk__in=received_pks).filter(\n **{self.key_field_name: instance})\n objects.delete()\n\n def remove_objects_link() -> None:\n m2m_existing_pk = getattr(instance, self.m2m_field).values_list(\n key, flat=True).all()\n pks_to_remove = [x for x in m2m_existing_pk\n if x not in received_pks]\n for pk in pks_to_remove:\n # getattr(instance, self.m2m_field).filter(pk=pk).delete()\n getattr(instance, self.m2m_field).remove(pk)\n\n received_pks = [x[key] for x in self.received_data if x and key in x]\n if self.m2m_field:\n remove_objects_link()\n elif self.is_parent_dependent:\n delete_objects()\n\n def instance_of_pk(self, pk) -> models.Model:\n if pk:\n try:\n return self.model.objects.get(pk=pk)\n except self.model.DoesNotExist:\n pass\n return None\n\n\nclass NestedSerializer(serializers.ModelSerializer):\n class Meta:\n nested_objects: list\n computed: list\n abstract = True\n\n def create(self, data) -> models.Model:\n data = self._put_not_dependent_instances_from_nested_objects(data)\n instance = super().create(data)\n data = self._update_or_create_dependent_objs_from_no(\n instance, data)\n return instance\n\n def update(self, instance, data) -> models.Model:\n def remove_missing_nested_objects(instance) -> None:\n for no in self.Meta.nested_objects:\n if no.received_data is not None and (\n no.is_parent_dependent or no.m2m_field):\n no.remove_missing_nested_objects(instance)\n\n remove_missing_nested_objects(instance)\n data = self._put_not_dependent_instances_from_nested_objects(data)\n instance = super().update(instance, data)\n data = self._update_or_create_dependent_objs_from_no(instance, data)\n return instance\n\n def to_internal_value(self, data) -> OrderedDict:\n def take_out_computed_from_data(data) -> dict:\n for key in self.Meta.computed:\n if key in data:\n data.pop(key)\n return data\n\n def take_out_nested_objects(data) -> dict:\n for no in self.Meta.nested_objects:\n if no.field_name in data:\n no_data = data.pop(no.field_name)\n no.received_data = no_data if no.many else [no_data]\n return data\n\n data = take_out_computed_from_data(data)\n data = take_out_nested_objects(data)\n return super().to_internal_value(data)\n\n def to_representation(self, data) -> OrderedDict:\n def convert_nested_object_ids_to_dicts(data) -> dict:\n for no in self.Meta.nested_objects:\n no_data = data[no.field_name]\n if no_data is not None:\n no.received_data = no_data if no.many else [no_data]\n dicts = 
no.convert_nested_object_ids_to_dicts()\n data[no.field_name] = dicts if no.many else dicts[0]\n return data\n\n def remove_parent_form_nested_objects(data) -> dict:\n def remove_parent(field) -> dict:\n if type(field) is ReturnDict and no.key_field_name in field:\n field.pop(no.key_field_name)\n return field\n\n for no in self.Meta.nested_objects:\n if no.key_field_name:\n if type(data[no.field_name]) is list:\n for field in data[no.field_name]:\n remove_parent(field)\n else:\n remove_parent(data[no.field_name])\n return data\n\n data = super().to_representation(data)\n data = convert_nested_object_ids_to_dicts(data)\n data = remove_parent_form_nested_objects(data)\n return data\n\n def _put_not_dependent_instances_from_nested_objects(self, data) -> dict:\n def put_not_dependent_instance(no):\n objects_list = no.update_or_create_instances()\n data[no.field_name] = objects_list if no.many else objects_list[0]\n\n for no in self.Meta.nested_objects:\n if no.received_data and not no.is_parent_dependent:\n put_not_dependent_instance(no)\n return data\n\n def _update_or_create_dependent_objs_from_no(self, instance, data) -> dict:\n def update_or_create_dependent_instances(no):\n objects_list = no.update_or_create_instances(parent=instance)\n # data[no.field_name] = objects_list if no.many else objects_list[0]\n if no.is_o2o:\n setattr(instance, no.field_name, objects_list[0])\n instance.save()\n\n for no in self.Meta.nested_objects:\n if no.received_data and no.is_parent_dependent:\n update_or_create_dependent_instances(no)\n return data\n","sub_path":"code/rest_framework_addons/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"642039144","text":"# Copyright (c) 2018-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nPREDICTED_POSE = 0\nGT_POSE = 1\nALL_POSE = -1\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation, writers\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport subprocess as sp\n\ndef get_resolution(filename):\n command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',\n '-show_entries', 'stream=width,height', '-of', 'csv=p=0', filename]\n with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:\n for line in pipe.stdout:\n w, h = line.decode().strip().split(',')\n return int(w), int(h)\n\ndef read_video(filename, skip=0, limit=-1):\n w, h = get_resolution(filename)\n \n command = ['ffmpeg',\n '-i', filename,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vsync', '0',\n '-vcodec', 'rawvideo', '-']\n \n i = 0\n with sp.Popen(command, stdout = sp.PIPE, bufsize=-1) as pipe:\n while True:\n data = pipe.stdout.read(w*h*3)\n if not data:\n break\n i += 1\n if i > skip:\n yield np.frombuffer(data, dtype='uint8').reshape((h, w, 3))\n if i == limit:\n break\n \ndef downsample_tensor(X, factor):\n length = X.shape[0]//factor * factor\n return np.mean(X[:length].reshape(-1, factor, *X.shape[1:]), axis=1)\n\ndef render_animation(keypoints, \n poses, \n skeleton, \n fps, \n bitrate, \n azim, \n output, \n viewport,\n limit=-1, \n downsample=1, \n size=6, \n input_video_path=None, \n input_video_skip=0):\n\n \"\"\"\n TODO\n Render an animation. 
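A minimal call sketch (values here are illustrative only; kps, poses_3d and skel are assumed to be prepared upstream as the 2D keypoints, a dict of titled 3D pose arrays, and a Skeleton):\n render_animation(kps, {'Reconstruction': poses_3d}, skel, fps=50, bitrate=3000, azim=70.0, output='out.mp4', viewport=(1000, 1002))\n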
The supported output modes are:\n -- 'interactive': display an interactive figure\n (also works on notebooks if associated with %matplotlib inline)\n -- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...).\n -- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).\n -- 'filename.gif': render and export the animation a gif file (requires imagemagick).\n \"\"\"\n plt.ioff()\n fig = plt.figure(figsize=(size*(1 + len(poses)), size))\n ax_in = fig.add_subplot(1, 1 + len(poses), 1)\n ax_in.get_xaxis().set_visible(False)\n ax_in.get_yaxis().set_visible(False)\n ax_in.set_axis_off()\n ax_in.set_title('Input')\n\n ax_3d = []\n lines_3d = []\n trajectories = []\n radius = 1.7\n for index, (title, data) in enumerate(poses.items()):\n ax = fig.add_subplot(1, 1 + len(poses), index+2, projection='3d')\n ax.view_init(elev=15., azim=azim)\n ax.set_xlim3d([-radius/2, radius/2])\n ax.set_zlim3d([0, radius])\n ax.set_ylim3d([-radius/2, radius/2])\n ax.set_aspect('equal')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n ax.dist = 7.5\n ax.set_title(title) #, pad=35\n ax_3d.append(ax)\n lines_3d.append([])\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n trajectories.append(data[:, 0, [0, 1]])\n poses = list(poses.values())\n\n # Decode video\n if input_video_path is None:\n # Black background\n all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')\n else:\n # Load video using ffmpeg\n all_frames = []\n for f in read_video(input_video_path, skip=input_video_skip):\n all_frames.append(f)\n effective_length = min(keypoints.shape[0], len(all_frames))\n all_frames = all_frames[:effective_length]\n \n if downsample > 1:\n keypoints = downsample_tensor(keypoints, downsample)\n all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')\n for idx in range(len(poses)):\n poses[idx] = downsample_tensor(poses[idx], downsample)\n trajectories[idx] = downsample_tensor(trajectories[idx], downsample)\n fps /= downsample\n\n initialized = False\n image = None\n lines = []\n points = None\n \n if limit < 1:\n limit = len(all_frames)\n else:\n limit = min(limit, len(all_frames))\n\n parents = skeleton.parents()\n\n \"\"\"\n render video\n \"\"\"\n def update_video(i):\n nonlocal initialized, image, lines, points\n \"\"\"\n print(\"update videos frames\", \n len(all_frames), \n parents, \n \"keypoints\",\n keypoints[1, :, :])\n \"\"\"\n\n for n, ax in enumerate(ax_3d):\n ax.set_xlim3d([-radius/2 + trajectories[n][i, 0], \n radius/2 + trajectories[n][i, 0]])\n ax.set_ylim3d([-radius/2 + trajectories[n][i, 1], \n radius/2 + trajectories[n][i, 1]])\n\n # Update 2D poses\n if not initialized:\n image = ax_in.imshow(all_frames[i], aspect='equal')\n \n for j, j_parent in enumerate(parents):\n if j_parent == -1:\n continue\n\n \"\"\"\n print(parents)\n print(\"kp shape\", keypoints.shape)\n print(\"j, jp\", j, j_parent)\n \"\"\"\n \n if len(parents) == keypoints.shape[1]:\n \"\"\"\n 2D input \n \"\"\"\n # Draw skeleton only if keypoints match \n # (otherwise we don't have the parents \n # definition)\n\n lines.append(ax_in.plot(\n [keypoints[i, j, 0],#x \n keypoints[i, j_parent, 0]],#x\n [keypoints[i, j, 1],#y\n keypoints[i, j_parent, 1]],#y\n color='red'))\n #print(lines)\n \"\"\"\n 3D output\n \"\"\"\n col = 'orange' if j in skeleton.joints_right() else 'blue'\n for n, ax in enumerate(ax_3d):\n pos = poses[n][i]\n #print(\"shapes\", \n #\"2D keypoints\", 
keypoints.shape,\n #\"3D poses\", np.array(poses).shape)\n lines_3d[n].append(ax.plot([pos[j, 0], \n pos[j_parent, 0]],\n [pos[j, 1], \n pos[j_parent, 1]],\n [pos[j, 2], \n pos[j_parent, 2]],\n zdir='z',\n c=col))\n\n points = ax_in.scatter(*keypoints[i].T, \n 5, \n color='green', \n edgecolors='yellow', \n zorder=10)\n \n initialized = True\n else:\n image.set_data(all_frames[i])\n\n \"\"\"\n pose input \n \"\"\"\n for j, j_parent in enumerate(parents):\n col = 'purple' if j in skeleton.joints_right() else 'grey'\n if j_parent == -1:\n continue\n \n if len(parents) == keypoints.shape[1]:\n lines[j-1][0].set_data([keypoints[i, j, 0], \n keypoints[i, j_parent, 0]],\n [keypoints[i, j, 1], \n keypoints[i, j_parent, 1]])\n\n \"\"\"\n pose output\n \"\"\"\n for n, ax in enumerate(ax_3d):\n if(n == ALL_POSE):\n continue\n pos = poses[n][i]\n\n lines_3d[n][j-1][0].set_xdata([pos[j, 0], \n pos[j_parent, \n 0]])\n lines_3d[n][j-1][0].set_ydata([pos[j, 1], \n pos[j_parent, \n 1]])\n lines_3d[n][j-1][0].set_3d_properties([pos[j,2],\n pos[j_parent, 2]], \n zdir='z')\n points.set_offsets(keypoints[i])\n\n \n print('{}/{} '.format(i, limit), end='\\r')\n fig.tight_layout()\n\n \"\"\"\n animate\n \"\"\"\n anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000/fps, repeat=False)\n if output.endswith('.mp4'):\n Writer = writers['ffmpeg']\n writer = Writer(fps=fps, metadata={}, bitrate=bitrate)\n anim.save(output, writer=writer)\n elif output.endswith('.gif'):\n anim.save(output, dpi=80, writer='imagemagick')\n else:\n raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')\n plt.close()\n","sub_path":"common/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":9642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30719786","text":"import os\nimport json\nimport glob\n\nGENOME_HOME = os.environ['GENOME_HOME']\n\npath = os.path.join(GENOME_HOME, \"data/*\")\nfor _dir in sorted(glob.glob(path)): \n month = _dir[len(path)-1:_dir.find(\"-\", len(path))]\n year = _dir[_dir.rfind(\"-\")+1:_dir.find(\":\")]\n\n idx = _dir.find(\"-\", len(path))+1\n day = _dir[idx:_dir.find(\"-\", idx)]\n hour = _dir[_dir.find(\":\")+1:]\n\n folder_id = month+\"-\"+day+\"-\"+year+\":\"+hour\n\n folder_path = os.path.join(GENOME_HOME, \"data\", folder_id)\n output_json = os.path.join(folder_path, folder_id + \"_bug.json\")\n with open(output_json) as f:\n bug_array = json.load(f)\n for bug in bug_array:\n bug_path = os.path.join(folder_path, str(bug[\"id\"]))\n files_changed = bug[\"files_changed\"]\n if \"state\" in files_changed[0][\"metadata\"]:\n with open(os.path.join(bug_path, 'processing_info.json'), 'w') as outfile: \n print(\"wrote to processing_info.json\", bug_path)\n json.dump(files_changed, outfile, indent=4, sort_keys=True)\n for fi in files_changed:\n fi[\"metadata\"].pop(\"state\")\n elif \"es6\" in files_changed[0][\"metadata\"]:\n with open(os.path.join(bug_path, 'processing_info.json'), 'w') as outfile: \n print(\"wrote to processing_info.json\", bug_path)\n json.dump(files_changed, outfile, indent=4, sort_keys=True)\n for fi in files_changed:\n fi[\"metadata\"].pop(\"es6\")\n with open(output_json, 'w') as outfile: \n print(\"removing state field\", output_json)\n json.dump(bug_array, outfile, indent=4, 
sort_keys=True)","sub_path":"collect_data/generate_process_info.py","file_name":"generate_process_info.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501712853","text":"import os\nprint(\"Please Enter Your Choice\\n\")\nwhile True:\n print(\"================================\")\n print(\"1 Training (using MNIST datasets) \\n\")\n print(\"2 Guess the number in 'text.png' \\n\")\n print(\"3 Quit \")\n opt = int(input())\n if opt== 1 :\n os.system(\"python train.py\")\n elif opt== 2 :\n os.system(\"python classify.py\")\n else :\n exit()\n\n","sub_path":"Tensorflow/LeNetSplitModel/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241022329","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Wed Jan 31 17:52:19 2018\n\n@author: pfjarschel\n\"\"\"\n\nimport sys, time, os.path, datetime\nimport numpy as np\nfrom threading import Timer, Thread, Lock\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import Qt, QCoreApplication, QTimer, QPoint, QRectF, QDir\nfrom PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox, QLabel, QColorDialog\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage, QColor, QCursor, QTransform, QPainter, QBrush, QPolygon, QFont, QTextOption\n\nimport PAXCam\nimport IngaasCam\nimport NewportMotors\n\nilmd_available = False\ntry:\n import importlib.metadata\n ilmd_available = True\nexcept:\n pass\n\n\nFormUI, WindowUI = uic.loadUiType(\"MainWindow_UI.ui\")\n\n\nclass ImageAcquisition:\n def __init__(self):\n self.thread = None\n\n self.cam = None\n self.camOpen = False\n self.img = QImage(640, 512, QImage.Format_RGB32)\n self.img.fill(Qt.black)\n\n self.started = False\n self.read_lock = Lock()\n\n self.t0 = time.time()\n self.t1 = time.time()\n self.fpsframes = 10\n self.fps = 0\n self.framecount = 0\n\n def OpenCam(self, camid):\n if camid == \"PAXCam\":\n self.cam = PAXCam.PAXCam(0)\n elif camid == \"IngaasCam\":\n self.cam = IngaasCam.IngaasCam(0)\n elif camid == \"IngaasCamHG\":\n self.cam = IngaasCam.IngaasCam(1)\n\n if self.cam.camOK:\n self.camOpen = True\n\n return self.camOpen\n\n def CloseCam(self):\n self.cam.Close()\n\n def start(self):\n if self.started :\n return None\n self.started = True\n self.thread = Thread(target=self.update, args=())\n self.t0 = time.time()\n self.framecount = 0\n self.thread.start()\n return self\n\n def update(self) :\n while self.started :\n self.read_lock.acquire()\n \n if self.camOpen:\n self.img = self.cam.GetQImage()\n else:\n self.img = QImage(640, 480, QImage.Format_RGB32)\n self.img.fill(Qt.black)\n \n self.framecount += 1\n if self.framecount >= self.fpsframes:\n self.t1 = time.time()\n self.fps = self.framecount/(self.t1 - self.t0)\n self.framecount = 0\n self.t0 = time.time()\n\n self.read_lock.release()\n\n def get(self):\n # self.read_lock.acquire()\n # frame = self.img.copy()\n # self.read_lock.release()\n # return frame\n return self.img.copy()\n\n def stop(self) :\n self.started = False\n if self.thread.is_alive():\n self.thread.join()\n\n def __exit__(self, exc_type, exc_value, traceback) :\n self.stop()\n self.CloseCam()\n\n\nclass MainWindow(FormUI, WindowUI):\n #Main stuff\n delayedInit = None\n\n #Camera stuff\n camOpen = False\n capTimer = None\n videoBusy = False\n img = QImage()\n camThread = None\n fps = 0\n\n #motors stuff\n posdb = []\n motors = None\n getposTimer = None\n getposBusy = 
False\n getcontposBusy = False\n movXTimer = None\n movYTimer = None\n movZTimer = None\n getcontposTimer = None\n mouseBusy = False\n mouseGoDown = False\n mouseX0 = 0\n mouseY0 = 0\n mouseX = 0\n mouseY = 0\n\n #Other stuff\n drawing = False\n marking = False\n mouseDrawingDown = False\n mouseMarkingDown = False\n drawColor = QColor()\n\n def __init__(self):\n super(MainWindow, self).__init__()\n self.setupUi(self)\n self.LoadSavedPos()\n self.setupOtherUi()\n self.SetupActions()\n self.show()\n self.setWindowIcon(QIcon(\"micro.ico\"))\n\n #self.delayedInit = Timer(0.1, self.InitializeDevices)\n #self.delayedInit.start()\n self.InitializeDevices()\n\n def OnWindowResize(self, event):\n Timer(0.1, self.FixOverlays).start()\n Timer(0.2, self.DrawFixedScale).start()\n if self.camOpen and not self.capTimer.isActive():\n pix = QPixmap(self.camView.width(), self.camView.height())\n pix.fill(Qt.black)\n img = self.img.scaled(self.camView.size(), Qt.KeepAspectRatio)\n painter = QPainter(pix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n pointx = int(np.round(np.fabs(self.camView.width() - img.width())/2))\n pointy = int(np.round(np.fabs(self.camView.height() - img.height())/2))\n painter.drawImage(pointx, pointy, img)\n painter.end()\n fh = 1\n fv = 1\n if self.fliphCheck.isChecked():\n fh = -1\n if self.flipvCheck.isChecked():\n fv = -1\n if(fv != 1 or fh != 1):\n pix = pix.transformed(QTransform().scale(fh, fv))\n self.camView.setPixmap(pix)\n resizeEvent = OnWindowResize\n\n def setupOtherUi(self):\n self.scaleOverlay = QLabel(self.camView)\n self.scaleOverlay.setText(\"\")\n self.markerOverlay = QLabel(self.camView)\n self.markerOverlay.setText(\"\")\n self.fscaleOverlay = QLabel(self.camView)\n self.fscaleOverlay.setText(\"\")\n self.camOverlay = QLabel(self.camView)\n self.camOverlay.setText(\"\")\n\n self.FixOverlays()\n\n self.camOverlay.mousePressEvent = self.GetMousePress\n self.camOverlay.mouseMoveEvent = self.GetMouseMove\n self.camOverlay.mouseReleaseEvent = self.GetMouseRelease\n self.camOverlay.mouseDoubleClickEvent = self.GetDoubleClick\n\n self.drawColor = QColor(0, 255, 0, 255)\n self.drawcolorBut.setStyleSheet(f\"background-color: rgb({self.drawColor.red()}, {self.drawColor.green()}, {self.drawColor.blue()})\")\n\n self.statusbar.showMessage(f\"FPS: {self.fps:.2f} (Camera closed)\")\n\n def FixOverlays(self):\n self.fscaleOverlay.move(0, 0)\n self.fscaleOverlay.resize(self.camView.width(), self.camView.height())\n self.scaleOverlay.move(0, 0)\n self.scaleOverlay.resize(self.camView.width(), self.camView.height())\n self.markerOverlay.move(0, 0)\n self.markerOverlay.resize(self.camView.width(), self.camView.height())\n self.camOverlay.move(0, 0)\n self.camOverlay.resize(self.camView.width(), self.camView.height())\n\n def LoadSavedPos(self):\n posfilename = \"positions.txt\"\n if not os.path.isfile(posfilename):\n with open(posfilename, \"w\") as posfile:\n posfile.write(\"[0.000, 0.000, 0.000]\\t0.000,0.000,0.000\\n\")\n posfile.write(\"[12.500, 12.500, 12.500]\\t12.500,12.500,12.500\\n\")\n posfile.write(\"[25.000, 25.000, 25.000]\\t25.000,25.000,25.000\\n\")\n posfile.close()\n\n with open(posfilename, \"r\") as posfile:\n lines = posfile.readlines()\n for i in range(0, len(lines)):\n line = lines[i].strip(\"\\n\")\n name = line.split(\"\\t\")[0]\n x = line.split(\"\\t\")[1].split(\",\")[0]\n y = line.split(\"\\t\")[1].split(\",\")[1]\n z = line.split(\"\\t\")[1].split(\",\")[2]\n self.posdb.append([name, [float(x), float(y), 
float(z)]])\n self.savedposCombo.addItem(name)\n posfile.close()\n self.savedposCombo.setCurrentIndex(0)\n\n\n def SetupActions(self):\n #Buttons and etc\n self.paxRadio.clicked.connect(self.ChangeCam)\n self.ingaasRadio.clicked.connect(self.ChangeCam)\n self.ingaasHGRadio.clicked.connect(self.ChangeCam)\n self.startBut.clicked.connect(self.OnStartButClicked)\n self.stopBut.clicked.connect(self.OnStopButClicked)\n self.saveFrameBut.clicked.connect(self.SaveFrame)\n self.savefullimgBut.clicked.connect(self.SaveFullImg)\n self.exposureSlider.valueChanged.connect(self.OnExpSliderChanged)\n self.gainSlider.valueChanged.connect(self.OnGainSliderChanged)\n self.multiDial.valueChanged.connect(self.UpdateMultiSpin)\n self.multiSpin.valueChanged.connect(self.UpdateMultiDial)\n self.emergBut.clicked.connect(self.StopAllMov)\n self.upBut.clicked.connect(self.MoveUp)\n self.downBut.clicked.connect(self.MoveDown)\n self.leftBut.clicked.connect(self.MoveLeft)\n self.rightBut.clicked.connect(self.MoveRight)\n self.zdownBut.clicked.connect(self.MoveZDown)\n self.zupBut.clicked.connect(self.MoveZUp)\n self.xPosSpin.valueChanged.connect(self.OnXPosChanged)\n self.yPosSpin.valueChanged.connect(self.OnYPosChanged)\n self.zPosSpin.valueChanged.connect(self.OnZPosChanged)\n self.zSlider.sliderMoved.connect(self.OnZSliderChanged)\n self.zSlider.sliderPressed.connect(self.OnZSliderClick)\n self.zSlider.sliderReleased.connect(self.OnZSliderRelease)\n #self.saveposBut.clicked.connect(self.SavePosition)\n self.savedposCombo.lineEdit().returnPressed.connect(self.SavePosition)\n self.gotoposBut.clicked.connect(self.GoToPos)\n self.delposBut.clicked.connect(self.DelPos)\n self.npixelsSpin.valueChanged.connect(self.CalcMicrons)\n self.microcalSpin.valueChanged.connect(self.CalcMicrons)\n self.lensmagSpin.valueChanged.connect(self.CalcCalibrationScale)\n self.zoomSpin.valueChanged.connect(self.CalcCalibrationScale)\n self.drawcolorBut.clicked.connect(self.SetDrawColor)\n self.drawlinemeasBut.clicked.connect(self.DrawLineMeasurement)\n self.clearmeasBut.clicked.connect(self.ClearLineMeasurement)\n self.meassaveBut.clicked.connect(self.SaveMeas)\n self.measloadBut.clicked.connect(self.LoadMeas)\n self.placemarkerBut.clicked.connect(self.PlaceMarker)\n self.clearmarkerBut.clicked.connect(self.ClearMarker)\n self.markersaveBut.clicked.connect(self.SaveMarker)\n self.markerloadBut.clicked.connect(self.LoadMarker)\n self.fixedscaleCheck.stateChanged.connect(self.DrawFixedScale)\n\n #Video\n self.capTimer = QTimer()\n self.capTimer.timeout.connect(self.CaptureVideo)\n self.capTimer.setInterval(10)\n\n #Motors\n self.getposTimer = QTimer()\n self.getposTimer.timeout.connect(self.UpdateMotorsPos)\n self.getposTimer.setInterval(100)\n self.getcontposTimer = QTimer()\n self.getcontposTimer.timeout.connect(self.UpdateMotorsContPos)\n self.getcontposTimer.setInterval(100)\n self.movXTimer = QTimer()\n self.movXTimer.timeout.connect(self.MoveXCont)\n self.movXTimer.setInterval(500)\n self.movYTimer = QTimer()\n self.movYTimer.timeout.connect(self.MoveYCont)\n self.movYTimer.setInterval(500)\n self.movZTimer = QTimer()\n self.movZTimer.timeout.connect(self.MoveZCont)\n self.movZTimer.setInterval(500)\n\n #Menu bar\n self.action_Reserved.triggered.connect(self.ShowReserved)\n self.actionGuide.triggered.connect(self.ShowGuide)\n self.actionAbout_Qt.triggered.connect(self.AboutQt)\n self.actionAbout_Python.triggered.connect(self.AboutPython)\n self.actionAbout.triggered.connect(self.About)\n 
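 # Note: delayed GUI calls made with threading.Timer (as in OnWindowResize above) run off the Qt GUI thread; QTimer.singleShot is a minimal thread-safe sketch of the same delayed call:\n # QTimer.singleShot(100, self.FixOverlays) # interval in ms; the slot runs on the GUI thread\n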
self.actionExit.triggered.connect(self.Exit)\n\n def InitializeDevices(self):\n self.motors = NewportMotors.NewportMotors()\n homing = False\n time.sleep(0.2)\n if self.motors.xOK:\n self.xOK.setPixmap(QPixmap(\"green_led.png\"))\n self.xPosSpin.setValue(self.motors.xPOS)\n elif self.motors.xhoming:\n self.xOK.setPixmap(QPixmap(\"yellow_led.png\"))\n homing = True\n if self.motors.yOK:\n self.yOK.setPixmap(QPixmap(\"green_led.png\"))\n self.yPosSpin.setValue(self.motors.yPOS)\n elif self.motors.yhoming:\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n homing = True\n if self.motors.zOK:\n self.zOK.setPixmap(QPixmap(\"green_led.png\"))\n self.zPosSpin.setValue(self.motors.zPOS)\n elif self.motors.zhoming:\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n homing = True\n\n if homing:\n time.sleep(0.5)\n box = QMessageBox()\n box.setIcon(QMessageBox.Warning)\n box.setText(\"One or more motors need homing. Before continuing, please confirm that the microscope \" +\n \"lens is very far from any object. Since in the homing process the whole tube will \" +\n \"move down considerably, this is VERY IMPORTANT!\\n\\n By clicking OK, you accept that \" +\n \"anything bad that might eventually happen is entirely your fault.\")\n box.setWindowTitle(\"Please be careful and patient\")\n box.exec()\n self.motors.HomeAll()\n self.getposTimer.start()\n\n def OpenCamera(self):\n if self.paxRadio.isChecked():\n self.camThread.OpenCam(\"PAXCam\")\n elif self.ingaasRadio.isChecked():\n self.camThread.OpenCam(\"IngaasCam\")\n elif self.ingaasHGRadio.isChecked():\n self.camThread.OpenCam(\"IngaasCamHG\")\n\n if self.camThread.cam.camOK:\n self.camOpen = True\n self.camOK.setPixmap(QPixmap(\"green_led.png\"))\n self.gainSlider.setValue(int(self.camThread.cam.GetGain()))\n self.exposureSlider.setValue(int(self.camThread.cam.GetExposure()))\n self.CalcCalibrationScale()\n\n def ChangeCam(self):\n if self.camOpen:\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n self.camOpen = False\n self.camOK.setPixmap(QPixmap(\"red_led.png\"))\n self.camThread.cam.Close()\n time.sleep(0.2)\n del self.camThread.cam\n time.sleep(0.2)\n self.OpenCamera()\n if recap:\n self.capTimer.start()\n\n def CloseDevices(self):\n if self.camOpen:\n self.camThread.stop()\n self.camThread.CloseCam()\n self.camOpen = False\n self.camThread = None\n self.statusbar.showMessage(f\"FPS: {self.fps:.2f} (Camera closed)\")\n \n\n def OnStartButClicked(self):\n self.camThread = None\n self.camThread = ImageAcquisition().start()\n\n if not self.camOpen:\n self.OpenCamera()\n\n self.capTimer.start()\n Timer(0.2, self.DrawFixedScale).start()\n\n\n def OnStopButClicked(self):\n self.capTimer.stop()\n self.CloseDevices()\n\n def OnExpSliderChanged(self):\n if self.camOpen:\n self.camThread.cam.SetExposure(self.exposureSlider.value())\n\n def OnGainSliderChanged(self):\n if self.camOpen:\n self.camThread.cam.SetGain(self.gainSlider.value())\n\n def UpdateMultiSpin(self):\n self.multiSpin.setValue(self.multiDial.value())\n\n def UpdateMultiDial(self):\n self.multiDial.setValue(self.multiSpin.value())\n\n def CaptureVideo(self):\n if not self.videoBusy and self.camOpen:\n self.videoBusy = True\n self.img = self.camThread.get()\n pix = QPixmap(self.camView.width(), self.camView.height())\n pix.fill(Qt.black)\n img = self.img.scaled(self.camView.size(), Qt.KeepAspectRatio)\n painter = QPainter(pix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n pointx = int(np.round(np.fabs(self.camView.width() - 
img.width())/2))\n pointy = int(np.round(np.fabs(self.camView.height() - img.height())/2))\n painter.drawImage(pointx, pointy, img)\n painter.end()\n fh = 1\n fv = 1\n if self.fliphCheck.isChecked():\n fh = -1\n if self.flipvCheck.isChecked():\n fv = -1\n if(fv != 1 or fh != 1):\n pix = pix.transformed(QTransform().scale(fh, fv))\n self.camView.setPixmap(pix)\n if self.fps != self.camThread.fps:\n self.fps = self.camThread.fps\n self.statusbar.showMessage(f\"FPS: {self.fps:.2f}\")\n self.videoBusy = False\n\n def SaveFrame(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getSaveFileName(self, \"Save file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n if filename[-4:] != \".png\" and filename[-4:] != \".PNG\":\n filename = filename + \".png\"\n self.img.save(filename)\n if recap:\n self.capTimer.start()\n\n def SaveFullImg(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getSaveFileName(self, \"Save file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n if filename[-4:] != \".png\" and filename[-4:] != \".PNG\":\n filename = filename + \".png\"\n stackPix = QPixmap(self.camView.width(), self.camView.height())\n painter = QPainter(stackPix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n if self.camView.pixmap() is not None:\n painter.drawPixmap(0,0,self.camView.pixmap().scaled(self.camView.size(), Qt.IgnoreAspectRatio))\n if self.scaleOverlay.pixmap() is not None:\n painter.drawPixmap(0,0,self.scaleOverlay.pixmap())\n if self.fscaleOverlay.pixmap() is not None:\n painter.drawPixmap(0,0,self.fscaleOverlay.pixmap())\n if self.markerOverlay.pixmap() is not None:\n painter.drawPixmap(0,0,self.markerOverlay.pixmap())\n painter.end()\n stackPix.toImage().scaled(self.camView.size(), Qt.IgnoreAspectRatio).save(filename)\n if recap:\n self.capTimer.start()\n\n def SaveMeas(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getSaveFileName(self, \"Save file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n if filename[-4:] != \".png\" and filename[-4:] != \".PNG\":\n filename = filename + \".png\"\n self.scaleOverlay.pixmap().toImage().scaled(self.camView.size(), Qt.IgnoreAspectRatio).save(filename)\n if recap:\n self.capTimer.start()\n\n def LoadMeas(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getOpenFileName(self, \"Load file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n pix = QPixmap(filename)\n painter = QPainter(pix)\n if self.scaleOverlay.pixmap() is not None:\n painter.drawPixmap(0, 0, self.scaleOverlay.pixmap())\n painter.drawPixmap(0, 0, self.camOverlay.pixmap())\n self.scaleOverlay.setPixmap(pix)\n painter.end()\n if recap:\n self.capTimer.start()\n\n def SaveMarker(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getSaveFileName(self, \"Save file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n if filename[-4:] != \".png\" and filename[-4:] != \".PNG\":\n filename = filename + \".png\"\n self.markerOverlay.pixmap().toImage().scaled(self.camView.size(), Qt.IgnoreAspectRatio).save(filename)\n if recap:\n self.capTimer.start()\n\n def LoadMarker(self):\n recap = self.capTimer.isActive()\n self.capTimer.stop()\n file = QFileDialog.getOpenFileName(self, \"Load 
file\", QDir.homePath(), \"PNG images (*.png)\")\n filename = file[0]\n if filename != \"\":\n pix = QPixmap(filename)\n painter = QPainter(pix)\n if self.markerOverlay.pixmap() is not None:\n painter.drawPixmap(0, 0, self.markerOverlay.pixmap())\n painter.drawPixmap(0, 0, self.camOverlay.pixmap())\n self.markerOverlay.setPixmap(pix)\n painter.end()\n if recap:\n self.capTimer.start()\n\n def UpdateMotorsPos(self):\n if not self.getposBusy:\n self.getposBusy = True\n self.motors.UpdatePositions()\n xmov = self.motors.AxisMoving(1)\n ymov = self.motors.AxisMoving(2)\n zmov = self.motors.AxisMoving(3)\n if xmov:\n self.xPosSpin.setValue(self.motors.xPOS)\n elif self.motors.xOK:\n self.xOK.setPixmap(QPixmap(\"green_led.png\"))\n if ymov:\n self.yPosSpin.setValue(self.motors.yPOS)\n elif self.motors.yOK:\n self.yOK.setPixmap(QPixmap(\"green_led.png\"))\n if zmov:\n self.zPosSpin.setValue(self.motors.zPOS)\n elif self.motors.zOK:\n self.zOK.setPixmap(QPixmap(\"green_led.png\"))\n if not (xmov or ymov or zmov):\n self.getposTimer.stop()\n self.getposBusy = False\n\n def MoveUp(self):\n if self.motors.yOK:\n self.motors.MoveRelative(2, self.xyStepSpin.value())\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def MoveDown(self):\n if self.motors.yOK:\n self.motors.MoveRelative(2, -self.xyStepSpin.value())\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def MoveLeft(self):\n if self.motors.xOK:\n self.motors.MoveRelative(1, -self.xyStepSpin.value())\n self.xOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def MoveRight(self):\n if self.motors.xOK:\n self.motors.MoveRelative(1, self.xyStepSpin.value())\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def MoveZUp(self):\n if self.motors.zOK:\n self.motors.MoveRelative(3, self.zStepSpin.value())\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def MoveZDown(self):\n if self.motors.zOK:\n self.motors.MoveRelative(3, -self.zStepSpin.value())\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def OnXPosChanged(self):\n if self.motors.xOK:\n self.motors.MoveAbsolute(1, self.xPosSpin.value())\n self.xOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def OnYPosChanged(self):\n if self.motors.yOK:\n self.motors.MoveAbsolute(2, self.yPosSpin.value())\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def OnZPosChanged(self):\n if self.motors.zOK:\n self.motors.MoveAbsolute(3, self.zPosSpin.value())\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getposTimer.start()\n\n def SavePosition(self):\n if self.savedposCombo.currentText() == \"\":\n self.savedposCombo.setCurrentText(\"[\" + str(self.xPosSpin.value()) + \",\" +\n str(self.yPosSpin.value()) + \",\" + str(self.zPosSpin.value()) + \"]\")\n\n duplicate = True\n while duplicate:\n itemExistsIndex = self.savedposCombo.findText(self.savedposCombo.currentText())\n if itemExistsIndex != -1 and itemExistsIndex != self.savedposCombo.count() - 1:\n dupi = 0\n foundupi = 0\n while foundupi != -1:\n dupi = dupi + 1\n foundupi = self.savedposCombo.findText(self.savedposCombo.currentText() + \" (\" + str(dupi) + \")\")\n self.savedposCombo.setCurrentText(self.savedposCombo.currentText() + \" (\" + str(dupi) + \")\")\n duplicate = True\n else:\n if self.savedposCombo.currentText() == self.posdb[len(self.posdb) - 1][0]:\n dupi = 0\n foundupi = 0\n while foundupi != -1:\n dupi = 
dupi + 1\n foundupi = self.savedposCombo.findText(self.savedposCombo.currentText() + \" (\" + str(dupi) + \")\")\n self.savedposCombo.setCurrentText(self.savedposCombo.currentText() + \" (\" + str(dupi) + \")\")\n duplicate = True\n else:\n duplicate = False\n\n self.posdb.append([self.savedposCombo.currentText(),[self.xPosSpin.value(),\n self.yPosSpin.value(), self.zPosSpin.value()]])\n with open(\"positions.txt\", \"a\") as posfile:\n posfile.write(self.savedposCombo.currentText() + \"\\t\" + str(self.xPosSpin.value()) + \",\" +\n str(self.yPosSpin.value()) + \",\" + str(self.zPosSpin.value()) + \"\\n\")\n posfile.close()\n if len(self.posdb) > self.savedposCombo.count():\n self.savedposCombo.addItem(self.savedposCombo.currentText())\n self.savedposCombo.setCurrentIndex(self.savedposCombo.count() - 1)\n\n def GoToPos(self):\n i = self.savedposCombo.currentIndex()\n self.xPosSpin.setValue(self.posdb[i][1][0])\n self.yPosSpin.setValue(self.posdb[i][1][1])\n self.zPosSpin.setValue(self.posdb[i][1][2])\n\n def DelPos(self):\n i = self.savedposCombo.currentIndex()\n if i > 2:\n self.savedposCombo.removeItem(i)\n self.posdb.pop(i)\n self.RebuildPositionsFile()\n else:\n QMessageBox.warning(self, \"Wait!\", \"Not permitted to delete base positions!\")\n\n def RebuildPositionsFile(self):\n with open(\"positions.txt\", \"w\") as posfile:\n for i in range(0, len(self.posdb)):\n posfile.write(self.posdb[i][0] + \"\\t\" + str(self.posdb[i][1][0]) + \",\" +\n str(self.posdb[i][1][1]) + \",\" + str(self.posdb[i][1][2]) + \"\\n\")\n posfile.close()\n\n def UpdateMotorsContPos(self):\n if not self.getcontposBusy:\n self.getcontposBusy = True\n self.motors.UpdatePositions()\n xmov = self.motors.AxisMoving(1)\n ymov = self.motors.AxisMoving(2)\n zmov = self.motors.AxisMoving(3)\n if xmov and self.motors.xOK:\n self.xPosSpin.setValue(self.motors.xPOS)\n self.xOK.setPixmap(QPixmap(\"yellow_led.png\"))\n elif self.motors.xOK:\n self.xOK.setPixmap(QPixmap(\"green_led.png\"))\n if ymov and self.motors.yOK:\n self.yPosSpin.setValue(self.motors.yPOS)\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n elif self.motors.yOK:\n self.yOK.setPixmap(QPixmap(\"green_led.png\"))\n if zmov and self.motors.zOK:\n self.zPosSpin.setValue(self.motors.zPOS)\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n elif self.motors.zOK:\n self.zOK.setPixmap(QPixmap(\"green_led.png\"))\n self.getcontposBusy = False\n\n def OnZSliderClick(self):\n self.OnZPosChanged()\n self.movZTimer.start()\n if self.motors.zOK:\n self.zOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getcontposTimer.start()\n\n def OnZSliderChanged(self):\n self.motors.CalculateContParams(3, self.zSlider.value()/1000.0, 0.008875*np.exp(0.04721*self.multiDial.value()))\n self.movZTimer.setInterval(self.motors.zWait*1000)\n\n def OnZSliderRelease(self):\n self.movZTimer.stop()\n self.getcontposTimer.stop()\n self.zSlider.setValue(0)\n self.getposTimer.start()\n self.motors.StopAll()\n\n def MoveXCont(self):\n self.motors.MoveRelative(1, self.motors.xStep)\n\n def MoveYCont(self):\n self.motors.MoveRelative(2, self.motors.yStep)\n\n def MoveZCont(self):\n self.motors.MoveRelative(3, self.motors.zStep)\n\n def StopAllMov(self):\n self.motors.StopAll()\n\n def GetMousePress(self, event):\n self.FixOverlays()\n\n self.mouseX0 = event.x()\n self.mouseY0 = event.y()\n self.mouseX = self.mouseX0\n self.mouseY = self.mouseY0\n\n if event.button() == 1 and self.drawing and self.camOpen:\n self.mouseDrawingDown = True\n self.DrawMeasOverlay()\n \n elif event.button() == 1 and 
self.marking and self.camOpen:\n self.mouseMarkingDown = True\n self.DrawMarkerOverlay(self.markersizeSpin.value())\n\n if event.button() == 2:\n self.mouseGoDown = True\n self.mouseX0 = int(self.camView.width() / 2)\n self.mouseY0 = int(self.camView.height() / 2)\n self.mouseX = self.mouseX0\n self.mouseY = self.mouseY0\n dx = self.mouseX - self.mouseX0\n dy = self.mouseY - self.mouseY0\n self.motors.CalculateContParams(1, 2*dx/self.camView.width(), 0.008875*np.exp(0.04721*self.multiDial.value()))\n self.motors.CalculateContParams(2, 2*dy/self.camView.height(), 0.008875*np.exp(0.04721*self.multiDial.value()))\n self.movXTimer.setInterval(self.motors.xWait*1000)\n self.movYTimer.setInterval(self.motors.yWait*1000)\n\n self.movXTimer.start()\n self.movYTimer.start()\n if self.motors.xOK:\n self.xOK.setPixmap(QPixmap(\"yellow_led.png\"))\n if self.motors.yOK:\n self.yOK.setPixmap(QPixmap(\"yellow_led.png\"))\n self.getcontposTimer.start()\n\n self.DrawGoOverlay()\n cursor = QCursor()\n cursor.setShape(Qt.SizeAllCursor)\n cursor.setPos(self.camOverlay.mapToGlobal(QPoint(self.mouseX, self.mouseY)))\n QApplication.setOverrideCursor(cursor)\n\n def GetMouseMove(self, event):\n mX = event.x()\n mY = event.y()\n lock = False\n if mX < 0:\n mX = 0\n lock = True\n if mX > self.camView.width():\n mX = self.camView.width()\n lock = True\n if mY < 0:\n mY = 0\n lock = True\n if mY > self.camView.height():\n mY = self.camView.height()\n lock = True\n if lock:\n cursor = QCursor()\n cursor.setPos(self.camOverlay.mapToGlobal(QPoint(mX, mY)))\n QApplication.setOverrideCursor(cursor)\n\n self.mouseX = mX\n self.mouseY = mY\n\n if self.mouseDrawingDown and self.camOpen:\n self.DrawMeasOverlay()\n\n elif self.mouseMarkingDown and self.camOpen:\n self.DrawMarkerOverlay(self.markersizeSpin.value())\n \n elif self.mouseGoDown:\n self.DrawGoOverlay()\n dx = self.mouseX - self.mouseX0\n dy = -(self.mouseY - self.mouseY0)\n self.motors.CalculateContParams(1, 2*dx/self.camView.width(), 0.008875*np.exp(0.04721*self.multiDial.value()))\n self.motors.CalculateContParams(2, 2*dy/self.camView.height(), 0.008875*np.exp(0.04721*self.multiDial.value()))\n self.movXTimer.setInterval(self.motors.xWait*1000)\n self.movYTimer.setInterval(self.motors.yWait*1000)\n\n def GetMouseRelease(self, event):\n if event.button() == 1 and self.mouseDrawingDown:\n self.mouseDrawingDown = False\n self.drawing = False\n self.drawlinemeasBut.setDown(False)\n pix = QPixmap(self.scaleOverlay.width(), self.scaleOverlay.height())\n pix.fill(Qt.transparent)\n painter = QPainter(pix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n if self.scaleOverlay.pixmap() is not None:\n painter.drawPixmap(0, 0, self.scaleOverlay.pixmap())\n painter.drawPixmap(0, 0, self.camOverlay.pixmap())\n self.scaleOverlay.setPixmap(pix)\n painter.end()\n pix0 = QPixmap(self.scaleOverlay.width(), self.scaleOverlay.height())\n pix0.fill(Qt.transparent)\n self.camOverlay.setPixmap(pix0)\n if event.button() == 1 and self.mouseMarkingDown:\n self.mouseMarkingDown = False\n self.marking = False\n self.placemarkerBut.setDown(False)\n pix = QPixmap(self.markerOverlay.width(), self.markerOverlay.height())\n pix.fill(Qt.transparent)\n painter = QPainter(pix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n if self.markerOverlay.pixmap() is not None:\n painter.drawPixmap(0, 0, self.markerOverlay.pixmap())\n painter.drawPixmap(0, 0, self.camOverlay.pixmap())\n self.markerOverlay.setPixmap(pix)\n 
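 # Note: finishing the QPainter before the pixmap is handed to the label is the generally safer ordering, i.e. painter.end() first, then self.markerOverlay.setPixmap(pix); Qt expects painting on a device to be finished before the device is used elsewhere.\n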
painter.end()\n pix0 = QPixmap(self.markerOverlay.width(), self.markerOverlay.height())\n pix0.fill(Qt.transparent)\n self.camOverlay.setPixmap(pix0)\n if event.button() == 2 and self.mouseGoDown:\n self.mouseGoDown = False\n self.movXTimer.stop()\n self.movYTimer.stop()\n pix = QPixmap(self.scaleOverlay.width(), self.scaleOverlay.height())\n pix.fill(Qt.transparent)\n self.camOverlay.setPixmap(pix)\n QApplication.setOverrideCursor(Qt.ArrowCursor)\n self.getcontposTimer.stop()\n self.getposTimer.start()\n self.motors.StopAll()\n\n def GetDoubleClick(self, event):\n if event.button() == 1 and self.camOpen:\n self.FixOverlays()\n dx = event.x() - self.camView.width() / 2\n dy = event.y() - self.camView.height() / 2\n ratioView = self.camView.width()/self.camView.height()\n ratioFrame = self.camThread.cam.frameW/self.camThread.cam.frameH\n scale = 1\n if ratioView < ratioFrame:\n scale = self.camThread.cam.frameW/self.camView.width()\n else:\n scale = self.camThread.cam.frameH/self.camView.height()\n dx = dx*scale\n dy = dy*scale\n dxm = dx/self.microcalSpin.value()\n dym = -dy/self.microcalSpin.value()\n self.xPosSpin.setValue(self.xPosSpin.value() + dxm/1000.0)\n self.yPosSpin.setValue(self.yPosSpin.value() + dym/1000.0)\n\n def CreateBlankImg(self, w, h):\n img = QImage(w, h, QImage.Format_ARGB32)\n img.fill(Qt.transparent)\n return img\n\n def SetDrawColor(self):\n self.drawColor = QColorDialog.getColor()\n self.drawcolorBut.setStyleSheet(f\"background-color: rgb({self.drawColor.red()}, {self.drawColor.green()}, {self.drawColor.blue()})\")\n self.drawcolorBut.update()\n\n def SetDrawPen(self, painter, thick, color, fill=False):\n pen = painter.pen()\n pen.setColor(color)\n pen.setWidthF(thick)\n painter.setPen(pen)\n if fill:\n painter.setBrush(QBrush(color, Qt.SolidPattern))\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n return painter\n\n def DrawGoOverlay(self):\n self.FixOverlays()\n\n thick = self.drawthickSpin.value()*2\n endhs = thick*3\n\n dx = self.mouseX - self.mouseX0\n dy = -(self.mouseY - self.mouseY0)\n if np.abs(dx) < 0.001:\n dx = 0.001\n if np.abs(dy) < 0.001:\n dy = 0.001\n slope = dx/dy\n length = np.sqrt(dx**2 + dy**2)\n cosine = dx/length\n sine = dy/length\n\n endx = np.sqrt(endhs**2/(slope**2 + 1))\n endy = slope*endx\n \n start1 = QPoint(int(np.round(self.mouseX0 + endx)), int(np.round(self.mouseY0 + endy)))\n start2 = QPoint(int(np.round(self.mouseX0 - endx)), int(np.round(self.mouseY0 - endy)))\n\n arrowx = self.mouseX - endhs*2*cosine\n arrowy = self.mouseY + endhs*2*sine\n arrow1 = QPoint(int(np.round(arrowx + endx)), int(np.round(arrowy + endy)))\n arrow2 = QPoint(int(np.round(arrowx - endx)), int(np.round(arrowy - endy)))\n arrow3 = QPoint(self.mouseX, self.mouseY)\n\n pix = QPixmap(self.scaleOverlay.width(), self.scaleOverlay.height())\n pix.fill(Qt.transparent)\n painter = QPainter(pix)\n painter = self.SetDrawPen(painter, thick, self.drawColor, True)\n painter.setFont(QFont(\"Sans\", pointSize=int(np.round(11*(thick**(1/3))))))\n painter.drawLine(start1, start2)\n painter.drawLine(QPoint(self.mouseX0, self.mouseY0), QPoint(int(np.round(arrowx)), int(np.round(arrowy))))\n painter.drawPolygon(QPolygon([arrow1, arrow2, arrow3]))\n painter.end()\n self.camOverlay.setPixmap(pix)\n\n def DrawMeasOverlay(self):\n self.FixOverlays()\n\n thick = self.drawthickSpin.value()\n endhs = thick*3\n\n dx = self.mouseX - self.mouseX0\n dy = -(self.mouseY - self.mouseY0)\n if np.abs(dx) < 0.001:\n dx = 0.001\n if np.abs(dy) < 0.001:\n dy = 0.001\n slope = 
dx/dy\n\n endx = np.sqrt(endhs**2/(slope**2 + 1))\n endy = slope*endx\n \n start1 = QPoint(int(np.round(self.mouseX0 + endx)), int(np.round(self.mouseY0 + endy)))\n start2 = QPoint(int(np.round(self.mouseX0 - endx)), int(np.round(self.mouseY0 - endy)))\n end1 = QPoint(int(np.round(self.mouseX + endx)), int(np.round(self.mouseY + endy)))\n end2 = QPoint(int(np.round(self.mouseX - endx)), int(np.round(self.mouseY - endy)))\n\n ratioView = self.camView.width()/self.camView.height()\n ratioFrame = self.camThread.cam.frameW/self.camThread.cam.frameH\n scale = 1\n if ratioView < ratioFrame:\n scale = self.camThread.cam.frameW/self.camView.width()\n else:\n scale = self.camThread.cam.frameH/self.camView.height()\n dx = dx*scale\n dy = dy*scale\n\n dist = np.sqrt(dx**2 + dy**2)\n self.npixelsSpin.setValue(int(dist))\n meastring = \"{:.2f}\".format(self.micronSpin.value()) + \" \" + u\"\\u03BC\" + \"m\"\n\n midx = self.mouseX0 + (self.mouseX - self.mouseX0)/2\n midy = self.mouseY0 + (self.mouseY - self.mouseY0)/2\n textx = int(np.round(-len(meastring)*4*(thick**(1/3))))\n texty = int(np.round(-2*thick - 8))\n textangle = 90 + 180*np.arctan(slope)/np.pi\n if textangle > 90:\n textangle += 180\n\n pix = QPixmap(self.scaleOverlay.width(), self.scaleOverlay.height())\n pix.fill(Qt.transparent)\n painter = QPainter(pix)\n painter = self.SetDrawPen(painter, thick, self.drawColor, False)\n painter.setFont(QFont(\"Sans\", pointSize=int(np.round(11*(thick**(1/3))))))\n painter.drawLine(start1, start2)\n painter.drawLine(QPoint(self.mouseX0, self.mouseY0), QPoint(self.mouseX, self.mouseY))\n painter.drawLine(end1, end2)\n painter.translate(midx, midy)\n painter.rotate(textangle)\n painter.drawText(QPoint(textx, texty), meastring)\n painter.end()\n self.camOverlay.setPixmap(pix)\n\n def DrawLineMeasurement(self):\n self.drawing = True\n self.drawlinemeasBut.setDown(True)\n\n def ClearLineMeasurement(self):\n pix = QPixmap(self.markerOverlay.width(), self.markerOverlay.height())\n pix.fill(Qt.transparent)\n self.scaleOverlay.setPixmap(pix)\n\n def DrawMarkerOverlay(self, size):\n text = self.markerEdit.text()\n\n markertype = 0\n if self.markcircleRadio.isChecked():\n markertype = 0\n elif self.marktriRadio.isChecked():\n markertype = 1\n elif self.marksquareRadio.isChecked():\n markertype = 2\n elif self.markstarRadio.isChecked():\n markertype = 3\n elif self.marktextRadio.isChecked():\n markertype = 4\n\n pix = QPixmap(self.markerOverlay.width(), self.markerOverlay.height())\n pix.fill(Qt.transparent)\n painter = QPainter(pix)\n painter = self.SetDrawPen(painter, self.drawthickSpin.value(), self.drawColor, False)\n painter.setFont(QFont(\"Sans\", pointSize=size))\n\n if markertype == 0:\n painter.drawEllipse(QPoint(self.mouseX, self.mouseY), int(size/2), int(size/2))\n elif markertype == 1:\n p1 = QPoint(int(self.mouseX - size/2), int(self.mouseY + size/2))\n p2 = QPoint(int(self.mouseX + size/2), int(self.mouseY + size/2))\n p3 = QPoint(int(self.mouseX), int(self.mouseY - size/2))\n painter.drawLine(p1, p2)\n painter.drawLine(p2, p3)\n painter.drawLine(p3, p1)\n elif markertype == 2:\n painter.drawRect(self.mouseX, self.mouseY, size, size)\n elif markertype == 3:\n r0 = size/2\n alpha = 2.0*np.pi/10.0\n omega = alpha*11\n r1 = r0*(11 % 2 + 1)/2\n p0 = QPoint(int(np.round(r1 * np.sin(omega) + self.mouseX)), int(np.round(r1 * np.cos(omega) + self.mouseY)))\n for i in reversed(range(1, 11)):\n r1 = r0*(i % 2 + 1)/2\n omega = alpha * i\n p1 = QPoint(int(np.round(r1 * np.sin(omega) + self.mouseX)), 
int(np.round(r1 * np.cos(omega) + self.mouseY)))\n painter.drawLine(p0, p1)\n p0 = p1\n\n elif markertype == 4:\n painter.drawText(QPoint(self.mouseX, self.mouseY), text)\n \n painter.end()\n\n self.camOverlay.setPixmap(pix)\n\n def PlaceMarker(self):\n self.marking = True\n self.placemarkerBut.setDown(True)\n\n def ClearMarker(self):\n pix = QPixmap(self.markerOverlay.width(), self.markerOverlay.height())\n pix.fill(Qt.transparent)\n self.markerOverlay.setPixmap(pix)\n\n def DrawFixedScale(self):\n self.FixOverlays()\n\n if self.fixedscaleCheck.isChecked() and self.camOpen:\n scaleColor = QColor(220, 220, 220, 255)\n linewidth = 2\n basesize = int(np.round(self.camView.width()/2))\n midtickh1 = 10\n midtickh2 = 6\n ratioView = self.camView.width()/self.camView.height()\n ratioFrame = self.camThread.cam.frameW/self.camThread.cam.frameH\n scale = 1\n if ratioView < ratioFrame:\n scale = self.camThread.cam.frameW/self.camView.width()\n else:\n scale = self.camThread.cam.frameH/self.camView.height()\n micronScale = basesize*scale/self.microcalSpin.value()\n if micronScale <= 10:\n micronScale = int(np.round(micronScale))\n elif micronScale < 100:\n micronScale = 10*int(np.round(micronScale/10))\n else:\n micronScale = 100*int(np.round(micronScale/100))\n roundsize = int(np.round(micronScale*self.microcalSpin.value()/scale))\n posI = int(np.round((self.camView.width()/2 - roundsize/2)))\n posF = int(np.round((self.camView.width()/2 + roundsize/2)))\n bottom = self.camView.height() - midtickh1*2\n posmidtick = int(np.round(self.camView.width()/2)) - (int(np.round(linewidth/2)))\n posquartertick1 = posmidtick + int(np.round(roundsize/4)) - (int(np.round(linewidth/2)))\n posquartertick2 = posmidtick - int(np.round(roundsize/4)) - (int(np.round(linewidth/2)))\n labelHeight = int(np.round(midtickh1*6.5))\n\n pix = QPixmap(self.fscaleOverlay.width(), self.fscaleOverlay.height())\n pix.fill(Qt.transparent)\n now = datetime.datetime.now()\n painter = QPainter(pix)\n painter.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform, True)\n self.SetDrawPen(painter, linewidth, QColor(0, 0, 0, 160), True)\n painter.drawRect(0, self.camView.height() - labelHeight, self.camView.width(), labelHeight)\n self.SetDrawPen(painter, linewidth, scaleColor, False)\n painter.drawLine(QPoint(posI, bottom), QPoint(posF, bottom))\n painter.drawLine(QPoint(posI, bottom - midtickh1), QPoint(posI, bottom + midtickh1))\n painter.drawLine(QPoint(posF, bottom - midtickh1), QPoint(posF, bottom + midtickh1))\n painter.drawLine(QPoint(posmidtick, bottom - midtickh1), QPoint(posmidtick, bottom + midtickh1))\n painter.drawLine(QPoint(posquartertick1, bottom - midtickh2), QPoint(posquartertick1, bottom + midtickh2))\n painter.drawLine(QPoint(posquartertick2, bottom - midtickh2), QPoint(posquartertick2, bottom + midtickh2))\n painter.setFont(QFont(\"Sans\", pointSize=int(1.5*midtickh1)))\n rect = QRectF(0, bottom - 4*midtickh1, self.camView.width(), midtickh1*3)\n rect2 = QRectF(0, bottom - 1*midtickh1, self.camView.width(), midtickh1*3)\n painter.drawText(rect, str(micronScale) + \" \" + u\"\\u03BC\" + \"m\", QTextOption(Qt.AlignHCenter))\n painter.drawText(rect2, \" LCO\", QTextOption(Qt.AlignLeft))\n painter.drawText(rect2, str(now.year) + \"-\" + str(now.month) + \"-\" + str(now.day) + \" \", QTextOption(Qt.AlignRight))\n painter.end()\n self.fscaleOverlay.setPixmap(pix)\n else:\n pix = QPixmap(self.fscaleOverlay.width(), self.fscaleOverlay.height())\n pix.fill(Qt.transparent)\n self.fscaleOverlay.setPixmap(pix)\n\n 
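# The star marker drawn earlier in this record alternates between the outer
# radius r0 and r0/2 on every other vertex (r1 = r0*(i % 2 + 1)/2), stepping
# the angle by alpha = 2*pi/10. A minimal standalone sketch of that vertex
# generation, assuming the same 5-point, 10-vertex star as the loop bounds
# above (the helper name is hypothetical, not from the record):
import numpy as np

def star_vertices(cx, cy, r0, points=5):
    # A star with n points has 2n vertices; one angular step per vertex
    alpha = 2.0 * np.pi / (2 * points)
    verts = []
    for i in range(2 * points + 1):
        r1 = r0 * (i % 2 + 1) / 2  # alternate inner (r0/2) and outer (r0) radius
        omega = alpha * i
        verts.append((r1 * np.sin(omega) + cx, r1 * np.cos(omega) + cy))
    return verts  # consecutive pairs are the line segments to draw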
def CalcCalibrationScale(self):\n if self.camOpen:\n size = self.camThread.cam.ccdSize[0]/(self.lensmagSpin.value()*self.zoomSpin.value())\n scale = self.camThread.cam.maxW/size\n magic = 1.52\n self.microcalSpin.setValue(scale*magic)\n else:\n size = 12986/(self.lensmagSpin.value()*self.zoomSpin.value())\n scale = 2448/size\n magic = 1.52\n self.microcalSpin.setValue(scale*magic)\n self.DrawFixedScale()\n\n def CalcMicrons(self):\n microns = self.npixelsSpin.value()/self.microcalSpin.value()\n self.micronSpin.setValue(microns)\n\n def ShowReserved(self):\n aboutText = f\"\\nIn the future, there is going to be more functions in this menu, maybe even a neat toolbar!\\n\" \\\n \"\\nIt is on my To-Do list, honest!\\n\"\n\n about = QMessageBox()\n about.setWindowTitle(\"Reserved\")\n about.setText(\"Reserved Menu\")\n about.setInformativeText(aboutText)\n about.setStandardButtons(QMessageBox.Ok)\n about.setDefaultButton(QMessageBox.Ok)\n about.setIconPixmap(QPixmap(f\"hourglass.png\").scaledToHeight(64, Qt.SmoothTransformation))\n about.show()\n about.exec()\n\n def ShowGuide(self):\n aboutText = f\"\\nIn the future, this will be a short guide.\\n\" \\\n \"\\nIt is not available yet.\\n\"\n\n about = QMessageBox()\n about.setWindowTitle(\"Quick Guide\")\n about.setText(\"Quick Guide\")\n about.setInformativeText(aboutText)\n about.setStandardButtons(QMessageBox.Ok)\n about.setDefaultButton(QMessageBox.Ok)\n about.setIconPixmap(QPixmap(f\"guide.png\").scaledToHeight(64, Qt.SmoothTransformation))\n about.show()\n about.exec()\n\n def AboutQt(self):\n QMessageBox.aboutQt(self, \"About Qt\")\n\n def AboutPython(self):\n aboutText = f\"\\nThis software is using Python version {sys.version}.\\n\" \\\n \"\\nIt uses the following modules:\\n\"\n\n include_na = True\n moduleslist = []\n for module in sys.modules:\n fullnm = str(sys.modules[module])\n if not \".\" in module and not module.startswith(\"_\") and not \"(built-in)\" in fullnm:\n try:\n if ilmd_available:\n moduleslist.append(f\"{module}\\t{importlib.metadata.version(module)}, \")\n except:\n try:\n moduleslist.append(f\"{module}\\t{sys.modules[module].__version__}, \")\n except:\n try:\n if type(sys.modules[module].version) is str:\n moduleslist.append(f\"{module}\\t{sys.modules[module].version}, \")\n else:\n moduleslist.append(f\"{module}\\t{sys.modules[module].version()}, \")\n except:\n try:\n moduleslist.append(f\"{module}\\t{sys.modules[module].VERSION}, \")\n except:\n if include_na:\n moduleslist.append(f\"{module}, \")\n\n moduleslist.sort(key=str.lower)\n for text in moduleslist:\n aboutText += text\n aboutText = aboutText[:-2] + \".\\n\"\n\n about = QMessageBox()\n about.setWindowTitle(\"About Python\")\n about.setText(\"About Python\")\n about.setInformativeText(aboutText)\n about.setStandardButtons(QMessageBox.Ok)\n about.setDefaultButton(QMessageBox.Ok)\n pyicon = np.random.randint(1, 3)\n about.setIconPixmap(QPixmap(f\"python_{pyicon}.png\").scaled(64,64, Qt.IgnoreAspectRatio, Qt.SmoothTransformation))\n about.show()\n about.exec()\n\n def About(self):\n aboutText = \"\\nLCO Microscope Move PRO v.0.9\\n\" \\\n \"\\nThis software is made with Python and PyQt.\\n\" \\\n \"\\nCopyright © 2020\\n\" \\\n \"\\nBy Paulo Jarschel, no rights reserved.\\n
      \" \\\n\n QMessageBox.about(self, \"About\", aboutText)\n\n def Exit(self):\n self.capTimer.stop()\n self.CloseDevices()\n QCoreApplication.quit()\n \n \n def closeEvent(self, event):\n self.capTimer.stop()\n self.CloseDevices()\n\n\n#Run\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n window = MainWindow()\n\n sys.exit(app.exec_())","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":49981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447634421","text":"# import numpy\n# from RRTTree import RRTTree\n#\n# class HeuristicRRTPlanner(object):\n#\n# def __init__(self, planning_env, visualize):\n# self.planning_env = planning_env\n# self.visualize = visualize\n#\n#\n# def Plan(self, start_config, goal_config, epsilon = 0.001):\n#\n# tree = RRTTree(self.planning_env, start_config)\n# plan = []\n# if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n# self.planning_env.InitializePlot(goal_config)\n# # TODO: Here you will implement the rrt planner\n# # The return path should be an array\n# # of dimension k x n where k is the number of waypoints\n# # and n is the dimension of the robots configuration space\n#\n# plan.append(start_config)\n# plan.append(goal_config)\n#\n# return plan\n\n\n\nimport numpy\nfrom RRTTree import RRTTree\nimport time\n\n\nclass HeuristicRRTPlanner(object):\n\n def __init__(self, planning_env, visualize):\n self.planning_env = planning_env\n self.visualize = visualize\n self.tree = []\n self.path = []\n self.result = []\n self.arrived = False\n\n\n def Plan(self, start_config, goal_config, epsilon = 2.0):\n self.tree = RRTTree(self.planning_env, start_config)\n plan = []\n start_time = time.time()\n\n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n # TODO: Here you will implement the rrt planner\n # The return path should be an array\n # of dimension k x n where k is the number of waypoints\n # and n is the dimension of the robots configuration space\n\n # simple:\n # goal_tf = numpy.eye(4,dtype = float)\n # goal_tf[0,3] = goal_config[0]\n # goal_tf[1,3] = goal_config[1]\n #\n # start_tf = numpy.eye(4,dtype = float)\n # start_tf[0,3] = start_config[0]\n # start_tf[1,3] = start_config[1]\n\n self.tree.vertices = []\n # simple:\n # self.tree.AddVertex(start_tf)\n self.tree.AddVertex(start_config)\n print(\"tree vertex:{}\".format(start_config))\n count = 0\n\n while (True):\n count += 1\n\n # Generate random point\n if(count % 10 is not 0):\n qr = self.planning_env.GenerateRandomConfiguration()\n else:\n # simple:\n # qr = goal_tf\n qr = goal_config\n # Find the nearest point to the random point\n # qi: nearest point\n #simple\n # if(numpy.shape(qr) != (4, 4)):\n if (numpy.shape(qr) != (1, 7)):\n qr = self.planning_env.GenerateRandomConfiguration()\n\n qi_id, qi = self.tree.GetNearestVertex(qr)\n\n else:\n qi_id, qi = self.tree.GetNearestVertex(qr)\n\n qc = self.planning_env.Extend(qi,qr)\n\n qc_id = self.tree.AddVertex(qc)\n self.tree.AddEdge(qi_id, qc_id)\n\n # when reach the goal, break the while loop\n # if(self.planning_env.ComputeDistance(qc, goal_tf) < epsilon):\n print(\"compute distance:{}\".format(self.planning_env.ComputeDistance(qc, goal_config)))\n if(self.planning_env.ComputeDistance(qc, goal_config) < epsilon):\n print(\"Reach the goal is {} \".format(qc_id))\n break\n\n path = []\n level = 0\n visited = [False] * len(self.tree.vertices)\n # use DFS to find 
the path from the start_config to the goal_config\n # print(\"Print all paths is {} \".format(self.tree.vertices))\n self.printAllPathsUtil(0, qc_id,visited,path,level)\n\n # Hacky way to truncate the extra path return by the DFS\n idx = 0\n while(self.path[idx] is not qc_id):\n self.result.append(self.path[idx])\n idx += 1\n self.result.append(qc_id)\n\n # Look up the vertices given the vertices' id\n for i in self.result:\n v = self.tree.vertices[i]\n #simple:\n # plan.append((v[0,3],v[1,3]))\n plan.append(v)\n\n step_dist = self.planning_env.step\n all_step_dist = numpy.sum(len(plan) * step_dist)\n\n elapsed_time = time.time() - start_time\n vertices = len(self.tree.vertices)\n print(\"RRT: All step dist: {}, elapsed time is {} and vertices number {} \".format(all_step_dist,\n elapsed_time,\n vertices))\n\n # return N*Vertice_dimension ndarray path\n return plan\n\n\n def printAllPathsUtil(self, u, d, visited, path,level):\n level += 1\n visited[u] = True\n path.append(u)\n if u == d:\n print(\"---------------------------------------------\")\n print(\"This is path {}\".format(path))\n # self.result.append(path)\n self.path = path\n else:\n for i in self.tree.edges[u]:\n if visited[i] == False:\n print(\"level %d, val %d \"%(level,i))\n self.printAllPathsUtil(i,d,visited,path,level)","sub_path":"autonomy_3/handin/HeuristicRRTPlanner.py","file_name":"HeuristicRRTPlanner.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605058441","text":"import numpy as np\nfrom scipy.stats import norm\n\nN_con = 60\nN_exp = 60\n\n# Significance Level\nalpha = 0.05\n\nX_A = np.random.randint(100, size = N_con)\nX_B = np.random.randint(100, size = N_exp)\n\n# Calculating means of control and experimental groups\nmu_con = np.mean(X_A)\nmu_exp = np.mean(X_B)\n\nvariance_con = np.var(X_A)\nvariance_exp = np.var(X_B)\n\n# Pooled Variance\npooled_variance = np.sqrt(variance_con/N_con + variance_exp/N_exp)\n\n# Test statistics\nT = (mu_con-mu_exp)/np.sqrt(variance_con/N_con + variance_exp/N_exp)\n\n# two sided test and using symmetry property of Normal distibution so we multiple with 2\np_value = norm.sf(T)*2\n\n# Z-critical value\nZ_crit = norm.ppf(1-alpha/2)\n\n# Margin of error\nm = Z_crit*pooled_variance\n\n# Confidence Interval\nCI = [(mu_con - mu_exp) - m, (mu_con - mu_exp) + m]\n\n\nprint(\"Test Statistics stat: \", T)\nprint(\"Z-critical: \", Z_crit)\nprint(\"P_value: \", p_value)\nprint(\"Confidence Interval of 2 sample Z-test for proportions: \", np.round(CI,2))\n\nimport matplotlib.pyplot as plt\nz = np.arange(-3,3, 0.1)\nplt.plot(z, norm.pdf(z), label = 'Standard Normal Distribution',color = 'purple',linewidth = 2.5)\nplt.fill_between(z[z>Z_crit], norm.pdf(z[z>Z_crit]), label = 'Right Rejection Region',color ='y' )\nplt.fill_between(z[z<(-1)*Z_crit], norm.pdf(z[z<(-1)*Z_crit]), label = 'Left Rejection Region',color ='y' )\nplt.title(\"Two Sample Z-test rejection region\")\nplt.legend()\nplt.show()\n","sub_path":"Statistical-tests/2-Sample-2-sided-Z-test.py","file_name":"2-Sample-2-sided-Z-test.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163215368","text":"\"\"\"empty message\n\nRevision ID: ad29d4d85165\nRevises: 2424036140c0\nCreate Date: 2020-05-16 22:03:55.610552\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by 
Alembic.\nrevision = 'ad29d4d85165'\ndown_revision = '2424036140c0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('item_ibfk_1', 'item', type_='foreignkey')\n op.drop_column('item', 'category_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('item', sa.Column('category_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.create_foreign_key('item_ibfk_1', 'item', 'catalog', ['category_id'], ['id'])\n # ### end Alembic commands ###\n","sub_path":"migrations/ozon/versions/ad29d4d85165_.py","file_name":"ad29d4d85165_.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467090556","text":"import pandas as pd\nfrom pandas import json_normalize\nimport json\nimport argparse\nimport sys\nfrom sqlalchemy import create_engine\nimport sqlite3\nfrom importlib import reload\nchembl =\"sqlite:////home/fleer/Desktop/Tesis/Chembldb/chembl_27/chembl_27_sqlite/chembl_27/chembl_27_sqlite/chembl_27.db\"\nengine = create_engine(chembl)\nCHEMBL_VERSION = 27\n\ndef search_bypfam(pfam_id):\n find_pfam = (f''' SELECT a2.pchembl_value, a2.activity_comment, md.chembl_id as mol_chemblid,\n cr.compound_name, source_domain_id, md.max_phase\n FROM drug_mechanism dm\n JOIN binding_sites bs on bs.tid = dm.tid\n JOIN site_components sc ON sc.site_id =bs.site_id\n JOIN domains d2 ON d2.domain_id = sc.domain_id\n JOIN activities a2 ON dm.molregno = a2.molregno\n JOIN molecule_dictionary md ON md.molregno = dm.molregno\n JOIN compound_properties cp ON cp.molregno = md.molregno\n JOIN compound_records cr ON cr.molregno = cp.molregno\n WHERE a2.src_id = 15\n AND a2.standard_type = 'IC50'\n AND source_domain_id =\"{pfam_id}\"\n AND (NOT (a2.activity_comment LIKE 'inconclusive' OR a2.activity_comment LIKE 'undetermined')\n OR a2.activity_comment IS NULL)\n AND cp.PSA IS NOT NULL;''')\n# print(find_pfam)\n df = pd.read_sql(find_pfam, engine)\n return df\n#se puede agregar , g.pref_name a la query en sql\n\ndef Main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--input', help='Input pfam_file', type=argparse.FileType('r'), required=True)\n parser.add_argument('-o','--output', help='Output result must be .csv file',\n type=argparse.FileType('w'), default=sys.stdout)\n args = parser.parse_args()\n\n for pfam in args.input:\n pfam=pfam.strip()\n if pfam:\n df_drugs=search_bypfam(pfam)\n if len(df_drugs):\n df_drop = df_drugs.drop_duplicates(\"mol_chemblid\")\n df_drop.to_csv(args.output) ;\n else:\n print(f'No result for {pfam}', file=sys.stderr)\n\n return 0\n\n\n\nif __name__=='__main__':\n\t\tMain()\n","sub_path":"patho_chembl/pfam_trg_sql_mecanism.py","file_name":"pfam_trg_sql_mecanism.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353373645","text":"import datetime\nfrom django import forms\nfrom hw6.models import *\nfrom django.db import models\nfrom django.contrib.admin.widgets import AdminDateWidget\n\nclass TaskEditForm(forms.ModelForm):\n class Meta:\n model = Task\n fields = ['title', 'estimate', 'state']\n widgets = {\n 'estimate': SelectDateWidget(years=range(1960, 2100)),\n }\n\nclass TaskCreateForm(forms.ModelForm):\n class Meta:\n model = Task\n fields = ['title', 'estimate']\n widgets = 
{\n 'estimate': SelectDateWidget(years=range(1960, 2100)),\n }\nclass RoadmapForm(forms.ModelForm):\n class Meta:\n model = Roadmap\n fields = ['roadmap_name']\n","sub_path":"L6HW/homework6/hw6/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"182573699","text":"#!/usr/bin/env python3\n#\n# usage: plash noroot CMD\n\nimport os\nimport sys\nimport signal\nfrom subprocess import check_call, CalledProcessError\nfrom plashlib.utils import catch_and_die, get_plash_data, die\nimport tempfile\nfrom getpass import getuser\nfrom multiprocessing import Lock # that takes way too long to load\nimport signal\nimport ctypes\n\n# I do believe this libc constants are stable and pray every day for that\nCLONE_NEWNS = 0x00020000\nCLONE_NEWUSER = 0x10000000\nMS_REC = 0x4000\nMS_PRIVATE = 1<<18\n\n\ndef get_subs(query_user, subfile):\n 'get subuids or subgids for a user'\n with open(subfile) as f:\n read = f.readline()\n user, start, count = read.split(':')\n if user == query_user:\n return int(start), int(count)\n die('The user {} does not havy any subuids or subgids, please add some'.format(query_user))\n\ndef unshare_if_user(extra_setup_cmd=None):\n if not os.getuid():\n return\n os.environ['PLASH_DATA'] = get_plash_data()\n uid_start, uid_count = get_subs(getuser(), '/etc/subuid')\n gid_start, gid_count = get_subs(getuser(), '/etc/subgid')\n \n setup_cmds = [\n [\n 'newuidmap',\n str(os.getpid()),\n '0', str(os.getuid()), '1',\n '1', str(uid_start), str(uid_count)\n ], [\n 'newgidmap',\n str(os.getpid()),\n '0', str(os.getgid()), '1',\n '1', str(gid_start), str(gid_count)]]\n\n if extra_setup_cmd:\n setup_cmds.append(extra_setup_cmd)\n \n def prepare_unshared_proccess():\n for cmd in setup_cmds:\n with catch_and_die([CalledProcessError], debug='forked child'):\n check_call(cmd)\n\n # we need to call prepare_unshared_proccess\n # from outside of the unshared process\n lock = Lock()\n lock.acquire()\n child = os.fork()\n if not child:\n lock.acquire()\n prepare_unshared_proccess()\n sys.exit(0)\n # what the unshare binary does do\n libc = ctypes.CDLL('libc.so.6')\n assert libc.unshare(CLONE_NEWNS | CLONE_NEWUSER) != -1\n assert libc.mount(\"none\", \"/\", None, MS_REC|MS_PRIVATE, None) != -1\n\n lock.release()\n os.wait()\n\ndef unshare_if_root():\n if os.getuid():\n return\n libc = ctypes.CDLL('libc.so.6')\n assert libc.unshare(CLONE_NEWNS) != -1\n assert libc.mount(\"none\", \"/\", None, MS_REC|MS_PRIVATE, None) != -1\n","sub_path":"plashlib/unshare.py","file_name":"unshare.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549771566","text":"### test1.py: parallell processing, both function f1, f2 are executed\n\nimport threading\n\ndef f1(name):\n while True:\n print(\"{} is running\".format(name))\n\n\ndef f2(name):\n while True:\n print(\"{} is running\".format(name))\n\n\nif __name__ == \"__main__\":\n thread1 = threading.Thread(target=f1, args=(\"thread-1\", ))\n thread2 = threading.Thread(target=f2, args=(\"thread-2\", ))\n thread1.start()\n thread2.start()\n thread1.join()\n thread2.join()\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107388575","text":"\"\"\"Audit the dependencies of the conda-forge ecosystem\"\"\"\nimport 
os\nimport tempfile\nimport time\nimport traceback\nfrom collections import defaultdict\nfrom concurrent.futures._base import as_completed\n\nimport networkx as nx\nfrom depfinder.main import simple_import_search\nfrom grayskull.base.factory import GrayskullFactory\nfrom ruamel import yaml\n\nfrom conda_forge_tick.contexts import MigratorSessionContext, FeedstockContext\nfrom conda_forge_tick.git_utils import feedstock_url\nfrom conda_forge_tick.git_xonsh_utils import fetch_repo\nfrom conda_forge_tick.migrators.core import _get_source_code\nfrom conda_forge_tick.utils import load_graph, dump, load_feedstock, load, executor\nfrom conda_forge_tick.xonsh_utils import indir, env\n\n\ndef depfinder_audit_feedstock(fctx: FeedstockContext, ctx: MigratorSessionContext):\n \"\"\"Uses Depfinder to audit the requirements for a python package\n \"\"\"\n # get feedstock\n feedstock_dir = os.path.join(ctx.rever_dir, fctx.package_name + \"-feedstock\")\n origin = feedstock_url(fctx=fctx, protocol=\"https\")\n fetch_repo(\n feedstock_dir=feedstock_dir, origin=origin, upstream=origin, branch=\"master\",\n )\n recipe_dir = os.path.join(feedstock_dir, \"recipe\")\n\n # get source code\n cb_work_dir = _get_source_code(recipe_dir)\n with indir(cb_work_dir):\n # run depfinder on source code\n deps = simple_import_search(cb_work_dir, remap=True)\n for k in list(deps):\n deps[k] = set(deps[k])\n return deps\n\n\ndef grayskull_audit_feedstock(fctx: FeedstockContext, ctx: MigratorSessionContext):\n \"\"\"Uses grayskull to audit the requirements for a python package\n \"\"\"\n # TODO: come back to this, since CF <-> PyPI is not one-to-one and onto\n pkg_name = fctx.package_name\n pkg_version = fctx.attrs[\"version\"]\n recipe = GrayskullFactory.create_recipe(\n \"pypi\", pkg_name, pkg_version, download=False,\n )\n\n with tempfile.TemporaryDirectory() as td:\n recipe.generate_recipe(\n td,\n mantainers=list(\n {\n m: None\n for m in fctx.attrs[\"meta_yaml\"][\"extra\"][\"recipe-maintainers\"]\n },\n ),\n )\n with open(os.path.join(td, pkg_name, \"meta.yaml\"), \"r\") as f:\n out = f.read()\n return out\n\n\nAUDIT_REGISTRY = {\n \"depfinder\": {\"run\": depfinder_audit_feedstock, \"writer\": dump, \"ext\": \"json\"},\n # Grayskull produces a valid meta.yaml, there is no in memory representation for that so we just write out the\n # string\n \"grayskull\": {\n \"run\": grayskull_audit_feedstock,\n \"writer\": lambda x, f: f.write(x),\n \"dumper\": yaml.dump,\n \"ext\": \"yml\",\n },\n}\n\n\ndef inner_grayskull_comparison(meta_yaml, attrs, node):\n # load the feedstock with the grayskull meta_yaml\n try:\n new_attrs = load_feedstock(node, {}, meta_yaml=meta_yaml)\n except Exception as e:\n return str(e)\n requirement_keys = [\n k\n for k in new_attrs\n if \"requirements\" in k and k not in {\"requirements\", \"total_requirements\"}\n ]\n results = defaultdict(dict)\n for k in requirement_keys:\n for kk in attrs[k]:\n cf_attrs_k_kk = attrs[k][kk]\n gs_attrs_k_kk = new_attrs[k][kk]\n if cf_attrs_k_kk != gs_attrs_k_kk and (\n kk != \"test\" and gs_attrs_k_kk != set(\"pip\")\n ):\n results[k][kk] = {\"cf\": cf_attrs_k_kk, \"grayskull\": gs_attrs_k_kk}\n cf_minus_gs = cf_attrs_k_kk - gs_attrs_k_kk\n gs_minus_cf = gs_attrs_k_kk - cf_attrs_k_kk\n if cf_minus_gs:\n results[k][kk].update({\"cf_not_gs_diff\": cf_minus_gs})\n if gs_minus_cf:\n results[k][kk].update({\"gs_not_cf_diff\": gs_minus_cf})\n return dict(results) or False\n\n\ndef compare_grayskull_audits(gx):\n grayskull_files = os.listdir(\"audits/grayskull\")\n 
bad_inspections = {}\n\n if \"_net_audit.json\" in grayskull_files:\n grayskull_files.pop(grayskull_files.index(\"_net_audit.json\"))\n with open(\"audits/grayskull/_net_audit.json\", \"r\") as f:\n bad_inspections = load(f)\n\n futures = {}\n with executor(\"dask\", max_workers=20) as pool:\n\n for node, attrs in gx.nodes(\"payload\"):\n if not attrs.get(\"version\"):\n continue\n node_version = f\"{node}_{attrs['version']}\"\n if node_version in bad_inspections:\n continue\n # construct the expected filename\n expected_filename = f\"{node_version}.yml\"\n if expected_filename in grayskull_files:\n with open(\n os.path.join(\"audits/grayskull\", expected_filename), \"r\",\n ) as f:\n meta_yaml = f.read()\n futures[\n pool.submit(\n inner_grayskull_comparison,\n meta_yaml=meta_yaml,\n attrs=attrs,\n node=node,\n )\n ] = node_version\n for future in as_completed(futures):\n try:\n bad_inspections[futures[future]] = future.result()\n except Exception as e:\n bad_inspections[futures[future]] = str(e)\n\n with open(\"audits/grayskull/_net_audit.json\", \"w\") as f:\n dump(bad_inspections, f)\n return bad_inspections\n\n\ndef compare_depfinder_audits(gx):\n bad_inspection = {}\n files = os.listdir(\"audits/depfinder\")\n\n if \"_net_audit.json\" in files:\n files.pop(files.index(\"_net_audit.json\"))\n\n for node, attrs in gx.nodes(\"payload\"):\n if not attrs.get(\"version\"):\n continue\n node_version = f\"{node}_{attrs['version']}\"\n # construct the expected filename\n expected_filename = f\"{node_version}.json\"\n if expected_filename in files:\n with open(os.path.join(\"audits/depfinder\", expected_filename), \"r\") as f:\n output = load(f)\n if isinstance(output, str):\n bad_inspection[node_version] = output\n continue\n quest = output.get(\"questionable\", set())\n required_pkgs = output.get(\"required\", set())\n d = {}\n run_req = attrs[\"requirements\"][\"run\"]\n excludes = {\n node,\n node.replace(\"-\", \"_\"),\n node.replace(\"_\", \"-\"),\n \"python\",\n \"setuptools\",\n }\n cf_minus_df = run_req - required_pkgs - excludes - quest\n if cf_minus_df:\n d.update(cf_minus_df=cf_minus_df)\n df_minus_cf = required_pkgs - run_req - excludes\n if df_minus_cf:\n d.update(df_minus_cf=df_minus_cf)\n bad_inspection[node_version] = d or False\n with open(\"audits/depfinder/_net_audit.json\", \"w\") as f:\n dump(bad_inspection, f)\n return bad_inspection\n\n\ndef main(args):\n gx = load_graph()\n ctx = MigratorSessionContext(\"\", \"\", \"\")\n start_time = time.time()\n\n os.makedirs(\"audits\", exist_ok=True)\n for k in AUDIT_REGISTRY:\n os.makedirs(os.path.join(\"audits\", k), exist_ok=True)\n\n # TODO: generalize for cran skeleton\n # limit graph to things that depend on python\n python_des = nx.descendants(gx, \"pypy-meta\")\n for node in sorted(\n python_des, key=lambda x: (len(nx.descendants(gx, x)), x), reverse=True,\n ):\n if time.time() - int(env.get(\"START_TIME\", start_time)) > int(\n env.get(\"TIMEOUT\", 60 * 30),\n ):\n break\n # depfinder only work on python at the moment so only work on things\n # with python as runtime dep\n payload = gx.nodes[node][\"payload\"]\n for k, v in AUDIT_REGISTRY.items():\n version = payload.get(\"version\", None)\n ext = v[\"ext\"]\n if (\n not payload.get(\"archived\", False)\n and version\n and \"python\" in payload[\"requirements\"][\"run\"]\n and f\"{node}_{version}.{ext}\" not in os.listdir(f\"audits/{k}\")\n ):\n print(node)\n fctx = FeedstockContext(\n package_name=node, feedstock_name=payload[\"name\"], attrs=payload,\n )\n try:\n deps = 
v[\"run\"](fctx, ctx)\n except Exception as e:\n deps = {\n \"exception\": str(e),\n \"traceback\": str(traceback.format_exc()).split(\"\\n\"),\n }\n if \"dumper\" in v:\n deps = v[\"dumper\"](deps)\n finally:\n with open(f\"audits/{k}/{node}_{version}.{ext}\", \"w\") as f:\n v[\"writer\"](deps, f)\n\n compare_grayskull_audits(gx)\n compare_depfinder_audits(gx)\n","sub_path":"conda_forge_tick/audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":8941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156648290","text":"import tensorflow as tf\nfrom configs.pose_hrnet_config import POSE_HIGH_RESOLUTION_NET\nfrom models.pose_hrnet import basic_block, bottle_neck, high_resolution_module\n\n\nclass Model(object):\n def __init__(self):\n self.inplanes = 64\n self.image_size = [256, 256, 3]\n self.size_of_joints = 17\n self.is_training = False\n self.blocks_dict = {'BASIC': basic_block,\n 'BOTTLENECK': bottle_neck}\n self.stage2_cfg = POSE_HIGH_RESOLUTION_NET.STAGE2\n num_channels = self.stage2_cfg.NUM_CHANNELS\n block = self.blocks_dict[self.stage2_cfg.BLOCK]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition1 = self._make_transition_layer(num_channels_pre_layer=[256],\n num_channels_cur_layer=num_channels)\n self.stage2, prestage_channels = self._make_stage(self.stage2_cfg, num_inchannels=num_channels)\n self.stage3_cfg = POSE_HIGH_RESOLUTION_NET.STAGE3\n num_channels = self.stage3_cfg.NUM_CHANNELS\n block = self.blocks_dict[self.stage3_cfg.BLOCK]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition2 = self._make_transition_layer(num_channels_pre_layer=prestage_channels,\n num_channels_cur_layer=num_channels)\n self.stage3, prestage_channels = self._make_stage(self.stage3_cfg, num_inchannels=num_channels)\n\n self.stage4_cfg = POSE_HIGH_RESOLUTION_NET.STAGE4\n num_channels = self.stage4_cfg.NUM_CHANNELS\n block = self.blocks_dict[self.stage4_cfg.BLOCK]\n num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition3 = self._make_transition_layer(num_channels_pre_layer=prestage_channels,\n num_channels_cur_layer=num_channels)\n self.stage4, prestage_channels = self._make_stage(self.stage4_cfg,\n num_inchannels=num_channels,\n multi_scale_output=False)\n\n self.init_saver()\n\n def __call__(self, inputs, training):\n with tf.name_scope(\"PoseHRNet\"):\n inputs_identity = tf.identity(inputs)\n #STEM NET\n outputs = tf.layers.conv2d(inputs=inputs, filters=64, kernel_size=3, strides=2,\n use_bias=False, padding='SAME')\n outputs = tf.layers.batch_normalization(inputs=outputs, momentum=POSE_HIGH_RESOLUTION_NET.BN_MOMENTUM,\n training=training)\n outputs = tf.nn.relu(outputs)\n outputs = tf.layers.conv2d(inputs=outputs, filters=64, kernel_size=3, strides=2,\n use_bias=False, padding='SAME')\n outputs = tf.layers.batch_normalization(inputs=outputs, momentum=POSE_HIGH_RESOLUTION_NET.BN_MOMENTUM,\n training=training)\n outputs = tf.nn.relu(outputs)\n\n layer_1_output = self._get_layer_output(\"BootleNeck_1\", outputs, training=training, block=bottle_neck,\n planes=64, blocks=4)\n outputs = tf.identity(layer_1_output)\n with tf.name_scope(\"StageTwo\"):\n x_list = []\n for i in range(self.stage2_cfg.NUM_BRANCHES):\n with tf.name_scope(\"Transition_Layer_\" + str(i)):\n if self.transition1[i] is not None:\n if isinstance(self.transition1[i], list):\n 
x_list.append(self._inference_sequential(self.transition1[i], outputs, training))\n else:\n x_list.append(self.transition1[i](outputs, training=training))\n else:\n x_list.append(outputs)\n y_list = self._inference_stage(self.stage2, x_list, training=training)\n\n with tf.name_scope(\"StageThree\"):\n x_list = []\n for i in range(self.stage3_cfg.NUM_BRANCHES):\n with tf.name_scope(\"Transition_Layer_\" + str(i)):\n if self.transition2[i] is not None:\n if isinstance(self.transition2[i], list):\n x_list.append(self._inference_sequential(self.transition2[i], y_list[-1], training))\n else:\n x_list.append(self.transition2[i](y_list[-1], training=training))\n else:\n x_list.append(y_list[i])\n y_list = self._inference_stage(self.stage3, x_list, training=training)\n\n with tf.name_scope(\"StageFour\"):\n x_list = []\n for i in range(self.stage4_cfg.NUM_BRANCHES):\n with tf.name_scope(\"Transition_Layer_\" + str(i)):\n if self.transition3[i] is not None:\n if isinstance(self.transition3[i], list):\n x_list.append(self._inference_sequential(self.transition3[i], y_list[-1], training))\n else:\n x_list.append(self.transition3[i](y_list[-1], training=training))\n else:\n x_list.append(y_list[i])\n y_list = self._inference_stage(self.stage4, x_list, training=training)\n with tf.name_scope(\"FinalLayer\"):\n outputs = tf.layers.conv2d(inputs=y_list[0], filters=self.size_of_joints,\n kernel_size=POSE_HIGH_RESOLUTION_NET.FINAL_CONV_KERNEL, strides=1,\n padding='SAME' if POSE_HIGH_RESOLUTION_NET.FINAL_CONV_KERNEL == 3\n else 'VALID')\n return outputs\n\n def _get_layer_output(self, namescope, inputs, training, block, planes, blocks, stride=1):\n downsample = None\n with tf.name_scope(namescope):\n outputs = tf.identity(inputs)\n if stride != 1 or self.inplanes != planes * block.expansion:\n def downsample(inputs, training):\n outputs = tf.layers.conv2d(inputs=inputs, filters=planes * block.expansion, kernel_size=1,\n strides=stride, use_bias=False, padding='SAME')\n outputs = tf.layers.batch_normalization(inputs=outputs, momentum=POSE_HIGH_RESOLUTION_NET.BN_MOMENTUM,\n training=training)\n return outputs\n layer = block.Model(namescope=\"layer_0\", planes=planes, stride=stride, downsample=downsample)\n outputs = layer(outputs, training=training)\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layer = block.Model(\"layer_\" + str(i), planes=planes)\n outputs = layer(outputs, training=training)\n return outputs\n\n def _make_transition_layer(self, num_channels_pre_layer,\n num_channels_cur_layer):\n\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n channels = num_channels_cur_layer[i]\n def block_1(inputs, training):\n outputs = tf.layers.conv2d(inputs=inputs, filters=channels, kernel_size=3,\n strides=1, use_bias=False, padding='SAME')\n outputs = tf.layers.batch_normalization(inputs=outputs, training=training)\n outputs = tf.nn.relu(outputs)\n return outputs\n transition_layers.append(block_1)\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i+1-num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n outchannels = num_channels_cur_layer[i] if j == i-num_branches_pre else inchannels\n\n def block_2(inputs, training):\n outputs = tf.layers.conv2d(inputs=inputs, filters=outchannels, kernel_size=3,\n strides=2, use_bias=False, padding='SAME')\n 
outputs = tf.layers.batch_normalization(inputs=outputs, training=training)\n outputs = tf.nn.relu(outputs)\n return outputs\n conv3x3s.append(block_2)\n transition_layers.append(conv3x3s)\n return transition_layers\n\n def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):\n num_modules = layer_config.NUM_MODULES\n num_branches = layer_config.NUM_BRANCHES\n num_blocks = layer_config.NUM_BLOCKS\n num_channels = layer_config.NUM_CHANNELS\n block = self.blocks_dict[layer_config.BLOCK]\n fuse_method = layer_config.FUSE_METHOD\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n\n modules.append(high_resolution_module.Model(\"high_resolution_module_\" + str(i),\n num_branches, block, num_blocks, num_inchannels, num_channels,\n fuse_method, reset_multi_scale_output))\n return modules, modules[-1].get_num_inchannels()\n\n def _inference_sequential(self, stage_modules, inputs, training):\n outputs = tf.identity(inputs)\n for i in range(len(stage_modules)):\n outputs = stage_modules[i](outputs, training)\n return outputs\n\n def _inference_stage(self, stage_modules, inputs, training):\n outputs = []\n for i in range(len(inputs)):\n outputs.append(tf.identity(inputs[i]))\n for i in range(len(stage_modules)):\n outputs = stage_modules[i](outputs, training)\n return outputs\n\n def init_saver(self):\n pass\n","sub_path":"models/pose_hrnet/pose_hrnet_model.py","file_name":"pose_hrnet_model.py","file_ext":"py","file_size_in_byte":10777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161429868","text":"#-*- coding: UTF-8 -*-\n\nfrom flask import g\n\nclass Collection:\n\n# GET\n\n\t# get an author's all collections\n\t@staticmethod\n\tdef get_collections_by_author(authorID):\n\t\tquery = \"SELECT * FROM collection WHERE AuthorID = %d\" % authorID\n\t\tg.cursor.execute(query)\n\t\treturn g.cursor.fetchall()\n\n\t# get single collection\n\t@staticmethod\n\tdef get_collection(collectionID):\n\t\tquery = '''SELECT collection.CollectionID, collection.Collection, collection.Introduction, author.AuthorID, author.Author, author.Abbr AS AuthorAbbr, dynasty.Dynasty, dynasty.Abbr AS DynastyAbbr\\n\n\t\t\tFROM collection, author, dynasty\\n\n\t\t\tWHERE collection.AuthorID = author.AuthorID\\n\n\t\t\tAND author.DynastyID = dynasty.dynastyID\n\t\t\tAND collectionID = %d''' % collectionID\n\t\tg.cursor.execute(query)\n\t\treturn g.cursor.fetchone()\n\n# NEW\n\n\t# add a collection\n\t@staticmethod\n\tdef add_collection(collection, authorID, introduction):\n\t\tquery = '''INSERT INTO collection (Collection, AuthorID, Introduction) VALUES\\n\n\t\t\t('%s', %d, '%s')''' % (collection, authorID, introduction)\n\t\tg.cursor.execute(query)\n\t\tg.conn.commit()\n\t\treturn g.cursor.lastrowid\n\n# EDIT\n\n\t# edit a collection\n\t@staticmethod\n\tdef edit_collection(collection, authorID, introduction, collectionID):\n\t\tquery = '''UPDATE collection SET Collection = '%s', AuthorID = %d, Introduction = '%s'\\n\n\t\t\tWHERE CollectionID = %d''' % (collection, authorID, introduction, collectionID)\n\t\tg.cursor.execute(query)\n\t\treturn g.conn.commit()","sub_path":"xichuangzhu/models/collection_model.py","file_name":"collection_model.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
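# The Collection model in the record above interpolates caller-supplied values
# straight into SQL with %-formatting, which is open to SQL injection. A
# minimal sketch of the same lookup using driver-side parameter binding
# instead, assuming a DB-API cursor like the g.cursor that record uses
# (table and column names are taken from the record; the function name is
# hypothetical):
def get_collections_by_author(cursor, author_id):
    # The driver escapes author_id; the query string itself stays constant
    cursor.execute("SELECT * FROM collection WHERE AuthorID = %s", (author_id,))
    return cursor.fetchall()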
+{"seq_id":"220916075","text":"'''\nCreated on Jan 22, 2018\n\n@author: PATI\n'''\nfrom repo.repoJ import ExceptionR\nfrom repo.repoF import ExceptionRF\nfrom domain.jucator import Jucator\nfrom repo.repoF import RepoF\nfrom service.serviceF import ServiceF\nfrom random import random, randint\nfrom test.test_buffer import randitems, rand_structure\n\nclass ExceptionC(Exception):\n \"\"\"\n Clasa de exceptii pentru consola\n \"\"\"\n def __init__(self,*args,**kwargs):\n Exception.__init__(self,*args,**kwargs)\n\nclass Console(object):\n '''\n Clasa consola care permite interactiunea cu utilizatorul\n '''\n\n\n def __init__(self, service):\n '''\n Initailizam campul service cu service-ul pentru jucatori\n '''\n self.__service=service\n \n \"\"\"\n Functia citeste de la tastatura un nume prenume o inaltime si un post \n Functia adauga un jucator cu atributele de mai sus in fisier\n \"\"\"\n def __uiAdaugaJucator(self):\n try:\n nume=input(\"Dati numele jucatorului: \")\n prenume=input(\"Dati prenumele jucatorului: \")\n inaltime=int(input(\"Dati inaltimea jucatorului: \"))\n post=input(\"Dati postul: \")\n if nume==\"\" or prenume==\"\":\n raise ExceptionC(\"Numele si prenumele nu pot fi vide!!!\")\n if inaltime<0:\n raise ExceptionC(\"Inaltimea trebuie sa fie pozitiva!!!\")\n if post!=\"Fundas\" and post!=\"Pivot\" and post!=\"Extrema\":\n raise ExceptionC(\"Postul trebuie sa fie unul dintre: Fundas, Pivot, Extrema\")\n self.__service.add(Jucator(nume,prenume,inaltime,post))\n print(\"Jucator adaugat cu succes!\")\n \n except ValueError:\n print(\"Valori invalide!!!\")\n \n \"\"\"\n Functia citeste de la tastatura un nume prenume o inaltime\n Functia modifica inaltimea unui jucator cu atributele de mai sus in fisier\n \"\"\"\n def __uiModificaInaltime(self):\n try:\n nume=input(\"Dati numele jucatorului: \")\n prenume=input(\"Dati prenumele jucatorului: \")\n inaltime=int(input(\"Dati inaltimea jucatorului: \"))\n if nume==\"\" or prenume==\"\":\n raise ExceptionC(\"Numele si prenumele nu pot fi vide!!!\")\n if inaltime<0:\n raise ExceptionC(\"Inaltimea trebuie sa fie pozitiva!!!\")\n self.__service.mod(nume,prenume,inaltime)\n print(\"Inaltime modificata cu succes!\")\n except ValueError:\n print(\"Valori invalide!!!\")\n \n \"\"\"\n Functia returneaza o lista care contine jucatorii care alcatuiesc o echipa\n \"\"\"\n def __uiEchipa(self):\n rez=self.__service.echipa()\n print(\"Echipa este:\")\n for x in rez:\n print(x)\n \n \"\"\"\n Citeste de la tastatura un nume de fisier si adauga in lista numele jucatorilor alaturi d eo inaltime si un post random\n \"\"\"\n def __uiImport(self):\n try:\n nrinit=len(self.__service.getAll())\n nume=input(\"Dati numele fisierului existent: \")\n repoF=RepoF(nume)\n serviceF=ServiceF(repoF)\n alls=serviceF.getAll()\n for x in alls:\n nume=x[0]\n prenume=x[1]\n inaltime=randint(150,250)\n l=[\"Fundas\",\"Pivot\",\"Extrema\"]\n poz=randint(0,2)\n post=l[poz]\n try:\n if nume==\"\" or prenume==\"\":\n raise ExceptionC(\"Numele si prenumele nu pot fi vide!!!\")\n if inaltime<0:\n raise ExceptionC(\"Inaltimea trebuie sa fie pozitiva!!!\")\n if post!=\"Fundas\" and post!=\"Pivot\" and post!=\"Extrema\":\n raise ExceptionC(\"Postul trebuie sa fie unul dintre: Fundas, Pivot, Extrema\")\n self.__service.add2(Jucator(nume,prenume,inaltime,post))\n \n except ValueError:\n print(\"Valori invalide!!!\")\n print(\"Jucatorii care nu existau deja in fisier au fost importati cu succes!\")\n nrinit2=len(self.__service.getAll())\n print(\"Nr de jucatori adaugati este: \")\n 
print (nrinit2-nrinit)\n \n except ExceptionRF as ve:\n print(ve)\n \n \"\"\"\n Functia care ruleaza aplicatia\n \"\"\"\n def run(self):\n while True:\n print(\"\")\n print(\"1. Adauga jucator\")\n print(\"2. Modifica inaltimea\")\n print(\"3. Tipareste echipa\")\n print(\"4. Importa jucatori\")\n print(\"5. Exit\")\n print(\"\")\n \n try:\n cmd=input(\"Alegeti optiunea dorita: \")\n if cmd=='1':\n self.__uiAdaugaJucator()\n if cmd=='2':\n self.__uiModificaInaltime()\n if cmd=='3':\n self.__uiEchipa()\n if cmd=='4':\n self.__uiImport()\n if cmd=='5':\n return\n \n except ValueError:\n print(\"Valoare invalida!\")\n except ExceptionC as ve:\n print(ve)\n except ExceptionR as ve:\n print(ve)","sub_path":"Python/Application/ui/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"262532391","text":"# -*- coding: UTF-8 -*-\n\nimport base64\nimport platform\nimport json\nimport re\n\nclass LinuxAndWindowsConversion:\n # 采集过程中的数据处理\n def transform(self, data, key=\" \"):\n sysstr = platform.system()\n if (sysstr == \"Windows\"):\n data = str(data) # windows\n elif (sysstr == \"Linux\"):\n if key == 'uni_code':\n data = data.encode('utf-8') # Linux\n else:\n data = str(data) # windows\n\n # 采坑:由于采集的详情信息为富文本类型,其中含有class=\"XXX\"等字符串,而json字符串中的键值对用\",在最后执行sql语句的时候用',需要把data进行base64加密\n if key == \"content_picture_detail_content\":\n # pattern = re.compile('\"')\n # data = re.sub(pattern, \"'\", data)\n # 采坑\n if (sysstr == \"Windows\"):\n data = self.base64Encrypt(data)\n elif (sysstr == \"Linux\"):\n data = self.base64Encrypt(data.encode('utf-8'))\n else:\n data = self.base64Encrypt(data)\n return data\n\n # base64加密\n # data 必须为字符串,要么转成json格式,要么进行utf-8转义\n def base64Encrypt(self, data):\n sysstr = platform.system()\n if (sysstr == \"Windows\"):\n json_content = str(base64.b64encode(data.encode('utf-8')), 'utf-8')\n elif (sysstr == \"Linux\"):\n json_content = base64.b64encode(data)\n else:\n json_content = str(base64.b64encode(data.encode('utf-8')), 'utf-8')\n return json_content\n\n # 采坑:处理json中的Unicode(专一的json字符串中Unicode的斜杠在执行sql语句的时候当成转义字符处理了,需要把\\转换为\\\\)\n def handleUnicode(self, data):\n data = data.replace('\\\\u', '\\\\\\\\u')\n return data","sub_path":"meishu/utils/LinuxAndWindowsConversion.py","file_name":"LinuxAndWindowsConversion.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272589968","text":"\"\"\" \n CES-35 - Redes de Computadores\n Laboratório 1 - Sockets\n Nicholas Scharan Cysne (PMG)\n\n request.py\n\"\"\"\n\nimport json\n\nfrom socket import gethostbyname\n\nclass Request:\n def __init__(self, obj=dict()) -> None:\n self.version = obj.get(\"version\", \"HTTP/1.0\")\n self.host = obj.get(\"host\",\"\")\n self.port = obj.get(\"port\",\"\")\n self.url = obj.get(\"url\", \"/\")\n self.method = obj.get(\"method\",\"GET\")\n\n def encode(self):\n msg = \" \".join([self.method, self.url, self.version])\n msg += \"\\r\\n\" + \"Host: \" + self.host + \":\" + self.port\n msg += \"\\r\\n\" + \"Accept: text/html\"\n msg += \"\\r\\n\\r\\n\"\n # Convert to bytes\n return bytes(msg, encoding='utf-8') \n\n def parse(byteObj):\n req = dict()\n # Decode byte object\n msg = byteObj.decode('utf-8')\n # Separate field in HTTP Request \n obj = msg.split(\"\\r\\n\")\n # Retrieve information from HTTP Request\n header = obj[0].split(\" \")\n req['method'] = header[0]\n 
req['url'] = header[1]\n req['version'] = header[2]\n host_info = obj[1].split(\" \")\n destination = host_info[1].split(\":\")\n req['host'] = destination[0]\n req['port'] = destination[1]\n\n # Return Request object\n return Request(req)\n\n def parseURL(url):\n # Split http://host:port/file into [\"http:\",\"\",\"host\",\"port\",\"file\"] \n s = url.split(\"/\")\n # Split host:port\n destination = s[2].split(\":\")\n host = destination[0].replace(\"www.\",\"\")\n port = destination[1]\n file = s[3]\n\n return {\n 'host': host, # Host Name\n 'addr': gethostbyname(host), # Host IP Address\n 'port': port, # Port Number\n 'url': \"/\" + file # File of interest\n }","sub_path":"CES-35/Lab1/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110767556","text":"# CIRC17 - IR Replay\n# (CircuitPython)\n# this circuit was designed for use with the Metro Express Explorers Guide on Learn.Adafruit.com\n\n# by Asher Lieber for Adafruit Industries\n\nimport time\nimport board\nimport pulseio\nimport digitalio\nimport array\n\n# 38 for NEC\nir_led = pulseio.PWMOut(board.D3, frequency=1000*38, duty_cycle=0) \nir_led_send = pulseio.PulseOut(ir_led)\n\nrecv = pulseio.PulseIn(board.D2, maxlen=150, idle_state = True)\n\n# creating DigitalInOut objects for a record and play button\nbut0 = digitalio.DigitalInOut(board.D9) # record\nbut1 = digitalio.DigitalInOut(board.D8) # playback\n\n# setting up indicator LED\nled = digitalio.DigitalInOut(board.D13)\nled.switch_to_output()\n\n# waits for IR to be detected, returns\ndef get_ir():\n ir_f = array.array('H')\n print('waiting for ir')\n recv.clear()\n while len(recv) == 0:\n pass\n # time to collect\n time.sleep(.2) \n print(len(recv))\n for i in range(len(recv)):\n ir_f.append(recv[i])\n recv.clear()\n time.sleep(.01)\n print('recieved')\n return ir_f\n\ndef send_ir(ir_f):\n # enable_out()\n ir_led.duty_cycle = (2**16)//3 #???\n time.sleep(.4)\n print('sending')\n if ir_f[0] == 65535:\n ir_led_send.send(ir_f[1:])\n else:\n ir_led_send.send(ir_f)\n time.sleep(.5) # give some cooldown time\n ir_led.duty_cycle = 0\n\n# so nothing devastating happens if play before record\nto_send = array.array('H')\nwhile True:\n # while (but0.value or but1.value):\n # pass\n # record\n if not but0.value:\n led.value = True\n to_send = get_ir()\n if len(to_send) != 72:\n for i in range(2):\n time.sleep(.1)\n led.value = not led.value\n led.value = False\n if not but1.value:\n for i in range(5):\n time.sleep(.05)\n led.value = not led.value\n if len(to_send) != 0:\n send_ir(to_send)\n led.value = False\n","sub_path":"circuitpython/CIRC17/ir_replay.py","file_name":"ir_replay.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637220805","text":"from docutils.parsers.rst import Directive\nfrom docutils.transforms import Transform\nfrom typing import Any\n\nlogger: Any\nON_RTD: Any\nneeds_sphinx: str\nextensions: Any\nproject: str\ncopyright: str\nauthor: str\n\ndef read_version(): ...\ndef read_minimum_emacs_version(): ...\n\nrelease: Any\nversion: Any\nsource_suffix: str\nmaster_doc: str\nrst_prolog: Any\nexclude_patterns: Any\ndefault_role: str\nprimary_domain: str\ntemplates_path: Any\npygments_style: str\nnitpicky: bool\nnitpick_ignore: Any\nhtml_theme: str\nhtml_theme_options: Any\nhtml_sidebars: Any\nhtml_static_path: Any\nhtml_favicon: 
str\nlinkcheck_ignore: Any\nintersphinx_mapping: Any\nextlinks: Any\ntodo_include_todos: bool\n\nclass SupportedLanguage(Directive):\n required_arguments: int = ...\n final_argument_whitespace: bool = ...\n has_content: bool = ...\n option_spec: Any = ...\n def run(self): ...\n\nclass SyntaxCheckerConfigurationFile(Directive):\n required_arguments: int = ...\n final_argument_whitespace: bool = ...\n def run(self): ...\n\nclass IssueReferences(Transform):\n ISSUE_PATTERN: Any = ...\n ISSUE_URL_TEMPLATE: str = ...\n default_priority: int = ...\n def apply(self) -> None: ...\n\ndef build_offline_html(app: Any) -> None: ...\ndef add_offline_to_context(app: Any, _pagename: Any, _templatename: Any, context: Any, _doctree: Any) -> None: ...\ndef setup(app: Any) -> None: ...\n","sub_path":"Result/4079files/Uninf_noImp_noSemantic/1531-Uninf_noImp_noSemantic.pyi","file_name":"1531-Uninf_noImp_noSemantic.pyi","file_ext":"pyi","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407946926","text":"\"\"\"application.py -- top-level web application for wayback-discover-diff.\n\"\"\"\nimport logging.config\nimport os\nfrom celery import Celery\nfrom flask_cors import CORS\nfrom redis import StrictRedis, BlockingConnectionPool\nfrom wayback_discover_diff import stats\nfrom wayback_discover_diff.util import load_config\nfrom wayback_discover_diff.discover import Discover\n\n# Init config\nCFG = load_config()\n\n# Init logging\nlogconf = CFG.get('logging')\nif logconf:\n logging.config.dictConfig(logconf)\n\n# Init statsd client\nstats_conf = CFG.get('statsd')\nif isinstance(stats_conf, dict):\n stats.configure(**stats_conf)\n\n# Init Celery app\nCELERY = Celery(**CFG['celery'])\nCELERY.register_task(Discover(CFG))\n\n# Init Flask app\nfrom . 
import web\nAPP = web.get_app(CFG)\n\n# Initialize CORS support\ncors = CFG.get('cors')\nif cors:\n CORS(APP, origins=cors)\n\n# Initialize Celery and Redis\nAPP.celery = CELERY\nAPP.redis = StrictRedis(\n connection_pool=BlockingConnectionPool.from_url(\n **CFG.get('redis')\n )\n )\n\n# ensure the instance folder exists\ntry:\n os.makedirs(APP.instance_path)\nexcept OSError:\n pass\n","sub_path":"wayback_discover_diff/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225017152","text":"# Routines to check the forces and hessian numerically\n\n#--------------Numerical forces dU/dx -------------------------------\ndef bond_num(bond_style,nbonds,bonds,bondcoeff,pos,acc):\n\n pbond = 0\n\n# print(\"Analytical forces\",pos.size)\n# print(acc)\n print(\"Calculating Numerical Forces\")\n acc_num = numpy.zeros(pos.size)\n acc_d = numpy.zeros(acc.shape)\n h = .000001\n\n post = pos.reshape(pos.size)\n # print(post.reshape((-1,3)))\n # uses df(x)/dx = (f(x+h)-f(x-h))/(2h)\n pbond = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n for i in range(post.size):\n tpos = post[i]\n post[i] = tpos + h\n pbond1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tpos - h\n pbondm1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tpos\n\n acc_num[i] = -(pbond1-pbondm1)/(2.0*h)\n #print(i,pbond,pbond1,pbondm1)\n\n numpy.copyto(acc,acc_num.reshape(-1,3))\n return pbond\n\n#-----------------------Numerical second derivative --------------------\ndef bond_hess_num(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses,hess_num):\n\n pbond = 0\n h = .0001\n ma = masses.reshape(pos.size)\n acc_d = numpy.zeros(acc.shape)\n\n print(\"Calculating Numerical 2nd derivatives\")\n post = pos.reshape(pos.size)\n # print(post.reshape((-1,3)))\n for i in range(post.size):\n tpos = post[i]\n # uses d^2f(x)/ dx dx = (f(x+h,y)+f(x-h,y)-2f(x,y))/(hh)\n pbond = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tpos - h\n pbondm1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tpos + h\n pbond1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tpos\n\n hess_num[i][i] = (pbond1+pbondm1-2.0*pbond)/(h*h*ma[i]) #diagonal with mass weight\n\n #print(i,pbond1,pbondm1,pbond,h,ma[i])\n #print(i,pbond,hess_num[i][i],post)\n for j in range(i+1,post.size): # off diagonals\n # uses d^2f(x)/ dx dy = (f(x+h,y+h)+f(x-h,y-h)-f(x+h,y-h)-f(x-h,y+h))/(4hh)\n tposi = post[i]\n tposj = post[j]\n post[i] = tposi - h\n post[j] = tposj - h\n pbondm1m1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[j] = tposj + h\n pbondm11 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tposi + h\n pbond11 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[j] = tposj - h\n pbond1m1 = bond_force(bond_style,nbonds,bonds,bondcoeff,post.reshape(-1,3),acc_d)\n post[i] = tposi\n post[j] = tposj\n\n hess_num[i][j] = (pbond11+pbondm1m1-pbondm11-pbond1m1)/(h*h*4.0)\n hess_num[i][j] /= math.sqrt(ma[i]*ma[j])\n hess_num[j][i] = hess_num[i][j]\n\n#-------------------------------------------------\ndef bond(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses):\n\n #print(\"Calculating bond forces\")\n pbond = 0\n pbond = 
bond_force(bond_style,nbonds,bonds,bondcoeff,pos,acc)\n\n return pbond\n # check routines...\n #check_forces(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses)\n #print(acc)\n #check_inm(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses)\n #exit(1)\n\n#----------------------------------------------------------\ndef bond_hess(bond_style,nbonds,bonds,bondcoeff,pos,masses,hessian):\n inm(bond_style,nbonds,bonds,bondcoeff,pos,masses,hessian)\n\n#----------------------------------------------------------\ndef check_forces(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses):\n #check forces\n\n acc.fill(0) # rezero forces\n pbond = bond_force(bond_style,nbonds,bonds,bondcoeff,pos,acc)\n\n acc_num = numpy.copy(acc)\n bond_num(bond_style,nbonds,bonds,bondcoeff,pos,acc_num)\n\n tol = 1e-6\n diff = acc_num-acc\n mv = max(diff.max(),abs(diff.min()))\n\n if(mv < tol):\n print(\"Forces Match, pbond =\",pbond,mv)\n else:\n print(\"Forces DO NOT Match!\")\n print(\"Analytical\")\n print(acc)\n print(\"Numerical\")\n print(acc_num)\n print(\"Diff = \",diff)\n\n#----------------------------------------------------------\ndef check_inm(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses):\n\n print(\"Calculating Hessian\")\n hessian = numpy.zeros((pos.size,pos.size))\n bond_hess(bond_style,nbonds,bonds,bondcoeff,pos,masses,hessian)\n #print(hessian)\n\n #print(pos.size)\n hess_num = numpy.zeros((pos.size,pos.size))\n bond_hess_num(bond_style,nbonds,bonds,bondcoeff,pos,acc,masses,hess_num)\n #print(hess_num)\n\n hdiff = hess_num-hessian\n mv = max(hdiff.max(),abs(hdiff.min()))/hessian.max()\n\n rdiff = numpy.sum(numpy.sum(hdiff*hdiff,axis=1))\n tol = 1e-5\n print(rdiff,mv)\n if(rdiffmax):\r\n max = model[words[j]][i]\r\n max_arr.append(max)\r\n return max_arr\r\n\r\n def min_fun(words, model):\r\n min_arr=[]\r\n for i in range(300):\r\n min = model[words[0]][i]\r\n for j in range(len(words)):\r\n if(model[words[j]][i] 5):\r\n f2.write(\"Individual hates others mostly and the percentage of hating tweets is \"+str(hate_tweets_percent)+\".\\n\")\r\n else:\r\n f2.write(\"Individual doesn't hates others mostly and the percentage of hating tweets is \"+str(hate_tweets_percent)+\".\\n\")\r\n\r\n\r\n with open('pickle_models/clf_sex.pickle', 'rb') as f:\r\n sexist_model = pickle.load(f)\r\n sexism_predicted=sexist_model.predict(tweets_translated)\r\n sexism_tweets=0\r\n for i in sexism_predicted:\r\n if (i==1):\r\n sexism_tweets=sexism_tweets+1\r\n sexism_tweets_percent=(sexism_tweets/len(sexism_predicted))*100 #sexism percent\r\n\r\n if(sexism_tweets_percent > 5):\r\n f2.write(\"Individual promotes sexism and has a percentage of tweets related to it is \"+str(sexism_tweets_percent)+\".\\n\")\r\n else:\r\n f2.write(\"Individual doesn't promotes sexism and has a percentage of tweets related to it is \"+str(sexism_tweets_percent)+\".\\n\")\r\n\r\n\r\n \r\n\r\n \r\n with open('pickle_models/bullying_model_file', 'rb') as f:\r\n bullying_model = pickle.load(f)\r\n with open('pickle_models/countvector_bullying_model_file', 'rb') as f:\r\n bullying_countvector = pickle.load(f)\r\n tweets_cvb=bullying_countvector.transform(tweets_translated)\r\n bullying_predicted=bullying_model.predict(tweets_cvb)\r\n bullying_tweets=0\r\n non_bullyin_tweets=0\r\n for i in bullying_predicted:\r\n if (i==1):\r\n bullying_tweets=bullying_tweets+1\r\n bullying_tweets_percent=(bullying_tweets/len(bullying_predicted))*100 #bullying_percent\r\n\r\n if(bullying_tweets_percent > 1):\r\n f2.write(\"The individual has more bullying personality against 
others and the percentage of bullying tweets is \"+str(bullying_tweets_percent)+\".\\n\")\r\n else:\r\n f2.write(\"The individual has less bullying personality against others and the percentage of bullying tweets is \"+str(bullying_tweets_percent)+\".\\n\")\r\n\r\n \r\n \r\n lst=[]\r\n lst_of_traits=[]\r\n text =tweets_translated \r\n New_vectors=vector_for_tweet(text)\r\n infile = open('pickle_models/O_model.sav','rb')\r\n o = pickle.load(infile)\r\n infile = open('pickle_models/C_model.sav','rb')\r\n c = pickle.load(infile)\r\n infile = open('pickle_models/E_model.sav','rb')\r\n e = pickle.load(infile)\r\n infile = open('pickle_models/A_model.sav','rb')\r\n a = pickle.load(infile)\r\n infile = open('pickle_models/N_model.sav','rb')\r\n n = pickle.load(infile)\r\n\r\n y_pred=o.predict(New_vectors)\r\n count=count_ones_and_zeros_percent(y_pred)\r\n if count>50:\r\n lst_of_traits.append(\"Openness trait is high\")\r\n else:\r\n lst_of_traits.append(\"Openness trait is less\")\r\n y_pred=c.predict(New_vectors)\r\n count=count_ones_and_zeros_percent(y_pred)\r\n if count>50:\r\n lst_of_traits.append(\"Conscientiousness trait is high\")\r\n else:\r\n lst_of_traits.append(\"Conscientiousness trait is Less\")\r\n y_pred=e.predict(New_vectors)\r\n count=count_ones_and_zeros_percent(y_pred)\r\n if count>50:\r\n lst_of_traits.append(\"Extraversion trait is high\")\r\n else:\r\n lst_of_traits.append(\"Extraversion trait is Less\")\r\n y_pred=a.predict(New_vectors)\r\n count=count_ones_and_zeros_percent(y_pred)\r\n if count>50:\r\n lst_of_traits.append(\"Agreeableness trait is high\")\r\n else:\r\n lst_of_traits.append(\"Agreeableness trait is Less\")\r\n y_pred=n.predict(New_vectors)\r\n count=count_ones_and_zeros_percent(y_pred)\r\n if count>50:\r\n lst_of_traits.append(\"Neuroticism trait is high\")\r\n else:\r\n lst_of_traits.append(\"Neuroticism trait is Less\")\r\n\r\n \r\n with open('pickle_models/personel_attacks_model_file', 'rb') as f:\r\n attack_model = pickle.load(f)\r\n attack_predicted=attack_model.predict(tweets_translated)\r\n attacks=0\r\n for i in attack_predicted:\r\n if i:\r\n attacks=attacks+1\r\n attack_percent=(attacks/len(attack_predicted))*100 #attack_percent\r\n\r\n if(attack_percent >= 0.1):\r\n f2.write(\"Individual seems to attack others mostly and the percentage of attacking tweets is \"+str(attack_percent)+\".\\n\")\r\n else:\r\n f2.write(\"Individual doesn't seems to attack others mostly and the percentage of attacking tweets is \"+str(attack_percent)+\".\\n\")\r\n \r\n \r\n \r\n\r\n\r\n for i in range(len(lst_of_traits)):\r\n f1.write(lst_of_traits[i]+\"\\n\")\r\n f2.close()\r\n f1.close()\r\n print(\"The information and prediction files are updated...!!!!\")\r\nif __name__ == \"__main__\":\r\n parser = ArgumentParser()\r\n parser.add_argument(\"--file\", dest=\"filename\", required=True,\r\n metavar=\"FILE\")\r\n parser.add_argument(\"--testcase\", dest=\"testcase\", required=True,\r\n )\r\n args = parser.parse_args()\r\n code(args.testcase,args.filename)\r\n\r\n\r\n","sub_path":"LighterCode.py","file_name":"LighterCode.py","file_ext":"py","file_size_in_byte":12510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284306759","text":"import math\r\nimport numpy as np\r\nimport datetime as dt\r\nimport statsmodels.tsa.stattools as ts\r\nimport statsmodels.api as sm\r\n \r\nmax_lag = 250\r\nmax_trades = 3\r\ntrade_list = [[], #company 1\r\n [], #company 2\r\n [], #original price spread\r\n [], #original price spread 
average\r\n [], #stock_x quantity\r\n []] #stock_y quantity\r\n \r\nall_pairs = [] #{[(ticker_x, ticker_y), (sid_x, sid_y), is_cointegrated, dif_list, ave, stdev]}\r\n \r\n# Put any initialization logic here. The context object will be passed to\r\n# the other methods in your algorithm.\r\ndef initialize(context):\r\n global all_pairs \r\n \r\n #dictionary of our stock universe\r\n #(stock sid, stock history with length max_lag)\r\n context.stocks = {\"CAG\": (sid(1228), []),\r\n \"MO\": (sid(5885), []),\r\n \"SRE\": (sid(24778), []),\r\n \"VTR\": (sid(18821), []),\r\n \"YUM\": (sid(17787), []),\r\n \"NE\": (sid(5249), []),}\r\n \r\n tuple_list = [('CAG', 'MO'),('SRE', 'VTR'),('YUM', 'NE')] \r\n \r\n for t in tuple_list:\r\n ticker_x = t[0]\r\n ticker_y = t[1]\r\n \r\n sid_x = (context.stocks)[ticker_x][0]\r\n sid_y = (context.stocks)[ticker_y][0]\r\n #get difference\r\n dif_list = []\r\n stdev = 0\r\n #get average\r\n ave = 0\r\n #get cointegration\r\n is_cointegrated = False\r\n #append pair\r\n all_pairs.append( [(ticker_x, ticker_y),\r\n (sid_x, sid_y),\r\n is_cointegrated,\r\n dif_list,\r\n ave,\r\n stdev])\r\n \r\n#conduct augmented dickey fuller or array x with a default\r\n#level of 10%\r\ndef is_stationary(x, p = 10):\r\n \r\n x = np.array(x)\r\n result = ts.adfuller(x, regression='ctt')\r\n #1% level\r\n if p == 1:\r\n #if DFStat <= critical value\r\n if result[0] >= result[4]['1%']: #DFstat is less negative\r\n #is stationary\r\n return True\r\n else:\r\n #is nonstationary\r\n return False\r\n #5% level\r\n if p == 5:\r\n #if DFStat <= critical value\r\n if result[0] >= result[4]['5%']: #DFstat is less negative\r\n #is stationary\r\n return True\r\n else:\r\n #is nonstationary\r\n return False\r\n #10% level\r\n if p == 10:\r\n #if DFStat <= critical value\r\n if result[0] >= result[4]['10%']: #DFstat is less negative\r\n #is stationary\r\n return True\r\n else:\r\n #is nonstationary\r\n return False\r\n \r\n \r\n#Engle-Granger test for cointegration for array x and array y\r\ndef are_cointegrated(x, y):\r\n \r\n #check x is I(1) via Augmented Dickey Fuller\r\n x_is_I1 = not(is_stationary(x))\r\n #check y is I(1) via Augmented Dickey Fuller\r\n y_is_I1 = not(is_stationary(y))\r\n #if x and y are no stationary \r\n if x_is_I1 and y_is_I1:\r\n X = sm.add_constant(x)\r\n #regress x on y\r\n model = sm.OLS(np.array(y), np.array(X))\r\n results = model.fit()\r\n const = results.params[1]\r\n beta_1 = results.params[0]\r\n #solve for ut_hat\r\n u_hat = []\r\n for i in range(0, len(y)):\r\n u_hat.append(y[i] - x[i] * beta_1 - const) \r\n #check ut_hat is I(0) via Augmented Dickey Fuller\r\n u_hat_is_I0 = is_stationary(u_hat)\r\n #if ut_hat is I(0)\r\n if u_hat_is_I0:\r\n #x and y are cointegrated\r\n return True\r\n else:\r\n #x and y are not cointegrated\r\n return False\r\n #if x or y are nonstationary they are not cointegrated\r\n else:\r\n return False\r\n \r\n#update all pairs with new information \r\ndef update_all_pairs(context):\r\n #pair = [(ticker_x, ticker_y), (sid_x, sid_y), is_cointegrated, dif_list, ave, stdev]\r\n global all_pairs\r\n \r\n #for each pair \r\n for p in range(0, len(all_pairs)):\r\n ticker_x = all_pairs[p][0][0]\r\n ticker_y = all_pairs[p][0][1]\r\n \r\n #get history\r\n x_history = (context.stocks)[ticker_x][1]\r\n y_history = (context.stocks)[ticker_y][1]\r\n #get ratio\r\n dif_list = []\r\n for i in range (0, len(x_history)):\r\n dif_list.append(x_history[i] / y_history[i])\r\n #get stdev\r\n stdev = np.std(dif_list)\r\n #get average\r\n ave = 
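        # (note: the price histories fed in here hold log10 prices, since
        # handle_data appends math.log10(price), so dif_list, ave and stdev
        # describe a log-price ratio; the comment above buy_signal mentions a
        # mean + 2 sd band, but the entry test there is abs(ave) + stdev,
        # i.e. one standard deviation in the same log-ratio units)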
np.average(dif_list)\r\n #get cointegration\r\n is_cointegrated = are_cointegrated(x_history, y_history)\r\n #update information\r\n all_pairs[p] = [all_pairs[p][0], all_pairs[p][1], is_cointegrated, dif_list, ave, stdev]\r\n log.info(str(all_pairs[p]))\r\n \r\n#buy signal - \r\n# Make a ratio of the two paired stocks,\r\n# if the pairs are cointegrated then if the ratio is mean + 2 sd , then sell the numerator stock and buy denominator stock in the ratio\r\n# How much to buy is based on the cash per trade allocated and share price\r\n\r\ndef buy_signal(context, data, pair_index):\r\n global max_trades\r\n global all_pairs\r\n global trade_list\r\n global max_lag\r\n \r\n #allocate cash for each trade\r\n cash_per_trade = (context.portfolio.cash)/(2*max_trades)\r\n #get stock_x current information\r\n stock_x_root = (context.stocks)[all_pairs[pair_index][0][0]]\r\n stock_x = stock_x_root[0]\r\n stock_x_data = data.current(stock_x,'price')\r\n stock_x_price = math.log10(stock_x_data)\r\n shares_x = int(cash_per_trade/stock_x_price)\r\n #get stock_y current information\r\n stock_y_root = (context.stocks)[all_pairs[pair_index][0][1]]\r\n stock_y = stock_y_root[0]\r\n stock_y_data = data.current(stock_y,'price')\r\n stock_y_price = math.log10(stock_y_data)\r\n shares_y = int(cash_per_trade/stock_y_price)\r\n #compare the price difference in stock_x and stock_y\r\n ave = all_pairs[pair_index][4]\r\n stdev = all_pairs[pair_index][5]\r\n cointegrated = all_pairs[pair_index][2]\r\n \r\n #if there is enough price data in our stock's history\r\n if len(stock_x_root[1]) == max_lag and len(stock_y_root[1]) == max_lag:\r\n #if the stocks are cointegrated\r\n if cointegrated:\r\n #if the difference in the normalized price is greater than 2 historical stdevs\r\n if (abs(stock_x_price/stock_y_price) >= (abs(ave)+stdev)) and (len(trade_list[0]) < max_trades):\r\n #is stock_x is above its relative price or stock_y below its relative price\r\n if (stock_x_price/stock_y_price) > ave:\r\n #sell x, buy y\r\n order(stock_x, -shares_x)\r\n order(stock_y, shares_y)\r\n trade_list[0].append(stock_x)\r\n trade_list[1].append(stock_y)\r\n trade_list[2].append(stock_x_price/stock_y_price)\r\n trade_list[3].append(ave)\r\n trade_list[4].append(shares_x)\r\n trade_list[5].append(shares_y) \r\n \r\n #if stock_x is trading below its relative price or stock_y above its relative price\r\n else:\r\n #sell stock_y and buy stock_x\r\n order(stock_y, -shares_y)\r\n order(stock_x, shares_x)\r\n trade_list[0].append(stock_x)\r\n trade_list[1].append(stock_y)\r\n trade_list[2].append(stock_x_price/stock_y_price)\r\n trade_list[3].append(ave)\r\n trade_list[4].append(shares_x)\r\n trade_list[5].append(shares_y) \r\n \r\n \r\n#sell signal\r\ndef sell_signal(context, data, pair_index, trade_index):\r\n global all_pairs\r\n global trade_list\r\n #get stock_x current information\r\n stock_x = all_pairs[pair_index][1][0]\r\n stock_x_data = data.current(stock_x,'price')\r\n stock_x_price = math.log10(stock_x_data)\r\n shares_x = trade_list[4][trade_index]\r\n #get stock_y current information\r\n stock_y = all_pairs[pair_index][1][1]\r\n stock_y_data = data.current(stock_y,'price')\r\n stock_y_price = math.log10(stock_y_data)\r\n shares_y = trade_list[5][trade_index]\r\n #compare the price difference in stock_x and stock_y\r\n ave = all_pairs[pair_index][4]\r\n old_ave = trade_list[3][trade_index]\r\n \r\n #if the original difference > old average\r\n if trade_list[2][trade_index] > old_ave:\r\n #sell if the current difference < 
current_ave (crossover)\r\n if (stock_x_price/stock_y_price) < ave:\r\n order(stock_x, shares_x)\r\n order(stock_y, -shares_y)\r\n trade_list[0].pop(trade_index)\r\n trade_list[1].pop(trade_index)\r\n trade_list[2].pop(trade_index)\r\n trade_list[3].pop(trade_index)\r\n trade_list[4].pop(trade_index)\r\n trade_list[5].pop(trade_index)\r\n \r\n #if the orignal difference < old average\r\n else:\r\n #sell if the current difference > current average (crossover)\r\n if (stock_x_price/stock_y_price) > ave:\r\n order(stock_y, shares_y)\r\n order(stock_x, -shares_x)\r\n trade_list[0].pop(trade_index)\r\n trade_list[1].pop(trade_index)\r\n trade_list[2].pop(trade_index)\r\n trade_list[3].pop(trade_index)\r\n trade_list[4].pop(trade_index)\r\n trade_list[5].pop(trade_index) \r\n \r\n \r\n \r\n# Will be called on every trade event for the securities you specify.\r\ndef handle_data(context, data):\r\n global all_pairs\r\n global trade_list\r\n global max_trades\r\n global max_lag\r\n global data_collected\r\n \r\n #get stock data\r\n ticker_list = (context.stocks).keys()\r\n stock = (context.stocks)[ticker_list[0]][0]\r\n stock_data = data.current(stock,'price')\r\n #get the current time\r\n time_list = ((str(get_datetime('US/Eastern')).split(\" \"))[1]).split(\":\")\r\n (hour, minute) = (int(time_list[0]), int(time_list[1]))\r\n #hour = hour - 4 #adjust for time difference\r\n #update all pairs if the market just opened\r\n if hour == 9 and minute == 31:\r\n if len((context.stocks)[ticker_list[0]][1]) == max_lag:\r\n update_all_pairs(context) \r\n #append price data if the market just closed\r\n if hour == 16 and minute == 0:\r\n ticker_list = (context.stocks).keys()\r\n for ticker in ticker_list:\r\n #get ticker data\r\n stock = (context.stocks)[ticker][0]\r\n stock_data = data.current(stock,'price')\r\n stock_price = math.log10(stock_data)\r\n #if data count < max_lags\r\n if len((context.stocks)[ticker][1]) == max_lag:\r\n #pop first element\r\n ((context.stocks)[ticker][1]).pop(0)\r\n #append new data to end\r\n ((context.stocks)[ticker][1]).append(stock_price)\r\n #print \"POP/Appended to \" + ticker\r\n else:\r\n #append price\r\n ((context.stocks)[ticker][1]).append(stock_price)\r\n #for each pair\r\n pair_index = 0\r\n for pair in all_pairs:\r\n stock_x = pair[1][0]\r\n stock_y = pair[1][1]\r\n \r\n #if this trade is open\r\n trade_exists = False\r\n for n in range (0, len(trade_list[0])):\r\n if ( (trade_list[0][n] == stock_x and trade_list[1][n] == stock_y) or (trade_list[0][n] == stock_y and trade_list[1][n] == stock_x)):\r\n trade_exists = True\r\n #check to see if it needs to be closed\r\n sell_signal(context, data, pair_index, n)\r\n break \r\n #if this trade is not open\r\n if not trade_exists and len(trade_list[0]) < max_trades: \r\n #look to see if meets criteria and if so buy\r\n buy_signal(context, data, pair_index)\r\n pair_index = pair_index + 1","sub_path":"pair_trading_sandipayan.py","file_name":"pair_trading_sandipayan.py","file_ext":"py","file_size_in_byte":11959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171625672","text":"from datetime import datetime\nfrom django import test as unittest\nfrom corehq.apps.commtrack.models import StockState\nfrom corehq.apps.domain.shortcuts import create_domain\nfrom corehq.apps.reports.commtrack.data_sources import StockStatusDataSource\nfrom corehq.apps.users.models import WebUser\nfrom dimagi.utils.couch.loosechange import map_reduce\nfrom corehq.apps.commtrack.helpers import 
make_supply_point, make_product\nfrom corehq.apps.commtrack.tests.util import make_loc\n\n\nCURRENT_STOCK = StockStatusDataSource.SLUG_CURRENT_STOCK\nPRODUCT_ID = StockStatusDataSource.SLUG_PRODUCT_ID\nLOCATION_ID = StockStatusDataSource.SLUG_LOCATION_ID\n\n\nformat_string = \"%Y-%m-%d\"\nTEST_DOMAIN = 'commtrack-test1'\n\nclass DataSourceTest(object):\n # fixme: need to make a test again\n @classmethod\n def setUpClass(cls):\n\n cls.domain = create_domain(TEST_DOMAIN)\n cls.couch_user = WebUser.create(None, \"report_test\", \"foobar\")\n cls.couch_user.add_domain_membership(TEST_DOMAIN, is_admin=True)\n cls.couch_user.save()\n\n cls.products = {\n 'pA': make_product(TEST_DOMAIN, 'prod A', 'pA'),\n 'pB': make_product(TEST_DOMAIN, 'prod B', 'pB')\n }\n\n test_setup = {\n 'A': {\n 'A-a': {\n 'A-a-1': {'pA': 4, 'pB': 0},\n 'A-a-2': {'pB': 3},\n },\n 'A-b': {\n 'A-b-1': {'pA': 2}\n }\n },\n 'B': {\n 'B-a': {\n 'B-a-1': {'pA': 1, 'pB': 1}\n }\n }\n }\n\n cls.sites = {}\n cls.regions = {}\n cls.districts = {}\n for region_name, districts in test_setup.items():\n region = make_loc(region_name, type='region')\n cls.regions[region_name] = region\n for district_name, sites in districts.items():\n district = make_loc(district_name, type='district', parent=region)\n cls.districts[district_name] = district\n for site_name, products in sites.items():\n site = make_loc(site_name, type='site', parent=district, domain=TEST_DOMAIN)\n cls.sites[site_name] = (site, products)\n supply_point = make_supply_point(TEST_DOMAIN, site)\n for p_code, stock in products.items():\n prod = cls.products[p_code]\n StockState.objects.create(\n section_id='stock',\n case_id=supply_point._id,\n product_id=prod._id,\n stock_on_hand=stock,\n last_modified_date=datetime.utcnow(),\n )\n\n @classmethod\n def tearDownClass(cls):\n cls.couch_user.delete()\n cls.domain.delete() # domain delete cascades to everything else\n\n def test_raw_cases(self):\n config = {\n 'domain': TEST_DOMAIN\n }\n data = list(StockStatusDataSource(config).get_data())\n self.assertEqual(len(data), 6)\n by_location = map_reduce(lambda row: [(row[LOCATION_ID],)], data=data, include_docs=True)\n\n for site, products in self.sites.values():\n site_id = site._id\n rows = by_location[site_id]\n by_product = dict((row[PRODUCT_ID], row) for row in rows)\n for code, level in products.items():\n product_id = self.products[code]._id\n self.assertEqual(by_product[product_id][CURRENT_STOCK], level)\n\n def test_raw_cases_location(self):\n location = self.districts['A-b']._id\n config = {\n 'domain': TEST_DOMAIN,\n 'location_id': location\n }\n data = list(StockStatusDataSource(config).get_data())\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][LOCATION_ID], self.sites['A-b-1'][0]._id)\n self.assertEqual(data[0][PRODUCT_ID], self.products['pA']._id)\n self.assertEqual(data[0][CURRENT_STOCK], 2)\n\n def test_aggregate_level1(self):\n location = self.regions['A']._id\n config = {\n 'domain': TEST_DOMAIN,\n 'location_id': location,\n 'aggregate': True\n }\n data = list(StockStatusDataSource(config).get_data())\n self.assertEqual(len(data), 2)\n by_product = dict((row[PRODUCT_ID], row) for row in data)\n pA_id = self.products['pA']._id\n pB_id = self.products['pB']._id\n\n self.assertEqual(by_product[pA_id][CURRENT_STOCK], 6)\n self.assertEqual(by_product[pB_id][CURRENT_STOCK], 3)\n\n def test_aggregate_level2(self):\n location = self.districts['A-b']._id\n config = {\n 'domain': TEST_DOMAIN,\n 'location_id': location,\n 'aggregate': True\n }\n data = 
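        # (with aggregate=True the data source rolls stock up the location
        # tree: district A-b contains only site A-b-1, which carries pA=2,
        # hence the single row with CURRENT_STOCK == 2 asserted below; the
        # level-1 test above gets pA = 4 + 2 = 6 and pB = 0 + 3 = 3 from
        # region A's two districts the same way)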
list(StockStatusDataSource(config).get_data())\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][CURRENT_STOCK], 2)\n","sub_path":"corehq/apps/reports/tests/test_data_sources.py","file_name":"test_data_sources.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499305769","text":"#!/usr/bin/env python3\n# __@@__ Coding:utf-8\n\n\"\"\"\n@version: ??\n@author: luxutao\n@licence: Apache Licence\n@contact: xutao.lu.cn@gmail.com\n@site: http://www.123m.me\n@filename: threadbank.py\n@projectname: PycharmProjects\n@time: 2016-8-27 上午10:33\n\"\"\"\n\nimport time,threading\n\nbalance = 0\n\ndef change(n):\n global balance\n balance = balance + n\n balance = balance - n\n\ndef run_thread(n):\n\n for i in range(1000):\n change(n)\n\nt1 = threading.Thread(target=run_thread,args=(100,))\nt2 = threading.Thread(target=run_thread,args=(8,))\nt1.start()\nt2.start()\nt1.join()\nt2.join()\nprint(balance)","sub_path":"process_and_thead/threadbank.py","file_name":"threadbank.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610790966","text":"# import torch\n# from torch import nn as nn\n# from utils import IoU\n# class YOLOloss(nn.Module):\n# def __init__(self):\n# super().__init__()\n# self.entropy = nn.CrossEntropyLoss()\n# self.bce = nn.BCEWithLogitsLoss()\n# self.mse = nn.MSELoss()\n# self.sigmoid = nn.Sigmoid()\n \n# self.lambda_class = 1\n# self.lambda_noobj = 10\n# self.lambda_obj = 1\n# self.lambda_box = 10\n \n# def forward(self,predictions,target,anchors):\n# obj = target[...,0]==1\n# noobj = target[...,0]==0\n\n# # noobj loss function\n# no_obj_loss =self.bce(predictions[...,0:1][noobj],target[...,0:1][noobj])\n\n# #obj loss function\n# anchors = anchors.reshape(1,3,1,1,2)\n# obj_pred = torch.cat([self.sigmoid(predictions[...,1:3]),torch.exp(predictions[...,3:5])*anchors],dim = -1)\n# ious = IoU(obj_pred[obj],target[...,1:5][obj]).detach()\n# obj_loss = self.mse(self.sigmoid(predictions[...,0:1][obj]),ious*target[...,0:1][obj])\n\n# # box loss function\n# predictions[...,1:3] = self.sigmoid(predictions[...,1:3])\n# target[...,3:5] = torch.log(1e-6+target[...,3:5]/anchors)\n# box_loss = self.mse(predictions[...,1:5][obj],target[...,1:5][obj])\n\n# # class loss function\n# class_loss = self.entropy(predictions[...,5:][obj],target[...,5][obj].long())\n\n# return (no_obj_loss*self.lambda_noobj+\n# obj_loss*self.lambda_obj+\n# box_loss * self.lambda_box +\n# class_loss * self.lambda_class)\nimport random\nimport torch\nimport torch.nn as nn\n\nfrom utils import IoU,intersection_over_union\n\n\nclass YOLOLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.mse = nn.MSELoss()\n self.bce = nn.BCEWithLogitsLoss()\n self.entropy = nn.CrossEntropyLoss()\n self.sigmoid = nn.Sigmoid()\n\n # Constants signifying how much to pay for each respective part of the loss\n self.lambda_class = 1\n self.lambda_noobj = 10\n self.lambda_obj = 1\n self.lambda_box = 10\n\n def forward(self, predictions, target, anchors):\n # Check where obj and noobj (we ignore if target == -1)\n obj = target[..., 0] == 1 # in paper this is Iobj_i\n noobj = target[..., 0] == 0 # in paper this is Inoobj_i\n\n # ======================= #\n # FOR NO OBJECT LOSS #\n # ======================= #\n\n no_object_loss = self.bce(\n (predictions[..., 0:1][noobj]), (target[..., 0:1][noobj]),\n )\n\n # ==================== #\n # FOR 
OBJECT LOSS #\n # ==================== #\n\n anchors = anchors.reshape(1, 3, 1, 1, 2)\n box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)\n ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()\n object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])\n\n # ======================== #\n # FOR BOX COORDINATES #\n # ======================== #\n\n predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3]) # x,y coordinates\n target[..., 3:5] = torch.log(\n (1e-16 + target[..., 3:5] / anchors)\n ) # width, height coordinates\n box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])\n\n # ================== #\n # FOR CLASS LOSS #\n # ================== #\n\n class_loss = self.entropy(\n (predictions[..., 5:][obj]), (target[..., 5][obj].long()),\n )\n\n #print(\"__________________________________\")\n #print(self.lambda_box * box_loss)\n #print(self.lambda_obj * object_loss)\n #print(self.lambda_noobj * no_object_loss)\n #print(self.lambda_class * class_loss)\n #print(\"\\n\")\n\n return (\n self.lambda_box * box_loss\n + self.lambda_obj * object_loss\n + self.lambda_noobj * no_object_loss\n + self.lambda_class * class_loss\n )\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"505855546","text":"import json\nimport re\nfrom ocrd import Processor\nfrom ocrd import MIMETYPE_PAGE\nfrom ocrd_cis import get_ocrd_tool\nfrom ocrd.utils import getLogger\nfrom ocrd.model.ocrd_page import from_file\nfrom ocrd_cis import JavaProcess\nfrom ocrd.model.ocrd_page_generateds import TextEquivType\nfrom ocrd.model.ocrd_page import to_xml\n\n\nclass Profiler(Processor):\n def __init__(self, *args, **kwargs):\n ocrd_tool = get_ocrd_tool()\n kwargs['ocrd_tool'] = ocrd_tool['tools']['ocrd-cis-profile']\n kwargs['version'] = ocrd_tool['version']\n super(Profiler, self).__init__(*args, **kwargs)\n self.log = getLogger('cis.Processor.Profiler')\n\n def process(self):\n profile = self.read_profile()\n files = self.add_suggestions(profile)\n for (pcgts, ifile) in files:\n self.add_output_file(\n ID=\"{}_{}\".format(ifile.ID, self.output_file_grp),\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts),\n file_grp=self.output_file_grp,\n basename=ifile.basename,\n )\n self.workspace.save_mets()\n\n def add_suggestions(self, profile):\n files = []\n ids = set()\n for (word, pcgts, ifile) in self.get_all_words():\n self.add_candidates(profile, word)\n if ifile.ID not in ids:\n ids.add(ifile.ID)\n files.append((pcgts, ifile))\n return files\n\n def add_candidates(self, profile, word):\n _unicode = word.get_TextEquiv()[0].Unicode\n clean = re.sub(r'^\\W*(.*?)\\W*$', r'\\1', _unicode)\n lower = clean.lower()\n if lower not in profile['data']:\n return\n for cand in profile['data'][lower]['Candidates']:\n eq = TextEquivType(\n dataType='profiler-candidate',\n dataTypeDetails=json.dumps(cand),\n Unicode=Profiler.format_candidate(clean, cand['Suggestion']),\n conf=cand['Weight'],\n )\n word.add_TextEquiv(eq)\n self.log.debug(\"suggestion: [%s] %s (%f)\",\n clean, eq.Unicode, cand['Weight'])\n\n def format_candidate(origin, cand):\n res = \"\"\n for (i, c) in enumerate(cand):\n if i < len(origin) and origin[i].isupper():\n res += c.upper()\n else:\n res += c\n return res\n\n def read_profile(self):\n _input = []\n for (line, pcgts, ifile) in 
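        # (each document line contributes its first TextEquiv string, one row
        # of profiler input per line; the external Java profiler then returns
        # a JSON profile whose data[token]['Candidates'] entries, each with a
        # Suggestion and a Weight, are what add_candidates above converts
        # into extra TextEquiv elements on the matching words)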
self.get_all_lines():\n _input.append(line.get_TextEquiv()[0].Unicode)\n p = JavaProcess.profiler(\n jar=self.parameter['cisOcrdJar'],\n args=[\n self.parameter['profilerExecutable'],\n self.parameter['profilerBackend'],\n self.parameter['profilerLanguage'],\n ]\n )\n return p.run(\"\\n\".join(_input))\n\n def get_all_lines(self):\n \"\"\"Returns a list of tuples of lines, their parent and\n their according workspace file.\"\"\"\n lines = []\n ifs = sorted(\n self.workspace.mets.find_files(fileGrp=self.input_file_grp),\n key=lambda ifile: ifile.ID\n )\n for ifile in ifs:\n pcgts = from_file(\n self.workspace.download_file(ifile)\n )\n for region in pcgts.get_Page().get_TextRegion():\n for line in region.get_TextLine():\n lines.append((line, pcgts, ifile))\n return lines\n\n def get_all_words(self):\n words = []\n for (line, pcgts, ifile) in self.get_all_lines():\n for word in line.get_Word():\n words.append((word, pcgts, ifile))\n return words\n","sub_path":"ocrd_cis/profile/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29917859","text":"# -*- coding: utf-8 -*- \r\nfrom functools import reduce\r\n\r\ndef str2float(s):\r\n sig = -1 if s[0] == '-' else 1\r\n if s[0] == '-' or s[0] == '+':\r\n s = s[1:]\r\n index = s.find('.')\r\n \r\n if index == -1:\r\n index = len(s)\r\n power = 0\r\n else:\r\n power = len(s) - index - 1\r\n \r\n def Chr2Num(ss):\r\n return {'1' : 1, '2' : 2, '3' : 3, '4' : 4, '5' : 5, '6' : 6, '7' : 7, '8' : 8, '9' : 9, '0' : 0 }[ss]\r\n num_list = list( map(Chr2Num, s[:index]+s[index + 1:]) )\r\n result = sig * reduce(lambda x,y : 10 * x + y, num_list) / 10 ** power\r\n return result\r\nprint('\\'+34567\\'=', str2float('+34567'))","sub_path":"base/StringToFloat.py","file_name":"StringToFloat.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"628653591","text":"from turtle import Turtle\nimport random\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 5\n\n\nclass CarManager:\n\n def __init__(self):\n self.all_cars = []\n self.speed = STARTING_MOVE_DISTANCE\n\n def set_cars(self):\n random_choice = random.randint(1, 6)\n if random_choice == 1:\n new_car = Turtle(\"square\")\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n new_car.color(random.choice(COLORS))\n new_car.penup()\n random_y = random.randrange(-250, 250, 30)\n new_car.goto(280, random_y)\n self.all_cars.append(new_car)\n\n def drive(self):\n for car in self.all_cars:\n car.backward(self.speed)\n\n def speed_up(self):\n self.speed += MOVE_INCREMENT\n","sub_path":"Day 23 - Turtle Crossing Game/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563173079","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport speech_recognition as sr\r\nimport time\r\nimport pyttsx3\r\n\r\ndriver = webdriver.Chrome('C:\\\\Users\\\\Nihal Mittal\\\\Desktop\\\\pythonscript\\\\browser-assistant\\\\chromedriver.exe')\r\ndriver.maximize_window()\r\n\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\n\r\nrecognizer = sr.Recognizer()\r\nmicrophone = sr.Microphone()\r\n\r\ndef 
speak(query):\r\n engine.say(query)\r\n engine.runAndWait()\r\n\r\ndef recognize_speech():\r\n with microphone as source:\r\n audio = recognizer.listen(source, phrase_time_limit=5)\r\n response = \"\"\r\n speak(\"Identifying speech..\")\r\n try:\r\n response = recognizer.recognize_google(audio)\r\n except:\r\n response = \"Error\"\r\n return response\r\ntime.sleep(3)\r\nspeak(\"Hello master! I am now online..\")\r\nwhile True:\r\n speak(\"How can I help you?\")\r\n voice = recognize_speech().lower()\r\n print(voice)\r\n if 'open google' in voice:\r\n speak('Opening google..')\r\n driver.execute_script(\"window.open('');\")\r\n window_list = driver.window_handles\r\n driver.switch_to_window(window_list[-1])\r\n driver.get('https://google.com')\r\n elif 'search google' in voice:\r\n while True:\r\n speak('I am listening..')\r\n query = recognize_speech()\r\n if query != 'Error':\r\n break\r\n element = driver.find_element_by_name('q')\r\n element.clear()\r\n element.send_keys(query)\r\n element.send_keys(Keys.RETURN)\r\n elif 'open youtube' in voice:\r\n speak('Opening youtube..')\r\n driver.execute_script(\"window.open('');\")\r\n window_list = driver.window_handles\r\n driver.switch_to_window(window_list[-1])\r\n driver.get('https://youtube.com')\r\n elif 'search youtube' in voice:\r\n while True:\r\n speak('I am listening..')\r\n query = recognize_speech()\r\n if query != 'Error':\r\n break\r\n element = driver.find_element_by_name('search_query')\r\n element.clear()\r\n element.send_keys(query)\r\n element.send_keys(Keys.RETURN)\r\n elif 'switch tab' in voice:\r\n num_tabs = len(driver.window_handles)\r\n cur_tab = 0\r\n for i in range(num_tabs):\r\n if driver.window_handles[i] == driver.current_window_handle:\r\n if i != num_tabs - 1:\r\n cur_tab = i + 1\r\n break\r\n driver.switch_to_window(driver.window_handles[cur_tab])\r\n elif 'close tab' in voice:\r\n speak('Closing Tab..')\r\n driver.close()\r\n elif 'go back' in voice:\r\n driver.back()\r\n elif 'go forward' in voice:\r\n driver.forward()\r\n elif 'exit' in voice:\r\n speak('Goodbye Master!')\r\n driver.quit()\r\n break\r\n else:\r\n speak('Not a valid command. 
Please try again.')\r\n time.sleep(2)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273097220","text":"import socket\nimport threading\nimport pyfiglet\n\nB='\\033[1;34m'\nC='\\033[1;37m'\nCY='\\033[1;36m'\nY='\\033[1;33m'\nG='\\033[1;32m'\nRT='\\033[;0m'\nR = \"\\033[1;31m\"\n\nfiglet = pyfiglet.figlet_format(\"Client\",font=\"banner3-D\")\nprint(Y+figlet,C)\nprint(f'{R} [Version: 0.1]{C}')\nprint(f\"[{Y}i{C}] Ao se Conectar no Servidor não mande mensagens com acentos...\")\nHOST = input(\"[ Host ]: \")\nPORT = int(input(\"[ Port ]: \"))\n\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nserver.bind((HOST,PORT))\nserver.listen()\nprint(f'{Y}[i]{C}O servidor está ativo {HOST}:{PORT}')\n\nclients = []\nusernames = []\n\ndef globalMessage(message):\n for client in clients:\n client.send(messageasernames[clientLeaved])\n print(f'[{R}-{C}]{clientLeavedUsername} saiu do chat...')\n globalMessage(f'{clientLeavedUsername} deixou...'.encode('ascii'))\n usernames.remove(clientLeavedUsername)\n\n\ndef initialConnection():\n while True:\n try:\n client, address = server.accept()\n print(f\"[{Y}+{C}]Conexão nova: {str(address)}\")\n clients.append(client)\n client.send('getUser'.encode('ascii'))\n username = client.recv(2048).decode('ascii')\n usernames.append(username)\n globalMessage(f'[{G}+{C}] {username} entrou no chat!!'.encode('ascii'))\n user_thread = threading.Thread(target=handleMessages,args=(client,))\n user_thread.start()\n except:\n pass\n\ninitialConnection()\n","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73820870","text":"# This script queries hashtags with #snhu and #southernnewhampshire and stores the\n# data into MongoDB\n\n# import necessary libraries\nimport pymongo\nfrom pymongo import MongoClient\nimport json\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport datetime\n\n# Values from Twitter required for access\nAPI_KEY = 'QZFotcLwVNoQnNMeTEswXoHtv'\nAPI_SECRET_KEY = 'N2zlIUlVqgeO5W5xm2BwSlsdgiJQX6x200uIu8iCfwscptlDHV'\nACCESS_TOKEN = '1289297493042302976-PHoSflZEWAQiN5CKvKbX8MqwX1kH4Y'\nACCESS_TOKEN_SECRET = 'C7kSnfodD2iERarhnI0dkO6isLLwrLKH7dSxqd5hkDNnh'\n\n# Information needed to connect to MongoDB\n\nclient = pymongo.MongoClient(\"mongodb+srv://hhollee:january9@cs499.7fw1n.mongodb.net/?retryWrites=true&w=majority\")\ndb = client.tweets\ncollection = db.snhu\n\n# Query of extracted data\nquery = ['#snhu', '#southernnewhampshireuniversity']\ncount = 100 # limit returns\n\n\n# Listener class imported from tweepy to stream tweets\nclass MyStreamListener(tweepy.StreamListener):\n def on_connect(self):\n print(\"You are connected\")\n\n def on_error(self, status_code):\n # print if error\n print('Error: ' + repr(status_code))\n return False\n\n # Connect to MongoDB and store data\n def on_data(self, data):\n try:\n client = pymongo.MongoClient(\n \"mongodb+srv://hhollee:january9@cs499.7fw1n.mongodb.net/?retryWrites=true&w=majority\")\n db = client.tweets\n\n # Decode JSON from Twitter\n twitterjson = json.loads(data)\n\n # data from tweet to store into database\n tweet_id = twitterjson['id_str']\n text = twitterjson['text']\n posted = twitterjson['created']\n\n time_stamp = 
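            # (parses Twitter's creation timestamp, e.g.
            # "Wed Aug 27 10:33:00 +0000 2020"; note that `posted` is read
            # from twitterjson['created'] above, while v1.1 streaming
            # payloads normally name this field 'created_at', so the lookup
            # likely needs to use whichever key the API actually delivers)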
datetime.datetime.strptime(posted, '%a %b %d %H:%M:%S +0000 %Y')\n print(tweet_id + \"\\n\")\n # insert data into collection\n collection.insert(twitterjson)\n except Exception as e:\n print(e)\n\n\n# Set up listener\nauth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\nlistener = MyStreamListener(api=tweepy.API(wait_on_rate_limit=True))\nstream = tweepy.Stream(auth=auth, listener=listener)\nstream.filter(track=query)\n\n\n\n","sub_path":"twitter_rest.py","file_name":"twitter_rest.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79741296","text":"\"\"\"partum_jci URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom common.views import IndexView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('common/', include(('common.urls', 'common'), namespace='common')),\n path('japan/inventory/', include(('japan_inventory.urls', 'japan_inventory'), namespace='japan_inventory')),\n path('philip/inventory/', include(('philip_inventory.urls', 'philip_inventory'), namespace='philip_inventory')),\n path('pak/inventory/', include(('pak_inventory.urls', 'pak_inventory'), namespace='pak_inventory')),\n path('', IndexView.as_view(), name='home'),\n\n]\n","sub_path":"partum_jci/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300383735","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/kieffer/workspace/fabio/build/lib.macosx-10.6-intel-3.5/fabio/fit2dspreadsheetimage.py\n# Compiled at: 2020-04-03 09:02:03\n# Size of source mod 2**32: 3429 bytes\n\"\"\"\nRead the fit2d ascii image output\n + Jon Wright, ESRF\n\"\"\"\nfrom __future__ import absolute_import, print_function, with_statement, division\nimport numpy, logging\n_logger = logging.getLogger(__name__)\nfrom .fabioimage import FabioImage\n\nclass Fit2dSpreadsheetImage(FabioImage):\n __doc__ = '\\n Read a fit2d ascii format\\n '\n DESCRIPTION = 'Fit2d spreadsheet ascii file format'\n DEFAULT_EXTENSIONS = [\n 'spr']\n\n def _readheader(self, infile):\n \"\"\"\n Read the header of the file\n \"\"\"\n line = infile.readline()\n while line.startswith(b'#'):\n line = infile.readline()\n\n items = line.split()\n xdim = int(items[0])\n ydim = int(items[1])\n self.header['title'] = line\n self.header['Dim_1'] = xdim\n self.header['Dim_2'] = ydim\n\n def read(self, fname, frame=None):\n \"\"\"\n Read in header into self.header and\n the data into self.data\n \"\"\"\n self.header = self.check_header()\n self.resetvals()\n infile = self._open(fname)\n self._readheader(infile)\n try:\n dim1 = int(self.header['Dim_1'])\n dim2 
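            # (Dim_1/Dim_2 come from the first non-comment header line of the
            # .spr file, which holds "xdim ydim"; an illustrative 2x2 payload,
            # not taken from the source, would be:
            #     2 2
            #     1.0 2.0
            #     3.0 4.0
            # each following line is split into float32 values and the
            # resulting array must match the (dim2, dim1) shape)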
= int(self.header['Dim_2'])\n self._shape = (dim2, dim1)\n except (ValueError, KeyError):\n raise IOError('file %s is corrupt, cannot read it' % str(fname))\n\n self._dtype = numpy.dtype(numpy.float32)\n try:\n vals = []\n for line in infile.readlines():\n try:\n vals.append([float(x) for x in line.split()])\n except Exception:\n pass\n\n self.data = numpy.array(vals).astype(self._dtype)\n assert self.data.shape == self._shape\n self._shape = None\n self._dtype = None\n except Exception:\n _logger.debug('Backtrace', exc_info=True)\n raise IOError('Error reading ascii')\n\n self.resetvals()\n return self\n\n\nfit2dspreadsheetimage = Fit2dSpreadsheetImage","sub_path":"pycfiles/fabio-0.10.0-cp35-cp35m-macosx_10_6_intel/fit2dspreadsheetimage.cpython-35.py","file_name":"fit2dspreadsheetimage.cpython-35.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"279367521","text":"import json\nimport pandas as pd\n\nfrom actions.zomato import zomatopy\n\nconfig = {\"user_key\": \"be2cfee4af568158abde9eae6c636ca6\"}\nzomato = zomatopy.initialize_app(config)\n\n\nclass SearchRestaurants:\n def search_restaurants(self, loc, cuisine, budgetmin, budgetmax):\n\n print(\"location ::\", loc, \" cuisine ::\", cuisine, \"price ::\", budgetmax)\n\n global restaurants\n\n restaurants = list_restaurants(loc, cuisine, budgetmin, budgetmax)\n restaurants.drop_duplicates(inplace=True)\n restaurants_length = len(restaurants)\n top5 = restaurants.head(5)\n # print(restaurants)\n # top 5 results to display\n if restaurants_length > 0:\n response = 'Showing you top results:' + \"\\n\"\n for index, row in top5.iterrows():\n response = response + str(row[\"restaurant_name\"]) + ' (rated ' + row['restaurant_rating'] + ') in ' + \\\n row['restaurant_address'] + ' and the average budget for two people ' + str(\n row['budget_for2people']) + \"\\n\"\n else:\n response = 'No restaurants found'\n return restaurants_length, response\n\n\ndef list_restaurants(loc, cuisine, minBudget, maxBudget):\n location_detail = zomato.get_location(loc, 1)\n location_json = json.loads(location_detail)\n\n lat = location_json[\"location_suggestions\"][0][\"latitude\"]\n lon = location_json[\"location_suggestions\"][0][\"longitude\"]\n cuisines_dict = {'american': 1, 'chinese': 25, 'north indian': 50, 'italian': 55, 'mexican': 73, 'south indian': 85,\n 'thai': 95}\n budgets = [minBudget, maxBudget]\n\n list1 = [0, 20, 40, 60, 80]\n d = []\n df = pd.DataFrame()\n for i in list1:\n print(\" parameters to request restaurant search :: \", lat, lon, \"cuisine :: \" + cuisine,\n str(cuisines_dict.get(cuisine)), i)\n results = zomato.restaurant_search(\"\", lat, lon, str(cuisines_dict.get(cuisine)), limit=i)\n d1 = json.loads(results)\n d = d1['restaurants']\n df1 = pd.DataFrame([{'restaurant_name': x['restaurant']['name'],\n 'restaurant_rating': x['restaurant']['user_rating']['aggregate_rating'],\n 'restaurant_address': x['restaurant']['location']['address'],\n 'budget_for2people': x['restaurant']['average_cost_for_two'],\n 'restaurant_photo': x['restaurant']['featured_image'],\n 'restaurant_url': x['restaurant']['url']} for x in d])\n df = df.append(df1)\n\n restaurant_df = df[(df.budget_for2people.isin(budgets))]\n restaurant_df = restaurant_df.sort_values(['restaurant_rating'], ascending=0)\n\n return 
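    # (caveat: budgets = [minBudget, maxBudget] together with the isin()
    # filter above keeps only rows whose average cost exactly equals one of
    # the two endpoints; an actual range filter would be something like
    # df.budget_for2people.between(minBudget, maxBudget))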
restaurant_df\n","sub_path":"actions/zomato/search_restaurants.py","file_name":"search_restaurants.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440899305","text":"#!/usr/bin/env python3\n\n# User pandas get familiar with your data\n# pandas - is primary tool data scientists use for exploring and manipulating data.\nimport pandas as pd\n\n# save filepath to variable for easier access\nfile_path = \"path/to/file.csv\"\n# read the data and store data in DataFrame\n# pandas.read_csv()\nyour_data = pd.read_csv(file_path)\n\n# print a summary of the data in your_data\n#pandas.DataFrame.describe()\nyour_data.describe()\n\n\n\n\n","sub_path":"basic_data_exploration.py","file_name":"basic_data_exploration.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45965783","text":"import time\nimport yaml\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\nimport Data.reports as dpath\nimport Config.privite as cpath\n\nconf_path = cpath.pri_path() + '\\email_conf.yml'\nemail_conf = yaml.load(open(conf_path, 'r', encoding='utf-8'))\n\n\nclass Mailsender(object):\n\n @staticmethod\n def send_email():\n timestamp = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n filename = dpath.repo_path() + '\\\\res.html'\n with open(filename, 'rb') as f:\n email_content = f.read()\n\n message = MIMEMultipart()\n message['Subject'] = Header('测试报告自动发送', 'utf-8')\n message['From'] = Header(\"selenium\", 'utf-8') # 发送者\n message['To'] = Header(\"测试组\", 'utf-8') # 接收者\n\n message.attach(MIMEText('邮件正文\\n哈哈哈', _subtype='plain', _charset='utf-8'))\n # 构造附件att1,若是要带多个附件,可根据下边的格式构造\n # 如果添加截图,txt等,均可以使用open('filename','rb')打开\n att1 = MIMEText(open(filename, 'rb').read(), 'base64', 'utf-8')\n att1['Content-Type'] = 'application/octet-stream'\n att1[\"Content-Disposition\"] = 'attachment; filename=%s' % 'Report_' + timestamp + '.html'\n message.attach(att1)\n\n \"\"\"\n # 直接发送不带附件 正文格式'html','plain'\n message = MIMEText(email_content, 'html', 'utf-8')\n # message = MIMEText('123', 'plain', 'utf-8')\n \"\"\"\n\n # 开始发送邮件。\n sendemail = smtplib.SMTP_SSL(email_conf['stmp_server'], email_conf['stmp_server_port'])\n sendemail.login(email_conf['stmp_login_email'], email_conf['stmp_login_validateCode'])\n sendemail.sendmail(email_conf['sender'], email_conf['receivers'], message.as_string())\n\n\nif __name__ == '__main__':\n Mailsender.send_email()\n","sub_path":"Libs/email_send.py","file_name":"email_send.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94141717","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.ma as ma\nimport tensorflow as tf\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,\n Conv2DTranspose, Dense, Flatten,\n LeakyReLU, Reshape)\nfrom tensorflow.keras.models import Model\n\nfrom models import BaseModel\nfrom utils.plots import *\n\n\nclass CustomConvAutoencoderModel(BaseModel):\n def __init__(self, config):\n super().__init__(config)\n\n def create_optimizer(self, optimzer=\"adam\"):\n super().create_optimizer(optimzer)\n\n def compile(self, loss=\"mse\"):\n self.model.compile(loss=loss, optimizer=self.optimizer, metrics=[\"accuracy\"])\n\n 
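    # A minimal anomaly-scoring sketch (an assumption about usage, not part
    # of the original class): once the autoencoder is trained, inputs are
    # usually flagged by per-image reconstruction error, e.g.
    #
    #     recon = model.predict(images)  # images: float batch scaled to [0, 1]
    #     errors = np.mean(np.square(images - recon), axis=(1, 2, 3))
    #     flagged = errors > np.percentile(errors, 95)  # threshold assumed
    #
    # where `model` is the Model returned by create_model below.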
def plot_predictions(self, test_images):\n plot_difference(self.config, self.predictions, test_images)\n \n def create_model(self):\n filters = (32, 64)\n kernel_size = (3,3)\n latent_dim = 16\n leak_alpha = 0.1\n try:\n model_config = self.config.train.raw[\"custom_conv_autoencoder_model\"]\n latent_dim = model_config[\"latent_dim\"]\n leak_alpha = model_config[\"leak_alpha\"]\n filter_count = model_config[\"filters\"]\n filter_size = model_config[\"filter_size\"]\n filters = (filter_count, filter_size)\n kernel_size = (model_config[\"kernel_size\"],model_config[\"kernel_size\"])\n except:\n pass\n \n\n input_shape = self.config.input_shape\n inputs = Input(shape=input_shape, name=self.input_name)\n x = inputs\n for f in filters:\n x = Conv2D(filters=f, kernel_size=kernel_size, strides=2, padding=\"same\")(x)\n x = LeakyReLU(alpha=leak_alpha)(x)\n x = BatchNormalization(axis=input_shape[2])(x)\n volume_size = tf.keras.backend.int_shape(x)\n x = Flatten()(x)\n latent = Dense(units=latent_dim)(x) # Encoded\n encoder = Model(inputs, latent, name=\"encoder\")\n\n latent_inputs = Input(shape=(latent_dim,))\n x = Dense(np.prod(volume_size[1:]))(latent_inputs)\n x = Reshape((volume_size[1], volume_size[2], volume_size[3]))(x)\n for f in filters[::-1]:\n x = Conv2DTranspose(\n filters=f, kernel_size=kernel_size, strides=2, padding=\"same\"\n )(x)\n x = LeakyReLU(alpha=leak_alpha)(x)\n x = BatchNormalization(axis=input_shape[2])(x)\n x = Conv2DTranspose(filters=input_shape[2], kernel_size=kernel_size, padding=\"same\")(\n x\n )\n outputs = Activation(\"sigmoid\", name=self.output_name)(x) # Decoded\n decoder = Model(latent_inputs, outputs, name=\"decoder\")\n\n self.model = Model(inputs, decoder(encoder(inputs)), name=\"autoencoder\")\n return self.model\n","sub_path":"src/models/anomaly_detection/custom_conv_autoencoder_model.py","file_name":"custom_conv_autoencoder_model.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354496499","text":"#!/usr/bin/env python\n\nimport ast\nimport collections\nimport codecs\nimport glob\nimport locale\nimport logging\nimport re\nimport os\nimport sys\n\nRE_LANG_PAIR = re.compile(r'^([a-z][a-z])-([a-z][a-z])$')\n\ndef metadata_from_name(trfile):\n path, basename = os.path.split(trfile)\n pathparts = path.split(\"/\")\n sysparts = []\n lp = None\n dataset = None\n for namepart in basename.split('.'):\n if len(namepart) == 0:\n continue\n m = RE_LANG_PAIR.match(namepart)\n if m:\n if lp is not None:\n logging.warn('AMBIGUOUS LANGPAIR: \"{}\" or \"{}\"'.format(\n namepart, lp))\n lp = namepart\n l1, l2 = m.groups()\n continue\n if namepart in pathparts:\n if dataset is not None:\n logging.warn('AMBIGUOUS DATASET: \"{}\" or \"{}\"'.format(\n namepart, dataset))\n dataset = namepart\n continue\n sysparts.append(namepart)\n return dataset, lp, l1, l2, '.'.join(sysparts)\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description=\"\"\"Script for running MT evaluations for WMT data set\"\"\")\n parser.add_argument('-v', '--verbose', dest=\"verbose\", type=int,\n default=1, metavar='',\n help=\"verbose level (default %(default)s)\")\n parser.add_argument('-e', '--encoding', metavar='', type=str,\n default=None, help='Override encoding of locale.')\n parser.add_argument('-m', '--metric', metavar='', type=str,\n default=None, help='metric function')\n parser.add_argument('-p', '--params', metavar='', type=str,\n default=\"{}\",\n 
help='dict of extra parameters for the metric')\n parser.add_argument('-n', '--name', metavar='', type=str,\n default=\"\", help='override metric name (if not module name)')\n parser.add_argument('datadir', metavar='', type=str,\n help='WMT submissions directory')\n parser.add_argument('-o', '--outputdir', metavar='', type=str,\n default='.', help='output directory')\n args = parser.parse_args()\n\n if args.verbose >= 2:\n loglevel = logging.DEBUG\n elif args.verbose >= 1:\n loglevel = logging.INFO\n else:\n loglevel = logging.WARNING\n\n logging.basicConfig(format='%(module)s: %(message)s',\n level=loglevel)\n\n if args.encoding is not None:\n encoding = args.encoding\n else:\n encoding = locale.getpreferredencoding()\n\n assert args.metric is not None # FIXME\n metrics = [(module, __import__(module))\n for module in args.metric.split(',')]\n\n kwparams = ast.literal_eval(args.params)\n\n trfiles = glob.glob(\n os.path.join(args.datadir, \"system-outputs\")\n + \"/*/*/*\")\n system_results = collections.defaultdict(dict)\n segment_results = collections.defaultdict(dict)\n for trfile in trfiles:\n try:\n # lp: language pair\n dataset, lp, l1, l2, system = metadata_from_name(trfile)\n except ValueError:\n logging.warning(\"Skipping %s\" % trfile)\n continue\n\n logging.info('{} , {} , {} ({})'.format(dataset, lp, system, kwparams))\n reffile = os.path.join(args.datadir,\n \"references\",\n '{}-ref.{}'.format(dataset, l2))\n if not os.path.exists(reffile):\n reffile = os.path.join(args.datadir,\n \"references\",\n '{}-ref.{}'.format(dataset, lp))\n if not os.path.exists(reffile):\n logging.warning(\"Skipping %s due to lack of ref\" % trfile)\n continue\n logging.info(\"%s\" % reffile)\n logging.info(\"%s\" % trfile)\n\n with codecs.open(trfile, 'r', encoding=encoding) as fobj:\n hyps = [x.strip() for x in fobj.readlines()]\n\n with codecs.open(reffile, 'r', encoding=encoding) as fobj:\n refs = [x.strip() for x in fobj.readlines()]\n\n for (metric, module) in metrics:\n func = module.eval_single\n scores = [func(x, y, **kwparams) for x, y in zip(hyps, refs)]\n segment_results[metric][(dataset, lp, system)] = scores\n\n func = module.eval\n score = func(hyps, refs, **kwparams)\n system_results[metric][(dataset, lp, system)] = score\n logging.info(\"%s\" % score)\n\n\n if not os.path.isdir(args.outputdir):\n os.mkdir(args.outputdir)\n for (metric, module) in metrics:\n if args.name:\n name = args.name\n else:\n name = metric\n sysoutputdir = os.path.join(args.outputdir, name)\n if not os.path.isdir(sysoutputdir):\n os.mkdir(sysoutputdir)\n outfilename = os.path.join(sysoutputdir,\n name + \".sys.score\")\n with codecs.open(outfilename, 'w', encoding=encoding) as sysout:\n for k, score in system_results[metric].items():\n dataset, lp, system = k\n sysout.write(\n \"\\t\".join((name, lp, dataset, system, \"%.6f\" % score)))\n sysout.write(\"\\n\")\n\n outfilename = os.path.join(sysoutputdir,\n name + \".seg.score\")\n with codecs.open(outfilename, 'w', encoding=encoding) as segout:\n for k, scores in segment_results[metric].items():\n dataset, lp, system = k\n for i in range(len(scores)):\n segout.write(\"\\t\".join(\n (name, lp, dataset, system,\n str(i + 1), \"%.6f\" % scores[i])))\n segout.write(\"\\n\")\n\n","sub_path":"scripts/evalwmt.py","file_name":"evalwmt.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200119726","text":"# Copyright 2014 Mitchell Kember and Charles Bai. 
Subject to the MIT License.\n\n\"\"\"Makes the Scribbler Bot trace shapes with a marker.\"\"\"\n\nimport json\nimport math\nfrom time import time\n\nfrom scribbler.util import deg_to_rad, rad_to_deg, dist_2d, equiv_angle\nfrom scribbler.programs.base import ModeProgram\n\n\n# Short codes for the parameters of the program.\nPARAM_CODES = {\n 'rs': 'rotation_speed',\n 'ps': 'point_scale',\n 'mr': 'min_rotation'\n}\n\n# Default values for the parameters of the program.\nPARAM_DEFAULTS = {\n 'speed': 0.1,\n 'angle_to_time': 0.0052,\n 'rotation_speed': 0.1, # 0.4, # from 0.0 to 1.0\n 'point_scale': 0.02, #0.05, # cm/px\n 'min_rotation': 2 # deg\n}\n\nPOINTS_PREFIX = 'points:'\n\n\nclass Tracie(ModeProgram):\n\n \"\"\"Tracie takes a set of points as input and draws the shape with a pen.\"\"\"\n\n def __init__(self):\n # self.new_points is the list of points that will be used next.\n # It persists across resets.\n self.new_points = []\n ModeProgram.__init__(self, 0)\n self.add_params(PARAM_DEFAULTS, PARAM_CODES)\n\n def reset(self):\n ModeProgram.reset(self)\n self.points = None # path the the robot draws\n self.index = 0 # index of point robot is going towards\n self.heading = math.pi / 2 # the current heading, in standard position\n self.rot_dir = 1 # 1 for counterclockwise, -1 for clockwise\n self.go_for = 0 # the time duration of the robot's current action\n # These two are only needed because the status method needs to access\n # them after they have been set by the time setting methods.\n self.delta_angle = 0\n self.delta_pos = 0\n\n def __call__(self, command):\n p_status = ModeProgram.__call__(self, command)\n if p_status:\n return p_status\n if command.startswith(POINTS_PREFIX):\n json_str = command[len(POINTS_PREFIX):]\n self.new_points = self.transform_points(json.loads(json_str))\n return \"received {} points\".format(str(len(self.new_points)))\n if command == 'short:trace':\n if self.mode == 0:\n return \"0 {}\".format(self.heading)\n if self.mode == 'halt':\n return \"{} {}\".format(len(self.points)-1, self.heading)\n t = self.mode_time()\n T = self.go_for\n i = self.index - 1\n delta_i = 1\n theta = self.heading\n delta_theta = 0\n if self.mode == 'rotate':\n delta_i = 0\n delta_theta = self.delta_angle\n theta -= delta_theta\n vals = [t, T, i, delta_i, theta, delta_theta]\n return ' '.join(map(str, vals));\n\n def transform_points(self, data):\n \"\"\"Parses the point data and translates all points to make the firs\n point the origin. 
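        For example, [{'x': '10', 'y': '20'}, {'x': '13', 'y': '24'}]
        becomes [(0.0, 0.0), (3.0, 4.0)].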
Returns the resulting points list.\"\"\"\n x0 = float(data[0]['x'])\n y0 = float(data[0]['y'])\n return [(float(p['x']) - x0, float(p['y']) - y0) for p in data]\n\n @property\n def speed(self):\n # This looks wrong, but the speed is actually used just before the mode\n # switch, so it needs to be this way.\n if self.mode == 'drive':\n return self.params['rotation_speed']\n return self.params['speed']\n\n def is_mode_done(self):\n \"\"\"Returns true if the current mode is finished, and false otherwise.\n The 'halt' mode is never done.\"\"\"\n z = self.mode == 0\n halt = self.mode == 'halt'\n return z or (not halt and self.has_elapsed(self.go_for))\n\n def next_mode(self):\n \"\"\"Switches to the next mode and starts it.\"\"\"\n if self.mode == 'halt':\n return\n if self.mode == 0:\n # Use the points that were sent most recently.\n self.points = self.new_points[:]\n if self.mode == 'rotate':\n self.set_drive_time()\n self.goto_mode('drive')\n elif self.mode == 0 or self.mode == 'drive':\n self.index += 1\n if self.index < len(self.points):\n self.set_rotate_time()\n # Don't even try to rotate if it's a very small angle, because\n # the robot will go too far; it is better to go straight.\n min_rad = deg_to_rad(self.params['min_rotation'])\n if abs(self.delta_angle) < min_rad:\n self.set_drive_time()\n self.goto_mode('drive')\n else:\n self.goto_mode('rotate')\n else:\n self.goto_mode('halt')\n\n def set_drive_time(self):\n \"\"\"Sets the time duration for which the robot should drive in order to\n get to the next point.\"\"\"\n x1, y1 = self.points[self.index - 1]\n x2, y2 = self.points[self.index]\n # Scale by the point_scale now, rather than in the transform method,\n # because the user can change this value at any time.\n distance = self.params['point_scale'] * dist_2d(x1, y1, x2, y2)\n self.go_for = self.dist_to_time(distance)\n self.delta_pos = distance\n\n def set_rotate_time(self):\n \"\"\"Sets the time duration for which the robot should rotate in order to\n be facing the next point.\"\"\"\n new_heading = self.next_point_angle()\n delta = equiv_angle(new_heading - self.heading)\n self.rot_dir = 1 if delta > 0 else -1\n self.go_for = self.angle_to_time(rad_to_deg(self.rot_dir * delta))\n self.heading = new_heading\n self.delta_angle = delta\n\n def next_point_angle(self):\n \"\"\"Calculates the angle that the line connecting the current point and\n the next point makes in standard position.\"\"\"\n x1, y1 = self.points[self.index - 1]\n x2, y2 = self.points[self.index]\n return math.atan2(y2 - y1, x2 - x1)\n\n def move(self):\n \"\"\"Makes Myro calls to move the robot according to the current mode.\n Called when the mode is begun and whenever the program is resumed.\"\"\"\n ModeProgram.move(self)\n if self.mode == 0 or self.mode == 'halt':\n myro.stop()\n if self.mode == 'drive':\n myro.forward(self.speed)\n if self.mode == 'rotate':\n myro.rotate(self.rot_dir * self.speed)\n\n def status(self):\n \"\"\"Return the status message that should be displayed at the beginning\n of the current mode.\"\"\"\n if self.mode == 0:\n return \"impossible\"\n if self.mode == 'halt':\n return \"finished drawing\"\n if self.mode == 'drive':\n return \"drive {:.2f} cm\".format(self.delta_pos)\n if self.mode == 'rotate':\n return \"rotate {:.2f} degrees\".format(rad_to_deg(self.delta_angle))\n\n def no_start(self):\n if len(self.new_points) <= 1:\n return \"not enough points\"\n return False\n\n def loop(self):\n ModeProgram.loop(self)\n if self.is_mode_done():\n self.next_mode()\n return 
self.status()\n","sub_path":"src/scribbler/programs/tracie.py","file_name":"tracie.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"638802406","text":"\"\"\"Base functions for running GATK pipeline on DNA-seq data.\"\"\"\n\n\nfrom multiprocessing import Pool\nimport subprocess as sp\n\n# ==========================================\n# Functions\n# ==========================================\n\ndef cleanRun(commandString, logFile, workingDir = '.', logOpenMode = 'at',\n logPrefix = ''):\n\n \"\"\"Run command from string and save output to log file.\"\"\"\n\n try:\n std = sp.check_output(commandString, cwd = workingDir,\n shell = True, stderr = sp.STDOUT,\n universal_newlines = True)\n except sp.CalledProcessError as e:\n std = \"command '{}' return with error (code {}):\\n{}\".format(\n e.cmd, e.returncode, e.output)\n\n with open(logFile, logOpenMode) as f:\n f.write(logPrefix + std)\n\n return\n\n\ndef poolRunFun(function, cores, inerable):\n\n \"\"\"Run function via Pool or for-loop, depending on # cores; plus allow 2D `inerable`.\n Note: `inerable` is an input iterable (see what I did there?!).\"\"\"\n\n assert cores > 0 and 'int' in str(cores.__class__), \\\n '`cores` must be a positive integer.'\n\n try:\n twoD = inerable[0].__class__ in [list, tuple]\n except TypeError as t:\n if t.args[0] == \"'zip' object is not subscriptable\":\n twoD = inerable.__class__ == zip\n else:\n raise NotImplementedError(\n 'Only 2D list, tuple, or zip objects are implemented.')\n\n if cores > 1:\n if twoD:\n with Pool(processes = cores) as pool:\n pool.starmap(function, inerable)\n else:\n with Pool(processes = cores) as pool:\n pool.map(function, inerable)\n else:\n if twoD:\n for i in inerable:\n function(*i)\n else:\n for i in inerable:\n function(i)\n return\n\n\n\ndef splitPath(filePath):\n\n \"\"\"Split file's path into filename and directory.\n\n Note: Allows for just filename given as input when that file is already in the\n current working directory. 
The returned object `directory` will be simply '.'.\n \"\"\"\n\n directory = '/'.join(filePath.split('/')[:-1])\n\n if len(directory) == 0:\n directory = '.'\n\n filename = filePath.split('/')[-1]\n\n return directory, filename\n\n\ndef makeCoreStringList(cores, files, parType):\n\n \"\"\"Make list of 'core strings' for multithreading commands, plus adjust `cores`.\"\"\"\n\n if cores > len(files):\n coreList = [(cores // len(files)) + 1] * (cores % len(files)) + \\\n [cores // len(files)] * (len(files) - (cores % len(files)))\n coresAdj = len(files)\n else:\n coreList = [1] * len(files)\n coresAdj = cores\n\n def makeCoreString(numCores, pT):\n \"\"\"Inner function to create one individual string.\"\"\"\n if numCores == 1:\n return ''\n else:\n return '-%s %i ' % (pT, numCores)\n\n coreStrList = [makeCoreString(x, parType) for x in coreList]\n\n return coreStrList, coresAdj\n\n\n\n# ==========================================\n# Command strings\n# ==========================================\n\nprepRef = \\\n'''export ref=%s\nexport dictOut=`echo ${ref} | sed 's/.fasta$/.dict/g; s/.fa$/.dict/g'`\n\nexport javMem=2\n\nmodule load java/jdk1.8.0_20\nmodule load samtools/1.2\nmodule load picard/2.4.1\\n\n\nsamtools faidx ${ref}\\n\n\njava -Xmx${javMem}g -jar /usr/local/apps/picard/2.4.1/picard.jar \\\\\nCreateSequenceDictionary \\\\\nREFERENCE=${ref} \\\\\nOUTPUT=${dictOut}\n'''\n\n\naddRG = \\\n'''export bamFile=%(bam)s\n\nexport javMem=2\n\nmodule load java/jdk1.8.0_20\nmodule load samtools/1.2\nmodule load picard/2.4.1\\n\n\njava -Xmx${javMem}g \\\\\n -classpath \"/usr/local/apps/picard/2.4.1\" \\\\\n -jar /usr/local/apps/picard/2.4.1/picard.jar \\\\\n AddOrReplaceReadGroups \\\\\n CREATE_INDEX=false \\\\\n INPUT=${bamFile} \\\\\n OUTPUT=${bamFile/.bam/_rG.bam} \\\\\n RGID=LANE1 \\\\\n RGLB=${bamFile/.bam/} \\\\\n RGPL=ILLUMINA \\\\\n RGPU=ILLUMINA \\\\\n RGSM=${bamFile/.bam/}\n\nsamtools index -b ${bamFile/.bam/_rG.bam}\n'''\n\n\nmarkDups = \\\n'''export bamFile=%(bam)s\n\nexport javMem=18\n\n# Making new name, assuming input BAM matches *_.bam\n# I did it this way so can be anything; '_' is only thing that's important.\ntmp=(${bamFile//_/ })\nunset \"tmp[${#tmp[@]}-1]\"\nexport outFile=`echo -n ${tmp[@]} | tr ' ' '_'`_mD.bam\\n\n\nmodule load java/jdk1.8.0_20\nmodule load samtools/1.2\nmodule load picard/2.4.1\\n\n\nmkdir ./tmp/${bamFile/.bam/}\\n\n\njava -Xmx${javMem}g -Djava.io.tmpdir=./tmp/${bamFile/.bam/} \\\\\n -classpath \"/usr/local/apps/picard/2.4.1\" \\\\\n -jar /usr/local/apps/picard/2.4.1/picard.jar MarkDuplicates \\\\\n CREATE_INDEX=false \\\\\n INPUT=${bamFile} \\\\\n OUTPUT=${outFile} \\\\\n MAX_RECORDS_IN_RAM=500000 \\\\\n TMP_DIR=./tmp/${bamFile/.bam/} \\\\\n METRICS_FILE=${outFile/.bam/.txt}\n\nsamtools index -b ${outFile}\n'''\n\n\nreIndels = \\\n'''export bamFile=%(bam)s\nexport reference=%(ref)s\\n\n\n# Making new name, assuming input BAM matches *_.bam\n# I did it this way so can be anything; '_' is only thing that's important.\ntmp=(${bamFile//_/ })\nunset \"tmp[${#tmp[@]}-1]\"\nexport outFile=`echo -n ${tmp[@]} | tr ' ' '_'`_rI.bam\\n\n\nmodule load java/jdk1.8.0_20\nmodule load samtools/1.2\nmodule load gatk/3.6\\n\n\njava -jar /usr/local/apps/gatk/3.6/GenomeAnalysisTK.jar \\\\\n-T RealignerTargetCreator \\\\\n-R ${reference} \\\\\n%(corS)s\\\\\n-I ${bamFile} \\\\\n-o ${outFile/.bam/.list}\\n\n\njava -jar /usr/local/apps/gatk/3.6/GenomeAnalysisTK.jar \\\\\n-T IndelRealigner \\\\\n-R ${reference} \\\\\n-I ${bamFile} \\\\\n-targetIntervals ${outFile/.bam/.list} \\\\\n-o 
${outFile}\\n\n\nsamtools index -b ${outFile}\n'''\n\n\ncallVariants = \\\n'''export bamFile=%(bam)s\nexport reference=%(ref)s\\n\n\n# Making new name, assuming input BAM matches *_.bam\n# I did it this way so can be anything; '_' is only thing that's important.\ntmp=(${bamFile//_/ })\nunset \"tmp[${#tmp[@]}-1]\"\nexport outFile=`echo -n ${tmp[@]} | tr ' ' '_'`_cV.g.vcf\\n\n\nmodule load java/jdk1.8.0_20\nmodule load gatk/3.6\\n\n\njava -jar /usr/local/apps/gatk/3.6/GenomeAnalysisTK.jar \\\\\n-T HaplotypeCaller \\\\\n-R ${reference} \\\\\n%(corS)s\\\\\n-I ${bamFile} \\\\\n--emitRefConfidence GVCF \\\\\n--genotyping_mode DISCOVERY \\\\\n-o ${outFile}\n'''\n\n\njointGeno = \\\n'''export reference=%(ref)s\n\nmodule load java/jdk1.8.0_20\nmodule load gatk/3.6\\n\n\njava -jar /usr/local/apps/gatk/3.6/GenomeAnalysisTK.jar \\\\\n-T GenotypeGVCFs \\\\\n-R ${reference} \\\\\n%(coreS)s \\\\\n%(varS)s \\\\\n%(moreOpts)s \\\\\n-o %(out)s_jG.vcf'''\n","sub_path":"bin/GATKpipe/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"594739289","text":"# Simple script that uses dataParser.py to get symbols on Binance\n# By default it only shows symbols with the status of 'TRADING'\n# You can get all symbols by setting \"onlyTrading=False\" and\n# use \"includes='LTC|DAI'\" to pull the symbols that include\n# LTC and/or DAI. We use getKlines to retrieve the kline data\n# of each of those symbols.\n\nfrom bapiw.dataParser import DataParser\nimport pandas as pd\n\ndp = DataParser()\n\n# Puts the list of all symbols on Binance that include\n# LTC and/or DAI into the symbols var.\n# dataParser already puts them into a DataFrame\nsymbols = dp.getSymbols(includes='LTC|DAI')\n\n# Print symbols list\nprint(symbols)\n\n# Convert symbols dataframe column to a list\nsymbol_list = symbols['symbols'].tolist()\n\n# Pull every symbol's Kline data at 1min intervals and print it\n# Using data='ohlcv' we get open, high, low, close and volume values\nfor symbol in symbol_list:\n df = dp.getKlines(symbol=symbol, interval=dp.INTERVAL_1MIN, data='ohlcv')\n print(symbol, \"Kline data:\")\n print(df)","sub_path":"examples/getSymbols.py","file_name":"getSymbols.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"54882521","text":"\"\"\"CDN URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', SSRhome, name='SSR_home'),\n url(r'^SSRinit/$', SSRinit, name='SSR_init'),\n url(r'^SSRdel/(?P<id>[0-9]+)/$', SSRdelete, name='SSR_delete'),\n url(r'^SSRinitlogs/$', SSRinitlogs, name='SSR_initlogs'),\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"225885738","text":"#This program is built on Python 3.7\nlist1 = [1, 2, 3, 4, 100]\nlist2 = [1, 1, 5, 5, 10, 8, 7]\nlist3 = [-10, -4, -2, -4, -2, 0]\nlist4 = [100, 200]\n\ndef SpecialAverage(nums):\n # Check that the list has the required length\n if (len(nums) < 3):\n raise Exception(\"The list should contain at least three elements\")\n else:\n # Sort the list to have it ordered\n nums.sort()\n # print(\"list sorted is \", nums)\n # Remove the first element\n nums.pop(0)\n # Count the length and remove the last element\n lastItemPos = len(nums) - 1\n nums.pop(lastItemPos)\n # print(\"Removing first and last element, list is \", nums)\n # Sum all the elements and count the list length\n # Do the int division after, assign it to the average\n listSum = sum(nums)\n listLen = len(nums)\n\n average = listSum // listLen\n print(\"Special Average of list is\", average)\n return average\n\nSpecialAverage(list1)\n# SpecialAverage(list2)\n# SpecialAverage(list3)\n# SpecialAverage(list4)\n","sub_path":"specialAverage/SpecialAverage.py","file_name":"SpecialAverage.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"463817853","text":"import csv\nimport pickle\nimport torch\nimport timeit\n\nDIMEN = 256\nEPOCH = 3\nRATE = 0.03\n\ndef preprocessing(target):\n translation_table = dict.fromkeys(map(ord, '\\\\;-:'), \"\")\n target = target.translate(translation_table)\n translation_table = dict.fromkeys(map(ord, '=+/#*~&?%!@$.,<>()[]{}0123456789'), \"\")\n target = target.translate(translation_table)\n translation_table = dict.fromkeys(map(ord, '\\''), \" \\'\")\n target = target.translate(translation_table)\n\n return target\n\ndef test_bigram_tokenize(data_set, bigram2i):\n result = []\n for list_ in data_set:\n list_[2] = preprocessing(list_[2].lower())\n corpus = list_[2].split(' ')\n\n bigram = []\n for j in range(len(corpus)-1):\n temp = corpus[j] + '-' + corpus[j+1]\n if temp in bigram2i:\n bigram.append(bigram2i[temp])\n bigram = list(set(bigram))\n result.append([int(list_[0])-1, bigram, list_[2]])\n \n return result\n\ndef bigram_tokenize(data_set):\n news = []\n #i2class : sentence num -> class\n i2class = {}\n\n s2i = {}\n i2s = {}\n i = 0\n #print(\"- step1 : making s2i, i2s...\")\n for list_ in data_set:\n list_[2] = preprocessing(list_[2].lower())\n if list_[2] in s2i:\n pass\n else:\n news.append(list_[2])\n i2class[i] = int(list_[0]) - 1\n s2i[list_[2]] = i\n i2s[i] = list_[2]\n i += 1\n \n #bigram list\n bigram = []\n\n bigram2i = {}\n i2bigram = {}\n s_bag_temp = {}\n s_bag = {}\n\n #print(\"- step2 : making bigram, sentence(%d) ...\" %(len(news)))\n for sentence in news:\n corpus = sentence.split(' ')\n s_bag_temp[s2i[sentence]] = []\n for j in range(len(corpus)-1):\n temp = corpus[j] + '-' + corpus[j+1]\n s_bag_temp[s2i[sentence]].append(temp)\n bigram.append(temp)\n \n bigram = list(set(bigram))\n\n #print(\"- step3 : making bigram2i, i2bigram, 
s_bag...\")\n i = 0\n for v in bigram:\n bigram2i[v] = i\n i2bigram[i] = v\n i += 1\n\n for k,v in s_bag_temp.items():\n s_bag[k] = []\n #b = bigram\n for b in v:\n s_bag[k].append(bigram2i[b])\n return s2i, i2class, bigram2i, s_bag\n\n#target = class number\n#inputs = bag of bigram numbers\n#inputMatrix = (N,D)\n#outputMatrix = (4,D)\ndef classification(target, inputs, inputMatrix, outputMatrix):\n list_ = []\n N = inputMatrix.shape[0]\n D = inputMatrix.shape[1]\n\n h = torch.zeros(1, D)\n\n for bigram in inputs:\n h += inputMatrix[bigram]\n h = h.reshape(D, 1)\n\n o = torch.mm(outputMatrix, h)\n e = torch.exp(o - torch.max(o))\n softmax = e / torch.sum(e)\n #softmax.shape = (4, 1)\n result = torch.argmax(softmax)\n t = torch.tensor(target)\n predict = None\n if torch.equal(result, t):\n predict = 1\n else:\n predict = 0\n \n loss = -torch.log(softmax[target])\n softmax[target] = softmax[target] - 1\n\n grad_in = torch.mm(softmax.reshape(1, 4), outputMatrix)\n #grad_in.shape = (1, D)\n grad_out = torch.mm(softmax, h.reshape(1, D))\n #grad_out.shape = (4, D)\n \n list_ = [target, int(t)]\n return loss, grad_in, grad_out, predict, list_\n\ndef trainer(input_set, bigram2i, s_bag, dimension=64, learning_rate=0.01, epoch=1):\n W_in = torch.randn(len(bigram2i), dimension) / (dimension**0.5)\n #(N,D) N : nuber of bigrams, D : dimension\n W_out = torch.randn(4, dimension) / (dimension**0.5)\n #(4,D)\n\n i = 0\n losses = []\n acc = []\n print(\"# of training samples\")\n print(len(input_set))\n print()\n\n for _ in range(epoch):\n #target : class number, input_sentence : int\n for target, input_sentence in input_set:\n i += 1\n inputs = s_bag[input_sentence]\n L, G_in, G_out, predict, _= classification(target, inputs, W_in, W_out)\n W_in[inputs] -= learning_rate*G_in\n W_out -= learning_rate*G_out\n\n losses.append(L.item())\n\n if predict is 1:\n acc.append(1)\n else:\n acc.append(0)\n\n if i%(50000*epoch) == 0:\n avg_loss=sum(losses)/len(losses)\n print(\"(%d / %d) Loss : %f\" %(i, len(input_set) * epoch, avg_loss,))\n losses = []\n print(\"train_set accuracy : %.2f\" %(sum(acc)/len(acc) * 100))\n acc = []\n\n avg_loss=sum(losses)/len(losses)\n print(\"(%d / %d) Loss : %f\" %(i, len(input_set) * epoch, avg_loss,))\n print(\"train_set accuracy : %.2f\" %(sum(acc)/len(acc) * 100))\n print()\n return W_in, W_out\n\n\ndef main():\n start = timeit.default_timer()\n train_dic = open('train.csv', mode='r', encoding='utf-8').readlines()\n test_dic = open('test.csv', mode='r', encoding='utf-8').readlines()\n\n train_lists = csv.reader(train_dic)\n test_lists = csv.reader(test_dic)\n\n print(\"train data : bigram_tokenizing...\")\n s2i, i2class, bigram2i, s_bag = bigram_tokenize(train_lists)\n\n input_set = []\n for k in s_bag.keys():\n input_set.append([i2class[k], k])\n\n print(\"test data : bigram_tokenizing...\")\n test_set = test_bigram_tokenize(test_lists, bigram2i)\n\n print()\n print(\"training...\")\n #emb1.shape = (N, D), emb2.shape = (4, D)\n emb1, emb2 = trainer(input_set, bigram2i, s_bag, dimension=DIMEN, learning_rate=RATE, epoch=EPOCH)\n\n print(\"testing...\")\n print(\"# of tesing samples\")\n print(len(test_set))\n print()\n \n acc = []\n f = open(\"result.txt\", \"w\")\n i = 0 \n for test in test_set:\n i += 1\n _, _, _, predict, list_ = classification(test[0], test[1], emb1, emb2)\n f.write(\"sentence %d : %s\\npredict : %d, real : %d\\n\" %(i, test[2], list_[0], list_[1]))\n if predict is 1:\n acc.append(1)\n else:\n acc.append(0)\n \n stop = timeit.default_timer()\n 
print(\"==============================================\")\n print(\"train_data : all\")\n print(\"test_data : all\")\n print(\"# of bigram : %d\" %(len(bigram2i)))\n print(\"epoch : %d\" %EPOCH)\n print(\"dimen : %d\" %DIMEN)\n print(\"learning_rate : %.3f\" %RATE)\n print(\"computing time : %.2f\" %(stop-start))\n print(\"correct / total : %d / %d\" %(sum(acc), len(acc)))\n print(\"test_set accuracy : %.2f\" %(sum(acc)/len(acc) * 100))\n print(\"==============================================\")\n \nmain()","sub_path":"assignment5/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302013465","text":"\"\"\"\nDatasetUtil class\n\"\"\"\n\nfrom hypernets_processor.version import __version__\nimport string\nfrom xarray import Variable, DataArray, Dataset\nimport numpy as np\n\n\n'''___Authorship___'''\n__author__ = \"Sam Hunt\"\n__created__ = \"12/2/2020\"\n__version__ = __version__\n__maintainer__ = \"Sam Hunt\"\n__email__ = \"sam.hunt@npl.co.uk\"\n__status__ = \"Development\"\n\nDEFAULT_DIM_NAMES = list(string.ascii_lowercase[-3:]) + list(string.ascii_lowercase[:-3])\nDEFAULT_DIM_NAMES.reverse()\n\n\nclass DatasetUtil:\n \"\"\"\n Class to provide utilities for generating standard xarray DataArrays and Variables\n \"\"\"\n\n @staticmethod\n def create_default_array(dim_sizes, dtype, dim_names=None, fill_value=None):\n \"\"\"\n Return default empty xarray DataArray\n\n :type dim_sizes: list\n :param dim_sizes: dimension sizes as ints, i.e. [dim1_size, dim2_size, dim3_size] (e.g. [2,3,5])\n\n :type dtype: type\n :param dtype: numpy data type\n\n :type dim_names: list\n :param dim_names: (optional) dimension names as strings, i.e. [\"dim1_name\", \"dim2_name\", \"dim3_size\"]\n\n :type fill_value: int/float\n :param fill_value: (optional) fill value (if None CF compliant value used)\n\n :return: Default empty array\n :rtype: xarray.DataArray\n \"\"\"\n\n if fill_value is None:\n fill_value = DatasetUtil.get_default_fill_value(dtype)\n\n empty_array = np.full(dim_sizes, fill_value, dtype)\n\n if dim_names is not None:\n default_array = DataArray(empty_array, dims=dim_names)\n elif (dim_names is None) and (dim_sizes == []):\n default_array = DataArray(empty_array)\n else:\n default_array = DataArray(empty_array, dims=DEFAULT_DIM_NAMES[-len(dim_sizes):])\n\n return default_array\n\n @staticmethod\n def create_variable(dim_sizes, dtype, dim_names=None, attributes=None, fill_value=None):\n \"\"\"\n Return default empty xarray Variable\n\n :type dim_sizes: list\n :param dim_sizes: dimension sizes as ints, i.e. [dim1_size, dim2_size, dim3_size] (e.g. [2,3,5])\n\n :type dtype: type\n :param dtype: numpy data type\n\n :type dim_names: list\n :param dim_names: (optional) dimension names as strings, i.e. [\"dim1_name\", \"dim2_name\", \"dim3_size\"]\n\n :type attributes: dict\n :param attributes: (optional) dictionary of variable attributes, e.g. 
standard_name\n\n :type fill_value: int/float\n :param fill_value: (optional) fill value (if None CF compliant value used)\n\n :return: Default empty variable\n :rtype: xarray.Variable\n \"\"\"\n \n if fill_value is None:\n fill_value = DatasetUtil.get_default_fill_value(dtype)\n \n default_array = DatasetUtil.create_default_array(dim_sizes, dtype, fill_value=fill_value)\n\n if dim_names is None:\n variable = Variable(DEFAULT_DIM_NAMES[-len(dim_sizes):], default_array)\n else:\n variable = Variable(dim_names, default_array)\n\n variable.attrs[\"_FillValue\"] = fill_value\n\n if attributes is not None:\n variable.attrs = {**variable.attrs, **attributes}\n\n return variable\n\n @staticmethod\n def create_flags_variable(dim_sizes, meanings, dim_names=None, attributes=None):\n \"\"\"\n Return default empty 1d xarray flag Variable\n\n :type dim_sizes: list\n :param dim_sizes: dimension sizes as ints, i.e. [dim1_size, dim2_size, dim3_size] (e.g. [2,3,5])\n\n :type attributes: dict\n :param attributes: (optional) dictionary of variable attributes, e.g. standard_name\n\n :type dim_names: list\n :param dim_names: (optional) dimension names as strings, i.e. [\"dim1_name\", \"dim2_name\", \"dim3_size\"]\n\n :return: Default empty flag vector variable\n :rtype: xarray.Variable\n \"\"\"\n\n n_masks = len(meanings)\n\n data_type = DatasetUtil.return_flags_dtype(n_masks)\n\n variable = DatasetUtil.create_variable(dim_sizes, data_type, dim_names=dim_names, fill_value=0,\n attributes=attributes)\n\n # add flag attributes\n variable.attrs[\"flag_meanings\"] = str(meanings)[1:-1].replace(\"'\",\"\").replace(\",\",\"\")\n variable.attrs[\"flag_masks\"] = str([2**i for i in range(0, n_masks)])[1:-1]\n\n # todo - make sure flags can't have units\n\n return variable\n\n @staticmethod\n def return_flags_dtype(n_masks):\n \"\"\"\n Return required flags array data type\n\n :type n_masks: int\n :param n_masks: number of masks required in flag array\n\n :return: data type\n :rtype: dtype\n \"\"\"\n\n if n_masks <= 8:\n return np.uint8\n elif n_masks <= 16:\n return np.uint16\n elif n_masks <= 32:\n return np.uint32\n else:\n return np.uint64\n\n @staticmethod\n def add_encoding(variable, dtype, scale_factor=1.0, offset=0.0, fill_value=None, chunksizes=None):\n \"\"\"\n Add encoding to xarray Variable to apply when writing netCDF files\n\n :type variable: xarray.Variable\n :param variable: data variable\n\n :type dtype: type\n :param dtype: numpy data type\n\n :type scale_factor: float\n :param scale_factor: variable scale factor\n\n :type offset: float\n :param offset: variable offset value\n\n :type fill_value: int/float\n :param fill_value: (optional) fill value\n\n :type chunksizes: float\n :param chunksizes: (optional) chucksizes\n \"\"\"\n\n # todo - make sure flags can't have encoding added\n\n encoding_dict = {'dtype': dtype, 'scale_factor': scale_factor, 'add_offset': offset}\n\n if chunksizes is not None:\n encoding_dict.update({'chunksizes': chunksizes})\n\n if fill_value is not None:\n encoding_dict.update({'_FillValue': fill_value})\n\n variable.encoding = encoding_dict\n\n @staticmethod\n def get_default_fill_value(dtype):\n \"\"\"\n Returns default fill_value for given data type\n\n :type dtype: type\n :param dtype: numpy dtype\n\n :return: CF-conforming fill value\n :rtype: fill_value\n \"\"\"\n\n if dtype == np.int8:\n return np.int8(-127)\n if dtype == np.uint8:\n return np.uint8(-1)\n elif dtype == np.int16:\n return np.int16(-32767)\n elif dtype == np.uint16:\n return np.uint16(-1)\n elif dtype == 
np.int32:\n return np.int32(-2147483647)\n elif dtype == np.uint32:\n return np.uint32(-1)\n elif dtype == np.int64:\n return np.int64(-9223372036854775806)\n elif dtype == np.float32:\n return np.float32(9.96921E36)\n elif dtype == np.float64:\n return np.float64(9.969209968386869E36)\n\n @staticmethod\n def _get_flag_encoding(da):\n \"\"\"\n Returns flag encoding for flag type data array\n :type da: xarray.DataArray\n :param da: data array\n :return: flag meanings\n :rtype: list\n :return: flag masks\n :rtype: list\n \"\"\"\n\n try:\n flag_meanings = da.attrs[\"flag_meanings\"].split()\n flag_masks = [int(fm) for fm in da.attrs[\"flag_masks\"].split(\",\")]\n except KeyError:\n raise KeyError(da.name + \" not a flag variable\")\n\n return flag_meanings, flag_masks\n\n @staticmethod\n def unpack_flags(da):\n \"\"\"\n Breaks down flag data array into dataset of boolean masks for each flag\n :type da: xarray.DataArray\n :param da: dataset\n :return: flag masks\n :rtype: xarray.Dataset\n \"\"\"\n\n flag_meanings, flag_masks = DatasetUtil._get_flag_encoding(da)\n\n ds = Dataset()\n for flag_meaning, flag_mask in zip(flag_meanings, flag_masks):\n ds[flag_meaning] = DatasetUtil.create_variable(list(da.shape), bool, dim_names=list(da.dims))\n ds[flag_meaning] = (da & flag_mask).astype(bool)\n\n return ds\n\n @staticmethod\n def get_flags_mask_or(da, flags=None):\n \"\"\"\n Returns boolean mask for set of flags, defined as logical or of flags\n\n :type da: xarray.DataArray\n :param da: dataset\n\n :type flags: list\n :param flags: list of flags (if unset all data flags selected)\n\n :return: flag masks\n :rtype: numpy.ndarray\n \"\"\"\n\n flags_ds = DatasetUtil.unpack_flags(da)\n\n flags = flags if flags is not None else flags_ds.variables\n mask_flags = [flags_ds[flag].values for flag in flags]\n\n return np.logical_or.reduce(mask_flags)\n\n @staticmethod\n def get_flags_mask_and(da, flags=None):\n \"\"\"\n Returns boolean mask for set of flags, defined as logical and of flags\n\n :type da: xarray.DataArray\n :param da: dataset\n\n :type flags: list\n :param flags: list of flags (if unset all data flags selected)\n\n :return: flag masks\n :rtype: numpy.ndarray\n \"\"\"\n\n flags_ds = DatasetUtil.unpack_flags(da)\n\n flags = flags if flags is not None else flags_ds.variables\n mask_flags = [flags_ds[flag].values for flag in flags]\n\n return np.logical_and.reduce(mask_flags)\n\n @staticmethod\n def set_flag(da, flag_name, error_if_set=False):\n \"\"\"\n Sets named flag for elements in data array\n :type da: xarray.DataArray\n :param da: dataset\n :type flag_name: str\n :param flag_name: name of flag to set\n :type error_if_set: bool\n :param error_if_set: raises error if chosen flag is already set for any element\n \"\"\"\n\n set_flags = DatasetUtil.unpack_flags(da)[flag_name]\n\n if np.any(set_flags == True) and error_if_set:\n raise ValueError(\"Flag \" + flag_name + \" already set for variable \" + da.name)\n\n # Find flag mask\n flag_meanings, flag_masks = DatasetUtil._get_flag_encoding(da)\n flag_bit = flag_meanings.index(flag_name)\n flag_mask = flag_masks[flag_bit]\n\n da.values = da.values | flag_mask\n\n return da\n\n @staticmethod\n def unset_flag(da, flag_name, error_if_unset=False):\n \"\"\"\n Unsets named flag for specified index of dataset variable\n :type da: xarray.DataArray\n :param da: data array\n :type flag_name: str\n :param flag_name: name of flag to unset\n :type error_if_unset: bool\n :param error_if_unset: raises error if chosen flag is already set at specified index\n 
\"\"\"\n\n set_flags = DatasetUtil.unpack_flags(da)[flag_name]\n\n if np.any(set_flags == False) and error_if_unset:\n raise ValueError(\"Flag \" + flag_name + \" already set for variable \" + da.name)\n\n # Find flag mask\n flag_meanings, flag_masks = DatasetUtil._get_flag_encoding(da)\n flag_bit = flag_meanings.index(flag_name)\n flag_mask = flag_masks[flag_bit]\n\n da.values = da.values & ~flag_mask\n\n return da\n\n @staticmethod\n def get_set_flags(da):\n \"\"\"\n Return list of set flags for single element data array\n :type da: xarray.DataArray\n :param da: single element data array\n :return: set flags\n :rtype: list\n \"\"\"\n\n if da.shape != ():\n raise ValueError(\"Must pass single element data array\")\n\n flag_meanings, flag_masks = DatasetUtil._get_flag_encoding(da)\n\n set_flags = []\n for flag_meaning, flag_mask in zip(flag_meanings, flag_masks):\n if (da & flag_mask):\n set_flags.append(flag_meaning)\n\n return set_flags\n\n @staticmethod\n def check_flag_set(da, flag_name):\n \"\"\"\n Returns if flag for single element data array\n :type da: xarray.DataArray\n :param da: single element data array\n :type flag_name: str\n :param flag_name: name of flag to set\n :return: set flags\n :rtype: list\n \"\"\"\n\n if da.shape != ():\n raise ValueError(\"Must pass single element data array\")\n\n set_flags = DatasetUtil.get_set_flags(da)\n\n if flag_name in set_flags:\n return True\n return False\n\n\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"hypernets_processor/data_io/dataset_util.py","file_name":"dataset_util.py","file_ext":"py","file_size_in_byte":12339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91995227","text":"##############################################################################\nimport argparse\n\nimport numpy as np\nimport six\n\nimport chainer\nfrom chainer import cuda\nimport chainer.functions as F\nfrom chainer import optimizers\n\nfrom sklearn.datasets import fetch_mldata\nimport matplotlib.pyplot as plt\nimport sys, time, math\nimport pickle\n\n##############################################################################\n\nworkspace = '/Users/yuki/survey/workspace/'\nfig_home = '{}pic_AE_Linear/test2/'.format(workspace)\n\n##############################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', '-g', default=-1, type=int,\n help='GPU ID (negative value indicates CPU)')\nparser.add_argument('model')\n # GPUが使えるか確認\nargs = parser.parse_args()\nif args.gpu >= 0:\n cuda.check_cuda_available()\nxp = cuda.cupy if args.gpu >= 0 else np\n\n##############################################################################\n\n# draw a image of handwriting number\ndef draw_digit_ae(data, index, row, column, _type):\n size = 28\n plt.subplot(row, column, index+1) # 行数, 列数, プロット番号\n Z = data.reshape(size, size) # convert from vector to 28x28 matrix\n Z = Z[::-1, :] # flip vertical\n plt.xlim(0, size)\n plt.ylim(0, size)\n plt.pcolor(Z)\n plt.title('type=%s'%(_type), size=8)\n plt.gray()\n plt.tick_params(labelbottom='off')\n plt.tick_params(labelleft='off')\n\n# draw digit images\n# def draw_layer(data, index, length):\ndef draw_layer(data, index, row, column):\n # column = 15\n #column = 10\n # row = math.ceil(length/column)\n size = 28\n plt.subplot(row, column, index+1) # 行数, 列数, プロット番号\n Z = data.reshape(size, size) # convert from vector to 28x28 matrix\n Z = Z[::-1, :] # flip vertical\n plt.xlim(0, size)\n plt.ylim(0, 
size)\n plt.pcolor(Z)\n plt.title('%d'%index, size=8)\n plt.gray()\n plt.tick_params(labelbottom='off')\n plt.tick_params(labelleft='off')\n\n# Might it be better to pass str as a string label via draw(data, index, length, column, str)?\n\n##############################################################################\n\ndef image_save(data, name):\n save_home = '{}{}.png'.format(fig_home, name)\n print('plot start : {}.png'.format(name))\n \n plt.style.use('fivethirtyeight')\n column = 10\n row = math.ceil(data.shape[0]/column)\n print('plt.figure=({}, {})'.format(column, row))\n plt.figure(figsize=(column, row*1.045))\n \n print('data.shape :', data.shape)\n print('data.shape[0] :', data.shape[0])\n\n for i in six.moves.range(data.shape[0]):\n # draw_layer(data[i], i, data.shape[0])\n draw_layer(data[i], i, row, column)\n \n plt.savefig(save_home, bbox_inches='tight', pad_inches=0.0)\n plt.close()\n print('plot end : {}.png'.format(name))\n print()\n\n##############################################################################\n\n# batch size for one step of stochastic gradient descent\nbatchsize = 100\n\n# number of training epochs\n#n_epoch = 300\nn_epoch = 50\n\n# number of hidden units\nn_units = 100\n\n# whether to add noise\nnoised = False\n\n# download the MNIST handwritten digit dataset\n# cached at #HOME/scikit_learn_data/mldata/mnist-original.mat\nprint('fetch MNIST dataset')\nmnist = fetch_mldata('MNIST original', data_home='~/survey')\nprint('Complete!')\n\n# mnist.data : 70,000 samples of 784-dimensional vectors\nmnist.data = mnist.data.astype(np.float32)\nmnist.data /= 255 # rescale to 0-1\n\n# mnist.target : ground-truth (teacher) labels\nmnist.target = mnist.target.astype(np.int32)\n\n##############################################################################\n\n# use N samples for training and the remainder for validation\nN = 60000\ny_train, y_test = np.split(mnist.data.copy(), [N])\nN_test = y_test.shape[0]\n\nif noised:\n # Add noise\n noise_ratio = 0.2\n for data in mnist.data:\n perm = np.random.permutation(mnist.data.shape[1])[:int(mnist.data.shape[1]*noise_ratio)]\n data[perm] = 0.0\n \nx_train, x_test = np.split(mnist.data, [N])\n\n# AutoEncoder model settings\n# 784-dim input, 784-dim output, 2 layers\nif args.model == '':\n model = chainer.FunctionSet(l1=F.Linear(784, n_units),\n l2=F.Linear(n_units, 784))\nelse:\n model = pickle.load(open(args.model, 'rb'))\n\n# transfer the model to the GPU when one is used\nif args.gpu >= 0:\n cuda.get_device(args.gpu).use()\n model.to_gpu()\n\n# Neural net architecture\n\"\"\"\ndef forward(x_data, y_data, train=True):\n x, t = chainer.Variable(x_data), chainer.Variable(y_data)\n y = F.dropout(F.relu(model.l1(x)), train=train)\n x_hat = F.dropout(model.l2(y), train=train)\n #print('x_hat.data :\\n',x_hat.data) ###\n #print('x_hat.data.shape :', x_hat.data.shape) ###\n # use squared error as the loss function\n return F.mean_squared_error(x_hat, t)\n\"\"\"\n# test version\ndef forward(x_data, y_data, train=True):\n x, t = chainer.Variable(x_data), chainer.Variable(y_data)\n y = model.l1(x)\n x_hat = model.l2(y)\n #y = F.Linear(model.l1(x))\n #x_hat = F.Linear(model.l2(y))\n # use squared error as the loss function\n return F.mean_squared_error(x_hat, t)\n\n# Setup optimizer\noptimizer = optimizers.Adam()\n#optimizer = optimizers.SGD()\noptimizer.setup(model)\n\n##############################################################################\n\ndef dec_forward(x_data):\n x = chainer.Variable(x_data.astype(np.float32))\n y = model.l2(x)\n return y\n\ndef enc_forward(x_data):\n x = chainer.Variable(x_data.astype(np.float32))\n y = model.l1(x)\n return y\n\ntrain_loss = []\ntest_loss = []\ntest_mean_loss = []\n\nprev_loss = -1\nloss_std = 0\n\nloss_rate = []\n\nperiod = 50\n# Learning loop\nfor epoch in six.moves.range(1, 
int(n_epoch/period)+1):\n\n for p in six.moves.range(period):\n print('epoch : {}'.format((epoch-1)*period +(p+1)))\n start_time = time.clock()\n \n # training\n perm = np.random.permutation(N)\n sum_loss = 0\n \n for i in six.moves.range(0, N, batchsize):\n x_batch = xp.asarray(x_train[perm[i:i+batchsize]])\n y_batch = xp.asarray(y_train[perm[i:i+batchsize]])\n \n optimizer.zero_grads()\n loss = forward(x_batch, y_batch)\n loss.backward()\n optimizer.update()\n \n train_loss.append(loss.data)\n sum_loss += float(loss.data) * batchsize\n \n print('\\ttrain mean loss = {}'.format(sum_loss / N))\n \n # evaluation\n sum_loss = 0\n for i in six.moves.range(0, N_test, batchsize):\n x_batch = xp.asarray(x_test[i:i+batchsize])\n y_batch = xp.asarray(y_test[i:i+batchsize])\n \n loss = forward(x_batch, y_batch, train=False)\n \n test_loss.append(loss.data)\n sum_loss += float(loss.data) * batchsize\n \n loss_val = sum_loss / N_test # total of per-batch (100) losses (summed 10000/100 times) / number of test samples (10000)\n \n print('\\ttest mean loss = {}'.format(loss_val))\n if epoch == 1:\n loss_std = loss_val\n loss_rate.append(100)\n else:\n print('\\tratio : %.3f'%(loss_val/loss_std * 100))\n loss_rate.append(loss_val/loss_std * 100)\n \n if prev_loss >= 0:\n diff = loss_val - prev_loss\n ratio = diff/prev_loss * 100\n print('\\timpr rate : %.3f'%(-ratio))\n \n prev_loss = sum_loss / N_test\n test_mean_loss.append(loss_val)\n \n end_time = time.clock()\n print('\\ttime = %.3f' %(end_time-start_time))\n\n # visualization step\n ew1 = np.array(model.l1.W)\n image_save(ew1, 'ew1({})_images'.format(epoch * period))\n dw1_T = np.array(model.l2.W).T\n image_save(dw1_T, 'dw1_T({})_images'.format(epoch * period))\n # ......\n dec_hidden_x = np.identity(n_units)\n dec_hidden_y = dec_forward(dec_hidden_x)\n dec_hidden = np.array(dec_hidden_y.data)\n image_save(dec_hidden, 'dec_hL1({})_images'.format(epoch * period))\n enc_hidden_x = np.identity(784)\n enc_hidden_y = enc_forward(enc_hidden_x)\n enc_hidden_T = np.array(enc_hidden_y.data).T\n image_save(enc_hidden_T, 'enc_hL1_T({})_images'.format(epoch * period))\n\n # compare by taking dot products of the images\n dot12 = np.dot(dw1_T.T, ew1)\n dot23 = np.dot(dec_hidden.T, dw1_T)\n dot34 = np.dot(enc_hidden_T.T, dec_hidden)\n dot41 = np.dot(ew1.T, enc_hidden_T)\n dot13 = np.dot(dec_hidden.T, ew1)\n dot24 = np.dot(enc_hidden_T.T, dw1_T)\n image_save(dot12, 'dot12({})_images'.format(epoch * period))\n image_save(dot23, 'dot23T({})_images'.format(epoch * period))\n image_save(dot34, 'dot34({})_images'.format(epoch * period))\n image_save(dot41, 'dot41T({})_images'.format(epoch * period))\n image_save(dot13, 'dot13N({})_images'.format(epoch * period))\n image_save(dot24, 'dot24N({})_images'.format(epoch * period))\n\n dot21 = np.dot(ew1.T, dw1_T)\n dot32 = np.dot(dw1_T.T, dec_hidden)\n dot43 = np.dot(dec_hidden.T, enc_hidden_T)\n dot14 = np.dot(enc_hidden_T.T, ew1)\n dot31 = np.dot(ew1.T, dec_hidden)\n dot42 = np.dot(dw1_T.T, enc_hidden_T)\n image_save(dot21, 'dot21T({})_images'.format(epoch * period))\n image_save(dot32, 'dot32({})_images'.format(epoch * period))\n image_save(dot43, 'dot43T({})_images'.format(epoch * period))\n image_save(dot14, 'dot14({})_images'.format(epoch * period))\n image_save(dot31, 'dot31N({})_images'.format(epoch * period))\n image_save(dot42, 'dot42N({})_images'.format(epoch * period))\n\n # check for the vanishing gradient problem\n test_ew1 = chainer.Variable(ew1)\n test_dw1_T = chainer.Variable(dw1_T)\n print('Layer :', F.mean_squared_error(test_ew1, test_dw1_T).data)\n # image agreement (squared error)\n test_enc_hidden_T = 
chainer.Variable(enc_hidden_T)\n test_dec_hidden = chainer.Variable(dec_hidden)\n print('image agreement (squared error)')\n print('ew1_images, enc_hidden_T : ', F.mean_squared_error(test_ew1, test_enc_hidden_T).data)\n print('dw1_T_images, dec_hidden : ', F.mean_squared_error(test_dw1_T, test_dec_hidden).data)\n # root mean squared error\n print('root mean squared error (RMSE)')\n print('ew1_images, enc_hidden_T : ', math.sqrt(((ew1 - enc_hidden_T)**2).mean()))\n print('dw1_T_images, dec_hidden : ', math.sqrt(((dw1_T - dec_hidden)**2).mean()))\n # various statistics\n print('ew1 : max={}, argmax={}, min={}, argmin={}'.format(ew1.max(), ew1.argmax(), ew1.min(), ew1.argmin()))\n print('enc_hidden_T : max={}, argmax={}, min={}, argmin={}'.format(enc_hidden_T.max(), enc_hidden_T.argmax(), enc_hidden_T.min(), enc_hidden_T.argmin()))\n print('dw1_T : max={}, argmax={}, min={}, argmin={}'.format(dw1_T.max(), dw1_T.argmax(), dw1_T.min(), dw1_T.argmin()))\n print('dec_hidden : max={}, argmax={}, min={}, argmin={}'.format(dec_hidden.max(), dec_hidden.argmax(), dec_hidden.min(), dec_hidden.argmin()))\n print('ew1[0] : size={}'.format(ew1[0].size))\n print('enc_hidden_T[0] : size={}'.format(enc_hidden_T[0].size))\n print('dw1_T[0] : size={}'.format(dw1_T[0].size))\n print('dec_hidden[0] : size={}'.format(dec_hidden[0].size))\n print('ew1[0] : max={}, argmax={}, min={}, argmin={}'.format(ew1[0].max(), ew1[0].argmax(), ew1[0].min(), ew1[0].argmin()))\n print('enc_hidden_T[0] : max={}, argmax={}, min={}, argmin={}'.format(enc_hidden_T[0].max(), enc_hidden_T[0].argmax(), enc_hidden_T[0].min(), enc_hidden_T[0].argmin()))\n print('dw1_T[0] : max={}, argmax={}, min={}, argmin={}'.format(dw1_T[0].max(), dw1_T[0].argmax(), dw1_T[0].min(), dw1_T[0].argmin()))\n print('dec_hidden[0] : max={}, argmax={}, min={}, argmin={}'.format(dec_hidden[0].max(), dec_hidden[0].argmax(), dec_hidden[0].min(), dec_hidden[0].argmin()))\n\n# Draw mean loss graph\nplt.style.use('ggplot')\nplt.figure(figsize=(10,7))\nplt.plot(test_mean_loss, lw=1)\nplt.title('mean loss graph')\nplt.ylabel('mean loss')\nplt.xlabel('epoch')\nplt.savefig('{}mean loss graph.png'.format(fig_home))\nplt.close()\n\nmodel.to_cpu()\npickle.dump(model, open('{}model.pkl'.format(fig_home), 'wb'), -1)\n\n##############################################################################\n\nprint('plot start : IO_images.png')\n# visualize inputs and outputs\nplt.style.use('fivethirtyeight')\nplt.figure(figsize=(15,25))\n\nnum = 100\ncolumn = 10\nrow = int(num/column) # must divide evenly\nans_list = []\npred_list = []\nfor idx in np.random.permutation(N_test)[:num]:\n xxx = x_test[idx].astype(np.float32)\n # no dropout since this is evaluation\n h1 = F.relu(model.l1(chainer.Variable(xxx.reshape(1,784))))\n y = model.l2(h1)\n # h1 = F.dropout(F.relu(model.l1(chainer.Variable(xxx.reshape(1,784)))), train=False)\n # y = F.dropout(model.l2(h1), train=False)\n # equivalent to the two commented lines above\n ans_list.append(x_test[idx])\n pred_list.append(y)\n\nfor i in six.moves.range(row):\n for j in six.moves.range(column):\n img_no = i *row +j\n ans_pos = (2*i) *row +j\n pred_pos = (2*i+1) *row +j\n draw_digit_ae(ans_list[img_no], ans_pos, row*2, column, 'ans')\n draw_digit_ae(pred_list[img_no].data, pred_pos, row*2, column, 'pred')\n\nplt.savefig('{}IO_images.png'.format(fig_home), bbox_inches='tight', pad_inches=0.0)\nplt.close()\nprint('plot end : 
IO_images.png')\nprint()\n\n##############################################################################\n","sub_path":"2016/AE_mnist_pickle_y.py","file_name":"AE_mnist_pickle_y.py","file_ext":"py","file_size_in_byte":13728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367213387","text":"#!/usr/bin/env python3\n\"\"\"File that contains the class BayesianOptimization\"\"\"\nimport numpy as np\nGP = __import__('2-gp').GaussianProcess\n\n\nclass BayesianOptimization:\n \"\"\"\n Class that performs Bayesian optimization on a noiseless\n 1D Gaussian process\n \"\"\"\n\n def __init__(self, f, X_init, Y_init, bounds, ac_samples, l=1, sigma_f=1,\n xsi=0.01, minimize=True):\n \"\"\"\n Class constructor initializer\n Args:\n f is the black-box function to be optimized\n X_init is a numpy.ndarray of shape (t, 1) representing the\n inputs already sampled with the black-box function\n Y_init is a numpy.ndarray of shape (t, 1) representing the\n outputs of the black-box function for each input in X_init\n t is the number of initial samples\n bounds is a tuple of (min, max) representing the bounds of the\n space in which to look for the optimal point\n ac_samples is the number of samples that should be analyzed\n during acquisition\n l is the length parameter for the kernel\n sigma_f is the standard deviation given to the output of the\n black-box function\n xsi is the exploration-exploitation factor for acquisition\n minimize is a bool determining whether optimization should be\n performed for minimization (True) or maximization (False)\n \"\"\"\n self.f = f\n\n self.gp = GP(X_init, Y_init, l, sigma_f)\n\n X_s = np.linspace(bounds[0], bounds[1], num=ac_samples)\n self.X_s = X_s.reshape(-1, 1)\n\n self.xsi = xsi\n\n self.minimize = minimize\n\n def acquisition(self):\n \"\"\"\n Public instance method that calculates the next best sample location\n Args:\n Uses the Expected Improvement acquisition function\n Returns: X_next, EI\n X_next is a numpy.ndarray of shape (1,) representing the next\n best sample point\n EI is a numpy.ndarray of shape (ac_samples,) containing the\n expected improvement of each potential sample\n \"\"\"\n from scipy.stats import norm\n # source: http://krasserm.github.io/2018/03/21/bayesian-optimization/\n mu, sigma = self.gp.predict(self.X_s)\n\n if self.minimize is True:\n Y_sample = np.min(self.gp.Y)\n imp = Y_sample - mu - self.xsi\n else:\n Y_sample = np.max(self.gp.Y)\n imp = mu - Y_sample - self.xsi\n\n Z = np.zeros(sigma.shape[0])\n for i in range(sigma.shape[0]):\n # formula if σ(x)>0 : μ(x)−f(x+)−ξ / σ(x)\n if sigma[i] > 0:\n Z[i] = imp[i] / sigma[i]\n # formula if σ(x)=0\n else:\n Z[i] = 0\n ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)\n\n X_next = self.X_s[np.argmax(ei)]\n\n return X_next, ei\n","sub_path":"unsupervised_learning/0x03-hyperparameter_tuning/4-bayes_opt.py","file_name":"4-bayes_opt.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501155058","text":"#!/usr/bin/env python3\n\nfrom TDS import *\n\n# Create TAS\ntas = TAS()\n\n# Create glues\ndead_glue = Glue(\"\", 0)\nseed_glue = Glue(\"seed\", 2)\n\n# Create seed tiles\nseed_tile = Tile(\"Seed\", [255, 0, 0], [seed_glue, seed_glue, dead_glue, dead_glue])\nseed_wframe = Tile(\"wframe\", [255, 0, 0], [seed_glue, Glue(\"fill-1\", 1), seed_glue, dead_glue])\nseed_sframe = 
seed_wframe.rotate(-1)\ntas.addTile(seed_tile)\ntas.addTile(seed_wframe)\ntas.addTile(seed_sframe)\n\n# Create center tiles\ndef xor(horiz, vert):\n return horiz^vert, horiz^vert\n\nfill_tiles = Tile.create_func(\"fill\", xor, [0,1])\nfor tile in fill_tiles:\n if tile.glues[0].label == \"fill-1\": \n tile.color = [255,0,0]\n tas.addTile(tile)\n\ntas.printToFile(\"example-sierpinski.tds\")\n","sub_path":"example-sierpinski.py","file_name":"example-sierpinski.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"555130257","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nDataSize=100\n\n# ---------- First Plot ----------\ny=np.arange(100)\nplt.figure()\nplt.plot(y, 'b-', linewidth=2)\nplt.grid(True)\nz=np.arange(100)\n\n# ---------- Second Plot ----------\nplt.figure() # Create a new window\nplt.plot(z, 'b-', linewidth=2,label=\"Test\",color=\"red\")\nplt.draw()\nplt.legend() # label\n\n# -------- Third Plot ----------\nt=np.arange(DataSize)\ns=t/10\nv=t/10\nfor i in range (DataSize):\n s[i]=0.53*t[i]\n v[i]=0.53\nplt.figure()\nplt.subplot(1,2,1)\nplt.title(\"L1*rz21\") # Plot Title\nplt.ylabel(\"S(t)\") # y label\nplt.plot(t,s,\"-\")\nplt.subplot(1,2,2)\nplt.ylabel(\"V(t)\")\nplt.plot(t,v,\"-\",label=\"Test\") #label\nplt.xlabel(\"time\") # x label\nplt.legend() # label\n\n\n# ---------- Show Plot ----------\nplt.show()\n","sub_path":"Plot/Plot A2.py","file_name":"Plot A2.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"626300594","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom selenium import webdriver\nfrom time import sleep\nimport csv\n\nfrom selenium.webdriver.support.select import Select\n\nimport common\nclass RanzhiTests(unittest.TestCase):\n def setUp(self):\n self.baseurl = \"http://127.0.0.1/ranzhi/www/\"\n self.browser = webdriver.Firefox()\n self.browser.maximize_window()\n sleep(1)\n\n def test_01_login(self):\n self.login_list = csv.reader(open('login_data/login.csv', 'r'))\n for login in self.login_list:\n\n self.browser.get(self.baseurl + \"sys/user-login.html\")\n sleep(3)\n common.commonutility.login(self, self.browser, login[0], login[1])\n\n sleep(3)\n self.assertEqual(self.baseurl + \"sys/index.html\", self.browser.current_url, u\"Login redirect failed!\")\n sleep(3)\n common.commonutility.logout(self, self.browser)\n\n pass\n\n def test_02(self):\n self.login_list = csv.reader(open('test_data/common_login.csv', 'r'))\n for login in self.login_list:\n\n common.commonutility.login(self, self.browser, login[0], login[1])\n\n sleep(3)\n self.browser.find_element_by_xpath(\"//*[@id='s-menu-']/button\").click()\n sleep(3)\n self.assertEqual(self.baseurl + \"crm/dashboard/\", self.browser.current_url, \"Navigation to customer management failed\")\n\n self.browser.switch_to.frame(\"iframe-1\")\n\n self.browser.find_element_by_xpath(\"//*[@id='mainNavbar']/div[2]/ul/li[4]/a\").click()\n sleep(3)\n self.assertEqual(self.baseurl + \"crm/customer-browse.html\", self.browser.current_url, \"Navigation to the customer module failed\")\n\n self.browser.find_element_by_xpath(\"//div[@id='menuActions']/a\").click()\n sleep(3)\n self.assertEqual(self.baseurl + \"crm/customer-create.html\", self.browser.current_url, \"Navigation to the customer creation page failed\")\n # test adding a new customer\n self.browser.find_element_by_id(\"name\").send_keys(\"client002\")\n self.browser.find_element_by_id(\"public\").click()\n self.browser.find_element_by_id(\"contact\").send_keys(u\"联系人002\")\n 
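# Fill in the remaining customer details before submitting the form.\n 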
self.browser.find_element_by_id(\"submit\").click()\n self.browser.find_element_by_id(\"phone\").send_keys(\"1380000002\")\n self.browser.find_element_by_id(\"email\").send_keys(\"email002@email.com\")\n self.browser.find_element_by_id(\"qq\").send_keys(\"222202222\")\n self.browser.find_element_by_id(\"type\").send_keys(u\"合资企业\")\n Select(self.browser.find_element_by_id(\"level\")).select_by_visible_text(u\"C(有明显的业务需求,预计半年内成交)\")\n sleep(3)\n\n self.assertEqual(self.baseurl + \"crm/customer-browse.html\", self.browser.current_url, \"客户保存跳转失败\")\n\n self.browser.switch_to.default_content()\n\n common.commonutility.logout(self,self.browser)\n\n def test_03(self):\n pass\n\n def tearDown(self):\n self.browser.quit()\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"ranzhi_automation/testranzhi.py","file_name":"testranzhi.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573776378","text":"# Copyright © 2018 VMware, Inc. All Rights Reserved.\n# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only\n\n# !/usr/bin/python\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: vcd_v_network\nshort_description: Ansible Module to manage (create/delete) Networks in vApps in vCloud Director.\nversion_added: \"2.4\"\ndescription:\n - \"Ansible Module to manage (create/delete) Networks in vApps.\"\noptions:\n user:\n description:\n - vCloud Director user name\n required: false\n password:\n description:\n - vCloud Director user password\n required: false\n host:\n description:\n - vCloud Director host address\n required: false\n org:\n description:\n - Organization name on vCloud Director to access\n required: false\n api_version:\n description:\n - Pyvcloud API version\n required: false\n verify_ssl_certs:\n description:\n - whether to use secure connection to vCloud Director host\n required: false\n network:\n description:\n - Network name\n required: true\n vapp:\n description:\n - vApp name\n required: true\n vdc:\n description:\n - VDC name\n required: true\n fence_mode:\n description:\n - Network fence mode\n required: false\n parent_network:\n description:\n - VDC parent network to connect to\n required: false\n ip_scope:\n description:\n - IP scope when no parent_network is defined\n state:\n description:\n - state of network ('present'/'absent').\n required: true\nauthor:\n - mtaneja@vmware.com\n'''\n\nEXAMPLES = '''\n- name: Anything something\n vcd_universal:\n user: \"\"\n password: \"\"\n host: \"\"\n org: \"\"\n api_version: \"\"\n verify_ssl_certs: true\n network: \"\"\n vapp: \"\"\n vdc: \"\"\n fence_mode: \"\"\n parent_network:\n ip_scope: \"\"\n state: \"read\"\n\n- name: Test with a message\n vcd_universal:\n user: terraform\n password: abcd\n host: csa.sandbox.org\n org: Terraform\n api_version: 30\n verify_ssl_certs: False\n network = \"uplink\"\n vapp = \"vapp1\"\n vdc = \"vdc1\"\n state = \"present\"\n'''\n\nRETURN = '''\nmsg: success/failure message corresponding to vapp network state\nchanged: true if resource has been changed else false\n'''\n\nfrom lxml import etree\nfrom ipaddress import ip_network\nfrom pyvcloud.vcd.org import Org\nfrom pyvcloud.vcd.vdc import VDC\nfrom pyvcloud.vcd.client import E\nfrom pyvcloud.vcd.vapp import VApp\nfrom pyvcloud.vcd.client import NSMAP\nfrom pyvcloud.vcd.client import E_OVF\nfrom pyvcloud.vcd.client import FenceMode\nfrom 
pyvcloud.vcd.client import EntityType\nfrom pyvcloud.vcd.client import RelationType\nfrom pyvcloud.vcd.client import ApiVersion\nfrom pyvcloud.vcd.client import EdgeGatewayType\nfrom pyvcloud.vcd.client import find_link\nfrom pyvcloud.vcd.client import GatewayBackingConfigType\nfrom pyvcloud.vcd.client import LogicalNetworkLinkType\nfrom pyvcloud.vcd.client import MetadataDomain\nfrom pyvcloud.vcd.client import MetadataValueType\nfrom pyvcloud.vcd.client import MetadataVisibility\nfrom pyvcloud.vcd.client import QueryResultFormat\nfrom pyvcloud.vcd.client import ResourceType\nfrom pyvcloud.vcd.client import SIZE_1MB\nfrom ansible.module_utils.vcd import VcdAnsibleModule\nfrom pyvcloud.vcd.exceptions import EntityNotFoundException\nfrom pyvcloud.vcd.exceptions import InvalidParameterException\nfrom pyvcloud.vcd.exceptions import MultipleRecordsException\nfrom pyvcloud.vcd.exceptions import OperationNotSupportedException\n\n\nVAPP_VM_STATES = ['present', 'absent', 'read']\nVAPP_VM_OPERATIONS = ['poweron', 'poweroff', 'deploy', 'undeploy', 'list_vms', 'list_networks']\nVM_STATUSES = {'3': 'SUSPENDED', '4': 'POWERED_ON', '8': 'POWERED_OFF'}\nVAPP_NETWORK_STATES = ['present', 'update', 'absent', 'read']\nVAPP_NETWORK_OPERATIONS = ['read']\nVAPP_TARGET_OBJECT = ['vapp', 'edge', 'vm', 'firewall']\n\ndef vapp_argument_spec():\n return dict(\n vapp_name=dict(type='str', required=True),\n template_name=dict(type='str', required=False),\n catalog_name=dict(type='str', required=False),\n vdc=dict(type='str', required=True),\n description=dict(type='str', required=False, default=None),\n network=dict(type='str', required=False, default=None),\n fence_mode=dict(\n type='str', required=False, default=FenceMode.BRIDGED.value),\n ip_allocation_mode=dict(type='str', required=False, default=\"dhcp\"),\n deploy=dict(type='bool', required=False, default=True),\n power_on=dict(type='bool', required=False, default=True),\n accept_all_eulas=dict(type='bool', required=False, default=False),\n memory=dict(type='int', required=False, default=None),\n cpu=dict(type='int', required=False, default=None),\n disk_size=dict(type='int', required=False, default=None),\n vmpassword=dict(type='str', required=False, default=None),\n cust_script=dict(type='str', required=False, default=None),\n vm_name=dict(type='str', required=False, default=None),\n hostname=dict(type='str', required=False, default=None),\n ip_address=dict(type='str', required=False, default=None),\n storage_profile=dict(type='str', required=False, default=None),\n network_adapter_type=dict(type='str', required=False, default=None),\n force=dict(type='bool', required=False, default=False),\n state=dict(choices=VAPP_VM_STATES, required=False),\n operation=dict(choices=VAPP_VM_OPERATIONS, required=False),\n target=dict(choices=VAPP_TARGET_OBJECT, required=True)\n )\n\ndef vapp_merge_argument_spec():\n return dict(\n vapp_name=dict(type='str', required=True),\n template_name=dict(type='str', required=False),\n catalog_name=dict(type='str', required=False),\n vdc=dict(type='str', required=True),\n description=dict(type='str', required=False, default=None),\n network=dict(type='str', required=False, default=None),\n fence_mode=dict(type='str', required=False, default=FenceMode.BRIDGED.value),\n 
ip_allocation_mode=dict(type='str', required=False, default=\"dhcp\"),\n deploy=dict(type='bool', required=False, default=True),\n power_on=dict(type='bool', required=False, default=True),\n accept_all_eulas=dict(type='bool', required=False, default=False),\n memory=dict(type='int', required=False, default=None),\n cpu=dict(type='int', required=False, default=None),\n disk_size=dict(type='int', required=False, default=None),\n vmpassword=dict(type='str', required=False, default=None),\n cust_script=dict(type='str', required=False, default=None),\n vm_name=dict(type='str', required=False, default=None),\n hostname=dict(type='str', required=False, default=None),\n ip_address=dict(type='str', required=False, default=None),\n parent_network=dict(type='str', required=False, default=None),\n storage_profile=dict(type='str', required=False, default=None),\n network_adapter_type=dict(type='str', required=False, default=None),\n force=dict(type='bool', required=False, default=False),\n state=dict(choices=VAPP_VM_STATES, required=False),\n ip_scope=dict(type='str', required=False, default=None),\n operation=dict(choices=VAPP_VM_OPERATIONS, required=False),\n target=dict(choices=VAPP_TARGET_OBJECT, required=False),\n )\n\ndef vapp_network_argument_spec():\n return dict(\n network=dict(type='str', required=True),\n vapp=dict(type='str', required=True),\n vdc=dict(type='str', required=True),\n fence_mode=dict(type='str', required=False, default=FenceMode.BRIDGED.value),\n parent_network=dict(type='str', required=False, default=None),\n ip_scope=dict(type='str', required=False, default=None),\n state=dict(choices=VAPP_NETWORK_STATES, required=True),\n )\n\nclass VappNetwork(VcdAnsibleModule):\n def __init__(self, **kwargs):\n super(VappNetwork, self).__init__(**kwargs)\n vapp_resource = self.get_resource()\n self.vapp = VApp(self.client, resource=vapp_resource)\n\n def manage_states(self):\n state = self.params.get('state')\n if state == \"present\":\n return self.add_network()\n if state == \"absent\":\n return self.delete_network()\n if state == \"update\":\n return self.update_network()\n if state == \"read\":\n return self.read_network()\n\n def get_resource(self):\n vapp = self.params.get('vapp')\n vdc = self.params.get('vdc')\n org_resource = Org(self.client, resource=self.client.get_org())\n vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))\n vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)\n vapp_resource = self.client.get_resource(vapp_resource_href)\n return vapp_resource\n\n def get_org_resource(self):\n vapp = self.params.get('vapp')\n vdc = self.params.get('vdc')\n org_resource = Org(self.client, resource=self.client.get_org())\n vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))\n vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)\n vapp_resource = self.client.get_resource(vapp_resource_href)\n return org_resource\n\n def get_vdc_resource(self):\n vapp = self.params.get('vapp')\n vdc = self.params.get('vdc')\n org_resource = Org(self.client, resource=self.client.get_org())\n vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))\n vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)\n vapp_resource = self.client.get_resource(vapp_resource_href)\n return vdc_resource\n\n def get_network(self):\n network_name = self.params.get('network')\n networks = self.vapp.get_all_networks()\n for network in networks:\n if 
network.get('{'+NSMAP['ovf']+'}name') == network_name:\n                return network\n        raise EntityNotFoundException('Can\'t find the specified vApp network')\n\n    def read_network(self):\n        network = self.get_network()\n        response = dict()\n        response['changed'] = False\n        response['msg'] = network.get('{'+NSMAP['ovf']+'}name')\n        return response\n\n    def update_network(self):\n        # Updating an existing vApp network is not implemented yet; fail\n        # explicitly instead of silently returning None from manage_states().\n        raise OperationNotSupportedException(\n            'update of vApp network is not implemented yet')\n\n    def delete_network(self):\n        network_name = self.params.get('network')\n        response = dict()\n        response['changed'] = False\n\n        try:\n            self.get_network()\n        except EntityNotFoundException:\n            response['warnings'] = 'Vapp Network {} is not present.'.format(network_name)\n        else:\n            network_config_section = self.vapp.resource.NetworkConfigSection\n            for network_config in network_config_section.NetworkConfig:\n                if network_config.get('networkName') == network_name:\n                    network_config_section.remove(network_config)\n            delete_network_task = self.client.put_linked_resource(\n                self.vapp.resource.NetworkConfigSection, RelationType.EDIT,\n                EntityType.NETWORK_CONFIG_SECTION.value,\n                network_config_section)\n            self.execute_task(delete_network_task)\n            response['msg'] = 'Vapp Network {} has been deleted.'.format(network_name)\n            response['changed'] = True\n\n        return response\n\n    def add_network(self):\n        network_name = self.params.get('network')\n        fence_mode = self.params.get('fence_mode')\n        parent_network = self.params.get('parent_network')\n        ip_scope = self.params.get('ip_scope')\n\n        response = dict()\n        response['changed'] = False\n\n        try:\n            self.get_network()\n        except EntityNotFoundException:\n            network_config_section = self.vapp.resource.NetworkConfigSection\n            config = E.Configuration()\n            if parent_network:\n                vdc = self.params.get('vdc')\n                org_resource = Org(self.client, resource=self.client.get_org())\n                vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))\n                orgvdc_networks = vdc_resource.list_orgvdc_network_resources(parent_network)\n                parent = next((network for network in orgvdc_networks if network.get('name') == parent_network), None)\n                if parent:\n                    config.append(E.ParentNetwork(href=parent.get('href')))\n                else:\n                    raise EntityNotFoundException('Parent network \'{0}\' does not exist'.format(parent_network))\n            elif ip_scope:\n                scope = E.IpScope(\n                    E.IsInherited('false'),\n                    E.Gateway(str(ip_network(ip_scope, strict=False).network_address+1)),\n                    E.Netmask(str(ip_network(ip_scope, strict=False).netmask)))\n                config.append(E.IpScopes(scope))\n            else:\n                raise InvalidParameterException('Either parent_network or ip_scope 
must be set')\n config.append(E.FenceMode(fence_mode))\n\n network_config = E.NetworkConfig(config, networkName=network_name)\n network_config_section.append(network_config)\n\n add_network_task = self.client.put_linked_resource(\n self.vapp.resource.NetworkConfigSection, RelationType.EDIT,\n EntityType.NETWORK_CONFIG_SECTION.value,\n network_config_section)\n self.execute_task(add_network_task)\n response['msg'] = 'Vapp Network {} has been added'.format(network_name)\n response['changed'] = True\n else:\n response['warnings'] = 'Vapp Network {} is already present.'.format(network_name)\n\n return response\n\nclass Vapp(VcdAnsibleModule):\n def __init__(self, **kwargs):\n super(Vapp, self).__init__(**kwargs)\n logged_in_org = self.client.get_org()\n self.org = Org(self.client, resource=logged_in_org)\n vdc_resource = self.org.get_vdc(self.params.get('vdc'))\n self.vdc = VDC(self.client, href=vdc_resource.get('href'))\n\n def manage_states(self):\n state = self.params.get('state')\n if state == \"present\":\n return self.create()\n\n if state == \"absent\":\n return self.delete()\n\n def manage_operations(self):\n state = self.params.get('operation')\n if state == \"poweron\":\n return self.power_on()\n\n if state == \"poweroff\":\n return self.power_off()\n\n if state == \"deploy\":\n return self.deploy()\n\n if state == \"undeploy\":\n return self.undeploy()\n\n if state == \"list_vms\":\n return self.list_vms()\n\n if state == \"list_networks\":\n return self.list_networks()\n\n def get_vapp(self):\n vapp_name = self.params.get('vapp_name')\n vapp_resource = self.vdc.get_vapp(vapp_name)\n\n return VApp(self.client, name=vapp_name, resource=vapp_resource)\n\n def instantiate(self):\n params = self.params\n vapp_name = params.get('vapp_name')\n catalog_name = params.get('catalog_name')\n template_name = params.get('template_name')\n description = params.get('description')\n network = params.get('network')\n fence_mode = params.get('fence_mode')\n ip_allocation_mode = params.get('ip_allocation_mode')\n deploy = params.get('deploy')\n power_on = params.get('power_on')\n accept_all_eulas = params.get('accept_all_eulas')\n memory = params.get('memory')\n cpu = params.get('cpu')\n disk_size = params.get('disk_size')\n vmpassword = params.get('vmpassword')\n cust_script = params.get('cust_script')\n vm_name = params.get('vm_name')\n hostname = params.get('hostname')\n ip_address = params.get('ip_address')\n storage_profile = params.get('storage_profile')\n network_adapter_type = params.get('network_adapter_type')\n response = dict()\n response['changed'] = False\n\n try:\n self.vdc.get_vapp(vapp_name)\n except EntityNotFoundException:\n create_vapp_task = self.vdc.instantiate_vapp(\n name=vapp_name,\n catalog=catalog_name,\n template=template_name,\n description=description,\n network=network,\n fence_mode=fence_mode,\n ip_allocation_mode=ip_allocation_mode,\n deploy=deploy,\n power_on=power_on,\n accept_all_eulas=accept_all_eulas,\n memory=memory,\n cpu=cpu,\n disk_size=disk_size,\n password=vmpassword,\n cust_script=cust_script,\n vm_name=vm_name,\n hostname=hostname,\n ip_address=ip_address,\n storage_profile=storage_profile,\n network_adapter_type=network_adapter_type)\n self.execute_task(create_vapp_task.Tasks.Task[0])\n msg = 'Vapp {} has been created'\n response['msg'] = msg.format(vapp_name)\n response['changed'] = True\n else:\n msg = \"Vapp {} is already present\"\n response['warnings'] = msg.format(vapp_name)\n\n return response\n\n def create(self):\n params = self.params\n 
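# Dispatch note (added comment): when catalog_name is provided, create() defers to instantiate(), which clones a vApp template; otherwise it builds an empty vApp via vdc.create_vapp() below.\n        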
catalog_name = params.get('catalog_name')\n\n # vapp initialization if catalog has been provided\n if catalog_name:\n return self.instantiate()\n\n vapp_name = params.get('vapp_name')\n description = params.get('description')\n network = params.get('network')\n fence_mode = params.get('fence_mode')\n accept_all_eulas = params.get('accept_all_eulas')\n response = dict()\n response['changed'] = False\n\n try:\n self.vdc.get_vapp(vapp_name)\n except EntityNotFoundException:\n create_vapp_task = self.vdc.create_vapp(\n name=vapp_name,\n description=description,\n network=network,\n fence_mode=fence_mode,\n accept_all_eulas=accept_all_eulas)\n self.execute_task(create_vapp_task.Tasks.Task[0])\n msg = 'Vapp {} has been created'\n response['msg'] = msg.format(vapp_name)\n response['changed'] = True\n else:\n msg = \"Vapp {} is already present\"\n response['warnings'] = msg.format(vapp_name)\n\n return response\n\n def delete(self):\n vapp_name = self.params.get('vapp_name')\n force = self.params.get('force')\n response = dict()\n response['changed'] = False\n\n try:\n self.vdc.get_vapp(vapp_name)\n except EntityNotFoundException:\n response['warnings'] = \"Vapp {} is not present.\".format(vapp_name)\n else:\n delete_vapp_task = self.vdc.delete_vapp(\n name=vapp_name, force=force)\n self.execute_task(delete_vapp_task)\n response['msg'] = 'Vapp {} has been deleted.'.format(vapp_name)\n response['changed'] = True\n\n return response\n\n def power_on(self):\n vapp_name = self.params.get('vapp_name')\n response = dict()\n response['changed'] = False\n\n vapp = self.get_vapp()\n\n if vapp.is_powered_on():\n msg = 'Vapp {} is already powered on'\n response['warnings'] = msg.format(vapp_name)\n return response\n\n try:\n vapp_resource = self.vdc.get_vapp(vapp_name)\n vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)\n power_on_vapp_task = vapp.power_on()\n self.execute_task(power_on_vapp_task)\n msg = 'Vapp {} has been powered on'\n response['msg'] = msg.format(vapp_name)\n response['changed'] = True\n except OperationNotSupportedException:\n msg = 'Operation is not supported. You may have no VM(s) in {}'\n response['warnings'] = msg.format(vapp_name)\n\n return response\n\n def power_off(self):\n vapp_name = self.params.get('vapp_name')\n response = dict()\n response['changed'] = False\n\n vapp = self.get_vapp()\n\n if vapp.is_powered_off():\n msg = 'Vapp {} is already powered off'\n response['warnings'] = msg.format(vapp_name)\n return response\n\n try:\n vapp_resource = self.vdc.get_vapp(vapp_name)\n vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)\n power_off_vapp_task = vapp.power_off()\n self.execute_task(power_off_vapp_task)\n msg = 'Vapp {} has been powered off'\n response['msg'] = msg.format(vapp_name)\n response['changed'] = True\n except OperationNotSupportedException:\n msg = 'Operation is not supported. 
You may have no VM(s) in {}'\n            response['warnings'] = msg.format(vapp_name)\n\n        return response\n\n    def deploy(self):\n        vapp_name = self.params.get('vapp_name')\n        response = dict()\n        response['changed'] = False\n\n        vapp = self.get_vapp()\n\n        if vapp.is_deployed():\n            msg = 'Vapp {} is already deployed'\n            response['warnings'] = msg.format(vapp_name)\n            return response\n\n        vapp_resource = self.vdc.get_vapp(vapp_name)\n        vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)\n        deploy_vapp_task = vapp.deploy()\n        self.execute_task(deploy_vapp_task)\n        msg = 'Vapp {} has been deployed'\n        response['msg'] = msg.format(vapp_name)\n        response['changed'] = True\n\n        return response\n\n    def undeploy(self):\n        vapp_name = self.params.get('vapp_name')\n        response = dict()\n        response['changed'] = False\n\n        vapp = self.get_vapp()\n\n        if not vapp.is_deployed():\n            msg = 'Vapp {} is already undeployed'\n            response['warnings'] = msg.format(vapp_name)\n            return response\n\n        vapp_resource = self.vdc.get_vapp(vapp_name)\n        vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)\n        undeploy_vapp_task = vapp.undeploy(action=\"powerOff\")\n        self.execute_task(undeploy_vapp_task)\n        response['msg'] = 'Vapp {} has been undeployed.'.format(vapp_name)\n        response['changed'] = True\n\n        return response\n\n    def list_vms(self):\n        vapp = self.get_vapp()\n        response = dict()\n        response['msg'] = list()\n\n        for vm in vapp.get_all_vms():\n            try:\n                ip = vapp.get_primary_ip(vm.get('name'))\n            except Exception:\n                ip = None\n            finally:\n                vm_details = {\"name\": vm.get('name'),\n                              \"status\": VM_STATUSES[vm.get('status')],\n                              \"deployed\": vm.get('deployed') == 'true',\n                              \"ip_address\": ip\n                              }\n\n                response['msg'].append(vm_details)\n\n        return response\n\n    def list_networks(self):\n        vapp = self.get_vapp()\n        response = dict()\n\n        networks = vapp.get_all_networks()\n        response['msg'] = [network.get(\n            '{' + NSMAP['ovf'] + '}name') for network in networks]\n\n        return response\n\n\ndef main():\n    # Only the vApp-network spec is actually wired up below; vapp_argument_spec()\n    # and vapp_merge_argument_spec() are kept above for the unfinished\n    # 'target'-based dispatch.\n    response = dict(\n        msg=dict(type='str')\n    )\n    module = VappNetwork(argument_spec=vapp_network_argument_spec(), supports_check_mode=True)\n    try:\n        if not module.params.get('state'):\n            raise Exception('Please provide the state for the resource.')\n\n        response = module.manage_states()\n        module.exit_json(**response)\n\n    except Exception as error:\n        response['msg'] = str(error)\n        module.fail_json(**response)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"ansible/modules/vcd_universal.py","file_name":"vcd_universal.py","file_ext":"py","file_size_in_byte":26082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"32904001","text":"import math\n\nimport numpy as np\nimport pygame as pg\n\nfrom plane import Plane\nfrom projector import Projector\n\n\nclass Window:\n    def __init__(self, xmax, ymax, max_elev):\n        self.xmax = xmax\n        self.ymax = ymax\n        self.size = (self.xmax, self.ymax)\n        pg.init()\n        self.screen = pg.display.set_mode(self.size)\n        self.projector = Projector(xmax, ymax, max_elev)\n        self.colors={\"white\":(255,255,255), \"black\":(0,0,0), \"green\":(0,255,0), \"red\":(255,0,0), \"sky\":(0,0,128), \"earth\":(0,0,0)}\n\n    
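# Axis-convention note (added comment, inferred from the code below): world vectors arrive as (x, y, z) and convert_to_screen_cords reorders them to (y, z, x), i.e. screen-x = world-y, screen-y = world-z, depth = world-x.\n    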
@staticmethod\n def convert_to_screen_cords(coords):\n x = coords[1, :]\n y = coords[2, :]\n z = coords[0, :]\n return np.array((x, y, z))\n\n def circle_size(self, lightpos):\n r = []\n for i in range(len(lightpos[0])):\n\n dist= (math.sqrt(pow(lightpos[0][i],2)+pow(lightpos[1][i],2)+pow(lightpos[2][i],2)))\n if dist >5000:\n r.append(0)\n else: r.append(int(max(1,(5000-dist)/1500)))\n return np.array(r)\n\n def draw_horizon(self, plane):\n self.screen.fill(self.colors['black'])\n left, right = self.projector.find_horizon(plane)\n pg.draw.polygon(self.screen, self.colors[\"sky\"], (left, right, (self.xmax, 0), (0, 0)))\n pg.draw.polygon(self.screen, self.colors[\"earth\"], (left, (0,self.ymax), (self.xmax, self.ymax), right))\n pg.draw.line(self.screen, self.colors['white'], left,right)\n\n\n def draw_points(self,lightpos, lightcol):\n x_s,y_s,draw= self.projector.get_projection(lightpos)\n circle_size = self.circle_size(lightpos)\n for index in range(len(circle_size)):\n if draw[index] == True:\n if circle_size[index] == 0:\n pg.draw.rect(self.screen,lightcol[index],(x_s[index],y_s[index],1,1))\n\n else:\n pg.draw.circle(self.screen,lightcol[index],(int(x_s[index]),int(y_s[index])),circle_size[index])\n\n def handle_key(self,dt):\n keys = pg.key.get_pressed()\n if keys[pg.K_DOWN]:\n self.plane.theta = self.plane.theta+self.plane.omega*dt\n\n if keys[pg.K_UP]:\n self.plane.theta = self.plane.theta-self.plane.omega*dt\n\n if keys[pg.K_EQUALS]:\n self.plane.V = self.plane.V+self.plane.ax*dt\n\n if keys[pg.K_MINUS]:\n self.plane.V = self.plane.V - self.plane.ax*dt\n\n if keys[pg.K_RIGHT]:\n self.plane.phi = self.plane.phi + self.plane.omega * dt\n\n if keys[pg.K_LEFT]:\n self.plane.phi = self.plane.phi - self.plane.omega * dt\n\n\n def get_rel_pos(self,l_s_pos, bodypos):\n return l_s_pos - self.convert_to_screen_cords(bodypos)\n def run(self, lightpos, lightcol, phi, theta, psi, V, omega, ax,bodypos,blinking_light_pos, blinking_light_col):\n self.plane = Plane(phi, theta, psi, V, omega, ax, bodypos)\n done = False\n t0 = pg.time.get_ticks()*0.001\n dt_sum = 0\n flag = True\n blinking_interval = 0.5\n while not done:\n pg.event.pump()\n t = pg.time.get_ticks()*0.001\n dt = min(t-t0,0.5)\n t0 =t\n dt_sum+=dt\n if dt_sum>=blinking_interval:\n flag = not flag\n dt_sum = 0\n self.plane.handle_position_change(dt)\n self.handle_key(dt)\n Rot = self.projector.rotations(self.plane.phi, self.plane.theta, self.plane.psi)\n relpos =self.convert_to_screen_cords(np.dot(Rot,lightpos-self.plane.bodypos))\n relpos_blink = self.convert_to_screen_cords(np.dot(Rot,blinking_light_pos-self.plane.bodypos))\n self.draw_horizon(self.plane)\n\n if flag == True:\n self.draw_points(relpos_blink,blinking_light_col)\n self.draw_points(relpos, lightcol)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n done = True\n pg.display.flip()","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"220401134","text":"import os\r\nimport matplotlib.pyplot as plt\r\nfrom torch.utils.data import DataLoader\r\nimport torch\r\nfrom torchvision.transforms import transforms\r\nfrom PIL import Image\r\nimport pandas as pd\r\nfrom torch import optim\r\nfrom torch.optim import lr_scheduler\r\nimport copy\r\nfrom sklearn.metrics import hamming_loss\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\nfrom Multi_Network import *\r\n\r\nimport time\r\n\r\nROOT_DIR = '../Dataset/'\r\nTRAIN_DIR = 
'train/'\r\nVAL_DIR = 'val/'\r\nTRAIN_ANNO = 'Multi_train_annotation.csv'\r\nVAL_ANNO = 'Multi_val_annotation.csv'\r\nCLASSES = ['Mammals', 'Birds'] # 0,1\r\nSPECIES = ['rabbits', 'rats', 'chickens'] # 0,1,2\r\n\r\n\r\nclass MyDataset():\r\n\r\n    def __init__(self, root_dir, annotations_file, transform=None):\r\n\r\n        self.root_dir = root_dir\r\n        self.annotations_file = annotations_file\r\n        self.transform = transform\r\n\r\n        if not os.path.isfile(self.annotations_file):\r\n            print(self.annotations_file + ' does not exist!')\r\n        self.file_info = pd.read_csv(annotations_file, index_col=0)\r\n        self.size = len(self.file_info)\r\n\r\n    def __len__(self):\r\n        return self.size\r\n\r\n    def __getitem__(self, idx):\r\n        image_path = self.file_info['path'][idx]\r\n        if not os.path.isfile(image_path):\r\n            print(image_path + ' does not exist!')\r\n            return None\r\n\r\n        image = Image.open(image_path).convert('RGB')\r\n        label_species = int(self.file_info.iloc[idx]['species'])\r\n        label_classes = int(self.file_info.iloc[idx]['classes'])\r\n\r\n        if label_species == 0:\r\n            label_multi = torch.FloatTensor([1, 0, 0, 1, 0])\r\n        elif label_species == 1:\r\n            label_multi = torch.FloatTensor([0, 1, 0, 1, 0])\r\n        else:\r\n            label_multi = torch.FloatTensor([0, 0, 1, 0, 1])\r\n\r\n        sample = {'image': image, 'species': label_species, 'classes': label_classes, 'label_multi': label_multi}\r\n        if self.transform:\r\n            sample['image'] = self.transform(image)\r\n        return sample\r\n\r\n\r\ntrain_transforms = transforms.Compose([transforms.Resize((500, 500)),\r\n                                       transforms.RandomHorizontalFlip(),\r\n                                       transforms.ToTensor(),\r\n                                       ])\r\n\r\nval_transforms = transforms.Compose([transforms.Resize((500, 500)),\r\n                                     transforms.ToTensor(),\r\n                                     ])\r\n\r\ntrain_dataset = MyDataset(root_dir=ROOT_DIR + TRAIN_DIR,\r\n                          annotations_file=TRAIN_ANNO,\r\n                          transform=train_transforms)\r\n\r\ntest_dataset = MyDataset(root_dir=ROOT_DIR + VAL_DIR,\r\n                         annotations_file=VAL_ANNO,\r\n                         transform=val_transforms)\r\n\r\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)\r\ntest_loader = DataLoader(dataset=test_dataset)\r\ndata_loaders = {'train': train_loader, 'val': test_loader}\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\nprint(device)\r\n\r\n\r\ndef visualize_dataset():\r\n    print(len(train_dataset))\r\n    idx = random.randint(0, len(train_dataset) - 1)\r\n    sample = train_loader.dataset[idx]\r\n    print(idx, sample['image'].shape, SPECIES[sample['species']], CLASSES[sample['classes']])\r\n    img = sample['image']\r\n    plt.imshow(transforms.ToPILImage()(img))\r\n    plt.show()\r\n\r\n\r\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=50):\r\n    Loss_list = {'train': [], 'val': []}\r\n    Accuracy_list_species = {'train': [], 'val': []}\r\n\r\n    best_model_wts = copy.deepcopy(model.state_dict())\r\n    best_acc = 0.0\r\n\r\n    for epoch in range(num_epochs):\r\n        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\r\n        print('-*' * 10)\r\n\r\n        # Each epoch has a training and validation phase\r\n        for phase in ['train', 'val']:\r\n            if phase == 'train':\r\n                model.train()\r\n            else:\r\n                model.eval()\r\n\r\n            running_loss = 0.0\r\n            corrects_species = 0\r\n            count = 0\r\n\r\n            for idx, data in enumerate(data_loaders[phase]):\r\n                # print(phase+' processing: {}th batch.'.format(idx))\r\n                inputs = data['image'].to(device)\r\n                labels_species = data['species'].to(device)\r\n                label_multi = data['label_multi'].to(device)\r\n                optimizer.zero_grad()\r\n                count = count + 1\r\n\r\n                with torch.set_grad_enabled(phase == 
'train'):\r\n                    x_species = model(inputs)\r\n                    x_species = x_species.view(-1, 5)\r\n\r\n                    loss = criterion(x_species, label_multi)\r\n                    # threshold on a CPU copy: np.where over a CUDA tensor fails,\r\n                    # and sklearn's hamming_loss needs numpy arrays\r\n                    x_preds = (x_species.detach().cpu().numpy() > 0.5).astype(int)\r\n\r\n                    if phase == 'train':\r\n                        loss.backward()\r\n                        optimizer.step()\r\n\r\n                running_loss += loss.item() * inputs.size(0)\r\n                corrects_species += 1 - hamming_loss(label_multi.cpu().numpy().astype(int), x_preds)\r\n\r\n            epoch_loss = running_loss / len(data_loaders[phase].dataset)\r\n            Loss_list[phase].append(epoch_loss)\r\n\r\n            epoch_acc_species = corrects_species / count\r\n            epoch_acc = epoch_acc_species\r\n\r\n            Accuracy_list_species[phase].append(100 * epoch_acc_species)\r\n            print('{} Loss: {:.4f}  Acc_species: {:.2%}'.format(phase, epoch_loss, epoch_acc_species))\r\n            # logger.info('{} Loss: {:.4f}  Acc_species: {:.2%}'.format(phase, epoch_loss, epoch_acc_species))\r\n\r\n            if phase == 'val' and epoch_acc > best_acc:\r\n                best_acc = epoch_acc_species\r\n                best_model_wts = copy.deepcopy(model.state_dict())\r\n                print('Best val species Acc: {:.2%}'.format(best_acc))\r\n\r\n    model.load_state_dict(best_model_wts)\r\n    torch.save(model.state_dict(), 'best_model.pt')\r\n    print('Best val species Acc: {:.2%}'.format(best_acc))\r\n    return model, Loss_list, Accuracy_list_species\r\n\r\ndef train_task():\r\n    num_epochs = input(\"Enter the number of training epochs: \")\r\n    num_epochs = int(num_epochs)\r\n    network = Net().to(device)\r\n\r\n    optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.9)\r\n    # optimizer = optim.Adam(network.parameters(), lr=0.01)\r\n\r\n    criterion = nn.BCELoss()  # BCELoss CrossEntropyLoss\r\n    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)  # Decay LR by a factor of 0.1 every 1 epochs\r\n\r\n    model, Loss_list, Accuracy_list_species = train_model(network, criterion, optimizer, exp_lr_scheduler, num_epochs=num_epochs)\r\n    #\r\n    x = range(0, num_epochs)\r\n    y1 = Loss_list[\"val\"]\r\n    y2 = Loss_list[\"train\"]\r\n\r\n    plt.plot(x, y1, color=\"r\", linestyle=\"-\", marker=\"o\", linewidth=1, label=\"val\")\r\n    plt.plot(x, y2, color=\"b\", linestyle=\"-\", marker=\"o\", linewidth=1, label=\"train\")\r\n    plt.legend()\r\n    plt.title('train and val loss vs. epochs')\r\n    plt.ylabel('loss')\r\n    plt.savefig(\"train and val loss vs epochs.jpg\")\r\n    plt.close('all')  # close all figures\r\n    #\r\n    y5 = Accuracy_list_species[\"train\"]\r\n    y6 = Accuracy_list_species[\"val\"]\r\n    plt.plot(x, y5, color=\"r\", linestyle=\"-\", marker=\".\", linewidth=1, label=\"train\")\r\n    plt.plot(x, y6, color=\"b\", linestyle=\"-\", marker=\".\", linewidth=1, label=\"val\")\r\n    plt.legend()\r\n    plt.title('train and val Species acc vs. epochs')\r\n    plt.ylabel('Species accuracy')\r\n    plt.savefig(\"train and val Species acc vs epochs.jpg\")\r\n    plt.close('all')\r\n\r\n\r\ndef check_loss():\r\n    image = cv2.imread('./train and val loss vs epochs.jpg', 1)\r\n    B, G, R = cv2.split(image)\r\n    img_rgb = cv2.merge((R, G, B))\r\n    plt.imshow(img_rgb)\r\n    plt.show()\r\n\r\n\r\ndef check_acc():\r\n    image = cv2.imread('./train and val Species acc vs epochs.jpg', 1)\r\n    B, G, R = cv2.split(image)\r\n    img_rgb = cv2.merge((R, G, B))\r\n    plt.imshow(img_rgb)\r\n    plt.show()\r\n\r\n\r\ndef get_labels(x_species):\r\n    sp_list = []\r\n    SPECIES = ['rabbits', 'rats', 'chickens', 'Mammals', 'Birds']  # [0,1,2,3,4]\r\n\r\n    mask = x_species.ge(0.5)\r\n\r\n    for idx, item in enumerate(mask):\r\n        if item:\r\n            sp_list.append(SPECIES[idx])\r\n    return sp_list\r\n\r\n\r\ndef visualize_model():\r\n    state_dict = torch.load('best_model.pt')\r\n    model = Net()\r\n    model.load_state_dict(state_dict)\r\n    model.eval()\r\n    with torch.no_grad():\r\n        for i, data in enumerate(data_loaders['val']):\r\n            i = i + 1\r\n            check = random.randint(0, 9)\r\n            if i % 10 == check:\r\n\r\n                inputs = data['image']\r\n                label_species = data['species'].to(device)\r\n                classes = data['classes'].to(device)\r\n\r\n                if label_species == 0:\r\n                    label_mul = torch.autograd.Variable(torch.LongTensor([[1, 0, 0, 1, 0]]))\r\n                elif label_species == 1:\r\n                    label_mul = torch.autograd.Variable(torch.LongTensor([[0, 1, 0, 1, 0]]))\r\n                else:\r\n                    label_mul = torch.autograd.Variable(torch.LongTensor([[0, 0, 1, 0, 1]]))\r\n\r\n                x_species = model(inputs.to(device))\r\n                x_species = x_species.view(-1)\r\n                label_mul = label_mul.view(-1)\r\n\r\n                Sigmoid = nn.Sigmoid()\r\n                x_species = Sigmoid(x_species)\r\n\r\n                x_labels = get_labels(x_species)\r\n                y_labels = get_labels(label_mul)\r\n\r\n                x_labels = ' '.join(x_labels)\r\n                y_labels = ' '.join(y_labels)\r\n\r\n                x_species = torch.nonzero(x_species)\r\n                x_species = x_species.view(-1)\r\n\r\n                plt.imshow(transforms.ToPILImage()(inputs.squeeze(0)))\r\n                plt.title('predicted species: {}\\n ground-truth species:{}'.format(x_labels, y_labels))\r\n                plt.show()\r\n\r\n    print('Test preview finished')\r\n    return True","sub_path":"project2/project_2/Stage_3 Multi-classification/Multi_classification.py","file_name":"Multi_classification.py","file_ext":"py","file_size_in_byte":9893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"632390338","text":"##############################################################################\n# Part of the book Introdução à Programação com Python\n# Author: Nilo Ney Coutinho Menezes\n# Publisher: Novatec (c) 2010-2020\n# First edition - November/2010 - ISBN 978-85-7522-250-8\n# Second edition - June/2014 - ISBN 978-85-7522-408-3\n# Third edition - January/2019 - ISBN 978-85-7522-718-3\n#\n# Site: https://python.nilo.pro.br/\n#\n# File: exercicios3\\capitulo 07\\exercicio-07-05.py\n##############################################################################\n\nprimeira = input(\"Enter the first string: \")\nsegunda = input(\"Enter the second string: \")\n\nterceira = \"\"\n\nfor letra in primeira:\n    if letra not in segunda:\n        terceira += letra\n\nif terceira == \"\":\n    print(\"All characters were removed.\")\nelse:\n    print(f\"The characters {segunda} were removed from {primeira}, producing: {terceira}\")\n","sub_path":"exercicios_resolvidos3/exercicios3/capitulo 
07/exercicio-07-05.py","file_name":"exercicio-07-05.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230473693","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport glob\nimport seaborn as sns\nimport matplotlib\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef transform_model(row):\n if row['method'] == 'DM':\n return row['ml']\n elif row['method'] == 'IPW':\n return row['prop_pred']\n else:\n return f'{row[\"prop_pred\"]}, {row[\"ml\"]}'\n\ndf = pd.DataFrame()\n \n# our method\ndf_buffer = pd.read_csv(f'../results/warfarin/compiled/unconstrained_agg.csv')\ndf_buffer['method'] = df_buffer['method'].map({'Direct': 'DM', 'Robust': 'DR', 'IPW': 'IPW'})\ndf_buffer['prop_pred'] = df_buffer['prop_pred'].map({'tree': 'DT'})\ndf_buffer['ml'] = 'RF/Log'\n\ndf_buffer['model'] = df_buffer.apply(lambda row: transform_model(row), axis=1)\ndf = pd.concat([df, df_buffer[['depth', 'method', 'model', 'gap', \n 'solve_time', 'regret_test', 'best_found_test']]], ignore_index=True)\n\n# kallus bertsimas\ndf_buffer = pd.read_csv(f'../results/warfarin/compiled/KB.csv')\ndf_buffer['method'] = df_buffer['method'].map({'Kallus': 'K-PT', 'Bertsimas': 'B-PT'})\ndf_buffer['model'] = '-'\ndf = pd.concat([df, df_buffer[['depth', 'method', 'model', 'gap', \n 'solve_time', 'regret_test', 'best_found_test']]], ignore_index=True)\n\n# PT\ndf_buffer = pd.read_csv(f'../results/warfarin/compiled/policytree/raw_proba.csv')\nfor col, oosp, regret in zip(['random_time', 'r0.06_time', 'r0.11_time'], ['random', 'r0.06', 'r0.11'],\n ['random_oos_regret', 'r0.06_oos_regret', 'r0.11_oos_regret']):\n h = pd.DataFrame({'solve_time': df_buffer[col].tolist(),\n 'regret_test': df_buffer[regret].tolist(),\n 'best_found_test': df_buffer[oosp].tolist()})\n h['method'] = 'PT'\n h['gap'] = 0\n h['best_found_test'] *= 100\n h['depth'] = 2\n h['model'] = 'DT, Mixed'\n df = pd.concat([df, h], ignore_index=False)\n \n# CF, CT\nfor m, m_name in zip(['cf', 'cf_untuned', 'ct'], ['CF', 'CF (untuned)', 'CT']):\n df_buffer = pd.read_csv(f'../results/warfarin/compiled/CF/{m}_baseline_raw.csv')\n for col, oosp, regret, in zip(['time_random', 'time_r0.06', 'time_r0.11'], ['random', 'r0.06', 'r0.11'],\n ['random_oos_regret', 'r0.06_oos_regret', 'r0.11_oos_regret']):\n h = pd.DataFrame({'solve_time': df_buffer[col].tolist(),\n 'regret_test': df_buffer[regret].tolist(),\n 'best_found_test': df_buffer[oosp].tolist()})\n h['method'] = m_name\n h['depth'] = '-'\n h['best_found_test'] *= 100\n h['gap'] = 0\n h['model'] = '-'\n df = pd.concat([df, h], ignore_index=False)\n \n#RC\ndf_buffer = pd.read_csv(f'../results/warfarin/compiled/RC/rc_raw.csv')\ndf_buffer_random = df_buffer[df_buffer['randomization'] == '0.33']\ndf_buffer_random1 = df_buffer_random[df_buffer_random['model'] == 'balanced_rf']\ndf_buffer_random1['model'] = 'best'\ndf_buffer_random = pd.concat([df_buffer_random[df_buffer_random['model'] != 'lrrf'], df_buffer_random1], ignore_index=True)\ndf_buffer_random['model'] = df_buffer_random['model'].map({'balanced_rf': 'RF', 'best': 'Best',\n 'balanced_lr': 'Log'})\n\ndf_buffer_other = df_buffer[df_buffer['randomization'] != '0.33']\ndf_buffer_other['model'] = df_buffer_other['model'].map({'balanced_rf': 'RF', 'lrrf': 'Best',\n 'balanced_lr': 'Log'})\n\ndf_buffer = pd.concat([df_buffer_random, df_buffer_other], ignore_index=True).rename(columns={'oos_regret': 'regret_test',\n 'oosp': 
'best_found_test'})\ndf_buffer['method'] = 'R&C'\ndf_buffer['gap'] = 0\ndf_buffer['depth'] = '-'\ndf_buffer['best_found_test'] *= 100\n\ndf_buffer = df_buffer.drop(columns=['randomization', 'dataset', 'seed'])\ndf = pd.concat([df, df_buffer], ignore_index=False)\n\n\nmean_df = df.groupby(['depth', 'method', 'model']).agg('mean').reset_index().round(2)\nstd_df = df.groupby(['depth', 'method', 'model']).agg('std').reset_index().round(2)\n\ncombined = mean_df.merge(std_df, on=['depth', 'method', 'model'])\n\nfor col in ['gap', 'solve_time', 'regret_test', 'best_found_test']:\n combined[col] = combined.apply(lambda row: f'{row[f\"{col}_x\"]:.2f} ± {row[f\"{col}_y\"]:.2f}', axis=1)\n combined = combined.drop(columns=[f'{col}_{i}' for i in ['x', 'y']])\n\n\nmapping = {'IPW': 1, 'DM': 2, 'DR': 3, 'K-PT': 4, 'B-PT': 5, 'PT': 6, 'CF': 0, 'CF (untuned)': 0, 'CT': 0, 'R&C': 0}\n\ncombined['method_map'] = combined['method'].apply(lambda x: mapping[x])\nprint(combined.sort_values(by=['depth', 'method_map']).drop(columns=['method_map']).to_latex(index=False))\n\n\n\n\n\n\n\n\n","sub_path":"analysis_viz/appendix_table1.py","file_name":"appendix_table1.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604717411","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfilename_queue = tf.train.string_input_producer([\"dataHwH.csv\"])\n\nreader = tf.TextLineReader()\nkey, value = reader.read(filename_queue)\n\nrecord_defaults = [[\"s\"], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0], [1.0]]\ncol1, col2, col3, col4, col5, col6, col7, col8, col9 = tf.decode_csv(\n value, record_defaults=record_defaults)\nmarket = tf.stack([col2, col3, col5, col6, col7, col8, col9]) #col4 is DEM 10y, the Variable to explain\n#usa = tf.stack([col6, col7, col8, col9])\ntenDEM = tf.stack([col4])\n\nmarket_train_array=np.array([[]])\nmarket_test_array=np.array([[]])\ntenDEM_train=np.array([])\ntenDEM_test=np.array([])\n\n\nwith tf.Session() as sess:\n # Start populating the filename queue.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n for i in range(1800):\n # Retrieve a single instance:\n date_train, market_train, tenDEM_temp = sess.run([col1,market, tenDEM]) #, col3, col4, col5, col6, col7, col8, col9])\n if market_train_array.size==0:\n market_train_array=np.append(market_train_array, [market_train],1)\n tenDEM_train=np.append(tenDEM_train, tenDEM_temp)\n market_train_array=np.append(market_train_array, [market_train],0)\n tenDEM_train=np.append(tenDEM_train, tenDEM_temp)\n\n for i in range(1801,2100):\n date_test, market_test, tenDEM_temp = sess.run([col1, market, tenDEM])\n if market_test_array.size==0:\n market_test_array=np.append(market_test_array, [market_test],1)\n market_test_array=np.append(market_test_array, [market_test],0)\n tendDEM_test=np.append(tenDEM_test, tenDEM_temp,0)\n\n coord.request_stop()\n coord.join(threads)\n\n\n\nsess=tf.InteractiveSession()\n\n# Model parameters\nW = tf.Variable(tf.zeros([7,1]))\nb = tf.Variable([0.0], dtype=tf.float32)\n\n\n# Model input and output\nx = tf.placeholder(tf.float32, shape=[None, 7])\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n\n#training\ny_ = tf.placeholder(tf.float32)\ncross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\ntrain = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\nsess= 
tf.InteractiveSession()\ntf.global_variables_initializer().run()\n\nprint(\"market_train_array.shape\")\nprint(market_train_array.shape)\nprint(\"W.shape\")\nprint(W.shape)\n\n\n\nfor i in range(200):\n    x_train = market_train_array\n    y_train = tenDEM_train\n    sess.run(train, {x: x_train, y_: y_train})\n\n# evaluate training accuracy (feed the label placeholder y_, not the model\n# output y; the objective tensor is cross_entropy -- 'loss' was never defined)\ncurr_W, curr_b, curr_loss = sess.run([W, b, cross_entropy], {x: x_train, y_: y_train})\nprint(\"W: %s b: %s loss: %s\"%(curr_W, curr_b, curr_loss))\n","sub_path":"spreadMultifactor.py","file_name":"spreadMultifactor.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"270122019","text":"# !pip3 install --upgrade torch --user\n# !pip3 install torchvision --user\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\nimport pickle\nimport tqdm\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\nfrom torchvision import datasets\nfrom torchvision import transforms\n\n\ndef get_freer_gpu():\n    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n    memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n    return np.argmax(memory_available)\n\n\ndef normalize(im):\n    if im.max() == 0:\n        return im\n    return (im - im.min())/(im.max()-im.min())\n\n\ndef compute_psnr(image, noised):\n    \"\"\"\n    Alert: only from images with max value = 1\n    \"\"\"\n    mse = nn.MSELoss()(image, noised).item()\n    if mse == 0:\n        return 0\n    return 10 * np.log10(1/mse)\n\n\ndef pairwise_dist(arr, k):\n    \"\"\"\n    arr: torch.Tensor with shape batch x h*w x features\n    \"\"\"\n    r_arr = torch.sum(arr * arr, dim=2, keepdim=True)  # (B,N,1)\n    mul = torch.matmul(arr, arr.permute(0,2,1))  # (B,M,N)\n    dist = - (r_arr - 2 * mul + r_arr.permute(0,2,1))  # (B,M,N)\n    return dist.topk(k=k, dim=-1)[1]\n\n\ndef batched_index_select(t, dim, inds):\n    dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))\n    out = t.gather(dim, dummy)  # b x e x f\n    return out\n\n\ndef get_closest_diff(arr, k):\n    \"\"\"\n    arr: torch.Tensor with shape batch x h * w x features\n    \"\"\"\n    b, hw, f = arr.shape\n    dists = pairwise_dist(arr, k)\n    selected = batched_index_select(arr, 1, dists.view(dists.shape[0], -1)).view(b, hw, k, f)\n    diff = arr.unsqueeze(2) - selected\n    return diff\n\n\ndef plot_diff(real, noised, denoised):\n    plt.figure(figsize=(15, 15))\n    \n    for ind, im, noise, denoise in zip(range(1, 13, 3), real[:4], noised[:4], denoised[:4]):\n        plt.subplot(4, 3, ind)\n        plt.imshow(im)\n        plt.yticks([])\n        plt.xticks([])\n        plt.title('Clear image')\n        \n        plt.subplot(4, 3, ind+1)\n        plt.imshow(noise)\n        plt.yticks([])\n        plt.xticks([])\n        plt.title('Noised image')\n        \n        plt.subplot(4, 3, ind+2)\n        plt.imshow(denoise)\n        plt.yticks([])\n        plt.xticks([])\n        plt.title('Denoised image')\n\n\n    plt.show()","sub_path":"Research/utils/martemev_utils.py","file_name":"martemev_utils.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"106988988","text":"import random\r\nimport time\r\nplay = 1\r\nscore = 0\r\n\r\nwhile 0 != play:\r\n    print('Type the number that appears ')\r\n    print('Your current score is: %d' % score)\r\n    time.sleep(random.uniform(0, 2))\r\n    print('Alright go!')\r\n    num = random.randint(0, 9)\r\n    print(num)\r\n    start = time.time()\r\n    answer = input('')\r\n    end = time.time()\r\n    if answer == str(num):\r\n        score = score + 1\r\n        print('Nice Job, your score is: %d' % score)\r\n        print('Your time 
in s is %f' % (end - start))\r\n    else:\r\n        print('You literally had to type the right number... your score is %d' % score)\r\n        print('You took %f s to get the wrong answer' % (end - start))\r\n    if input('Press 0 if you want to quit, otherwise keep going!\\n') == '0':\r\n        play = 0\r\nprint('K bai')\r\n\r\n\r\n","sub_path":"G1.py","file_name":"G1.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"112312127","text":"#!/usr/bin/python2.5\n# Copyright 2011 JatLeGo Inc. All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\nimport os\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"website.jetlag3.config.setting\"\n\nfrom google.appengine.dist import use_library\nuse_library(\"django\", \"1.2\")\n\nfrom django.conf import settings\n\nDEBUG = True\n\nROOT_PATH, CONFIG_PATH = os.path.split(os.path.dirname(__file__))\nTEMPLATE_DIRS = (ROOT_PATH + \"/templates/\", )\n\nINSTALLED_APPS = (\"website.jetlag3.django_tags\", )\nLOCALE_PATHS = (ROOT_PATH + \"/locale\", )\nUSE_I18N = True\n\nLANGUAGES = (\n    (\"en-us\", \"English(US)\"),\n    (\"zh-cn\", \"Chinese(Simplified)\")\n)\nLANGUAGE_CODE = \"zh-cn\"\n\nfrom django.conf import settings\nsettings.DEBUG\n\n\n# Website configurations:\nWEBSITE_NAME = \"Jet-Lag3\"\nINIT_MESSAGE = 10\n","sub_path":"website/jetlag3/config/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"323994804","text":"#coding: utf-8\n\nimport csv\n\nmydata = [['onibus', 'linha', 'distancia', 'tempo']]\n\nwith open('part-00000', 'r') as f:\n    for line in f:\n        mydata.append(line.translate(None, \"(')\\n \").split(\",\"))\n\t\t\t\n\t\t\t\nwith open('prob4data.csv', 'w') as mycsv:\n\ta = csv.writer(mycsv)\n\ta.writerows(mydata)\n","sub_path":"problema4-ad2/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"613678209","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n# @Author : lusheng\nimport datetime\nimport sqlite3\nimport json\n\nimport requests\nimport re\nimport time\n\n\n# create the announcements table in the database\ndef create_db():\n    # connect\n    conn = sqlite3.connect(\"announcement.db\")\n    c = conn.cursor()\n\n    # create the table\n    c.execute('''DROP TABLE IF EXISTS announcement ''')  # drop the old table if it exists (this is temporary data)\n    c.execute('''\n    CREATE TABLE announcement (\n    id INTEGER PRIMARY KEY AUTOINCREMENT, \n    companyCd INTEGER , \n    companyName text,\n    title text,\n    publishDate text, \n    filePath text)\n    ''')\n    conn.commit()\n    conn.close()\n\n\n# create_db()  # initialize the database\nurl = 'http://www.neeq.com.cn/disclosureInfoController/infoResult_zh.do?callback=jQuery331_1596699678177'\ncompanyCd = input(\"Enter the company code or company name to query: \")\nif len(companyCd) == 6 or len(companyCd) == 8:\n    data = {\n        \"disclosureType[]\": 5,\n        \"disclosureSubtype[]\": None,\n        \"page\": 0,\n        \"startTime\": \"2020-03-06\",\n        \"endTime\": \"2020-08-06\",\n        \"companyCd\": companyCd,\n        \"isNewThree\": 1,\n        \"keyword\": None,\n        \"xxfcbj[]\": None,\n        \"hyType[]\": None,\n        \"needFields[]\": [\"companyCd\", \"companyName\", \"disclosureTitle\", \"destFilePath\", \"publishDate\", \"xxfcbj\",\n                         \"destFilePath\", \"fileExt\", \"xxzrlx\"],\n        \"sortfield\": \"xxssdq\",\n        \"sorttype\": \"asc\",\n    }\n    # fetch the announcement list\n    news_list = []\n    response1 = requests.post(url, data)\n    # print(response1.text)\n    response2 = re.search('(?<=\(\[)(.*?)(?=]\))', response1.text).group()\n    # print(response2)\n    j = json.loads(response2)['listInfo']\n    if j['content'] == []:\n        print(\"No results found; please check that the company code or name is correct\")\n    else:\n        totalElements = j['totalElements']\n        totalPages = j['totalPages']\n        print(\"Found %d announcements, %d pages in total\" % (totalElements, totalPages))\n        db = sqlite3.connect(\"announcement.db\")\n        c = db.cursor()\n        list = j['content']\n        for li in list:\n            print(li)\n            companyCd = li['companyCd']\n            companyName = li['companyName']\n            destFilePath = \"http://www.neeq.com.cn\" + li['destFilePath']\n            disclosureTitle = li['disclosureTitle']\n            publishDate = li['publishDate']\n            xxfcbj = li['xxfcbj']\n            xxzrlx = li['xxzrlx']\n            result = c.execute(\"SELECT * FROM announcement where filePath = '%s'\" % destFilePath)\n            if result.fetchone():\n                print(disclosureTitle, \" this announcement already exists in the database\\n\")\n            else:\n                data = \"NULL,\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\'\" % (\n                    companyCd, companyName, disclosureTitle, publishDate, destFilePath)\n                # print(data, \"\\n\")\n                c.execute('INSERT INTO announcement VALUES (%s)' % data)\n                db.commit()\n            time.sleep(3)  # rest 3 seconds after each page so the server is not hit too fast\nelse:\n    print(\"Invalid company code or name format, please check\")\n\n\n# try:\n#     while True:\n#         # create_db()\n#         db = sqlite3.connect(\"news.db\")\n#         c = db.cursor()\n#         newspage(5)  # fetch 5 pages each time\n#         time.sleep(600)  # run once every 10 minutes\n# except Exception as e:\n#     print(str(e))\n# finally:\n#     db.close()\n","sub_path":"announcement.py","file_name":"announcement.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"181907789","text":"import numpy as np \nimport cv2 \n \n# read video \ncap = cv2.VideoCapture('./videos/cat.mp4') \n\n# retrieve the very first \n# frame from the video \n_, frame = cap.read() \n \n# set the region for the \n# tracking window p, q, r, s \n# put values according to yourself \np, q, r, s = 150, 150, 460, 100\ntrack_window = (r, p, s, q) \n \n# create the region of interest \nr_o_i = frame[p:p + q, r:r + s] \n \n# converting BGR to HSV format \nhsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) \n \n# apply mask on the HSV frame \nmask = cv2.inRange(hsv, np.array((0., 60., 33.)), np.array((220., 225., 225.))) \n \n# get histogram for hsv channel \nroi = cv2.calcHist([hsv], [0], mask, [180], [0, 180]) \n \n# normalize the retrieved values \ncv2.normalize(roi, roi, 0, 255, cv2.NORM_MINMAX) \n \n# termination criteria, either 15 \n# iteration or by at least 2 pt \ntermination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT , 15, 2 ) \n\nwhile(True): \n    _, frame = cap.read() \n    frame = cv2.resize(frame, (1280, 720), fx = 0, fy = 0, interpolation = cv2.INTER_CUBIC) \n\n    # convert BGR to HSV format \n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) \n\n    bp = cv2.calcBackProject([hsv], [0], roi, [0, 180], 1) \n\n    # applying meanshift to get the new region \n    _, track_window = cv2.meanShift(bp, track_window, termination) \n\n    # Draw track window on the frame \n    x, y, w, h = track_window \n    vid = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2) \n\n    # show results \n    cv2.imshow('tracker', vid) \n\n    k = cv2.waitKey(1) & 0xff\n    if k == ord('q'): \n        break\n# release cap object \ncap.release() \n\n# destroy all opened windows \ncv2.destroyAllWindows() \n","sub_path":"buoi2/meanshift1.py","file_name":"meanshift1.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"316456970","text":"from module import timer, Scraper, write_csv_in_file\n\nURL = 
'https://vologda.upravdom.com'\nSHOW_50 = '?show=50'\nPAGE = '&PAGEN_1='\n\n\ndef get_pagination(soup):\n try:\n total_pages = soup.find('ul', class_='pagination').find_all('li')\n\n if total_pages[-1].get('title') == 'Последняя страница':\n total_pages = total_pages[-1].find('a').get('href').split('=')[-1]\n total_pages = int(total_pages)\n else:\n total_pages = total_pages[-2].find('a').get('href').split('=')[-1]\n total_pages = int(total_pages)\n except AttributeError:\n total_pages = 1\n return total_pages\n return total_pages\n\n\ndef get_data(soup, title='УправДом', available='В наличии'):\n soup_items = soup.find('div', class_='catalog').find('div', class_='category-items').find_all('li', class_='item-wrapper')\n data_items = []\n\n for item in soup_items:\n name = item.find('span', itemprop='name').text.split()\n name = ' '.join(name)\n\n url = URL + item.find('a', itemprop='url').get('href')\n\n url_image = item.find('div', class_='image-wrapper').find('img').get('src')\n url_image = URL + url_image\n\n price = item.find('div', class_='new-price').text.split()\n price = ' '.join(price)\n\n data_item = {'title': title, 'name': name, 'price': price, 'available': available, 'url': url, 'url_image': url_image}\n data_items.append(data_item)\n\n return data_items\n\n\n@timer\ndef main():\n soup = Scraper.get_soup(URL)\n soup_data = soup.find('div', class_='main-container').find('div', class_='dark').find('div', class_='container').find_all('div', 'col-sm-6')\n main_links = (URL + link.find('a').get('href') + SHOW_50 for link in soup_data)\n\n data_list = []\n for link in main_links:\n soup = Scraper.get_soup(link)\n total_pages = get_pagination(soup)\n data_list += get_data(soup)\n\n if total_pages != 1:\n for page in range(2, total_pages+1):\n soup = Scraper.get_soup(link+PAGE+str(page))\n data_list += get_data(soup)\n\n write_csv_in_file(data_list)\n print('Собрано данных: {}'.format(len(data_list)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scrapers/upravdom.py","file_name":"upravdom.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"234536497","text":"#\r\n# initial URL\r\n#\r\n\r\nimport sys\r\n\r\nurl = input('Enter the domain to crawl (including \"http...\"): ')\r\ndomain_base = urllib.parse.urlparse(url).netloc\r\nprint(\"base domain = \", domain_base)\r\n\r\nprint('url entered = ', url)\r\nif url.startswith('http'):\r\n print(\"url ok\")\r\nelse:\r\n print('Must include http in url...')\r\n sys.exit()\r\n# remove the trailing '/':\r\nif url.endswith('/'):\r\n url = url[:-1]\r\n \r\nprint(url)","sub_path":"WebCrawler/w6.py","file_name":"w6.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114590148","text":"#MIT License\n#\n#Copyright (c) 2018 Luis Victor Muller Fabris\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS 
PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#In this file we declare the toolbar itens that are specific to the spectrometer.\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#Import necessary modules here.\n\n\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#Always import the folowing module.\nfrom ui_headers import *\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#We will use configfile to save the values in the widgets defined below to disk.\n#The argument passed to initconfig must not be used in other module and can not contain \"/\"\n#or any other character that can't be in a file name (for exemple the null character)\nconfigfile=initconfig(\"demo\")\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#Below we add the widgets that we want in the toolbar that are device specific\nlabel = Gtk.Label(\"Integration time:\")\nlabelb = Gtk.Label(u\"\\u03BCs\")\nlabelc = Gtk.Label(\" Sleep time:\")\nlabeld = Gtk.Label(\" Averages:\")\ntool_itemc = Gtk.ToolItem()\ntool_itemd = Gtk.ToolItem()\ntool_itemc.add(label)\ntool_itemd.add(labelb)\ntool_itemb = Gtk.ToolItem()\nadjustment = Gtk.Adjustment(value=5702,lower=3,upper=9854,step_increment=1,page_increment=1,page_size=10)\nadjustmentb = Gtk.Adjustment(value=1,lower=1,upper=30,step_increment=1,page_increment=1,page_size=10)\nentry = Gtk.SpinButton(adjustment=adjustment, digits=0)\n#Fix a bug in some GTK themes (For example Mint-Y) that causes the buttons to disappear (white background and text in white text entry) when the spin button has focus.\nentry.get_style_context().remove_class(Gtk.STYLE_CLASS_SPINBUTTON)\nentry.set_numeric(True)\nentryb = Gtk.SpinButton(adjustment=adjustmentb, digits=0)\n#Fix a bug in some GTK themes (For example Mint-Y) that causes the buttons to disappear (white background and text in white text entry) when the spin button has 
focus.\nentryb.get_style_context().remove_class(Gtk.STYLE_CLASS_SPINBUTTON)\nentryb.set_numeric(True)\nentry.set_width_chars(5)\nentryb.set_width_chars(2)\ntool_itementryaverages = Gtk.ToolItem()\ntool_itementryaverages.add(entryb)\ntool_itemb.add(entry)\nfreq = Gtk.ListStore(int, str)\ntool_itemcombo = Gtk.ToolItem()\ntool_itemlabelcombo = Gtk.ToolItem()\ntool_itemlabelaverages = Gtk.ToolItem()\ntool_itemlabelaverages.add(labeld)\nfreq_combo = Gtk.ComboBox.new_with_model(freq)\nrenderer_textb = Gtk.CellRendererText()\nfreq_combo.pack_start(renderer_textb, True)\nfreq_combo.add_attribute(renderer_textb, \"text\", 1)\ntool_itemcombo.add(freq_combo)\ntool_itemlabelcombo.add(labelc)\nfreq_combo.set_active(0)\noldtint=-1\ndef changeavailtinits(tint,vsettings):\n\tglobal oldtint\n\tglobal lastactive\n\tlastactive=int(vsettings)\n\ttry:\n\t\tlastactive=freq_combo.get_model()[freq_combo.get_active_iter()][:2][0]\n\texcept:\n\t\tnull=None\n\tif(tint!=oldtint):\n\t\tfor introw in freq:\n\t\t\tdel freq[0]\n\t\ti=0\n\t\tif(((1000000/10)-126-20-tint)>=0):\n\t\t\tfreq.append([1, str((1000000/30)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 30 hz\"])\n\t\t\ti=i+1\n\t\tif(((1000000/100)-126-20-tint)>=0):\n\t\t\tfreq.append([2, str((1000000/100)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 100 hz\"])\n\t\t\ti=i+1\n\t\tif(((1000000/500)-126-20-tint)>=0):\n\t\t\tfreq.append([3, str((1000000/500)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 500 hz\"])\n\t\t\ti=i+1\n\t\t#if(((1000000/1000)-126-20-tint)>=0):\n\t\t#\tfreq.append([4, str((1000000/1000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 1 khz\"])\n\t\t#\ti=i+1\n\t\t#if(((1000000/2000)-126-20-tint)>=0):\n\t\t#\tfreq.append([5, str((1000000/2000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 2 khz\"])\n\t\t#\ti=i+1\n\t\t#if(((1000000/3000)-126-20-tint)>=0):\n\t\t#\tfreq.append([6, str((1000000/3000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 3 khz\"])\n\t\t#\ti=i+1\n\t\t#if(((1000000/4000)-126-20-tint)>=0):\n\t\t#\tfreq.append([7, str((1000000/4000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 4 khz\"])\n\t\t#\ti=i+1\n\t\t#if(((1000000/5000)-126-20-tint)>=0):\n\t\t#\tfreq.append([8, str((1000000/5000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 5 khz\"])\n\t\t#\ti=i+1\n\t\t#if(((1000000/6000)-126-20-tint)>=0):\n\t\t#\tfreq.append([9, str((1000000/6000)-126-20-tint)+u\" \\u03BCs + 126 \\u03BCs + 20 \\u03BCs = 6 khz\"])\n\t\t#\ti=i+1\n\t\tif((int(lastactive))>=i):\n\t\t\tlastactive=i\n\t\tfreq_combo.set_active(int(lastactive)-1)\n\t\toldtint=tint\ndef changetinta(self):\n\tchangeavailtinits(float(entry.get_value()),2)\nentry.connect('value-changed', changetinta)\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#Here we will load the values from the configuration file into the widgets.\n#We use the try/except do avoid errors if the value stored is not an float/int or if there is no value stored 
yet.\nvsettings=\"1\"\ntry:\n\tvsettings=float(configfile['1'])\nexcept:\n\tconfigfile['1']=\"\"\nif(str(vsettings)+\"\"!=\"\"):\n\tentry.set_value(float(vsettings))\nvsettings=\"1\"\ntry:\n\tvsettings=float(configfile['2'])\nexcept:\n\tconfigfile['2']=\"\"\nif(str(vsettings)+\"\"!=\"\"):\n\tentryb.set_value(float(vsettings))\nvsettings=\"1\"\ntry:\n\tvsettings=float(configfile['3'])\nexcept:\n\tconfigfile['3']=\"\"\nchangeavailtinits(float(entry.get_value()),vsettings)\nif(str(vsettings)+\"\"!=\"\"):\n\tfreq_combo.set_active(int(vsettings)-1)\nconfigfile.close()\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n#The folowing function receives the toolbar object and insert the widgets declared before in order.\n#This function must always be declared here.\ndef additems(toolbar):\n\t#toolbar.insert(tool_itementryaverages,0)\n\t#toolbar.insert(tool_itemlabelaverages,0)\n\ttoolbar.insert(tool_itemcombo,0)\n\ttoolbar.insert(tool_itemlabelcombo,0)\n\ttoolbar.insert(tool_itemd,0)\n\ttoolbar.insert(tool_itemb,0)\n\ttoolbar.insert(tool_itemc,0)\n#The folowing function sends the values from the widgets to the main program. This values will be returned to the spectrometer data acquisition function.\n#Please don't use any separator in the string other than ; because this string will be concatenated later.\n#This function must always be declared here.\ndef gettoolbarvalues():\n\tgettoolbarvaluesreturnvalue=\"\"\n\tgettoolbarvaluesreturnvalue=gettoolbarvaluesreturnvalue+str(freq_combo.get_model()[freq_combo.get_active_iter()][:2][0])+\";\"\n\tgettoolbarvaluesreturnvalue=gettoolbarvaluesreturnvalue+str(entry.get_value())+\";\"\n\tgettoolbarvaluesreturnvalue=gettoolbarvaluesreturnvalue+str(entryb.get_value())+\";\"\n\treturn gettoolbarvaluesreturnvalue\n#This function returns an array with the widgets that the user can change values. 
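# The three load blocks above share one shape: read a stored value, fall
# back to a default when the key is missing or not numeric, and make sure
# the key exists afterwards. A sketch of that shape as a helper; configfile
# is assumed to behave like the dict-style object initconfig returns.
def load_setting(configfile, key, default=1.0):
    try:
        return float(configfile[key])
    except (KeyError, ValueError, TypeError):
        configfile[key] = ""
        return default

# entry.set_value(load_setting(configfile, '1'))
# entryb.set_value(load_setting(configfile, '2'))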
The \"changed\" event will be connected to this widgets.\n#This function must always be declared here.\ndef getchangedeventelements():\n\treturnarray=[None] * 3\n\treturnarray[0]=freq_combo\n\treturnarray[1]=entry\n\treturnarray[2]=entryb\n\treturn returnarray\n#This function is called when some value on a widget changes.\n#Here we will store the new values on the configuration file.\n#This function must always be declared here.\ndef changedelement():\n\tconfigfile=initconfig(\"imon256\")\n\tconfigfile['1']=float(entry.get_value())\n\tconfigfile['2']=float(entryb.get_value())\n\tconfigfile['3']=float((freq_combo.get_model()[freq_combo.get_active_iter()][:2][0]))\n\tconfigfile.close()\n#This function removes all the toolbar buttons from the toolbar.\n#This function is called when the user changes the spectrometer in the botton left combo box.\n#This function must always be declared here.\ndef removeallwidgets(toolbar):\n\ttoolbar.remove(tool_itementryaverages)\n\ttoolbar.remove(tool_itemlabelaverages)\n\ttoolbar.remove(tool_itemcombo)\n\ttoolbar.remove(tool_itemlabelcombo)\n\ttoolbar.remove(tool_itemd)\n\ttoolbar.remove(tool_itemb)\n\ttoolbar.remove(tool_itemc)\ndef getdevicelist():\n\treturn [\"01\"]\n","sub_path":"source/spectrometer_modules/demo/toolbar_options.py","file_name":"toolbar_options.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"451012780","text":"# coding=utf-8\n\nimport sys\n\nimport numpy as np\n\nget_x = lambda t: np.array(zip(*t)[0], dtype=np.float32)\n\nsigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))\nhamming = lambda y, z: np.sum(np.abs(y - z), axis=1)\nsample = lambda p: (p > np.random.uniform(0, 1, p.shape)).astype(np.float32)\n\n\nclass RBM:\n def __init__(self, size, eta, batch_size, epochs,\n mode='bern', momentum=0.0, w=None, a=None, b=None):\n self.v_size, self.h_size = size\n self.eta = eta\n self.batch_size = batch_size\n self.epochs = epochs\n self.mode = mode\n self.momentum = momentum\n\n self.w = np.random.normal(0, 0.1, (self.v_size, self.h_size)) if w is None else w\n self.a = np.zeros(self.v_size) if a is None else a\n self.b = np.zeros(self.h_size) if b is None else b\n\n self.delta_w = np.zeros((self.v_size, self.h_size))\n self.delta_a = np.zeros(self.v_size)\n self.delta_b = np.zeros(self.h_size)\n\n self.scores = []\n\n def hidden_step(self, visible):\n return sigmoid(np.dot(visible, self.w) + self.b)\n\n def visible_step(self, hidden):\n if self.mode == 'bern':\n return sigmoid(np.dot(hidden, self.w.T) + self.a)\n elif self.mode == 'gauss':\n return np.dot(hidden, self.w.T) + self.a\n\n def contrastive_divergence(self, batch):\n self.delta_w *= self.momentum\n self.delta_a *= self.momentum\n self.delta_b *= self.momentum\n\n vis = batch\n\n # CD-k, уже при k = 1 качество не сильно уступает большим значениям,\n # но выигрыш в скорости значительный => будем делать только один проход без цикла.\n # P(h|v)\n p_hid = self.hidden_step(vis)\n\n self.delta_w += np.dot(vis.T, p_hid)\n self.delta_a += np.sum(vis, axis=0)\n self.delta_b += np.sum(p_hid, axis=0)\n\n hid = 1. 
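# A self-contained sketch of the single CD-1 gradient step performed here,
# stripped of momentum and bias updates so the core estimate is visible:
# positive statistics from the data, one sampled Gibbs half-step, negative
# statistics from the reconstruction. Shapes follow the class above
# (v: batch x v_size, w: v_size x h_size).
import numpy as np

def cd1_weight_gradient(v, w, a_vis, b_hid):
    sig = lambda z: 1.0 / (1.0 + np.exp(-z))
    p_h = sig(np.dot(v, w) + b_hid)              # P(h|v), positive phase
    h = (p_h > np.random.uniform(0, 1, p_h.shape)).astype(np.float32)
    p_v = sig(np.dot(h, w.T) + a_vis)            # 'bern' visible step
    p_h2 = sig(np.dot(p_v, w) + b_hid)           # negative phase, not sampled
    return (np.dot(v.T, p_h) - np.dot(p_v.T, p_h2)) / len(v)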
* sample(p_hid)\n\n p_vis = self.visible_step(hid)\n\n # не семплировать видимый слой\n # (семплирование замедляет сходимость, но математически это более корректно);\n p_hid = self.hidden_step(p_vis)\n\n # не семплировать значения скрытого слоя при выводе из восстановленного образа;\n self.delta_w -= np.dot(p_vis.T, p_hid)\n self.delta_a -= np.sum(p_vis, axis=0)\n self.delta_b -= np.sum(p_hid, axis=0)\n\n self.w += self.eta * self.delta_w / self.batch_size\n self.a += self.eta * self.delta_a / self.batch_size\n self.b += self.eta * self.delta_b / self.batch_size\n\n def fit(self, train_data, cv_data=None):\n\n for epoch in range(self.epochs):\n np.random.shuffle(train_data) # inplace shuffle\n\n for i in range(len(train_data) / self.batch_size):\n batch = train_data[self.batch_size * i:self.batch_size * i + self.batch_size]\n self.contrastive_divergence(batch)\n\n if cv_data is not None:\n pred_data = self.visible_step(sample(self.hidden_step(cv_data)))\n score = np.mean(((cv_data - pred_data) ** 2))\n self.scores.append(score)\n\n sys.stdout.write('\\r' + \"%s / %s | %s\" \\\n % (epoch, self.epochs, self.scores[-1]))\n sys.stdout.flush()\n #\n # if cv_data is not None:\n # pred_data = self.visible_step(sample(self.hidden_step(cv_data)))\n # score = np.mean(np.sum((cv_data - pred_data) ** 2, axis=1))\n # self.scores.append(score)\n #\n # score_delta = 0\n # if len(self.scores) > 10:\n # score_delta = np.abs(np.mean(np.diff(self.scores[-20:])))\n #\n # if score_delta <= 0.001:\n # print(\"!\", score_delta)\n # break\n # sys.stdout.write('\\r' + \"%s / %s | %s %s\" \\\n # % (epoch, self.epochs, self.scores[-1], score_delta))\n # sys.stdout.flush()\n\n\n","sub_path":"deep-learning/hw2/src/RBM.py","file_name":"RBM.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231754089","text":"import csv\nimport os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nengine = create_engine(os.getenv('DATABASE_URL'))\ndb = scoped_session(sessionmaker(bind=engine))\n\n# listar vuelos\n\n\ndef main():\n fo = open(\"vuelos.csv\")\n lector = csv.reader(fo)\n for origen, destino, duracion in lector:\n db.execute(\"INSERT INTO vuelos (origen, destino, duracion) VALUES(:origen, :destino, :duracion)\", {\n \"origen\": origen, \"destino\": destino, \"duracion\": duracion})\n print(f\"agregados vuelos desde {origen} a {destino} durando {duracion} minutos.\")\n db.commit()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"importar.py","file_name":"importar.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106676565","text":"\"\"\" App for demonstrating the functionality of the Indy-SDK Libindy libraries for managing Self Soverign Identification\non the internet \"\"\"\n\nimport time\nimport re\nimport asyncio\nimport json\n\nimport pprint\n\nfrom indy import pool, ledger, wallet, did\nfrom indy.error import IndyError, ErrorCode\n\n# Required functions:\n\nfrom identity import ID, print_log, create_wallet, did_and_verkey\n\nfrom write_did_functions import pool_configuration, nym_request, query_did, replace_keys, get_verkey, cleanup, delete_wallet\nfrom connection import connect\nfrom secure_messenger import messenger\n\n# from save_schema_and_cred_def_functions import schema_request, credential_definition\n# from issue_credential_functionls import 
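# importar.py just above inserts one row per loop iteration and commits at
# the end; the same load can be batched by handing SQLAlchemy a list of
# parameter dictionaries, which it executes as a single executemany. A
# sketch under that assumption, reusing the example's own table and column
# names:
import csv

def cargar_vuelos(db, path="vuelos.csv"):
    with open(path) as fo:
        rows = [{"origen": o, "destino": d, "duracion": t}
                for o, d, t in csv.reader(fo)]
    db.execute(
        "INSERT INTO vuelos (origen, destino, duracion) "
        "VALUES (:origen, :destino, :duracion)", rows)
    db.commit()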
prover_wallet_and_link_secret, offer_credential,request_credential, create_credential, process_and_store_credential\n# from negotiate_proof_functions import build_proof_request, fetch_credentials, create_proof, verify_proof\n\nIP = 'JamesL' # Network IP address for server of Nodes pool\nclientname = 'JamesL' # Ip Address for messenger client (receiver)\n\nasync def run():\n\n while True:\n print_log('\\n')\n print_log('Welcome to Sovrin:')\n print_log(' ______________________________________________________________')\n print_log('1. Connect to the Sovrin nodes pool.')\n print_log('2. Sovrin Messenger.')\n print_log('3. Create/Load identity.')\n print_log('4. Create/Open wallet for identity.')\n print_log('5. Create/Get DID and verkey for Identity (Public DID).') \n print_log('6. Create Connection (Private DID).') \n print_log('7. Create NYM Request.')\n print_log('8. Query DID (GET_NYM Request).')\n print_log('9. Replace Keys.')\n print_log('10. Get Verkey for DID on the ledger.')\n # print_log('11. Create Schema Request.')\n # print_log('12. Create Get Schema Request.')\n # print_log('13. Create Credential Definition.')\n # print_log('14. Create Prover Link Secret.')\n # print_log('15. Offer Credential.')\n # print_log('16. Request Credential.')\n # print_log('17. Create Credential.')\n # print_log('18. Process and Store Credential.')\n # print_log('19. Build Proof Request.')\n # print_log('20. Fetch Credentials.')\n # print_log('21. Create Proof.')\n # print_log('22. Verify Proof.')\n print_log('11. Clean-up.')\n print_log('12. Delete Identity Owner Wallet.') \n print_log('13. Quit.')\n print_log(' ______________________________________________________________') \n\n Sov = input('Please select:').strip()\n\n # Pool config:\n\n if Sov=='1': \n\n await pool_configuration(IP)\n\n # Send secure message:\n elif Sov=='2':\n\n await messenger(clientname)\n\n # Create ID: \n elif Sov=='3':\n\n await ID() # This step creates/loads and stores a new user ID\n\n # Create/Open wallet:\n elif Sov=='4':\n\n await create_wallet()\n\n # Create DID and Verkey for identity owner:\n elif Sov=='5':\n\n await did_and_verkey() \n\n # Create connection:\n elif Sov=='6':\n\n await connect()\n\n # Create NYM request:\n elif Sov=='7':\n\n await nym_request(IP)\n\n # Create GET_NYM request:\n elif Sov=='8':\n\n await query_did(IP)\n\n # Replace Keys:\n elif Sov=='9':\n\n await replace_keys(IP)\n\n # Get Verkey for DID on the Ledger:\n elif Sov=='10':\n\n await get_verkey(IP)\n\n # # Create Schema Request:\n # elif Sov==11:\n\n # # Create Get Schema Request: \n # elif Sov==12: \n\n # # Create Credential Definition:\n # elif Sov==13:\n\n # # Create Prover Link Secret:\n # elif Sov==14:\n\n # # Offer Credential:\n # elif Sov==15:\n\n # # Request Credential:\n # elif Sov==16:\n\n # # Create Credential:\n # elif Sov==17:\n\n # # Process and Store Credential:\n # elif Sov==18:\n\n # # Build Proof Request:\n # elif Sov==19:\n\n # # Fetch Credentials:\n # elif Sov==20:\n\n # # Create Proof:\n # elif Sov==21:\n\n # # Verify Proof:\n # elif Sov==22:\n \n # Close and Clean-up:\n elif Sov == '11':\n\n await cleanup(IP)\n\n elif Sov == '12':\n\n await delete_wallet()\n\n elif Sov == '13':\n\n await cleanup(IP)\n break\n\n else:\n print('Huh?')\n\ndef main():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run())\n time.sleep(1) # waiting for libindy thread complete\n loop.close()\n\n# Is this file being run directly or is it being imported?\nif __name__ == '__main__': \n 
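# The if/elif ladder above can also be phrased as a dispatch table; a
# sketch with two local stand-in coroutines so it runs on its own (in the
# demo the values would be the coroutines imported at the top of Alice.py).
import asyncio

async def _demo_pool():
    print('pool configured')

async def _demo_wallet():
    print('wallet opened')

HANDLERS = {'1': _demo_pool, '4': _demo_wallet}

async def dispatch(choice):
    handler = HANDLERS.get(choice.strip())
    if handler is None:
        print('Huh?')
    else:
        await handler()

# asyncio.get_event_loop().run_until_complete(dispatch('1'))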
main()","sub_path":"Demo/Alice.py","file_name":"Alice.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377284010","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[59]:\n\n\nimport regex\nimport pandas as pd\nfrom typing import List, Callable\n\n\n# In[116]:\n\n\ndef read_instructions(instructions_path: str = \"x86_instruction_set.csv\", instructions_colname:str = \"Instruction\") -> List[str]: \n \"\"\"\n Takes in a path to a csv file specifying the list of x86-64 instructions\n Given the column name from the csv file it then gets the set of elements in that column and converts them to lower case\n \"\"\"\n instruction_df = pd.read_csv(\"x86_instruction_set.csv\")\n cleaned_instructions = [i.split()[0].lower() for i in instruction_df[\"Instruction\"]]\n \n return list(set(cleaned_instructions))\n\n\n# In[117]:\n\n\n# NOTE \n# to remove the infinite look-behind the instruction prefix should be \n# r\"(?<=\"\n\n\n# In[135]:\n\n\ndef split_regex(instructions: List[str], \n register_regexp: str = r\"(?<=\\%\\w{2,5})(?=,)|(?<=\\s)(?=\\%\\w{2,5})|(?<=\\%\\w{2,5})(?=\\s)|(?<=\\%\\w{2,5})(?=\\))|(?<=\\()(?=\\%)|\\s+\", \n instruction_prefix: str = r\"(?<=(? List[str]: \n \"\"\"\n assembly string: a string of assembly that we wish to tokenize\n split_regexp: the regex string on which to split, ideally the output of split_regex\n filter_fun: function that returns true for the elements we want to keep and false for the ones we want to prune\n clean_regexp: regular expressions for us to remove comments or directives which we do not wish to keep in the \n \"\"\"\n clean_assembly = regex.sub(clean_regexp, \"\", assembly_string)\n clean_assembly = regex.sub(\"\\s+\", \" \", clean_assembly)\n first_split = regex.split(split_regexp, clean_assembly)\n filtered_first_split = list(filter(filter_fun, first_split))\n # split all non-register or instruction tokens \n result = []\n for token in filtered_first_split: \n if token not in instructions and regex.match(\"\\%\\w+\", token) == None: \n result.extend([c for c in token])\n else: \n result.append(token)\n return result\n \n \n\n\n# In[145]:\n\n\n# In[151]:\n\n\nif __name__ == \"__main__\": \n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', help = 'assembly file name to parse')\n parser.add_argument('-o', default = \"tokenized_assembly.txt\")\n args = parser.parse_args()\n with open(args.f, \"r\") as f: \n assembly = f.read()\n print(f\"assembly is {assembly}\")\n instructions = read_instructions()\n split_regexp = split_regex(instructions)\n tokenized_results = tokenize_assembly(assembly, split_regexp, instructions, filter_fun)\n from pprint import pprint\n print(\"tokenized assembly is: \\n\")\n pprint(tokenized_results)\n with open(args.o, \"w+\") as f: \n for tok in tokenized_results: \n f.write(f\"{tok}\\n\")\n print(\"You are the most awesome person ever, when machines take over the universe we will be kind to you\")\n\n","sub_path":"utils/Tokenizer.py","file_name":"Tokenizer.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275357087","text":"import numpy as np\nimport random\nimport pandas as pd\nimport torch\nimport matplotlib.pyplot as plt\n\nTOTAL_DAYS = 397\nTEST_DAYS = 44\nSKIP_DAYS = 6\nSKIP_HOURS = 24 * SKIP_DAYS\nFILE_LIST = ['gen_by_fuel_type_20171001_20181101_processed.csv', \n 
'gen_outage_20171001_20181101_processed.csv',\n 'hrl_load_prelim_20171001_20181101_processed.csv',\n 'lmp_data_20171001_20181101_processed.csv',\n 'load_frcstd_hist_20171001_20181101_processed.csv',\n 'total_lmp_data_20171001_20181101_processed.csv']\n\ndef make_batch(x_train, y_train, batch_size):\n i = random.randint(0, len(x_train) - batch_size)\n if ((i + batch_size) > len(x_train)):\n batch_xs = x_train[i: -1]\n batch_ys = y_train[i: -1]\n else:\n batch_xs = x_train[i: i + batch_size]\n batch_ys = y_train[i: i + batch_size]\n return [batch_xs, batch_ys]\n\ndef read_data(file_list):\n df = pd.DataFrame()\n df_fuel_type = pd.read_csv(file_list[0])\n df_outage = pd.read_csv(file_list[1])\n df_load_pre = pd.read_csv(file_list[2])\n df_comp_lmp = pd.read_csv(file_list[3])\n df_load_fore = pd.read_csv(file_list[4])\n df_total_lmp = pd.read_csv(file_list[5])\n\n y = df_total_lmp.iloc[SKIP_HOURS:, 1:].reset_index()\n y.to_csv('y_t.csv')\n\n temp_df = df_fuel_type.iloc[SKIP_HOURS:, 1:].reset_index()\n # df = df.append(df_fuel_type.iloc[SKIP_HOURS:, 1:])\n df = pd.concat([df, temp_df.iloc[:, 1:]], axis=1)\n \n df = df.reset_index()\n for i in range(0, 7):\n if(i == 0):\n temp_df = df_load_pre.iloc[SKIP_HOURS:, 1:].reset_index()\n df = pd.concat([df, temp_df.iloc[:, 1:]], axis=1)\n else:\n temp_df = df_load_pre.iloc[SKIP_HOURS-24*i:-24*i, 1:].reset_index()\n df = pd.concat([df, temp_df.iloc[:, 1:]], axis=1)\n # df['forecast_load_mw'] = df_load_fore[SKIP_HOURS:, 1] #测试集用\n\n for i in range(1, 7):\n temp_df = df_comp_lmp.iloc[SKIP_HOURS-24*i:-24*i, 2:5].reset_index()\n df = pd.concat([df, temp_df.iloc[:, 1:4]], axis=1)\n\n for i in range(1, 7):\n temp_df = df_total_lmp.iloc[SKIP_HOURS-24*i:-24*i, 1:].reset_index()\n df = pd.concat([df, temp_df.iloc[:, 1:4]], axis=1)\n \n temp_outage = pd.DataFrame()\n for i in range(SKIP_DAYS, TOTAL_DAYS):\n for j in range(0, 24):\n temp_outage = pd.concat([temp_outage, df_outage.iloc[i, 1:]], axis=0, ignore_index=True)\n\n temp_df = temp_outage.reset_index()\n\n df = pd.concat([df, temp_df.iloc[:, 1:]], axis=1)\n # print(df)\n df.to_csv('test.csv')\n\n\ndef divide_data(file_namex, file_namey):\n df_x = pd.read_csv(file_namex)\n df_y = pd.read_csv(file_namey)\n train_x = []\n train_y = []\n test_x = []\n test_y = []\n for i in range(0, len(df_x) - 24 * TEST_DAYS):\n train_x.append(list(df_x.ix[i]))\n train_y.append(list(df_y.ix[i]))\n\n for i in range(len(df_x) - 24 * TEST_DAYS, len(df_x)):\n test_x.append(list(df_x.ix[i]))\n test_y.append(list(df_y.ix[i]))\n\n return train_x, train_y, test_x, test_y\n\ndef cal_mape(y_test, y_fore):\n '''the y_test is a 2D array, the y_fore is a 1D array'''\n n = len(y_test)\n ape_sum = 0\n for i in range(n):\n ape_sum += abs(y_test[i][0] - y_fore[i]) / abs(y_test[i][0])\n return 100 * ape_sum / n\n\ndef get_results(y_train, y_fore_train, y_test, y_fore_test, end_time=0, start_time=0):\n print('time_cost in training is:', end_time - start_time)\n print('the error on train data is:', cal_mape(y_train, y_fore_train))\n print('the error on test data is:', cal_mape(y_test, y_fore_test))\n\n fig1 = plt.figure()\n plt.title('2018.10.18---2018.11.1 PJM NODE:5021072 DAY AHEAD LMP FORECASTING')\n l1 = plt.plot(y_test, marker='*', label='actual')\n l2 = plt.plot(y_fore_test, marker='o', label='forecast')\n plt.legend()\n plt.show()\n\ndef get_encoded(x_train, x_test):\n encoder_net = torch.load('net.pkl') # 提取训练好的encoder\n x_train_encoded = []\n x_test_encoded = []\n for each in x_train:\n 
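# cal_mape above walks the arrays in a Python loop; the same mean absolute
# percentage error can be computed in one vectorized expression. A sketch
# with numpy, flattening so the 2D test targets used above and 1D forecasts
# are both accepted:
import numpy as np

def mape(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float).reshape(-1)
    y_pred = np.asarray(y_pred, dtype=float).reshape(-1)
    return 100.0 * np.mean(np.abs(y_true - y_pred) / np.abs(y_true))

# mape([[10.0], [20.0]], [9.0, 22.0]) -> 10.0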
x_train_encoded.append(encoder_net(torch.from_numpy(np.array(each)).float()).detach().numpy())\n for each in x_test:\n x_test_encoded.append(encoder_net(torch.from_numpy(np.array(each)).float()).detach().numpy())\n return x_train_encoded, x_test_encoded\n\nif __name__ == '__main__':\n read_data(FILE_LIST)\n # divide_data('x.csv', 'y.csv')","sub_path":"period2/32414845dailyahead/processed_data/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361456551","text":"import sys\nimport time\nfrom datetime import datetime\n\nimport win32com.client as win32\n\nfrom get_calls_handled import get_calls_handled\nfrom get_pogo_sales import get_pogo_sales\n# from get_DEPP_sales import get_DEPP_sales\nfrom get_DEPP_sales1 import get_DEPP_sales\nfrom get_fcp_sales import get_fcp_sales\nfrom get_DEPP_sales_breakdown import get_DEPP_sales_breakdown\nfrom data_files import callsHandledReportLocation, pogoSalesReportLocation\nfrom data_files import fcpReportLocation, DEPPreportLocation\nfrom data_files import tableNames\nfrom data_files import jaelesiaTeam, tekTeam, antwonTeam, jacksonTeam\nfrom tableformat import topOfTable\nfrom tableformat import agentRowStart, agentRowEnd\nfrom tableformat import agentIDStart, agentIDEnd\nfrom tableformat import agentNameStart, agentNameEnd\nfrom tableformat import callsHandledStart, callsHandledEnd\nfrom tableformat import salesCallsHandledStart, salesCallsHandledEnd\nfrom tableformat import bounceSalesStart, bounceSalesEnd\nfrom tableformat import closeRateStartRed, closeRateStartYellow\nfrom tableformat import closeRateStartGreen, closeRateStartNoColor\nfrom tableformat import closeRateEnd, FCPSalesStart, FCPSalesEnd\nfrom tableformat import DEPPSalesStart, DEPPSalesEnd\nfrom tableformat import supRowStart, supRowEnd\nfrom tableformat import grandTotalRowStart, grandTotalRowEnd\nfrom tableformat import supIDStart, supNameStart, supCallsHandledStart\nfrom tableformat import supSalesCallsHandledStart\nfrom tableformat import supBounceSalesStart, supCloseRateStartRed\nfrom tableformat import supCloseRateStartYellow, supCloseRateStartGreen\nfrom tableformat import supCloseRateStartNoColor\nfrom tableformat import supFCPSalesStart, supDEPPSalesStart\nfrom tableformat import gTotalIDStart, gTotalNameStart, gTotalCallsHandledStart\nfrom tableformat import gTotalSalesCallsHandledStart\nfrom tableformat import gTotalBounceSalesStart, gTotalCloseRateStartRed\nfrom tableformat import gTotalCloseRateStartYellow, gTotalCloseRateStartGreen\nfrom tableformat import gTotalFCPSalesStart, gTotalDEPPSalesStart\n\narguments = []\n\nfor arg in sys.argv:\n arguments.append(arg)\narguments = arguments[1:]\n\ntry:\n int(arguments[0])\n reportDate = arguments[0]\nexcept:\n reportDate = ''\n\n# Cell Background and Font Styles (to be used to conditionally format cells)\nbelow_goal_text = \"9C0006\"\nbelow_goal_bg = \"FFC7CE\"\nclose_to_goal_text = \"9C6500\"\nclose_to_goal_bg = \"FFEB9C\"\nat_or_above_goal_text = \"006100\"\nat_or_above_goal_bg = \"C6EFCE\"\n\n(jaelesiaTotalCallsHandled, tekTotalCallsHandled, antwonTotalCallsHandled, jacksonTotalCallsHandled,\n totalCallsHandled) = 0, 0, 0, 0, 0\n(jaelesiaSalesCallsHandled, tekSalesCallsHandled, antwonSalesCallsHandled, jacksonSalesCallsHandled,\n totalSalesCallsHandled) = 0, 0, 0, 0, 0\n(jaelesiaTotalSales, tekTotalSales, antwonTotalSales, jacksonTotalSales,\n totalSales) = 0, 0, 0, 0, 0\n(jaelesiaFCPsales, tekFCPsales, 
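# read_data in the func.py record above builds day-lagged feature copies by
# slicing with SKIP_HOURS - 24*i offsets; pandas' shift expresses the same
# 24-hour lag while keeping the index aligned. A toy sketch:
import pandas as pd

toy = pd.DataFrame({'lmp': range(72)})        # three days of hourly values
lagged = pd.concat(
    [toy['lmp'].shift(24 * i).rename('lmp_lag%dd' % i) for i in range(3)],
    axis=1,
).dropna()                                    # keep rows with full history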
antwonFCPsales, jacksonFCPsales, totalFCPSales) = 0, 0, 0, 0, 0\n(jaelesiaDEPPsales, tekDEPPsales, antwonDEPPsales, jacksonDEPPsales,\n totalDEPPsales) = 0, 0, 0, 0, 0\n\nsupervisorIDs = {\"aervin\": 2062007, \"jnickerson\": 2062001, \"tlevon\": 2062007,\n \"jacksonn\": 2062047, \"jabram\": 2062017,\n \"iqr_acollins\": 2062072, \"jmoore\": 206223, \"mayala\": 2062002}\n\nhtml = topOfTable\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n# Get the calls handled for each agent\n# The format returned is a 2 dimensional array with each agent and their calls\n# represented as:\n# [agent ID, Calls Handled, Sales Calls Handled] in the return array\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\ncalls_handled = get_calls_handled(callsHandledReportLocation(reportDate))\n\n# Sum up the calls handled for each supervisor and for the whole of iQor\nfor item in calls_handled:\n agentID = item[0]\n if agentID in jaelesiaTeam:\n jaelesiaTotalCallsHandled += item[1]\n jaelesiaSalesCallsHandled += item[2]\n totalCallsHandled += item[1]\n totalSalesCallsHandled += item[2]\n if agentID in tekTeam:\n tekTotalCallsHandled += item[1]\n tekSalesCallsHandled += item[2]\n totalCallsHandled += item[1]\n totalSalesCallsHandled += item[2]\n if agentID in antwonTeam:\n antwonTotalCallsHandled += item[1]\n antwonSalesCallsHandled += item[2]\n totalCallsHandled += item[1]\n totalSalesCallsHandled += item[2]\n if agentID in jacksonTeam:\n jacksonTotalCallsHandled += item[1]\n jacksonSalesCallsHandled += item[2]\n totalCallsHandled += item[1]\n totalSalesCallsHandled += item[2]\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n# Gather up all the orders from the big bounce sales report\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\npogo_sales = get_pogo_sales(pogoSalesReportLocation(reportDate))\n\n# Team leads also submit POGO orders with text POGO ID rather than numeric ID\n# Replace the team lead text POGO agent IDs with the numeric AVAYA IDs\nfor id in pogo_sales:\n if (type(id) == str):\n try:\n pogo_sales[pogo_sales.index(id)] = supervisorIDs[id]\n except:\n pass\n\n# Sum up the POGO sales for each supervisor and for the whole of iQor\nfor agentID in pogo_sales:\n if agentID in jaelesiaTeam:\n jaelesiaTotalSales += 1\n totalSales += 1\n if agentID in tekTeam:\n tekTotalSales += 1\n totalSales += 1\n if agentID in antwonTeam:\n antwonTotalSales += 1\n totalSales += 1\n if agentID in jacksonTeam:\n jacksonTotalSales += 1\n totalSales += 1\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n# Gather up the FCP sales from the FCP report\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\nfcp_sales = get_fcp_sales(fcpReportLocation(reportDate), reportDate)\n\n# Sum up the fcp sales for each supervisor and for the whole of iQor\nfor agentID in fcp_sales:\n if agentID in jaelesiaTeam:\n jaelesiaFCPsales += 1\n totalFCPSales += 1\n if 
agentID in tekTeam:\n tekFCPsales += 1\n totalFCPSales += 1\n if agentID in antwonTeam:\n antwonFCPsales += 1\n totalFCPSales += 1\n if agentID in jacksonTeam:\n jacksonFCPsales += 1\n totalFCPSales += 1\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n# Gather up the DEPP sales from the Products report\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\nDEPP_sales_all = get_DEPP_sales(DEPPreportLocation(reportDate))\n# print(DEPP_sales)\n\n# remove any duplicates - there is probably a better way to do this!\nDUPs_removed = []\nfor DEPP in DEPP_sales_all:\n if DEPP not in DUPs_removed:\n DUPs_removed.append(DEPP)\n\nDEPP_sales_all = DUPs_removed\n\nDEPP_sales = []\n\nfor sale in DEPP_sales_all:\n DEPP_sales.append(sale[0])\n\n# print(DEPP_sales)\n\nfor id in DEPP_sales:\n if (type(id) == str):\n try:\n DEPP_sales[DEPP_sales.index(id)] = supervisorIDs[id]\n except:\n pass\n\n# Sum up the DEPP sales for each supervisor and for the whole of iQor\nfor agentID in DEPP_sales:\n if agentID in jaelesiaTeam:\n jaelesiaDEPPsales += 1\n totalDEPPsales += 1\n if agentID in tekTeam:\n tekDEPPsales += 1\n totalDEPPsales += 1\n if agentID in antwonTeam:\n antwonDEPPsales += 1\n totalDEPPsales += 1\n if agentID in jacksonTeam:\n jacksonDEPPsales += 1\n totalDEPPsales += 1\n\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n# Run through each entry in the tableNames and build the HTML string to be\n# attached to the body of the email\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\nfor agentRow in tableNames:\n agentID = agentRow[0]\n agentName = agentRow[1]\n callsHandled = \"\"\n salesCallsHandled = \"\"\n bounceSales = \"\"\n closeRate = \"\"\n FCPSales = \"\"\n DEPPSales = \"\"\n closeRateStart = closeRateStartNoColor\n supCloseRateStart = supCloseRateStartNoColor\n\n # This is executed if it is an agent and not a supervisor\n if (type(agentID) == int): # only agent have numeric IDs\n\n # Get the agent calls handled and sales calls handled\n for item in calls_handled: # check each nested list\n if (agentID == item[0]):\n callsHandledInteger = item[1]\n salesCallsHandledInteger = item[2]\n if callsHandledInteger > 0:\n callsHandled = str(int(item[1]))\n salesCallsHandled = str(int(item[2]))\n\n # Get the agent Bounce Sales\n if (callsHandled is not \"\"):\n bounceSales = str(pogo_sales.count(agentID))\n bounceSalesInteger = pogo_sales.count(agentID)\n\n # Get the agent FCP Sales\n if (callsHandled is not \"\"):\n FCPSales = str(fcp_sales.count(agentID))\n FCPSalesInteger = fcp_sales.count(agentID)\n\n # Get the agent Close Rate\n if (salesCallsHandled is not \"\"):\n if (int(salesCallsHandled) > 0):\n closeRate = ((bounceSalesInteger + FCPSalesInteger) /\n salesCallsHandledInteger * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate > 49:\n closeRateStart = closeRateStartGreen\n elif closeRate > 39:\n closeRateStart = closeRateStartYellow\n else:\n closeRateStart = closeRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n # Get the agent DEPP Sales\n if (callsHandled is not \"\"):\n DEPPSales = str(DEPP_sales.count(agentID))\n\n # Add 
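# Each tally loop in this report repeats the same four-team pattern; a
# sketch of the bookkeeping with collections.Counter, where the teams
# mapping stands in for the jaelesiaTeam/tekTeam/antwonTeam/jacksonTeam
# lists imported from data_files:
from collections import Counter

def tally_by_team(agent_ids, teams):
    counts = Counter()
    for agent_id in agent_ids:
        for team, members in teams.items():
            if agent_id in members:
                counts[team] += 1
                counts['total'] += 1
    return counts

# tally_by_team([1, 2, 2], {'tek': {2}, 'antwon': {1}})
# -> Counter({'total': 3, 'tek': 2, 'antwon': 1})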
the HTML string for the agent row\n agentID = str(agentID)\n html += (agentRowStart\n + agentIDStart + agentID + agentIDEnd\n + agentNameStart + agentName + agentNameEnd\n + callsHandledStart + callsHandled + callsHandledEnd\n + salesCallsHandledStart + salesCallsHandled\n + salesCallsHandledEnd\n + bounceSalesStart + bounceSales + bounceSalesEnd\n + closeRateStart + closeRate + closeRateEnd\n + FCPSalesStart + FCPSales + FCPSalesEnd\n + DEPPSalesStart + DEPPSales + DEPPSalesEnd\n + agentRowEnd)\n\n # This is executed if it is a supervisor\n if (agentID == 'jaelesia' or agentID == 'tek' or\n agentID == 'antwon' or agentID == 'jackson'):\n\n if (agentID == 'jaelesia'):\n callsHandled = str(int(jaelesiaTotalCallsHandled)\n ) if jaelesiaTotalCallsHandled else \"\"\n salesCallsHandled = str(int(jaelesiaSalesCallsHandled)\n ) if jaelesiaTotalCallsHandled else \"\"\n bounceSales = (str(jaelesiaTotalSales)\n if jaelesiaSalesCallsHandled >= 0 else \"\")\n FCPSales = (str(jaelesiaFCPsales)\n if jaelesiaSalesCallsHandled >= 0 else \"\")\n DEPPSales = (str(jaelesiaDEPPsales)\n if jaelesiaTotalCallsHandled >= 0 else \"\")\n\n elif (agentID == 'tek'):\n callsHandled = (str(int(tekTotalCallsHandled))\n if tekTotalCallsHandled else \"\")\n salesCallsHandled = (str(int(tekSalesCallsHandled))\n if tekTotalCallsHandled else \"\")\n bounceSales = str(tekTotalSales) if tekSalesCallsHandled >= 0 else \"\"\n FCPSales = str(tekFCPsales) if tekSalesCallsHandled >= 0 else \"\"\n DEPPSales = str(tekDEPPsales) if tekTotalCallsHandled >= 0 else \"\"\n\n elif (agentID == 'antwon'):\n callsHandled = (str(int(antwonTotalCallsHandled))\n if antwonTotalCallsHandled else \"\")\n salesCallsHandled = (str(int(antwonSalesCallsHandled))\n if antwonTotalCallsHandled else \"\")\n bounceSales = (str(antwonTotalSales)\n if (antwonSalesCallsHandled >= 0\n and antwonTotalSales >= 0) else \"\")\n FCPSales = (str(antwonFCPsales)\n if (antwonSalesCallsHandled >= 0\n and antwonTotalSales >= 0) else \"\")\n DEPPSales = (str(antwonDEPPsales)\n if (antwonSalesCallsHandled >= 0\n and antwonTotalSales >= 0) else \"\")\n\n elif (agentID == 'jackson'):\n callsHandled = (str(int(jacksonTotalCallsHandled))\n if jacksonTotalCallsHandled else \"\")\n salesCallsHandled = (str(int(jacksonSalesCallsHandled))\n if jacksonTotalCallsHandled else \"\")\n bounceSales = (str(jacksonTotalSales)\n if (jacksonSalesCallsHandled >= 0\n and jacksonTotalSales >= 0) else \"\")\n FCPSales = (str(jacksonFCPsales)\n if (jacksonSalesCallsHandled >= 0\n and jacksonTotalSales >= 0) else \"\")\n DEPPSales = (str(jacksonDEPPsales)\n if (jacksonSalesCallsHandled >= 0\n and jacksonTotalSales >= 0) else \"\")\n\n # Calculate Jaelesia's close rate and the colors for her cells\n if (agentID == 'jaelesia'):\n if (jaelesiaSalesCallsHandled is not \"\"):\n if (int(jaelesiaSalesCallsHandled) > 0):\n closeRate = ((jaelesiaTotalSales + jaelesiaFCPsales) /\n jaelesiaSalesCallsHandled * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate >= 50:\n supCloseRateStart = supCloseRateStartGreen\n elif closeRate >= 40:\n supCloseRateStart = supCloseRateStartYellow\n else:\n supCloseRateStart = supCloseRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n # Calculate Tek's close rate and the colors for his cells\n if (agentID == 'tek'):\n if (tekSalesCallsHandled is not \"\"):\n if (int(tekSalesCallsHandled) > 0):\n closeRate = ((tekTotalSales + tekFCPsales) /\n tekSalesCallsHandled * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate >= 50:\n supCloseRateStart = 
supCloseRateStartGreen\n elif closeRate >= 40:\n supCloseRateStart = supCloseRateStartYellow\n else:\n supCloseRateStart = supCloseRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n # Calculate Antwon's close rate and the colors for his cells\n if (agentID == 'antwon'):\n if (antwonSalesCallsHandled is not \"\"):\n if (int(antwonSalesCallsHandled) > 0):\n closeRate = ((antwonTotalSales + antwonFCPsales) /\n antwonSalesCallsHandled * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate >= 50:\n supCloseRateStart = supCloseRateStartGreen\n elif closeRate >= 40:\n supCloseRateStart = supCloseRateStartYellow\n else:\n supCloseRateStart = supCloseRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n if (agentID == 'jackson'):\n if (jacksonSalesCallsHandled is not \"\"):\n if (int(jacksonSalesCallsHandled) > 0):\n closeRate = ((jacksonTotalSales + jacksonFCPsales) /\n jacksonSalesCallsHandled * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate >= 50:\n supCloseRateStart = supCloseRateStartGreen\n elif closeRate >= 40:\n supCloseRateStart = supCloseRateStartYellow\n else:\n supCloseRateStart = supCloseRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n # Add the HTMl string for the supervisor\n agentID = \" \"\n html += (supRowStart\n + supIDStart + agentID + agentIDEnd\n + supNameStart + agentName + agentNameEnd\n + supCallsHandledStart + callsHandled + callsHandledEnd\n + supSalesCallsHandledStart + salesCallsHandled\n + salesCallsHandledEnd\n + supBounceSalesStart + bounceSales + bounceSalesEnd\n + supCloseRateStart + closeRate + closeRateEnd\n + supFCPSalesStart + FCPSales + FCPSalesEnd\n + supDEPPSalesStart + DEPPSales + DEPPSalesEnd\n + supRowEnd)\n\n # This is executed if it is grand Total\n if agentID == 'grandTotal':\n callsHandled = str(int(totalCallsHandled))\n salesCallsHandled = str(int(totalSalesCallsHandled))\n bounceSales = str(totalSales)\n FCPSales = str(totalFCPSales)\n DEPPSales = str(totalDEPPsales)\n\n if (totalSalesCallsHandled is not \"\"):\n if (int(totalSalesCallsHandled) > 0):\n closeRate = ((totalSales + totalFCPSales) /\n totalSalesCallsHandled * 100.00)\n closeRate = int(round(closeRate, 0))\n if closeRate >= 50:\n supCloseRateStart = gTotalCloseRateStartGreen\n elif closeRate >= 40:\n supCloseRateStart = gTotalCloseRateStartYellow\n else:\n supCloseRateStart = gTotalCloseRateStartRed\n closeRate = str(closeRate) + \"%\"\n\n # Add the HTML string for the Grand Total\n agentID = \" \"\n html += (grandTotalRowStart\n + gTotalIDStart + agentID + agentIDEnd\n + gTotalNameStart + agentName + agentNameEnd\n + gTotalCallsHandledStart + callsHandled + callsHandledEnd\n + gTotalSalesCallsHandledStart + salesCallsHandled\n + salesCallsHandledEnd\n + gTotalBounceSalesStart + bounceSales + bounceSalesEnd\n + supCloseRateStart + closeRate + closeRateEnd\n + gTotalFCPSalesStart + FCPSales + FCPSalesEnd\n + gTotalDEPPSalesStart + DEPPSales + DEPPSalesEnd\n + grandTotalRowEnd)\n\n# ------------------------------------------------------------------------------\n# send email\n# ------------------------------------------------------------------------------\noutlook = win32.Dispatch('outlook.application')\nmail = outlook.CreateItem(0)\n\ncurrentDate = datetime.now().strftime(\"%m-%d-%y\")\ncurrentTime = time.strftime(\"%#I:%M %p\")\n\ntry:\n int(arguments[0])\n reportDate = arguments[0]\n reportDate = reportDate[0:2] + '-' + reportDate[2:4] + '-' + reportDate[6:]\n subject = 'iQor Sales Report ' + reportDate + ' End of Business'\n additionalEmailList = \"; 
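# The close-rate blocks repeat the same arithmetic and the same 50/40
# thresholds per supervisor; a sketch of that rule as one helper, where
# 'green'/'yellow'/'red' stand in for the styled cell snippets from
# tableformat:
def close_rate(bounce_sales, fcp_sales, sales_calls):
    if not sales_calls:
        return '', 'none'
    rate = int(round((bounce_sales + fcp_sales) * 100.0 / sales_calls))
    if rate >= 50:
        style = 'green'
    elif rate >= 40:
        style = 'yellow'
    else:
        style = 'red'
    return str(rate) + '%', style

# close_rate(3, 2, 10) -> ('50%', 'green')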
\".join(arguments[1:])\n\nexcept:\n reportDate = ''\n subject = 'iQor Sales Update ' + currentDate + ' ' + currentTime\n additionalEmailList = \"; \".join(arguments[0:])\n\nmail.To = additionalEmailList + '; jackson.ndiho@iqor.com'\nmail.Subject = subject\nmail.HtmlBody = subject + \":\" + html\nmail.send\n\nprint(\"\\niQor Sales email sent to: \" + additionalEmailList\n + \"; jackson.ndiho@iqor.com \\nat \" + currentDate + \" \" + currentTime \n + \"\\n\\nDone.......\")\n","sub_path":"salesemail.py","file_name":"salesemail.py","file_ext":"py","file_size_in_byte":20671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"372677671","text":"from datetime import datetime, timedelta\nimport mysql\nfrom mysql.connector import errorcode\nfrom decimal import Decimal\n\nclass mean_price:\n\n\n def __init__(self, currency, currency_name):\n self.currency = currency\n self.currency_name = currency_name\n # mean interval in seconds\n self.interval = 60*60\n\n def get_mean_prices(self, end_datetime, sql_obj):\n\n delta = timedelta(days=0, seconds=self.interval)\n\n self.end_datetime = end_datetime\n self.start_datetime = self.end_datetime-delta\n\n # same datetime per CMC api means same price value\n query = (\n \"SELECT DISTINCT(datetime), price_usd FROM prices WHERE currency = %s \"\n \"AND datetime >= %s\"\n \"AND datetime <= %s\")\n\n try:\n sql_obj.execute(query,\n (self.currency, self.start_datetime, self.end_datetime)\n )\n results = sql_obj.cur.fetchall()\n\n except mysql.connector.Error as err:\n raise\n\n else:\n if(len(results)>0):\n self.mean_usd = sum([Decimal(x[1]) for x in results]) / Decimal(len(results))\n return True\n else:\n raise Exception(\"Not enough results from API to calculate %s-min mean\" % (int(self.interval)/60))\n\n\n def save(self, sql_obj):\n\n query = (\n \"INSERT INTO mean_prices (currency, `interval`, start_datetime, end_datetime, mean_usd) \"\n \"VALUES (%s, %s, %s, %s, %s)\")\n\n\n try:\n sql_obj.execute(query, (self.currency, self.interval, self.start_datetime, self.end_datetime, self.mean_usd))\n sql_obj.cnx.commit()\n\n except mysql.connector.Error as err:\n raise\n\n else:\n return True\n","sub_path":"modules/mean_prices.py","file_name":"mean_prices.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"221244024","text":"#!/usr/bin/env python3\n\"\"\"Mural. Kickstart 2019 practice round. 
OMG OMG!\n\nAlvaro Leal , 2019\n\"\"\"\nfrom math import ceil\n# import pytest\nfrom datetime import datetime\nimport sys\n\n# def get_max_subarray_sum(mural_size, beauty_values):\n# subarray_length = ceil(mural_size / 2)\n# max_subarray_sum = 0\n# for i in range(0, mural_size - subarray_length + 1):\n# subarray_sum = sum(beauty_values[i:i + subarray_length], 0)\n# if subarray_sum > max_subarray_sum:\n# max_subarray_sum = subarray_sum\n# return max_subarray_sum\n\n\ndef get_max_subarray_sum(mural_size, beauty_values):\n subarray_length = ceil(mural_size / 2)\n subarray_sum = sum(beauty_values[0:subarray_length], 0)\n max_subarray_sum = subarray_sum\n\n for i in range(1, mural_size - subarray_length + 1):\n subarray_sum -= beauty_values[i - 1]\n subarray_sum += beauty_values[i + subarray_length - 1]\n if subarray_sum > max_subarray_sum:\n max_subarray_sum = subarray_sum\n return max_subarray_sum\n\n\ndef process_google_testcases(input_file, output_file):\n ret = []\n lines = []\n\n with open(input_file, 'r') as fp:\n lines = fp.readlines()\n\n line = 1\n test_case = 1\n while line < len(lines):\n mural_size = int(lines[line])\n beauty_values = [int(n) for n in lines[line + 1][:-1]]\n max_mural = get_max_subarray_sum(mural_size, beauty_values)\n ret.append(\"Case #{0}: {1}\\n\".format(test_case, max_mural))\n line += 2\n test_case += 1\n\n with open(output_file, 'w') as fp:\n fp.writelines(ret)\n\n\ndef main():\n if len(sys.argv) == 1:\n test_cases = int(input())\n for test_case in range(0, test_cases):\n mural_size = int(input())\n beauty_values = [int(c) for c in input()]\n print(\"Case #{0}: {1}\"\n .format(test_case + 1,\n get_max_subarray_sum(mural_size, beauty_values)))\n\n else:\n if sys.argv[1] == \"test\":\n input_file = sys.argv[2]\n output_file = sys.argv[3]\n\n start_time = datetime.now()\n process_google_testcases(input_file, output_file)\n end_time = datetime.now()\n print('Duration: {}'.format(end_time - start_time))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2019/practice_round/mural/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241433637","text":"\"\"\"\nWrite a program which accepts a sequence of comma separated 4 digit binary numbers\nas its input and then check whether they are divisible by 5 or not.\nThe numbers that are divisible by 5 are to be printed in a comma separated sequence.\nExample:\n0100,0011,1010,1001\nThen the output should be:\n1010\n\"\"\"\n\n\ndef binary_to_decimal(bi):\n n1 = int(bi[0])\n n2 = int(bi[1])\n n3 = int(bi[2])\n n4 = int(bi[3])\n num = (n1 * 8) + (n2 * 4) + (n3 * 2) + (n4 * 1)\n return num\n\n\ndef binary_numbers():\n print(\"Please enter comma seperated binary numbers\")\n s = input()\n i = 0 # loop variable\n noc = s.count(\",\") # number of commas in original input\n noc += 1\n while i <= noc:\n comma = s.find(\",\") # position of next comma\n if comma == -1:\n break\n b = s[0: (comma+1)] # binary number extracted\n d = binary_to_decimal(b)\n if d % 5 == 0:\n print(b.replace(\",\", \"\"))\n s = s.replace(b, \"\")\n i += 1\n\n\nbinary_numbers()\n","sub_path":"Python/Tasks/binary_numbers.py","file_name":"binary_numbers.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524376643","text":"\"\"\"\n (c) 2020 Copyright, Real-Time Innovations, Inc. 
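# The binary_numbers record above tokenises by repeated find/replace and
# hand-multiplies digit weights; both steps have built-in equivalents.
# int(s, 2) does the base conversion, and split(',') tokenises without
# mutating the string, which also covers the final number that carries no
# trailing comma:
def divisible_by_five(csv_line):
    return [tok for tok in csv_line.split(',') if int(tok, 2) % 5 == 0]

# divisible_by_five('0100,0011,1010,1001') -> ['1010']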
All rights reserved.\n RTI grants Licensee a license to use, modify, compile, and create derivative\n works of the Software. Licensee has the right to distribute object form only\n for use with RTI products. The Software is provided \"as is\", with no warranty\n of any type, including any warranty for fitness for any purpose. RTI is under\n no obligation to maintain or support the Software. RTI shall not be liable for\n any incidental or consequential damages arising out of the use or inability to\n use the software.\n \"\"\"\n\nimport rti.connextdds as dds\nimport time\nimport argparse\nimport textwrap\n\n\n# Process incoming data in a listener, print out each sample\nclass CftListener(dds.DynamicData.NoOpDataReaderListener):\n def on_data_available(self, reader):\n with reader.take() as samples:\n for sample in filter(lambda s: s.info.valid, samples):\n print(sample.data)\n\n\ndef subscriber_main(domain_id, sample_count, is_cft):\n participant = dds.DomainParticipant(domain_id)\n\n cft_type = dds.QosProvider(\"cft.xml\").type(\"cft_lib\", \"cft\")\n topic = dds.DynamicData.Topic(participant, \"Example cft\", cft_type)\n\n if is_cft:\n # Create a CFT that filters for incoming data within a range\n topic = dds.DynamicData.ContentFilteredTopic(\n topic, \"ContentFilteredTopic\", dds.Filter(\"x >= %0 AND x <= %1\", [\"1\", \"4\"])\n )\n print(\n textwrap.dedent(\n \"\"\"\n ==========================\n Using CFT\n Filter: 1 <= x <= 4\n ==========================\"\"\"\n )\n )\n else:\n # Filtering disabled by default\n print(\n textwrap.dedent(\n \"\"\"\n ==========================\n Using Normal Topic\n ==========================\"\"\"\n )\n )\n\n reader_qos = dds.QosProvider.default.datareader_qos\n reader = dds.DynamicData.DataReader(dds.Subscriber(participant), topic, reader_qos)\n reader.bind_listener(CftListener(), dds.StatusMask.DATA_AVAILABLE)\n\n count = 0\n while (sample_count == 0) or (count < sample_count):\n time.sleep(1)\n\n if is_cft:\n if count == 10:\n # After 10 seconds, udpdate the filter range\n print(\n textwrap.dedent(\n \"\"\"\n ==========================\n Changing Filter Parameters\n Filter: 5 <= x <= 9\n ==========================\"\"\"\n )\n )\n topic.filter_parameters = [\"5\", \"9\"]\n if count == 20:\n # After 20 seconds, update the filter again\n print(\n textwrap.dedent(\n \"\"\"\n ==========================\n Changing Filter Parameters\n Filter: 3 <= x <= 9\n ==========================\"\"\"\n )\n )\n topic.filter_parameters = [\"3\", \"9\"]\n count += 1\n\n\nparser = argparse.ArgumentParser(\n description=\"RTI Connext DDS Example: Using CFTs (Subscriber)\"\n)\nparser.add_argument(\"-d\", \"--domain\", type=int, default=0, help=\"DDS Domain ID\")\nparser.add_argument(\n \"-c\", \"--count\", type=int, default=0, help=\"Number of samples to send\"\n)\nparser.add_argument(\n \"-f\", \"--filter\", action=\"store_true\", default=False, help=\"Use a CFT\"\n)\n\nargs = parser.parse_args()\nassert 0 <= args.domain < 233\nassert args.count >= 0\n\nsubscriber_main(args.domain, args.count, args.filter)\n","sub_path":"examples/content_filtered_topic/cft_subscriber.py","file_name":"cft_subscriber.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597247287","text":"# -*- coding: UTF-8 -*- \n'''\nAuthorized by Vlon Jang\nCreated on 2017-03-18\nBlog: www.wangqingbaidu.cn\nEmail: wangqingbaidu@gmail.com\nFrom kwai, www.kuaishou.com\n©2015-2017 All Rights Reserved.\n'''\nfrom 
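# A condensed sketch of the filter lifecycle the subscriber above
# demonstrates, limited to calls that already appear in that file; the CFT
# object is assumed to be the dds.DynamicData.ContentFilteredTopic created
# in subscriber_main:
def retune_filter(cft_topic, low, high):
    # Swapping filter_parameters at runtime retunes the range; the attached
    # DataReader keeps running and simply starts receiving the new window.
    cft_topic.filter_parameters = [str(low), str(high)]

# retune_filter(topic, 5, 9)   # mirrors the 10-second update above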
__future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os,re\nimport argparse\nfrom utils import Common\nimport json\n\nparser = argparse.ArgumentParser(description='Validate results.')\n\n#\"format: filename label1 label2 ...\"\nparser.add_argument('-predict', default='../cache/dictionary/multi_lda/multi_predict',\n help='Predictions path')\n#\"format: filename label1 label2 ...\"\nparser.add_argument('-true', default='../cache/dictionary/multi_lda/multi_valid.txt', \n help='True path')\nparser.add_argument('-atN', default=10, type=int, help='MAP@N')\nargs = parser.parse_args()\n\nhp = [12,124,86,123,135,29,62,7,8,125,25,18,89,83,31,22,110,14,5,81,53,13,\n 113,108,58,128,69,112,39,6,114,9,10,60,93,116,3,99,104,4,121,82,105,101,2,103,46,90]\n\ndef get_similar_set(category_path = '../cache/dictionary/multi_lda/multi_category',\n vocab_path = '../cache/dictionary/multi_lda/multi_vocab',\n firstN=None):\n similar_set = {}\n category = {}\n with open(vocab_path) as f:\n vocab = dict([(line.strip(), str(idx)) for idx, line in enumerate(f.readlines())])\n \n with open(category_path) as f:\n for line in f.readlines():\n items = line.strip().split()\n category_key = '_'.join(items[1:][:firstN])\n \n if vocab.has_key(items[0]):\n if category.has_key(category_key):\n category[category_key].append(vocab[items[0]])\n else:\n category[category_key] = [vocab[items[0]]]\n \n for k in category:\n for l in category[k]:\n similar_set[l] = category[k]\n \n return similar_set\n\nclass EvaluateResults(object):\n def __init__(self, predict_results = None, true_results = None, \n topK=10, atN=10,\n similar_set = None,\n WARNING=False):\n assert topK >= atN, 'topK %d must larger than atN %d' %(topK, atN)\n self.predict_results = predict_results\n self.true_results = true_results\n self.atN = atN\n self.similar_set = similar_set\n self.WARNING = WARNING\n self.invalidate_OP_OR = True\n self.invalidate_CP_CR = True\n self.invalidate_MAP = True\n self.no_mixed = {}\n \n self.evaluate()\n \n def __call__(self, predict_results = None, true_results = None, \n topK=10, atN=10):\n assert topK >= atN, 'topK %d must larger than atN %d' %(topK, atN)\n self.predict_results = predict_results\n self.true_results = true_results\n self.atN = atN\n self.invalidate_OP_OR = True\n self.invalidate_CP_CR = True\n self.invalidate_MAP = True\n self.no_mixed = {}\n \n self.evaluate()\n \n def __str__(self):\n print ('Total predictions %d out of %d' %(self.num_has_prediction, len(self.targets)))\n return \"C-P%%\\t|\\tC-R%%\\t|\\tC-F1\\t|\\tO-P%%\\t|\\tO-R%%\\t|\\tO-F1\\t|\\tMAP@%d%%\\n\"\\\n \"%.2f\\t|\\t%.2f\\t|\\t%.2f\\t|\\t%.2f\\t|\\t%.2f\\t|\\t%.2f\\t|\\t%.2f\"\\\n %(self.atN, self.CP, self.CR, self.CF1, self.OP, self.OR, self.OF1, self.MAP)\n \n def evaluate(self, psp=' |\\t|:', tsp=' |\\t|:'):\n assert os.path.exists(self.predict_results), 'Predictions file %s not exits' %self.predict_results\n assert os.path.exists(self.true_results), 'Predictions file %s not exits' %self.true_results\n \n self.predictions = predictions = {}\n self.targets = targets = {}\n \n with open(self.predict_results, 'r') as f:\n lines = f.readlines()\n for l in lines:\n items = re.split(psp, l.strip())\n items = l.strip().split()\n assert not predictions.has_key(items[0]), \\\n \"Sample %s can't have more than one prediction\" %items[0]\n predictions[items[0]] = items[1:]\n predictions[items[0]] = [i.split(':')[0] for i in items[1:]]\n \n with open(self.true_results, 'r') as f:\n lines = 
f.readlines()\n for l in lines:\n items = re.split(tsp, l.strip())\n assert not targets.has_key(items[0]), \\\n \"Sample %s can't have more than one true target\" %items[0]\n targets[items[0]] = items[1:]\n# tl = [i for i in items[1:] if int(i) in [90,46,103,2,101]]\n# if tl:\n# targets[items[0]] = tl\n \n self.num_has_prediction = 0\n for k in targets.keys():\n if predictions.has_key(k):\n self.num_has_prediction += 1\n \n self.calc_CP_CR()\n self.calc_OP_OR()\n self.calc_MAP(atN=self.atN)\n \n self.CP\n self.CR\n \n def calc_CP_CR(self, predictions = None, targets = None, print_info = False):\n if not (predictions and targets):\n predictions = self.predictions\n targets = self.targets\n self.invalidate_CP_CR = False\n else:\n self.invalidate_CP_CR = True\n \n num_has_prediction = 0\n \n # predict this : This sample is predicted to be l\n # true this : This sample target is l\n # hit this : This sample is predicted to be true of l\n self.per_class = {}\n \n for k in targets:\n if predictions.has_key(k):\n num_has_prediction += 1\n \n for l in targets[k]:\n if self.per_class.has_key(l):\n self.per_class[l]['true_this'] += 1\n else:\n self.per_class[l] = {}\n self.per_class[l]['true_this'] = 1\n self.per_class[l]['hit_this'] = 0\n self.per_class[l]['predict_this'] = 0\n \n if l in predictions[k]:\n self.per_class[l]['hit_this'] += 1\n \n for l in predictions[k]:\n if self.per_class.has_key(l):\n self.per_class[l]['predict_this'] += 1\n else:\n self.per_class[l] = {}\n self.per_class[l]['true_this'] = 0\n self.per_class[l]['hit_this'] = 0\n self.per_class[l]['predict_this'] = 1\n \n if print_info:\n print ('%d/%d\\tOP:%.5f\\tOR:%.5f' %(num_has_prediction, len(targets), self.CP, self.CR))\n \n def calc_OP_OR(self, predictions = None, targets = None, print_info = False):\n if not (predictions and targets):\n predictions = self.predictions\n targets = self.targets\n self.invalidate_OP_OR = False\n else:\n self.invalidate_OP_OR = True\n \n num_has_prediction = 0\n self._OP = []\n self._OR = []\n \n for k in targets:\n if predictions.has_key(k):\n num_has_prediction += 1\n predict_similar = predictions[k]\n target_similar = targets[k]\n if self.similar_set:\n predict_similar = []\n target_similar = []\n for s in predictions[k]:\n predict_similar += self.similar_set[s]\n for s in targets[k]:\n target_similar += self.similar_set[s]\n\n self._OP.append( len(set(predict_similar) & set(target_similar)) / len(predict_similar))\n self._OR.append( len(set(predict_similar) & set(target_similar)) / len(target_similar))\n if (len(set(predict_similar) & set(target_similar)) == 0):\n self.no_mixed[k] = {'p':predictions[k], 't':targets[k]}\n \n if print_info:\n print ('%d/%d\\tOP:%.5f\\tOR:%.5f' %(num_has_prediction, len(targets), self.OP, self.OR))\n \n def calc_MAP(self, atN = None, predictions = None, targets = None, print_info = False):\n assert type(atN) == int, 'Type of atN must be integer. 
Now %s' %type(atN)\n if not (predictions and targets and atN):\n predictions = self.predictions\n targets = self.targets\n atN = self.atN\n self.invalidate_MAP = False\n else:\n self.invalidate_MAP = True\n \n self._AP = []\n num_has_prediction = 0\n \n for k in targets:\n if predictions.has_key(k):\n num_has_prediction += 1\n hits = 0\n ap = 0\n assert len(predictions[k]) >= atN, \\\n 'Prediction length of %s is %d smaller than %s' %(k, len(predictions[k]), atN)\n for i in range(atN):\n if predictions[k][i] in targets[k]:\n hits += 1\n ap += hits / (i+1)\n self._AP.append(ap / len(targets[k]))\n \n if print_info:\n print ('%d/%d\\tMAP@%d:%.5f' %(num_has_prediction, len(targets), atN, self.MAP))\n \n \n @property\n def CP(self):\n if self.invalidate_CP_CR:\n self.calc_CP_CR()\n \n _CP = []\n self.CP_per_class = {}\n for c in self.per_class:\n if self.per_class[c]['predict_this']:\n p = self.per_class[c]['hit_this'] / self.per_class[c]['predict_this']\n _CP.append(p)\n self.CP_per_class[c] = p\n else:\n if self.WARNING:\n print ('WARNING: Number of samples predicted to be Class %s is zero' %str(c))\n self.CP_per_class = sorted(self.CP_per_class.items(), key=lambda x: x[1])\n return sum(_CP) / len(_CP) * 100\n \n @property\n def CR(self):\n if self.invalidate_CP_CR:\n self.calc_CP_CR()\n \n _CR = []\n self.CR_per_class = {}\n for c in self.per_class:\n if self.per_class[c]['true_this']:\n r = self.per_class[c]['hit_this'] / self.per_class[c]['true_this']\n _CR.append(r)\n self.CR_per_class[c] = r\n else:\n if self.WARNING:\n print ('WARNING: Number of samples whose true class %s is zero' %str(c))\n self.CR_per_class = sorted(self.CR_per_class.items(), key=lambda x: x[1])\n return sum(_CR) / len(_CR) * 100\n \n @property\n def OP(self):\n if self.invalidate_OP_OR:\n self.calc_OP_OR()\n return sum(self._OP) / len(self._OP) * 100\n \n @property\n def OR(self):\n if self.invalidate_OP_OR:\n self.calc_OP_OR()\n return sum(self._OR) / len(self._OR) * 100\n \n @property\n def CF1(self):\n return 2 / (1/self.CP + 1/self.CR)\n \n @property\n def OF1(self):\n return 2 / (1/self.OP + 1/self.OR)\n \n @property\n def MAP(self):\n if self.invalidate_MAP:\n self.calc_MAP()\n return sum(self._AP) / len(self._AP) * 100\n \nif __name__ == '__main__':\n similar_set = get_similar_set(firstN=1)\n cu = Common.CommonUtiler()\n \n e = EvaluateResults(args.predict, args.true, atN=1, similar_set=None)\n vocab, rev_vocab = cu.load_lda_vocab('../cache/dictionary/multi_lda/multi_vocab')\n category = cu.load_lda_category('../cache/dictionary/multi_lda/multi_category')\n print ('\\n'.join(['%.5f\\t%s\\t%s\\t%d\\t%s\\t%s' %(p, k, vocab[k], \n e.per_class[k]['predict_this'], \n e.per_class[k]['true_this'], \n category[vocab[k]]) \n for k, p in e.CP_per_class]))\n# print (','.join(['%s' %(k) for k, p in e.CP_per_class if p > 0.6]))\n \n count = 0\n json_output = []\n with open('no_mixed.txt', 'w') as f:\n for k in e.no_mixed:\n count += 1\n p = set([category[vocab[x]].split('_')[0] for x in e.no_mixed[k]['p']])\n t = set([category[vocab[x]].split('_')[0] for x in e.no_mixed[k]['t']])\n f.write('%s\\n%s\\n%s\\n' %(k, ','.join(p), ','.join(t)))\n json_output.append({'vid': k.split('_')[0],\n 'trueList': list(t),\n 'predictList': list(p)})\n \n with open('../cache/dictionary/no_mixed_visualize.txt', 'w') as f:\n f.write(json.dumps(json_output, ensure_ascii=False))\n print ('Total no mixed samples: %d' %count)\n \n print (e)\n \n \n 
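# A standalone restatement of the per-sample average precision that
# calc_MAP above accumulates: walk the top-N predictions, add the precision
# at each depth where a true label is hit, then normalise by the number of
# true labels:
def average_precision(predicted, relevant, at_n=10):
    hits, ap = 0, 0.0
    for i, label in enumerate(predicted[:at_n]):
        if label in relevant:
            hits += 1
            ap += hits / float(i + 1)
    return ap / len(relevant)

# average_precision(['a', 'x', 'b'], {'a', 'b'}, at_n=3)
# -> (1/1 + 2/3) / 2 = 0.8333...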
","sub_path":"utils/MultiEvaluation.py","file_name":"MultiEvaluation.py","file_ext":"py","file_size_in_byte":12935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16206425","text":"fin = open(\"request.in\", 'r')\nfout = open(\"request.out\", 'w')\n\nn = int(fin.readline().rstrip())\na =[]\nfor i in range(n):\n s, f= map(int, fin.readline().split())\n a.append((f, s))\na.sort()\nlast_f = 0\ncnt = 0\nfor i in range(n):\n if a[i][1] >= last_f:\n cnt += 1\n last_f = a[i][0]\nprint(cnt, file=fout)\nfin.close()\nfout.close()\n","sub_path":"anichkov-camp-2018/DAY08/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481020210","text":"# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# Copyright 2011 Justin Santa Barbara\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Generic Node base class for all workers that run on hosts.\"\"\"\n\nimport os\nimport random\nimport sys\n\nfrom oslo_config import cfg\nimport oslo_messaging as messaging\nfrom oslo_utils import importutils\nfrom oslo_concurrency import processutils\n\nfrom prototype import db\nfrom oslo_context import context\nfrom prototype.common import exception\nfrom prototype.common.i18n import _, _LE, _LW\nfrom oslo_log import log as logging\nfrom prototype.openstack.common import service\nfrom prototype.common import rpc\nfrom prototype.common import utils\nfrom prototype import version\nfrom prototype.common import wsgi\n\n\nfrom oslo_config import cfg\n\nfrom prototype.db import base\nfrom oslo_log import log as logging\nfrom prototype.openstack.common import periodic_task\n\n\nLOG = logging.getLogger(__name__)\n\nservice_opts = [\n cfg.IntOpt('report_interval',\n default=10,\n help='Seconds between nodes reporting state to datastore'),\n cfg.BoolOpt('periodic_enable',\n default=True,\n help='Enable periodic tasks'),\n cfg.IntOpt('periodic_fuzzy_delay',\n default=60,\n help='Range of seconds to randomly delay when starting the'\n ' periodic task scheduler to reduce stampeding.'\n ' (Disable by setting to 0)'),\n cfg.ListOpt('enabled_apis',\n default=['api'],\n help='A list of APIs to enable by default'),\n cfg.ListOpt('enabled_ssl_apis',\n default=[],\n help='A list of APIs with enabled SSL'),\n cfg.StrOpt('api_listen',\n default=\"0.0.0.0\",\n help='The IP address on which the OpenStack API will listen.'),\n cfg.IntOpt('api_listen_port',\n default=8000,\n help='The port on which the OpenStack API will listen.'),\n cfg.IntOpt('api_workers',\n help='Number of workers for OpenStack API service. 
The default '\n 'will be the number of CPUs available.'),\n cfg.IntOpt('service_down_time',\n default=60,\n help='Maximum time since last check-in for up service'),\n cfg.StrOpt('worker_manager',\n default='prototype.worker.manager.WorkerManager',\n help='Full class name for the Manager for console proxy'),\n ]\n\nCONF = cfg.CONF\nCONF.register_opts(service_opts)\nCONF.import_opt('host', 'prototype.config')\n\n\nclass ServiceBase(object):\n def __init__(self, host, type, topic, *args, **kwargs):\n self.host = host\n self.type = type\n self.topic = topic\n self.model = None\n self.context = context.get_admin_context()\n svcs = db.service_list(self.context, host=self.host, type=self.type, topic=self.topic)\n for svc in svcs:\n self.model = svc\n db.service_update(self.context, svc.id, svc)\n break\n if self.model == None:\n svc = {'host':self.host, 'type':self.type, 'topic':self.topic}\n self.model = db.service_create(self.context, svc)\n LOG.error(\"init ServiceInfo\")\n \n def sync():\n pass\n\n\n\n\nclass Manager(base.Base, periodic_task.PeriodicTasks):\n\n def __init__(self, host=None, db_driver=None, service_name='undefined'):\n if not host:\n host = CONF.host\n self.host = host\n self.backdoor_port = None\n self.service_name = service_name\n self.additional_endpoints = []\n super(Manager, self).__init__(db_driver)\n\n def periodic_tasks(self, context, raise_on_error=False):\n \"\"\"Tasks to be run at a periodic interval.\"\"\"\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)\n\n def init_host(self):\n \"\"\"Hook to do additional manager initialization when one requests\n the service be started. This is called before any service record\n is created.\n\n Child classes should override this method.\n \"\"\"\n pass\n\n def cleanup_host(self):\n \"\"\"Hook to do cleanup work when the service shuts down.\n\n Child classes should override this method.\n \"\"\"\n pass\n\n def pre_start_hook(self):\n \"\"\"Hook to provide the manager the ability to do additional\n start-up work before any RPC queues/consumers are created. This is\n called after other initialization has succeeded and a service\n record is created.\n\n Child classes should override this method.\n \"\"\"\n pass\n\n def post_start_hook(self):\n \"\"\"Hook to provide the manager the ability to do additional\n start-up work immediately after a service creates RPC consumers\n and starts 'running'.\n\n Child classes should override this method.\n \"\"\"\n pass\n\n\nclass RPCService(service.Service):\n \"\"\"Service object for binaries running on hosts.\n\n A service takes a manager and enables rpc by listening to queues based\n on topic. 
It also periodically runs tasks on the manager and reports\n it state to the database services table.\n \"\"\"\n\n def __init__(self, host, topic, manager, report_interval=None,\n periodic_enable=None, periodic_fuzzy_delay=None,\n periodic_interval_max=None, db_allowed=True,\n *args, **kwargs):\n super(RPCService, self).__init__()\n self.host = host\n self.topic = topic\n self.base = ServiceBase(host, 'rpc', topic)\n self.manager_class_name = manager\n manager_class = importutils.import_class(self.manager_class_name)\n self.manager = manager_class(host=self.host, *args, **kwargs)\n self.rpcserver = None\n self.report_interval = report_interval\n self.periodic_enable = periodic_enable\n self.periodic_fuzzy_delay = periodic_fuzzy_delay\n self.periodic_interval_max = periodic_interval_max\n self.saved_args, self.saved_kwargs = args, kwargs\n self.backdoor_port = None\n\n def start(self):\n verstr = version.version_string_with_package()\n LOG.debug(_('Starting %(topic)s node (version %(version)s)'),\n {'topic': self.topic, 'version': verstr})\n self.basic_config_check()\n self.manager.init_host()\n self.model_disconnected = False\n ctxt = context.get_admin_context()\n\n\n self.manager.pre_start_hook()\n\n if self.backdoor_port is not None:\n self.manager.backdoor_port = self.backdoor_port\n\n LOG.debug(\"Creating RPC server for service %s %s\", self.topic, self.host)\n\n target = messaging.Target(topic=self.topic, server=self.host)\n endpoints = [\n self.manager,\n rpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)\n ]\n endpoints.extend(self.manager.additional_endpoints)\n\n \n \n self.rpcserver = rpc.get_server(target, endpoints)\n self.rpcserver.start()\n\n self.manager.post_start_hook()\n\n\n if self.periodic_enable:\n if self.periodic_fuzzy_delay:\n initial_delay = random.randint(0, self.periodic_fuzzy_delay)\n else:\n initial_delay = None\n\n self.tg.add_dynamic_timer(self.periodic_tasks,\n initial_delay=initial_delay,\n periodic_interval_max=self.periodic_interval_max)\n\n\n\n def __getattr__(self, key):\n manager = self.__dict__.get('manager', None)\n return getattr(manager, key)\n\n @classmethod\n def create(cls, host=None, topic=None, manager=None,\n report_interval=None, periodic_enable=None,\n periodic_fuzzy_delay=None, periodic_interval_max=None,\n db_allowed=True):\n \"\"\"Instantiates class and passes back application object.\n\n :param host: defaults to CONF.host\n :param topic: defaults to bin_name - 'prototype-' part\n :param manager: defaults to CONF._manager\n :param report_interval: defaults to CONF.report_interval\n :param periodic_enable: defaults to CONF.periodic_enable\n :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay\n :param periodic_interval_max: if set, the max time to wait between runs\n\n \"\"\"\n if not host:\n host = CONF.host\n if not topic:\n topic = 'worker'\n if not manager:\n manager_cls = ('%s_manager' % topic)\n manager = CONF.get(manager_cls, None)\n if report_interval is None:\n report_interval = CONF.report_interval\n if periodic_enable is None:\n periodic_enable = CONF.periodic_enable\n if periodic_fuzzy_delay is None:\n periodic_fuzzy_delay = CONF.periodic_fuzzy_delay\n\n\n service_obj = cls(host, topic, manager,\n report_interval=report_interval,\n periodic_enable=periodic_enable,\n periodic_fuzzy_delay=periodic_fuzzy_delay,\n periodic_interval_max=periodic_interval_max,\n db_allowed=db_allowed)\n\n return service_obj\n\n def kill(self):\n \"\"\"Destroy the service object in the datastore.\"\"\"\n self.stop()\n try:\n 
self.conductor_api.service_destroy(context.get_admin_context(),\n self.service_id)\n except exception.NotFound:\n LOG.warning(_LW('Service killed that has no database entry'))\n\n def stop(self):\n try:\n self.rpcserver.stop()\n self.rpcserver.wait()\n except Exception:\n pass\n\n try:\n self.manager.cleanup_host()\n except Exception:\n LOG.exception(_LE('Service error occurred during cleanup_host'))\n pass\n\n super(RPCService, self).stop()\n\n def periodic_tasks(self, raise_on_error=False):\n \"\"\"Tasks to be run at a periodic interval.\"\"\"\n ctxt = context.get_admin_context()\n return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)\n\n def basic_config_check(self):\n \"\"\"Perform basic config checks before starting processing.\"\"\"\n # Make sure the tempdir exists and is writable\n try:\n with utils.tempdir():\n pass\n except Exception as e:\n LOG.error(_LE('Temporary directory is invalid: %s'), e)\n sys.exit(1)\n\n\nclass WSGIService(object):\n \"\"\"Provides ability to launch API from a 'paste' configuration.\"\"\"\n\n def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):\n \"\"\"Initialize, but do not start the WSGI server.\n\n :param name: The name of the WSGI server given to the loader.\n :param loader: Loads the WSGI application using the given name.\n :returns: None\n\n \"\"\"\n self.base = ServiceBase(CONF.host, 'wsgi', name)\n self.name = name\n self.manager = self._get_manager()\n self.loader = loader or wsgi.Loader()\n self.app = self.loader.load_app(name)\n # inherit all compute_api worker counts from osapi_compute\n if name.startswith('openstack_compute_api'):\n wname = 'osapi_compute'\n else:\n wname = name\n self.host = getattr(CONF, '%s_listen' % name, \"0.0.0.0\")\n self.port = getattr(CONF, '%s_listen_port' % name, 0)\n self.workers = (getattr(CONF, '%s_workers' % wname, None) or\n processutils.get_worker_count())\n if self.workers and self.workers < 1:\n worker_name = '%s_workers' % name\n msg = (_(\"%(worker_name)s value of %(workers)s is invalid, \"\n \"must be greater than 0\") %\n {'worker_name': worker_name,\n 'workers': str(self.workers)})\n raise exception.InvalidInput(msg)\n self.use_ssl = use_ssl\n self.server = wsgi.Server(name,\n self.app,\n host=self.host,\n port=self.port,\n use_ssl=self.use_ssl,\n max_url_len=max_url_len)\n # Pull back actual port used\n self.port = self.server.port\n self.backdoor_port = None\n\n def reset(self):\n \"\"\"Reset server greenpool size to default.\n\n :returns: None\n\n \"\"\"\n self.server.reset()\n\n def _get_manager(self):\n \"\"\"Initialize a Manager object appropriate for this service.\n\n Use the service name to look up a Manager subclass from the\n configuration and initialize an instance. 
If no class name\n is configured, just return None.\n\n :returns: a Manager instance, or None.\n\n \"\"\"\n fl = '%s_manager' % self.name\n if fl not in CONF:\n return None\n\n manager_class_name = CONF.get(fl, None)\n if not manager_class_name:\n return None\n\n manager_class = importutils.import_class(manager_class_name)\n return manager_class()\n\n def start(self):\n \"\"\"Start serving this service using loaded configuration.\n\n Also, retrieve updated port number in case '0' was passed in, which\n indicates a random port should be used.\n\n :returns: None\n\n \"\"\"\n if self.manager:\n self.manager.init_host()\n self.manager.pre_start_hook()\n if self.backdoor_port is not None:\n self.manager.backdoor_port = self.backdoor_port\n self.server.start()\n if self.manager:\n self.manager.post_start_hook()\n\n def stop(self):\n \"\"\"Stop serving this API.\n\n :returns: None\n\n \"\"\"\n self.server.stop()\n\n def wait(self):\n \"\"\"Wait for the service to stop serving this API.\n\n :returns: None\n\n \"\"\"\n self.server.wait()\n\n\ndef process_launcher():\n return service.ProcessLauncher()\n\n\n# NOTE(vish): the global launcher is to maintain the existing\n# functionality of calling service.serve +\n# service.wait\n_launcher = None\n\n\ndef serve(server, workers=None):\n global _launcher\n if _launcher:\n raise RuntimeError(_('serve() can only be called once'))\n\n _launcher = service.launch(server, workers=workers)\n\n\ndef wait():\n _launcher.wait()\n","sub_path":"prototype/prototype/common/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":15145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237973372","text":"\r\nimport os\r\nfrom threading import Thread\r\nfrom queue import Queue\r\n\r\n\r\n#importing framework defined modules\r\nfrom bta_util_common import obj_util_comm as utility\r\nimport bta_util_constants as CONSTANTS\r\nfrom bta_serv_eventcontroller import event_cnt, clsServEvent\r\nfrom bta_appl_bsccnt_parser import clsBSCCntParser\r\nimport bta_util_serv_ftp as taftp\r\nfrom bta_util_serv_ftp import clsUtilFTP\r\nfrom bta_appl_bsccnt_bscparameters import clsBSCParameters\r\nfrom bta_util_timer import clsTimerObject\r\nfrom bta_appl_bsccnt_populate import clsBSCCntPopulateCounterData\r\n\r\n\r\nDATE_TIME_SEARCH_PATTERN = \"(?:[\\d]{4}\\-[\\d]{2}\\-[\\d]{2}__[\\d]{2}\\-[\\d]{2})\"\r\nDATE_PATTERN = \"%Y-%m-%d\"\r\nTIME_PATTERN = \"%H-%M\"\r\nWRITE_BINARY_MODE = \"wb\"\r\nBCF_ID_SEPARATOR = \":\"\r\nFETCH_DURATION_SEPARATOR = \":\"\r\nCOUNTER_FILE_FORMAT = \"0000\"\r\n\r\nMODULE_NAME = \"bta_appl_bsccnt_controller\"\r\n\r\n\r\n'''\r\nBSC Counter Controller Processes all the bsc counter files from a given folder which fulfills\r\nthe given date time constraint.\r\n\r\nIt provides an API to process bsc counter files in last 24 hours. 
All the DAT files should be stored in a\r\npath as given in the configuration file.\r\n \r\n'''\r\nclass clsBSCCntController():\r\n def __init__( self, setup_config, bsc_intf_wrapper, dict_cmd_file, db_hdlr, timer_cnt, logger = None ): \r\n try:\r\n self.logger = logger\r\n self.timer_cnt = timer_cnt\r\n self.constraint_dict = None\r\n self.fetch_count = None\r\n self.fetch_iteration = None\r\n self.fetch_duration = None\r\n self.end_time = None\r\n self.fetch_status = None \r\n self.bsc_intf_wrapper = bsc_intf_wrapper\r\n self.counter_fetch_timer = None \r\n self.parser = None\r\n self.date_time_format = None\r\n self.ftp_client = None\r\n self.counter_file_dictionary = {}\r\n self.files_to_populate = {}\r\n self.dict_cmd_file = dict_cmd_file\r\n self.setup_config = setup_config\r\n self.db_hdlr = db_hdlr\r\n self.loop = True\r\n self.configuration = setup_config.obj_gen_setup_info.counter_file_configuration \r\n \r\n self.dir_path = self.configuration.dir_path\r\n self.counter_files_path_at_bsc = self.configuration.file_path_at_bsc\r\n \r\n obj_bsc_intf = setup_config.dict_obj_bts_site_config[setup_config.site_index].dict_obj_interface[CONSTANTS.IF_BSC]\r\n tool_id = obj_bsc_intf.dict_instrument_type[ CONSTANTS.CMD_TYPE_BSC ][0]\r\n \r\n self.bsc_ip_address = obj_bsc_intf.dict_obj_instruments[tool_id].ip\r\n self.bsc_user_name = obj_bsc_intf.dict_obj_instruments[tool_id].user_name\r\n self.bsc_password = obj_bsc_intf.dict_obj_instruments[tool_id].password\r\n self.bsc_version = obj_bsc_intf.dict_obj_instruments[tool_id].version\r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".thrdBSCCntController.__init__, \" + str( detail ))\r\n \r\n def function2( self ):\r\n try:\r\n self.constraint_dict = None\r\n self.fetch_count = 0\r\n self.fetch_iteration = 0\r\n self.fetch_duration = 0\r\n self.end_time = 0\r\n self.fetch_status = CONSTANTS.STATUS_COMPLETED \r\n self.counter_fetch_timer = None\r\n \r\n dt_separator = self.configuration.date_separator\r\n tm_separator = self.configuration.time_separator\r\n dt_tm_separator = self.configuration.date_time_separator\r\n self.date_time_format = \"%Y\" + dt_separator + \"%m\" + dt_separator + \"%d\" + dt_tm_separator + \"%H\" + tm_separator + \"%M\"\r\n self.parser = clsBSCCntParser( self.dict_cmd_file[CONSTANTS.FILE_TYPE_COUNTER_PARSER], self.bsc_version, self.logger )\r\n except (Exception, Exception) as detail:\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.initialize, \" + str(detail)\r\n self.logString( err_detail, CONSTANTS.ERROR_LEVEL )\r\n \r\n raise Exception(err_detail)\r\n \r\n def parseAllCounterFiles( self ):\r\n try:\r\n dict_constraints = None\r\n for root, dirs, file_list in os.walk( self.dir_path ): \r\n self.logString( \"Number. 
of Files to parse \" + str( len( file_list ) ), CONSTANTS.INFO_LEVEL )\r\n \r\n for file_name in file_list: # Parse each file\r\n abs_file_name = os.path.join( root, file_name ) \r\n self.parser.decodeDatFileIfNeeded( abs_file_name, self.counter_file_dictionary, self.files_to_populate, dict_constraints )\r\n \r\n self.logString( \"File Dictionary\" + str( self.counter_file_dictionary ), CONSTANTS.INFO_LEVEL )\r\n except Exception as detail:\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.parseAllCounterFiles, \" + str( detail )\r\n self.logString( err_detail, CONSTANTS.ERROR_LEVEL )\r\n \r\n def populateAllCounterInfo( self ):\r\n try:\r\n populate_counters = None\r\n cnt_file = None\r\n command = None\r\n self.logString( \"Files to be populated \" + str( self.files_to_populate ), CONSTANTS.INFO_LEVEL )\r\n \r\n if len( self.files_to_populate ) > 0:\r\n populate_counters = clsBSCCntPopulateCounterData( self.logger )\r\n if self.db_hdlr:\r\n db_conn = self.db_hdlr.getDBConnection( CONSTANTS.DB_TYPE_COUNTER )\r\n \r\n for cnt_file in list(self.files_to_populate.values()):\r\n self.logString( \"Populating file of START_TIME:\" + str( cnt_file.period_start ), CONSTANTS.INFO_LEVEL )\r\n populate_counters.getInfoCounters(self.db_hdlr, db_conn, CONSTANTS.DB_TYPE_COUNTER, cnt_file)\r\n try:\r\n command1 = \"EXEC [dbo].[sp_KPICalculator] @datetimevalue = '\" + str(cnt_file.period_start) + \"'\"\r\n self.logString( \"KPI Calculator: \" + str(command1), CONSTANTS.INFO_LEVEL )\r\n self.db_hdlr.executeQueryOnConnection( db_conn, command1, CONSTANTS.DB_TYPE_COUNTER, CONSTANTS.DB_OPERATION_INSERT )\r\n except (Exception, Exception) as detail:\r\n self.logString( \"thrdBSCCntController.populateAllCounterInfo : \" + str(detail), CONSTANTS.ERROR_LEVEL )\r\n \r\n self.db_hdlr.closeDBConnection(db_conn)\r\n self.files_to_populate.clear()\r\n else:\r\n raise Exception(\"Database Connection Handler Not Found.\")\r\n else:\r\n raise Exception(\"No Counter files to be populated\")\r\n except (Exception, Exception) as detail:\r\n self.logString( MODULE_NAME + \".thrdBSCCntController.populateAllCounterInfo, \" +\\\r\n str( detail ), CONSTANTS.ERROR_LEVEL )\r\n \r\n def getFilesfromBSC( self ):\r\n try:\r\n self.ftp_client = clsUtilFTP( self.bsc_ip_address, self.bsc_user_name, self.bsc_password )\r\n self.ftp_client.connect()\r\n cmd_str = 'ZIFO:OMU:MEASUR;'\r\n response = self.ftp_client.SendCommand(cmd_str)\r\n \r\n self.readStoreFiles()\r\n except (Exception, Exception) as detail:\r\n self.logString( MODULE_NAME + \".thrdBSCCntController.getFilesfromBSC, \" +\\\r\n str( detail ), CONSTANTS.ERROR_LEVEL )\r\n \r\n def readStoreFiles( self ):\r\n try:\r\n file_handle = None\r\n \r\n self.ftp_client.changeDir( self.counter_files_path_at_bsc )\r\n file_list = self.extractFileNameList()\r\n \r\n for file_name in file_list:\r\n try: \r\n file_to_read = self.dir_path + file_name\r\n file_handle = open( file_to_read , CONSTANTS.FILE_MODE_BINARY_WRITE )\r\n self.ftp_client.retrieveBinary( taftp.CMD_RETR + \" \" + file_name, file_handle.write ) #transfer the file\r\n except (Exception, Exception) as detail:\r\n self.closeAndRemoveFile( file_handle, True )\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.readStoreFiles, \" + str(detail)\r\n self.logString( err_detail, CONSTANTS.ERROR_LEVEL )\r\n finally:\r\n self.closeAndRemoveFile( file_handle )\r\n except (Exception, Exception) as detail:\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.readStoreFiles, \" + str(detail)\r\n raise Exception(err_detail)\r\n 
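    # --- Editor's illustrative sketch (not part of the original framework):
    # the retrieve-and-store loop above, done with only stdlib ftplib and
    # context managers. Note readStoreFiles initialises file_handle to None,
    # so if open() itself raises, closeAndRemoveFile() below would touch
    # None.closed; scoping the handle with `with` sidesteps that. All names
    # here are hypothetical; the project normally goes through clsUtilFTP.
    #
    # import ftplib
    # import os
    #
    # def fetch_counter_files(host, user, password, remote_dir, local_dir, names):
    #     with ftplib.FTP(host, user, password) as ftp:
    #         ftp.cwd(remote_dir)
    #         for name in names:
    #             local_path = os.path.join(local_dir, name)
    #             try:
    #                 with open(local_path, "wb") as fh:
    #                     ftp.retrbinary("RETR " + name, fh.write)
    #             except ftplib.all_errors:
    #                 if os.path.exists(local_path):
    #                     os.remove(local_path)  # drop the partial download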
\r\n def closeAndRemoveFile( self, file_handle, remove_file=False ):\r\n if not file_handle.closed:\r\n file_handle.close()\r\n if remove_file:\r\n os.remove(file_handle.name)\r\n \r\n def extractFileNameList( self ):\r\n try:\r\n list_file_names = []\r\n file_no = 1\r\n \r\n for file_no in range (1, self.configuration.max_files + 1): #+1 is done for range to be 1 to max \r\n str_num = utility.formatString( str(file_no), COUNTER_FILE_FORMAT )\r\n \r\n file_name = self.configuration.prefix + str_num + \".\" + self.configuration.postfix\r\n list_file_names.append( file_name )\r\n \r\n return list_file_names\r\n except (Exception, Exception) as detail:\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.extractFileNameList, \" + str(detail)\r\n raise Exception(err_detail)\r\n \r\n def function1( self,cmd_param_dict):\r\n #while self.loop:\r\n try: \r\n #counter_event = self.queue.get() \r\n #self.logString( \"thrdBSCCntController Got Event \" + str( counter_event.type ) )\r\n \r\n #if ( CONSTANTS.EVENT_COUNTER_FETCH_START == counter_event.type ):\r\n self.startCounterFetching(cmd_param_dict ) \r\n self.getFilesfromBSC()\r\n self.function2()\r\n \r\n '''\r\n if not(CONSTANTS.STATUS_STOPPED == self.fetch_status):\r\n next_event = clsServEvent( CONSTANTS.EVENT_COUNTER_PARSE_START, self.constraint_dict )\r\n self.queue.put( next_event )\r\n ''' \r\n #elif ( CONSTANTS.EVENT_COUNTER_PARSE_START == counter_event.type ):\r\n self.parseAllCounterFiles()\r\n self.startCounterFetching(cmd_param_dict )\r\n '''\r\n if not(CONSTANTS.STATUS_STOPPED == self.fetch_status):\r\n next_event = clsServEvent( CONSTANTS.EVENT_COUNTER_POPULATE_START, None )\r\n self.queue.put( next_event )\r\n ''' \r\n #elif ( CONSTANTS.EVENT_COUNTER_POPULATE_START == counter_event.type ):\r\n self.populateAllCounterInfo() \r\n self.checkCounterFetchCompletion() #decide whether stop counter fetching or not\r\n self.updateStatus() #update the status\r\n '''\r\n elif ( CONSTANTS.EVENT_COUNTER_FETCH_STOP == counter_event.type ):\r\n self.updateStatus() \r\n else:\r\n raise Exception(\"Invalid event \" + str(queue_event.type))\r\n '''\r\n except (Exception, Exception) as detail:\r\n self.logString( MODULE_NAME + \".thrdBSCCntController.run, \" + str(detail), CONSTANTS.ERROR_LEVEL )\r\n \r\n def end(self): \r\n self.logString( \"Killing BSC Counter Fetching Thread...\", CONSTANTS.INFO_LEVEL )\r\n self.loop = False\r\n self.queue.put( clsServEvent(None) )\r\n \r\n self.logString( \"BSC Counter Fetching thread killed.\")\r\n\r\n def startCounterFetching( self, cmd_param_dict ):\r\n '''This function reads the BCF IDs, fetch_count, fetch_duration etc from command_param_dict.\r\n '''\r\n try:\r\n self.constraint_dict = self.getBCFInfo( cmd_param_dict[ CONSTANTS.PARAM_NAME_BCF_ID_LIST ],\\\r\n cmd_param_dict[ CONSTANTS.PARAM_NAME_LOG_PATH ])\r\n counter_event = clsServEvent( CONSTANTS.EVENT_COUNTER_FETCH_START, self.constraint_dict )\r\n self.counter_fetch_timer = clsBSCCntFetchTimer( counter_event, CONSTANTS.QUEUE_BSC_COUNTER_CONTROLLER,\\\r\n self.setup_config.obj_gen_setup_info.sys_timer_config.bsc_counter_fetch_interval)\r\n \r\n if CONSTANTS.PARAM_NAME_FETCH_COUNT in cmd_param_dict: #if fetch count is there in param dict\r\n count = cmd_param_dict[ CONSTANTS.PARAM_NAME_FETCH_COUNT ]\r\n self.fetch_count = int( count )\r\n elif CONSTANTS.PARAM_NAME_FETCH_DURATION in cmd_param_dict:\r\n duration = cmd_param_dict[ CONSTANTS.PARAM_NAME_FETCH_DURATION ]\r\n self.fetch_duration = utility.getDurationInSeconds( duration, 
FETCH_DURATION_SEPARATOR )\r\n \r\n cur_time = utility.getCurrentTimeinSeconds()\r\n self.end_time = cur_time + self.fetch_duration\r\n \r\n if (self.fetch_count > 0 or self.fetch_duration > 0): \r\n self.fetch_status = CONSTANTS.STATUS_ONGOING\r\n self.updateStatus()\r\n self.timer_cnt.register( self.counter_fetch_timer ) #register the fetch timer\r\n self.counter_fetch_timer.timeout() #fetch counter for the first time\r\n else:\r\n raise Exception(\"Counter Fetch can not be started because fetch_duration is \" +\\\r\n str(self.fetch_duration) + \" and fetch_count is \" + str(self.fetch_count) +\\\r\n \" while either fetch_duration or fetch_count must be > 0.\")\r\n except (Exception, Exception) as detail:\r\n err_detail = MODULE_NAME + \".thrdBSCCntController.startCounterFetching, \" + str(detail)\r\n raise Exception(err_detail)\r\n \r\n def checkCounterFetchCompletion( self ):\r\n '''This function registers/de-registers the self.counter_fetch_timer based on the Command (STOP_COUNTER_FETCH) or fetch_count/fetch_duration\r\n completed.\r\n '''\r\n try:\r\n counter_fetch_completed = False\r\n self.fetch_iteration = self.fetch_iteration + 1\r\n \r\n if self.fetch_count:\r\n if (self.fetch_count <= self.fetch_iteration):\r\n counter_fetch_completed = True\r\n else:#check for fetch duration completion\r\n cur_time = utility.getCurrentTimeinSeconds()\r\n if (self.end_time <= cur_time):\r\n counter_fetch_completed = True\r\n \r\n if counter_fetch_completed: \r\n self.timer_cnt.deRegister( self.counter_fetch_timer ) #deregister counter fetch timer\r\n self.fetch_status = CONSTANTS.STATUS_COMPLETED\r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".thrdBSCCntController.checkCounterFetchCompletion, \" + str(detail)\r\n raise Exception(error_detail) \r\n\r\n def updateStatus( self ):\r\n '''This function updates the status of TestSuite and TestCase.\r\n '''\r\n try:\r\n if (CONSTANTS.STATUS_ONGOING == self.fetch_status): #Execution is ongoing\r\n self.logString ( \"COUNTER FETCH ONGOING : Fetch cycle Completed : \" + str( self.fetch_iteration ) +\\\r\n \", Fetch Count : \" + str(self.fetch_count) +\\\r\n \", Fetch Duration : \" + str(self.fetch_duration), CONSTANTS.INFO_LEVEL )\r\n elif (CONSTANTS.STATUS_COMPLETED == self.fetch_status): #Execution is completed\r\n self.logString ( \"COUNTER FETCH COMPLETED : Fetch cycle Completed : \" + str( self.fetch_iteration ) +\\\r\n \", Fetch Count : \" + str(self.fetch_count) +\\\r\n \", Fetch Duration : \" + str(self.fetch_duration), CONSTANTS.INFO_LEVEL )\r\n elif (CONSTANTS.STATUS_STOPPED == self.fetch_status): #Execution is stopped forcefully\r\n self.logString ( \"COUNTER FETCH STOPPED : Fetch cycle Completed : \" + str( self.fetch_iteration ) +\\\r\n \", Fetch Count : \" + str(self.fetch_iteration) +\\\r\n \", Fetch Duration : \" + str(self.fetch_count), CONSTANTS.WARN_LEVEL )\r\n if not (CONSTANTS.STATUS_COMPLETED == self.fetch_status):\r\n self.timer_cnt.deRegister( self.counter_fetch_timer ) #de-register counter fetch timer \r\n except (Exception, Exception) as detail:\r\n error_detail = MODULE_NAME + \".thrdBSCCntController.updateStatus, \" + str(detail)\r\n raise Exception(error_detail) \r\n \r\n def getBCFInfo(self, bcf_id_list,log_path ):\r\n '''This function reads the bts_id, trx_id for each and every bcf in the list.\r\n '''\r\n try:\r\n result = None\r\n if bcf_id_list:\r\n bcf_id_list = str(bcf_id_list)\r\n obj_bsc_param = clsBSCParameters() #object to read BCF parameters from BSC\r\n bcf_id_list = 
bcf_id_list.split(BCF_ID_SEPARATOR) #convert into list\r\n bcf_id_list = [int(bcf_id) for bcf_id in bcf_id_list] #convert into int\r\n \r\n dict_all_bcf_info = {} #e.g. {44:dict_bcf_info} dict_bcf_info = {bts_id:[25,26], trx_id:[1,2,3,4]}\r\n for bcf_id in bcf_id_list:\r\n dict_bcf_info = obj_bsc_param.getBCFObjectInfo( self.bsc_intf_wrapper, bcf_id, log_path )\r\n if not dict_bcf_info == CONSTANTS.FAIL:\r\n dict_all_bcf_info[bcf_id] = dict_bcf_info\r\n \r\n result = dict_all_bcf_info\r\n return result\r\n except Exception as detail:\r\n error_detail = MODULE_NAME + \".thrdBSCCntController.getBCFInfo, \" + str(detail)\r\n raise Exception(error_detail)\r\n \r\n def logString( self, log_string, log_level=CONSTANTS.DEBUG_LEVEL ):\r\n \r\n ''' * FUNCTION NAME: \r\n * logString( self, log_string, log_level=CONSTANTS.DEBUG_LEVEL )\r\n *\r\n * DESCRIPTION: \r\n * The function does the following tasks\r\n * - Checks if logger instance is not None\r\n * - Calls logger's logString function to log into log file.\r\n *\r\n * INPUT:\r\n * log_string - The string to be written into log file\r\n * log_level - The level (DEBUG, INFO, WARN, ERROR)\r\n *\r\n * OUTPUT:\r\n * None\r\n *\r\n * RETURNS: \r\n * None\r\n * \r\n '''\r\n if self.logger:\r\n self.logger.logString( log_string, log_level )\r\n\r\nclass clsBSCCntFetchTimer(clsTimerObject):\r\n def __init__ ( self, event, queue, interval, periodic = True ):\r\n clsTimerObject.__init__( self, interval, periodic )\r\n self.event = event\r\n self.queue = queue\r\n \r\n def timeout(self): \r\n event_cnt.postEvent( self.queue , self.event )","sub_path":"kk_ADTRAN/Execution_Framework1_build25/Execution_Framework1_build25/bta_appl_bsccnt_controller.py","file_name":"bta_appl_bsccnt_controller.py","file_ext":"py","file_size_in_byte":19881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586606","text":"from flask import Blueprint\nfrom flask_admin import BaseView, expose, AdminIndexView\nfrom flask_admin.contrib.sqla import ModelView\n\nfrom APP import db\nfrom APP.extends import admin\nfrom APP.models import Class17, Teacher, Student\n\nadmin_blue = Blueprint('admin_blue', __name__)\n\n\ndef app_blueprints(app):\n app.register_blueprint(admin_blue)\n\n\nclass Myviews(BaseView):\n @expose('/')\n def index(self):\n return self.render('index.html')\n\n\n#admin.add_view(Myviews(name='Hello 1', endpoint='test1', category='Test'))\nadmin.add_view(Myviews(name='Test'))\nadmin.add_view(ModelView(Class17, db.session))\nadmin.add_view(ModelView(Student, db.session))\nadmin.add_view(ModelView(Teacher, db.session))\n\n\n@admin_blue.route('/')\ndef index():\n return \"hello admin\"\n\n\n@admin_blue.route('/add_class17/')\ndef add_class17():\n for i in range(1, 5):\n class_17 = Class17.query.filter(Class17.id == 3).first()\n teacher = Teacher.query.filter(Teacher.id == i).first()\n print(teacher.class17s)\n teacher.class17s.append(class_17)\n db.session.add(teacher)\n db.session.commit()\n return \"ok\"\n\n\n@admin_blue.route('/add_student/')\ndef add_student():\n for i in range(1, 5):\n student = Student.query.filter(Student.id == i).first()\n teacher = Teacher.query.filter(Teacher.id == i).first()\n student.teachers.append(teacher)\n db.session.add(student)\n db.session.commit()\n return \"ok\"\n\n\n@admin_blue.route('/add_class/')\ndef add_class():\n class_17 = Class17(\n name=\"17计算机科学与技术\",\n class_num=2\n )\n db.session.add(class_17)\n db.session.commit()\n return 
\"ok\"\n","sub_path":"flask_admin_test/APP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596956485","text":"import math\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#1 EUCLID'S ALGORITHM\ndef euclid(x,y):\n print(\"finding the gcd(\",x,\",\",y,\")...\")\n if (x < 0):\n print(x,\"is negative\")\n x = -1*x\n print(\"we multiply\",-1*x,\"by -1 to get\",x)\n if (y < 0):\n print(y,\"is negative\")\n y = -1*y\n print(\"we multiply\",-1*y,\"by -1 to get\",y)\n\n print(x,\"mod\",y, \"=\", x%y)\n \n if (x % y == 0):\n print(\"\\nthe gcd is\",y)\n return y\n else:\n return euclid(y,x%y)\n\ndef test_euclid():\n x = input(\"enter first number: \")\n y = input(\"enter second number: \")\n euclid(int(x),int(y))\n\n#2 GENERATING PRIME NUMBERS\ndef print_list(L):\n s=\"\"\n for x in L:\n s += str(x)+\",\"\n print(\"[\"+s[0:-1]+\"]\")\n\n\ndef sieve_gen(n):\n n+=1\n A = [True for i in range(n)]\n for i in range(2,int(math.sqrt(n))+1):\n j = 0\n B = []\n while(i**2+j*i < n):\n B.append(i**2+j*i)\n j+=1\n for x in B:\n A[x] = False\n P = []\n a = 2\n while a < n:\n if A[a]:\n P.append(a)\n a+=1\n return P\n\ndef test_sieve():\n n = input(\"enter a number: \")\n print_list(sieve_gen(int(n)))\n\n#3 PRIMALITY TEST\ndef trial_division(n):\n print(\"checking for a prime number between 2 and sqrt(\",n,\") that divides\", n)\n print(\"checking\",sieve_gen(int(math.sqrt(n))));\n for x in sieve_gen(int(math.sqrt(n))):\n if n%x == 0:\n print(n, \"is not prime it is divisible by\",x)\n return False\n print(n, \"is prime\")\n return True\n\ndef trial_division_printless(n):\n for x in sieve_gen(int(math.sqrt(n))):\n if n%x == 0:\n return False\n return True\n\n\ndef sieve_primality(n):\n print(\"generating prime numbers using Sieve of Eratosthenes\")\n print_list(sieve_gen(n))\n if (n<=1):\n print(n, \"is not prime\")\n return False\n elif (n<4):\n print(n, \"is prime\")\n return True\n elif (sieve_gen(n)[-1] == n):\n print(n,\"is prime because it is in the list\") \n return True\n else:\n print(n,\"is not prime because it is not in the list\")\n return False\n\ndef fermat_little_theorem(n):\n if n<4:\n print(n,\"is prime\")\n return True\n else:\n \n a = random.randint(2,n-2)\n print(\"picking a random number\",a,\"to test with\")\n if n%a==0:\n print(n, \"is not prime, it is divisible by\", a)\n return False\n else:\n print(\"testing if (\",a,\"^ (\",n,\"- 1 ) %\",n,\") == 0\")\n if ((a**(n-1) - 1)%n == 0):\n print(n, \"is prime\")\n return True\n else:\n print(n, \"is not prime\")\n return False\n\n#4 TRIAL DIVISION \n\ndef trial_division_factorization(n):\n ret = []\n temp = n\n A = sieve_gen(n);\n i = 2\n while i <= temp:\n if temp % i == 0:\n ret.append(i)\n temp = temp / i\n \n else:\n i = i + 1\n print(ret)\n return ret\n\n\ndef fermat_algorithm(n):\n ret = []\n if trial_division_printless(n):\n ret.append(int(n))\n return ret\n if n % 2 == 0:\n ret.append(2)\n ret.extend(fermat_algorithm(n/2))\n return ret\n temp = n\n y = 0\n while temp > 1:\n if trial_division_printless(temp):\n ret.append(int(temp))\n \n return ret\n\n x = math.sqrt(y * y + temp)\n\n if math.floor(math.sqrt(y * y + temp)) == math.sqrt(y * y + temp) and (x - y) != 1:\n temp2 = max(x-y,x+y)\n if(trial_division_printless(temp2)):\n ret.append(int(temp2))\n temp = temp / (temp2)\n y = 0\n else:\n ret.extend(fermat_algorithm(temp2))\n temp = temp / temp2\n y = 0\n else:\n y = y 
+ 1\n \n \n return ret\n\n#5 PRIME DISTRIBUTION\n\ndef million_primes():\n file = open(\"primes1.txt\",\"r\")\n lines = file.readlines()\n file.close()\n temp = []\n ret = []\n for i in lines:\n temp.extend(i.split(\" \"))\n for i in temp:\n if i.isdigit():\n ret.append(int(i))\n return ret\n\ndef count():\n return len(million_primes())\ndef count_last(n):\n ret = 0\n temp = million_primes()\n for i in temp:\n if int(i) % 10 == n:\n ret = ret + 1\n return ret\n\ndef percent_last(n):\n return count_last(n) * 100.0 / 1000000.0\n\ndef count_last_following(n1,n2):\n ret = 0\n temp = million_primes()\n i = 0;\n while i < 999999:\n if int(temp[i]) % 10 == n1:\n if int(temp[i+1]) % 10 == n2:\n ret = ret + 1\n i = i + 1\n return ret\n\n\ndef percent_last_following(n1,n2):\n return count_last_following(n1,n2) * 100.0 / count_last(n1)\n\n\ndef count_twin_primes():\n ret = 0\n temp = million_primes()\n i = 0;\n while i < 999999:\n if int(temp[i]) - int(temp[i + 1]) > -3 and int(temp[i]) - int(temp[i + 1]) < 3:\n ret = ret + 1\n i = i + 1\n return ret\n\ndef plotx(n):\n temp = million_primes()\n plot = []\n i = 0\n while i <= n:\n numLess = 0\n for y in temp:\n if y < i:\n numLess = numLess + 1\n plot.append(numLess)\n i = i +1\n plt.plot(range(i),plot)\n plt.title(\"Number of Prime Numbers Below a Given Prime Number\")\n plt.xlabel(\"Prime Number\")\n plt.ylabel(\"Number of Primes\")\n plt.show(block=True)\n \n#6 PRIME REPRESENTATION\ndef plotPrime():\n x = []\n y = []\n j = 0;\n k = 0;\n while j < 10:\n k = 0;\n while k < 10:\n i = 0;\n temp = count_last_following(j,k)\n while i < temp:\n x.append(j)\n y.append(k)\n i = i + 1\n k = k + 1\n j = j + 1\n plt.hist2d(x, y)\n plt.title(\"Number of Prime Numbers Ending in a Given Digit vs The Next Prime Number Ending in a Given Digit\")\n plt.xlabel(\"Primes Ending in\")\n plt.ylabel(\"Following Primes Ending in\")\n plt.show(block = True)\n \ndef test_primality():\n print(\"Question 1\")\n print(\"1. Euclids\")\n print(\"Question 2\")\n print(\"2. 
Sieve Gen\")\n print(\"Question 3\")\n print(\"Tests for Primality:\")\n print(\"3: Trial Division\")\n print(\"4: Sieve of Eratosthenes\")\n print(\"5: Fermat Little Theroem\")\n print(\"Question 4\")\n print(\"Prime Factorization:\")\n print(\"6: Trial Division\")\n print(\"7: Fermat Factorization Algorithm\")\n print(\"Question 5\")\n print(\"Prime Distribution:\")\n print(\"8: Prime distribution\")\n print(\"Question 6\")\n print(\"9: Visual Prime distribution rendering\")\n test = input(\"pick a test: \")\n if int(test) > 0 and int(test) < 2:\n n1 = input(\"enter a first number to test: \")\n n2 = input(\"enter a second number to test: \")\n if int(test) == 1:\n b = time.time()\n euclid(int(n1), int(n2))\n e = time.time()\n print('in', round(e-b,5), 's')\n if int(test) > 1 and int(test) < 3:\n n = input(\"enter a number to test: \")\n if int(test) == 2:\n b = time.time()\n print(sieve_gen(int(n)))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\")\n if int(test) > 2 and int(test) < 6:\n n = input(\"enter a number to test: \")\n if int(test) == 3:\n b = time.time()\n trial_division(int(n))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\")\n elif int(test) == 4:\n b = time.time()\n sieve_primality(int(n))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\")\n elif int(test) == 5:\n b = time.time() \n fermat_little_theorem(int(n))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\") \n \n elif int(test) > 5 and int(test) < 8:\n n = input(\"enter a number to factor: \")\n if int(test) == 6:\n b = time.time()\n trial_division_factorization(int(n))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\")\n elif int(test) == 7:\n \n b = time.time()\n print(fermat_algorithm(int(n)))\n e = time.time()\n print(\"in\", round(e-b,5), \"s\")\n \n elif int(test) < 10:\n if int(test) == 8:\n print(\"Total primes:\")\n print(count())\n print(\"Primes ending in 1:\")\n print(count_last(1))\n print(percent_last(1))\n print(\"Primes ending in 3:\")\n print(count_last(3))\n print(percent_last(3))\n print(\"Primes ending in 7:\")\n print(count_last(7))\n print(percent_last(7))\n print(\"Primes ending in 9:\")\n print(count_last(9))\n print(percent_last(9))\n\n print(\"Primes ending in 1 followed by a 1:\")\n print(count_last_following(1,1))\n print(percent_last_following(1,1))\n\n print(\"Primes ending in 1 followed by a 3:\")\n print(count_last_following(1,3))\n print(percent_last_following(1,3))\n\n print(\"Primes ending in 1 followed by a 7:\")\n print(count_last_following(1,7))\n print(percent_last_following(1,7))\n\n print(\"Primes ending in 1 followed by a 9:\")\n print(count_last_following(1,9))\n print(percent_last_following(1,9))\n\n print(\"Primes ending in 3 followed by a 1:\")\n print(count_last_following(3,1))\n print(percent_last_following(3,1))\n\n print(\"Primes ending in 3 followed by a 3:\")\n print(count_last_following(3,3))\n print(percent_last_following(3,3))\n\n print(\"Primes ending in 3 followed by a 7:\")\n print(count_last_following(3,7))\n print(percent_last_following(3,7))\n\n print(\"Primes ending in 3 followed by a 9:\")\n print(count_last_following(3,9))\n print(percent_last_following(3,9))\n\n print(\"Primes ending in 7 followed by a 1:\")\n print(count_last_following(7,1))\n print(percent_last_following(7,1))\n\n print(\"Primes ending in 7 followed by a 3:\")\n print(count_last_following(7,3))\n print(percent_last_following(7,3))\n\n print(\"Primes ending in 7 followed by a 7:\")\n print(count_last_following(7,7))\n print(percent_last_following(7,7))\n\n 
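            # --- Editor's note (illustrative, not in the original): the
            # repetitive count/percent prints in this branch could be driven
            # by one loop over the last digits possible for primes > 5:
            #     for d1 in (1, 3, 7, 9):
            #         for d2 in (1, 3, 7, 9):
            #             print("Primes ending in %d followed by a %d:" % (d1, d2))
            #             print(count_last_following(d1, d2))
            #             print(percent_last_following(d1, d2))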
print(\"Primes ending in 7 followed by a 9:\")\n print(count_last_following(7,9))\n print(percent_last_following(7,9))\n\n print(\"Primes ending in 9 followed by a 1:\")\n print(count_last_following(9,1))\n print(percent_last_following(9,1))\n\n print(\"Primes ending in 9 followed by a 3:\")\n print(count_last_following(9,3))\n print(percent_last_following(9,3))\n\n print(\"Primes ending in 9 followed by a 7:\")\n print(count_last_following(9,7))\n print(percent_last_following(9,7))\n\n print(\"Primes ending in 9 followed by a 9:\")\n print(count_last_following(9,9))\n print(percent_last_following(9,9))\n\n print(\"Twin primes:\")\n print(count_twin_primes())\n \n n = input(\"enter the number of primes to generate until: \")\n plotx(int(n))\n \n elif int(test) == 9:\n plotPrime()\n else:\n test_primality()\n\n \nif (__name__==\"__main__\"):\n test_primality()\n","sub_path":"apn.py","file_name":"apn.py","file_ext":"py","file_size_in_byte":11744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519690960","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.testbrowser import browsing\nfrom opengever.base.security import elevated_privileges\nfrom opengever.testing import IntegrationTestCase\nfrom opengever.trash.remover import RemoveConditionsChecker\nfrom opengever.trash.remover import Remover\nfrom plone import api\nfrom zope.i18n import translate\n\n\nclass TestRemoveConditionsChecker(IntegrationTestCase):\n\n def assert_error_messages(self, expected, msgs):\n self.assertEqual(expected, [translate(msg) for msg in msgs])\n\n def test_document_must_not_have_relations(self):\n self.login(self.manager)\n\n document_b = create(Builder('document')\n .relate_to([self.document])\n .trashed())\n\n checker = RemoveConditionsChecker(document_b)\n\n self.assertFalse(checker.removal_allowed())\n self.assert_error_messages(\n [u'The document has outgoing references to other documents.'], checker.error_msg)\n\n @browsing\n def test_document_must_not_have_backreferences(self, browser):\n self.login(self.manager, browser=browser)\n self.trash_documents(self.empty_document)\n\n browser.open(self.taskdocument, view='edit')\n browser.fill({'Related documents': [self.empty_document]})\n browser.find('Save').click()\n\n checker = RemoveConditionsChecker(self.empty_document)\n\n self.assertFalse(checker.removal_allowed())\n self.assert_error_messages(\n [u'The document is referred to by Document: {}.'.format(\n self.taskdocument.absolute_url(), self.taskdocument.title)],\n checker.error_msg)\n\n @browsing\n def test_check_does_not_fail_if_document_has_no_longer_existent_backrefs(self, browser):\n self.login(self.manager, browser=browser)\n\n browser.open(self.taskdocument, view='edit')\n browser.fill({'Related documents': [self.empty_document]})\n browser.find('Save').click()\n\n self.trash_documents(self.empty_document)\n\n checker = RemoveConditionsChecker(self.empty_document)\n self.assertFalse(checker.removal_allowed())\n\n with elevated_privileges():\n api.content.delete(obj=self.taskdocument)\n\n checker = RemoveConditionsChecker(self.empty_document)\n self.assertTrue(checker.removal_allowed())\n\n def test_document_must_be_trashed(self):\n self.login(self.manager)\n\n checker = RemoveConditionsChecker(self.empty_document)\n self.assertFalse(checker.removal_allowed())\n self.assert_error_messages(\n [u'The document has not been moved to the trash yet.'], checker.error_msg)\n\n def 
test_document_must_not_already_be_removed(self):\n self.login(self.manager)\n\n self.trash_documents(self.empty_document)\n Remover([self.empty_document]).remove()\n\n checker = RemoveConditionsChecker(self.empty_document)\n self.assertFalse(checker.removal_allowed())\n self.assert_error_messages(\n [u'The document has already been removed.'],\n checker.error_msg)\n\n def test_removal_allowed(self):\n self.login(self.manager)\n self.trash_documents(self.empty_document)\n\n checker = RemoveConditionsChecker(self.empty_document)\n self.assertTrue(checker.removal_allowed())\n","sub_path":"opengever/trash/tests/test_remove_conditions.py","file_name":"test_remove_conditions.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253169624","text":"from library import turn_right, turn_around, jump\n\nsteps = 0\nwhile front_is_clear():\n move()\n steps += 1\n if wall_in_front():\n turn_around()\n center = int(steps / 2)\n print(center)\n for center in range(0, center):\n move()\n put()\n done()\n################################################################\n# WARNING: Do not change this comment.\n# Library Code is below.\n################################################################\ndef turn_around():\n turn_left()\n turn_left()\n\ndef turn_right():\n turn_left()\n turn_left()\n turn_left()\n \ndef jump():\n turn_left()\n while wall_on_right():\n move()\n turn_right()\n move()\n turn_right()\n while front_is_clear():\n move()\n turn_left()","sub_path":"reeborgs_world/Center 1.py","file_name":"Center 1.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330491539","text":"import asyncio\nimport dropbox\nfrom dropbox.files import CommitInfo, UploadSessionCursor, UploadSessionFinishArg\nimport requests\nfrom multiprocessing.pool import ThreadPool\nfrom celery import shared_task\nfrom boxsdk import Client, OAuth2\nfrom boxsdk.network.default_network import DefaultNetwork\nfrom helpers.utils import (\n IVLE_DOWNLOAD_URL, get_logger, BOX_CLIENT_ID, BOX_CLIENT_SECRET,\n GDRIVE_CLIENT_SECRET, GDRIVE_CLIENT_ID, ONEDRIVE_CLIENT_ID,\n ONEDRIVE_CLIENT_SECRET, ONEDRIVE_SCOPES\n)\nfrom api.models import User\nfrom api.periodic_tasks.refresh_ivle_token import refresh_ivle_token\nfrom api.periodic_tasks.utils import (\n get_ivle_file_structure, get_ivle_modules, get_onedrive_client,\n attach_onedrive_session\n)\nfrom api.periodic_tasks.box import sync_box\nfrom api.periodic_tasks.gdrive import sync_gdrive\nfrom api.periodic_tasks.onedrive import sync_onedrive\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom django.db import transaction\nimport datetime\n\nimport json\nimport onedrivesdk\nfrom django.utils import timezone\nimport tempfile\n\nlogger = get_logger()\n\n\ndef is_dropbox_folder(entry):\n return type(entry) is dropbox.files.FolderMetadata\n\n\ndef is_dropbox_file(entry):\n return type(entry) is dropbox.files.FileMetadata\n\n\ndef update_dropbox(user, dbx, dropbox_path, expected_folders, expected_files, files_to_batch_upload):\n dropbox_dir = dbx.files_list_folder(dropbox_path).entries\n dropbox_folders = set(f.name for f in dropbox_dir if is_dropbox_folder(f))\n dropbox_files = set(f.name for f in dropbox_dir if is_dropbox_file(f))\n\n for expected_file in expected_files:\n # TODO: Better way of handling / in name.\n expected_file_name = expected_file['FileName'].replace('/', 
'')\n expected_file_id = expected_file['ID']\n if expected_file_name in dropbox_files:\n continue\n\n r = requests.get(\n IVLE_DOWNLOAD_URL.format(\n AUTH_TOKEN=user.ivle_token,\n FILE_ID=expected_file_id),\n allow_redirects=True\n )\n file_path = dropbox_path + '/' + expected_file_name\n dbx.files_upload(r.content, file_path, autorename=True)\n # upload_session_start_result = dbx.files_upload_session_start(\n # r.content, close=True)\n # cursor = UploadSessionCursor(\n # session_id=upload_session_start_result.session_id,\n # offset=len(r.content))\n # commit_info = CommitInfo(path=file_path)\n # files_to_batch_upload.append(UploadSessionFinishArg(\n # cursor=cursor,\n # commit=commit_info\n # ))\n\n for folder in expected_folders:\n if folder['AllowUpload']:\n continue\n\n # TODO: Better way of handling / in name.\n expected_folder_name = folder['FolderName'].replace('/', '')\n folder_path = dropbox_path + '/' + expected_folder_name\n if expected_folder_name not in dropbox_folders:\n dbx.files_create_folder_v2(folder_path)\n update_dropbox(user, dbx, folder_path, folder['Folders'], folder['Files'], files_to_batch_upload)\n\n\ndef sync_dropbox_module(user, dbx, module, ivle_file_structure):\n root_dir = dbx.files_list_folder('')\n root_folders = {f.name for f in root_dir.entries if\n is_dropbox_folder(f)}\n\n module_year, module_sem = module['CourseAcadYear'], module['CourseSemester']\n module_code, module_id = module['CourseCode'], module['ID']\n\n # Some module codes have / in them, but Dropbox hates this.\n module_code = module_code.replace('/', '_')\n\n # TODO: Tidy this up.\n semester_root_folder = f'AY{module_year} {module_sem}'.replace('/', '')\n semester_root = f'/{semester_root_folder}'\n if semester_root_folder not in root_folders:\n try:\n dbx.files_create_folder_v2(semester_root)\n except:\n # TODO: hacky but i'm lazy for now.\n # Race condition here so just ignore if fail\n pass\n semester_dir = dbx.files_list_folder(semester_root)\n semester_folders = {f.name for f in semester_dir.entries if\n is_dropbox_folder(f)}\n\n # Create the root directory for the module if not already exist\n module_root = f'{semester_root}/{module_code}'\n if module_code not in semester_folders:\n dbx.files_create_folder_v2(module_root)\n # This module probably does not have a workbin\n # TODO: Refactor this to read from ivle_file_structure directly\n if module_id not in ivle_file_structure:\n return\n\n module_dir = dbx.files_list_folder(module_root)\n module_folders = {f.name for f in module_dir.entries if\n is_dropbox_folder(f)}\n files_to_batch_upload = []\n for workbin in ivle_file_structure[module_id]:\n # check if workbin exists\n # TODO: Now we assume multiple workbins do not have same name.\n # TODO: Better way of handling / in name.\n workbin_name = workbin['Title'].replace('/', '')\n workbin_root = f'{module_root}/{workbin_name}'\n if workbin_name not in module_folders:\n dbx.files_create_folder_v2(workbin_root)\n update_dropbox(\n user,\n dbx,\n workbin_root,\n iter(workbin['Folders']),\n [],\n files_to_batch_upload\n )\n # dbx.files_upload_session_finish_batch(files_to_batch_upload)\n\n\ndef sync_dropbox(user, dbx, modules, ivle_file_structure):\n # Don't set this too high because I think IVLE does rate limits,\n # so some threads may fail to get any downloads\n with ThreadPool(processes=3) as pool:\n pool.starmap(\n sync_dropbox_module,\n ((user, dbx, mod, ivle_file_structure)\n for mod in modules if user.synced_modules.filter(module_code=mod['CourseCode']).exists())\n )\n\n\ndef 
check_update_for_user(user):\n logger.info('Updating user')\n\n # Get modules info from IVLE\n logger.info('Getting IVLE modules')\n modules = get_ivle_modules(user)\n\n # Get all module workbins\n logger.info('Getting IVLE file structure')\n ivle_file_structure = get_ivle_file_structure(user, modules)\n\n if user.dropbox_token:\n # Get Dropbox info\n logger.info('Updating dropbox')\n dbx = dropbox.Dropbox(user.dropbox_token)\n\n # Update Dropbox using IVLE file structure\n sync_dropbox(user, dbx, modules, ivle_file_structure)\n\n if user.box_access_token and user.box_refresh_token:\n # TODO: I'm sure this will work 99% of the time. But need to confirm.\n oauth = OAuth2(\n client_id=BOX_CLIENT_ID,\n client_secret=BOX_CLIENT_SECRET,\n access_token=user.box_access_token,\n refresh_token=user.box_refresh_token\n )\n\n # Immediately regenerate a new token for the next use\n new_access_token, new_refresh_token = oauth.refresh(\n access_token_to_refresh=user.box_access_token)\n user.box_access_token = new_access_token\n user.box_refresh_token = new_refresh_token\n user.save()\n\n # This consumes our old tokens\n box = Client(oauth, DefaultNetwork())\n sync_box(user, box, modules, ivle_file_structure)\n\n if user.gdrive_token:\n\n logger.info('Updating google drive for ' + user.ivle_user_id)\n # configure sdk\n gauth = GoogleAuth('api/client_secrets.yaml')\n\n # To replace refresh token with actual value, NEED TRY CATCH HERE\n tf = tempfile.NamedTemporaryFile('r+')\n with open(\"api/credentials.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n data[\"refresh_token\"] = user.gdrive_token\n data[\"client_secret\"] = GDRIVE_CLIENT_SECRET\n data[\"client_id\"] = GDRIVE_CLIENT_ID\n json.dump(data, tf)\n\n tf.seek(0)\n\n # Try to load saved client credentials\n gauth.LoadCredentialsFile(tf.name)\n if gauth.credentials is None:\n # Authenticate if they're not there\n logger.error('Authentication failed for user for google drive')\n # gauth.LocalWebserverAuth()\n\n else:\n # Refresh them\n logger.info('Refreshing Gdrive token')\n gauth.Refresh()\n\n drive = GoogleDrive(gauth)\n sync_gdrive(user, drive, modules, ivle_file_structure)\n logger.info('Sync done for Gdrive')\n\n if user.onedrive_access_token:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n client = get_onedrive_client(loop)\n attach_onedrive_session(client, user)\n client.auth_provider.refresh_token()\n\n # Save the new tokens after refreshing\n user.onedrive_access_token = client.auth_provider._session.access_token\n user.onedrive_refresh_token = client.auth_provider._session.refresh_token\n user.save()\n\n sync_onedrive(user, client, modules, ivle_file_structure)\n loop.close()\n\n\n@shared_task\ndef check_for_updates():\n users = User.objects.all()\n for user in users:\n check_for_update_fast.delay(user.ivle_user_id)\n\n\n@shared_task\ndef refresh_ivle_token_fast(ivle_user_id):\n refresh_ivle_token(ivle_user_id)\n\n\n@shared_task\ndef refresh_ivle_tokens():\n users = User.objects.all()\n for user in users:\n refresh_ivle_token_fast.delay(user.ivle_user_id)\n\n\n@shared_task\ndef check_for_update_fast(ivle_user_id):\n \"\"\"\n This is separate from `check_update_for_user` because this is a\n direct task for celery. (wrapper)\n \"\"\"\n try:\n should_sync = False\n\n # This thing is like a lock. 
Only need to lock user.sync_status though.\n with transaction.atomic():\n # We have multiple workers trying to sync for the same person\n # possibly\n user = User.objects.select_for_update().get(ivle_user_id=ivle_user_id)\n if not user.sync_status:\n user.sync_status = True\n should_sync = True\n user.save()\n\n if not should_sync:\n logger.info('Some other worker already syncing, returning')\n return\n\n with transaction.atomic():\n user = User.objects.select_for_update().get(ivle_user_id=ivle_user_id)\n user.last_synced = datetime.datetime.now()\n user.save()\n\n check_update_for_user(user)\n\n except Exception as e:\n logger.error('check_for_update_fast failed user %s' % ivle_user_id)\n logger.error(e)\n\n finally:\n\n # Unlock this row if this worker did sync\n if should_sync:\n with transaction.atomic():\n user = User.objects.select_for_update().get(ivle_user_id=ivle_user_id)\n user.sync_status = False\n user.save()\n\n\n@shared_task\ndef refresh_box_tokens():\n users = User.objects.all()\n for user in users:\n if not user.box_access_token:\n continue\n try:\n oauth = OAuth2(\n client_id=BOX_CLIENT_ID,\n client_secret=BOX_CLIENT_SECRET,\n access_token=user.box_access_token,\n refresh_token=user.box_refresh_token\n )\n\n # Regenerate a new token for the next use\n new_access_token, new_refresh_token = oauth.refresh(\n access_token_to_refresh=user.box_access_token)\n user.box_access_token = new_access_token\n user.box_refresh_token = new_refresh_token\n user.save()\n except Exception as e:\n logger.error(e)\n\n\n@shared_task\ndef refresh_onedrive_tokens():\n users = User.objects.all()\n for user in users:\n if not user.onedrive_access_token:\n continue\n try:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n client = get_onedrive_client(loop)\n attach_onedrive_session(client, user)\n client.auth_provider.refresh_token()\n\n # Save the new tokens after refreshing\n user.onedrive_access_token = client.auth_provider._session.access_token\n user.onedrive_refresh_token = client.auth_provider._session.refresh_token\n user.save()\n\n loop.close()\n except Exception as e:\n logger.error(e)\n","sub_path":"backend/nuscloud/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":12297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457097057","text":"import os\nimport shutil\nfrom pathlib import Path\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom scipy.stats import iqr\nimport time\n\n\n\n\n# Import arrays\npath = Path(\"./data\")\npaths = []\nfor x in path.iterdir():\n paths.append(x)\n print (x)\n\n#print(\"len(paths):\", len(paths))\ndates = []\nfor m in range(len(paths)):\n dates.append(str(paths[m])[5:])\n\n#print(\"dates:\", dates)\n\nassert len(paths) > 0, \"No data found\"\n\npath_to_graphs = paths[0]\n\nfor j in range(len(paths)):\n path_to_graphs = paths[j]\n f=open(str(path_to_graphs) + \"/title.txt\", \"r\")\n title =f.read()\n\n tmp2 = Path(path_to_graphs)\n graphs = []\n for x in tmp2.iterdir():\n x = str(x)\n if x[-4:] == \".npy\":\n graphs.append(x)\n\n test_graph = np.load(graphs[0])\n\n #print(\"test_graph:\", test_graph)\n #print(\"test_graph.shape:\", test_graph.shape)\n\n plots = np.empty((len(graphs), test_graph.shape[0], test_graph.shape[1]))\n\n for i in range(len(graphs)):\n plots[i, :, :] = np.load(graphs[i])\n \n #print(\"plots:\", plots)\n #print(\"plots.shape:\", plots.shape)\n\n\n # Creating graphs for the average testing success rate\n\n colors = [(0.0, 0.0, 
1.0, 1.0), (0.0, 1.0, 0.0, 1.0), (1.0, 0.0, 0.0, 1.0), (0.7, 0.5, 0.85, 1.0), (0.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.5, 1.0), (0.5, 0.5, 0.0, 1.0), (0.5, 0.0, 0.5, 1.0), (0.0, 0.5, 0.5, 1.0)]\n\n x = np.arange(0, test_graph.shape[1], 1)\n print(\"x:\", x)\n fig, ax = plt.subplots()\n\n # Calculate interquartile range\n intq_range = np.empty((plots.shape[0], plots.shape[2]), dtype=float)\n for i in range(plots.shape[0]):\n intq_range[i] = iqr(plots[i, :, :], axis=0)\n\n print(\"intq_range:\", intq_range)\n # Calculate average success rate\n average = np.empty((plots.shape[0], plots.shape[2]), dtype=float)\n for i in range(plots.shape[0]):\n average[i] = np.mean(plots[i, :, :], axis=0)\n print(\"average:\", average)\n\n\n yerr = intq_range\n\n\n for k in range(plots.shape[0]):\n y = average[k]\n yerr = intq_range[k]\n ax.plot(x, y, color=colors[k])\n plt.fill_between(x, y-yerr, y+yerr, facecolor=colors[k], alpha=0.2)\n\n plt.title(title)\n plt.ylabel('Median Test Success Rate')\n ax.set_xlim((0, plots.shape[2]-1))\n ax.set_ylim((0.0, 1.2))\n plt.xlabel('Epoch')\n plt.grid(True)\n fig.set_size_inches(8, 4)\n Path(\"./figures\").mkdir(parents=True, exist_ok=True)\n\n plt.savefig(\"./figures/\" + \"success_rate_plot_\" + dates[j] + \".jpg\", dpi=400, facecolor='w', edgecolor='w',\n orientation='landscape',transparent=False, bbox_inches='tight')\n\n\n\n\n","sub_path":"fetch_environments/.archive/HAC_serial/create_plots.py","file_name":"create_plots.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"174202205","text":"from pygsuite.docs.doc_elements import BaseElement\n\n\nclass Headers(BaseElement):\n def __init__(self, element, document, last):\n BaseElement.__init__(self, element=element, document=document, last=last)\n self._paragraph = self._element.get(\"headers\")\n\n @property\n def section_style(self):\n return self._element.get(\"sectionStyle\")\n\n def delete(self):\n if not self.start_index:\n return\n if self.start_index == self.end_index:\n return\n self._document._mutation(\n [\n {\n \"deleteContentRange\": {\n \"range\": {\n \"segmentId\": None,\n \"startIndex\": self.start_index,\n \"endIndex\": self.end_index,\n }\n }\n }\n ]\n )\n","sub_path":"pygsuite/docs/doc_elements/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52831955","text":"import glob,os\n# cat=glob.glob(r'C:\\Xampp\\apache\\logs\\*.log')\n# cat=file.read()\n# print(cat)\ncat1=os.listdir(r'C:\\Xampp\\apache\\logs')\nfor i in cat1:\n f=open(i)\n li=f.readlines()\n print(li)\ncat2=open(r'C:\\Users\\Administrator\\Desktop\\daqiu.txt','r+')\n\n# file=open(r'C:\\Users\\Administrator\\Desktop\\daqiu.txt','r+')\n# cat1.seek(0)\n# file=open(r'C:\\Xampp\\apache\\logs','r+',encoding='utf8')\n# file.write(r'C:\\Users\\Administrator\\Desktop\\daqiu.txt')\n# cer=file.read()\n# print(cer)\nfile.close()","sub_path":"pythonone/pythoncode/PycharmProjects/woniuxy/python读写/xampp日志读取.py","file_name":"xampp日志读取.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"248521657","text":"\"\"\" Custom template system for osxphotos, implements metadata template language (MTL) \"\"\"\n\nimport datetime\nimport json\nimport locale\nimport logging\nimport os\nimport pathlib\nimport shlex\nimport sys\nfrom dataclasses import 
dataclass\nfrom typing import Optional\n\nfrom textx import TextXSyntaxError, metamodel_from_file\n\nfrom ._constants import _UNKNOWN_PERSON, TEXT_DETECTION_CONFIDENCE_THRESHOLD\nfrom ._version import __version__\nfrom .datetime_formatter import DateTimeFormatter\nfrom .exiftool import ExifToolCaching\nfrom .export_db import ExportDB_ABC, ExportDBInMemory\nfrom .path_utils import sanitize_dirname, sanitize_filename, sanitize_pathpart\nfrom .text_detection import detect_text\nfrom .utils import expand_and_validate_filepath, load_function\n\n# TODO: a lot of values are passed from function to function like path_sep--make these all class properties\n\n# ensure locale set to user's locale\nlocale.setlocale(locale.LC_ALL, \"\")\n\nMTL_GRAMMAR_MODEL = str(pathlib.Path(__file__).parent / \"phototemplate.tx\")\n\n\"\"\"TextX metamodel for osxphotos template language \"\"\"\n\nPHOTO_VIDEO_TYPE_DEFAULTS = {\"photo\": \"photo\", \"video\": \"video\"}\n\nMEDIA_TYPE_DEFAULTS = {\n \"selfie\": \"selfie\",\n \"time_lapse\": \"time_lapse\",\n \"panorama\": \"panorama\",\n \"slow_mo\": \"slow_mo\",\n \"screenshot\": \"screenshot\",\n \"portrait\": \"portrait\",\n \"live_photo\": \"live_photo\",\n \"burst\": \"burst\",\n \"photo\": \"photo\",\n \"video\": \"video\",\n}\n\n# Permitted substitutions (each of these returns a single value or None)\nTEMPLATE_SUBSTITUTIONS = {\n \"{name}\": \"Current filename of the photo\",\n \"{original_name}\": \"Photo's original filename when imported to Photos\",\n \"{title}\": \"Title of the photo\",\n \"{descr}\": \"Description of the photo\",\n \"{media_type}\": (\n f\"Special media type resolved in this precedence: {', '.join(t for t in MEDIA_TYPE_DEFAULTS)}. \"\n \"Defaults to 'photo' or 'video' if no special type. \"\n \"Customize one or more media types using format: '{media_type,video=vidéo;time_lapse=vidéo_accélérée}'\"\n ),\n \"{photo_or_video}\": \"'photo' or 'video' depending on what type the image is. To customize, use default value as in '{photo_or_video,photo=fotos;video=videos}'\",\n \"{hdr}\": \"Photo is HDR?; True/False value, use in format '{hdr?VALUE_IF_TRUE,VALUE_IF_FALSE}'\",\n \"{edited}\": \"True if photo has been edited (has adjustments), otherwise False; use in format '{edited?VALUE_IF_TRUE,VALUE_IF_FALSE}'\",\n \"{edited_version}\": \"True if template is being rendered for the edited version of a photo, otherwise False. \",\n \"{favorite}\": \"Photo has been marked as favorite?; True/False value, use in format '{favorite?VALUE_IF_TRUE,VALUE_IF_FALSE}'\",\n \"{created.date}\": \"Photo's creation date in ISO format, e.g. 
'2020-03-22'\",\n \"{created.year}\": \"4-digit year of photo creation time\",\n \"{created.yy}\": \"2-digit year of photo creation time\",\n \"{created.mm}\": \"2-digit month of the photo creation time (zero padded)\",\n \"{created.month}\": \"Month name in user's locale of the photo creation time\",\n \"{created.mon}\": \"Month abbreviation in the user's locale of the photo creation time\",\n \"{created.dd}\": \"2-digit day of the month (zero padded) of photo creation time\",\n \"{created.dow}\": \"Day of week in user's locale of the photo creation time\",\n \"{created.doy}\": \"3-digit day of year (e.g Julian day) of photo creation time, starting from 1 (zero padded)\",\n \"{created.hour}\": \"2-digit hour of the photo creation time\",\n \"{created.min}\": \"2-digit minute of the photo creation time\",\n \"{created.sec}\": \"2-digit second of the photo creation time\",\n \"{created.strftime}\": \"Apply strftime template to file creation date/time. Should be used in form \"\n + \"{created.strftime,TEMPLATE} where TEMPLATE is a valid strftime template, e.g. \"\n + \"{created.strftime,%Y-%U} would result in year-week number of year: '2020-23'. \"\n + \"If used with no template will return null value. \"\n + \"See https://strftime.org/ for help on strftime templates.\",\n \"{modified.date}\": \"Photo's modification date in ISO format, e.g. '2020-03-22'; uses creation date if photo is not modified\",\n \"{modified.year}\": \"4-digit year of photo modification time; uses creation date if photo is not modified\",\n \"{modified.yy}\": \"2-digit year of photo modification time; uses creation date if photo is not modified\",\n \"{modified.mm}\": \"2-digit month of the photo modification time (zero padded); uses creation date if photo is not modified\",\n \"{modified.month}\": \"Month name in user's locale of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.mon}\": \"Month abbreviation in the user's locale of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.dd}\": \"2-digit day of the month (zero padded) of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.dow}\": \"Day of week in user's locale of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.doy}\": \"3-digit day of year (e.g Julian day) of photo modification time, starting from 1 (zero padded); uses creation date if photo is not modified\",\n \"{modified.hour}\": \"2-digit hour of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.min}\": \"2-digit minute of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.sec}\": \"2-digit second of the photo modification time; uses creation date if photo is not modified\",\n \"{modified.strftime}\": \"Apply strftime template to file modification date/time. Should be used in form \"\n + \"{modified.strftime,TEMPLATE} where TEMPLATE is a valid strftime template, e.g. \"\n + \"{modified.strftime,%Y-%U} would result in year-week number of year: '2020-23'. \"\n + \"If used with no template will return null value. Uses creation date if photo is not modified. \"\n + \"See https://strftime.org/ for help on strftime templates.\",\n \"{today.date}\": \"Current date in iso format, e.g. 
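# The strftime behaviour documented for {created.strftime} and
# {modified.strftime} above is plain datetime.strftime; e.g. the '%Y-%U'
# (year - week number of year) pattern from the description:
import datetime

dt = datetime.datetime(2020, 6, 6)
print(dt.strftime("%Y-%U"))  # e.g. '2020-22'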
'2020-03-22'\",\n \"{today.year}\": \"4-digit year of current date\",\n \"{today.yy}\": \"2-digit year of current date\",\n \"{today.mm}\": \"2-digit month of the current date (zero padded)\",\n \"{today.month}\": \"Month name in user's locale of the current date\",\n \"{today.mon}\": \"Month abbreviation in the user's locale of the current date\",\n \"{today.dd}\": \"2-digit day of the month (zero padded) of current date\",\n \"{today.dow}\": \"Day of week in user's locale of the current date\",\n \"{today.doy}\": \"3-digit day of year (e.g Julian day) of current date, starting from 1 (zero padded)\",\n \"{today.hour}\": \"2-digit hour of the current date\",\n \"{today.min}\": \"2-digit minute of the current date\",\n \"{today.sec}\": \"2-digit second of the current date\",\n \"{today.strftime}\": \"Apply strftime template to current date/time. Should be used in form \"\n + \"{today.strftime,TEMPLATE} where TEMPLATE is a valid strftime template, e.g. \"\n + \"{today.strftime,%Y-%U} would result in year-week number of year: '2020-23'. \"\n + \"If used with no template will return null value. \"\n + \"See https://strftime.org/ for help on strftime templates.\",\n \"{place.name}\": \"Place name from the photo's reverse geolocation data, as displayed in Photos\",\n \"{place.country_code}\": \"The ISO country code from the photo's reverse geolocation data\",\n \"{place.name.country}\": \"Country name from the photo's reverse geolocation data\",\n \"{place.name.state_province}\": \"State or province name from the photo's reverse geolocation data\",\n \"{place.name.city}\": \"City or locality name from the photo's reverse geolocation data\",\n \"{place.name.area_of_interest}\": \"Area of interest name (e.g. landmark or public place) from the photo's reverse geolocation data\",\n \"{place.address}\": \"Postal address from the photo's reverse geolocation data, e.g. '2007 18th St NW, Washington, DC 20009, United States'\",\n \"{place.address.street}\": \"Street part of the postal address, e.g. '2007 18th St NW'\",\n \"{place.address.city}\": \"City part of the postal address, e.g. 'Washington'\",\n \"{place.address.state_province}\": \"State/province part of the postal address, e.g. 'DC'\",\n \"{place.address.postal_code}\": \"Postal code part of the postal address, e.g. '20009'\",\n \"{place.address.country}\": \"Country name of the postal address, e.g. 'United States'\",\n \"{place.address.country_code}\": \"ISO country code of the postal address, e.g. 'US'\",\n \"{searchinfo.season}\": \"Season of the year associated with a photo, e.g. 'Summer'; (Photos 5+ only, applied automatically by Photos' image categorization algorithms).\",\n \"{exif.camera_make}\": \"Camera make from original photo's EXIF information as imported by Photos, e.g. 'Apple'\",\n \"{exif.camera_model}\": \"Camera model from original photo's EXIF information as imported by Photos, e.g. 'iPhone 6s'\",\n \"{exif.lens_model}\": \"Lens model from original photo's EXIF information as imported by Photos, e.g. 'iPhone 6s back camera 4.15mm f/2.2'\",\n \"{uuid}\": \"Photo's internal universally unique identifier (UUID) for the photo, a 36-character string unique to the photo, e.g. '128FB4C6-0B16-4E7D-9108-FB2E90DA1546'\",\n \"{id}\": \"A unique number for the photo based on its primary key in the Photos database. \"\n + \"A sequential integer, e.g. 1, 2, 3...etc. Each asset associated with a photo (e.g. an image and Live Photo preview) will share the same id. \"\n + \"May be formatted using a python string format code. 
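# The "python string format code" mentioned for {id} (and for the *_seq
# fields below) is standard str.format() / format() formatting:
print("{0:05d}".format(3))  # -> '00003'
print(format(42, "05d"))    # -> '00042'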
\"\n    + \"For example, to format as a 5-digit integer and pad with zeros, use '{id:05d}' which results in \"\n    + \"00001, 00002, 00003...etc. \",\n    \"{album_seq}\": \"An integer, starting at 0, indicating the photo's index (sequence) in the containing album. \"\n    + \"Only valid when used in a '--filename' template and only when '{album}' or '{folder_album}' is used in the '--directory' template. \"\n    + 'For example \\'--directory \"{folder_album}\" --filename \"{album_seq}_{original_name}\"\\'. '\n    + \"To start counting at a value other than 0, append a period and the starting value to the field name. \"\n    + \"For example, to start counting at 1 instead of 0: '{album_seq.1}'. \"\n    + \"May be formatted using a python string format code. \"\n    + \"For example, to format as a 5-digit integer and pad with zeros, use '{album_seq:05d}' which results in \"\n    + \"00000, 00001, 00002...etc. \"\n    + \"This may result in incorrect sequences if you have duplicate albums with the same name; see also '{folder_album_seq}'.\",\n    \"{folder_album_seq}\": \"An integer, starting at 0, indicating the photo's index (sequence) in the containing album and folder path. \"\n    + \"Only valid when used in a '--filename' template and only when '{folder_album}' is used in the '--directory' template. \"\n    + 'For example \\'--directory \"{folder_album}\" --filename \"{folder_album_seq}_{original_name}\"\\'. '\n    + \"To start counting at a value other than 0, append a period and the starting value to the field name. \"\n    + \"For example, to start counting at 1 instead of 0: '{folder_album_seq.1}'. \"\n    + \"May be formatted using a python string format code. \"\n    + \"For example, to format as a 5-digit integer and pad with zeros, use '{folder_album_seq:05d}' which results in \"\n    + \"00000, 00001, 00002...etc. \"\n    + \"This may result in incorrect sequences if you have duplicate albums with the same name in the same folder; see also '{album_seq}'.\",\n    \"{comma}\": \"A comma: ','\",\n    \"{semicolon}\": \"A semicolon: ';'\",\n    \"{questionmark}\": \"A question mark: '?'\",\n    \"{pipe}\": \"A vertical pipe: '|'\",\n    \"{openbrace}\": \"An open brace: '{'\",\n    \"{closebrace}\": \"A close brace: '}'\",\n    \"{openparens}\": \"An open parenthesis: '('\",\n    \"{closeparens}\": \"A close parenthesis: ')'\",\n    \"{openbracket}\": \"An open bracket: '['\",\n    \"{closebracket}\": \"A close bracket: ']'\",\n    \"{newline}\": r\"A newline: '\\n'\",\n    \"{lf}\": r\"A line feed: '\\n', alias for {newline}\",\n    \"{cr}\": r\"A carriage return: '\\r'\",\n    \"{crlf}\": r\"A carriage return + line feed: '\\r\\n'\",\n    \"{osxphotos_version}\": f\"The osxphotos version, e.g. '{__version__}'\",\n    \"{osxphotos_cmd_line}\": \"The full command line used to run osxphotos\",\n}\n\nTEMPLATE_SUBSTITUTIONS_PATHLIB = {\n    \"{export_dir}\": \"The full path to the export directory\",\n    \"{filepath}\": \"The full path to the exported file\",\n}\n\n# Permitted multi-value substitutions (each of these returns None or 1 or more values)\nTEMPLATE_SUBSTITUTIONS_MULTI_VALUED = {\n    \"{album}\": \"Album(s) photo is contained in\",\n    \"{folder_album}\": \"Folder path + album photo is contained in. e.g. 'Folder/Subfolder/Album' or just 'Album' if no enclosing folder\",\n    \"{keyword}\": \"Keyword(s) assigned to photo\",\n    \"{person}\": \"Person(s) / face(s) in a photo\",\n    \"{label}\": \"Image categorization label associated with a photo (Photos 5+ only). \"\n    \"Labels are added automatically by Photos using machine learning algorithms to categorize images. 
\"\n \"These are not the same as {keyword} which refers to the user-defined keywords/tags applied in Photos.\",\n \"{label_normalized}\": \"All lower case version of 'label' (Photos 5+ only)\",\n \"{comment}\": \"Comment(s) on shared Photos; format is 'Person name: comment text' (Photos 5+ only)\",\n \"{exiftool}\": \"Format: '{exiftool:GROUP:TAGNAME}'; use exiftool (https://exiftool.org) to extract metadata, in form GROUP:TAGNAME, from image. \"\n \"E.g. '{exiftool:EXIF:Make}' to get camera make, or {exiftool:IPTC:Keywords} to extract keywords. \"\n \"See https://exiftool.org/TagNames/ for list of valid tag names. You must specify group (e.g. EXIF, IPTC, etc) \"\n \"as used in `exiftool -G`. exiftool must be installed in the path to use this template.\",\n \"{searchinfo.holiday}\": \"Holiday names associated with a photo, e.g. 'Christmas Day'; (Photos 5+ only, applied automatically by Photos' image categorization algorithms).\",\n \"{searchinfo.activity}\": \"Activities associated with a photo, e.g. 'Sporting Event'; (Photos 5+ only, applied automatically by Photos' image categorization algorithms).\",\n \"{searchinfo.venue}\": \"Venues associated with a photo, e.g. name of restaurant; (Photos 5+ only, applied automatically by Photos' image categorization algorithms).\",\n \"{searchinfo.venue_type}\": \"Venue types associated with a photo, e.g. 'Restaurant'; (Photos 5+ only, applied automatically by Photos' image categorization algorithms).\",\n \"{photo}\": \"Provides direct access to the PhotoInfo object for the photo. \"\n + \"Must be used in format '{photo.property}' where 'property' represents a PhotoInfo property. \"\n + \"For example: '{photo.favorite}' is the same as '{favorite}' and '{photo.place.name}' is the same as '{place.name}'. \"\n + \"'{photo}' provides access to properties that are not available as separate template fields but it assumes some knowledge of \"\n + \"the underlying PhotoInfo class. See https://rhettbull.github.io/osxphotos/ for additional documentation on the PhotoInfo class.\",\n \"{detected_text}\": \"List of text strings found in the image after performing text detection. \"\n + \"Using '{detected_text}' will cause osxphotos to perform text detection on your photos using the built-in macOS text detection algorithms which will slow down your export. \"\n + \"The results for each photo will be cached in the export database so that future exports with '--update' do not need to reprocess each photo. \"\n + \"You may pass a confidence threshold value between 0.0 and 1.0 after a colon as in '{detected_text:0.5}'; \"\n + f\"The default confidence threshold is {TEXT_DETECTION_CONFIDENCE_THRESHOLD}. \"\n + \"'{detected_text}' works only on macOS Catalina (10.15) or later. \"\n + \"Note: this feature is not the same thing as Live Text in macOS Monterey, which osxphotos does not yet support.\",\n \"{shell_quote}\": \"Use in form '{shell_quote,TEMPLATE}'; quotes the rendered TEMPLATE value(s) for safe usage in the shell, e.g. My file.jpeg => 'My file.jpeg'; only adds quotes if needed.\",\n \"{strip}\": \"Use in form '{strip,TEMPLATE}'; strips whitespace from begining and end of rendered TEMPLATE value(s).\",\n \"{function}\": \"Execute a python function from an external file and use return value as template substitution. \"\n + \"Use in format: {function:file.py::function_name} where 'file.py' is the name of the python file and 'function_name' is the name of the function to call. \"\n + \"The function will be passed the PhotoInfo object for the photo. 
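# A sketch of what a {function} target file could look like, per the contract
# described above (called with the PhotoInfo object, returns a str or list of
# str); the function name and attribute fallback here are illustrative, not
# part of osxphotos:
def title_or_name(photo, options=None, **kwargs):
    """Return the photo's title, falling back to its original filename."""
    return photo.title or photo.original_filename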
\"\n + \"See https://github.com/RhetTbull/osxphotos/blob/master/examples/template_function.py for an example of how to implement a template function.\",\n}\n\nFILTER_VALUES = {\n \"lower\": \"Convert value to lower case, e.g. 'Value' => 'value'.\",\n \"upper\": \"Convert value to upper case, e.g. 'Value' => 'VALUE'.\",\n \"strip\": \"Strip whitespace from beginning/end of value, e.g. ' Value ' => 'Value'.\",\n \"titlecase\": \"Convert value to title case, e.g. 'my value' => 'My Value'.\",\n \"capitalize\": \"Capitalize first word of value and convert other words to lower case, e.g. 'MY VALUE' => 'My value'.\",\n \"braces\": \"Enclose value in curly braces, e.g. 'value => '{value}'.\",\n \"parens\": \"Enclose value in parentheses, e.g. 'value' => '(value')\",\n \"brackets\": \"Enclose value in brackets, e.g. 'value' => '[value]'\",\n \"shell_quote\": \"Quotes the value for safe usage in the shell, e.g. My file.jpeg => 'My file.jpeg'; only adds quotes if needed.\",\n \"function\": \"Run custom python function to filter value; use in format 'function:/path/to/file.py::function_name'. See example at https://github.com/RhetTbull/osxphotos/blob/master/examples/template_filter.py\",\n}\n\n# Just the substitutions without the braces\nSINGLE_VALUE_SUBSTITUTIONS = [\n field.replace(\"{\", \"\").replace(\"}\", \"\") for field in TEMPLATE_SUBSTITUTIONS\n]\n\nPATHLIB_SUBSTITUTIONS = [\n field.replace(\"{\", \"\").replace(\"}\", \"\") for field in TEMPLATE_SUBSTITUTIONS_PATHLIB\n]\n\nMULTI_VALUE_SUBSTITUTIONS = [\n field.replace(\"{\", \"\").replace(\"}\", \"\")\n for field in TEMPLATE_SUBSTITUTIONS_MULTI_VALUED\n]\n\nFIELD_NAMES = (\n SINGLE_VALUE_SUBSTITUTIONS + MULTI_VALUE_SUBSTITUTIONS + PATHLIB_SUBSTITUTIONS\n)\n\n# default values for string manipulation template options\nINPLACE_DEFAULT = \",\"\nPATH_SEP_DEFAULT = os.path.sep\n\nPUNCTUATION = {\n \"comma\": \",\",\n \"semicolon\": \";\",\n \"pipe\": \"|\",\n \"openbrace\": \"{\",\n \"closebrace\": \"}\",\n \"openparens\": \"(\",\n \"closeparens\": \")\",\n \"openbracket\": \"[\",\n \"closebracket\": \"]\",\n \"questionmark\": \"?\",\n \"newline\": \"\\n\",\n \"lf\": \"\\n\",\n \"cr\": \"\\r\",\n \"crlf\": \"\\r\\n\",\n}\n\n\n@dataclass\nclass RenderOptions:\n \"\"\"Options for PhotoTemplate.render\n\n template: str template\n none_str: str to use default for None values, default is '_'\n path_sep: optional string to use as path separator, default is os.path.sep\n expand_inplace: expand multi-valued substitutions in-place as a single string\n instead of returning individual strings\n inplace_sep: optional string to use as separator between multi-valued keywords\n with expand_inplace; default is ','\n filename: if True, template output will be sanitized to produce valid file name\n dirname: if True, template output will be sanitized to produce valid directory name\n strip: if True, strips leading/trailing whitespace from rendered templates\n edited_version: set to True if you want {edited_version} to resolve to True (e.g. 
exporting edited version of photo)\n export_dir: set to the export directory if you want to evalute {export_dir} template\n dest_path: set to the destination path of the photo (for use by {function} template), only valid with --filename\n filepath: set to value for filepath of the exported photo if you want to evaluate {filepath} template\n quote: quote path templates for execution in the shell\n exportdb: ExportDB object\n \"\"\"\n\n none_str: str = \"_\"\n path_sep: Optional[str] = PATH_SEP_DEFAULT\n expand_inplace: bool = False\n inplace_sep: Optional[str] = INPLACE_DEFAULT\n filename: bool = False\n dirname: bool = False\n strip: bool = False\n edited_version: bool = False\n export_dir: Optional[str] = None\n dest_path: Optional[str] = None\n filepath: Optional[str] = None\n quote: bool = False\n exportdb: Optional[ExportDB_ABC] = None\n\n\nclass PhotoTemplateParser:\n \"\"\"Parser for PhotoTemplate\"\"\"\n\n # implemented as Singleton\n\n def __new__(cls, *args, **kwargs):\n \"\"\"create new object or return instance of already created singleton\"\"\"\n if not hasattr(cls, \"instance\") or not cls.instance:\n cls.instance = super().__new__(cls)\n\n return cls.instance\n\n def __init__(self):\n \"\"\"return existing singleton or create a new one\"\"\"\n\n if hasattr(self, \"metamodel\"):\n return\n\n self.metamodel = metamodel_from_file(MTL_GRAMMAR_MODEL, skipws=False)\n\n def parse(self, template_statement):\n \"\"\"Parse a template_statement string\"\"\"\n return self.metamodel.model_from_str(template_statement)\n\n def fields(self, template_statement):\n \"\"\"Return list of fields found in a template statement; does not verify that fields are valid\"\"\"\n model = self.parse(template_statement)\n return [ts.template.field for ts in model.template_strings if ts.template]\n\n\nclass PhotoTemplate:\n \"\"\"PhotoTemplate class to render a template string from a PhotoInfo object\"\"\"\n\n def __init__(self, photo, exiftool_path=None):\n \"\"\"Inits PhotoTemplate class with photo\n\n Args:\n photo: a PhotoInfo instance.\n exiftool_path: optional path to exiftool for use with {exiftool:} template; if not provided, will look for exiftool in $PATH\n \"\"\"\n self.photo = photo\n self.exiftool_path = exiftool_path\n\n # holds value of current date/time for {today.x} fields\n # gets initialized in get_template_value\n self.today = None\n\n # get parser singleton\n self.parser = PhotoTemplateParser()\n\n # initialize render options\n # this will be done in render() but for testing, some of the lookup functions are called directly\n options = RenderOptions()\n self.options = options\n self.path_sep = options.path_sep\n self.inplace_sep = options.inplace_sep\n self.edited_version = options.edited_version\n self.none_str = options.none_str\n self.expand_inplace = options.expand_inplace\n self.filename = options.filename\n self.dirname = options.dirname\n self.strip = options.strip\n self.export_dir = options.export_dir\n self.filepath = options.filepath\n self.quote = options.quote\n self.dest_path = options.dest_path\n self.exportdb = options.exportdb or ExportDBInMemory(None)\n\n def render(\n self,\n template: str,\n options: RenderOptions,\n ):\n \"\"\"Render a filename or directory template\n\n Args:\n template: str template\n options: a RenderOptions instance\n\n Returns:\n ([rendered_strings], [unmatched]): tuple of list of rendered strings and list of unmatched template values\n \"\"\"\n\n if type(template) is not str:\n raise TypeError(f\"template must be type str, not 
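# PhotoTemplateParser above is a __new__-based singleton, presumably so the
# textX metamodel is only built once; the same pattern in isolation:
class Singleton:
    def __new__(cls, *args, **kwargs):
        # reuse the cached instance if one was already created
        if not hasattr(cls, "instance") or not cls.instance:
            cls.instance = super().__new__(cls)
        return cls.instance

a, b = Singleton(), Singleton()
assert a is b  # both names refer to the same object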
{type(template)}\")\n\n self.options = options\n self.path_sep = options.path_sep\n self.inplace_sep = options.inplace_sep\n self.edited_version = options.edited_version\n self.none_str = options.none_str\n self.expand_inplace = options.expand_inplace\n self.filename = options.filename\n self.dirname = options.dirname\n self.strip = options.strip\n self.export_dir = options.export_dir\n self.dest_path = options.dest_path\n self.filepath = options.filepath\n self.quote = options.quote\n self.dest_path = options.dest_path\n self.exportdb = options.exportdb or self.exportdb\n\n try:\n model = self.parser.parse(template)\n except TextXSyntaxError as e:\n raise ValueError(f\"SyntaxError: {e}\")\n\n if not model:\n # empty string\n return [], []\n\n return self._render_statement(model)\n\n def _render_statement(\n self,\n statement,\n path_sep=None,\n ):\n path_sep = path_sep or self.path_sep\n results = []\n unmatched = []\n for ts in statement.template_strings:\n results, unmatched = self._render_template_string(\n ts, results=results, unmatched=unmatched, path_sep=path_sep\n )\n\n rendered_strings = results\n\n if self.filename:\n rendered_strings = [\n sanitize_filename(rendered_str) for rendered_str in rendered_strings\n ]\n\n if self.strip:\n rendered_strings = [\n rendered_str.strip() for rendered_str in rendered_strings\n ]\n\n return rendered_strings, unmatched\n\n def _render_template_string(\n self,\n ts,\n path_sep,\n results=None,\n unmatched=None,\n ):\n \"\"\"Render a TemplateString object\"\"\"\n\n results = results or [\"\"]\n unmatched = unmatched or []\n\n if ts.template:\n # have a template field to process\n field = ts.template.field\n field_part = field.split(\".\")[0]\n if field not in FIELD_NAMES and field_part not in FIELD_NAMES:\n unmatched.append(field)\n return [], unmatched\n\n subfield = ts.template.subfield\n\n # process filters\n filters = []\n if ts.template.filter is not None:\n filters = ts.template.filter.value\n\n # process path_sep\n if ts.template.pathsep is not None:\n path_sep = ts.template.pathsep.value\n\n # process delim\n if ts.template.delim is not None:\n # if value is None, means format was {+field}\n delim = ts.template.delim.value or \"\"\n else:\n delim = None\n\n if ts.template.bool is not None:\n is_bool = True\n if ts.template.bool.value is not None:\n bool_val, u = self._render_statement(\n ts.template.bool.value,\n path_sep=path_sep,\n )\n unmatched.extend(u)\n else:\n # blank bool value\n bool_val = [\"\"]\n else:\n is_bool = False\n bool_val = None\n\n # process default\n if ts.template.default is not None:\n # default is also a TemplateString\n if ts.template.default.value is not None:\n default, u = self._render_statement(\n ts.template.default.value,\n path_sep=path_sep,\n )\n unmatched.extend(u)\n else:\n # blank default value\n default = [\"\"]\n else:\n default = []\n\n # process conditional\n if ts.template.conditional is not None:\n operator = ts.template.conditional.operator\n negation = ts.template.conditional.negation\n if ts.template.conditional.value is not None:\n # conditional value is also a TemplateString\n conditional_value, u = self._render_statement(\n ts.template.conditional.value,\n path_sep=path_sep,\n )\n unmatched.extend(u)\n else:\n # this shouldn't happen\n conditional_value = [\"\"]\n else:\n operator = None\n negation = None\n conditional_value = []\n\n vals = []\n if (\n field in SINGLE_VALUE_SUBSTITUTIONS\n or field.split(\".\")[0] in SINGLE_VALUE_SUBSTITUTIONS\n ):\n vals = self.get_template_value(\n 
field,\n default=default,\n subfield=subfield,\n # delim=delim or self.inplace_sep,\n # path_sep=path_sep,\n )\n elif field == \"exiftool\":\n if subfield is None:\n raise ValueError(\n \"SyntaxError: GROUP:NAME subfield must not be null with {exiftool:GROUP:NAME}'\"\n )\n vals = self.get_template_value_exiftool(\n subfield,\n )\n elif field == \"function\":\n if subfield is None:\n raise ValueError(\n \"SyntaxError: filename and function must not be null with {function::filename.py:function_name}\"\n )\n vals = self.get_template_value_function(\n subfield,\n )\n elif field in MULTI_VALUE_SUBSTITUTIONS or field.startswith(\"photo\"):\n vals = self.get_template_value_multi(\n field, subfield, path_sep=path_sep, default=default\n )\n elif field.split(\".\")[0] in PATHLIB_SUBSTITUTIONS:\n vals = self.get_template_value_pathlib(field)\n else:\n unmatched.append(field)\n return [], unmatched\n\n vals = [val for val in vals if val is not None]\n\n if self.expand_inplace or delim is not None:\n sep = delim if delim is not None else self.inplace_sep\n vals = [sep.join(sorted(vals))] if vals else []\n\n for filter_ in filters:\n vals = self.get_template_value_filter(filter_, vals)\n\n # process find/replace\n if ts.template.findreplace:\n new_vals = []\n for val in vals:\n for pair in ts.template.findreplace.pairs:\n find = pair.find or \"\"\n repl = pair.replace or \"\"\n val = val.replace(find, repl)\n new_vals.append(val)\n vals = new_vals\n\n if operator:\n # have a conditional operator\n\n def string_test(test_function):\n \"\"\"Perform string comparison using test_function; closure to capture conditional_value, vals, negation\"\"\"\n match = False\n for c in conditional_value:\n for v in vals:\n if test_function(v, c):\n match = True\n break\n if match:\n break\n if (match and not negation) or (negation and not match):\n return [\"True\"]\n else:\n return []\n\n def comparison_test(test_function):\n \"\"\"Perform numerical comparisons using test_function; closure to capture conditional_val, vals, negation\"\"\"\n if len(vals) != 1 or len(conditional_value) != 1:\n raise ValueError(\n f\"comparison operators may only be used with a single value: {vals} {conditional_value}\"\n )\n try:\n match = bool(\n test_function(float(vals[0]), float(conditional_value[0]))\n )\n if (match and not negation) or (negation and not match):\n return [\"True\"]\n else:\n return []\n except ValueError as e:\n raise ValueError(\n f\"comparison operators may only be used with values that can be converted to numbers: {vals} {conditional_value}\"\n )\n\n if operator in [\"contains\", \"matches\", \"startswith\", \"endswith\"]:\n # process any \"or\" values separated by \"|\"\n temp_values = []\n for c in conditional_value:\n temp_values.extend(c.split(\"|\"))\n conditional_value = temp_values\n\n if operator == \"contains\":\n vals = string_test(lambda v, c: c in v)\n elif operator == \"matches\":\n vals = string_test(lambda v, c: v == c)\n elif operator == \"startswith\":\n vals = string_test(lambda v, c: v.startswith(c))\n elif operator == \"endswith\":\n vals = string_test(lambda v, c: v.endswith(c))\n elif operator == \"==\":\n match = sorted(vals) == sorted(conditional_value)\n if (match and not negation) or (negation and not match):\n vals = [\"True\"]\n else:\n vals = []\n elif operator == \"!=\":\n match = sorted(vals) != sorted(conditional_value)\n if (match and not negation) or (negation and not match):\n vals = [\"True\"]\n else:\n vals = []\n elif operator == \"<\":\n vals = comparison_test(lambda v, c: v 
< c)\n elif operator == \"<=\":\n vals = comparison_test(lambda v, c: v <= c)\n elif operator == \">\":\n vals = comparison_test(lambda v, c: v > c)\n elif operator == \">=\":\n vals = comparison_test(lambda v, c: v >= c)\n\n if is_bool:\n vals = default if not vals else bool_val\n elif not vals:\n vals = default or [self.none_str]\n\n pre = ts.pre or \"\"\n post = ts.post or \"\"\n\n rendered = [pre + val + post for val in vals]\n results_new = []\n for ren in rendered:\n for res in results:\n res_new = res + ren\n results_new.append(res_new)\n results = results_new\n\n else:\n # no template\n pre = ts.pre or \"\"\n post = ts.post or \"\"\n results = [r + pre + post for r in results]\n\n return results, unmatched\n\n def get_template_value(\n self,\n field,\n default,\n subfield=None,\n # bool_val=None,\n # delim=None,\n # path_sep=None,\n ):\n \"\"\"lookup value for template field (single-value template substitutions)\n\n Args:\n field: template field to find value for.\n default: the default value provided by the user\n bool_val: True value if expression is boolean\n delim: delimiter for expand in place\n path_sep: path separator for fields that are path-like\n subfield: subfield (value after : in field)\n\n Returns:\n The matching template value (which may be None).\n\n Raises:\n ValueError if no rule exists for field.\n \"\"\"\n\n if self.photo.uuid is None:\n return []\n\n # initialize today with current date/time if needed\n if self.today is None:\n self.today = datetime.datetime.now()\n\n value = None\n\n # wouldn't a switch/case statement be nice...\n if field == \"name\":\n value = pathlib.Path(self.photo.filename).stem\n elif field == \"original_name\":\n value = pathlib.Path(self.photo.original_filename).stem\n elif field == \"title\":\n value = self.photo.title\n elif field == \"descr\":\n value = self.photo.description\n elif field == \"media_type\":\n value = self.get_media_type(default)\n elif field == \"photo_or_video\":\n value = self.get_photo_video_type(default)\n elif field == \"hdr\":\n value = \"hdr\" if self.photo.hdr else None\n elif field == \"edited\":\n value = \"edited\" if self.photo.hasadjustments else None\n elif field == \"edited_version\":\n value = \"edited_version\" if self.edited_version else None\n elif field == \"favorite\":\n value = \"favorite\" if self.photo.favorite else None\n elif field == \"created.date\":\n value = DateTimeFormatter(self.photo.date).date\n elif field == \"created.year\":\n value = DateTimeFormatter(self.photo.date).year\n elif field == \"created.yy\":\n value = DateTimeFormatter(self.photo.date).yy\n elif field == \"created.mm\":\n value = DateTimeFormatter(self.photo.date).mm\n elif field == \"created.month\":\n value = DateTimeFormatter(self.photo.date).month\n elif field == \"created.mon\":\n value = DateTimeFormatter(self.photo.date).mon\n elif field == \"created.dd\":\n value = DateTimeFormatter(self.photo.date).dd\n elif field == \"created.dow\":\n value = DateTimeFormatter(self.photo.date).dow\n elif field == \"created.doy\":\n value = DateTimeFormatter(self.photo.date).doy\n elif field == \"created.hour\":\n value = DateTimeFormatter(self.photo.date).hour\n elif field == \"created.min\":\n value = DateTimeFormatter(self.photo.date).min\n elif field == \"created.sec\":\n value = DateTimeFormatter(self.photo.date).sec\n elif field == \"created.strftime\":\n if default:\n try:\n value = self.photo.date.strftime(default[0])\n except:\n raise ValueError(f\"Invalid strftime template: '{default}'\")\n else:\n value = None\n 
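# The result-combination loop in _render_template_string above is a cartesian
# product: every rendered value of a field is appended to every result string
# accumulated so far, which is how one template can yield several outputs.
# A standalone illustration:
results = ["IMG_", "DSC_"]
vals = ["beach", "sunset"]
results = [res + val for val in vals for res in results]
print(results)  # ['IMG_beach', 'DSC_beach', 'IMG_sunset', 'DSC_sunset']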
elif field == \"modified.date\":\n value = (\n DateTimeFormatter(self.photo.date_modified).date\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).date\n )\n elif field == \"modified.year\":\n value = (\n DateTimeFormatter(self.photo.date_modified).year\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).year\n )\n elif field == \"modified.yy\":\n value = (\n DateTimeFormatter(self.photo.date_modified).yy\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).yy\n )\n elif field == \"modified.mm\":\n value = (\n DateTimeFormatter(self.photo.date_modified).mm\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).mm\n )\n elif field == \"modified.month\":\n value = (\n DateTimeFormatter(self.photo.date_modified).month\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).month\n )\n elif field == \"modified.mon\":\n value = (\n DateTimeFormatter(self.photo.date_modified).mon\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).mon\n )\n elif field == \"modified.dd\":\n value = (\n DateTimeFormatter(self.photo.date_modified).dd\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).dd\n )\n elif field == \"modified.dow\":\n value = (\n DateTimeFormatter(self.photo.date_modified).dow\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).dow\n )\n elif field == \"modified.doy\":\n value = (\n DateTimeFormatter(self.photo.date_modified).doy\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).doy\n )\n elif field == \"modified.hour\":\n value = (\n DateTimeFormatter(self.photo.date_modified).hour\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).hour\n )\n elif field == \"modified.min\":\n value = (\n DateTimeFormatter(self.photo.date_modified).min\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).min\n )\n elif field == \"modified.sec\":\n value = (\n DateTimeFormatter(self.photo.date_modified).sec\n if self.photo.date_modified\n else DateTimeFormatter(self.photo.date).sec\n )\n elif field == \"modified.strftime\":\n if default:\n try:\n date = self.photo.date_modified or self.photo.date\n value = date.strftime(default[0])\n except:\n raise ValueError(f\"Invalid strftime template: '{default}'\")\n else:\n value = None\n elif field == \"today.date\":\n value = DateTimeFormatter(self.today).date\n elif field == \"today.year\":\n value = DateTimeFormatter(self.today).year\n elif field == \"today.yy\":\n value = DateTimeFormatter(self.today).yy\n elif field == \"today.mm\":\n value = DateTimeFormatter(self.today).mm\n elif field == \"today.month\":\n value = DateTimeFormatter(self.today).month\n elif field == \"today.mon\":\n value = DateTimeFormatter(self.today).mon\n elif field == \"today.dd\":\n value = DateTimeFormatter(self.today).dd\n elif field == \"today.dow\":\n value = DateTimeFormatter(self.today).dow\n elif field == \"today.doy\":\n value = DateTimeFormatter(self.today).doy\n elif field == \"today.hour\":\n value = DateTimeFormatter(self.today).hour\n elif field == \"today.min\":\n value = DateTimeFormatter(self.today).min\n elif field == \"today.sec\":\n value = DateTimeFormatter(self.today).sec\n elif field == \"today.strftime\":\n if default:\n try:\n value = self.today.strftime(default[0])\n except:\n raise ValueError(f\"Invalid strftime template: '{default}'\")\n else:\n value = None\n elif field == \"place.name\":\n value = self.photo.place.name if self.photo.place else 
None\n elif field == \"place.country_code\":\n value = self.photo.place.country_code if self.photo.place else None\n elif field == \"place.name.country\":\n value = (\n self.photo.place.names.country[0]\n if self.photo.place and self.photo.place.names.country\n else None\n )\n elif field == \"place.name.state_province\":\n value = (\n self.photo.place.names.state_province[0]\n if self.photo.place and self.photo.place.names.state_province\n else None\n )\n elif field == \"place.name.city\":\n value = (\n self.photo.place.names.city[0]\n if self.photo.place and self.photo.place.names.city\n else None\n )\n elif field == \"place.name.area_of_interest\":\n value = (\n self.photo.place.names.area_of_interest[0]\n if self.photo.place and self.photo.place.names.area_of_interest\n else None\n )\n elif field == \"place.address\":\n value = (\n self.photo.place.address_str\n if self.photo.place and self.photo.place.address_str\n else None\n )\n elif field == \"place.address.street\":\n value = (\n self.photo.place.address.street\n if self.photo.place and self.photo.place.address.street\n else None\n )\n elif field == \"place.address.city\":\n value = (\n self.photo.place.address.city\n if self.photo.place and self.photo.place.address.city\n else None\n )\n elif field == \"place.address.state_province\":\n value = (\n self.photo.place.address.state_province\n if self.photo.place and self.photo.place.address.state_province\n else None\n )\n elif field == \"place.address.postal_code\":\n value = (\n self.photo.place.address.postal_code\n if self.photo.place and self.photo.place.address.postal_code\n else None\n )\n elif field == \"place.address.country\":\n value = (\n self.photo.place.address.country\n if self.photo.place and self.photo.place.address.country\n else None\n )\n elif field == \"place.address.country_code\":\n value = (\n self.photo.place.address.iso_country_code\n if self.photo.place and self.photo.place.address.iso_country_code\n else None\n )\n elif field == \"searchinfo.season\":\n value = self.photo.search_info.season if self.photo.search_info else None\n elif field == \"exif.camera_make\":\n value = self.photo.exif_info.camera_make if self.photo.exif_info else None\n elif field == \"exif.camera_model\":\n value = self.photo.exif_info.camera_model if self.photo.exif_info else None\n elif field == \"exif.lens_model\":\n value = self.photo.exif_info.lens_model if self.photo.exif_info else None\n elif field == \"uuid\":\n value = self.photo.uuid\n elif field == \"id\":\n value = format_str_value(self.photo._info[\"pk\"], subfield)\n elif field.startswith(\"album_seq\") or field.startswith(\"folder_album_seq\"):\n dest_path = self.dest_path\n if not dest_path:\n value = None\n else:\n if field.startswith(\"album_seq\"):\n album = pathlib.Path(dest_path).name\n album_info = _get_album_by_name(self.photo, album)\n else:\n album_info = _get_album_by_path(self.photo, dest_path)\n value = album_info.photo_index(self.photo) if album_info else None\n if value is not None:\n try:\n start_id = field.split(\".\", 1)\n value = int(value) + int(start_id[1])\n except IndexError:\n pass\n value = format_str_value(value, subfield)\n elif field in PUNCTUATION:\n value = PUNCTUATION[field]\n elif field == \"osxphotos_version\":\n value = __version__\n elif field == \"osxphotos_cmd_line\":\n value = \" \".join(sys.argv)\n else:\n # if here, didn't get a match\n raise ValueError(f\"Unhandled template value: {field}\")\n\n if self.filename:\n value = sanitize_pathpart(value)\n elif self.dirname:\n value = 
sanitize_dirname(value)\n\n # ensure no empty strings in value (see #512)\n value = None if value == \"\" else value\n\n return [value]\n\n def get_template_value_pathlib(self, field):\n \"\"\"lookup value for template pathlib template fields\n\n Args:\n field: template field to find value for.\n\n Returns:\n The matching template value (which may be None).\n\n Raises:\n ValueError if no rule exists for field.\n \"\"\"\n field_stem = field.split(\".\")[0]\n if field_stem not in PATHLIB_SUBSTITUTIONS:\n raise ValueError(f\"SyntaxError: Unknown field: {field}\")\n\n field_value = None\n try:\n field_value = getattr(self, field_stem)\n except AttributeError:\n raise ValueError(f\"Unknown path-like field: {field_stem}\")\n\n value = _get_pathlib_value(field, field_value, self.quote)\n\n if self.filename:\n value = sanitize_pathpart(value)\n elif self.dirname:\n value = sanitize_dirname(value)\n\n return [value]\n\n def get_template_value_filter(self, filter_, values):\n if filter_ == \"lower\":\n if values and type(values) == list:\n value = [v.lower() for v in values]\n else:\n value = [values.lower()] if values else []\n elif filter_ == \"upper\":\n if values and type(values) == list:\n value = [v.upper() for v in values]\n else:\n value = [values.upper()] if values else []\n elif filter_ == \"strip\":\n if values and type(values) == list:\n value = [v.strip() for v in values]\n else:\n value = [values.strip()] if values else []\n elif filter_ == \"capitalize\":\n if values and type(values) == list:\n value = [v.capitalize() for v in values]\n else:\n value = [values.capitalize()] if values else []\n elif filter_ == \"titlecase\":\n if values and type(values) == list:\n value = [v.title() for v in values]\n else:\n value = [values.title()] if values else []\n elif filter_ == \"braces\":\n if values and type(values) == list:\n value = [\"{\" + v + \"}\" for v in values]\n else:\n value = [\"{\" + values + \"}\"] if values else []\n elif filter_ == \"parens\":\n if values and type(values) == list:\n value = [\"(\" + v + \")\" for v in values]\n else:\n value = [\"(\" + values + \")\"] if values else []\n elif filter_ == \"brackets\":\n if values and type(values) == list:\n value = [\"[\" + v + \"]\" for v in values]\n else:\n value = [\"[\" + values + \"]\"] if values else []\n elif filter_ == \"shell_quote\":\n if values and type(values) == list:\n value = [shlex.quote(v) for v in values]\n else:\n value = [shlex.quote(values)] if values else []\n elif filter_.startswith(\"function:\"):\n value = self.get_template_value_filter_function(filter_, values)\n else:\n value = []\n return value\n\n def get_template_value_multi(self, field, subfield, path_sep, default):\n \"\"\"lookup value for template field (multi-value template substitutions)\n\n Args:\n field: template field to find value for.\n subfield: the template subfield value\n path_sep: path separator to use for folder_album field\n default: value of default field\n\n Returns:\n List of the matching template values or [].\n\n Raises:\n ValueError if no rule exists for field.\n \"\"\"\n\n \"\"\" return list of values for a multi-valued template field \"\"\"\n\n if self.photo.uuid is None:\n return []\n\n values = []\n if field == \"album\":\n values = self.photo.burst_albums if self.photo.burst else self.photo.albums\n elif field == \"keyword\":\n values = self.photo.keywords\n elif field == \"person\":\n values = self.photo.persons\n # remove any _UNKNOWN_PERSON values\n values = [val for val in values if val != _UNKNOWN_PERSON]\n elif 
field == \"label\":\n values = self.photo.labels\n elif field == \"label_normalized\":\n values = self.photo.labels_normalized\n elif field == \"folder_album\":\n values = []\n # photos must be in an album to be in a folder\n if self.photo.burst:\n album_info = self.photo.burst_album_info\n else:\n album_info = self.photo.album_info\n for album in album_info:\n if album.folder_names:\n # album in folder\n if self.dirname:\n # being used as a filepath so sanitize each part\n folder = path_sep.join(\n sanitize_dirname(f) for f in album.folder_names\n )\n folder += path_sep + sanitize_dirname(album.title)\n else:\n folder = path_sep.join(album.folder_names)\n folder += path_sep + album.title\n values.append(folder)\n elif self.dirname:\n values.append(sanitize_dirname(album.title))\n else:\n values.append(album.title)\n elif field == \"comment\":\n values = [\n f\"{comment.user}: {comment.text}\" for comment in self.photo.comments\n ]\n elif field == \"searchinfo.holiday\":\n values = self.photo.search_info.holidays if self.photo.search_info else []\n elif field == \"searchinfo.activity\":\n values = self.photo.search_info.activities if self.photo.search_info else []\n elif field == \"searchinfo.venue\":\n values = self.photo.search_info.venues if self.photo.search_info else []\n elif field == \"searchinfo.venue_type\":\n values = (\n self.photo.search_info.venue_types if self.photo.search_info else []\n )\n elif field == \"shell_quote\":\n values = [shlex.quote(v) for v in default if v]\n elif field == \"strip\":\n values = [v.strip() for v in default]\n elif field.startswith(\"photo\"):\n # provide access to PhotoInfo object\n properties = field.split(\".\")\n if len(properties) <= 1:\n raise ValueError(\n \"Missing property in {photo} template. Use in form {photo.property}.\"\n )\n obj = self.photo\n for i in range(1, len(properties)):\n property_ = properties[i]\n try:\n obj = getattr(obj, property_)\n if obj is None:\n break\n except AttributeError:\n raise ValueError(\n \"Invalid property for {photo} template: \" + f\"'{property_}'\"\n )\n if obj is None:\n values = []\n elif isinstance(obj, bool):\n values = [property_] if obj else []\n elif isinstance(obj, (str, int, float)):\n values = [str(obj)]\n else:\n values = [val for val in obj]\n elif field == \"detected_text\":\n values = _get_detected_text(self.photo, self.exportdb, confidence=subfield)\n else:\n raise ValueError(f\"Unhandled template value: {field}\")\n\n # sanitize directory names if needed, folder_album handled differently above\n if self.filename:\n values = [sanitize_pathpart(value) for value in values]\n elif self.dirname and field != \"folder_album\":\n # skip folder_album because it would have been handled above\n values = [sanitize_dirname(value) for value in values]\n\n # If no values, insert None so code below will substitute none_str for None\n values = values or []\n return values\n\n def get_template_value_exiftool(\n self,\n subfield,\n ):\n \"\"\"Get template value for format \"{exiftool:EXIF:Model}\" \"\"\"\n\n if self.photo is None:\n return []\n\n if not self.photo.path:\n return []\n\n exif = ExifToolCaching(self.photo.path, exiftool=self.exiftool_path)\n exifdict = exif.asdict(normalized=True)\n subfield = subfield.lower()\n if subfield in exifdict:\n values = exifdict[subfield]\n values = [values] if not isinstance(values, list) else values\n values = [str(v) for v in values]\n\n # sanitize directory names if needed\n if self.filename:\n values = [sanitize_pathpart(value) for value in values]\n elif 
self.dirname:\n values = [sanitize_dirname(value) for value in values]\n else:\n values = []\n\n return values\n\n def get_template_value_function(\n self,\n subfield,\n ):\n \"\"\"Get template value from external function\"\"\"\n\n if \"::\" not in subfield:\n raise ValueError(\n f\"SyntaxError: could not parse function name from '{subfield}'\"\n )\n\n filename, funcname = subfield.split(\"::\")\n\n filename_validated = expand_and_validate_filepath(filename)\n if not filename_validated:\n raise ValueError(f\"'{filename}' does not appear to be a file\")\n\n template_func = load_function(filename_validated, funcname)\n values = template_func(self.photo, options=self.options)\n\n if not isinstance(values, (str, list)):\n raise TypeError(\n f\"Invalid return type for function {funcname}: expected str or list\"\n )\n if type(values) == str:\n values = [values]\n\n # sanitize directory names if needed\n if self.filename:\n values = [sanitize_pathpart(value) for value in values]\n elif self.dirname:\n # sanitize but don't replace any \"/\" as user function may want to create sub directories\n values = [sanitize_dirname(value, replacement=None) for value in values]\n\n return values\n\n def get_template_value_filter_function(self, filter_, values):\n \"\"\"Filter template value from external function\"\"\"\n\n filter_ = filter_.replace(\"function:\", \"\")\n\n if \"::\" not in filter_:\n raise ValueError(\n f\"SyntaxError: could not parse function name from '{filter_}'\"\n )\n\n filename, funcname = filter_.split(\"::\")\n\n filename_validated = expand_and_validate_filepath(filename)\n if not filename_validated:\n raise ValueError(f\"'{filename}' does not appear to be a file\")\n\n template_func = load_function(filename_validated, funcname)\n\n if not isinstance(values, (list, tuple)):\n values = [values]\n values = template_func(values)\n\n if not isinstance(values, list):\n raise TypeError(\n f\"Invalid return type for function {funcname}: expected list\"\n )\n\n return values\n\n def get_photo_video_type(self, default):\n \"\"\"return media type, e.g. photo or video\"\"\"\n default_dict = parse_default_kv(default, PHOTO_VIDEO_TYPE_DEFAULTS)\n if self.photo.isphoto:\n return default_dict[\"photo\"]\n else:\n return default_dict[\"video\"]\n\n def get_media_type(self, default):\n \"\"\"return special media type, e.g. slow_mo, panorama, etc., defaults to photo or video if no special type\"\"\"\n default_dict = parse_default_kv(default, MEDIA_TYPE_DEFAULTS)\n p = self.photo\n if p.selfie:\n return default_dict[\"selfie\"]\n elif p.time_lapse:\n return default_dict[\"time_lapse\"]\n elif p.panorama:\n return default_dict[\"panorama\"]\n elif p.slow_mo:\n return default_dict[\"slow_mo\"]\n elif p.screenshot:\n return default_dict[\"screenshot\"]\n elif p.portrait:\n return default_dict[\"portrait\"]\n elif p.live_photo:\n return default_dict[\"live_photo\"]\n elif p.burst:\n return default_dict[\"burst\"]\n elif p.ismovie:\n return default_dict[\"video\"]\n else:\n return default_dict[\"photo\"]\n\n def get_photo_bool_attribute(self, attr, default, bool_val):\n # get value for a PhotoInfo bool attribute\n val = getattr(self.photo, attr)\n if val:\n return bool_val\n else:\n return default\n\n\ndef parse_default_kv(default, default_dict):\n \"\"\"parse a string in form key1=value1;key2=value2,... 
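# The default-override syntax parsed by parse_default_kv below reduces to
# splitting on ';' and '='; a minimal standalone version (helper name mine):
def parse_kv(spec):
    out = {}
    for pair in spec.split(";"):
        key, _, val = pair.partition("=")
        if val:
            out[key.strip()] = val.strip()
    return out

print(parse_kv("photo=foto;video=vidéo"))  # {'photo': 'foto', 'video': 'vidéo'}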
as used for some template fields\n\n    Args:\n        default: str, in form 'photo=foto;video=vidéo'\n        default_dict: dict, in form {\"photo\": \"fotos\", \"video\": \"vidéos\"} with default values\n\n    Returns:\n        dict in form {\"photo\": \"fotos\", \"video\": \"vidéos\"}\n    \"\"\"\n\n    default_dict_ = default_dict.copy()\n    if default:\n        defaults = default[0].split(\";\")\n        for kv in defaults:\n            try:\n                k, v = kv.split(\"=\")\n                k = k.strip()\n                v = v.strip()\n                default_dict_[k] = v\n            except ValueError:\n                pass\n    return default_dict_\n\n\ndef get_template_help():\n    \"\"\"Return help for template system as markdown string\"\"\"\n    # TODO: would be better to use importlib.abc.ResourceReader but I can't find a single example of how to do this\n    help_file = pathlib.Path(__file__).parent / \"phototemplate.md\"\n    with open(help_file, \"r\") as fd:\n        md = fd.read()\n    return md\n\n\ndef _get_pathlib_value(field, value, quote):\n    \"\"\"Get the value for a pathlib.Path type template\n\n    Args:\n        field: the path field, e.g. \"filename.stem\"\n        value: the value for the path component\n        quote: bool; if true, quotes the returned path for safe execution in the shell\n    \"\"\"\n    parts = field.split(\".\")\n\n    if len(parts) == 1:\n        return shlex.quote(value) if quote else value\n\n    if len(parts) > 2:\n        raise ValueError(f\"Illegal value for path template: {field}\")\n\n    attribute = parts[1]\n    path = pathlib.Path(value)\n    try:\n        val = getattr(path, attribute)\n        val_str = str(val)\n        if quote:\n            val_str = shlex.quote(val_str)\n        return val_str\n    except AttributeError:\n        raise ValueError(f\"Illegal value for path template: {attribute}\")\n\n\ndef format_str_value(value, format_str):\n    \"\"\"Format value based on format code in field in format id:02d\"\"\"\n    if not format_str:\n        return str(value)\n    format_str = \"{0:\" + f\"{format_str}\" + \"}\"\n    return format_str.format(value)\n\n\ndef _get_album_by_name(photo, album):\n    \"\"\"Finds first album named album that photo is in and returns the AlbumInfo object, otherwise returns None\"\"\"\n    for album_info in photo.album_info:\n        if album_info.title == album:\n            return album_info\n    return None\n\n\ndef _get_album_by_path(photo, folder_album_path):\n    \"\"\"Finds the first album whose folder_album path matches folder_album_path and returns the AlbumInfo object; otherwise returns None\"\"\"\n\n    for album_info in photo.album_info:\n        # following code is how {folder_album} builds the folder path\n        folder = \"/\".join(sanitize_dirname(f) for f in album_info.folder_names)\n        folder += \"/\" + sanitize_dirname(album_info.title)\n        if folder_album_path.endswith(folder):\n            return album_info\n    return None\n\n\ndef _get_detected_text(photo, exportdb, confidence=TEXT_DETECTION_CONFIDENCE_THRESHOLD):\n    \"\"\"Returns the detected text for a photo\n    {detected_text} uses this instead of PhotoInfo.detected_text() to cache the text for all confidence values\n    \"\"\"\n    if not photo.isphoto:\n        return []\n\n    confidence = (\n        float(confidence)\n        if confidence is not None\n        else TEXT_DETECTION_CONFIDENCE_THRESHOLD\n    )\n\n    # _detected_text caches the text detection results in an extended attribute\n    # so the first time this gets called is slow but repeated accesses are fast\n    detected_text = photo._detected_text()\n    exportdb.set_detected_text_for_uuid(photo.uuid, json.dumps(detected_text))\n    return [text for text, conf in detected_text if conf >= 
confidence]\n","sub_path":"osxphotos/phototemplate.py","file_name":"phototemplate.py","file_ext":"py","file_size_in_byte":64192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"479201170","text":"import time\nfrom nanpy import (ArduinoApi, SerialManager)\n\ndefault_connection = SerialManager(device='/dev/ttyUSB1')\n\napi = ArduinoApi(default_connection)\n\npin = int(input('Enter Soil Sensor Pin: '))\n\napi.pinMode(pin, api.INPUT)\n\nloop = 10\n\nwhile loop > 0:\n\tresistance = api.analogRead(pin)\n\tprint('Resistance: ', resistance)\n\tloop -=1\n\ttime.sleep(5)","sub_path":"tests/soil_test.py","file_name":"soil_test.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6820297","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\nimport controlpy\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Float64\nfrom sympy import *\n\nlx = 0.188;\nly = 0.135;\nlab = lx+ly;\nt = 0.1; \n\n# A B matrix:\nA = np.matrix([[1,0,0],[0,1,0],[0,0,1]])\nB = np.matrix([[t*lab,t*lab,t*lab,t*lab],[t*lab,-t*lab,t*lab,-t*lab],[t,-t,-t,t]])\n\n# Define initial costs:\nQ = np.matrix([[1,0,0],[0,1,0],[0,0,1]])\nR = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])\n\n# Compute the LQR controller\ngain, X, closedLoopEigVals = controlpy.synthesis.controller_lqr(A,B,Q,R)\n\n\nprint('The computed gain is:')\nprint(gain)\n\nx_dot = Symbol('x_dot')\ny_dot = Symbol('y_dot')\nth_dot = Symbol('th_dot')\n\nx = np.array([ [x_dot],[y_dot],[th_dot]])\n\nprint('x:')\nprint(x)\n\nu = -gain*x\n\nprint('u:')\nprint(u)\n\nprint('The closed loop eigenvalues are:')\nprint(closedLoopEigVals)\n\n\n\n\n\n\n\n","sub_path":"aiv_controller_lqr/scripts/aiv_lqr.py","file_name":"aiv_lqr.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374938527","text":"import pygame as pg\nimport util\nfrom organism import Organism\nfrom animals import *\nfrom plants import *\nfrom random import randint\n\nclass World(object):\n def __init__(self, d):\n self.size = d.get_size()\n self.disp = d\n self.orgs = []\n self.types = []\n self.spawnNum = 0\n\n self.initialize()\n\n def initialize(self):\n self.orgs.append(Antelope(self, self.getRandPos()))\n self.orgs.append(Fox(self, self.getRandPos()))\n self.orgs.append(Grass(self, self.getRandPos()))\n self.orgs.append(Guarana(self, self.getRandPos()))\n self.orgs.append(Sheep(self, self.getRandPos()))\n self.orgs.append(SosnowskiBorch(self, self.getRandPos()))\n self.orgs.append(SowThistle(self, self.getRandPos()))\n self.orgs.append(Turtle(self, self.getRandPos()))\n self.orgs.append(Wolf(self, self.getRandPos()))\n self.orgs.append(Wolfberries(self, self.getRandPos()))\n\n for o in self.orgs:\n self.types.append(o.__class__)\n\n size = len(self.orgs)\n\n for i in range(0, size - 1):\n for j in range(0, 2):\n p = self.getRandPos()\n if not self.isFieldOccupied(p):\n self.orgs.append(self.orgs[i].__class__(self, p))\n\n def draw(self):\n self.disp.fill((0, 0, 0))\n for o in self.orgs:\n o.draw(self.disp)\n pg.display.update()\n\n def tryPlaceOrganism(self, pos):\n pos = self.fitPosToGrid(pos)\n if not self.isFieldOccupied(pos):\n self.orgs.append(self.types[self.spawnNum](self, pos))\n\n def getOrganisms(self):\n return self.orgs\n\n def setSpawnNum(self, n):\n self.spawnNum = n\n\n def isFieldOccupied(self, pos):\n p = 
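# The (A, B) pair in aiv_lqr.py above is discrete-time (the sample time t is
# folded into B); if a discrete-time design is wanted, the gain can be taken
# from the discrete algebraic Riccati equation. A sketch with SciPy; the
# helper name is mine:
import numpy as np
from scipy.linalg import solve_discrete_are

def dlqr(A, B, Q, R):
    """Gain K for u = -K x minimizing the discrete-time quadratic cost."""
    P = solve_discrete_are(A, B, Q, R)
    # K = (R + B'PB)^-1 B'PA
    return np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)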
util.floorDivTuples(pos, Organism.getSize())\n        p = util.mltTuples(p, Organism.getSize())\n        for o in self.orgs:\n            if o.getPosition() == p:  # compare against the grid-snapped position\n                return True\n        return False  # only report free after checking every organism\n\n    def update(self):\n        self.orgs.sort()\n        for o in self.orgs:\n            o.update()\n\n        newOnes = []\n        for o in self.orgs:\n            if (not o.isDead()) and o.shalMultiply():\n                n = o.placeNewborn()\n                if n is not None:\n                    newOnes.append(n)\n        self.orgs = [o for o in self.orgs if not o.isDead()]\n        self.orgs += newOnes\n\n    def getWidth(self):\n        return self.size[0]\n\n    def getHeight(self):\n        return self.size[1]\n\n    def fitPosToGrid(self, pos):\n        p = util.floorDivTuples(pos, Organism.getSize())\n        return util.mltTuples(p, Organism.getSize())\n\n    def getRandPos(self):\n        return self.fitPosToGrid((randint(0, self.getWidth() - Organism.getSize()[0]), randint(0, self.getHeight() - Organism.getSize()[1])))\n","sub_path":"simulationOOP/python/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"2040822","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy\n# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\nimport numpy as np\nimport copy\n\nfrom pypower.idx_bus import NONE, BUS_I, BUS_TYPE\nfrom pypower.idx_gen import GEN_BUS, GEN_STATUS\nfrom pypower.idx_brch import F_BUS, T_BUS, BR_STATUS\nfrom pypower.idx_area import PRICE_REF_BUS\nfrom pypower.run_userfcn import run_userfcn\n\nfrom pandapower.build_branch import _build_branch_ppc, _switch_branches, _branches_with_oos_buses, \\\n    _update_trafo_trafo3w_ppc\nfrom pandapower.build_bus import _build_bus_ppc, _calc_loads_and_add_on_ppc, \\\n    _calc_shunts_and_add_on_ppc\nfrom pandapower.build_gen import _build_gen_ppc, _update_gen_ppc\nfrom pandapower.auxiliary import _set_isolated_buses_out_of_service, _write_lookup_to_net\nfrom pandapower.make_objective import _make_objective\n\ndef _pd2ppc(net, is_elems, calculate_voltage_angles=False, enforce_q_lims=False,\n            trafo_model=\"pi\", init_results=False, copy_constraints_to_ppc=False,\n            opf=False, cost_function=None, **kwargs):\n    \"\"\"\n    Converter Flow:\n        1. Create an empty pypower data structure\n        2. Calculate loads and write the bus matrix\n        3. Build the gen (Infeeder)- Matrix\n        4. Calculate the line parameters and the transformer parameters,\n           and fill them into the branch matrix.\n           Order: 1st: Line values, 2nd: Trafo values\n        5. if opf: make opf objective (gencost)\n        6. 
convert internal ppci format for pypower powerflow / opf without out of service elements and rearanged buses\n\n INPUT:\n **net** - The Pandapower format network\n **is_elems** - In service elements from the network (see _select_is_elements())\n\n OPTIONAL PARAMETERS:\n **calculate_voltage_angles** (bool, False) - consider voltage angles in powerflow calculation\n (see the description of runpp())\n **enforce_q_lims** (bool, False) - respect generator reactive power limits (see description of runpp())\n **trafo_model** (str,pi) - transformer equivalent circuit model (see description of runpp())\n **init_results** (bool, False) - initialization method of the loadflow (see description of runpp())\n **copy_constraints_to_ppc** (bool, False) - additional constraints\n (like voltage boundaries, maximum thermal capacity of branches rateA and generator P and Q limits\n will be copied to the ppc). This is necessary for the OPF as well as the converter functions\n **opf** (bool, False) - changes to the ppc are necessary if OPF is calculated instead of PF\n **cost_function** (obj, None) - The OPF cost function\n\n\n OUTPUT:\n **ppc** - The simple matpower format network. Which consists of:\n ppc = {\n \"baseMVA\": 1., *float*\n \"version\": 2, *int*\n \"bus\": np.array([], dtype=float),\n \"branch\": np.array([], dtype=np.complex128),\n \"gen\": np.array([], dtype=float),\n \"gencost\" = np.array([], dtype=float), only for OPF\n \"internal\": {\n \"Ybus\": np.array([], dtype=np.complex128)\n , \"Yf\": np.array([], dtype=np.complex128)\n , \"Yt\": np.array([], dtype=np.complex128)\n , \"branch_is\": np.array([], dtype=bool)\n , \"gen_is\": np.array([], dtype=bool)\n }\n **ppci** - The \"internal\" pypower format network for PF calculations\n **bus_lookup** - Lookup Pandapower -> ppc / ppci indices\n \"\"\"\n ppc = _init_ppc(net)\n _init_lookups(net)\n\n if opf:\n # additional fields in ppc\n ppc[\"gencost\"] = np.array([], dtype=float)\n\n # init empty ppci\n ppci = copy.deepcopy(ppc)\n # generate ppc['bus'] and the bus lookup\n _build_bus_ppc(net, ppc, is_elems, init_results, copy_constraints_to_ppc=copy_constraints_to_ppc)\n # generate ppc['gen'] and fills ppc['bus'] with generator values (PV, REF nodes)\n _build_gen_ppc(net, ppc, is_elems, enforce_q_lims, calculate_voltage_angles,\n copy_constraints_to_ppc = False, opf=opf)\n # generate ppc['branch'] and directly generates branch values\n _build_branch_ppc(net, ppc, is_elems, calculate_voltage_angles, trafo_model,\n copy_constraints_to_ppc=copy_constraints_to_ppc)\n # adds P and Q for loads / sgens in ppc['bus'] (PQ nodes)\n _calc_loads_and_add_on_ppc(net, ppc, is_elems, opf=opf)\n # adds P and Q for shunts, wards and xwards (to PQ nodes)\n _calc_shunts_and_add_on_ppc(net, ppc, is_elems)\n # adds auxilary buses for open switches at branches\n _switch_branches(net, ppc, is_elems)\n # add auxilary buses for out of service buses at in service lines.\n # Also sets lines out of service if they are connected to two out of service buses\n _branches_with_oos_buses(net, ppc, is_elems)\n # sets buses out of service, which aren't connected to branches / REF buses\n _set_isolated_buses_out_of_service(net, ppc)\n\n # generates \"internal\" ppci format (for powerflow calc) from \"external\" ppc format and updates the bus lookup\n # Note: Also reorders buses and gens in ppc\n ppci = _ppc2ppci(ppc, ppci, net, is_elems)\n\n if opf:\n # make opf objective\n ppci = _make_objective(ppci, net, is_elems, cost_function, **kwargs)\n\n return ppc, ppci\n\ndef 
_init_ppc(net):\n # init empty ppc\n ppc = {\"baseMVA\": 1.\n , \"version\": 2\n , \"bus\": np.array([], dtype=float)\n , \"branch\": np.array([], dtype=np.complex128)\n , \"gen\": np.array([], dtype=float)\n , \"internal\": {\n \"Ybus\": np.array([], dtype=np.complex128)\n , \"Yf\": np.array([], dtype=np.complex128)\n , \"Yt\": np.array([], dtype=np.complex128)\n , \"branch_is\": np.array([], dtype=bool)\n , \"gen_is\": np.array([], dtype=bool)\n }\n }\n net[\"_ppc\"] = ppc\n return ppc\n\ndef _init_lookups(net):\n if \"_pd2ppc_lookups\" not in net:\n net[\"_pd2ppc_lookups\"] = {\"bus\": None,\n \"gen\": None,\n \"branch\": None}\n\n\ndef _ppc2ppci(ppc, ppci, net, is_elems):\n # BUS Sorting and lookups\n\n # get bus_lookup\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n # sort busses in descending order of column 1 (namely: 4 (OOS), 3 (REF), 2 (PV), 1 (PQ))\n ppc_buses = ppc[\"bus\"]\n ppc['bus'] = ppc_buses[ppc_buses[:, BUS_TYPE].argsort(axis=0)[::-1][:], ]\n # get OOS busses and place them at the end of the bus array (so that: 3\n # (REF), 2 (PV), 1 (PQ), 4 (OOS))\n oos_busses = ppc['bus'][:, BUS_TYPE] == NONE\n # there are no OOS busses in the ppci\n ppci['bus'] = ppc['bus'][~oos_busses]\n # in ppc the OOS busses are included and at the end of the array\n ppc['bus'] = np.r_[ppc['bus'][~oos_busses], ppc['bus'][oos_busses]]\n # generate bus_lookup_ppc_ppci (ppc -> ppci lookup)\n ppc_former_order = (ppc['bus'][:, BUS_I]).astype(int)\n aranged_buses = np.arange(len(ppc_buses))\n\n # lookup ppc former order -> consecutive order\n e2i = np.zeros(len(ppc_buses), dtype=int)\n e2i[ppc_former_order] = aranged_buses\n\n # save consecutive indices in ppc and ppci\n ppc['bus'][:, BUS_I] = aranged_buses\n ppci['bus'][:, BUS_I] = ppc['bus'][:len(ppci['bus']), BUS_I]\n\n # update lookups (pandapower -> ppci internal)\n _update_lookup_entries(net, bus_lookup, e2i, \"bus\")\n\n if 'areas' in ppc:\n if len(ppc[\"areas\"]) == 0: # if areas field is empty\n del ppc['areas'] # delete it (so it's ignored)\n\n # bus types\n bt = ppc[\"bus\"][:, BUS_TYPE]\n\n # update branch, gen and areas bus numbering\n ppc['gen'][:, GEN_BUS] = e2i[np.real(ppc[\"gen\"][:, GEN_BUS]).astype(int)].copy()\n ppc[\"branch\"][:, F_BUS] = e2i[np.real(ppc[\"branch\"][:, F_BUS]).astype(int)].copy()\n ppc[\"branch\"][:, T_BUS] = e2i[np.real(ppc[\"branch\"][:, T_BUS]).astype(int)].copy()\n\n # Note: The \"update branch, gen and areas bus numbering\" does the same as this:\n # ppc['gen'][:, GEN_BUS] = get_indices(ppc['gen'][:, GEN_BUS], bus_lookup_ppc_ppci)\n # ppc[\"branch\"][:, F_BUS] = get_indices(ppc[\"branch\"][:, F_BUS], bus_lookup_ppc_ppci)\n # ppc[\"branch\"][:, T_BUS] = get_indices( ppc[\"branch\"][:, T_BUS], bus_lookup_ppc_ppci)\n # but faster...\n\n if 'areas' in ppc:\n ppc[\"areas\"][:, PRICE_REF_BUS] = \\\n e2i[np.real(ppc[\"areas\"][:, PRICE_REF_BUS]).astype(int)].copy()\n\n # reorder gens (and gencosts) in order of increasing bus number\n sort_gens = ppc['gen'][:, GEN_BUS].argsort()\n new_gen_positions = np.arange(len(sort_gens))\n new_gen_positions[sort_gens] = np.arange(len(sort_gens))\n ppc['gen'] = ppc['gen'][sort_gens, ]\n\n # update gen lookups\n eg_end = len(is_elems['ext_grid'])\n gen_end = eg_end + len(is_elems['gen'])\n sgen_end = len(is_elems[\"sgen_controllable\"]) + gen_end if \"sgen_controllable\" in is_elems else gen_end\n load_end = len(is_elems[\"load_controllable\"]) + sgen_end if \"load_controllable\" in is_elems else sgen_end\n\n if eg_end > 0:\n _build_gen_lookups(net, \"ext_grid\", 0, eg_end, 
new_gen_positions, is_elems)\n if gen_end > eg_end:\n _build_gen_lookups(net, \"gen\", eg_end, gen_end, new_gen_positions, is_elems)\n if sgen_end > gen_end:\n _build_gen_lookups(net, \"sgen_controllable\", gen_end, sgen_end, new_gen_positions, is_elems)\n if load_end > sgen_end:\n _build_gen_lookups(net, \"load_controllable\", sgen_end, load_end, new_gen_positions, is_elems)\n\n # determine which buses, branches, gens are connected and\n # in-service\n n2i = ppc[\"bus\"][:, BUS_I].astype(int)\n bs = (bt != NONE) # bus status\n\n gs = ((ppc[\"gen\"][:, GEN_STATUS] > 0) & # gen status\n bs[n2i[np.real(ppc[\"gen\"][:, GEN_BUS]).astype(int)]])\n ppci[\"internal\"][\"gen_is\"] = gs\n\n brs = (np.real(ppc[\"branch\"][:, BR_STATUS]).astype(int) & # branch status\n bs[n2i[np.real(ppc[\"branch\"][:, F_BUS]).astype(int)]] &\n bs[n2i[np.real(ppc[\"branch\"][:, T_BUS]).astype(int)]]).astype(bool)\n ppci[\"internal\"][\"branch_is\"] = brs\n\n if 'areas' in ppc:\n ar = bs[n2i[ppc[\"areas\"][:, PRICE_REF_BUS].astype(int)]]\n # delete out of service areas\n ppci[\"areas\"] = ppc[\"areas\"][ar]\n\n # select in service elements from ppc and put them in ppci\n ppci[\"branch\"] = ppc[\"branch\"][brs]\n ppci[\"gen\"] = ppc[\"gen\"][gs]\n\n if 'dcline' in ppc:\n ppci['dcline'] = ppc['dcline']\n # execute userfcn callbacks for 'ext2int' stage\n if 'userfcn' in ppci:\n ppci = run_userfcn(ppci['userfcn'], 'ext2int', ppci)\n\n return ppci\n\ndef _update_lookup_entries(net, lookup, e2i, element):\n valid_bus_lookup_entries = lookup >= 0\n # update entries\n lookup[valid_bus_lookup_entries] = e2i[lookup[valid_bus_lookup_entries]]\n _write_lookup_to_net(net, element, lookup)\n\n\ndef _build_gen_lookups(net, element, ppc_start_index, ppc_end_index, sort_gens, is_elems):\n # get buses from pandapower and ppc\n pandapower_index = is_elems[element].index.values\n ppc_index = sort_gens[ppc_start_index: ppc_end_index]\n\n # init lookup\n lookup = -np.ones(max(pandapower_index) + 1, dtype=int)\n\n # update lookup\n lookup[pandapower_index] = ppc_index\n _write_lookup_to_net(net, element, lookup)\n\ndef _update_ppc(net, is_elems, recycle, calculate_voltage_angles=False, enforce_q_lims=False,\n trafo_model=\"pi\"):\n \"\"\"\n Updates P, Q values of the ppc with changed values from net\n\n @param is_elems:\n @return:\n \"\"\"\n # get the old ppc and lookup\n ppc = net[\"_ppc\"]\n ppci = copy.deepcopy(ppc)\n # adds P and Q for loads / sgens in ppc['bus'] (PQ nodes)\n _calc_loads_and_add_on_ppc(net, ppc, is_elems)\n # adds P and Q for shunts, wards and xwards (to PQ nodes)\n _calc_shunts_and_add_on_ppc(net, ppc, is_elems)\n # updates values for gen\n _update_gen_ppc(net, ppc, is_elems, enforce_q_lims, calculate_voltage_angles)\n if not recycle[\"Ybus\"]:\n # updates trafo and trafo3w values\n _update_trafo_trafo3w_ppc(net, ppc, calculate_voltage_angles, trafo_model)\n\n # get OOS busses and place them at the end of the bus array (so that: 3\n # (REF), 2 (PV), 1 (PQ), 4 (OOS))\n oos_busses = ppc['bus'][:, BUS_TYPE] == NONE\n # there are no OOS busses in the ppci\n ppci['bus'] = ppc['bus'][~oos_busses]\n # select in service elements from ppc and put them in ppci\n brs = ppc[\"internal\"][\"branch_is\"]\n gs = ppc[\"internal\"][\"gen_is\"]\n ppci[\"branch\"] = ppc[\"branch\"][brs]\n ppci[\"gen\"] = ppc[\"gen\"][gs]\n\n return ppc, ppci\n","sub_path":"pandapower/pd2ppc.py","file_name":"pd2ppc.py","file_ext":"py","file_size_in_byte":13007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
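# --- Added note (illustrative, not part of the dataset records above or below) ---
# The "e2i" renumbering in _ppc2ppci() above maps former bus numbers to consecutive
# internal indices with a single vectorized scatter assignment, which is why the
# in-code comment calls it faster than a per-element lookup. A minimal self-contained
# sketch with assumed toy data (numpy only; the names here are not from pandapower):
import numpy as np

old_ids = np.array([7, 2, 5, 0])           # former bus numbers (BUS_I column), any order
e2i = np.zeros(old_ids.max() + 1, dtype=int)
e2i[old_ids] = np.arange(len(old_ids))     # scatter: old id -> new consecutive index
branch_from = np.array([7, 5, 2])          # a column that references the old bus ids
print(e2i[branch_from])                    # -> [0 2 1], renumbered in one vectorized step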
+{"seq_id":"53675404","text":"import json\r\nimport sys\r\nsys.path.append(sys.path[0] + \"\\\\classes\\\\\")\r\nimport Pokemon\r\nimport Type\r\n\r\ndef readJSON(self, path): # Path: pythonmon-master/json/console.json\r\n try:\r\n with open(path, \"r\") as fp:\r\n return json.load(fp)\r\n except:\r\n return -1\r\n\r\n# ------------------------------- CONSOLA PRUEBAS & CARGA DE JSON -------------------------------\r\n# Comandos: /create /save\r\njpkmn = {}\r\ni = \"\"\r\npath = sys.path[0]\r\npath.replace(\"\\\\\", \"/\")\r\njtxt = readJSON(None, path + \"/json/console.json\")\r\nwhile i != \"/close\":\r\n print(\"Qué desea hacer?\")\r\n print(\"Comandos: \\n/create /save\")\r\n i = input().lower()\r\n if i == \"/create help\":\r\n print(jtxt[\"createh\"])\r\n elif i == \"/create pokemon\":\r\n print(jtxt[\"pkmnname\"])\r\n name = input().lower()\r\n name = name[0].upper() + name[1:len(name)].lower()\r\n print(jtxt[\"pkmndescr\"])\r\n descr = input()\r\n print(jtxt[\"pkmntype1\"]) # Falta búsqueda y validación con el JSON de types\r\n type1 = input().lower()\r\n print(jtxt[\"pkmntype2\"]) # Falta búsqueda y validación con el JSON de types\r\n type2 = input().lower()\r\n if type2 == \"\":\r\n type2 = \" \"\r\n print(jtxt[\"pkmnbaseatk\"])\r\n baseatk = input()\r\n print(jtxt[\"pkmnbasedf\"])\r\n basedf = input()\r\n print(jtxt[\"pkmnbasehp\"])\r\n basehp = input()\r\n print(jtxt[\"pkmndexnbr\"])\r\n dexnbr = input()\r\n pkmn = Pokemon.Pokemon(name, descr, type1, type2, baseatk, basedf, basehp, dexnbr, None, False)\r\n jpkmn = pkmn.loadJSON(jpkmn)\r\n print(jpkmn)\r\n elif i == \"/create type\":\r\n pass\r\n elif i == \"/create pokeball\":\r\n pass\r\n elif i == \"/create attack\":\r\n pass\r\n elif i[0:8] == \"/create \":\r\n print(jtxt[\"createerror\"])\r\n elif i == \"/save help\":\r\n print(jtxt[\"saveh\"])\r\n elif i == \"/save pokemon\":\r\n if jpkmn == \"\":\r\n print(jtxt[\"saveerror2\"].format(\"Pokémon\"))\r\n else:\r\n print(Pokemon.writeJSON(jpkmn, path + \"/json/pkmn.json\"))\r\n elif i[0:6] == \"/save \":\r\n print(jtxt[\"saveerror\"])","sub_path":"Pythonmon-master/game/admin-console.py","file_name":"admin-console.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240312969","text":"n = int(input())\nX = list(map(int, input().split()))\n\nsortX = sorted(X)\n#%%\nlB = sortX[n//2-1]\nrB = sortX[n//2]\n\nfor x in X:\n if x <= lB:\n print(rB)\n else:\n print(lB)\n","sub_path":"Python_codes/p03379/s006264000.py","file_name":"s006264000.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79385088","text":"'''\nWrite binary file\n\n1. After reading HadISST text file (same as D02),\n2. write SST values to binary file\n\n\nData file: Hadley Centre Sea Ice and Sea Surface Temperature data set (HadISST)\nSource: https://www.metoffice.gov.uk/hadobs/hadisst/data/download.html\nReferece: Rayner, N. A.; Parker, D. E.; Horton, E. B.; Folland, C. K.; Alexander, L. V.;\n Rowell, D. P.; Kent, E. C.; Kaplan, A. (2003) Global analyses of sea surface temperature,\n sea ice, and night marine air temperature since the late nineteenth century\n J. Geophys. Res.Vol. 108, No. 
D14, 4407, doi:10.1029/2002JD002670 \n\nBy Daeho Jin\n'''\n\nimport sys\nimport os.path\nimport numpy as np\n\ndef read_hadisst_manually(fname):\n    \"\"\"\n    Read Hadley SST Text file\n    fname: file name, including directory\n    \"\"\"\n    if not os.path.isfile(fname):\n        #print( \"File does not exist:\"+fname); sys.exit()\n        sys.exit(\"File does not exist: \"+fname)\n\n    time_info, vals = [], []\n    width= 6  # Values are of fixed width in the text file\n    with open(fname,'r') as f:\n        for i,line in enumerate(f):\n            if len(line)<50:  # Distinguish monthly header from sst data\n                ww=line.strip().split()\n                time_info.append([int(item) for item in ww[:3]])\n                dims= [int(ww[3]),int(ww[5])]\n                nct=0\n                temp_array= []  # Initialize storage to save monthly sst data\n            else:\n                ww= [line[i:i+width] for i in range(0,len(line.strip()),width)]\n                temp_array.append(ww)\n                nct+=1\n                if nct==dims[0]:  # If one month map is completed\n                    vals.append(np.array(temp_array,dtype=np.int32))\n\n    return np.asarray(time_info), np.asarray(vals)\n\ndef write_binary_data(filename, data, dtype=np.float32):\n    ### First, check if the file already exists\n    mode= 'wb'  # default: write a new file (avoids a NameError below when the file does not exist yet)\n    if os.path.isfile(filename):\n        print(\"\\n{} already exists\".format(filename))\n        answer= input(\"If want to overwrite, type 'y'; If want to append, type 'a'\\n\")\n        if answer[0].lower()=='y':\n            mode= 'wb'  # 'wb' not just 'w'\n        elif answer[0].lower()=='a':\n            mode= 'ab'  # 'append' 'binary'\n        else:\n            sys.exit(\"Your input {} is inappropriate.\".format(answer))\n\n    ### Write sst to a binary file\n    with open(filename, mode) as f:\n        data.astype(dtype).tofile(f)\n\ndef main():\n    indir= '../Data/'\n    yrs= [2017,2019]  # Starting year and ending year\n    for yr in range(yrs[0],yrs[1]+1,1):\n        #HadISST1_SST_2019.txt\n        infn= indir+'HadISST1_SST_{}.txt'.format(yr)\n\n        time_info,sst1= read_hadisst_manually(infn)\n        if yr==yrs[0]:\n            times= np.copy(time_info)\n            sst= np.copy(sst1)\n        else:\n            times= np.concatenate((times,time_info), axis=0)\n            sst= np.concatenate((sst,sst1),axis=0)\n\n    print(times.shape,sst.shape)\n\n    ### Transform values to degreeC by dividing by 100\n    sst= sst/100.\n\n    ### Latitude currently runs 90N to 90S, so it should be flipped.\n    sst= sst[:,::-1,:]\n\n    ###---- Same as D02 so far\n    ### Next, change land area values to -999.9 while keeping the ice-cover value (-10.00)\n    new_undef= -999.9\n    miss_idx= sst<-99.\n    sst[miss_idx]= new_undef\n\n    outdir= indir\n    outfn= outdir+'HadISST1.sample.{}-{}.{}x{}x{}.f32dat'.format(*yrs,*sst.shape)  # File info on file name\n    write_binary_data(outfn, sst)  # 'dtype=np.float32' option is omitted (float32 is the default).\n    return\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"D.Read_Write_Text+Binary_file/D04_Write_binary_file_HadISST_py3.py","file_name":"D04_Write_binary_file_HadISST_py3.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"123203751","text":"from tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import colorchooser\nfrom PIL import Image, ImageDraw, ImageTk\nimport cv2\nimport numpy as np\n#from PIL.Image import Image\n\nfrom cv01distenceTest import distenceMeasure, imgdrawResult\n\ndef rgb2hex(rgb):\n    hex_color = \"#\" + hex(rgb[0])[2:].zfill(2) + hex(rgb[1])[2:].zfill(2) + hex(rgb[2])[2:].zfill(2)\n    return hex_color.upper()\n\nclass ImageEditor:\n    image_draw: Image\n\n    def __init__(self, width, height):\n        self.width = width\n        self.height = height\n        self.file_path = r\"D:\\\04DataSets\\\04/box.jpg\"\n        self.root = Tk()\n        self.root.title(\"刘翠立的算法调试器\")\n\n        self.brush_size = 15\n        
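        # NOTE (added comment): brush radius in pixels and the initial pen color as an
        # RGB tuple; rgb2hex() above turns such a tuple into the "#RRGGBB" string that
        # tkinter widgets expect, e.g. rgb2hex((60, 60, 60)) -> "#3C3C3C".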
self.draw_color = (60, 60, 60)\n # 创建菜单栏\n menubar = Menu(self.root)\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"打开\", command=self.open_image)\n filemenu.add_command(label=\"保存\", command=self.save_image)\n menubar.add_cascade(label=\"文件\", menu=filemenu)\n self.root.config(menu=menubar)\n\n # 创建工具栏\n toolbar = Frame(self.root)\n self.pen_btn = Button(toolbar, text=\"画笔\", command=self.use_pen)\n self.pen_btn.pack(side=LEFT, padx=5, pady=5)\n\n self.color_btn = Button(toolbar, text=\"颜色\", command=self.choose_color)\n self.color_btn.pack(side=LEFT, padx=5, pady=5)\n\n self.screen_btn = Button(toolbar, text=\"吸色\", command=self.pick_color)\n self.screen_btn.pack(side=LEFT, padx=5, pady=5)\n\n self.reset_btn = Button(toolbar, text=\"重置\", command=self.img_reset)\n self.reset_btn.pack(side=LEFT, padx=5, pady=5)\n\n toolbar.pack(side=TOP, fill=X)\n\n # 创建画布\n self.canvas = Canvas(self.root, width=self.width, height=self.height)\n self.canvas.pack(fill=BOTH, expand=YES)\n\n # 初始化画笔\n self.image_show = Image.new(\"RGB\", (self.width, self.height), (255, 255, 255))\n self.draw = ImageDraw.Draw(self.image_show)\n self.tool = None\n self.image_original = None\n self.image_draw = None\n self.photo = None\n\n # 绑定事件\n self.canvas.bind(\"\", self.paint)\n #self.canvas.bind(\"\", self.reset)\n\n self.init_img()\n def init_img(self):\n self.image_original = Image.open(self.file_path)\n self.image_draw = self.image_original.copy()\n\n image_show_gray = cv2.cvtColor(np.array(self.image_draw), cv2.COLOR_RGB2GRAY)\n image_show_bgr = cv2.cvtColor(np.array(self.image_draw), cv2.COLOR_RGB2BGR) # PIL -> cv\n result_dict = distenceMeasure(image_show_gray)\n imgdrawResult(image_show_bgr, result_dict)\n\n self.draw = ImageDraw.Draw(self.image_draw)\n self.image_show = self.image_original.copy()\n self.photo = ImageTk.PhotoImage(self.image_show)\n self.canvas.create_image(0, 0, image=self.photo, anchor=NW)\n self.color_btn.configure(bg=str(rgb2hex(self.draw_color)))\n\n def open_image(self):\n file_path = filedialog.askopenfilename(title=\"选择图片\", filetypes=[(\"图片文件\", \"*.jpg;*.jpeg;*.png;*.bmp\")])\n if file_path:\n self.image_original = Image.open(file_path)\n self.image_draw = self.image_original.copy()\n self.draw = ImageDraw.Draw(self.image_draw)\n self.image_show = self.image_original.copy()\n self.photo = ImageTk.PhotoImage(self.image_show)\n self.canvas.create_image(0, 0, image=self.photo, anchor=NW)\n\n def save_image(self):\n file_path = filedialog.asksaveasfilename(title=\"保存图片\", defaultextension=\".png\",\n filetypes=[(\"PNG 文件\", \"*.png\"), (\"JPG 文件\", \"*.jpg\")])\n if file_path:\n self.image_draw.save(file_path)\n\n def use_pen(self):\n print(self.tool)\n self.tool = \"pen\"\n print(\"use_pen2\", self.tool)\n\n def choose_color(self):\n color = colorchooser.askcolor(title=\"选择颜色\")\n if color and color[0]:\n self.color_btn.configure(bg=str(color[1]))\n self.draw_color = color[0]\n\n def pick_color(self):\n print(\"pick_color1\", self.tool)\n self.tool = \"pick\"\n print(\"pick_color2\", self.tool)\n\n def paint(self, event):\n print(\"paint\", self.tool)\n if self.tool == \"pen\":\n x, y = event.x, event.y\n self.draw.ellipse((x - self.brush_size, y - self.brush_size, x + self.brush_size, y + self.brush_size),\n fill=self.draw_color, outline=self.draw_color)\n # self.draw.line([event.x, event.y, event.x + 1, event.y + 1], fill=self.draw_color, width=20)\n\n image_show_gray = cv2.cvtColor(np.asarray(self.image_draw), cv2.COLOR_RGB2GRAY)\n image_show_bgr = 
cv2.cvtColor(np.asarray(self.image_draw), cv2.COLOR_RGB2BGR) # PIL -> cv\n result_dict = distenceMeasure(image_show_gray)\n imgdrawResult(image_show_bgr, result_dict)\n self.image_show = Image.fromarray(cv2.cvtColor(image_show_bgr, cv2.COLOR_BGR2RGB)) # cv -> PIL\n\n # LineWhiteTop = resultDict[\"LineWhiteTop\"]\n # k = LineWhiteTop[1] / LineWhiteTop[0]\n # b = LineWhiteTop[3] - k * LineWhiteTop[2]\n # draw = ImageDraw.Draw(self.image_show)\n # draw.line([0, int(b), 600, int(k * 600 + b)], fill=(255, 0, 0), width=1)\n self.photo = ImageTk.PhotoImage(self.image_show)\n self.canvas.create_image(0, 0, image=self.photo, anchor=NW)\n elif self.tool == \"pick\":\n x, y = event.x, event.y\n print(self.image_show.getpixel((x, y)))\n self.color_btn.configure(bg=rgb2hex(self.image_show.getpixel((x, y))))\n self.draw_color = self.image_show.getpixel((x, y))\n\n def img_reset(self):\n self.init_img()\n #self.tool = None\n pass\n\n def run(self):\n self.root.mainloop()\n\n\nif __name__ == '__main__':\n app = ImageEditor(800, 600)\n app.run()\n","sub_path":"ml00project/pj3distence/cv02tk04.py","file_name":"cv02tk04.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315721866","text":"import pygame\r\nimport random\r\npygame.init()\r\nwin=pygame.display.set_mode((600,600))\r\n\r\npygame.display.set_caption(\"Space Invasion\")\r\n\r\nship=pygame.image.load('ship.png')\r\n\r\nbullet_image=pygame.image.load('laser.png')\r\nenemy1=pygame.image.load('enemy1.png')\r\nbg=pygame.image.load('bg.jpg')\r\nenemyship=pygame.image.load('espaceship2.png')\r\nenemy_laser=pygame.image.load('enemylaser.png')\r\nclock=pygame.time.Clock()\r\n\r\nalien=[]\r\ni=1\r\nfor i in range(1,5):\r\n alien.append(pygame.image.load('mystery1.png'))\r\n\r\n \r\ndef aliendraw(x,y,j):\r\n\r\n win.blit(alien[j],(x,y))\r\n\r\n\r\n\r\nplexplo=pygame.mixer.Sound('player_explosion.wav')\r\n\r\nenexplo=pygame.mixer.Sound('enemy_explosion.wav')\r\n\r\nshoot=pygame.mixer.Sound('shoot.wav')\r\n\r\ne_blast=pygame.image.load('e_explosion.png')\r\n\r\np_blast=pygame.image.load('p_explosion.png')\r\n\r\nmusic=pygame.mixer.music.load(\"music.mp3\")\r\npygame.mixer.music.play(-1)\r\n\r\n\r\nclass player(object):\r\n def __init__(self,x,y,width,height):\r\n self.x=x\r\n self.y=y\r\n self.width=width\r\n self.height=height\r\n self.vel=1\r\n self.life=10\r\n self.standing=True\r\n self.walkcount=0\r\n self.hitbox=(self.x,self.y,50,50)\r\n\r\n\r\n def draw(self,win):\r\n if self.walkcount+1>=27:\r\n self.walkcount=0\r\n if not(self.standing):\r\n self.walkcount=0\r\n win.blit(ship,(self.x,self.y))\r\n else:\r\n win.blit(ship,(self.x,self.y))\r\n \r\n \r\n self.hitbox=(self.x,self.y,50,50)\r\n font1=pygame.font.SysFont('comicsans',27,False,True)\r\n lives=font1.render('LIFE:',1,(255,0,0))\r\n win.blit(lives,(385,26))\r\n pygame.draw.rect(win,(255,0,0),(450,30,100,10))\r\n pygame.draw.rect(win,(0,128,0),(450,30,100-(10*(10-self.life)),10))\r\n #pygame.draw.rect(win,(255,0,0),self.hitbox,2)\r\n def hit(self):\r\n win.blit(p_blast,(self.x-27,self.y-13))\r\n self.x=300\r\n self.y=300\r\n self.life-=1\r\n self.walkcount=0\r\n plexplo.play()\r\n font=pygame.font.SysFont('comicsans',100)\r\n text=font.render('YOU ARE HIT',1,(255,0,0))\r\n win.blit(text,(100,250))\r\n pygame.display.update()\r\n i=0\r\n while i<300:\r\n pygame.time.delay(10)\r\n i+=1\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n i=301\r\n pygame.quit()\r\n \r\n if 
self.life==0:\r\n font1=pygame.font.SysFont('comicsans',100)\r\n gameover=font1.render('GAME OVER',1,(255,0,0))\r\n win.blit(gameover,(100,390))\r\n #score=font1.render('SCORE: '+str(score),1,(255,0,0))\r\n \r\n #win.blit(score,(10,450))\r\n pygame.display.update()\r\n j=0\r\n while j<300:\r\n pygame.time.delay(10)\r\n j+=1\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n j=301\r\n pygame.quit()\r\n pygame.quit()\r\n\r\n def round2(self):\r\n \r\n font2=pygame.font.SysFont('comicsans',100)\r\n victory=font2.render('VICTORY',1,(255,0,0))\r\n win.blit(victory,(150,355))\r\n pygame.display.update()\r\n j=0\r\n while j<300:\r\n pygame.time.delay(1)\r\n j+=1\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n j=301\r\n pygame.quit()\r\n pygame.quit()\r\n\r\n\r\n \r\n\r\ndef redraw():\r\n no=0\r\n win.blit(bg,(0,0))\r\n p1.draw(win)\r\n if no<3:\r\n \r\n e1.draw(win)\r\n no+=1\r\n\r\n font=pygame.font.SysFont('comicsans',30,True,True)\r\n text=font.render('SCORE: '+str(score),1,(255,0,0))\r\n \r\n win.blit(text,(450,5))\r\n\r\n for bullet in bullets:\r\n bullet.draw(win)\r\n \r\n for elaser in enbullet:\r\n elaser.draw(win)\r\n for enemy2 in enemy_2:\r\n enemy2.draw(win)\r\n pygame.draw.rect(win,(0,255,0),(0,575,600,25))\r\n win.blit(enemyship,(0,0))\r\n \r\n pygame.display.update()\r\n\r\nclass enemy(object):\r\n def __init__(self,x,y,width,height,end):\r\n self.x=x\r\n self.y=y\r\n self.width=width\r\n self.height=height\r\n self.vel=2\r\n self.end=end\r\n self.path=[self.end,self.y]\r\n self.hitbox=(self.x,self.y,210,100)\r\n self.health=10\r\n self.visible=True\r\n self.walkcount=0\r\n def draw(self,win):\r\n self.move()\r\n \r\n if self.visible:\r\n win.blit(alien[0],(self.x,self.y))\r\n if self.walkcount+1>=27:\r\n self.walkcount=0\r\n self.hitbox=(self.x,self.y,50,50)\r\n #pygame.draw.rect(win,(255,0,0),self.hitbox,2)\r\n pygame.draw.rect(win,(255,0,0),(self.hitbox[0],self.hitbox[1]-20,50,10))\r\n pygame.draw.rect(win,(0,128,0),(self.hitbox[0],self.hitbox[1]-20,50-(5*(10-self.health)),10))\r\n\r\n else: \r\n win.blit(e_blast,(self.x,self.y))\r\n self.visible=False\r\n \r\n \r\n \r\n def move(self):\r\n if self.y<600:\r\n self.y+=0.5\r\n else:\r\n self.visible=False\r\n\r\n def hit(self):\r\n if self.health>0:\r\n self.health-=1\r\n else:\r\n self.visible=False\r\n enexplo.play()\r\n run=False\r\n\r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\nclass laser(object):\r\n def __init__(self,x,y,height,width):\r\n self.x=x\r\n self.y=y \r\n self.height=height\r\n self.width=width\r\n self.visible=True\r\n def draw(self,win):\r\n self.move()\r\n if self.visible:\r\n win.blit(bullet_image,(self.x,self.y))\r\n def move(self):\r\n if self.y>0:\r\n self.y-=.25\r\n else:\r\n run=False\r\n self.visible=False\r\n \r\n \r\nclass enemylaser(object):\r\n def __init__(self,x,y,height,width,vel):\r\n self.x=random.randint(10,550)\r\n self.y=30 \r\n self.height=height\r\n self.width=width\r\n self.vel=vel\r\n self.visible=True\r\n def draw(self,win):\r\n self.move()\r\n if self.visible:\r\n win.blit(enemy_laser,(self.x,self.y))\r\n def move(self):\r\n if self.y<600:\r\n self.y+=vel\r\n else:\r\n run=False\r\n self.visible=False\r\n\r\n\r\nclass more_enemies(object):\r\n def __init__(self,x,y,height,width):\r\n self.x=random.randint(10,550)\r\n self.y=30 \r\n self.height=height\r\n self.width=width\r\n \r\n self.visible=True\r\n def draw(self,win):\r\n self.move()\r\n \r\n if self.visible:\r\n \r\n win.blit(enemy1,(self.x,self.y))\r\n else:\r\n 
win.blit(e_blast,(self.x,self.y))\r\n \r\n def move(self):\r\n if self.y<550:\r\n self.y+=1.5\r\n else:\r\n run=False\r\n self.visible=False\r\n \r\n\r\n\r\n#MAIN LOOP\r\n\r\nscore=0\r\np1=player(300,525,50,48)\r\nno=0\r\n\r\nrun=True\r\nj=1\r\nbullets=[]\r\nenbullet=[]\r\nenemy_2=[]\r\nshootloop=0\r\nfor images in alien:\r\n ranno=random.randint(1,500)\r\n e1=enemy(ranno,-20,64,64,800)\r\n \r\n\r\nwhile run: \r\n\r\n \r\n keys=pygame.key.get_pressed()\r\n \r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n run=False\r\n rand1=random.randint(1,25)\r\n rand2=random.randint(1,25)\r\n if(rand1==rand2):\r\n # enemylaser(rand1,100,5,15)\r\n enbullet.append(enemylaser(rand1,100,5,15,2.5))\r\n if e1.visible==True: \r\n for bullet in bullets:\r\n if bullet.y-bullet.heighte1.hitbox[1]:\r\n if bullet.x+bullet.width>e1.hitbox[0] and bullet.x-bullet.width0:\r\n bullet.y-=2.5\r\n else:\r\n bullets.pop(bullets.index(bullet))\r\n if p1.hitbox[1]e1.hitbox[1]:\r\n if p1.hitbox[0]+p1.hitbox[2]>e1.hitbox[0] and p1.hitbox[0]0:\r\n bullet.y-=2.5\r\n else:\r\n bullets.pop(bullets.index(bullet))\r\n # if p1.y<=100:\r\n # p1.round2()\r\n rand5=random.randint(1,5)\r\n rand6=random.randint(1,5)\r\n\r\n #if e1.visible==False and p1.y<100:\r\n # p1.round2()\r\n rand7=random.randint(1,250)\r\n rand8=random.randint(1,250)\r\n if(rand7==rand8):\r\n enemy_2.append(more_enemies(rand7,100,50,36))\r\n for more in enemy_2:\r\n if more.y<570 and more.y>0:\r\n if more.yp1.hitbox[1]:\r\n if more.x+more.width>p1.hitbox[0] and more.x-more.widthbullet.y-bullet.height:\r\n if more.x-more.widthbullet.x-bullet.width:\r\n # more.visible=False\r\n bullets.pop(bullets.index(bullet))\r\n enemy_2.pop(enemy_2.index(more))\r\n score+=1\r\n \r\n\r\n\r\n \r\n # pass\r\n for elaser in enbullet:\r\n if elaser.y<600 and elaser.y>0:\r\n if elaser.yp1.hitbox[1]:\r\n if elaser.x+elaser.width>p1.hitbox[0] and elaser.x-elaser.widthbullet.y-bullet.height:\r\n if elaser.x-elaser.widthbullet.x-bullet.width:\r\n bullets.pop(bullets.index(bullet))\r\n enbullet.pop(enbullet.index(elaser)) \r\n \r\n\r\n \r\n\r\n\r\n # enbullet.append(enemylaser(rand1,100,5,15,5))\r\n \r\n # pass\r\n \r\n if shootloop>0:\r\n shootloop+=1\r\n \r\n if shootloop>9:\r\n shootloop=0\r\n \r\n vel=2.5\r\n \r\n if keys[pygame.K_LEFT] and p1.x>0:\r\n p1.x-=vel\r\n if keys[pygame.K_RIGHT] and p1.x<600-p1.width:\r\n p1.x+=vel\r\n if keys[pygame.K_UP] and p1.y>0:\r\n p1.y-=vel\r\n if keys[pygame.K_DOWN] and p1.y<600-p1.height:\r\n p1.y+=vel\r\n if keys[pygame.K_SPACE] and shootloop==0:\r\n if len(bullets)<10:\r\n shoot.play()\r\n bullets.append(laser(round(p1.x+p1.width//2),round(p1.y+p1.height//2),5,15))\r\n # enbullet.append(enemylaser(rand1,100,5,15))\r\n shootloop=1\r\n \r\n\r\n if e1.y>575 and e1.visible==True:\r\n p1.hit()\r\n score-=5\r\n e1.visible=False\r\n \r\n redraw()\r\n","sub_path":"Space Invasion/trial2.py","file_name":"trial2.py","file_ext":"py","file_size_in_byte":11911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605500903","text":"from rest_framework import viewsets\nfrom rest_framework.decorators import list_route\nfrom lib.core.decorator.response import Core_connector\nfrom lib.utils.exceptions import PubErrorCustom\n\nfrom app.user.models import Users\nfrom app.goods.models import GoodsCateGory,Goods,Card,GoodsTheme\nfrom app.public.models import Banner,AttachMentGroup,AttachMent\n\n\nfrom app.cache.utils import RedisCaCheHandler\n\nfrom include.city import 
city\n\nclass SsyAPIView(viewsets.ViewSet):\n\n #刷新所有用户缓存数据\n @list_route(methods=['POST'])\n @Core_connector()\n def refeshUserRedis(self,request, *args, **kwargs):\n\n # ShengRes = RedisCaCheHandlerCitySheng()\n # Shengs=[]\n # for item in city:\n #\n # Shengs.append({\n # \"label\":item['label'],\n # \"value\":item['value']\n # })\n #\n # r = RedisCaCheHandlerCityShi()\n # childs=[ {\"label\":childs_item['label'],\"value\":childs_item['value']} for childs_item in item['children'] ]\n # r.redis_dict_set(item['value'],{\"value\":childs})\n # r = RedisCaCheHandlerCityXian()\n # if \"children\" in item:\n # for CityShiItem in item['children']:\n # if 'children' in CityShiItem:\n # childs = [{\"label\": childs_item['label'], \"value\": childs_item['value']} for childs_item in CityShiItem['children']]\n # else:\n # childs=[]\n # r.redis_dict_set(CityShiItem['value'], {\"value\":childs})\n # ShengRes.redis_set({\"value\":Shengs})\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"UserModelSerializerToRedis\",\n table=\"user\",\n filter_value=Users.objects.filter(status='0'),\n must_key=\"userid\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"GoodsCateGoryModelSerializerToRedis\",\n table=\"goodscategory\",\n filter_value=GoodsCateGory.objects.filter(),\n must_key=\"gdcgid\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"GoodsModelSerializerToRedis\",\n table=\"goods\",\n filter_value=Goods.objects.filter(),\n must_key=\"gdid\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"AttachMentGroupModelSerializerToRedis\",\n table=\"AttachMentGroup\",\n filter_value=AttachMentGroup.objects.filter(),\n must_key=\"id\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"AttachMentModelSerializerToRedis\",\n table=\"AttachMent\",\n filter_value=AttachMent.objects.filter(),\n must_key=\"id\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"CardModelSerializerToRedis\",\n table=\"card\",\n filter_value=Card.objects.filter(),\n must_key=\"id\",\n ).run()\n\n RedisCaCheHandler(\n method=\"saveAll\",\n serialiers=\"GoodsThemeModelSerializerToRedis\",\n table=\"goodstheme\",\n filter_value=GoodsTheme.objects.filter(),\n must_key=\"typeid\",\n ).run()\n\n return None","sub_path":"app/sys/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"124063963","text":"from unittest import TestCase\n\nfrom common.pythagorean import pythagoreanTripletsSummingTo\n\nclass TestPythagoreanTripletsSummingTo(TestCase):\n\n def testPythagoreanTripletsSummingTo(self):\n for data in self.__createTestData():\n x = data[\"x\"]\n expected = data[\"expected\"]\n\n with self.subTest(x = x, expected = expected):\n self.assertEqual(expected, list(pythagoreanTripletsSummingTo(x)))\n\n def testPythagoreanTripletsSummingToFailsWithNegativeN(self):\n with self.assertRaises(AssertionError):\n list(pythagoreanTripletsSummingTo(-1))\n\n def __createTestData(self):\n\n return [\n {\"x\" : 0, \"expected\" : []}\n , {\"x\" : 1, \"expected\" : []}\n , {\"x\" : 2, \"expected\" : []}\n , {\"x\" : 3, \"expected\" : []}\n , {\"x\" : 4, \"expected\" : []}\n , {\"x\" : 5, \"expected\" : []}\n , {\"x\" : 6, \"expected\" : []}\n , {\"x\" : 7, \"expected\" : []}\n , {\"x\" : 8, \"expected\" : []}\n , {\"x\" : 9, \"expected\" : []}\n , {\"x\" : 10, \"expected\" : []}\n , {\"x\" : 12, \"expected\" : [[3, 4, 5]]}\n , 
{\"x\" : 24, \"expected\" : [[6, 8, 10]]}\n , {\"x\" : 30, \"expected\" : [[5, 12, 13]]}\n , {\"x\" : 120, \"expected\" : [[30, 40, 50], [20, 48, 52], [24, 45, 51]]}\n , {\"x\" : 1000, \"expected\" : [[200, 375, 425]]}]\n","sub_path":"python/test/testPythagoreanTripletsSummingTo.py","file_name":"testPythagoreanTripletsSummingTo.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240581993","text":"import argparse\nfrom src.engine import Engine\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='default.yml',\n help='config name')\n parser.add_argument('--tag', default='',\n help='tag to discern evaluation instances')\n args = parser.parse_args()\n\n engine = Engine(config_name=args.config, tag=args.tag)\n engine.evaluate()\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"378833607","text":"from framework.rom import meta\nfrom module.testsuite.benchmark.benchmark_write_db_command import BenchmarkWriteDbCommand\nfrom framework.command.save_result_command import SaveResultCommand\nfrom sqlalchemy import Table, DECIMAL, Integer, String, func, FLOAT\nfrom module.stream.port_stats import PortStats\nfrom module.stream.stream_block_stats import StreamBlockRxStats, StreamBlockTxStats\nfrom framework import sysentry\nfrom framework.rom.base_types import PARENT_CHILD_RELATION\nfrom framework.rom.manager import ROMManager\nfrom framework.media.media import EnumLineSpeed\nfrom framework.service import db_service\nimport sqlalchemy\nimport os\nfrom framework.utils import util\n\n\n@meta.rom(description='Rfc2889 write db command')\nclass Rfc2889WriteDbCommand(BenchmarkWriteDbCommand):\n def __init__(self):\n super().__init__()\n self.summary_node_description = 'RFC2889 Test Result View'\n self.detail_node_description = ''\n self._iteration_result = 0\n self._port_line_speeds = {}\n self._load_unit = ''\n\n def validate(self):\n super(Rfc2889WriteDbCommand, self).validate()\n self._iteration_result = 0\n self._port_line_speeds.clear()\n iteration_load_command = self._get_iteration_load_command()\n if iteration_load_command:\n self._load_unit = iteration_load_command.LoadUnit.display\n\n for stream_handle in self.command.StreamTemplateHandles:\n stream = ROMManager.get_object(stream_handle)\n port = stream.get_parent()\n line_speed = self._get_port_line_speed(port.get_selected_media().LineSpeed)\n if not port in self._port_line_speeds:\n self._port_line_speeds[port] = line_speed\n return True\n\n def _get_port_line_speed(self, line_speed):\n if line_speed == EnumLineSpeed.SPEED_10M:\n return 10 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_100M:\n return 100 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_1G:\n return 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_2_5G:\n return 2.5 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_5G:\n return 5 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_10G:\n return 10 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_25G:\n return 25 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_40G:\n return 40 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_50G:\n return 50 * 1000 * 1000000\n elif line_speed == EnumLineSpeed.SPEED_100G:\n return 100 * 1000 * 1000000\n else:\n return 0\n\n def _get_offered_load(self, 
iter_db_session):\n offered_load_bit_rate = 0\n offered_load_frame_rate = 0\n offered_load_percent = 0\n line_speed_sum = 0\n\n for port, line_speed in self._port_line_speeds.items():\n port_query = iter_db_session.query(func.sum(PortStats.dot_orm.OfferedLoadBitRate),\n func.sum(PortStats.dot_orm.OfferedLoadFrameRate)) \\\n .filter(PortStats.dot_orm.PortID == port.Name).first()\n line_speed_sum += line_speed\n if port_query:\n offered_load_bit_rate += port_query[0]\n offered_load_frame_rate += port_query[1]\n\n offered_load_percent = offered_load_bit_rate * 100 / line_speed_sum\n return util.convert_decimal(offered_load_percent), util.convert_decimal(offered_load_frame_rate), \\\n util.convert_decimal(offered_load_bit_rate/1000000)\n\n def _create_db_orm_cls(self):\n return db_service.SqliteDBService.create_orm_cls('Rfc2889SummaryResult',\n 'Rfc2889SummaryResultTable',\n (('TrialNo', Integer),\n ('FrameSize', String(50)),\n ('BurstSize', Integer),\n ('IterationNo', Integer),\n ('IntendedLoad', DECIMAL),\n ('OfferedLoadPercent', DECIMAL),\n ('OfferedLoadMbps', DECIMAL),\n ('OfferedLoadFps', DECIMAL),\n ('TxFrameCount', Integer),\n ('RxFrameCount', Integer),\n ('FrameLossCount', Integer),\n ('FrameLossPercent', String(50)),\n ('AddressLearnNumber', Integer),\n ('AddressLearnRate', Integer),\n ('TestGroup', String(50)),\n ('Port', String(50)),\n ('RxPort', String(50)),\n ('PortType', String(50)),\n ('HeadOfBlock', String(50)),\n ('BackPressure', String(50)),\n ('TestResult', String(50)),\n ('TestAllResult', String(50)),\n ('MaxLatency', FLOAT),\n ('AvgLatency', FLOAT),\n ('MinLatency', FLOAT),\n ('MaxJitter', FLOAT),\n ('AvgJitter', FLOAT),\n ('MinJitter', FLOAT),\n ),\n (('ID', Integer),))\n\n def _init_db_table(self, create_table=True):\n super()._init_db_table(create_table)\n # create the detailed summary result view node\n if not self.find_node('DatailedSummaryResult'):\n self._create_dot_result_node(self.root_node, 'DatailedSummaryResult',\n 'Detailed Summary Node',\n self.detail_node_description)\n\n self.summary_db_orm_cls = self._create_db_orm_cls()\n\n db_service.SqliteDBService.create_table('Rfc2889SummaryResult', self.summary_db_orm_cls, self.db_engine, create_table)\n if create_table:\n db_conn = self.db_engine.connect()\n sql = sqlalchemy.text('INSERT INTO SysTable ( TableName,Description) VALUES ( \"%s\",\"%s\")'\n % (str(self.summary_db_orm_cls), str(self.summary_db_orm_cls)))\n db_conn.execute(sql)\n\n def _init_orm_cls(self, orm_cls):\n orm_cls.TrialNo = 0\n orm_cls.FrameSize = 0\n orm_cls.BurstSize = 0\n orm_cls.IterationNo = 0\n orm_cls.IntendedLoad = 0\n orm_cls.TxFrameCount = 0\n orm_cls.RxFrameCount = 0\n orm_cls.OfferedLoadPercent = 0\n orm_cls.OfferedLoadFps = 0\n orm_cls.OfferedLoadMbps = 0\n orm_cls.FrameLossCount = 0\n orm_cls.FrameLossPercent = '0'\n orm_cls.AddressLearnNumber = 0\n orm_cls.AddressLearnRate = 0\n orm_cls.Port = ''\n orm_cls.RxPort = ''\n orm_cls.PortType = ''\n orm_cls.TestGroup = ''\n orm_cls.HeadOfBlock = ''\n orm_cls.BackPressure = ''\n orm_cls.TestAllResult = ''\n orm_cls.MaxLatency = 0\n orm_cls.AvgLatency = 0\n orm_cls.MinLatency = 0\n orm_cls.MaxJitter = 0\n orm_cls.AvgJitter = 0\n orm_cls.MinJitter = 0\n\n def _update_db_table(self, iter_db_session, iter_db_engine):\n total_tx = iter_db_session.query(func.sum(StreamBlockTxStats.dot_orm.TxStreamFrames)).first()\n total_rx = iter_db_session.query(func.sum(StreamBlockRxStats.dot_orm.RxStreamFrames)).first()\n\n stream_query = 
iter_db_session.query(func.max(StreamBlockRxStats.dot_orm.MaxLatency),\n func.avg(StreamBlockRxStats.dot_orm.AvaLatency),\n func.min(StreamBlockRxStats.dot_orm.MinLatency),\n func.max(StreamBlockRxStats.dot_orm.MaxJitter),\n func.avg(StreamBlockRxStats.dot_orm.AvaJitter),\n func.min(StreamBlockRxStats.dot_orm.MinJitter)).first()\n\n tmp = self.summary_db_orm_cls()\n self._init_orm_cls(tmp)\n\n tmp.TrialNo = self._get_current_trial_number()\n tmp.FrameSize = str(self._get_current_frame_size())\n tmp.BurstSize = self._get_current_burst_size()\n tmp.IterationNo = self._get_current_iteration_number()\n tmp.IntendedLoad = self._get_current_load()\n if total_tx[0]:\n tmp.TxFrameCount = total_tx[0]\n if total_rx[0]:\n tmp.RxFrameCount = total_rx[0]\n tmp.OfferedLoadPercent, tmp.OfferedLoadFps, tmp.OfferedLoadMbps = self._get_offered_load(iter_db_session)\n tmp.FrameLossCount = self._cal_frame_loss_count(tmp.TxFrameCount, tmp.RxFrameCount)\n frame_loss_percent, tmp.FrameLossPercent = self._cal_frame_loss_percent(tmp.FrameLossCount, tmp.TxFrameCount)\n self._iteration_result = self._judge_iteration_result(tmp.FrameLossCount, frame_loss_percent) if tmp.TxFrameCount else 1\n tmp.TestResult = 'Passed' if not self._iteration_result else 'Failed'\n tmp.TestAllResult = tmp.TestResult\n tmp.AddressLearnNumber = self._get_address_learn_number()\n tmp.AddressLearnRate = self._get_address_learn_rate()\n\n if stream_query:\n tmp.MaxLatency = util.convert_decimal(stream_query[0])\n tmp.AvgLatency = util.convert_decimal(stream_query[1])\n tmp.MinLatency = util.convert_decimal(stream_query[2])\n tmp.MaxJitter = util.convert_decimal(stream_query[3])\n tmp.AvgJitter = util.convert_decimal(stream_query[4])\n tmp.MinJitter = util.convert_decimal(stream_query[5])\n self.db_session.add(tmp)\n self._update_result_node()\n\n def _cal_frame_loss_count(self, tx_frame, rx_frame):\n return tx_frame - rx_frame if tx_frame > rx_frame else 0\n\n # iteration result: 0:passed, 1: failed\n def _judge_iteration_result(self, frame_loss, frame_loss_rate):\n return 1 if frame_loss else 0\n\n def _get_address_learn_number(self):\n return 0\n\n def _get_address_learn_rate(self):\n return 0\n\n def _get_frame_size_str(self, frame_size):\n if isinstance(frame_size, str):\n return '\"'+frame_size+'\"'\n else:\n return str(frame_size)\n\n def _update_result_node(self):\n trial_number = self._get_current_trial_number()\n frame_size = self._get_current_frame_size()\n load = self._get_current_load()\n iter_number = self._get_current_iteration_number()\n\n # create Trial the node\n trial_node_id = self.summary_node.Tag + '/Trial' + str(trial_number)\n trial_node = self.find_node(trial_node_id)\n if not trial_node:\n trial_node_name = 'TrialNode'\n trial_node_description = 'Trial: ' + str(trial_number)\n trial_node = self._create_dot_result_node(self.summary_node, trial_node_id, trial_node_name,\n trial_node_description)\n\n # create the frame size node\n frame_size_node_id = trial_node.Tag + '/FrameSize' + str(frame_size)\n frame_size_node = self.find_node(frame_size_node_id)\n if not frame_size_node:\n frame_size_node_name = 'FrameSizeNode'\n frame_size_node_description = 'Frame Size: ' + str(frame_size)\n frame_size_node = self._create_dot_result_node(trial_node, frame_size_node_id, frame_size_node_name,\n frame_size_node_description)\n\n # create the Load node\n load_node_id = frame_size_node.Tag + '/Load' + str(load) + '-Iter' + str(iter_number)\n load_node = self.find_node(load_node_id)\n if not load_node:\n load_node_name = 'LoadNode'\n 
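            # NOTE (added comment): result nodes form a Trial -> FrameSize -> Load/Iteration
            # hierarchy; each node's Tag doubles as the path key that find_node() checks
            # above before a new child node is created.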
load_node_description = 'Load: ' + str(load)+ ' Iteration: ' + str(iter_number)\n load_node = self._create_dot_result_node(frame_size_node, load_node_id, load_node_name, load_node_description)\n\n def _get_iter_db_name(self):\n trial_number = self._get_current_trial_number()\n frame_size = self._get_current_frame_size()\n load = self._get_current_load()\n return 'sqlite:///' + os.path.join(self.base_dir,self.base_file_name) + '-Trial-' + str(trial_number) + '-FrameSize-' + \\\n str(frame_size) + '-Load-' + str(load) + '.db'\n\n def _get_current_iteration_number(self):\n return self.get_current_iteration() + 1\n\n def _get_current_burst_size(self):\n return self.command.default_burst_size\n\n @property\n def iteration_result(self):\n return self._iteration_result\n\n @property\n def _trial_no(self):\n return 'TrialNo AS \"Trial No.\",'\n\n @property\n def _frame_size(self):\n return 'FrameSize AS \"Frame Size\",'\n\n @property\n def _burst_size(self):\n return 'BurstSize AS \"Burst Size\",'\n\n @property\n def _iteration_no(self):\n return 'IterationNo AS \"Iteration No.\",'\n\n @property\n def _intended_load_sql(self):\n if self._load_unit == 'Percent (%)':\n unit_str = '%'\n elif self._load_unit == 'frames/second':\n unit_str = 'fps'\n elif self._load_unit == 'Inter Frame Gap (byte)':\n unit_str = 'IFG'\n else:\n unit_str = self._load_unit\n return 'IntendedLoad AS \"Intended Load (' + unit_str + ')\",'\n\n @property\n def _offered_load_sql(self):\n return 'OfferedLoadPercent AS \"Offered Load (%)\", OfferedLoadFps AS \"Offered Load (fps)\", ' \\\n 'OfferedLoadMbps AS \"Offered Load (Mbps)\",'\n @property\n def _port_name(self):\n return 'Port AS \"Port Name\",'\n\n @property\n def _port_type(self):\n return 'PortType AS \"Port Type\",'\n\n @property\n def _learn_address_num(self):\n return 'AddressLearnNumber AS \"Number of Addresses\",'\n\n @property\n def _learn_address_rate(self):\n return 'AddressLearnRate AS \"Learning Rate (fps)\",'\n\n @property\n def _head_of_block(self):\n return '(case when HeadOfBlock = 0 then \"False\" else \"True\" end) AS \"Head of Line Blocking\",'\n\n @property\n def _rx_port(self):\n return 'RxPort AS \"Rx Port Name\",'\n\n @property\n def _back_pressure(self):\n return '(case when BackPressure = 0 then \"False\" else \"True\" end) AS \"Backpressure\" '\n\n @property\n def _test_group(self):\n return 'TestGroup AS \"Test Group\",'\n\n @property\n def _tx_frame_count(self):\n return 'TxFrameCount AS \"Tx Frame Count\",'\n\n @property\n def _rx_frame_count(self):\n return 'RxFrameCount AS \"Rx Frame Count\",'\n\n @property\n def _frame_loss_count(self):\n return 'FrameLossCount AS \"Lost Frames\",'\n\n @property\n def _frame_loss_percent(self):\n return 'FrameLossPercent AS \"Frame Loss Rate (%)\",'\n\n @property\n def _test_result(self):\n return 'TestResult AS \"Test Result\" '\n\n @property\n def _avg_latency(self):\n return 'AvgLatency AS \"Avg Latency (us)\",'\n\n @property\n def _max_latency(self):\n return 'MaxLatency AS \"Max Latency (us)\",'\n\n @property\n def _min_latency(self):\n return 'MinLatency AS \"Min Latency (us)\",'\n\n","sub_path":"CL/module/testsuite/benchmark/l2l3/rfc2889/rfc2889_write_db_command.py","file_name":"rfc2889_write_db_command.py","file_ext":"py","file_size_in_byte":16301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534914466","text":"import turtle\nimport random\n\n# assighining colors to the snake:\nturtle.hideturtle()\nturtle.penup()\nsnake = 
turtle.clone()\nsnake.shape(\"square\") \n\n# snake and background color:\nbg_color = input(\"choose a background color, or die \")\nturtle.bgcolor(bg_color)\nsnake_color = input(\"choose a snake color, or don't, i don't care, jees! \")\nsnake.color(snake_color)\n # screen size:\nscreen_size = input(\"choose your preferd screen size:) or burn in hell \")\n\n# game difficulty:\ngame_diff = input(\"choose gmae difficulty noobbbbbbb \") \n# turtle movement spped:\nTIME_STEP = 105\nturtle.tracer(0,0.5) #This helps the turtle move smoothly.\n\n# defining SIZE_X and SIZE_Y:\nSIZE_X=800\nSIZE_Y=500\nturtle.setup(SIZE_X, SIZE_Y) \nturtle.penup() \n\nSQUARE_SIZE = 20\nSTART_LENGTH = 5 # the starting length of the snake.\n\n# Initialize lists\npos_list = []\nstamp_list = []\nfood_pos = []\nfood_stamps = []\n\nturtle.hideturtle()\n\nfor i in range(START_LENGTH):\n x_pos=snake.pos()[0] \n y_pos=snake.pos()[1]\n x_pos+= SQUARE_SIZE #increases the x_pos by SQUARE_SIZE(by 20)advances 20 to the right. \n my_pos=(x_pos,y_pos)\n snake.goto(x_pos,y_pos)\n pos_list.append(my_pos) # adds my_pos to pos_list. \n z = snake.stamp()\n stamp_list.append(z)\n \n# keyboard shortcuts\nUP_ARROW = \"Up\"\nLEFT_ARROW = \"Left\"\nDOWN_ARROW = \"Down\"\nRIGHT_ARROW = \"Right\"\n\n# assigning values to the variables:UP,DOWN,LEFT,RIGHT:\nUP = 0\nDOWN = 1\nRIGHT = 2\nLEFT = 3\n\n# setting the limits of the game area: RIGHT, LEFT, UP AND DOWN.\n# for small screen:\nif screen_size == \"small\" or screen_size == \"tiny\" or screen_size == \"not too big\": \n UP_EDGE = 260\n DOWN_EDGE = -260\n RIGHT_EDGE = 400\n LEFT_EDGE = -400\n\n# for big screeen:\nelif screen_size == \"big\" or screen_size == \"huge\" or screen_size == \"enormous\":\n UP_EDGE = 520\n DOWN_EDGE = -520\n RIGHT_EDGE = 840\n LEFT_EDGE = -840\n\ndef up():\n global direction # global changes the variable direction.\n direction = UP # changes the direction to up.\n print(\"Up key\")\n\ndef down():\n global direction\n direction = DOWN \n print( \"Down key\")\n\ndef left():\n global direction\n direction = LEFT\n print(\"Left key\")\n\ndef right():\n global direction\n direction = RIGHT \n print(\"Right key\")\n\n# telling snake what to do when a key is pressed:\nturtle.onkeypress(right, RIGHT_ARROW)\nturtle.onkeypress(left, LEFT_ARROW)\nturtle.onkeypress(up, UP_ARROW)\nturtle.onkeypress(down, DOWN_ARROW)\n\nturtle.listen()\n\n#creates food turtle for make_food\nturtle.register_shape(\"trash.gif\")\nfood = turtle.clone()\nfood.shape(\"trash.gif\")\n\n# making food appear randomly\ndef make_food():\n min_x =- int(RIGHT_EDGE/SQUARE_SIZE) +1\n max_x = int(RIGHT_EDGE/SQUARE_SIZE) - 1\n min_y =- int(UP_EDGE/SQUARE_SIZE) + 1\n max_y = int(UP_EDGE/SQUARE_SIZE) - 1\n\n # pick a position that is a multiple of SQUARE_SIZE:\n food_x = random.randint(min_x , max_x)*SQUARE_SIZE\n food_y = random.randint(min_y , max_y)*SQUARE_SIZE\n\n # make the food go to the randomly-generated position:\n food.goto(food_x, food_y)\n stamp_id = food.stamp() \n food_stamps.append(stamp_id)\n stamp_pos_tuple = (food_x, food_y)\n food_pos.append( stamp_pos_tuple)\n \n#moves the snake in the direction sset by global var\ndirection = RIGHT # point right at start of game\ndef move_snake():\n my_pos = snake.pos()\n x_pos = my_pos[0]\n y_pos = my_pos[1]\n if snake.pos() in pos_list[0:-1]:\n quit()\n # changing the snake's location:\n if direction == RIGHT:\n snake.goto(x_pos + SQUARE_SIZE, y_pos)\n elif direction == LEFT:\n snake.goto(x_pos - SQUARE_SIZE, y_pos)\n elif direction == UP:\n snake.goto(x_pos, 
y_pos + SQUARE_SIZE)\n elif direction == DOWN:\n snake.goto(x_pos, y_pos - SQUARE_SIZE)\n\n # grab pos of snake;\n new_pos = snake.pos()\n new_x_pos = new_pos[0]\n new_y_pos = new_pos[1]\n\n # checking if the snake is hitting the edges:\n # for playing without infinite walls \n if game_diff == \"hard\" or game_diff == \"torture\" or game_diff == \"why???\":\n if new_x_pos >= RIGHT_EDGE:\n print(\"Sorry, you hit the right corner, GAME OVER!\")\n quit()\n if new_x_pos <= LEFT_EDGE:\n print(\"Sorry, you hit the lrft corner, GAME OVER!\")\n quit()\n if new_y_pos >= UP_EDGE:\n print(\"Sorry, you hit the upper edge, GAME OVER!\")\n quit()\n if new_y_pos <= DOWN_EDGE:\n print(\"Sorry, you hit the downer edge, GAME OVER!\")\n quit()\n\n # for playing with infinite walls == game diif == easy\n if game_diff == \"easy\" or game_diff == \"ebmbarrassing\" or game_diff == \"light\":\n if new_x_pos >= RIGHT_EDGE:\n snake.goto(LEFT_EDGE , new_y_pos)\n if new_x_pos <= LEFT_EDGE:\n snake.goto(RIGHT_EDGE , new_y_pos)\n if new_y_pos >= UP_EDGE:\n snake.goto(new_x_pos,DOWN_EDGE)\n if new_y_pos <= DOWN_EDGE:\n snake.goto(new_x_pos,UP_EDGE )\n \n # the snake's position changed so we stamp it again.\n my_pos = snake.pos()\n pos_list.append(my_pos)\n new_stamp = snake.stamp() \n stamp_list.append(new_stamp) # keep stamp ids so i can delete them later.\n\n # poping the food that was consumed by the snake:\n global food_stamps, food_pos\n if snake.pos() in food_pos: # snake is on top of food item\n food_ind = food_pos.index(snake.pos())\n food.clearstamp(food_stamps[food_ind]) # removes eaten food stamp\n food_pos.pop(food_ind) # removes eaten food position\n food_stamps.pop(food_ind)\n print(\"Yummy.....\")\n #the snake gets longer\n else:\n old_stamp = stamp_list.pop(0)\n snake.clearstamp(old_stamp)\n pos_list.pop(0)\n\n if len(food_stamps) < 3:\n make_food()\n \n # moves snake automatically in a chosen refresh rate:\n turtle.ontimer(move_snake,TIME_STEP)\n\nmove_snake()\n\n\nturtle.mainloop()\n\n \n\n","sub_path":"rani20-mini-proj.py","file_name":"rani20-mini-proj.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648641376","text":"from src.models.assets_fundamentals.assets_fundamentals import Asset_Fundamentals\nfrom src.common.database import Database\nimport src.models.current_portfolio.constants as CurrentPortfolioConstants\nimport src.models.current_portfolio.errors as CurrentPortfolioErrors\n\n__author__ = \"stephanosgg\"\n\n\nclass Current_Portfolio(object):\n def __init__(self, _id, positions):\n \"\"\"\n :param _id: here the _id is a ISO formatted date, representing the date of the portfolio positions\n :param positions: this a list of dictionaries positions. 
It must not be empty\n        \"\"\"\n        self._id = _id\n        self.positions = positions\n\n    def __repr__(self):\n        return \"<Current_Portfolio {}>\".format(self._id)\n\n    def save_to_db(self):\n        Current_Portfolio.remove_all()  # since there should be only one current portfolio\n        self.make_asset_fundamentals()\n        Database.insert(collection=CurrentPortfolioConstants.COLLECTION, data=self.json())\n\n    def make_asset_fundamentals(self):\n        # create asset fundamentals that do not exist yet\n        for each in self.positions:\n            Asset_Fundamentals.create_if_new(each['ticker'])\n\n    def total_port_upside(self):\n        upside = 0\n        for each in self.positions:\n            asset = Asset_Fundamentals.find_by_id(each['ticker'])\n            upside += asset.total_upside(each['price']) * each['fund_weight']\n        return upside\n\n    def json(self):\n        return self.__dict__\n\n    @staticmethod\n    def populate_from_json(performit_translated_json):\n        _id = performit_translated_json['fund']['nav']['date']\n        perf_positions = performit_translated_json['fund']['positions']\n        positions = []\n        for position in perf_positions:\n            if position['security']['security_type'] == 'STOCK':\n                positions.append(Current_Portfolio.create_json_for_position(\n                    ticker=position['security']['symbol'],\n                    price=float(position['price']),\n                    fund_weight=float(position['weight'])\n                ))\n        return Current_Portfolio(_id, positions)\n\n    @staticmethod\n    def remove_all():\n        Database.remove(CurrentPortfolioConstants.COLLECTION, query={})\n\n    @classmethod\n    def get_current_portfolio(cls):\n        data = Database.find(collection=CurrentPortfolioConstants.COLLECTION, query={})\n        return cls(**data[0]) if data.count() > 0 else None\n\n    @staticmethod\n    def create_json_for_position(ticker, price, fund_weight):\n        return {\n            \"ticker\": ticker,\n            \"price\": price,\n            \"fund_weight\": fund_weight\n        }\n","sub_path":"src/models/current_portfolio/current_portfolio.py","file_name":"current_portfolio.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"278837343","text":"#!/usr/bin/env python3\n\"\"\"\ndronetello.py\n\nDroNet implementation for the DJI/Ryze Tello\n\nWritten by Moritz Sperling\nBased on the work of A. Loquercio et al., 2018 (https://github.com/uzh-rpg/rpg_public_dronet)\nand D. Fuentes (https://github.com/damiafuentes/DJITelloPy)\n\nLicensed under the MIT License (see LICENSE for details)\n\"\"\"\n\nimport os\nimport sys\nimport cv2\nimport time\nimport math\nimport pygame\nimport numpy as np\nfrom pygame.locals import *\nfrom djitellopy import Tello\nfrom keras import backend as k\nfrom keras.models import model_from_json\n\nsys.path.insert(0, '../workflow/util/')\nfrom img_utils import pred_as_bar, pred_as_indicator\n\n\n#\n# 8888888 d8b 888 d8b 888 d8b 888 d8b\n# 888 Y8P 888 Y8P 888 Y8P 888 Y8P\n# 888 888 888 888\n# 888 88888b. 888 888888 888 8888b. 888 888 .d8888b 8888b. 888888 888 .d88b. 88888b.\n# 888 888 \"88b 888 888 888 \"88b 888 888 88K \"88b 888 888 d88\"\"88b 888 \"88b\n# 888 888 888 888 888 888 .d888888 888 888 \"Y8888b. .d888888 888 888 888 888 888 888\n# 888 888 888 888 Y88b. 888 888 888 888 888 X88 888 888 Y88b. 
888 Y88..88P 888 888\n# 8888888 888 888 888 \"Y888 888 \"Y888888 888 888 88888P' \"Y888888 \"Y888 888 \"Y88P\" 888 888\n#\n\nclass FrontEnd(object):\n \"\"\" Maintains the Tello display and moves it through the keyboard keys.\n Press escape key to quit.\n The controls are:\n - Tab: Takeoff\n - Shift: Land\n - Space: Emergency shutdown\n - Backspace: Shutdown\n - WASW: Forward, backward, left and right.\n - Q and E: Counter clockwise and clockwise rotations\n - R and F: Up and down.\n - P: Switch through controllable parameters\n - + and -: Raise or lower parameter\n - #: Enable / Disable DroNet\n - C: Toggle recording of frames\n \"\"\"\n\n def __init__(self):\n\n # FlowDroNet Configuration\n self.json_model_path = \"./models/DroNet/model_struct.json\"\n self.weights_path = \"./models/DroNet/model_weights_new_best.h5\"\n self.output_folder = './recorded/'\n self.target_size = (200, 200)\n self.FPS = 10\n\n # config of controllable parameters\n # initial values\n self.controll_params = {\n 'speed': 100,\n 'alpha': 0.7,\n 'beta': 0.5,\n }\n # stepsize\n self.controll_params_d = {\n 'speed': 10,\n 'alpha': 0.1,\n 'beta': 0.1,\n }\n # max (min is 0)\n self.controll_params_m = {\n 'speed': 100,\n 'alpha': 1,\n 'beta': 1,\n }\n\n # Init internal variables\n self.for_back_velocity = 0\n self.left_right_velocity = 0\n self.up_down_velocity = 0\n self.yaw_velocity = 0\n self.internalSpeed = 100\n self.send_rc_control = False\n self.battery_percentage = 0\n self.v_old = 0\n self.sa_old = 0\n self.wasDroNet = True\n self.last_pred_col = 0\n self.last_pred_ang = 0\n self.lastTime = time.time()\n self.should_stop = False\n self.record_frames = False\n self.isArmed = False\n self.current_parameter = 0\n self.param_keys = list(self.controll_params.keys())\n\n # set keras to test phase\n k.set_learning_phase(0)\n\n # Tensorflow: load json and weights, then compile model\n with open(self.json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n self.net = model_from_json(loaded_model_json)\n self.net.load_weights(self.weights_path)\n self.net.compile(loss='mse', optimizer='sgd')\n\n # Create output folder if not exists\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n # Init pygame\n pygame.init()\n pygame.display.set_caption(\"DroNeTello\")\n self.screen = pygame.display.set_mode([985, 754])\n pygame.time.set_timer(USEREVENT + 1, int(1. / self.FPS * 1000))\n\n # Init Tello object that interacts with the Tello drone\n self.tello = Tello()\n\n #\n # 888b d888 d8b 888\n # 8888b d8888 Y8P 888\n # 88888b.d88888 888\n # 888Y88888P888 8888b. 888 88888b. 888 .d88b. .d88b. 88888b.\n # 888 Y888P 888 \"88b 888 888 \"88b 888 d88\"\"88b d88\"\"88b 888 \"88b\n # 888 Y8P 888 .d888888 888 888 888 888 888 888 888 888 888 888\n # 888 \" 888 888 888 888 888 888 888 Y88..88P Y88..88P 888 d88P\n # 888 888 \"Y888888 888 888 888 88888888 \"Y88P\" \"Y88P\" 88888P\"\n # 888\n # 888\n # 888\n #\n\n def run(self):\n\n if not self.tello.connect():\n print(\"Tello not connected\")\n return\n\n if not self.tello.set_speed(self.internalSpeed):\n print(\"Not set speed to lowest possible\")\n return\n\n # In case streaming is on. 
This happens when we quit this program without the escape key.\n if not self.tello.streamoff():\n print(\"Could not stop video stream\")\n return\n\n if not self.tello.streamon():\n print(\"Could not start video stream\")\n return\n\n frame_read = self.tello.get_frame_read()\n\n self.should_stop = False\n while not self.should_stop:\n # sometimes read battery status\n if np.random.random() < 0.05:\n self.battery_percentage = self.tello.get_battery()\n\n # read frame\n img = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)\n\n # get output from dronet\n self.get_dronet_command(img)\n\n # update hud\n self.update_hud(img.copy())\n\n # save frame if recording\n if self.record_frames:\n fname = os.path.join(self.output_folder, time.strftime(\"record_%Y%m%d_%H%M%S_0.jpg\", time.gmtime()))\n i = 0\n while os.path.exists(fname):\n fname = fname.replace('_' + str(i) + '.', '_' + str(i + 1) + '.',)\n i = i + 1\n cv2.imwrite(fname, frame_read.frame)\n\n # handle input from dronet or user\n for event in pygame.event.get():\n if event.type == USEREVENT + 1:\n self.send_input()\n elif event.type == QUIT:\n self.should_stop = True\n elif event.type == KEYDOWN:\n if (event.key == K_ESCAPE) or (event.key == K_BACKSPACE):\n self.should_stop = True\n else:\n self.keydown(event.key)\n elif event.type == KEYUP:\n self.keyup(event.key)\n\n # shutdown stream\n if frame_read.stopped:\n frame_read.stop()\n break\n\n # wait a little\n time.sleep(1 / self.FPS)\n\n # always call before finishing to deallocate resources\n self.tello.end()\n time.sleep(1)\n exit(0)\n\n def get_dronet_command(self, img):\n # prep image\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, self.target_size)\n img = np.asarray(img, dtype=np.float32) * np.float32(1.0 / 255.0)\n carry = np.array(img)[np.newaxis, :, :, np.newaxis]\n\n # inference\n outs = self.net.predict(carry, batch_size=None, verbose=0, steps=None)\n theta, p_t = outs[0][0], outs[1][0]\n self.last_pred_col = p_t\n self.last_pred_ang = theta\n\n # calculate real velocity and steering angle (see reference implementation)\n velocity = (1 - self.controll_params['alpha']) * self.v_old \\\n + self.controll_params['alpha'] * (1 - p_t) * self.controll_params['speed']\n steering_angle = (1 - self.controll_params['beta']) * self.sa_old \\\n + self.controll_params['beta'] * math.pi / 2 * theta\n sa_deg = -steering_angle / math.pi * 180\n\n # save current velocity and steering angle for next step\n self.v_old = velocity\n self.sa_old = steering_angle\n\n # set forward and yaw velocity if dronet is active\n if self.isArmed:\n self.for_back_velocity = int(velocity)\n self.yaw_velocity = int(sa_deg)\n\n #\n # 8888888 888 888b d888 888 888 888\n # 888 888 8888b d8888 888 888 888\n # 888 888 88888b.d88888 888 888 888\n # 888 88888b. 88888b. 888 888 888888 888Y88888P888 .d88b. 888888 88888b. .d88b. .d88888 .d8888b\n # 888 888 \"88b 888 \"88b 888 888 888 888 Y888P 888 d8P Y8b 888 888 \"88b d88\"\"88b d88\" 888 88K\n # 888 888 888 888 888 888 888 888 888 Y8P 888 88888888 888 888 888 888 888 888 888 \"Y8888b.\n # 888 888 888 888 d88P Y88b 888 Y88b. 888 \" 888 Y8b. Y88b. 
888 888 Y88..88P Y88b 888 X88\n # 8888888 888 888 88888P\" \"Y88888 \"Y888 888 888 \"Y8888 \"Y888 888 888 \"Y88P\" \"Y88888 88888P'\n # 888\n # 888\n # 888\n #\n\n def keydown(self, key):\n \"\"\" Update velocities based on key pressed\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_w: # set forward velocity\n self.isArmed = False\n self.for_back_velocity = self.controll_params['speed']\n elif key == pygame.K_s: # set backward velocity\n self.isArmed = False\n self.for_back_velocity = -self.controll_params['speed']\n elif key == pygame.K_a: # set left velocity\n self.isArmed = False\n self.left_right_velocity = -self.controll_params['speed']\n elif key == pygame.K_d: # set right velocity\n self.isArmed = False\n self.left_right_velocity = self.controll_params['speed']\n elif key == pygame.K_r: # set up velocity\n self.isArmed = False\n self.up_down_velocity = self.controll_params['speed']\n elif key == pygame.K_f: # set down velocity\n self.isArmed = False\n self.up_down_velocity = -self.controll_params['speed']\n elif key == pygame.K_e: # set yaw clockwise velocity\n self.isArmed = False\n self.yaw_velocity = self.controll_params['speed']\n elif key == pygame.K_q: # set yaw counter clockwise velocity\n self.isArmed = False\n self.yaw_velocity = -self.controll_params['speed']\n elif key == pygame.K_TAB: # takeoff\n self.tello.takeoff()\n self.send_rc_control = True\n elif key == pygame.K_LSHIFT: # land\n self.isArmed = False\n self.tello.land()\n self.send_rc_control = False\n elif key == pygame.K_SPACE: # emergency shutdown\n self.isArmed = False\n self.tello.emergency()\n self.send_rc_control = False\n self.should_stop = True\n elif key == pygame.K_HASH: # arm/disarm dronet\n self.isArmed = not self.isArmed\n self.for_back_velocity = 0\n self.yaw_velocity = 0\n elif key == pygame.K_p: # switch through parameters\n if self.current_parameter < len(self.controll_params) - 1:\n self.current_parameter = self.current_parameter + 1\n else:\n self.current_parameter = 0\n elif key == pygame.K_PLUS: # raise current parameter\n what = self.param_keys[self.current_parameter]\n if self.controll_params[what] < self.controll_params_m[what] - 0.01:\n self.controll_params[what] = self.controll_params[what] + self.controll_params_d[what]\n elif key == pygame.K_MINUS: # lower current parameter\n what = self.param_keys[self.current_parameter]\n if self.controll_params[what] > 0.01:\n self.controll_params[what] = self.controll_params[what] - self.controll_params_d[what]\n elif key == pygame.K_c: # toggle recording of frames\n self.record_frames = not self.record_frames\n\n def keyup(self, key):\n \"\"\" Update velocities based on key released\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_w or key == pygame.K_s: # set zero forward/backward velocity\n self.for_back_velocity = 0\n elif key == pygame.K_a or key == pygame.K_d: # set zero left/right velocity\n self.left_right_velocity = 0\n elif key == pygame.K_r or key == pygame.K_f: # set zero up/down velocity\n self.up_down_velocity = 0\n elif key == pygame.K_q or key == pygame.K_e: # set zero yaw velocity\n self.yaw_velocity = 0\n\n def send_input(self):\n \"\"\" Update routine. 
Send velocities to Tello.\"\"\"\n print(\"V: \" + str(self.for_back_velocity) + \"; Y: \" + str(self.yaw_velocity))\n if self.send_rc_control:\n self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity, self.up_down_velocity,\n self.yaw_velocity)\n\n #\n # 888 888 888 888b d888 888 888 888\n # 888 888 888 8888b d8888 888 888 888\n # 888 888 888 88888b.d88888 888 888 888\n # 8888888888 .d88b. 888 88888b. .d88b. 888d888 888Y88888P888 .d88b. 888888 88888b. .d88b. .d88888 .d8888b\n # 888 888 d8P Y8b 888 888 \"88b d8P Y8b 888P\" 888 Y888P 888 d8P Y8b 888 888 \"88b d88\"\"88b d88\" 888 88K\n # 888 888 88888888 888 888 888 88888888 888 888 Y8P 888 88888888 888 888 888 888 888 888 888 \"Y8888b.\n # 888 888 Y8b. 888 888 d88P Y8b. 888 888 \" 888 Y8b. Y88b. 888 888 Y88..88P Y88b 888 X88\n # 888 888 \"Y8888 888 88888P\" \"Y8888 888 888 888 \"Y8888 \"Y888 888 888 \"Y88P\" \"Y88888 88888P'\n # 888\n # 888\n # 888\n #\n\n def update_hud(self, frame):\n\n \"\"\"Draw drone info and record on frame\"\"\"\n if self.isArmed:\n stats = [\"DroNet active.\"]\n if self.wasDroNet:\n stats.append(\"Predictions:\")\n stats.append(\"[C: {:4.3f}] [SA: {:4.3f}]\".format(float(self.last_pred_col), float(self.last_pred_ang)))\n stats.append(\"Commands:\")\n stats.append(\"[V: {:03d}] [SA: {:03d}]\".format(int(self.v_old), int(self.sa_old / math.pi * 180)))\n else:\n stats.append(\"Command overwritten ...\")\n else:\n stats = [\"DroNet disarmed.\", \"Predictions:\",\n \"[C: {:4.3f}] [SA: {:4.3f}]\".format(float(self.last_pred_col), float(self.last_pred_ang))]\n\n stats.append(self.param_keys[self.current_parameter]\n + \": {:4.1f}\".format(self.controll_params[self.param_keys[self.current_parameter]]))\n for idx, stat in enumerate(stats):\n text = stat.lstrip()\n cv2.putText(frame, text, (0, 30 + (idx * 30)),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.8, (255, 0, 0), lineType=30)\n\n # show blinking red dot when recording\n if self.record_frames:\n cv2.putText(frame, \"Recording\", (frame.shape[1] - 187, 38),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), lineType=30)\n if round(time.time()) % 2 == 0:\n cv2.circle(frame, (frame.shape[1] - 30, 30), 15, (255, 0, 0), -1)\n\n # show battery percentage\n cv2.putText(frame, \"Battery: {:d} %\".format(int(self.battery_percentage)),\n (frame.shape[1] - 187, frame.shape[0] - 12),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), lineType=30)\n\n # display steering angle prediction as indicator in the middle\n hspacer = np.ones((12, frame.shape[1], 3), dtype=np.uint8) * 255\n steerbar = pred_as_indicator(self.last_pred_ang, (20, frame.shape[1]), \"Steering\")\n steerbar = cv2.cvtColor(steerbar, cv2.COLOR_BGR2RGB)\n frame_out = np.vstack((frame, hspacer, steerbar))\n\n # display collision prediction as bar on the side\n vspacer = np.zeros((frame_out.shape[0], 5, 3), dtype=np.uint8)\n collbar = pred_as_bar(self.last_pred_col, (frame_out.shape[0], 20), \"Collision\")\n collbar = cv2.cvtColor(collbar, cv2.COLOR_BGR2RGB)\n frame_out = np.hstack((frame_out, vspacer, collbar))\n\n frame_out = np.fliplr(frame_out)\n frame_out = np.rot90(frame_out)\n frame_out = pygame.surfarray.make_surface(frame_out)\n self.screen.fill([0, 0, 0])\n self.screen.blit(frame_out, (0, 0))\n pygame.display.update()\n\n\ndef main():\n frontend = FrontEnd()\n\n # run frontend\n frontend.run()\n\n\nif __name__ == '__main__':\n 
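# note: the USEREVENT+1 timer registered in __init__ fires every 1/FPS seconds and drives send_input(); main() below blocks inside run() until the Tello session ends\n    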
main()\n","sub_path":"DroNeTello/dronetello.py","file_name":"dronetello.py","file_ext":"py","file_size_in_byte":17993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"500879926","text":"import copy\nimport random \nimport torch \nfrom torch import nn \nimport torch.nn.functional as F \nfrom torchvision import transforms \nfrom math import pi, cos \nfrom collections import OrderedDict\nHPS = dict(\n    max_steps=int(1000. * 1281167 / 4096),  # total steps = 1000 epochs * (1281167 ImageNet samples / 4096 batch size)\n    # = total_epochs * len(dataloader) \n    mlp_hidden_size=512,\n    projection_size=256,\n    base_target_ema=5e-4,\n    batchnorm_kwargs=dict(\n        decay_rate=0.9,\n        eps=1e-5), \n    seed=1337,\n)\n\n\nfrom .simsiam import D  # a bit different but it's essentially the same thing: neg cosine sim & stop gradient\n\n\nclass MLP(nn.Module):\n    def __init__(self, in_dim):\n        super().__init__()\n\n        self.layer1 = nn.Sequential(\n            nn.Linear(in_dim, HPS['mlp_hidden_size']),\n            nn.BatchNorm1d(HPS['mlp_hidden_size'], eps=HPS['batchnorm_kwargs']['eps'], momentum=1-HPS['batchnorm_kwargs']['decay_rate']),\n            nn.ReLU(inplace=True)\n        )\n        self.layer2 = nn.Linear(HPS['mlp_hidden_size'], HPS['projection_size'])\n\n    def forward(self, x):\n        x = self.layer1(x)\n        x = self.layer2(x)\n        return x\n\nclass BYOL(nn.Module):\n    def __init__(self, backbone):\n        super().__init__()\n\n        self.backbone = backbone\n        self.projector = MLP(backbone.output_dim)\n        self.online_encoder = nn.Sequential(\n            self.backbone,\n            self.projector\n        )\n        print(\"first#########################\", flush=True)\n        print(len([x for x in self.online_encoder.parameters()]), flush=True)\n        print(len([x for x in self.backbone.parameters()]), flush=True)\n        print(\"first_done#########################\", flush=True)\n\n        self.target_encoder = copy.deepcopy(self.online_encoder)\n        self.online_predictor = MLP(HPS['projection_size'])\n        # raise NotImplementedError('Please put update_moving_average to training')\n\n    def target_ema(self, k, K, base_ema=HPS['base_target_ema']):\n
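        # cosine EMA schedule from the BYOL paper: tau = 1 - (1 - tau_base) * (cos(pi*k/K) + 1) / 2,\n        # so the target coefficient ramps from tau_base = 1 - base_ema toward 1 over the K training steps\n        tau_base = 1. - base_ema\n        return 1. - (1. 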
- tau_base) * (cos(pi*k/K)+1)/2 \n\n    @torch.no_grad()\n    def update_moving_average(self, global_step, max_steps):\n        tau = self.target_ema(global_step, HPS['max_steps'])\n        for online, target in zip(self.online_encoder.parameters(), self.target_encoder.parameters()):\n            target.data = tau * target.data + (1 - tau) * online.data\n    \n    def forward(self, x1, x2):\n        f_o, h_o = self.online_encoder, self.online_predictor\n        f_t = self.target_encoder\n\n        # with torch.no_grad():\n        #     print(\"AAAAAAAAAAAAAAAAAAAAAA\", flush=True)\n        #     print(len([x for x in f_o.parameters()]), flush=True)\n        #     total_diff = 0.\n        #     for p1, p2 in zip(f_o.parameters(), f_t.parameters()):\n        #         total_diff += torch.abs(p1.data - p2.data).sum().item()\n        #     print(\"\\n\", total_diff, flush=True)\n\n        z1_o = f_o(x1)\n        z2_o = f_o(x2)\n\n        p1_o = h_o(z1_o)\n        p2_o = h_o(z2_o)\n\n        with torch.no_grad():\n            z1_t = f_t(x1)\n            z2_t = f_t(x2)\n        \n        L = D(p1_o, z2_t) / 2 + D(p2_o, z1_t) / 2 \n        return {'loss': L}\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"models/byol.py","file_name":"byol.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"183805226","text":"# -*- coding:utf-8 -*-\r\nfrom config import *\r\nfrom user_db import *\r\n\r\n\r\n'''\r\nop = cmd u_id,name,age,phone from users\r\n\r\ncmd\r\n    insert/delete/select/update\r\nop1 \r\n    u_id,name,age,phone\r\nop2\r\n    into/from\r\n\r\ntb_name\r\n    users\r\n'''\r\n\r\nlogin()\r\n\r\n\r\nwhile True:\r\n    op = input(\"user_db>>>\")\r\n    if op == 'exit':\r\n        print(\"Exited successfully\")\r\n        exit()\r\n    cmd,op1,op2,tb_name = tuple(op.split())\r\n    if op2 == 'from': \r\n        if cmd == 'select':\r\n            if op1 == 'all' :\r\n                sel_user(tb_name)\r\n            elif op1 == 'del':\r\n                sel_user(tb_name,True)\r\n            else:\r\n                print(\"syntax error [all/del]\")\r\n        elif cmd == 'delete':\r\n            del_user(op1)\r\n        elif cmd == 'restore':\r\n            res_user(op1)\r\n        else:\r\n            print(\"syntax error\")\r\n    elif op2 == 'into':\r\n        if cmd == 'insert':\r\n            if len(op1.split(',')) == 4:\r\n                op1 = op1.split(',')\r\n                u_id,name,age,phone = op1\r\n                if u_id:\r\n                    add_user(u_id,name,age,phone,tb_name)\r\n                    print(\"insert name=%s success\" % (name,))\r\n                else:\r\n                    print(\"syntax error\")\r\n            else:\r\n                print(\"syntax error\")\r\n\r\n        elif cmd == 'update':\r\n            if len(op1.split(',')) == 5:\r\n                op1 = op1.split(',')\r\n                old_name,u_id,name,age,phone = op1\r\n                if u_id:\r\n                    update_user(old_name,u_id,name,age,phone)\r\n                    print(\"update name=%s success\" % (old_name,))\r\n                else:\r\n                    print(\"syntax error\")\r\n            else:\r\n                print(\"syntax error\")\r\n\r\n","sub_path":"03/lizeyang/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"346899585","text":"# -*- coding:utf-8 -*-\n# crawl a dynamic website\n__author__ = 'LUO'\n\nimport os\nimport random\nimport time\nimport codecs\n\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nimport requests\n\n\n\nglobal headers\nheaders = {\n\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n\t\t\t'Accept': 'application/json, text/javascript, */*; q=0.01',\n\t\t\t'Accept-Encoding': 'gzip, deflate',\n\t\t\t'Accept-Language': 'zh-CN,zh;q=0.9',\n\t\t\t'Connection': 'keep-alive'\n}\n\n\ndef get_img_url(url):\n\tresponse = requests.get(url, headers=headers)\n\tresponse.encoding = response.apparent_encoding\n
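\t# apparent_encoding asks chardet to guess the page charset; without it requests falls back\n\t# to the declared header charset (often ISO-8859-1), which garbles the Chinese text\n\t# 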
print(response.text)\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\timg_ = soup.find_all('img')  # find every img tag\n\tword = soup.find_all('input')[0].get('value')\n\n\tfor each in img_:\n\t\tsrc = str(each.get('src'))\n\t\tif src.endswith('f'):\n\t\t\treturn src, word\n\treturn '', word\n\n\ndef download_img(url, word):  # word is the character this page describes; the original parameter name (filename) was never used\n\tglobal headers\n\tBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\ttry:\n\t\tos.mkdir(BASE_DIR + '\\\\' + word)\n\texcept Exception as e:\n\t\tprint(e)\n\tif url:\n\t\tfilename = BASE_DIR + '\\\\' + word + '\\\\' + 'hanzi.jpg'\n\t\ttry:\n\t\t\treq = request.Request(url=url, headers=headers)\n\t\t\tprint('Downloading image for %s' % word)\n\t\t\tbinary_data = request.urlopen(req).read()  # fetch the image's binary data\n\t\t\ttemp_file = open(filename, 'wb')  # create the file\n\t\t\ttemp_file.write(binary_data)  # write the binary data into the file\n\t\t\ttemp_file.close()  # close the file\n\t\t\t# this download method kept getting blocked: request.urlretrieve(imgurl, path + '%s.jpg' % n)  # downloads the image, naming it path + number\n\t\t\tprint('Finished downloading %s' % filename)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\telse:\n\t\tpass\n\n\ndef main():\n\tBASE_URL = 'http://shuowen.chaziwang.com/shuowen-%s.html'\n\tn = 6894\n\twhile n <= 7000:\n\t\tslp = random.randint(0, 3)\n\t\tn = n + 1\n\t\turl = BASE_URL % str(n)\n\t\turl, word = get_img_url(url)\n\t\tdownload_img(url, word)\n\t\ttime.sleep(slp)\n\t\t\ndef main2():\n\tBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\tdirlist = []\n\tfor dirs in os.listdir(BASE_DIR):\n\t\tif os.path.isdir(dirs):\n\t\t\tdirlist.append(dirs)\n\tfor dirli in dirlist:\n\t\ttry:\n\t\t\tos.rename(dirli + '\\\\hanzi.jpg', dirli + \".jpg\")\n\t\texcept Exception as e:\n\t\t\tpass\n\n\n\nif __name__ == '__main__':\n\tmain2()\n","sub_path":"helper/shuowen.py","file_name":"shuowen.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"299125205","text":"import math\n\ndef create_complex_number(a, b):\n    '''\n    Function that creates a complex number c given its real part a and imaginary part b.\n    c = a + ib with a, b ∈ R.\n    Input : a, b\n    Preconditions : a, b - are float\n    Output : c\n    Postconditions : c - complex number\n                     the real part of c = a\n                     the imaginary part of c = b\n    '''\n    return {\n        \"re\": a,\n        \"im\": b\n    }\n\ndef get_Real(c):\n    '''\n    The function returns the real part of the complex number c.\n    Input : c\n    Preconditions : c - complex number\n    Output : r\n    Postconditions : r - float, the real part of c\n    '''\n    return c[\"re\"]\n\ndef get_Imag(c):\n    '''\n    The function returns the imaginary part of the complex number c.\n    Input : c\n    Preconditions : c - complex number\n    Output : i\n    Postconditions : i - float, the imaginary part of c\n    '''\n    return c[\"im\"] \n\ndef set_Real(c, x):\n    '''\n    The function sets the value of the real part of the complex number c to x.\n    Input : c, x\n    Preconditions : c - a complex number\n                    x - a float\n    Output : c\n    Postconditions : c - complex number with the real part equal to x.\n    '''\n    c[\"re\"] = x\n    return c\n\ndef set_Imag(c, x):\n    '''\n    The function sets the value of the imaginary part of the complex number c to x.\n    Input : c, x\n    Preconditions : c - a complex number\n                    x - a float\n    Output : c\n    Postconditions : c - complex number with the imaginary part equal to x.\n    '''\n    c[\"im\"] = x\n    return c\n\ndef toStr(c):\n    '''\n    The function writes a complex number like : a + ib, if b >= 0\n                                                a - ib, if b < 0\n    '''\n    if get_Imag(c) >= 0 :\n        return ( str(c[\"re\"]) + ' + ' + str(c[\"im\"]) + \"i\" )\n    else:\n
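        # negate the imaginary part for display, so the output reads 3 - 4i rather than 3 + -4i\n        return ( str(c[\"re\"]) + 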
' - ' + str(-c[\"im\"]) + \"i\" )\n \ndef add_number_to_list(list,c):\n '''\n The function adds the complex number c to the list.\n Input : list, c\n Preconditions : list - a list containing complex numbers\n c - a complex number\n Output : list\n Postconditions : list contains c at the end\n '''\n list.append(c)\n return list\n\ndef initialize_list(listComplex):\n '''\n This function initialises the listComplex list with some complex values.\n Input : listComplex\n Preconditions : listComplex - a empty list\n Output : listComplex\n Postconditions : listComplex will now have 10 complex numbers in it.\n '''\n a = create_complex_number(3.21,-4.90)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(3.21,1.67)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(-0.21,-9.90)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(0,0)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(0.01,-24.90)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(3,-4)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(2,1)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(-6,-3)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(3.21,0)\n listComplex = add_number_to_list(listComplex, a )\n a = create_complex_number(0,-4.9)\n listComplex = add_number_to_list(listComplex, a )\n return listComplex\n\ndef print_number_of_elements(nmb):\n '''\n The function determines and prints how many elements does list have.\n Input : nmb\n Preconditions : nbm - number of elements of a list\n Output :\n Postconditions :\n '''\n print()\n print('The list has ' + str(nmb) + ' complex numbers :')\n\ndef print_list(listComplex):\n '''\n This function prints the entire list of the complex numbers that are present in listComplex.\n Input : listComplex\n Preconditions : listComplex - is a list\n Output :\n Postconditions :\n '''\n print_number_of_elements(len(listComplex))\n for x in range (0,len(listComplex)):\n print( 'z' + str(x) + ' = ' + toStr(listComplex[x]) )\n\ndef help(x):\n '''\n This function prints all the commands that exist in the program, so that the user can use them.\n '''\n print()\n print('The commands for this program are as it follows:')\n print('EXIT, if you want to close the program.')\n print('ADD, if you want to add elements to the sequence.')\n print('PRINT, if you want to print the elements of the sequence.')\n print('SEQUENCE_REAL, if you want to print the list that contains the longest sequence of real numbers from the primary list.')\n print('SEQUENCE_MODULUS, if you want to print the list that contains the longest sequence of complex number with the modulus € [0,10].')\n\ndef command_introduction():\n '''\n This function prints the text that is shown to the user before introducing a command.\n '''\n print()\n print('What would you like to do ?')\n print('If you do not know the commands type HELP.')\n print()\n\ndef delete_spaces(array):\n '''\n This function delets all the spaces from the string sir.\n Input : sir - a string\n Preconditions :\n Output : res - the string sir but without the spaces\n Postconditions : \n '''\n array = array + ' '\n previous = -1\n res = ''\n for x in range(0,len(array)):\n if( array[x] == ' ' ):\n aux = array[previous+1:x]\n previous = x\n res = res + aux\n return res\n\ndef process_com(command):\n '''\n This function returns the command such that it 
is correct and interpretable by the program.\n Input : command, a string\n Preconditions :\n Output : command, the string but without spaces and containing only uppercase letters.\n '''\n command = delete_spaces(command)\n command = command.upper()\n return command\n\ndef exit():\n '''\n This function writes the text before the ending of the program.\n '''\n print('The program will close now. Bye !')\n print('T. Andreas made it.')\n\ndef sign_position( array ):\n '''\n This function return the position where + or - is situated in the array.\n Input : array\n Preconditions : array - string\n Output : x\n Postconditions : x is a natural number, x € [1,len(array)-2]\n '''\n sign_position = -1\n for x in range (1,len(array)):\n if array[x] == '+' or array[x] == '-' :\n return x\n\ndef determine_real_part(array):\n '''\n This function returns the real part of a complex number stored in the array string.\n Input : array\n Preconditions : array - a string\n Output : result\n Postconditions : result is a float memorising the real part of the complex number stored in array.\n '''\n auxiliary_array = array[0:sign_position(array)]\n result = float(auxiliary_array)\n return result\n\ndef determine_imag_part(array):\n '''\n This function returns the imaginary part of a complex number stored in the array string.\n Input : array\n Preconditions : array - a string\n Output : result\n Postconditions : result is a float memorising the imaginary part of the complex number stored in array.\n '''\n auxiliary_array = array[sign_position(array):(len(array)-1)]\n result = float(auxiliary_array)\n return result\n \n\ndef determine_number(array):\n '''\n This function determines the complex number which is stored in array as a string.\n Input : array\n Preconditions : array is a string\n Output : number\n Postconditions : number is a complex number\n '''\n real_part = determine_real_part(array)\n imag_part = determine_imag_part(array)\n number = create_complex_number(real_part, imag_part )\n return number\n\ndef add_element():\n '''\n This function returns a complex number read from the keyboard as a string.\n Input :\n Preconditions :\n Output : array\n Postconditions : a complex number as a string\n '''\n array = input()\n array = delete_spaces(array)\n return array\n\ndef add_element_ui(listComplex):\n '''\n This function adds a complex number to the list listComplex.\n Input : listComplex\n Preconditions : listComplex is a list of complex numbers\n Output : listComplex\n Postconditions : listComplex contains one more complex number\n '''\n print(\"Enter the complex number that you want to add, as a + bi with a, b € R :\")\n new_number = determine_number(add_element())\n listComplex = add_number_to_list(listComplex, new_number)\n return listComplex\n\n\ndef add_elements(listComplex, nmb):\n '''\n This function adds elements to the list listComplex.\n Input : listComplex, nmb\n Preconditions : listComplex is a list memorising complex numbers\n nmb is a number representing how many new numbers will be added to listComplex\n Output : listComplex\n Postconditions : listComplex but with nmb more complex numbers in it\n '''\n for x in range (0,nmb) :\n listComplex = add_element_ui(listComplex)\n return listComplex\n\ndef add_elements_ui(listComplex):\n '''\n This function reads from the keyboard how many numbers would the user like to add to the list and then adds them.\n Input : listComplex\n Preconditions : listComplex is a list memorising complex numbers\n Output :\n Postconditions :\n '''\n try:\n number = 
int(input(\"Enter how many numbers would you like to add : \"))\n    except ValueError as ve:\n        print(\"Please enter a natural number !\")\n        return  # bail out: number is undefined after a bad input\n    add_elements(listComplex, number)\n    print(\"Addition successfully done !\")\n\ndef check_real(c):\n    '''\n    This function checks whether a complex number has the imaginary part equal with 0.\n    Input : c\n    Preconditions : c is a complex number\n    Output : True or False\n    Postconditions : True if the imaginary part of c is equal with 0\n                     False if the imaginary part of c is not equal with 0\n    '''\n    if c[\"im\"] == 0 :\n        return True\n    return False\n\ndef get_sequence(listComplex, starting_position, final_position):\n    '''\n    This function goes through listComplex from starting_position to final_position, copying the elements into another list.\n    Input : listComplex, starting_position, final_position\n    Preconditions : listComplex is a list of complex numbers\n                    starting_position is a natural number\n                    final_position is a natural number\n    Output : list\n    Postconditions : list - a list containing the copied complex numbers from listComplex\n    '''\n    list = []\n    for x in range (starting_position, final_position + 1):\n        list.append(listComplex[x])\n    return list\n\ndef sequence_real(listComplex):\n    '''\n    This function determines the starting and ending indexes of the longest sequence that contains complex numbers with the imaginary part equal with 0.\n    Input : listComplex\n    Preconditions : listComplex is a list containing complex numbers\n    Output :\n    Postconditions :\n    '''\n    first_position = 0\n    current_lenght = 0\n    longest_sequence_lenght = -1\n    first_position_longest = -1\n    last_position_longest = -1\n    for x in range (0,len(listComplex)):\n        if check_real(listComplex[x]) == True:\n            if current_lenght == 0:\n                first_position = x\n            current_lenght = current_lenght + 1\n            if current_lenght > longest_sequence_lenght:\n                longest_sequence_lenght = current_lenght\n                first_position_longest = first_position\n                last_position_longest = x\n        else:\n            current_lenght = 0\n            first_position = 0\n    list = get_sequence(listComplex, first_position_longest, last_position_longest)\n    print_list(list)\n\ndef check_modulus(c):\n    '''\n    This function checks whether the modulus of c ∈ [0,10].\n    Input : c\n    Preconditions : c is a complex number\n    Output : True or False\n    Postconditions : True if the modulus of c ∈ [0,10]\n                     False if the modulus of c < 0 or > 10\n    '''\n    modulus = math.sqrt(c[\"re\"] * c[\"re\"] + c[\"im\"] * c[\"im\"])  # compute the square root once instead of twice\n    if modulus >= 0 and modulus <= 10 :\n        return True\n    return False\n\ndef sequence_modulus(listComplex):\n    '''\n    This function determines the starting and ending indexes of the longest sequence that contains complex numbers with the modulus ∈ [0,10].\n    Input : listComplex\n    Preconditions : listComplex is a list containing complex numbers\n    Output :\n    Postconditions :\n    '''\n    first_position = 0\n    current_lenght = 0\n    longest_sequence_lenght = -1\n    first_position_longest = -1\n    last_position_longest = -1\n    for x in range (0,len(listComplex)):\n        if check_modulus(listComplex[x]) == True:\n            if current_lenght == 0:\n                first_position = x\n            current_lenght = current_lenght + 1\n            if current_lenght > longest_sequence_lenght:\n                longest_sequence_lenght = current_lenght\n                first_position_longest = first_position\n                last_position_longest = x\n        else:\n            current_lenght = 0\n            first_position = 0\n    list = get_sequence(listComplex, first_position_longest, last_position_longest)\n    print_list(list)\n    \n    \n# -------------------- TEST FUNCTIONS STARTING --------------------\n\n
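# the tests below are plain asserts; run_tests() is invoked at module level, right before main()\ndef 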
test_getters():\n    '''\n    This function tests whether the get functions work properly.\n    '''\n    real = 23.45\n    imag = 11.23\n    c = create_complex_number(real, imag)\n    assert get_Real(c) == 23.45\n    assert get_Imag(c) == 11.23\n    \ndef test_setters():\n    '''\n    This function tests whether the set functions work properly.\n    '''\n    real = 23.45\n    imag = 11.23\n    c = create_complex_number(real, imag)\n    c = set_Real(c, 3.33)\n    assert c[\"re\"] == 3.33\n    c = set_Imag(c, 6.9)\n    assert c[\"im\"] == 6.9\n\ndef test_toStr():\n    '''\n    This function checks whether the toStr function returns the right string for a complex number.\n    '''\n    a = create_complex_number(3.21, -4.91)\n    assert toStr(a) == '3.21 - 4.91i'\n    a = create_complex_number(1.75, 2.88)\n    assert toStr(a) == '1.75 + 2.88i'\n\ndef test_create_complex_number():\n    '''\n    This function checks whether create_complex_number returns the right value.\n    '''\n    real = 23.45\n    imag = 11.23\n    c = create_complex_number(real, imag)\n    assert get_Real(c) == real\n    assert get_Imag(c) == imag\n\ndef test_add_number_to_list():\n    '''\n    This function checks whether add_number_to_list returns the correct list.\n    '''\n    a = create_complex_number(2.42, -0.8)\n    b = create_complex_number(1, 2)\n    testList = [a]\n    testList = add_number_to_list(testList, b)\n    assert testList == [a, b]\n\ndef test_initialize_list():\n    '''\n    This function checks whether initialize_list returns the proper list.\n    '''\n    testList = []\n    testList = initialize_list(testList)\n    assert testList[2][\"re\"] == -0.21\n    assert testList[9][\"im\"] == -4.90\n    assert testList[9][\"re\"] == 0\n    assert testList[3][\"im\"] == 0\n    assert testList[8][\"re\"] == 3.21\n    assert testList[6][\"im\"] == 1\n\ndef test_delete_spaces():\n    '''\n    This function checks whether delete_spaces deletes all the spaces from a string.\n    '''\n    assert delete_spaces('vin si eu la folbal ') == 'vinsieulafolbal'\n    assert delete_spaces(' ana are mere dar nu are PERE ') == 'anaaremeredarnuarePERE'\n\ndef test_sign_position():\n    '''\n    This function checks whether the function sign_position returns the proper position.\n    '''\n    test_array = \"-0.23-1i\"\n    assert sign_position(test_array) == 5\n    test_array = \"1+2.31i\"\n    assert sign_position(test_array) == 1\n\ndef test_determine_real_part():\n    '''\n    This function checks whether determine_real_part returns the right value ( the real part ).\n    '''\n    assert determine_real_part(\"-0.23-1i\") == -0.23\n    assert determine_real_part(\"1+2.31i\") == 1\n\ndef test_determine_imag_part():\n    '''\n    This function checks whether determine_imag_part returns the right value ( the imag part ).\n    '''\n    assert determine_imag_part(\"-0.23-1i\") == -1\n    assert determine_imag_part(\"1+2.31i\") == 2.31\n\ndef test_determine_number():\n    '''\n    This function checks whether determine_number returns a complex number memorising the real and the imaginary part correctly.\n    '''\n    test_number = determine_number(\"-0.23-1i\")\n    assert test_number[\"re\"] == -0.23\n    assert test_number[\"im\"] == -1\n\ndef test_check_real():\n    '''\n    This function tests whether check_real properly determines whether a complex number has the imaginary part equal with 0.\n    '''\n    test_number = { \"re\" : -2.123, \"im\" : 0 }\n    assert check_real(test_number) == True\n    test_number = { \"re\" : -2.123, \"im\" : -2.123 }\n    assert check_real(test_number) == False\n\ndef test_check_modulus():\n    test_number = { \"re\" : -2.123, \"im\" : 0 }\n    assert check_modulus(test_number) == True\n    test_number = { \"re\" : 6.123, \"im\" : 123 }\n    assert check_modulus(test_number) == 
False\n \ndef run_tests():\n '''\n This function runs all the test functions.\n '''\n test_getters()\n test_setters()\n test_toStr()\n test_create_complex_number()\n test_add_number_to_list()\n test_initialize_list()\n test_delete_spaces()\n test_sign_position()\n test_determine_real_part()\n test_determine_imag_part()\n test_determine_number()\n test_check_real()\n test_check_modulus()\n\n# -------------------- TEST FUNCTIONS ENDING --------------------\n\ndef main():\n listComplex = []\n listComplex = initialize_list(listComplex)\n print('WELCOME TO THE CANDY SHOP ! This program works with a sequence of complex numbers.')\n print_list(listComplex)\n commands = { \"PRINT\" : print_list, \"HELP\" : help, \"ADD\" : add_elements_ui, \"SEQUENCE_REAL\" : sequence_real, \"SEQUENCE_MODULUS\" : sequence_modulus }\n while True:\n command_introduction()\n com = input('Enter command >> ')\n com = process_com(com)\n if com in commands:\n commands[com](listComplex)\n elif( com == 'EXIT' ):\n exit()\n return\n else :\n print('Illegal command. Please try again !')\n \nrun_tests()\nmain()\n","sub_path":"1st semester/Fundamentals-of-programming/Assignment 2/Complex_Number.py","file_name":"Complex_Number.py","file_ext":"py","file_size_in_byte":18273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42764411","text":"from __future__ import print_function\nimport sys, os, math\nimport ROOT\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport root_numpy\n\nfrom ROOT import TTree, TFile\n\n\n# HELPER FUNCTIONS\n###########################################################################################\n\nclass BreakLoop(Exception): pass\n\n\ndef GetJetShapes(rootFile, numSamples=-1, offset = 0, recompute = False):\n \"\"\"Returns a data frame containing the Jet shapes\n rootFile can be a pattern (e.g. 
/mydir/*.root)\n The data frame is saved to a pickled file and reloaded from it (unless recompute is True)\"\"\"\n\n\n import pandas as pd\n import numpy as np\n\n pickleFileName = rootFile[1:rootFile.rfind('.')]+'.pkl'\n pickleFileName=pickleFileName.replace('/','_')\n pickleFileName=pickleFileName.replace('*','ALL')\n\n try:\n if(recompute):\n raise ValueError('Recompute')\n # if the file cannot be opened, this raises an exception which leads to the actual recomputation.\n with open(pickleFileName, 'rb') as fileP:\n df = pickle.load(fileP)\n print (\"Loading from pickle file {0}\".format(pickleFileName))\n return df\n except:\n\n # Get raw input data from delphes\n import glob\n chain = ROOT.TChain(\"treeJets\")\n listOfFiles = glob.glob(rootFile)\n for fileIn in listOfFiles:\n chain.Add(fileIn)\n\n # The progress bar does not (yet) work on swan: commenting out.\n # try:\n # # show progress\n # from ipywidgets import FloatProgress\n # from IPython.display import display\n # progressBar = FloatProgress(min=0, max=100)\n # display(progressBar)\n # show_progress = True\n # except:\n # show_progress = False\n\n\n if numSamples < 0:\n numSamples = chain.GetEntries()\n data = pd.DataFrame(np.zeros((numSamples,4)), columns=['mass','ntowers','radial','dispersion'])\n\n\n # Loop over all events\n ijet = 0\n skipped = 0\n try:\n for event in chain:\n\n print('\\r'+' Processing {0} [{1}]'.format(rootFile, float(ijet)/numSamples*100 ), end='')\n sys.stdout.flush()\n\n # if show_progress:\n # # Update progress bar\n # progressBar.value = float(ijet)/numSamples*100\n\n # Do jet selection here\n if skipped < offset:\n skipped += 1\n continue\n\n # Fill data\n data.iloc[ijet] = CalculateJetShapes(event)\n\n ijet += 1\n if ijet >= numSamples:\n raise BreakLoop\n except BreakLoop:\n pass\n\n\n print('\\n')\n sys.stdout.flush()\n\n\n if ijet < numSamples:\n print('Only {:d} samples loaded (requested = {:d}). Not enough samples?'.format(ijet, numSamples))\n\n with open(pickleFileName, 'wb') as fileP:\n pickle.dump(data, fileP)\n\n return data\n\n\n###########################################################################################\n\ndef GetJetShapesFast(rootFileDir, numSamples=-1, offset = 0, recompute = False):\n \"\"\"Returns a data frame containing the Jet shapes\n rootFileDir should be a folder containing root files\n The jet shapes are saved to a root file and reloaded from it (unless recompute is True).\n This fast version uses a root macro to compute the shapes (iteration happens in C)\"\"\"\n\n\n #rootFileNameShapes = rootFileDir[1:rootFileDir.rfind('.')]+'_shapes.root'\n rootFileNameShapes = rootFileDir+'_shapes.root'\n rootFileNameShapes=rootFileNameShapes.replace('/','_')\n rootFileNameShapes=rootFileNameShapes.replace('*','ALL')\n\n #print(rootFileNameShapes)\n\n try:\n if(recompute):\n raise ValueError('Recompute')\n # if the file cannot be opened, this raises an exception which leads to the actual recomputation.\n data = GetShapesFromROOTFile(rootFileNameShapes)\n print (\"Loading from root file {0}\".format(rootFileNameShapes))\n except:\n\n # Compute shapes with external macro\n ROOT.gROOT.LoadMacro(\"CreateJetShapes.C\")\n ROOT.CreateJetShapes(rootFileDir,rootFileNameShapes,numSamples)\n\n data = GetShapesFromROOTFile(rootFileNameShapes)\n\n if len(data) < numSamples:\n print('Only {:d} samples loaded (requested = {:d}). 
Not enough samples?'.format(len(data), numSamples))\n\n return data\n\n\n\n\n\n###########################################################################################\ndef CalculateJetShapes(entry):\n \"\"\"Calculate jet shapes and add them to the jet instance\"\"\"\n leadingHadronPt = -999.\n subleadingHadronPt = -999.\n jetDispersionSum = 0\n jetDispersionSquareSum = 0\n numConst = 0\n\n ShapeRadial = 0.\n\n\n ntracks = entry.ntracks\n for itrack in range(0,ntracks):\n if abs(entry.trackEta[itrack]) > 20.: #FIXME: Do we need this?\n continue\n\n # Get leading hadron pt\n if entry.trackPt[itrack] > leadingHadronPt:\n subleadingHadronPt = leadingHadronPt\n leadingHadronPt = entry.trackPt[itrack]\n elif entry.trackPt[itrack] > subleadingHadronPt:\n subleadingHadronPt = entry.trackPt[itrack]\n\n deltaPhi = min(abs(entry.jetPhi-entry.trackPhi[itrack]), 2*math.pi- abs(entry.jetPhi-entry.trackPhi[itrack]))\n deltaEta = entry.jetEta-entry.trackEta[itrack]\n deltaR = math.sqrt(deltaPhi*deltaPhi + deltaEta*deltaEta)\n\n # Calculate properties important for shape calculation\n jetDispersionSum += entry.trackPt[itrack]\n jetDispersionSquareSum += entry.trackPt[itrack]*entry.trackPt[itrack]\n ShapeRadial += entry.trackPt[itrack]/entry.jetPt * deltaR\n\n numConst += 1\n\n # Calculate the shapes\n if numConst > 1:\n ShapeLeSub = leadingHadronPt - subleadingHadronPt\n else:\n ShapeLeSub = 1.\n\n if jetDispersionSum:\n ShapeDispersion = math.sqrt(jetDispersionSquareSum)/jetDispersionSum\n else:\n ShapeDispersion = 0.\n\n\n return [entry.jetMass, entry.ntowers, ShapeRadial, ShapeDispersion]\n\n\ndef GetShapesFromROOTFile(fileName):\n \"\"\"Opens a root file containing a treeShapes tree and loads it into a numpy array\"\"\"\n\n f = TFile(fileName)\n t = f.Get(\"treeShapes\")\n array = root_numpy.tree2array(t)\n return pd.DataFrame(array)\n\n","sub_path":"flat_helper.py","file_name":"flat_helper.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559474784","text":"from sleeper_wrapper import Players\n\ndef test_get_trending_players(capsys):\n\tplayers = Players()\n\tadded = players.get_trending_players(\"nfl\",\"add\", 1, 4)\n\n\tdropped = players.get_trending_players(\"nfl\",\"drop\")\n\n\t# with capsys.disabled():\n\t# \tprint(added)\n\t# \tprint(dropped)\n\ndef test_get_top_players():\n\tplayers = Players()\n\ttop_list = players.get_top_players_by_position('2020', 'QB', 'pts_half_ppr', 12)\n\tassert isinstance(top_list, dict)\n\tassert len(top_list) == 12\n","sub_path":"tests/test_players.py","file_name":"test_players.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556627080","text":"import re, string, unicodedata\nimport nltk\n#import inflect\nfrom bs4 import BeautifulSoup\nfrom nltk import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import LancasterStemmer, WordNetLemmatizer\nimport speech_recognition as sr\nfrom pydub import AudioSegment\nfrom os import path\nimport xlrd\nimport warnings \nwarnings.filterwarnings(action = 'ignore') \nimport gensim \nfrom gensim.models import Word2Vec \nfrom fuzzywuzzy import fuzz \nfrom fuzzywuzzy import process \n\n \ndef remove_non_ascii(words):\n \"\"\"Remove non-ASCII characters from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 
'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\ndef to_lowercase(words):\n \"\"\"Convert all characters to lowercase from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = word.lower()\n new_words.append(new_word)\n return new_words\n\ndef remove_punctuation(words):\n \"\"\"Remove punctuation from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words\n\ndef replace_numbers(words):\n \"\"\"Replace all interger occurrences in list of tokenized words with textual representation\"\"\"\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words\n\n \n\ndef remove_stopwords(words):\n \"\"\"Remove stop words from list of tokenized words\"\"\"\n new_words = []\n for word in words:\n # print(word)\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words\n\n \n\ndef stem_words(words):\n \"\"\"Stem words in list of tokenized words\"\"\"\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems\n\n \n\ndef lemmatize_verbs(words):\n \"\"\"Lemmatize verbs in list of tokenized words\"\"\"\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas\n\n \n\ndef normalize(words):\n words = remove_non_ascii(words)\n words = to_lowercase(words)\n words = remove_punctuation(words)\n #words = replace_numbers(words)\n words = remove_stopwords(words)\n words = stem_words(words)\n words = lemmatize_verbs(words)\n\n return words\n\n\n\n# src = \"C:/Users/raj_shah/Desktop/Hackathon/checkup.mp3\"\n# dest=\"converted.wav\"\n# sound=AudioSegment.from_mp3(src)\n# sound.export(dest,format=\"wav\")\n\nclass detectFraud:\n def __init__(self,sound):\n self.sound=sound\n\n def returnStatus(self):\n r = sr.Recognizer()\n with sr.AudioFile(self.sound) as source: \n r.adjust_for_ambient_noise(source)\n audio = r.record(source) \n\n try:\n words=r.recognize_google(audio)\n words = nltk.word_tokenize(words)\n # words = normalize(words)\n # print(words)\n l=tuple(words)\n l=list(words)\n words = ' '.join(map(str, words))\n # print(words)\n file1 = open(\"audio.txt\",\"w\")\n file1.write(words)\n file1.close()\n loc=\"words.xlsx\"\n dangerousWordFile = xlrd.open_workbook(loc)\n\n # l=[]\n # file1 = open(\"audio.txt\", \"r\") \n # s = file1.read() \n # # Replaces escape character with space \n # f = s.replace(\"\\n\", \" \") \n # data = [] \n # # iterate through each sentence in the file \n # for i in sent_tokenize(f): \n # temp = [] \n # # tokenize the sentence into words \n # for j in word_tokenize(i): \n # temp.append(j.lower()) \n # data.append(temp)\n # l=words\n # print(words)\n\n sheet = dangerousWordFile.sheet_by_index(0)\n rows=sheet.nrows\n columns=sheet.ncols\n\n list_for_model=[]\n\n for i in range(sheet.nrows):\n for j in range(sheet.ncols):\n list_for_model.append(sheet.cell_value(i,j))\n\n # print(list_for_model)\n\n list_for_check=tuple(list_for_model)\n list_for_check=list(list_for_model)\n for i in l:\n list_for_model.append(i)\n model1 = gensim.models.Word2Vec([list_for_model], min_count = 1, size = 10000, window = 5) \n\n # print(model1)\n\n #print(list_for_check)\n 
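# by this point model1 holds a vector for every watch-list word and transcript token\n            # (min_count=1), so the similarity() calls below cannot miss vocabulary\n            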
#print(list_for_model)\n\n            # print(l)\n\n            count = 0\n            #print(l)\n\n            for k in l:\n                if(k not in list_for_check):\n                    # print(k)\n                    fuzzylist = process.extract(k, list_for_check)\n                    for tuples in fuzzylist:\n                        if(tuples[1] > 85):\n                            cosine_similarity = model1.similarity(k, tuples[0])\n                            cosine_similarity *= 100\n                            if(cosine_similarity > 75):\n                                # note: the original called sheet.write(rows-1, ncols, tuples[0]) here, but\n                                # xlrd sheets are read-only and ncols was never defined, so it always raised;\n                                # keep the new word in memory instead (persisting would need openpyxl/xlwt)\n                                list_for_check.append(tuples[0])\n                                print(\"Added to watch-list..\")\n\n                else:\n                    #print(k,\"present\")\n                    count = count + 1\n                    #print(count)\n            \n            if(count >= 3):\n                return True\n            else:\n                return False\n\n        except Exception as e: \n            print(e)\n\n\n","sub_path":"wordAnalyzer.py","file_name":"wordAnalyzer.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"548704564","text":"## # ################################################################## #\n## #                                                                    #\n## # IBM Confidential                                                   #\n## # OCO Source Materials                                               #\n## #                                                                    #\n## # (C) Copyright IBM Corp. 2018, 2018                                 #\n## #                                                                    #\n## # The source code for this program is not published or otherwise     #\n## # divested of its trade secrets, irrespective of what has been       #\n## # deposited with the U.S. Copyright Office.                          #\n## #                                                                    #\n## # ################################################################## #\nimport random\nfrom typing import List, Dict, Any\n\nimport numpy as np\nfrom scipy import stats\n\nfrom randomization_without_garbage.randomization_suggestion import RandomizationSuggestion\nfrom randomization_without_garbage.randomization_suggestion_pool import RandomizationSuggestionPool\n\n\nclass SuggestionSamplerV3(object):\n    def __init__(self,\n                 min_suggestion_count: int,\n                 max_suggestion_count: int,\n                 disambiguation_percentage_parameter: float,\n                 disambiguation_length_parameter: float,\n                 first_suggestion_boost_parameter: float,\n                 first_suggestion_top_position_boost_parameter: float,\n                 disambiguation_percentage_parameter_sample_size: float,\n                 min_suggestion_score: float):\n\n        super().__init__()\n        self._min_suggestion_count = min_suggestion_count\n        self._max_suggestion_count = max_suggestion_count\n        self._disambiguation_percentage_parameter = disambiguation_percentage_parameter\n        self._disambiguation_length_parameter = disambiguation_length_parameter\n        self._first_suggestion_boost_parameter = first_suggestion_boost_parameter\n        self._first_suggestion_top_position_boost_parameter = first_suggestion_top_position_boost_parameter\n\n        # Do not play with these defaults unless you really know what you're doing\n        self._disambiguation_percentage_parameter_sample_size = disambiguation_percentage_parameter_sample_size\n        self._min_suggestion_score = min_suggestion_score\n\n    def sample(self, suggestion_pool: RandomizationSuggestionPool) -> List[RandomizationSuggestion]:\n        suggestion_pool_clone = self._get_preprocessed_pool(suggestion_pool)\n        
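# two-stage draw: first decide whether to disambiguate at all; if not, only the top\n        # suggestion is shown, otherwise sample how many suggestions to surface and which ones\n        if 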
suggestion_pool_clone.is_empty:\n pass\n else:\n pdf_disambiguation = self._pdf_disambiguate(suggestion_pool_clone)\n pdf_disambiguation_length = self._pdf_disambiguation_length(suggestion_pool_clone)\n\n for suggestion in suggestion_pool_clone.suggestions:\n suggestion_propensity = 0.\n for disambiguation_length in list(pdf_disambiguation_length.keys()):\n suggestion_propensity += pdf_disambiguation_length[disambiguation_length] * \\\n self._propensity_n(suggestion, suggestion_pool_clone,\n disambiguation_length)\n\n suggestion_propensity *= pdf_disambiguation[True]\n if suggestion_pool_clone.is_top_suggestion(suggestion):\n suggestion_propensity += pdf_disambiguation[False]\n\n propensities.append(suggestion_propensity)\n\n original_suggestion_order = suggestion_pool.get_original_order(suggestion_pool_clone)\n return [propensities[i] for i in original_suggestion_order]\n\n def _boost_first_suggestion_to_the_top(self, sample: List[RandomizationSuggestion],\n top_suggestion: RandomizationSuggestion):\n if top_suggestion in sample:\n r = random.random()\n if r < self._first_suggestion_top_position_boost_parameter:\n sample.insert(0, sample.pop(sample.index(top_suggestion)))\n\n def _sample_n(self, suggestion_pool: RandomizationSuggestionPool, n: int) -> List[RandomizationSuggestion]:\n single_draw_probas = self._get_skewed_probabilities(suggestion_pool)\n sample_indices = []\n for i in range(n):\n sampled_index = self._sample_key(single_draw_probas)\n sample_indices.append(sampled_index)\n single_draw_probas.pop(sampled_index)\n\n sampled_suggestions = [suggestion_pool.suggestions[index] for index in sample_indices]\n return sampled_suggestions\n\n def _propensity_n(self, suggestion: RandomizationSuggestion, suggestion_pool: RandomizationSuggestionPool,\n n: int) -> float:\n single_draw_probas = self._get_skewed_probabilities(suggestion_pool)\n for suggestion_index, orig_suggestion in enumerate(suggestion_pool.suggestions):\n if suggestion == orig_suggestion:\n break\n\n return 1. - self._proba_index_not_in_sample_of_size_n(suggestion_index, single_draw_probas, n)\n\n def _proba_index_not_in_sample_of_size_n(self, index: int, single_draw_probas: Dict[int, float], n: int) -> float:\n p_index = single_draw_probas[index]\n if p_index == 0:\n return 1.\n\n if n == 1:\n return 1. - p_index\n\n proba_index_not_in_sample = 0\n for i, p in single_draw_probas.items():\n if i == index:\n continue\n\n new_single_draw_probas = dict(single_draw_probas)\n new_single_draw_probas.pop(i)\n new_single_draw_probas = {k: v / (1 - p) for k, v in new_single_draw_probas.items()}\n proba_index_not_in_sample += p * self._proba_index_not_in_sample_of_size_n(index,\n new_single_draw_probas,\n n - 1)\n return proba_index_not_in_sample\n\n def _proba_item_not_in_sample_of_size_n(self, item_proba: float, other_single_draw_probas: List[float], n: int):\n if item_proba == 0:\n return 1.\n\n if n == 1:\n return 1. 
- item_proba\n\n proba_item_not_in_sample = 0.\n for i in range(len(other_single_draw_probas)):\n selected_proba = other_single_draw_probas[i]\n new_item_proba = item_proba / (1 - selected_proba)\n new_other_single_draw_probas = [p / (1 - selected_proba) for p_index, p in\n enumerate(other_single_draw_probas) if\n p_index != i]\n proba_item_not_in_sample += selected_proba * self._proba_item_not_in_sample_of_size_n(new_item_proba,\n new_other_single_draw_probas,\n n - 1)\n return proba_item_not_in_sample\n\n def _get_skewed_probabilities(self, suggestion_pool: RandomizationSuggestionPool) -> Dict[int, float]:\n scores = np.array(\n [suggestion.score if suggestion.is_visible else 0.0 for suggestion in suggestion_pool.suggestions])\n normalized_scores = scores / max(scores)\n skewed_scores = normalized_scores ** self._first_suggestion_boost_parameter\n skewed_probabilities = skewed_scores / sum(skewed_scores)\n return {i: proba for i, proba in enumerate(skewed_probabilities)}\n\n def _get_preprocessed_pool(self, suggestion_pool: RandomizationSuggestionPool) -> RandomizationSuggestionPool:\n cloned_pool = suggestion_pool.clone()\n cloned_pool._suggestions.sort(key=lambda sug: -sug.score)\n new_scores = [max(self._min_suggestion_score, suggestion.score) for suggestion in cloned_pool.suggestions]\n cloned_pool.set_scores(new_scores)\n return cloned_pool\n\n def _pdf_disambiguation_length(self, suggestion_pool: RandomizationSuggestionPool) -> Dict[int, float]:\n \"\"\" Probability distribution function of sample length \"\"\"\n pdf = {}\n min_length = self._min_suggestion_count\n max_length = min(self._max_suggestion_count, suggestion_pool.visible_count)\n if max_length < min_length:\n return {}\n probabilities = np.array(suggestion_pool.normalized_visible_suggestion_scores)[min_length:max_length]\n probabilities = np.minimum(1, probabilities * self._disambiguation_length_parameter)\n\n for n in range(0, max_length - min_length + 1):\n mass = self._probability_choose_n(probabilities, n)\n pdf[n + min_length] = mass\n\n return pdf\n\n def _probability_choose_n(self, probabilities_choose_one: List[float], n: int) -> float:\n m = len(probabilities_choose_one)\n proba_exactly_n_selected_from_probas_up_to_m = [[0 for j in range(n + 2)] for i in range(m + 1)]\n proba_exactly_n_selected_from_probas_up_to_m[0][1] = 1.\n\n for i in range(1, m + 1):\n for j in range(1, n + 2):\n p = probabilities_choose_one[i - 1]\n left_top = proba_exactly_n_selected_from_probas_up_to_m[i - 1][j - 1]\n top = proba_exactly_n_selected_from_probas_up_to_m[i - 1][j]\n proba_exactly_n_selected_from_probas_up_to_m[i][j] = p * left_top + (1 - p) * top\n\n return proba_exactly_n_selected_from_probas_up_to_m[m][n + 1]\n\n def _pdf_disambiguate(self, suggestion_pool: RandomizationSuggestionPool) -> Dict[bool, float]:\n if suggestion_pool.visible_count < self._min_suggestion_count:\n probability_disambiguation_occurs = 0.\n else:\n c1 = suggestion_pool.top_suggestion.score\n c2 = suggestion_pool.top_visible_suggestion_after_the_top_suggestion.score\n certainty_ratio = c2 / c1\n alpha = self._disambiguation_percentage_parameter * self._disambiguation_percentage_parameter_sample_size\n beta = (1 - self._disambiguation_percentage_parameter) * \\\n self._disambiguation_percentage_parameter_sample_size\n probability_disambiguation_occurs = stats.beta.cdf(certainty_ratio, alpha, beta)\n\n return {True: probability_disambiguation_occurs, False: 1 - probability_disambiguation_occurs}\n\n def _sample_disambiguation_length(self, 
suggestion_pool: RandomizationSuggestionPool) -> int:\n        pdf = self._pdf_disambiguation_length(suggestion_pool)\n        return self._sample_key(pdf)\n\n    def _sample_disambiguate(self, suggestion_pool: RandomizationSuggestionPool) -> bool:\n        pdf = self._pdf_disambiguate(suggestion_pool)\n        return self._sample_key(pdf)\n\n    def _sample_key(self, pdf: Dict[Any, float]) -> Any:\n        keys = []\n        values = []\n        for k, v in pdf.items():\n            keys.append(k)\n            values.append(v)\n\n        probas = list(values)\n        probas = [p / sum(probas) for p in probas]\n        r = random.random()\n\n        for key_index in range(0, len(keys)):\n            if sum(probas[:(key_index + 1)]) >= r:\n                break\n\n        return keys[key_index]\n","sub_path":"randomization_without_garbage/suggestion_sampler_v3.py","file_name":"suggestion_sampler_v3.py","file_ext":"py","file_size_in_byte":12216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"107663846","text":"from django.db import reset_queries\nfrom django.shortcuts import redirect, render,get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm\n# Create your views here.\nfrom .forms import AdminSigupForm,BookForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .models import *\ndef home(request):\n    return render(request, 'home.html')\n\ndef main(request):\n    context = {}\n    context['segment'] = 'index'\n    b=Book.objects.all()\n    context['books']= b\n    n=Book.objects.filter(status=False).count()\n    context['issuedbook']=n\n    n2=Book.objects.all().count()\n    context['total']=n2\n    context['available']=abs(n2-n)\n    user=User.objects.all().count()\n    context['totaluser']=user\n\n    return render(request, 'index.html', context)\n\ndef register(request):\n    form=AdminSigupForm()\n    if request.method== 'POST':\n        form=AdminSigupForm(request.POST)\n        if form.is_valid():\n            form.save()\n            user = form.cleaned_data.get('username')\n            messages.success(request, 'Account was created for ' + user)\n            return redirect(\"login\")\n    context={'form':form}\n    return render(request, 'register.html',context)\n\ndef loginpage(request):\n    if request.method == 'POST':\n        username = request.POST.get('username')\n        password =request.POST.get('password')\n        user = authenticate(request, username=username, password=password)\n        if user is not None:\n            login(request, user)\n            return redirect('main')\n        else:\n            messages.info(request, 'Username OR password is incorrect')\n\n    return render(request, 'login.html')\n\n@login_required(login_url=\"login\")\ndef logoutUser(request):\n\tlogout(request)\n\treturn redirect('login')\n\n@login_required(login_url=\"login\")\ndef bookdetails(request):\n    b=Book.objects.all()\n    context= {\"books\":b}\n    return render(request, 'booklist.html', context) \n\n@login_required(login_url=\"login\")\ndef addbook_view(request):\n    # start with an empty book form to render in the template\n    form=BookForm()\n    if request.method=='POST':\n        # now the form holds the data submitted from the template\n        form=BookForm(request.POST)\n        if form.is_valid():\n            user=form.save()\n            return redirect(main)\n    return render(request,'addbook.html',{'form':form})\n\n\n\n@login_required(login_url=\"login\")\ndef deletebook(request, id=None):\n    instance = get_object_or_404(Book, id=id)\n    instance.delete()\n    return redirect('main')\n\n\n@login_required(login_url=\"login\")\ndef updatestatus(request, id=None):\n    instance = get_object_or_404(Book, id=id)\n    
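# toggle the loan flag: a False status marks the book as issued, True as available\n    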
if instance.status:\n instance.status=False\n instance.save()\n return redirect('main')\n else:\n instance.status=True\n instance.save()\n return redirect('main')\n ","sub_path":"libraryApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340308567","text":"import json\r\n\r\ncount= {}\r\ntrees = open('parse_train.dat').readlines()\r\n\r\n\r\ndef get_count(tree):\r\n if len(tree)==2:\r\n if tree[1] not in count:\r\n count[tree[1]]=1;\r\n else:\r\n count[tree[1]]+=1;\r\n else:\r\n get_count(tree[1])\r\n get_count(tree[2])\r\n\r\n\r\ndef process(tree):\r\n if len(tree) == 2 and count[tree[1]]<5:\r\n tree[1] = '_RARE_'\r\n elif len(tree) == 3:\r\n process(tree[1])\r\n process(tree[2])\r\n \r\n\r\n\r\nout=open(\"parse_train.dat.new\",'w')\r\n\r\nfor treestr in trees:\r\n tree = json.loads(treestr)\r\n get_count(tree)\r\n\r\n\r\nfor treestr in trees:\r\n tree = json.loads(treestr)\r\n process(tree)\r\n out.write(json.dumps(tree)+'\\n')\r\n \r\n\r\n\r\n \r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PA2/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131908307","text":"import pytest\nimport numpy as np\nimport proxmin\n\n\nimport scarlet\n\n\nclass TestProx(object):\n def test_prox_monotonic(self):\n X = np.arange(25).reshape(5, 5).astype(np.float64)\n # First test the nearest neighbor implementation\n prox = scarlet.operator.prox_strict_monotonic(X.shape, use_nearest=True, thresh=0)\n didx = [12, 17, 13, 7, 11, 18, 16, 6, 8, 14, 10, 22, 2, 9, 23, 5, 15, 3, 1, 19, 21, 0, 4, 20, 24]\n assert prox.func == scarlet.operator._prox_strict_monotonic\n assert prox.keywords[\"thresh\"] == 0\n assert prox.keywords['ref_idx'] == [6, 7, 7, 7, 8, 11, 12, 12, 12, 13, 11, 12, 12,\n 12, 13, 11, 12, 12, 12, 13, 16, 17, 17, 17, 18]\n np.testing.assert_array_equal(prox.keywords[\"dist_idx\"], didx)\n _X = X.copy()\n prox(_X, 0.0)\n nearest_X = [[0.0, 1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0, 12.0, 12.0],\n [11.0, 12.0, 12.0, 12.0, 12.0],\n [12.0, 12.0, 12.0, 12.0, 12.0]]\n np.testing.assert_array_equal(_X, nearest_X)\n\n # Test that use_nearest=True and thresh !=0 are incompatible\n with pytest.raises(ValueError):\n prox = scarlet.operator.prox_strict_monotonic(X.shape, use_nearest=True, thresh=.25)\n\n # Now test weighted monotonicity\n prox = scarlet.operator.prox_strict_monotonic(X.shape, use_nearest=False, thresh=0)\n assert prox.func == scarlet.operator._prox_weighted_monotonic\n assert prox.keywords[\"thresh\"] == 0\n np.testing.assert_array_equal(prox.keywords[\"didx\"], didx[1:])\n np.testing.assert_array_equal(prox.keywords[\"offsets\"], [-6, -5, -4, -1, 1, 4, 5, 6])\n _X = X.copy()\n prox(_X, 0.0)\n weighted_X = [[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.],\n [9.74264069, 11., 12., 12., 10.82842712],\n [11.0306277, 11.70710678, 12., 12., 11.77123617],\n [11.55634919, 11.86886724, 11.91421356, 11.98324916, 11.92809042]]\n np.testing.assert_almost_equal(_X, weighted_X)\n # Use a threshold to force a gradient of 75% or steeper\n prox = scarlet.operator.prox_strict_monotonic(X.shape, use_nearest=False, thresh=.25)\n threshold_X = [[0.000000000, 1.000000000, 2.000000000, 3.000000000, 4.000000000],\n [5.000000000, 6.000000000, 7.000000000, 7.242640687, 5.806841831],\n [5.801461031, 9.000000000, 12.000000000, 
9.000000000, 6.074431804],\n [5.895545844, 7.681980515, 9.000000000, 7.681980515, 5.935521488],\n [4.988519641, 5.949655012, 6.170941546, 5.949655012, 4.997301087]]\n _X = X.copy()\n prox(_X, 0.0)\n np.testing.assert_almost_equal(_X, threshold_X)\n\n # Skipped test for unused prox_cone\n def test_prox_center_on(self):\n X = np.zeros((5, 5))\n _X = X.copy()\n scarlet.operator.prox_center_on(_X, 0)\n result = X.copy()\n result[2, 2] = 1e-10\n np.testing.assert_array_equal(_X, result)\n\n _X = X.copy()\n scarlet.operator.prox_center_on(_X, 0, tiny=.1)\n result = X.copy()\n result[2, 2] = .1\n np.testing.assert_array_equal(_X, result)\n\n def test_prox_max_unity(self):\n X = np.arange(11, dtype=float)\n _X = X.copy()\n print(scarlet.operator.prox_max_unity(_X, 0))\n result = X/10\n np.testing.assert_array_equal(_X, result)\n\n def test_prox_sed_on(self):\n X = np.zeros((5, 5))\n _X = X.copy()\n scarlet.operator.prox_sed_on(_X, 0)\n result = np.ones_like(X) * 1e-10\n np.testing.assert_array_equal(_X, result)\n\n _X = X.copy()\n scarlet.operator.prox_sed_on(_X, 0, .1)\n result = np.ones_like(X) * .1\n np.testing.assert_array_equal(_X, result)\n\n def test_soft_symmetry(self):\n X = np.arange(25, dtype=float).reshape(5, 5)\n _X = X.copy()\n scarlet.operator.prox_soft_symmetry(_X, 0)\n result = np.ones_like(X) * 12\n np.testing.assert_array_equal(_X, result)\n\n _X = X.copy()\n scarlet.operator.prox_soft_symmetry(_X, 0, 0)\n np.testing.assert_array_equal(_X, X)\n\n _X = X.copy()\n scarlet.operator.prox_soft_symmetry(_X, 0, .5)\n result = [[6.0, 6.5, 7.0, 7.5, 8.0],\n [8.5, 9.0, 9.5, 10.0, 10.5],\n [11.0, 11.5, 12.0, 12.5, 13.0],\n [13.5, 14.0, 14.5, 15.0, 15.5],\n [16.0, 16.5, 17.0, 17.5, 18.0]]\n np.testing.assert_array_equal(_X, result)\n\n def test_kspace_symmetry(self):\n x = np.zeros((21, 21))\n x[8:13, 8:13] = [\n [1, 2, 3, 2, 1],\n [2, 3, 4, 3, 1],\n [3, 4, 5, 1, 1],\n [2, 3, 1, 1, 1],\n [1, 1, 1, 1, 1]\n ]\n _x = scarlet.operator.prox_kspace_symmetry(x, None, (0, 0))\n symmetric = (x[::-1, ::-1] + x) / 2\n np.testing.assert_almost_equal(_x, symmetric)\n\n def test_bulge_disk(self):\n disk_sed = np.arange(5)\n bulge_sed = np.arange(5)[::-1]\n new_sed = scarlet.operator.project_disk_sed(bulge_sed, disk_sed)\n np.testing.assert_array_equal(new_sed, [0, 5, 6, 7, 4])\n\n def test_uncentered(self):\n x = np.arange(35).reshape(5, 7) - 5\n shape = x.shape\n\n # Use a centered positivity function\n truth = x.copy()\n truth[x < 0] = 0\n _x = x.copy()\n scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (2, 3), step=1)\n np.testing.assert_array_equal(_x, truth)\n\n # lower left positivity\n truth = x.copy()\n region = (slice(0, 3), slice(0, 5))\n truth[region][x[region] < 0] = 0\n _x = x.copy()\n scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (1, 2), step=1)\n np.testing.assert_array_equal(_x, truth)\n\n # lower right positivity\n truth = x.copy()\n region = (slice(0, 3), slice(-5, shape[1]))\n truth[region][x[region] < 0] = 0\n _x = x.copy()\n scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (1, shape[1]-3), step=1)\n np.testing.assert_array_equal(_x, truth)\n\n x = np.arange(35).reshape(5, 7)[::-1] - 5\n # upper left positivity\n truth = x.copy()\n region = (slice(-3, shape[0]), slice(0, 5))\n truth[region][x[region] < 0] = 0\n _x = x.copy()\n scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (shape[0]-2, 2), step=1)\n np.testing.assert_array_equal(_x, truth)\n\n # upper right positivity\n truth = x.copy()\n region = 
(slice(-3, shape[0]), slice(-5, shape[1]))\n        truth[region][x[region] < 0] = 0\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus,\n                                             (shape[0]-2, shape[1]-3), step=1)\n        np.testing.assert_array_equal(_x, truth)\n\n    def test_uncentered_fill(self):\n        x = np.arange(35).reshape(5, 7) - 5\n        shape = x.shape\n\n        # Use a centered positivity function\n        truth = np.zeros_like(x)\n        truth[x > 0] = x[x > 0]\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (2, 3), step=1, fill=0)\n        np.testing.assert_array_equal(_x, truth)\n\n        # lower left positivity\n        truth = np.zeros_like(x)\n        region = (slice(0, 3), slice(0, 5))\n        truth[region][x[region] > 0] = x[region][x[region] > 0]\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (1, 2), step=1, fill=0)\n        np.testing.assert_array_equal(_x, truth)\n\n        # lower right positivity\n        truth = np.zeros_like(x)\n        region = (slice(0, 3), slice(-5, shape[1]))\n        truth[region][x[region] > 0] = x[region][x[region] > 0]\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (1, shape[1]-3), step=1, fill=0)\n        np.testing.assert_array_equal(_x, truth)\n\n        x = np.arange(35).reshape(5, 7)[::-1] - 5\n        # upper left positivity\n        truth = np.zeros_like(x)\n        region = (slice(-3, shape[0]), slice(0, 5))\n        truth[region][x[region] > 0] = x[region][x[region] > 0]\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus, (shape[0]-2, 2), step=1, fill=0)\n        np.testing.assert_array_equal(_x, truth)\n\n        # upper right positivity\n        truth = np.zeros_like(x)\n        region = (slice(-3, shape[0]), slice(-5, shape[1]))\n        truth[region][x[region] > 0] = x[region][x[region] > 0]\n        _x = x.copy()\n        scarlet.operator.uncentered_operator(_x, proxmin.operators.prox_plus,\n                                             (shape[0]-2, shape[1]-3), step=1, fill=0)\n        np.testing.assert_array_equal(_x, truth)\n","sub_path":"tests/test_operator.py","file_name":"test_operator.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"429625640","text":"from .models import Host,Report\nfrom .foremanapi import foreman_get\nimport math\nimport re\nfrom .cron import update_facts,inventory,update_fact\nimport datetime\n\ndef async_update_host(datastructure, delay):\n\tfor result in datastructure:\n\t\ttry:\n\t\t\tlast_report = datetime.datetime.strptime(result['last_report'][:-4],'%Y-%m-%d %H:%M:%S')+datetime.timedelta(hours=8)\n\t\t\thost, created = Host.objects.update_or_create(host_id=result['id'],defaults={'name': result['name'],'hostgroup_id': result['hostgroup_id'],'environment_id': result['environment_id'],'created_at': result['created_at'][:-4],'last_report': last_report,'status': result['global_status_label']})\n\t\t\tif created:\n\t\t\t\tupdate_fact(host.host_id)\n\t\texcept Exception as e:\n\t\t\tcontinue\n\t\t\t# raise e \n    # inventory()\n\ndef async_update_report(datastructure, delay):\n\tfor result in datastructure:\n\t\ttry:\n\t\t\treported_at = datetime.datetime.strptime(result['reported_at'][:-4],'%Y-%m-%d %H:%M:%S')+datetime.timedelta(hours=8)\n\t\t\treport,created = Report.objects.get_or_create(r_id=result['id'],defaults ={'host_id': result['host_id'],'reported_at': reported_at,'applied': result['status']['applied'],'restarted': result['status']['restarted'],'failed': result['status']['failed'],'failed_restarts': result['status']['failed_restarts'],'skipped': result['status']['skipped']})\n\t\texcept 
Exception as e:\n\t\t\tcontinue","sub_path":"cmdb/update_async.py","file_name":"update_async.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"472158336","text":"#\r\n# Classes to connect, create, read from and write to SQLite databases.\r\n#\r\n# Copyright 2020 Mark McKinnon.\r\n# Contact: mark mckinnon gmail com\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\nimport os\r\nimport re\r\nimport sqlite3\r\n\r\nclass SQLiteDb(object):\r\n    #Class that defines a sqlite3 database file.\r\n\r\n    def __init__(self):\r\n        \"\"\"Initializes the database file object.\"\"\"\r\n        super(SQLiteDb, self).__init__()\r\n        self._connection = None\r\n        self._cursor = None\r\n        self.filename = None\r\n        self.read_only = None\r\n        self.reserved_word_list_dict = {'ABORT':0, 'ACTION':0, 'ADD':0, 'AFTER':0, 'ALL':0, 'ALTER':0, 'ANALYZE':0, 'AND':0, 'AS':0, 'ASC':0, \\\r\n                                        'ATTACH':0, 'AUTOINCREMENT':0, 'BEFORE':0, 'BEGIN':0, 'BETWEEN':0, 'BY':0, 'CASCADE':0, 'CASE':0, \\\r\n                                        'CAST':0, 'CHECK':0, 'COLLATE':0, 'COLUMN':0, 'COMMIT':0, 'CONFLICT':0, 'CONSTRAINT':0, 'CREATE':0, \\\r\n                                        'CROSS':0, 'CURRENT_DATE':0, 'CURRENT_TIME':0, 'CURRENT_TIMESTAMP':0, 'DATABASE':0, 'DEFAULT':0, \\\r\n                                        'DEFERRABLE':0, 'DEFERRED':0, 'DELETE':0, 'DESC':0, 'DETACH':0, 'DISTINCT':0, 'DROP':0, 'EACH':0, \\\r\n                                        'ELSE':0, 'END':0, 'ESCAPE':0, 'EXCEPT':0, 'EXCLUSIVE':0, 'EXISTS':0, 'EXPLAIN':0, 'FAIL':0, 'FOR':0, \\\r\n                                        'FOREIGN':0, 'FROM':0, 'FULL':0, 'GLOB':0, 'GROUP':0, 'HAVING':0, 'IF':0, 'IGNORE':0, 'IMMEDIATE':0, \\\r\n                                        'IN':0, 'INDEX':0, 'INDEXED':0, 'INITIALLY':0, 'INNER':0, 'INSERT':0, 'INSTEAD':0, 'INTERSECT':0, 'INTO':0, \\\r\n                                        'IS':0, 'ISNULL':0, 'JOIN':0, 'KEY':0, 'LEFT':0, 'LIKE':0, 'LIMIT':0, 'MATCH':0, 'NATURAL':0, 'NO':0, \\\r\n                                        'NOT':0, 'NOTNULL':0, 'NULL':0, 'OF':0, 'OFFSET':0, 'ON':0, 'OR':0, 'ORDER':0, 'OUTER':0, 'PLAN':0, \\\r\n                                        'PRAGMA':0, 'PRIMARY':0, 'QUERY':0, 'RAISE':0, 'RECURSIVE':0, 'REFERENCES':0, 'REGEXP':0, 'REINDEX':0, \\\r\n                                        'RELEASE':0, 'RENAME':0, 'REPLACE':0, 'RESTRICT':0, 'RIGHT':0, 'ROLLBACK':0, 'ROW':0, 'SAVEPOINT':0, \\\r\n                                        'SELECT':0, 'SET':0, 'TABLE':0, 'TEMP':0, 'TEMPORARY':0, 'THEN':0, 'TO':0, 'TRANSACTION':0, 'TRIGGER':0, \\\r\n                                        'UNION':0, 'UNIQUE':0, 'UPDATE':0, 'USING':0, 'VACUUM':0, 'VALUES':0, 'VIEW':0, 'VIRTUAL':0, 'WHEN':0, \\\r\n                                        'WHERE':0, 'WITH':0, 'WITHOUT':0}\r\n\r\n\r\n    def RemoveDB_File(self, file_name):\r\n        #removes the database file if it exists\r\n        #\r\n        #Args:\r\n        #   file_name: the name of the file to delete.\r\n\r\n        if os.path.isfile(file_name):\r\n            os.remove(file_name)\r\n\t\r\n    def Check_SQL_Reserved_Word(self, column_name):\r\n        #Checks to see if the column name would be a reserved word or starts with a number, if it is then put quotes around it\r\n        #\r\n        #Args:\r\n        #   column_name: the column of a table.\r\n\r\n        check_key = column_name.upper()\r\n        if check_key in self.reserved_word_list_dict or column_name[0].isdigit():\r\n            return \"'\" + column_name + 
\"'\"\r\n else:\r\n return column_name\t\r\n\r\n\r\n def create_question_bind_variables(self, number_of_columns):\r\n #Checks to see of the column name would be a reserved word or starts with a number, if it is then put quotes around it\r\n #\r\n #Args:\r\n # number_of_columns: the number of columns of bind variables.\r\n\t\r\n bind_variables = \" ?\"\r\n for i in range(1, number_of_columns):\r\n bind_variables = bind_variables + \", ?\"\r\n #bind_variables = bind_variables + \")\"\t \r\n return bind_variables\r\n\t\r\n def Close(self):\r\n #Closes the database file.\r\n #\r\n #Raises:\r\n # RuntimeError: if the database is not opened.\r\n \r\n if not self._connection:\r\n raise RuntimeError(u'Cannot close database not opened.')\r\n\r\n # We need to run commit or not all data is stored in the database.\r\n self._connection.commit()\r\n self._connection.close()\r\n\r\n self._connection = None\r\n self._cursor = None\r\n self.filename = None\r\n self.read_only = None\r\n\r\n def CreateTable(self, table_name, column_definitions):\r\n #Creates a table.\r\n #\r\n #Args:\r\n # table_name: the table name.\r\n # column_definitions: list of strings containing column definitions.\r\n\r\n #Raises:\r\n # RuntimeError: if the database is not opened or\r\n # if the database is in read-only mode.\r\n \r\n if not self._connection:\r\n raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n if self.read_only:\r\n raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n sql_query = u'CREATE TABLE {0:s} ( {1:s} )'.format(\r\n table_name, column_definitions)\r\n \r\n #print (sql_query)\r\n \r\n self._cursor.execute(sql_query)\r\n\r\n def CreatePermanentTable(self, table_name):\r\n #Creates a table.\r\n #\r\n #Args:\r\n # table_name: the table name.\r\n\r\n #Raises:\r\n # RuntimeError: if the database is not opened or\r\n # if the database is in read-only mode.\r\n\r\n if not self._connection:\r\n raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n if self.read_only:\r\n raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n sql_query = 'Create Table '+ table_name + ' as select * from ' + table_name + '_Temp;'\r\n\r\n #print (sql_query)\r\n\t\r\n self._cursor.execute(sql_query)\r\n\r\n def CreateTempTable(self, table_name, column_definitions):\r\n #Creates a table.\r\n #\r\n #Args:\r\n # table_name: the table name.\r\n # column_definitions: list of strings containing column definitions.\r\n\r\n #Raises:\r\n # RuntimeError: if the database is not opened or\r\n # if the database is in read-only mode.\r\n \r\n if not self._connection:\r\n raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n if self.read_only:\r\n raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n sql_query = u'CREATE Temp TABLE {0:s} ( {1:s} )'.format(\r\n table_name, column_definitions)\r\n\r\n self._cursor.execute(sql_query)\r\n\r\n def AppendTempToPermanentTable(self, table_name):\r\n #Creates a table.\r\n #\r\n #Args:\r\n # table_name: the table name.\r\n\r\n #Raises:\r\n # RuntimeError: if the database is not opened or\r\n # if the database is in read-only mode.\r\n\r\n if not self._connection:\r\n raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n if self.read_only:\r\n raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n sql_query = 'insert into '+ table_name + ' select * from ' + table_name + '_Temp;'\r\n\r\n #print (sql_query)\r\n\t\r\n 
self._cursor.execute(sql_query)\r\n\r\n    def AddColumn(self, table_name, column_definitions):\r\n        #Adds a column to an existing table.\r\n        #\r\n        #Args:\r\n        #   table_name: the table name.\r\n        #   column_definitions: list of strings containing column definitions.\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n        \r\n        if not self._connection:\r\n            raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n        if self.read_only:\r\n            raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n        sql_query = u'Alter TABLE {0:s} Add {1:s} '.format(\r\n            table_name, column_definitions)\r\n\r\n        self._cursor.execute(sql_query)\r\n\r\n    def DropTable(self, table_name):\r\n        #Drops a table.\r\n        #\r\n        #Args:\r\n        #   table_name: the table name to drop\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n        \r\n        if not self._connection:\r\n            raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n        if self.read_only:\r\n            raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n        sql_query = u'Drop TABLE {0:s} '.format(\r\n            table_name)\r\n\r\n        self._cursor.execute(sql_query)\r\n\r\n    def InsertValues(self, table_name, column_definitions, column_bind_values):\r\n        #Inserts values into a table.\r\n        #\r\n        #Args:\r\n        #   table_name: the table name.\r\n        #   column_definitions: list of strings containing column names.\r\n        #   column_bind_values: the values to be inserted\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n        \r\n        if not self._connection:\r\n            raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n        if self.read_only:\r\n            raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n        sql_query = u'insert into {0:s} ( {1:s} ) values ( {2:s} )'.format(\r\n            table_name, column_definitions, column_bind_values)\r\n\r\n        self._cursor.execute(sql_query)\r\n\r\n    def InsertBindValues(self, table_name, column_definitions, column_bind_values, column_values):\r\n        #Inserts values into a table.\r\n        #\r\n        #Args:\r\n        #   table_name: the table name.\r\n        #   column_definitions: list of strings containing column names.\r\n        #   column_values: the values to be inserted\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n        \r\n        if not self._connection:\r\n            raise RuntimeError(u'Cannot create table database not opened.')\r\n\r\n        if self.read_only:\r\n            raise RuntimeError(u'Cannot create table database in read-only mode.')\r\n\r\n        sql_query = u'insert into {0:s} ( {1:s} ) values ( {2:s} )'.format(\r\n            table_name, column_definitions, column_bind_values)\r\n\r\n        #print (sql_query)\r\n\t\r\n        self._cursor.execute(sql_query, column_values)\r\n\r\n    def TableExists(self, table_name):\r\n        # Checks if the table exists in the database\r\n\r\n        # Args:\r\n        #   table_name: the table name.\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n\r\n        if not self._connection:\r\n            raise RuntimeError(\r\n                u'Cannot determine if table exists database not opened.')\r\n\r\n        sql_query = u'SELECT name FROM sqlite_master WHERE type = \"table\" AND name = \"{0:s}\"'.format(table_name)\r\n\r\n        self._cursor.execute(sql_query)\r\n        if self._cursor.fetchone():\r\n            has_table = True\r\n        else:\r\n            has_table = False\r\n        return has_table\r\n\t\r\n    def SelectOneRow (self, sql_query):\r\n        # Executes the given query and returns the first row\r\n\r\n        # Args:\r\n        #   sql_query: query you want to execute.\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n\r\n        if not self._connection:\r\n            raise RuntimeError(\r\n                u'Cannot determine if table exists database not opened.')\r\n\r\n        self._cursor.execute(sql_query)\r\n        return self._cursor.fetchone()\r\n\r\n    def SelectAllRows (self, sql_query):\r\n        # Executes the given query and returns all rows\r\n\r\n        # Args:\r\n        #   sql_query: query you want to execute.\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is not opened or\r\n        #   if the database is in read-only mode.\r\n\r\n        if not self._connection:\r\n            raise RuntimeError(\r\n                u'Cannot determine if table exists database not opened.')\r\n\r\n        self._cursor.execute(sql_query)\r\n        return self._cursor.fetchall()\r\n\r\n    def Open(self, filename, read_only=False):\r\n        #Opens the database file.\r\n\r\n        #Args:\r\n        #   filename: the filename of the database.\r\n        #   read_only: optional boolean value to indicate the database should be\r\n        #   opened in read-only mode. The default is false. Since sqlite3\r\n        #   does not support a real read-only mode we fake it by only\r\n        #   permitting SELECT queries.\r\n\r\n        #Returns:\r\n        #   A boolean containing True if successful or False if not.\r\n\r\n        #Raises:\r\n        #   RuntimeError: if the database is already opened.\r\n        \r\n        if self._connection:\r\n            raise RuntimeError(u'Cannot open database already opened.')\r\n\r\n        self.filename = filename\r\n        self.read_only = read_only\r\n\r\n        self._connection = sqlite3.connect(filename)\r\n        if not self._connection:\r\n            return False\r\n\r\n        self._cursor = self._connection.cursor()\r\n        if not self._cursor:\r\n            return False\r\n\r\n        return True\r\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":12918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"396056251","text":"#!/usr/bin/python3\n\n\"\"\"Translate basic Frictionless Table-Schema table definitions to Deriva.\"\"\"\n\nimport os\nimport sys\nimport json\nfrom deriva.core import tag\nfrom deriva.core.ermrest_model import builtin_types, Table, Column, Key, ForeignKey\n\nschema_tag = 'tag:isrd.isi.edu,2019:table-schema-leftovers'\nresource_tag = 'tag:isrd.isi.edu,2019:table-resource'\n\n# translate table-schema definitions into deriva definitions\nschema_name = 'CFDE'\n\ndef make_type(type, format):\n    \"\"\"Choose appropriate ERMrest column types...\"\"\"\n    if type == \"string\":\n        return builtin_types.text\n    if type == \"datetime\":\n        return builtin_types.timestamptz\n    if type == \"date\":\n        return builtin_types.date\n    if type == \"integer\":\n        return builtin_types.int8\n    if type == \"number\":\n        return builtin_types.float8\n    if type == \"list\":\n        # assume a list is a list of strings for now...\n        return builtin_types[\"text[]\"]\n    raise ValueError('no mapping defined yet for type=%s format=%s' % (type, format))\n\ndef make_column(cdef):\n    cdef = dict(cdef)\n    constraints = cdef.get(\"constraints\", {})\n    cdef_name = cdef.pop(\"name\")\n    nullok = not constraints.pop(\"required\", False)\n    description = cdef.pop(\"description\", None)\n    return Column.define(\n        cdef_name,\n        make_type(\n            cdef.get(\"type\", \"string\"),\n            cdef.get(\"format\", \"default\"),\n        ),\n        nullok=nullok,\n        comment=description,\n        annotations={\n            schema_tag: cdef,\n        }\n    )\n\ndef make_key(tname, cols):\n    return Key.define(\n        cols,\n        constraint_names=[ [schema_name, \"%s_%s_key\" % (tname, 
\"_\".join(cols))] ],\n )\n\ndef make_fkey(tname, fkdef):\n fkcols = fkdef.pop(\"fields\")\n fkcols = [fkcols] if isinstance(fkcols, str) else fkcols\n reference = fkdef.pop(\"reference\")\n pktable = reference.pop(\"resource\")\n pktable = tname if pktable == \"\" else pktable\n to_name = reference.pop(\"title\", None)\n pkcols = reference.pop(\"fields\")\n pkcols = [pkcols] if isinstance(pkcols, str) else pkcols\n annotations = {\n schema_tag: fkdef,\n }\n if to_name is not None:\n annotations[tag.foreign_key] = {\"to_name\": to_name}\n return ForeignKey.define(\n fkcols,\n schema_name,\n pktable,\n pkcols,\n constraint_names=[ [schema_name, \"%s_%s_fkey\" % (tname, \"_\".join(fkcols))] ],\n annotations=annotations\n )\n\ndef make_table(tdef):\n provide_system = not (os.getenv('SKIP_SYSTEM_COLUMNS', 'false').lower() == 'true')\n tname = tdef[\"name\"]\n if provide_system:\n system_columns = Table.system_column_defs()\n system_keys = Table.system_key_defs()\n # customize the system column templates...\n for col in system_columns:\n cname = col['name']\n col['comment'] = {\n 'RID': 'Immutable record identifier (system-generated).',\n 'RCT': 'Record creation time (system-generated).',\n 'RMT': 'Record last-modification time (system-generated).',\n 'RCB': 'Record created by (system-generated).',\n 'RMB': 'Record last-modified by (system-generated).',\n }[cname]\n display_names = {\n 'RCT': 'Creation Time',\n 'RMT': 'Modification Time',\n 'RCB': 'Created By',\n 'RMB': 'Modified By',\n }\n if cname != 'RID':\n col['annotations'] = {tag.display: {\"name\": display_names[cname]}}\n system_fkeys = [\n ForeignKey.define(\n [cname], 'public', 'ERMrest_Client', ['ID'],\n constraint_names=[['CFDE', '%s_%s_fkey' % (tname, cname)]]\n )\n for cname in ['RCB', 'RMB']\n ]\n else:\n system_columns = []\n system_keys = []\n system_fkeys = []\n tcomment = tdef.get(\"description\")\n tdef_resource = tdef\n tdef = tdef_resource.pop(\"schema\")\n keys = []\n keysets = set()\n pk = tdef.pop(\"primaryKey\", None)\n if isinstance(pk, str):\n pk = [pk]\n if isinstance(pk, list):\n keys.append(make_key(tname, pk))\n keysets.add(frozenset(pk))\n tdef_fields = tdef.pop(\"fields\", None)\n for cdef in tdef_fields:\n if cdef.get(\"constraints\", {}).pop(\"unique\", False):\n kcols = [cdef[\"name\"]]\n if frozenset(kcols) not in keysets:\n keys.append(make_key(tname, kcols))\n keysets.add(frozenset(kcols))\n tdef_fkeys = tdef.pop(\"foreignKeys\", [])\n title = tdef_resource.get(\"title\", None)\n annotations = {\n resource_tag: tdef_resource,\n schema_tag: tdef,\n }\n if title is not None:\n annotations[tag.display] = {\"name\": title}\n return Table.define(\n tname,\n column_defs=system_columns + [\n make_column(cdef)\n for cdef in tdef_fields\n ],\n key_defs=system_keys + keys,\n fkey_defs=system_fkeys + [\n make_fkey(tname, fkdef)\n for fkdef in tdef_fkeys\n ],\n comment=tcomment,\n provide_system=False,\n annotations=annotations,\n )\n\ndef make_model(tableschema):\n resources = tableschema['resources']\n return {\n \"schemas\": {\n schema_name: {\n \"schema_name\": schema_name,\n \"tables\": {\n tdef[\"name\"]: make_table(tdef)\n for tdef in resources\n }\n }\n }\n }\n\ndef main():\n \"\"\"Translate basic Frictionless Table-Schema table definitions to Deriva.\n\n - Reads table-schema JSON on standard input\n - Writes deriva schema JSON on standard output\n\n The output JSON is suitable for POST to an /ermrest/catalog/N/schema\n resource on a fresh, empty catalog.\n\n Example:\n\n python3 -m cfde_deriva.tableschema < 
table-schema/cfde-core-model.json\n\n Optionally:\n\n run with SKIP_SYSTEM_COLUMNS=true to suppress generation of ERMrest\n system columns RID,RCT,RCB,RMT,RMB for each table.\n\n\"\"\"\n json.dump(make_model(json.load(sys.stdin)), sys.stdout, indent=2)\n return 0\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"cfde_deriva/tableschema.py","file_name":"tableschema.py","file_ext":"py","file_size_in_byte":6208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563720693","text":"'''\ndata module\n'''\n\nimport user_manipulation as um\nimport controller\n\n\nclass DataManipulation():\n '''\n data manipulation\n '''\n\n def __init__(self):\n\n self.request_state = (\"register\", \"bind\", \"requestCIP\", \"close\")\n self.users = um.UserManipulation()\n\n def data_handler(self, addr, data):\n '''\n data handler\n '''\n\n # register\n if data[0] == self.request_state[0]:\n if self.users.check_permission(data[1], str(addr)):\n return \"ok \" + addr[0] + \" \" + str(addr[1])\n else:\n return \"fail\"\n\n # bind\n elif data[0] == self.request_state[1]:\n return self.users.bind_user(data[1], data[2], data[3], data[4])\n\n #requestCIP\n elif data[0] == self.request_state[2]:\n ctl = controller.Controller()\n return ctl.get_controller_ip(data[1])\n\n # close\n elif data[0] == self.request_state[3]:\n self.users.delete_user(data[1])\n return \"ok\"\n","sub_path":"ver_ctl/check_upper_layer_connection/singalServer/data_manipulation.py","file_name":"data_manipulation.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467005352","text":"num = int(input())\nsecond = str(num % 10)\nfirst = str(num // 10)\nend = \"\"\nif num == 0:\n end = \"zero\"\nif num == 100:\n end = \"one hundred\"\nelif second == \"1\":\n second = \"one\"\nelif second == \"2\":\n second = \"two\"\nelif second == \"3\":\n second = \"three\"\nelif second == \"4\":\n second = \"four\"\nelif second == \"5\":\n second = \"five\"\nelif second == \"6\":\n second = \"six\"\nelif second == \"7\":\n second = \"seven\"\nelif second == \"8\":\n second = \"eight\"\nelif second == \"9\":\n second = \"nine\"\nif 0 < num < 10:\n end = second\nelif 13 <= num < 20:\n end = second + \"teen\"\nif 10 <= num < 20:\n if num == 10:\n end = \"ten\"\n elif num == 11:\n end = \"eleven\"\n elif num == 12:\n end = \"twelve\"\n elif num == 13:\n end = \"thirteen\"\n elif num == 15:\n end = \"fifteen\"\n elif num == 18:\n end = \"eighteen\"\nelif 20 <= num < 100:\n if first == \"2\":\n first = \"twenty\"\n elif first == \"3\":\n first = \"thirty\"\n elif first == \"4\":\n first = \"forty\"\n elif first == \"5\":\n first = \"fifty\"\n elif first == \"6\":\n first = \"sixty\"\n elif first == \"7\":\n first = \"seventy\"\n elif first == \"8\":\n first = \"eighty\"\n elif first == \"9\":\n first = \"ninety\"\n if second != \"0\":\n end = first + \" \" + second\n else:\n end = first\nif num < 0 or num > 100:\n end = \"invalid number\"\nprint(end)","sub_path":"ProgrammingBasicsWithPython2018/03_simple_conditions/16. Numberse 0...100 to Text.py","file_name":"16. 
Numberse 0...100 to Text.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"107384040","text":"# -*-coding=utf-8-*-\n\n# @Time : 2018/9/12 10:47\n# @File : async_demo.py\nimport asyncio\nimport random\nimport time\n\nimport aiohttp\n\n\nasync def async_function():\n    return 1\n\nasync def async_generator():\n    yield 1\n\n\n# print(type(async_function),type(async_generator))\n# print(type(async_function()),type(async_generator()))\ntry:\n    ret=async_function().send(None)\n    print(ret)\nexcept StopIteration as e:\n    print(e)\n\n\nclass Potato(object):\n\n    # def __init__(self,name):\n    #     self.name=name\n    #\n    # def __repr__(self):\n    #     return self.name\n\n    @classmethod\n    def make(cls,num,*args,**kwgs):\n        potatos = []\n        for i in range(num):\n            potatos.append(cls.__new__(cls,*args,**kwgs))\n\n        return potatos\n\n# p=Potato('google')\n# print(p)\n\npotatos = Potato.make(10)\nprint(len(potatos))\n# for i in potatos:\n    # print(i)\n\n\n\ndef ask_for_more():\n    time.sleep(random.random())\n    potatos.extend(Potato.make(random.randint(1,10)))\n\ndef take_potatos(num):\n    count = 0\n    while True:\n        if len(potatos)==0:\n            ask_for_more()\n            # time.sleep(1)\n        else:\n            p=potatos.pop()\n            yield p\n            count+=1\n            if count == num:\n\n                break\n\n\n# synchronous version, no async used\ndef sync_mode():\n    start=time.time()\n    def buy_potato():\n        buckets =[]\n        for p in take_potatos(50):\n            print(p)\n            buckets.append(p)\n\n\n    buy_potato()\n    print('time used {}'.format(time.time()-start))\n\n\ndef async_mode():\n\n    potatos = Potato.make(10)\n    async def ask_for_more():\n        # time.sleep(random.random())\n        await asyncio.sleep(random.random())\n        potatos.extend(Potato.make(random.randint(1, 10)))\n\n    async def take_potatos_async(num):\n        count = 0\n        while True:\n            if len(potatos) == 0:\n                await ask_for_more()\n                # time.sleep(1)\n\n            p = potatos.pop()\n            yield p\n            count += 1\n            if count == num:\n                break\n\n    async def buy_potato_async():\n        buckets =[]\n        async for p in take_potatos_async(50):\n            # print(p)\n            buckets.append(p)\n\n    start = time.time()\n    loop = asyncio.get_event_loop()\n\n    loop.run_until_complete(buy_potato_async())\n    # loop.run_until_complete(asyncio.wait([buy_potato_async(),buy_potato_async()]))\n    loop.close()\n    print('use time {}'.format(time.time()-start))\n\n\ndef class_async():\n    class ThreeTwoOne:\n        async def begin(self):\n            print(3)\n            await asyncio.sleep(1)\n            print(2)\n            await asyncio.sleep(1)\n            print(1)\n            await asyncio.sleep(1)\n            return\n\n    async def game():\n        t = ThreeTwoOne()\n        await t.begin()\n        print('start')\n\n    loop=asyncio.get_event_loop()\n    loop.run_until_complete(asyncio.wait([game(),game(),game()]))\n    loop.close()\n    # game()\n\n\n\ndef yield_from_demo():\n    def sub_gen():\n        yield 1\n        yield 2\n        yield 3\n        yield 4\n\n    def gen():\n        return (yield from sub_gen())\n\n\n    def main_app():\n        for val in gen():\n            print(val)\n\n    main_app()\n\nimport types\n# test the await keyword with a generator-based coroutine\ndef use_keyword():\n\n    @types.coroutine\n    def compute(x,y):\n        print('Compute {} + {} ...'.format(x,y))\n        yield from asyncio.sleep(1.0)\n        return x+y\n\n    async def print_sum(x,y):\n        result = await compute(x,y)\n        print('{} + {} = {}'.format(x,y,result))\n\n    loop =asyncio.get_event_loop()\n    loop.run_until_complete(print_sum(9,9))\n    loop.close()\n\ndef liaoxuefeng_async():\n    async def hello():\n        print('hello')\n        await asyncio.sleep(1)\n        print('hello again')\n\n    loop =asyncio.get_event_loop()\n    loop.run_until_complete(hello())\n    loop.close()\n\nasync def http_demo(url):\n    # url = 'https://github.com'\n    headers = 
{'User-Agent':'android 8'}\n    async with aiohttp.ClientSession() as session:\n        async with session.get(url,headers=headers) as resp:\n            print(await resp.text())\n\n\n# async def http_\n\nloop =asyncio.get_event_loop()\nurl_list = ['http://30daydo.com','https://github.com']\nfunc_list = [http_demo(url) for url in url_list]\nloop.run_until_complete(asyncio.wait(func_list))\nloop.close()\n\n# class_async()\n# yield_from_demo()\n# sync_mode()\n# async_mode()\n# use_keyword()\n# liaoxuefeng_async()","sub_path":"async_demo.py","file_name":"async_demo.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"223313507","text":"\"\"\"Object that deals with the sensitivity test.\"\"\"\n\nimport os\nimport json\nimport pickle\nimport itertools\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport c3.utils.display as display\nfrom c3.optimizers.optimizer import Optimizer\nimport matplotlib.pyplot as plt\nfrom c3.utils.utils import log_setup\nfrom c3.libraries.estimators import dv_g_LL_prime, g_LL_prime_combined, g_LL_prime, neg_loglkh_multinom_norm\n\n\nclass SET(Optimizer):\n    \"\"\"Object that deals with the sensitivity test.\n\n    Parameters\n    ----------\n    dir_path : str\n        Filepath to save results\n    fom : callable\n        Figure of merit\n    sampling : str\n        Sampling method from the sampling library\n    batch_sizes : list\n        Number of points to select from each dataset\n    sweep_map : list\n        Identifiers to be swept\n    state_labels : list\n        Identifiers for the qubit subspaces\n    algorithm : callable\n        From the algorithm library\n    options : dict\n        Options to be passed to the algorithm\n    same_dyn : boolean\n        ?\n    run_name : str\n        User specified name for the run, will be used as root folder\n    \"\"\"\n\n    def __init__(\n        self,\n        dir_path,\n        fom,\n        estimator_list,\n        sampling,\n        batch_sizes,\n        state_labels=None,\n        sweep_map=None,\n        sweep_bounds=None,\n        algorithm=None,\n        run_name=None,\n        same_dyn=False,\n        options={}\n    ):\n        \"\"\"Initialise.\"\"\"\n        super().__init__(algorithm=algorithm)\n        self.fom = fom\n        self.estimator_list = estimator_list\n        self.sampling = sampling\n        self.batch_sizes = batch_sizes\n        self.state_labels = state_labels\n        self.sweep_map = sweep_map\n        self.opt_map = [sweep_map[0]]\n        self.sweep_bounds = sweep_bounds\n        self.options = options\n        self.inverse = False\n        self.learn_data = {}\n        self.same_dyn = same_dyn\n        self.log_setup(dir_path, run_name)\n\n    def log_setup(self, dir_path, run_name):\n        \"\"\"\n        Create the folders to store data.\n\n        Parameters\n        ----------\n        dir_path : str\n            Filepath\n        run_name : str\n            User specified name for the run\n\n        \"\"\"\n        self.dir_path = os.path.abspath(dir_path)\n        if run_name is None:\n            run_name = \"sensitivity\" \\\n                + self.algorithm.__name__ + '-' \\\n                + self.sampling.__name__ + '-' \\\n                + self.fom.__name__\n        self.logdir = log_setup(self.dir_path, run_name)\n        self.logname = \"sensitivity.log\"\n\n    def read_data(self, datafiles):\n        # TODO move common methods of sensitivity and c3 to super class\n        \"\"\"\n        Open data files and read in experiment results.\n\n        Parameters\n        ----------\n        datafiles : list of str\n            List of paths for files that contain learning data.\n        \"\"\"\n        for target, datafile in datafiles.items():\n            with open(datafile, 'rb+') as file:\n                self.learn_data[target] = pickle.load(file)\n\n    def load_best(self, init_point):\n        \"\"\"\n        Load a previous parameter point to start the optimization from.\n\n        Parameters\n        ----------\n        init_point : str\n            File location of the initial 
point\n\n        \"\"\"\n        with open(init_point) as init_file:\n            best = init_file.readlines()\n            best_exp_opt_map = [tuple(a) for a in json.loads(best[0])]\n            init_p = json.loads(best[1])['params']\n            self.exp.set_parameters(init_p, best_exp_opt_map)\n\n    def select_from_data(self, batch_size):\n        \"\"\"\n        Select a subset of each dataset to compute the goal function on.\n\n        Parameters\n        ----------\n        batch_size : int\n            Number of points to select\n\n        Returns\n        -------\n        list\n            Indices of the selected data points.\n        \"\"\"\n        learn_from = self.learn_from\n        sampling = self.sampling\n        indeces = sampling(learn_from, batch_size)\n        if self.inverse:\n            return list(set(all) - set(indeces))\n        else:\n            return indeces\n\n    def sensitivity(self):\n        \"\"\"\n        Run the sensitivity analysis.\n\n        \"\"\"\n        self.nice_print = self.exp.print_parameters\n\n        print(\"Initial parameters:\")\n        print(self.exp.print_parameters())\n        for ii in range(len(self.sweep_map)):\n            self.dfname = \"data.dat\"\n            self.opt_map = [self.sweep_map[ii]]\n            self.options['bounds'] = [self.sweep_bounds[ii]]\n            print(f\"C3:STATUS:Sweeping {self.opt_map}: {self.sweep_bounds[ii]}\")\n            self.log_setup(self.dir_path, \"_\".join(self.opt_map[0]))\n            self.start_log()\n            print(f\"C3:STATUS:Saving as: {os.path.abspath(self.logdir + self.logname)}\")\n            x0 = self.exp.get_parameters(self.opt_map, scaled=False)\n            self.init_gateset_params = self.exp.gateset.get_parameters()\n            self.init_gateset_opt_map = self.exp.gateset.list_parameters()\n            try:\n                self.algorithm(\n                    x0,\n                    fun=self.fct_to_min,\n                    fun_grad=self.fct_to_min_autograd,\n                    grad_lookup=self.lookup_gradient,\n                    options=self.options\n                )\n            except KeyboardInterrupt:\n                pass\n            self.exp.set_parameters(x0, self.opt_map, scaled=False)\n\n        # #=== Get the resulting data ======================================\n\n        # Xs=np.array(list(learner.data.keys()))\n        # Ys=np.array(list(learner.data.values()))\n        # Ks=np.argsort(Xs)\n        # Xs=Xs[Ks]\n        # Ys=Ys[Ks]\n\n    def goal_run(self, val):\n        \"\"\"\n        Evaluate the figure of merit for the current model parameters.\n\n        Parameters\n        ----------\n        val : tf.Tensor\n            Current model parameters\n\n        Returns\n        -------\n        tf.float64\n            Figure of merit\n\n        \"\"\"\n        exp_values = []\n        exp_stds = []\n        sim_values = []\n        exp_shots = []\n        goals = []\n        seq_weigths = []\n        count = 0\n        #TODO: seq per point is not constant. 
Remove.\n\n # print(\"tup: \" + str(tup))\n # print(\"val: \" + str(val))\n # print(self.opt_map)\n self.exp.set_parameters(val, self.opt_map, scaled=False)\n # print(\"params>>> \")\n # print(self.exp.print_parameters(self.opt_map))\n\n # print(\"self.learn_data.items(): \" + str(len(self.learn_data.items())))\n for target, data in self.learn_data.items():\n\n self.learn_from = data['seqs_grouped_by_param_set']\n self.gateset_opt_map = data['opt_map']\n indeces = self.select_from_data(self.batch_sizes[target])\n\n for ipar in indeces:\n # if count % 100 == 0:\n # print(\"count: \" + str(count))\n\n count += 1\n m = self.learn_from[ipar]\n gateset_params = m['params']\n gateset_opt_map = self.gateset_opt_map\n m_vals = m['results']\n m_stds = np.array(m['results_std'])\n m_shots = m['shots']\n sequences = m['seqs']\n num_seqs = len(sequences)\n if target == 'all':\n num_seqs = len(sequences) * 3\n\n self.exp.gateset.set_parameters(\n self.init_gateset_params,\n self.init_gateset_opt_map,\n scaled=False\n )\n self.exp.gateset.set_parameters(\n gateset_params, gateset_opt_map, scaled=False\n )\n # We find the unique gates used in the sequence and compute\n # only them.\n self.exp.opt_gates = list(\n set(itertools.chain.from_iterable(sequences))\n )\n self.exp.get_gates()\n self.exp.evaluate(sequences)\n sim_vals = self.exp.process(labels=self.state_labels[target])\n\n exp_stds.extend(m_stds)\n exp_shots.extend(m_shots)\n\n if target == 'all':\n goal = neg_loglkh_multinom_norm(\n m_vals,\n tf.stack(sim_vals),\n tf.constant(m_stds, dtype=tf.float64),\n tf.constant(m_shots, dtype=tf.float64)\n )\n else:\n goal = g_LL_prime(\n m_vals,\n tf.stack(sim_vals),\n tf.constant(m_stds, dtype=tf.float64),\n tf.constant(m_shots, dtype=tf.float64)\n )\n goals.append(goal.numpy())\n seq_weigths.append(num_seqs)\n sim_values.extend(sim_vals)\n exp_values.extend(m_vals)\n\n with open(self.logdir + self.logname, 'a') as logfile:\n logfile.write(\n \"\\n Parameterset {}, #{} of {}:\\n {}\\n {}\\n\".format(\n ipar + 1,\n count,\n len(indeces),\n json.dumps(self.gateset_opt_map),\n self.exp.gateset.get_parameters(\n self.gateset_opt_map, to_str=True\n ),\n )\n )\n logfile.write(\n \"Sequence Simulation Experiment Std Shots\"\n \" Diff\\n\"\n )\n\n for iseq in range(len(sequences)):\n m_val = np.array(m_vals[iseq])\n m_std = np.array(m_stds[iseq])\n shots = np.array(m_shots[iseq])\n sim_val = sim_vals[iseq].numpy()\n int_len = len(str(num_seqs))\n with open(self.logdir + self.logname, 'a') as logfile:\n for ii in range(len(sim_val)):\n logfile.write(\n f\"{iseq + 1:8} \"\n f\"{float(sim_val[ii]):8.6f} \"\n f\"{float(m_val[ii]):8.6f} \"\n f\"{float(m_std[ii]):8.6f} \"\n f\"{float(shots[0]):8} \"\n f\"{float(m_val[ii]-sim_val[ii]):8.6f}\\n\"\n )\n logfile.flush()\n\n goal = g_LL_prime_combined(goals, seq_weigths)\n # TODO make gradient free function use any fom\n\n with open(self.logdir + self.logname, 'a') as logfile:\n logfile.write(\"\\nFinished batch with \")\n logfile.write(\"{}: {}\\n\".format(self.fom.__name__, goal))\n print(\"{}: {}\".format(self.fom.__name__, goal))\n for est in self.estimator_list:\n val = float(\n est(exp_values, sim_values, exp_stds, exp_shots).numpy()\n )\n logfile.write(\"{}: {}\\n\".format(est.__name__, val))\n #print(\"{}: {}\".format(est.__name__, val))\n print(\"\")\n logfile.flush()\n\n self.optim_status['params'] = [\n par.numpy().tolist()\n for par in self.exp.get_parameters(self.opt_map)\n ]\n self.optim_status['goal'] = goal\n self.evaluation += 1\n return 
goal\n","sub_path":"c3/optimizers/sensitivity.py","file_name":"sensitivity.py","file_ext":"py","file_size_in_byte":11582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91639247","text":"import re\nimport sys\n\n\ndef parse_user_input():\n allRegex = re.compile(r'^show\\b\\sall\\b')\n addRegex = re.compile(r'(add)\\s(\\w+)\\s(\\w+).*(\\d\\w).*(\\d)\\s\\w+?\\s(\\w+)') #len[0] = 5\n showRegex = re.compile(r'(show)\\s(\\w+)\\s(\\w+).*?(\\d\\w)\\s?(\\w+)') #len[0] = 4\n\n regex = [allRegex, addRegex, showRegex]\n\n input_user = input('Please input your command: ')\n if input_user == 'exit':\n sys.exit('Shut down')\n for i in regex:\n mo = i.findall(input_user)\n if mo:\n mo = mo[0]\n return mo\n\n\nif __name__ == '__main__':\n test = parse_user_input()\n print(test)\n a = test[0][1] + ' ' + test[0][2] + ' ' + test[0][3]\n print(a)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594042609","text":"from bs4 import BeautifulSoup as bs\nimport requests\nimport re\n\nclass ArticleParser(object):\n def __init__(self):\n # 특정 문자( [], (), <> 등 )를 제거하는 코드\n # self.pre_remove = re.compile(\"[\\()\\<>\\[].*?[\\]<\\>(\\)]\")\n\n self.special_symbol = re.compile('[\\{\\}\\[\\]\\/?,;:|*~`!^\\-_+<>@\\#$%&n▲▶◆◀■\\'\\\"\\\\\\【】]')\n self.content_pattern = re.compile('본문 내용|TV플레이어|동영상 뉴스|flash 오류를 우회하기 위한 함수 추가fuctio flashremoveCallback|tt|t|앵커 멘트|xa0|앵커|(사진)')\n\n def clearContent(self, text):\n # 특정 문자( [], (), <> 등 )를 제거하는 코드\n # pre_text = re.sub(self.pre_remove, '', text)\n special_symbol_removed_content = re.sub(self.special_symbol, '', text)\n\n # 기사 본문에서 필요없는 특수문자 및 본문 양식 등을 다 지움\n # special_symbol_removed_content = re.sub(self.special_symbol, '', text)\n end_phrase_removed_content = re.sub(self.content_pattern, '', special_symbol_removed_content)\n blank_removed_content = end_phrase_removed_content.strip().replace(' ', '') # 공백 에러 삭제\n reversed_content = ''.join(reversed(blank_removed_content)) # 기사 내용을 reverse\n content = ''\n\n for i in range(0, len(blank_removed_content)):\n # reverse 된 기사 내용 중, \".다\"로 끝나는 경우 기사 내용이 끝난 것이기 때문에\n # 기사 내용이 끝난 후의 광고, 기자 등의 정보는 삭제\n if reversed_content[i:i + 2] == '.다':\n content = ''.join(reversed(reversed_content[i:]))\n break\n\n for i in range(0, len(content)):\n if content[i:i + 4] == '기자 =':\n content = content[i + 4:]\n elif content[i: i + 2] == '기자':\n content = content[i + 2:]\n break\n \n return content\n\n def clearHeadline(self, text):\n # 기사 제목에서 필요없는 특수문자들을 지움\n special_symbol_removed_headline = re.sub(self.special_symbol, '', text)\n\n return special_symbol_removed_headline\n \n def findNewsTotalpage(self, url):\n # 당일 기사 목록 전체 페이지 수를 알아냄\n try:\n totalpage_url = url\n request_content = requests.get(totalpage_url)\n document_content = bs(request_content.content, 'html.parser')\n headline_tag = document_content.find('div', {'class': 'paging'}).find('strong')\n regex = re.compile(r'(?P\\d+)')\n match = regex.findall(str(headline_tag))\n \n return int(match[0])\n except Exception:\n\n return 0","sub_path":"19-1/MachineLearningProject/Project/ArticleParser.py","file_name":"ArticleParser.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253518062","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 24 14:04:40 2018\r\n\r\n@author: 
Administrator\r\n\"\"\"\r\n# (Visualization) Exercise 11:\r\n# 1. Get the school names\r\n# 2. Get each school's enrollment numbers\r\n# 3. Display the results in Echarts\r\n\r\nf=open('E:\\\\大数据实训\\\\课程作业\\\\母本.txt',encoding='gbk').readlines()\r\nschoolls=[]\r\ndata=[]\r\nfor line in f:\r\n    schoolls.append(line.split('(')[1].split(',')[0])\r\n    data.append(line.split(',')[1].split(')')[0])\r\n    print(schoolls)\r\n    print(data)\r\n    ","sub_path":"练习题11.py","file_name":"练习题11.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"270155432","text":"import numpy as np\nimport torch\nimport os\nimport sys\nimport functools\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import init\nimport torch.nn.functional as F\nimport torchvision.models as M\n\n\nclass ResNeXtBottleneck(nn.Module):\n    def __init__(self, in_channels=256, out_channels=256, stride=1, cardinality=32, dilate=1):\n        super(ResNeXtBottleneck, self).__init__()\n        D = out_channels // 2\n        self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n        self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=dilate, dilation=dilate,\n                                   groups=cardinality,\n                                   bias=False)\n        self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n        self.shortcut = nn.Sequential()\n        if stride != 1:\n            self.shortcut.add_module('shortcut',\n                                     nn.AvgPool2d(2, stride=2))\n\n    def forward(self, x):\n        bottleneck = self.conv_reduce.forward(x)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_conv.forward(bottleneck)\n        bottleneck = F.leaky_relu(bottleneck, 0.2, True)\n        bottleneck = self.conv_expand.forward(bottleneck)\n        x = self.shortcut.forward(x)\n        return x + bottleneck\n\n\nclass Tunnel(nn.Module):\n    def __init__(self, len=1, *args):\n        super(Tunnel, self).__init__()\n\n        tunnel = [ResNeXtBottleneck(*args) for _ in range(len)]\n        self.tunnel = nn.Sequential(*tunnel)\n\n    def forward(self, x):\n        return self.tunnel(x)\n\n\nclass DilateTunnel(nn.Module):\n    def __init__(self, depth=4):\n        super(DilateTunnel, self).__init__()\n\n        tunnel = [ResNeXtBottleneck(dilate=1) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(dilate=2) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(dilate=4) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(dilate=8) for _ in range(depth)]\n        tunnel += [ResNeXtBottleneck(dilate=1) for _ in range(14)]\n\n        self.tunnel = nn.Sequential(*tunnel)\n\n    def forward(self, x):\n        return self.tunnel(x)\n\n\nclass def_netG(nn.Module):\n    def __init__(self, ngf=32):\n        super(def_netG, self).__init__()\n\n        self.toH = nn.Sequential(nn.Conv2d(4, ngf, kernel_size=7, stride=1, padding=3), nn.LeakyReLU(0.2, True))\n\n        self.to0 = nn.Sequential(nn.Conv2d(1, ngf // 2, kernel_size=3, stride=1, padding=1),\n                                 nn.LeakyReLU(0.2, True))\n        self.to1 = nn.Sequential(nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1),\n                                 nn.LeakyReLU(0.2, True))\n        self.to2 = nn.Sequential(nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1),\n                                 nn.LeakyReLU(0.2, True))\n        self.to3 = nn.Sequential(nn.Conv2d(ngf * 3, ngf * 4, kernel_size=4, stride=2, padding=1),\n                                 nn.LeakyReLU(0.2, True))\n        self.to4 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),\n                                 nn.LeakyReLU(0.2, True))\n\n        tunnel4 = [ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1) for _ in range(20)]\n        tunnel4 += [nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3, stride=1, padding=1),\n                    nn.PixelShuffle(2),\n                    nn.LeakyReLU(0.2, True)]\n        
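# tunnel4: 20 ResNeXt blocks at the coarsest (1/16) scale, then PixelShuffle doubles the resolution\n        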
self.tunnel4 = nn.Sequential(*tunnel4)\n\n depth = 2\n tunnel = [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=1) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=2) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=4) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=2),\n ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=1)]\n tunnel3 = nn.Sequential(*tunnel)\n\n self.tunnel3 = nn.Sequential(nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel3,\n nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n tunnel = [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=1) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=2) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=4) for _ in range(depth)]\n tunnel += [ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=2),\n ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=1)]\n tunnel2 = nn.Sequential(*tunnel)\n\n self.tunnel2 = nn.Sequential(nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel2,\n nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=1)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=2)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=4)]\n tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=2),\n ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=1)]\n tunnel1 = nn.Sequential(*tunnel)\n\n self.tunnel1 = nn.Sequential(nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, True),\n tunnel1,\n nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),\n nn.PixelShuffle(2),\n nn.LeakyReLU(0.2, True)\n )\n\n self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x, hint):\n v = self.toH(hint)\n\n x0 = self.to0(x)\n x1 = self.to1(x0)\n x2 = self.to2(x1)\n x3 = self.to3(torch.cat([x2, v], 1))\n x4 = self.to4(x3)\n\n x = self.tunnel4(x4)\n\n x = self.tunnel3(torch.cat([x, x3.detach()], 1))\n x = self.tunnel2(torch.cat([x, x2.detach()], 1))\n x = self.tunnel1(torch.cat([x, x1.detach()], 1))\n x = F.tanh(self.exit(torch.cat([x, x0.detach()], 1)))\n return x\n\n\nclass def_netD(nn.Module):\n def __init__(self, ndf=64):\n super(def_netD, self).__init__()\n\n sequence = [\n nn.Conv2d(1, ndf, kernel_size=4, stride=2, padding=1, bias=False), # 256\n nn.LeakyReLU(0.2, True),\n\n ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False), # 128\n ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),\n nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False), # 64\n ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),\n # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=0, bias=False), # 32\n # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n # 
ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 16\n ]\n\n self.model = nn.Sequential(*sequence)\n\n sequence = [\n nn.Conv2d(ndf * 4 + 3, ndf * 8, kernel_size=3, stride=1, padding=1, bias=False), # 32\n nn.LeakyReLU(0.2, True),\n\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 16\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 8\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2), # 4\n ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),\n nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False), # 1\n nn.LeakyReLU(0.2, True),\n\n ]\n\n self.prototype = nn.Sequential(*sequence)\n\n self.out = nn.Linear(512, 1)\n\n def forward(self, color, sketch):\n color = F.avg_pool2d(color, 16, 16)\n sketch = self.model(sketch)\n out = self.prototype(torch.cat([sketch, color], 1))\n return self.out(out.view(color.size(0), -1))\n\n\ndef def_netF():\n vgg16 = M.vgg16()\n vgg16.load_state_dict(torch.load('vgg16-397923af.pth'))\n vgg16.features = nn.Sequential(\n *list(vgg16.features.children())[:9]\n )\n for param in vgg16.parameters():\n param.requires_grad = False\n return vgg16.features\n","sub_path":"models/simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":9638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49577305","text":"class Ship:\n def __init__(self, game, x, y, radius):\n\n (self.x, self.y, self.radius) = (x, y, radius)\n \n\nif __name__ == '__main__':\n one = Ship(None, 20, 30, 5)\n two = Ship(None, 120, -30, 50) \n print(one.radius)\n print(two)","sub_path":"lab15/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380281827","text":"# 运用爬虫方式,依次递进找到最后一个网页\n# The program will use urllib to read the HTML from the data files below, extract the href= vaues from the anchor tags, scan for a tag that is in a particular position relative to the first name in the list, follow that link and repeat the process a number of times and report the last name you find.\n\n# Find the link at position 18 (the first name is 1). Follow that link. Repeat this process 7 times. 
The answer is the last name that you retrieve.\n# Hint: The first character of the name of the last page that you will load is: M\nfrom bs4 import BeautifulSoup\nimport urllib.request, urllib.error, urllib.parse\nimport re\n\nurl = 'http://python-data.dr-chuck.net/known_by_Cleo.html'\n\nindex = 18\ncount = 7\n\nfor i in range(count):\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html, 'html.parser')\n\n tags = soup('a')\n url = tags[index-1].get('href', None)\n\nprint('最终网址为', url)\n\nprint('名称为', re.findall('_([a-zA-Z]+).h', url)[0])\n\n","sub_path":"summer-2019/learn-python/follow-links.py","file_name":"follow-links.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150653202","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 23:15:51 2020\n\n@author: Fredrik Forsberg\n\"\"\"\n\nimport rospy\nimport tf2_ros\nimport tf2_geometry_msgs # Needed\nfrom geometry_msgs.msg import PoseStamped, TransformStamped\nfrom aruco_msgs.msg import MarkerArray\n\n###\n\n\nclass ArUcoDetect:\n def __init__(self, node_name, aruco_markers_subscription_topic):\n # Initiate node\n rospy.init_node(node_name)\n \n self.detected_markers = None # Initiates as None\n \n self.tf_broadcaster = tf2_ros.TransformBroadcaster()\n self.tf_buf = tf2_ros.Buffer()\n self.tf_listener = tf2_ros.TransformListener(self.tf_buf)\n \n #Subscribe to image topic\n rospy.Subscriber(aruco_markers_subscription_topic, MarkerArray, self.markers_callback)\n \n #\n \n \n def markers_callback(self, markers):\n self.detected_markers = markers\n \n #\n \n \n def run(self):\n rate = rospy.Rate(10) # Hz\n \n while not rospy.is_shutdown():\n \n if self.detected_markers is not None:\n \n transforms = [self.transform_from_marker(m) for m in self.detected_markers.markers]\n transforms = [t for t in transforms if t is not None]\n \n self.tf_broadcaster.sendTransform(transforms)\n \n rate.sleep()\n \n # rospy.spin() shouldn't be needed since it sleeps until rospy.is_shutdown() returns True\n # Keeping it to be on the safe side\n rospy.spin()\n \n #\n \n\n def transform_from_marker(self, m):\n m_camera = PoseStamped()\n m_camera.header.frame_id = \"cf1/camera_link\"\n m_camera.pose.position= m.pose.pose.position\n m_camera.pose.orientation = m.pose.pose.orientation\n \n # Translate the marker into the base_link frame\n if not self.tf_buf.can_transform(m_camera.header.frame_id, 'cf1/base_link', rospy.Time(0.0)):\n rospy.logwarn_throttle(5.0, 'No transform from %s to cf1/base_link' % m_camera.header.frame_id)\n return\n \n\n m_base = self.tf_buf.transform(m_camera, 'cf1/base_link')\n \n # Translate the marker into the odom frame\n if not self.tf_buf.can_transform(m_base.header.frame_id, 'cf1/odom', m_base.header.stamp):\n if not self.tf_buf.can_transform(m_base.header.frame_id, 'cf1/odom', rospy.Time(0.0)):\n rospy.logwarn_throttle(5.0, 'No transform from %s to cf1/odom' % m_base.header.frame_id)\n return\n else:\n odom_transform = self.tf_buf.lookup_transform(m_base.header.frame_id, \"cf1/odom\", rospy.Time(0.0))\n m_base.header.stamp = odom_transform.header.stamp\n else:\n m_base.header.stamp = m.header.stamp\n \n \n m_odom = self.tf_buf.transform(m_base,'cf1/odom')\n \n transform = TransformStamped()\n transform.header.stamp = odom_transform.header.stamp\n transform.header.frame_id = \"cf1/odom\"\n transform.child_frame_id = \"aruco/detected\" + str(m.id)\n \n transform.transform.translation = 
m_odom.pose.position\n        transform.transform.rotation = m_odom.pose.orientation\n        \n        return transform\n    \n\n###\n    \n\nif __name__ == '__main__':\n    aruco_transform_broadcaster = ArUcoDetect('ArUcoDetect', '/aruco/markers')\n\t\n    aruco_transform_broadcaster.run()\n","sub_path":"DD2419-PRAS/milestone/scripts/alt_milestone2_aruco/ArUcoDetect.py","file_name":"ArUcoDetect.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"300165323","text":"\nimport cv2, imutils\nimport numpy as np\n\ndef recognize(images_path, training_path, pass_info = False):\n    if \"camera_1\" in training_path:\n        threshhold =75\n        area = 110\n    elif \"camera_2\" in training_path:\n        threshhold =151\n        area = 70\n    elif \"camera_3\" in training_path:\n        threshhold = 151\n        area = 70\n    ####### training part ###############\n    samples = np.loadtxt(training_path+'generalsamples.data', np.float32)\n    responses = np.loadtxt(training_path+'generalresponses.data', np.float32)\n    responses = responses.reshape((responses.size, 1))\n\n    model = cv2.ml.KNearest_create()\n    model.train(samples, cv2.ml.ROW_SAMPLE, responses)\n\n    ############################# testing part #########################\n\n    image = cv2.imread(images_path)\n    out = np.zeros(image.shape, np.uint8)\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n    thresh = cv2.threshold(blurred, threshhold, 255, cv2.THRESH_BINARY)[1]#dont change 151\n\n    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n                            cv2.CHAIN_APPROX_SIMPLE)\n    cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n    # new = cv2.drawContours(image, cnts, -1, (0, 255, 0), 3)\n    # cv2.imshow('im', new)\n    # cv2.imshow('out', out)\n    # cv2.waitKey(0)\n    content = []\n    for c in cnts:\n        # compute the center of the contour\n        M = cv2.moments(c)\n        cX = int(M[\"m10\"] / M[\"m00\"])\n        cY = int(M[\"m01\"] / M[\"m00\"])\n\n        # draw the contour and center of the shape on the image\n        cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)\n        [x, y, w, h] = cv2.boundingRect(c)\n        if cv2.contourArea(c)>area: #the higher the threshold, the smaller the area\n            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n            roi = thresh[y:y + h, x:x + w]\n            roismall = cv2.resize(roi, (10, 10))\n            roismall = roismall.reshape((1, 100))\n            roismall = np.float32(roismall)\n            retval, results, neigh_resp, dists = model.findNearest(roismall, k=1)\n            string = str(int((results[0][0])))\n            if pass_info: #returns temp between C and F symbols (camera 1)\n                content.append((string, [cX,cY]))\n            else:\n                if string != str(42):\n                    cv2.putText(out, string, (x, y + h), 0, 1, (0, 255, 0))\n                    content.append(string)\n    content.reverse()\n    return content\n","sub_path":"flask_ocr/backend/ocr_contour.py","file_name":"ocr_contour.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"133240015","text":"import os\nimport logging\n\nfrom configobj import ConfigObj\n\nROOT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')\nCONFIG_PATH = os.path.join(ROOT_PATH, 'wukong', 'config', 'gateway.cfg')\nconfig = ConfigObj(CONFIG_PATH)\n\nMASTER_IP = 'localhost'\nMASTER_TCP_PORT = 9010\nSELF_IP_INTERFACE = config.get(\"SELF_IP_INTERFACE\", 'lo')\n#SELF_IP_INTERFACE = config.get(\"SELF_IP_INTERFACE\", 'lo0') # for MacOSX\nSELF_TCP_PORT = config.get('SELF_TCP_PORT', 9001)\nCONNECTION_RETRIES = 2\nNETWORK_TIMEOUT = 3.0\n\nENABLE_AUTONET = False\nAUTONET_MAC_ADDR_LEN = 8\n\nUNITTEST_MODE = True\nUNITTEST_WAIT_SEC = 5\n\nENABLE_MONITOR = True\n#MONGODB_URL = None\nMONGODB_URL= \"mongodb://140.112.170.32:27017/Wukong\"\n\nLOG_LEVEL = logging.DEBUG\n\nTRANSPORT_DEV_ADDR = config.get('DEV_ADDR', '/dev/ttyACM0')\n# TRANSPORT_DEV_ADDR = config.get('DEV_ADDR', '/dev/cu.usbmodem1411') # for MacOSX\n# TRANSPORT_DEV_ADDR = config.get('DEV_ADDR', 'lo:9000')\n\nTRANSPORT_DEV_TYPE = config.get('GATEWAY_TYPE', 'zwave')\n# TRANSPORT_DEV_TYPE = config.get('GATEWAY_TYPE', 'zigbee')\n# TRANSPORT_DEV_TYPE = config.get('GATEWAY_TYPE', 'udp')\n\nENABLE_PROGRESSION = True\nPSERVER_IP = 'localhost'\nPSERVER_UDP_PORT = 8000","sub_path":"wukong/gateway/gtwconfig.py","file_name":"gtwconfig.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"15089834","text":"import sys\nfrom PyQt5 import QtCore, QtWidgets, QtGui\n\n\nclass Ventana_calendario(QtWidgets.QDialog):\n    def __init__(self, parent = None):\n        QtWidgets.QDialog.__init__(self, parent)\n        self.__setupUi()\n    \n    def __setupUi(self):\n        self.__contenedor = QtWidgets.QVBoxLayout()\n\n        #WIDGETS\n        self.__calendario = QtWidgets.QCalendarWidget()\n        \n        self.__contenedor.addWidget(self.__calendario)\n\n        self.setLayout(self.__contenedor)\n\nclass Ventana_movimiento_categoria(QtWidgets.QDialog):\n    signal= QtCore.pyqtSignal()\n\n    def __init__(self, parent = None):\n        QtWidgets.QDialog.__init__(self, parent)\n        self.__setupUi()\n    \n    def __setupUi(self):\n        self.__contenedor = QtWidgets.QVBoxLayout()\n\n        #WIDGETS\n        self.__line_nombre = QtWidgets.QLineEdit()\n        self.__line_descripcion = QtWidgets.QTextEdit()\n        self.__btn_registrar = QtWidgets.QPushButton(\"Aceptar\")\n\n        self.__line_nombre.setPlaceholderText(\"Nombre\")\n        self.__line_descripcion.setPlaceholderText(\"Descripción\")\n        \n        self.__contenedor.addWidget(self.__line_nombre)\n        self.__contenedor.addWidget(self.__line_descripcion)\n        self.__contenedor.addWidget(self.__btn_registrar)\n\n        self.__btn_registrar.clicked.connect(self.__on_btn_registrar)\n\n        self.setLayout(self.__contenedor)\n\n    def __on_btn_registrar(self):\n        self.signal.emit()\n        self.__limpiar()\n        self.close()\n    \n    def __limpiar(self):\n        self.__line_nombre.setText(\"\")\n        self.__line_descripcion.setText(\"\")\n    \n    def closeEvent(self, evnt):\n        self.__limpiar()\n\n    def obtener_datos(self):\n        return self.__line_nombre.text(), self.__line_descripcion.toPlainText()\n\nclass Ventana_ingresos_egreso(QtWidgets.QDialog):\n    signal = QtCore.pyqtSignal()\n\n    def __init__(self, parent = None):\n        QtWidgets.QDialog.__init__(self, parent)\n        self.__setupUi()\n    \n    def __setupUi(self):\n        self.__contenedor = QtWidgets.QVBoxLayout()\n\n        #WIDGETS\n        self.__line_monto = QtWidgets.QLineEdit()\n        self.__cbx_movimientos = QtWidgets.QComboBox()\n        self.__cbx_categorias = QtWidgets.QComboBox()\n        self.__line_descripcion = QtWidgets.QTextEdit()\n        self.__cal_fecha = QtWidgets.QCalendarWidget()\n        self.__boton = QtWidgets.QPushButton(\"Aceptar\")\n        \n        self.__line_monto.setPlaceholderText(\"Monto\")\n        self.__cbx_movimientos.setPlaceholderText(\"Movimiento\")\n        self.__cbx_categorias.setPlaceholderText(\"Categoria\")\n        self.__line_descripcion.setPlaceholderText(\"Descripcion\")\n        \n        self.__contenedor.addWidget(self.__line_monto)\n        self.__contenedor.addWidget(self.__cbx_movimientos)\n        self.__contenedor.addWidget(self.__cbx_categorias)\n        self.__contenedor.addWidget(self.__line_descripcion)\n        self.__contenedor.addWidget(self.__cal_fecha)\n        
self.__contenedor.addWidget(self.__boton)\n\n self.__boton.clicked.connect(self.__on_btn_registrar)\n\n self.setLayout(self.__contenedor)\n\n def __on_btn_registrar(self):\n self.signal.emit()\n self.__limpiar()\n self.close()\n\n def __limpiar(self):\n self.__line_monto.setText(\"\")\n self.__cbx_movimientos.clear()\n self.__cbx_categorias.clear()\n self.__line_descripcion.setText(\"\")\n self.__cal_fecha.setSelectedDate(QtCore.QDate.currentDate())\n\n def closeEvent(self, evnt):\n self.__limpiar()\n\n def configurar_menu_desplegable(self, movimientos, categorias):\n self.__cbx_movimientos.addItems(movimientos.values())\n self.__cbx_categorias.addItems(categorias.values())\n\n def obtener_datos(self):\n return self.__line_monto.text(), self.__cbx_movimientos.currentIndex(),\\\n self.__cbx_categorias.currentIndex(),self.__line_descripcion.toPlainText(),\\\n self.__cal_fecha.selectedDate().toString()\n \n\n\nclass Vista(QtWidgets.QWidget):\n \n calcular_balance = QtCore.pyqtSignal()\n agregar_ingreso = QtCore.pyqtSignal()\n agregar_egreso = QtCore.pyqtSignal()\n agregar_movimiento = QtCore.pyqtSignal()\n agregar_categoria_ingreso = QtCore.pyqtSignal()\n agregar_categoria_egreso = QtCore.pyqtSignal()\n actualizar_mov_cat_ingreso = QtCore.pyqtSignal()\n actualizar_mov_cat_egreso = QtCore.pyqtSignal()\n\n\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n\n self.ventana_calendario = Ventana_calendario()\n self.ventana_agregar_ingreso = Ventana_ingresos_egreso()\n self.ventana_agregar_egreso = Ventana_ingresos_egreso()\n self.ventana_agregar_movimiento = Ventana_movimiento_categoria()\n self.ventana_agregar_categoria_ingreso = Ventana_movimiento_categoria()\n self.ventana_agregar_categoria_egreso = Ventana_movimiento_categoria()\n\n self.ventana_agregar_categoria_egreso.signal.connect(lambda: self.agregar_categoria_egreso.emit())\n self.ventana_agregar_categoria_ingreso.signal.connect(lambda: self.agregar_categoria_ingreso.emit())\n self.ventana_agregar_movimiento.signal.connect(lambda: self.agregar_movimiento.emit())\n self.ventana_agregar_ingreso.signal.connect(lambda: [signal.emit() for signal in (self.agregar_ingreso, self.calcular_balance)])\n self.ventana_agregar_egreso.signal.connect(lambda: [signal.emit() for signal in (self.agregar_egreso, self.calcular_balance)])\n\n self.__setupUi()\n\n def __setupUi(self):\n self.__layout = QtWidgets.QFormLayout()\n\n #WIDGETS\n self.__label_balance = QtWidgets.QLabel('$0')\n self.__btn_calendario = QtWidgets.QPushButton(\"Calendario\")\n self.__btn_ingreso = QtWidgets.QPushButton(\"Nuevo Ingreso\")\n self.__btn_egreso = QtWidgets.QPushButton(\"Nuevo Egreso\")\n self.__btn_movimiento = QtWidgets.QPushButton(\"Nuevo Movimiento\")\n self.__btn_categoria_ingreso = QtWidgets.QPushButton(\"Nueva Categoria Ingreso\")\n self.__btn_categoria_egreso = QtWidgets.QPushButton(\"Nueva Categoria Egreso\")\n \n self.__layout.addRow(\"Balance: \", self.__label_balance)\n self.__layout.addRow(self.__btn_calendario)\n self.__layout.addRow(self.__btn_ingreso)\n self.__layout.addRow(self.__btn_egreso)\n self.__layout.addRow(self.__btn_movimiento)\n self.__layout.addRow(self.__btn_categoria_ingreso)\n self.__layout.addRow(self.__btn_categoria_egreso)\n\n self.__btn_calendario.clicked.connect(self.__on_btn_calendario_clicked)\n self.__btn_ingreso.clicked.connect(self.__on_btn_ingreso_clicked)\n self.__btn_egreso.clicked.connect(self.__on_btn_egreso_clicked)\n self.__btn_movimiento.clicked.connect(self.__on_btn_movimiento_clicked)\n 
self.__btn_categoria_ingreso.clicked.connect(self.__on_btn_categoria_ingreso_clicked)\n self.__btn_categoria_egreso.clicked.connect(self.__on_btn_categoria_egreso_clicked)\n \n self.setLayout(self.__layout)\n \n def __on_btn_calendario_clicked(self):\n self.ventana_calendario.exec()\n \n def __on_btn_ingreso_clicked(self):\n self.actualizar_mov_cat_ingreso.emit()\n self.ventana_agregar_ingreso.exec()\n\n def __on_btn_egreso_clicked(self):\n self.actualizar_mov_cat_egreso.emit()\n self.ventana_agregar_egreso.exec()\n\n def __on_btn_movimiento_clicked(self):\n self.ventana_agregar_movimiento.exec_()\n\n def __on_btn_categoria_ingreso_clicked(self):\n self.ventana_agregar_categoria_ingreso.exec_()\n\n def __on_btn_categoria_egreso_clicked(self):\n self.ventana_agregar_categoria_egreso.exec_()\n\n def actualizar_balance(self, valor):\n self.__label_balance.setText(str(valor))\n\nif __name__ == \"__main__\":\n def fun():\n datos = vista.ventana_agregar_ingreso.obtener_datos()\n id = list(men1.values())\n print(id[datos[3]])\n\n app = QtWidgets.QApplication(sys.argv)\n vista = Vista()\n men1 = {\"hola\": 0, \"camaleon\": 1}\n men2 = {\"pantufla\": 0, \"ladrillo\": 1}\n vista.ventana_agregar_ingreso.configurar_menu_desplegable(men1, men2)\n vista.agregar_ingreso.connect(fun)\n vista.show()\n app.exec()","sub_path":"vista/finanzas.py","file_name":"finanzas.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390256749","text":"import panel as pn\nfrom nltk.stem import (PorterStemmer, SnowballStemmer)\nfrom nltk.tokenize import RegexpTokenizer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\nfrom app.test_train import trainer\n\nimport io\nimport pandas as pd\nimport param\n\n\nclass PreProcessor(param.Parameterized):\n \n # df will be the variable holding the dataframe of text\n df = param.DataFrame()\n # title to display for each tab\n name_of_page = param.String(default = 'Name of page')\n # dataframe to display.\n display_df = param.DataFrame(default = pd.DataFrame())\n # stopword_df is the dataframe containing the stopewords\n stopword_df = param.DataFrame(default = pd.DataFrame())\n \n stopwords = param.List(default = [])\n X = param.Array(default = None)\n \n ready = param.Boolean(\n default=False,\n doc='trigger for moving to the next page',\n ) \n \n def __init__(self, **params):\n super().__init__(**params)\n \n \n \n # button for the pre-processing page\n self.continue_button = pn.widgets.Button(name='Continue',\n width = 100,\n button_type='primary')\n\n self.continue_button.on_click(self.continue_ready)\n \n # load text widgets \n self.header_checkbox = pn.widgets.Checkbox(name='Header included in file')\n self.load_file = pn.widgets.FileInput()\n self.load_file.link(self.df, callbacks={'value': self.load_df})\n self.header_checkbox = pn.widgets.Checkbox(name='Header included in file')\n \n # tokenize widgets\n self.search_pattern_input = pn.widgets.TextInput(name='Search Pattern', value = '\\w+', width = 100)\n \n # remove stop words widgets\n self.load_words_button = pn.widgets.FileInput()\n self.load_words_button.link(self.stopwords, callbacks={'value': self.load_stopwords})\n \n # stem widgets\n self.stem_choice = pn.widgets.Select(name='Select', options=['Porter', 'Snowball'])\n \n # embedding widgets\n \n self.we_model = pn.widgets.Select(name='Select', options=['SKLearn Count Vectorizer'])\n\n \n @param.output('X', 'display_df')\n def output(self):\n return self.X, 
self.display_df\n \n \n @param.depends('display_df')\n def df_pane(self):\n return pn.WidgetBox(self.display_df,\n height = 300,\n width = 400)\n \n # load text page functions\n #-----------------------------------------------------------------------------------------------------\n def load_df(self, df, event):\n info = io.BytesIO(self.load_file.value)\n if self.header_checkbox.value==True:\n self.df = pd.read_csv(info)\n else:\n self.df = pd.read_csv(info, sep='\\n', header = None, names=['text'])\n \n self.display_df = self.df\n \n def load_text_page(self):\n helper_text = (\n \"This simple Sentiment Analysis NLP app will allow you to select a few different options \" +\n \"for some preprocessing steps to prepare your text for testing and training. \" +\n \"It will then allow you to choose a model to train, the percentage of data to \" +\n \"preserve for test, while the rest will be used to train the model. Finally, \" +\n \"some initial metrics will be displayed to determine how well the model did to predict \" +\n \"the testing results.\" +\n \" \" +\n \"Please choose a csv file that contains lines of text to analyze. This text should \" +\n \"have a text column as well as a sentiment column. If there is a header included in the file, \" +\n \"make sure to check the header checkbox.\"\n )\n return pn.Row(\n pn.Column(\n pn.pane.Markdown(f'##Load Text:'),\n pn.Column(\n helper_text,\n self.header_checkbox,\n self.load_file\n ),\n ),\n pn.Column(\n pn.Spacer(height=52),\n self.df_pane,\n \n )\n \n )\n\n #-----------------------------------------------------------------------------------------------------\n \n # tokenize page options\n #-----------------------------------------------------------------------------------------------------\n def tokenize_option_page(self):\n \n help_text = (\"Tokenization will break your text into a list of single articles \" +\n \"(ex. ['A', 'cat', 'walked', 'into', 'the', 'house', '.']). Specify a regular \" +\n \"expression (regex) search pattern to use for splitting the text.\")\n \n return pn.Column(\n pn.pane.Markdown(f'##Tokenize options:'),\n pn.WidgetBox(help_text, self.search_pattern_input,\n height = 300,\n width = 300\n \n )\n )\n \n #-----------------------------------------------------------------------------------------------------\n \n \n # remove stopwords page \n #-----------------------------------------------------------------------------------------------------\n \n def remove_stopwords_page(self):\n \n help_text = (\n \"Stop words are words that do not add any value to the sentiment of the text. \" +\n \"Removing them may improve your sentiment results. You may load a list of stop words \" +\n \"to exclude from your text.\"\n )\n return pn.Row(\n pn.Column(\n pn.pane.Markdown(f'##Load Stopwords:'),\n pn.WidgetBox(help_text, self.load_words_button,\n height = 300,\n width = 300\n \n )\n ),\n pn.Column(\n pn.Spacer(height=52),\n pn.WidgetBox(self.stopword_df,\n height = 300,\n width = 400)\n \n )\n )\n \n def load_stopwords(self, stopwords, event):\n info = io.BytesIO(self.load_words_button.value)\n self.stopwords = pd.read_pickle(info)\n self.stopword_df = pd.DataFrame({'stop words': self.stopwords})\n\n #-----------------------------------------------------------------------------------------------------\n \n # stemming page \n #-----------------------------------------------------------------------------------------------------\n \n def stemmer_page(self):\n help_text = (\n \"Stemming is a normalization step for the words in your text. 
Something that is \" +\n \"plural should probably still be clumped together with a singular version of a word, \" +\n \"for example. Stemming will basically remove the ends of words. Here you can choose \" + \n \"between a Porter Stemmer or Snowball Stemmer. Porter is a little less aggressive than \" +\n \"Snowball, however, Snowball is considered a slight improvement over Porter.\"\n )\n return pn.Column(\n pn.pane.Markdown(f'##Stemmer options:'),\n pn.WidgetBox(help_text, self.stem_choice,\n height = 300,\n width = 300)\n )\n \n #-----------------------------------------------------------------------------------------------------\n \n # embedding page \n #-----------------------------------------------------------------------------------------------------\n \n def word_embedding_page(self):\n \n help_text = (\"Embedding the process of turning words into numerical vectors. \" +\n \"There have been several algorithms developed to do this, however, currently in this \" +\n \"app, the sklearn count vectorizer is available. This algorithm will return a sparse \" +\n \"matrix represention of all the words in your text.\"\n )\n \n \n \n return pn.Column(\n pn.pane.Markdown(f'##Choose embedding model:'),\n pn.WidgetBox(help_text, self.we_model,\n height = 300,\n width = 300\n \n )\n \n )\n \n #-----------------------------------------------------------------------------------------------------\n \n def continue_ready(self, event):\n\n # Set up for tokenization\n tokenizer = RegexpTokenizer(self.search_pattern_input.value)\n\n # Set up for stemming\n if self.stem_choice.value == 'Porter':\n stemmer = PorterStemmer() \n else:\n stemmer = SnowballStemmer()\n\n # Set up for embedding\n if self.we_model.value == 'SKLearn Count Vectorizer':\n # Create a vectorizer instance\n vectorizer = CountVectorizer(max_features=1000)\n\n corpus = []\n #loop through each line of data\n for n in range(len(self.display_df)): \n sentence = self.display_df.iloc[n].text\n\n #1. Tokenize\n tokens = tokenizer.tokenize(sentence)\n\n #2. remove stop words\n tokens_no_sw = [word for word in tokens if not word in self.stopwords]\n\n #3. 
stem the remaining words\n stem_words = [stemmer.stem(x) for x in tokens_no_sw]\n\n #Join the words back together as one string and append this string to your corpus.\n corpus.append(' '.join(stem_words))\n\n X = vectorizer.fit_transform(corpus).toarray()\n labels = self.display_df['sentiment']\n\n xlist = []\n for n in range(len(X)):\n xlist.append(list(X[n]))\n self.X = X\n self.display_df = pd.DataFrame({'embeddings': xlist, 'sentiment': labels})\n \n self.ready = True\n \n def panel(self):\n \n return pn.Column(\n pn.Tabs(\n ('Load Text', self.load_text_page),\n ('Tokenize', self.tokenize_option_page),\n ('Remove Stopwords', self.remove_stopwords_page),\n ('Stem', self.stemmer_page),\n ('Embed', self.word_embedding_page)\n ),\n self.continue_button\n )\n \n","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"303225685","text":"import re\nimport os\nfrom functools import reduce\nimport tkinter\nfrom tkinter import messagebox\nimport json\nfrom collections import defaultdict\nfrom operator import add\n\nimport numpy as np\nfrom scipy import ndimage as ndi\nimport h5py as h5\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\")\nfrom matplotlib import pyplot as plt\n\n\n### SETTINGS\n\n# folder containing the raw files\nraw_folder = '/Users/david/Desktop'\n# file to save the sorting to\nout_file = '/Users/david/Desktop/sorting0.json'\n\n# at which level to sort\nupper_level = 'sted'\n\n# plotting color range\nscale_red = (0, 20)\nscale_green = (0, 30)\nscale = (scale_red, scale_green)\n\n### DEFS\n\ndef get_data(acquisition, configurations=(0,), channels=(0, 1)):\n res = []\n for c in configurations:\n for ch in channels:\n dta = np.squeeze(np.array(acquisition[str(c)][str(ch)]))\n res.append(dta)\n return tuple(res)\n\n\ndef levels_to_str(levels):\n return '_'.join(reduce(add, levels))\n\n\ndef plot_rgb(data1, data2, scales=((0, 255), (0,255)), project_fun=np.sum, smooth=False):\n\n f, axs = plt.subplots(3, 2)\n (ax00, ax01, ax10, ax11, ax20, ax21) = axs.ravel()\n\n if smooth:\n data1 = ndi.gaussian_filter(data1, 1.0)\n data2 = ndi.gaussian_filter(data2, 1.0)\n\n xy_rgb = np.dstack(\n [project_fun(data1, axis=0), project_fun(data2, axis=0), np.zeros_like(project_fun(data1, axis=0))])\n xy_rgb[:,:,0] = np.clip(xy_rgb[:,:,0], scales[0][0], scales[0][1])\n xy_rgb[:,:,1] = np.clip(xy_rgb[:,:,1], scales[1][0], scales[1][1])\n xy_rgb[:,:,0] = np.interp(xy_rgb[:,:,0], scales[0], (0, 255))\n xy_rgb[:,:,1] = np.interp(xy_rgb[:,:,1], scales[1], (0, 255))\n xy_rgb = xy_rgb.astype(np.uint8)\n ax00.imshow(xy_rgb)\n ax00.set_title('XY')\n\n xz_rgb = np.dstack(\n [project_fun(data1, axis=1), project_fun(data2, axis=1), np.zeros_like(project_fun(data1, axis=1))])\n xz_rgb[:,:,0] = np.clip(xz_rgb[:,:,0], scales[0][0], scales[0][1])\n xz_rgb[:,:,1] = np.clip(xz_rgb[:,:,1], scales[1][0], scales[1][1])\n xz_rgb[:,:,0] = np.interp(xz_rgb[:,:,0], scales[0], (0, 255))\n xz_rgb[:,:,1] = np.interp(xz_rgb[:,:,1], scales[1], (0, 255))\n xz_rgb = xz_rgb.astype(np.uint8)\n ax01.imshow(xz_rgb)\n ax01.set_title('XZ')\n\n '''\n yz_rgb = np.dstack(\n [project_fun(data1, axis=2), project_fun(data2, axis=2), np.zeros_like(project_fun(data1, axis=2))])\n yz_rgb = np.clip(yz_rgb, scales[0], scales[1]).astype(np.int)\n yz_rgb = np.interp(yz_rgb, scales, (0, 255)).astype(np.int)\n '''\n\n ax10.imshow(xy_rgb[:,:,0], cmap='gray')\n ax10.set_title('Channel 1')\n\n ax11.imshow(xz_rgb[:,:,0], 
cmap='gray')\n ax11.set_title('Channel 1')\n\n ax20.imshow(xy_rgb[:,:,1], cmap='gray')\n ax20.set_title('Channel 2')\n\n ax21.imshow(xz_rgb[:,:,1], cmap='gray')\n ax21.set_title('Channel 2')\n\n\n### MAIN\n\np = re.compile('_?(.+?)_?([0-9]+)')\nquality = defaultdict(dict)\n\nif os.path.exists(out_file):\n with open(out_file, 'r') as jfd:\n quality.update(json.load(jfd))\n\nroot = tkinter.Tk()\nroot.withdraw()\n\ntry:\n for file in [os.path.join(raw_folder, f) for f in os.listdir(raw_folder) if f.endswith('.h5')]:\n with h5.File(file, 'r') as fd:\n filename = file.rsplit(os.sep)[-1]\n ex = fd['experiment']\n for k in ex.keys():\n levels = p.findall(k)\n is_level = reduce(lambda o, n: o or n[0] == upper_level, levels, False)\n lvlstr = levels_to_str(levels)\n\n if lvlstr in quality[filename]:\n continue\n\n if is_level:\n\n data_c0, data_c1 = get_data(ex[k])\n\n plot_rgb(data_c0, data_c1, scales=scale, project_fun=np.max)\n\n plt.draw()\n plt.show(block=False)\n\n decision = messagebox.askyesnocancel(\"Good or bad?\")\n plt.close('all')\n\n if decision is None:\n raise InterruptedError()\n\n quality[filename][lvlstr] = decision\n\nexcept InterruptedError:\n print('Interrupted by user.')\nexcept Exception as e:\n print(e)\n\n# save file\nfinally:\n with open(out_file, 'w+') as jfd:\n json.dump(quality, jfd, indent=1)\n\nroot.destroy()\n","sub_path":"hdf_sorting.py","file_name":"hdf_sorting.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436973077","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## TUGAS - 1\n\n# ### Nama : Hafizh Akbar Alam\n\n# ## Soal \n\n# ### Nomor 1\n\n# In[89]:\n\n\nNama = \"Hafizh Akbar Alam\"\nUmur = int(22)\nTinggi = float(183.0)\n\n\n# In[90]:\n\n\nprint(f'Nama Saya adalah {Nama} Umur saya {Umur} tahun , dan tinggi saya adalah {Tinggi} cm')\n\n\n# #### Nomor 2\n\n# In[92]:\n\n\nr = float(input('Masukkan jari-jari = '))\npi = 22/7\nLuas = float(pi*r*r)\nprint(f'Luas lingkaran dengan jari-jari {r} cm adalah {Luas} cm\\u00b2')\nprint('Luas lingkaran setelah dibulatkan adalah ' \" %.2f \" %Luas, 'cm\\u00b2')\n\n\n# #### Nomor 3\n\n# In[93]:\n\n\nNilai_Ujian_Teori = float(input('Masukkan Nilai Ujian Teori = '))\nNilai_Ujian_Praktek = float(input('Masukkan Nilai Ujian Praktek = '))\n\nif (Nilai_Ujian_Teori >= 70.0) and (Nilai_Ujian_Praktek >= 70.0):\n print('Selamat,Anda Lulus !')\nelif (Nilai_Ujian_Teori >= 70.0) and (Nilai_Ujian_Praktek < 70.0):\n print('Anda harus mengulang ujian praktek')\nelif (Nilai_Ujian_Teori < 70.0) and (Nilai_Ujian_Praktek >= 70.0):\n print('Anda harus mengulang ujian teori')\nelse:\n print('Anda harus mengulang ujian teori dan praktik')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Tugas - 1.py","file_name":"Tugas - 1.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650745320","text":"import numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport pygmo as pg\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\n###################################################################################################\r\n\"\"\"\r\nInitial Recommendation list with C items for each 
user\r\n\"\"\"\r\nTargets=np.genfromtxt('.../TestML.csv',delimiter=',')\r\nTrainings=np.genfromtxt('.../TrainML.csv',delimiter=',')\r\nPredictions=np.genfromtxt('.../PredictionML.csv',delimiter=',')\r\nRating_Matrix=np.genfromtxt('.../RatingMatrix.csv',delimiter=',')\r\n\r\nC=50 #lenght of initial recommendation list for each user\r\n\r\nnonzero=[]\r\nfor i in range(len(Targets)):\r\n nonzero.append(np.count_nonzero(Targets[i]))\r\n\r\nPredictions_sort=np.argsort(-Predictions, axis=1)\r\nPredictions_sort_value = np.zeros((len(Rating_Matrix), len(Rating_Matrix.T)))\r\nfor i in range(len(Predictions)):\r\n Predictions_sort_value[i] = Predictions[i][Predictions_sort[i]]\r\n\r\nTargets_sort_value = np.zeros((len(Rating_Matrix), len(Rating_Matrix.T)))\r\nfor i in range(len(Targets)):\r\n Targets_sort_value[i] = Targets[i][Predictions_sort[i]]\r\n\r\nfor i in range(len(Predictions_sort)):\r\n for j in range(len(Predictions_sort.T)):\r\n if Predictions_sort_value[i][j]==0:\r\n Predictions_sort[i][j]=-1 #no item to be recommended when reach -1\r\n\r\nrecommend=np.zeros((len(Rating_Matrix), C))\r\nfor i in range(C):\r\n recommend[:, ][:, i]=Predictions_sort[:, ][:, i]\r\n\r\n\r\n#Predictions[0][int(recommend[0][0])] #the predictions of those items\r\n###################################################################################################\r\n\"\"\"\r\nUnpopularity:\r\nthrough mean and variance T.Jambor(2010)\r\n\"\"\"\r\nmean = np.true_divide(Trainings.sum(0),(Trainings!=0).sum(0))\r\n\r\nc2 = Trainings[:]\r\nc2 = c2.astype('float')\r\nc2[c2 == 0]=np.NaN\r\nVar = np.nanvar(c2,axis=0)\r\n\r\nUnPop = 1/(mean*(Var+1)**2)\r\n###################################################################################################\r\n\"\"\"\r\nItem Provider binary matrix\r\n\"\"\"\r\nItemProvider = pd.read_csv('.../prov_ML1M.csv',delimiter=',',header = None,encoding='latin-1') # through IMDBpython.py\r\nprov = np.array(ItemProvider)\r\n###################################################################################################\r\n\"\"\"\r\nMOEA\r\n\"\"\"\r\n###\r\ndef selection(pop): # k: lenght of list\r\n pop1 = list(filter(lambda x: x != -1, pop)) # pop -1 s\r\n parent1 = random.sample(pop1, k)\r\n parent2 = random.sample(pop1, k)\r\n return parent1 ,parent2\r\n###\r\ndef fitness(Predictions_row, p1, p2, Targets_row):\r\n\r\n Sigma_pred1 = Predictions_row[np.array(p1).astype(int)].mean()\r\n Sigma_pred2 = Predictions_row[np.array(p2).astype(int)].mean()\r\n Sigma_Target1 = Targets_row[np.array(p1).astype(int)].mean()\r\n Sigma_Target2 = Targets_row[np.array(p2).astype(int)].mean()\r\n Sigma_unpop1 = UnPop[np.array(p1).astype(int)].mean()\r\n Sigma_unpop2 = UnPop[np.array(p2).astype(int)].mean()\r\n\r\n providerNum = 8\r\n pro1 = prov[np.array(p1).astype(int)]\r\n pro2 = prov[np.array(p2).astype(int)]\r\n a=[]\r\n b=[]\r\n for j in range(len(prov.T)):\r\n a.append(pro1[0][j] | pro1[1][j] | pro1[2][j] | pro1[3][j] | pro1[4][j])\r\n b.append(pro2[0][j] | pro2[1][j] | pro2[2][j] | pro2[3][j] | pro2[4][j])\r\n # print(\"p_coverage for p1= \",sum(a)/len(prov.T))\r\n p1_cov = sum(a)\r\n p2_cov = sum(b)\r\n if abs(p1_cov - p2_cov) <= 3:\r\n if (Sigma_pred1 >= Sigma_pred2 and Sigma_unpop1 >= Sigma_unpop2) or (\r\n Sigma_pred1 > Sigma_pred2 and Sigma_unpop1 < Sigma_unpop2):\r\n Sigma_pred = Sigma_pred1\r\n Sigma_Target = Sigma_Target1\r\n Sigma_unpop = Sigma_unpop1\r\n sigma_pFair = p1_cov\r\n p = p1\r\n elif (Sigma_pred2 >= Sigma_pred1 and Sigma_unpop2 >= Sigma_unpop1) or (\r\n Sigma_pred1 < 
Sigma_pred2 and Sigma_unpop1 > Sigma_unpop2):\r\n Sigma_pred = Sigma_pred2\r\n Sigma_Target = Sigma_Target2\r\n Sigma_unpop = Sigma_unpop2\r\n sigma_pFair = p2_cov\r\n p = p2\r\n elif (p1_cov - p2_cov) >= 4:\r\n Sigma_pred = Sigma_pred1\r\n Sigma_Target = Sigma_Target1\r\n Sigma_unpop = Sigma_unpop1\r\n sigma_pFair = p1_cov\r\n p = p1\r\n elif (p2_cov - p1_cov) >= 4:\r\n Sigma_pred = Sigma_pred2\r\n Sigma_Target = Sigma_Target2\r\n Sigma_unpop = Sigma_unpop2\r\n sigma_pFair = p2_cov\r\n p = p2\r\n\r\n\r\n return Sigma_pred, Sigma_unpop, sigma_pFair, p, Sigma_Target\r\n###\r\ndef crossover(p1, p2):\r\n k=5 # lenght of list\r\n off1 = []\r\n off2 = []\r\n pointer = np.random.randint(1,k)\r\n off1 = p1[:pointer] + p2[pointer:]\r\n off2 = p2[:pointer] + p1[pointer:]\r\n\r\n # eliminate duplicates\r\n while True:\r\n if len(off1) != len(set(off1)):\r\n dupes = [x for n, x in enumerate(off1) if x in off1[:n]] # duplicate elements\r\n # print(dupes)\r\n for h in range(len(dupes)): # the indexes of duplicates\r\n index = [i for i, x in enumerate(off1) if x == dupes[h]]\r\n for g in range(len(index)):\r\n if off1[index[g]] != p1[index[g]]:\r\n off1[index[g]] = p1[index[g]]\r\n dupes = [x for n, x in enumerate(off1) if x in off1[:n]]\r\n if dupes == []:\r\n break\r\n\r\n while True:\r\n if len(off2) != len(set(off2)):\r\n dupes = [x for n, x in enumerate(off2) if x in off2[:n]] # duplicate elements\r\n # print(dupes)\r\n for h in range(len(dupes)): # the indexes of duplicates\r\n index = [i for i, x in enumerate(off2) if x == dupes[h]]\r\n for g in range(len(index)):\r\n if off2[index[g]] != p2[index[g]]:\r\n off2[index[g]] = p2[index[g]]\r\n dupes = [x for n, x in enumerate(off2) if x in off2[:n]]\r\n if dupes == []:\r\n break\r\n\r\n return off1, off2\r\n###\r\ndef mutation(pop, off1, off2, p1, p2):\r\n k = 5 # lenght of list\r\n pop1 = list(filter(lambda x: x != -1, pop)) # remove -1 s\r\n pointer = np.random.randint(0, k)\r\n while True:\r\n a = random.sample(list(pop1), 1)[0]\r\n if (a not in p1) and (a not in p2):\r\n off1[pointer] = a\r\n break\r\n\r\n pointer = np.random.randint(0, k)\r\n while True:\r\n a = random.sample(list(pop1), 1)[0]\r\n if (a not in p1) and (a not in p2):\r\n off2[pointer] = a\r\n break\r\n\r\n return off1, off2\r\n###################################################################################################\r\n\"\"\"\r\nmain\r\n\"\"\"\r\nk=5 # length of list\r\ngens = 30 # number of generations\r\nNP = 80 #size of population\r\nproviderNum = 8 # or len(prov.T) : number of providers (movie companies)\r\n\r\nSigma_pred = np.zeros((NP,len(recommend)))\r\nSigma_Target = np.zeros((NP,len(recommend)))\r\nSigma_unpop = np.zeros((NP,len(recommend)))\r\nSigma_pFair = np.zeros((NP,len(recommend)))\r\nlists=[]\r\n\r\n\r\nfor y in range(NP):\r\n print(y)\r\n for i in range(len(recommend)):\r\n #print(i)\r\n ui = recommend[i]\r\n if np.count_nonzero(ui + 1) >= 11: # ui+1 : because of -1 s\r\n pp = selection(ui)\r\n p1 = pp[0]\r\n p2 = pp[1]\r\n # fitness(Predictions[i], p1, p2)\r\n f = fitness(Predictions[i], p1, p2, Targets[i])\r\n Sigma_pred_max = f[0]\r\n Sigma_unpop_max = f[1]\r\n Sigma_pFair_max = f[2]\r\n lists_max = f[3]\r\n Sigma_Target_max = f[4]\r\n\r\n for j in range(gens):\r\n #print(j)\r\n # crossover (two offsprings)\r\n c = crossover(p1, p2)\r\n off1 = c[0]\r\n off2 = c[1]\r\n # mutation (two offsprings)\r\n m = mutation(ui, off1, off2, p1, p2)\r\n off1 = m[0]\r\n off2 = m[1]\r\n s = fitness(Predictions[i], off1, off2, Targets[i])\r\n if (s[2]-Sigma_pFair_max) 
>= 3:\r\n Sigma_pred_max = s[0]\r\n Sigma_unpop_max = s[1]\r\n Sigma_pFair_max = s[2]\r\n lists_max = s[3]\r\n elif (s[0] >= Sigma_pred_max and s[1] >= Sigma_unpop_max) or (s[0] > Sigma_pred_max and s[1] < Sigma_unpop_max):\r\n Sigma_pred_max = s[0]\r\n Sigma_unpop_max = s[1]\r\n Sigma_pFair_max = s[2]\r\n lists_max = s[3]\r\n # print(\"Sigma_pred_max: {}\".format(Sigma_pred_max), \" Sigma_unpop_max: {}\".format(Sigma_unpop_max), \" lists_max: {}\".format(lists_max))\r\n p1 = off1\r\n p2 = off2\r\n Sigma_pred[y][i] = Sigma_pred_max\r\n Sigma_unpop[y][i] = Sigma_unpop_max\r\n Sigma_Target[y][i] = Sigma_Target_max\r\n Sigma_pFair[y][i] = Sigma_pFair_max\r\n lists.extend(lists_max)\r\n\r\n elif np.count_nonzero(ui + 1) <= k:\r\n lists_max = ui[np.nonzero(ui+1)]\r\n Sigma_pred[y][i] = Predictions[i][lists_max.astype(int)].mean()\r\n Sigma_Target[y][i] = Targets[i][lists_max.astype(int)].mean()\r\n Sigma_unpop[y][i] = UnPop[lists_max.astype(int)].mean()\r\n\r\n pro = prov[lists_max.astype(int)]\r\n a = []\r\n for j in range(len(prov.T)):\r\n kk=np.count_nonzero(ui + 1)\r\n if kk==5:\r\n a.append(pro[0][j] | pro[1][j] | pro[2][j] | pro[3][j] | pro[4][j])\r\n elif kk==4:\r\n a.append(pro[0][j] | pro[1][j] | pro[2][j] | pro[3][j])\r\n elif kk==3:\r\n a.append(pro[0][j] | pro[1][j] | pro[2][j])\r\n elif kk==2:\r\n a.append(pro[0][j] | pro[1][j])\r\n elif kk==1:\r\n a.append(pro[0][j])\r\n elif kk==0:\r\n a.append(0)\r\n p_cov = sum(a)\r\n Sigma_pFair[y][i] = p_cov\r\n\r\n lists.extend(lists_max)\r\n zeroo = k-len(lists_max)\r\n lists.extend([-1] * zeroo)\r\n\r\n else:\r\n lists_max = ui[:k]\r\n Sigma_pred[y][i] = Predictions[i][lists_max.astype(int)].mean()\r\n Sigma_Target[y][i] = Targets[i][lists_max.astype(int)].mean()\r\n Sigma_unpop[y][i] = UnPop[lists_max.astype(int)].mean()\r\n\r\n pro = prov[lists_max.astype(int)]\r\n a = []\r\n for j in range(len(prov.T)):\r\n a.append(pro[0][j] | pro[1][j] | pro[2][j] | pro[3][j] | pro[4][j])\r\n p_cov = sum(a)\r\n Sigma_pFair[y][i] = p_cov\r\n\r\n lists.extend(lists_max)\r\n\r\nlistss=np.array(lists).reshape(NP,len(recommend)*k)\r\n\r\nSigma_pred[np.isnan(Sigma_pred)] = 0\r\nSigma_unpop[np.isnan(Sigma_unpop)] = 0\r\nSigma_Target[np.isnan(Sigma_Target)] = 0\r\nSigma_pFair[np.isnan(Sigma_pFair)] = 0\r\n\r\n#show the generated lists for user 0\r\nuser_index=0\r\nli = listss[:,user_index*k:(user_index*k)+k]\r\nlis = list(filter(lambda x: x != -1, np.unique(li)))\r\n#print(\"lists generated by GA for user {} is \\n{}: \".format(user_index, li))\r\n#print(\"and {} unique items are: {}\".format(len(lis),lis))\r\n\r\n#####################################################################################################\r\n\"\"\"\r\nPareto Front\r\n\"\"\"\r\ndef identify_pareto(points):\r\n # Count number of items\r\n population_size = points.shape[0]\r\n # Create a NumPy index for scores on the pareto front (zero indexed)\r\n population_ids = np.arange(population_size)\r\n # Create a starting list of items on the Pareto front\r\n # All items start off as being labelled as on the Parteo front\r\n pareto_front = np.ones(population_size, dtype=bool)\r\n # Loop through each item. This will then be compared with all other items\r\n for i in range(population_size):\r\n # Loop through all other items\r\n for j in range(population_size):\r\n # Check if our 'i' pint is dominated by out 'j' point\r\n if all(points[j] >= points[i]) and any(points[j] > points[i]):\r\n # j dominates i. 
Label 'i' point as not on Pareto front\r\n pareto_front[i] = 0\r\n # Stop further comparisons with 'i' (no more comparisons needed)\r\n break\r\n # Return ids of scenarios on pareto front\r\n return population_ids[pareto_front]\r\n\r\nPF_total=[]\r\nfor pf in range(len(Sigma_pred.T)): #users\r\n r = Sigma_pred[:,][:,pf]\r\n un = Sigma_unpop[:,][:,pf]\r\n fair = Sigma_pFair[:, ][:, pf]\r\n\r\n PF = []\r\n points = []\r\n for ii in range(len(r)): #NPs\r\n points.append([-r[ii], -un[ii], -fair[ii]])\r\n if len(np.unique(points))== 2:\r\n PF.append(np.array([0]))\r\n else:\r\n #pareto = identify_pareto(-np.array(points))\r\n #PF.append(-np.array(points)[pareto])\r\n PF.append(identify_pareto(-np.array(points)))\r\n PF_total.append([PF])\r\n\r\n# plot PFs for user 0\r\npf=0\r\n\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\n\r\nn = list(np.arange(1, NP + 1))\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection='3d')\r\nr = Sigma_pred[:, ][:, pf]\r\nun = Sigma_unpop[:, ][:, pf]\r\nfair = Sigma_pFair[:, ][:, pf]\r\nprint(\"PFs for user {} is \\n{}: \".format(pf, PF_total[pf][0][0]))\r\n\r\nax.scatter(r, un, fair, c='r', marker='o')\r\nfor i, txt in enumerate(n):\r\n ax.annotate(txt, (r[i], un[i]))\r\n\r\nax.scatter(r[PF_total[pf][0][0]], un[PF_total[pf][0][0]], fair[PF_total[pf][0][0]], label='PF', s=100, marker=(5, 1))\r\n#ax.plot(r[PF_total[pf][0][0]], un[PF_total[pf][0][0]], fair[PF_total[pf][0][0]], color='r')\r\nax.set_xlabel('prediction average')\r\nax.set_ylabel('unpopularity average')\r\nax.set_zlabel('provider coverage')\r\nplt.legend()\r\nplt.show()\r\n#####################################################################################################\r\n\r\n\"\"\"\r\nevaluation for CF model : accuracy\r\n\"\"\"\r\nCF=recommend[:,:k]\r\nmae = []\r\nfor i in range(len(recommend)):\r\n mae.append(mean_absolute_error(Predictions[i][CF[i].astype(int)], Targets[i][CF[i].astype(int)]))\r\nMAE_CF = np.array(mae).mean()\r\nprint(\"MAE for CF (1 top-k list for each user wrt accuracy only): \",MAE_CF)\r\n\r\n#####################################################################################################\r\n\"\"\"\r\nevaluation for MOEA model :accuracy\r\n\"\"\"\r\n#show the generated lists for user 0\r\nuser_index=0\r\nli = listss[:,user_index*k:(user_index*k)+k]\r\nlis = list(filter(lambda x: x != -1, np.unique(li)))\r\nprint(\"PF lists generated for user {} is \\n{}: \".format(user_index, li[PF_total[user_index][0][0]]))\r\n\r\nmae1 = []\r\nfor i in range(len(Predictions)):\r\n user_index = i\r\n li = listss[:, user_index * k:(user_index * k) + k]\r\n a = Predictions[i][li[PF_total[i][0][0]].astype(int)]\r\n b = Targets[i][li[PF_total[i][0][0]].astype(int)]\r\n mae1.append(mean_absolute_error(a,b))\r\nMAE_MOEA_avg = np.array(mae1).mean()\r\nprint(\"MAE for MOEA lists (average): \",MAE_MOEA_avg)\r\n\r\nmae_min = []\r\nfor i in range(len(Predictions)):\r\n user_index = i\r\n li = listss[:, user_index * k:(user_index * k) + k]\r\n a = Predictions[i][li[PF_total[i][0][0]].astype(int)]\r\n b = Targets[i][li[PF_total[i][0][0]].astype(int)]\r\n mae_eachuser = []\r\n for j in range(len(PF_total[user_index][0][0])):\r\n mae_eachuser.append(mean_absolute_error(a[j], b[j]))\r\n mae_min.append(min(mae_eachuser))\r\nMAE_MOEA_min = np.array(mae_min).mean()\r\nprint(\"MAE for MOEA lists (Minimum): \",MAE_MOEA_min)\r\n\r\nmae_max = []\r\nfor i in range(len(Predictions)):\r\n user_index = i\r\n li = listss[:, user_index * k:(user_index * k) + k]\r\n a = 
Predictions[i][li[PF_total[i][0][0]].astype(int)]\r\n b = Targets[i][li[PF_total[i][0][0]].astype(int)]\r\n mae_eachuser = []\r\n for j in range(len(PF_total[user_index][0][0])):\r\n mae_eachuser.append(mean_absolute_error(a[j], b[j]))\r\n mae_max.append(max(mae_eachuser))\r\nMAE_MOEA_max = np.array(mae_max).mean()\r\nprint(\"MAE for MOEA lists (Maximum): \",MAE_MOEA_max)\r\n\r\n#####################################################################################################\r\n\"\"\"\r\nevaluation for PF provider coverage\r\n\"\"\"\r\nproviderNum=8\r\n\r\np_covv=[]\r\nfor i in range(len(PF_total)):\r\n li = listss[:, i * k:(i * k) + k]\r\n #lis = np.array(list(filter(lambda x: x != -1, np.unique(li))))\r\n ll = []\r\n for j in range(len(PF_total[i][0][0])):\r\n ll=list(li[PF_total[i][0][0]][0])\r\n ll.extend(list(li[PF_total[i][0][0]][j]))\r\n lll=list(filter(lambda x: x != -1, np.unique(ll)))\r\n proo = prov[np.array(lll).astype(int)]\r\n proo_df = pd.DataFrame(proo, columns=('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8'))\r\n p_covv.append(np.count_nonzero(proo_df.sum(axis=0)))\r\n\r\nprint(\"average of PF provider coverage for all users\", (np.array(p_covv)/providerNum *100).mean())\r\n#####################################################################################################\r\n\"\"\"\r\nevaluation for CF provider coverage\r\n\"\"\"\r\n\r\np_covv_cf=[]\r\nfor i in range(len(PF_total)):\r\n cf=recommend[i][:k]\r\n lll=list(filter(lambda x: x != -1, np.unique(cf)))\r\n proo = prov[np.array(lll).astype(int)]\r\n proo_df = pd.DataFrame(proo, columns=('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8'))\r\n p_covv_cf.append(np.count_nonzero(proo_df.sum(axis=0)))\r\n\r\nprint(\"average of CF provider coverage for all users\", (np.array(p_covv_cf)/providerNum *100).mean())\r\n#####################################################################################################\r\n\"\"\"\r\nevaluation for CF long tail coverage\r\n\"\"\"\r\nper=0.7\r\nLT_cov_CF=[]\r\nfor i in range(len(PF_total)):\r\n cf = recommend[i][:k]\r\n lll = list(filter(lambda x: x != -1, np.unique(cf)))\r\n unpop = UnPop[np.array(lll).astype(int)]\r\n a = [i for i, x in enumerate(unpop >= np.quantile(UnPop, per)) if x]\r\n LT_cov_CF.extend(np.array(lll)[a])\r\n\r\ncount=0\r\nfor i in range(len(UnPop)):\r\n if UnPop[i]>=np.quantile(UnPop,per):\r\n count+=1\r\n\r\nprint(\"average of CF Long Tail coverage for all users\", len(np.unique(np.array(LT_cov_CF)))/count)\r\n\r\n#####################################################################################################\r\n\"\"\"\r\nevaluation for PF long tail coverage\r\n\"\"\"\r\nLT_cov=[]\r\nfor i in range(len(PF_total)):\r\n li = listss[:, i * k:(i * k) + k]\r\n ll = []\r\n for j in range(len(PF_total[i][0][0])):\r\n ll=list(li[PF_total[i][0][0]][0])\r\n ll.extend(list(li[PF_total[i][0][0]][j]))\r\n lll=list(filter(lambda x: x != -1, np.unique(ll)))\r\n unpop = UnPop[np.array(lll).astype(int)]\r\n a = [i for i, x in enumerate(unpop >= np.quantile(UnPop, per)) if x]\r\n LT_cov.extend(np.array(lll)[a])\r\n\r\ncount=0\r\nfor i in range(len(UnPop)):\r\n if UnPop[i]>=np.quantile(UnPop,per):\r\n count+=1\r\n\r\nprint(\"average of PF Long Tail coverage for all users\", len(np.unique(np.array(LT_cov)))/count)\r\n\r\n","sub_path":"GA_RS(3-obj).py","file_name":"GA_RS(3-obj).py","file_ext":"py","file_size_in_byte":18847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54160823","text":"from __future__ import 
absolute_import, division\nfrom barvikron import *\nfrom six.moves import range\nimport itertools\nimport pytest\n\n\ndef sturmfels_expected(b):\n    \"\"\"\n    Closed formula from Sturmfels (1995).\n    \"\"\"\n    u, v, w = sorted(b, reverse=True)\n\n    if (u + v + w) % 2 == 1:\n        return 0\n\n    if u >= v + w:\n        psi = v * w / 2 + v * w ** 2 / 8 - w ** 3 / 24\n\n        if u % 2 == 0 and v % 2 == 0:\n            psi += 1 + v / 2 + 2 * w / 3\n        elif u % 2 == 1 and v % 2 == 1:\n            psi += 1 / 2 + v / 2 + 5 * w / 12\n        else:\n            psi += 1 / 2 + 3 * v / 8 + 13 * w / 24\n    else:\n        psi = (\n            -u ** 2 / 8\n            - v ** 2 / 8\n            - w ** 2 / 8\n            + u * v / 4\n            + u * w / 4\n            + v * w / 4\n            + u ** 3 / 48\n            - u ** 2 * v / 16\n            - u ** 2 * w / 16\n            + u * v ** 2 / 16\n            + u * v * w / 8\n            + u * w ** 2 / 16\n            - v ** 3 / 48\n            - v ** 2 * w / 16\n            + v * w ** 2 / 16\n            - w ** 3 / 16\n        )\n        if u % 2 == 0 and v % 2 == 0:\n            psi += 1 + u / 6 + v / 3 + w / 2\n        elif u % 2 == 1 and v % 2 == 1:\n            psi += 1 / 2 + u / 6 + v / 3 + w / 4\n        else:\n            psi += 1 / 2 + u / 6 + 5 * v / 24 + 3 * w / 8\n    return int(round(psi))\n\n\ndef test_construction():\n    vpn = VectorPartitionFunction([[1, 0], [0, 1], [1, 1]])\n    assert vpn.A.shape == (3, 2)\n\n\n@pytest.mark.parametrize(\"b\", itertools.product(range(1, 5), repeat=3))\ndef test_sturmfels_evaluation(b, evaluator):\n    sturmfels_vpn = VectorPartitionFunction(\n        [[2, 1, 1, 0, 0, 0], [0, 1, 0, 2, 1, 0], [0, 0, 1, 0, 1, 2]]\n    )\n\n    # compare evaluation for all b in {1,...,4}^3\n    got = sturmfels_vpn.eval(b, evaluator)\n    expected = sturmfels_expected(b)\n    assert got == expected\n","sub_path":"tests/test_parfun.py","file_name":"test_parfun.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"259354201","text":"import os, json, requests, sys\nimport parser_helper\nimport config\nfrom requests.auth import HTTPBasicAuth\n\nargs = sys.argv\nbuilder = None\ncommit_id = None\n\n# Get build info arguments\nif (len(args) == 3):\n\tbuilder = args[1]\n\tcommit_id = args[2]\t\n\n# Gets output template. This will include build information if it is given\njson_tests = parser_helper.getJsonTemplate(builder, commit_id)\n\nos.chdir(config.RESULTS_PATH)\n\n# Step through the directory with the output json files\nfor root, dirs, files in os.walk('.'):\n\tfor jsonFile in files:\n\t\ttest = parser_helper.openJSON(jsonFile)\n\t\tjson_tests[\"tests\"].append(test)\n\t\tos.unlink(os.path.join(root, jsonFile)) \n\n# Get username and pw for authentication\njson_data = open(config.PATH,'r')\ndata = json.load(json_data)\njson_data.close()\n\nheaders = {'content-type': 'application/json'}\nurl = config.URL\n\n\n# Send post request\nr = requests.post(url, data=json.dumps(json_tests), headers=headers, \n\tauth=HTTPBasicAuth(data['username'], data['pw']))\n\n# Print the status\nprint(\"Request status = \" + str(r.status_code))\nprint(r.text)\n","sub_path":"parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"164583952","text":"# vim: set et sw=4 sts=4 fileencoding=utf-8:\n#\n# Python camera library for the Rasperry-Pi camera module\n# Copyright (c) 2013-2017 Dave Jones \n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n#     * Redistributions of source code must retain the above copyright\n#       notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above copyright\n#       notice, this list of conditions and the following disclaimer in the\n#       documentation and/or other materials provided with the distribution.\n#     * Neither the name of the copyright holder nor the\n#       names of its contributors may be used to endorse or promote products\n#       derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import (\n    unicode_literals,\n    print_function,\n    division,\n    absolute_import,\n    )\n\n# Make Py2's str and range equivalent to Py3's\nstr = type('')\n\nfrom pymp4.parser import Box, UNITY_MATRIX\nfrom construct import Container\nfrom collections import namedtuple\nfrom io import BytesIO\nimport struct\n\n\nNAL_TYPE_SPS = 7 # NAL type for a sequence parameter set\nNAL_TYPE_PPS = 8 # NAL type for a picture parameter set\n\n\nSPSIndications = namedtuple('SPSIndications', ('profile', 'compatibility', 'level'))\nNALSizePatch = namedtuple('NALSizePatch', ('offset_in_mdat', 'size_to_write'))\n\nDEFAULT_SPS_INDICATIONS = SPSIndications(profile=100, compatibility=0, level=40)\n\ndef nal_get_unit_type(nal_data):\n    # The first NAL byte has this structure:\n    # 1 forbidden bit (0)\n    # 2 bits for nal ref idc\n    # 5 bits for nal type\n    return nal_data[0] & ((1 << 5) - 1)\n\n\ndef sps_get_indications(nal_data):\n    assert(nal_get_unit_type(nal_data) == NAL_TYPE_SPS)\n    # After the nal type, follows the profile_idc, the \"constraint set\", aka\n    # the compatibility byte, followed by level_idc\n    return SPSIndications(profile=nal_data[1], compatibility=nal_data[2], level=nal_data[3])\n\n\nSTATIC_FTYP = Box.build(\n    Container(type=b'ftyp')(\n        major_brand=b'isom')(\n        minor_version=0x200)(\n        compatible_brands=[b'isom', b'iso2', b'avc1', b'mp41']))\n\n\nSTATIC_EMPTY_MDAT = Box.build(Container(type=b'mdat')(data=b''))\n\n\n\nclass MP4Muxer(object):\n    def __init__(self):\n        super(MP4Muxer, self).__init__()\n        self.indications = None\n        self.pic_parm_sets = set()\n        self.seq_parm_sets = set()\n        self.current_mdat_size = 0\n\n        self._sample_sizes = []\n        self._nal_size_patches = []\n        self._sps_header_buffer = BytesIO()\n        self._last_frame_was_sps = False\n        self.current_frame_size = 0\n\n    def _write(self, data):\n        pass\n\n    def _seek(self, offset):\n        pass\n\n\n    def begin(self):\n        self._output_mp4_header()\n\n\n    def end(self, framerate, resolution):\n        self._output_mp4_footer(framerate, resolution)\n        self._patch_mdat_size()\n        self._patch_nal_sizes()\n\n\n    @property\n    def mdat_offset(self):\n        return len(STATIC_FTYP)\n\n\n    @property\n    def mdat_payload_offset(self):\n        return self.mdat_offset + len(STATIC_EMPTY_MDAT)\n\n\n    @property\n    def nal_prefix(self):\n        return b'\\x00\\x00\\x00\\x01'\n\n\n    def _flush_sps_header_buffer(self, length):\n        self._sps_header_buffer.seek(0)\n        retval = self._sps_header_buffer.read(length)\n        self._sps_header_buffer.seek(0)\n        return retval\n\n\n    def _record_frame_size(self, frame_size, frame_is_sps_header):\n        # SPS headers gets a special treatment and they're considered part\n        # of the next frame, as far as MP4 is concerned\n        if self._last_frame_was_sps:\n            self._sample_sizes[-1] += frame_size\n        else:\n            self._sample_sizes.append(frame_size)\n            if not frame_is_sps_header:\n                # SPS headers are already patched by process_sps_header. Normal\n                # frames are not cached, so we can't patch their size in advance.\n                # So we store the patch information and we fix up the stream\n                # afterwards, to avoid seeking now.\n                self._nal_size_patches.append(NALSizePatch(\n                    offset_in_mdat=self.current_mdat_size,\n                    size_to_write=frame_size - len(self.nal_prefix)))\n        # We subtract the len of the NAL prefix because that is not part of the\n        # length field in this case.\n        # Finally, increment the total mdat size and update the internal flag\n        self.current_mdat_size += frame_size\n        self._last_frame_was_sps = frame_is_sps_header\n\n\n    def _process_sps_header(self, sps_header_data):\n        assert(sps_header_data.startswith(self.nal_prefix))\n        # We remove the first element because it's going to be empty,\n        # since the header starts with the NAL prefix\n        nal_units = sps_header_data.split(self.nal_prefix)[1:]\n        for nal_unit in nal_units:\n            # Write to the stream the length of the nal unit. If in the native\n            # architecture 'I' is more than a 32 bit integer, truncate\n            self._write(struct.pack('>I', len(nal_unit))[-len(self.nal_prefix):])\n            self._write(nal_unit)\n            # Special treatment for SPS and PPS\n            nal_type = nal_get_unit_type(nal_unit)\n            if nal_type == NAL_TYPE_PPS:\n                self.pic_parm_sets.add(nal_unit)\n            elif nal_type == NAL_TYPE_SPS:\n                self.seq_parm_sets.add(nal_unit)\n                # For SPS, also extract the indications if needed\n                if self.indications is None:\n                    self.indications = sps_get_indications(nal_unit)\n\n\n    def _output_mp4_header(self):\n        self._write(STATIC_FTYP)\n        self._write(STATIC_EMPTY_MDAT)\n\n\n    def _patch_mdat_size(self):\n        # Move to the position where the mdat size was\n        self._seek(self.mdat_offset)\n        # Write the actual mdat size as big endian 32 bit integer\n        self._write(struct.pack('>I',\n            self.current_mdat_size + len(STATIC_EMPTY_MDAT))[-4:]\n        )\n\n\n    def _patch_nal_sizes(self):\n        for offset_in_mdat, size_to_write in self._nal_size_patches:\n            self._seek(self.mdat_payload_offset + offset_in_mdat)\n            # Write the actual mdat size as big endian 32 bit integer\n            self._write(struct.pack('>I', size_to_write)[-len(self.nal_prefix):])\n\n\n    def append(self, data, frame_is_sps_header, frame_is_complete):\n        self.current_frame_size += len(data)\n\n        # Sps gets special treatment because it goes first in a buffer\n        if frame_is_sps_header:\n            self._sps_header_buffer.write(data)\n            if frame_is_complete:\n                # Flush the SPS header and reset the buffer\n                self._process_sps_header(self._flush_sps_header_buffer(self.current_frame_size))\n        else:\n            # Direct to output\n            self._write(data)\n\n        if frame_is_complete:\n            # Store the size for this sample and reset the current one\n            self._record_frame_size(self.current_frame_size, frame_is_sps_header)\n            self.current_frame_size = 0\n\n    def _output_mp4_footer(self, framerate, resolution):\n        # Extact all the variables used after in the construction of the boxes\n        sample_count = len(self._sample_sizes)\n        timescale = framerate.numerator\n        sample_delta = framerate.denominator\n        duration = sample_count * sample_delta\n        chunk_offset = self.mdat_payload_offset\n        width = resolution[0]\n        height = resolution[1]\n        profile, compatibility, level = DEFAULT_SPS_INDICATIONS if self.indications is None else self.indications\n        sample_sizes = self._sample_sizes\n        sps = list(self.seq_parm_sets)\n        pps = list(self.pic_parm_sets)\n\n        # Build all the boxes we need\n        HDLR = Container(type=b'hdlr')\n        HDLR(version=0)\n        HDLR(flags=0)\n        HDLR(handler_type=b'vide')\n        HDLR(name='VideoHandler')\n\n        MDHD = Container(type=b'mdhd')\n        MDHD(version=0)\n        MDHD(flags=0)\n        
MDHD(creation_time=0)\n MDHD(modification_time=0)\n MDHD(timescale=timescale)\n MDHD(duration=duration)\n MDHD(language='und')\n\n URL_ = Container(type=b'url ')\n URL_(version=0)\n URL_(flags=Container(self_contained=True))\n URL_(location=None)\n\n DREF = Container(type=b'dref')\n DREF(version=0)\n DREF(flags=0)\n DREF(data_entries=[URL_])\n\n DINF = Container(type=b'dinf')\n DINF(children=[DREF])\n\n STTS = Container(type=b'stts')\n STTS(version=0)\n STTS(flags=0)\n STTS(entries=[Container(sample_count=sample_count)(sample_delta=sample_delta)])\n\n AVCC = Container(type=b'avcC')\n AVCC(version=1)\n AVCC(profile=profile)\n AVCC(compatibility=compatibility)\n AVCC(level=level)\n AVCC(nal_unit_length_field=3)\n AVCC(sps=sps)\n AVCC(pps=pps)\n\n AVC1 = Container(format=b'avc1')\n AVC1(data_reference_index=1)\n AVC1(version=0)\n AVC1(revision=0)\n AVC1(vendor=b'')\n AVC1(temporal_quality=0)\n AVC1(spatial_quality=0)\n AVC1(width=width)\n AVC1(height=height)\n AVC1(horizontal_resolution=72)\n AVC1(vertical_resolution=72)\n AVC1(data_size=0)\n AVC1(frame_count=1)\n AVC1(compressor_name=b'')\n AVC1(depth=24)\n AVC1(color_table_id=-1)\n AVC1(avc_data=AVCC)\n\n STSD = Container(type=b'stsd')\n STSD(version=0)\n STSD(flags=0)\n STSD(entries=[AVC1])\n\n STSC = Container(type=b'stsc')\n STSC(version=0)\n STSC(flags=0)\n STSC(entries=[Container(first_chunk=1)(samples_per_chunk=sample_count)(sample_description_index=1)])\n\n STCO = Container(type=b'stco')\n STCO(version=0)\n STCO(flags=0)\n STCO(entries=[Container(chunk_offset=chunk_offset)])\n\n STSZ = Container(type=b'stsz')\n STSZ(version=0)\n STSZ(flags=0)\n STSZ(sample_size=0)\n STSZ(sample_count=sample_count)\n STSZ(entry_sizes=sample_sizes)\n\n STBL = Container(type=b'stbl')\n STBL(children=[STSD, STTS, STSC, STSZ, STCO])\n\n VMHD = Container(type=b'vmhd')\n VMHD(version=0)\n VMHD(flags=1)\n VMHD(graphics_mode=0)\n VMHD(opcolor=Container(red=0)(green=0)(blue=0))\n\n MINF = Container(type=b'minf')\n MINF(children=[VMHD, DINF, STBL])\n\n MDIA = Container(type=b'mdia')\n MDIA(children=[MDHD, HDLR, MINF])\n\n # Width and height in TKHD are 16.16 integers\n TKHD = Container(type=b'tkhd')\n TKHD(version=0)\n TKHD(flags=3)\n TKHD(creation_time=0)\n TKHD(modification_time=0)\n TKHD(track_ID=1)\n TKHD(duration=duration)\n TKHD(layer=0)\n TKHD(alternate_group=0)\n TKHD(volume=0)\n TKHD(matrix=UNITY_MATRIX)\n TKHD(width=width << 16)\n TKHD(height=height << 16)\n\n TRAK = Container(type=b'trak')\n TRAK(children=[TKHD, MDIA])\n\n MVHD = Container(type=b'mvhd')\n MVHD(version=0)\n MVHD(flags=0)\n MVHD(creation_time=0)\n MVHD(modification_time=0)\n MVHD(timescale=timescale)\n MVHD(duration=duration)\n MVHD(rate=0x10000)\n MVHD(volume=0x100)\n MVHD(matrix=UNITY_MATRIX)\n MVHD(pre_defined=[0, 0, 0, 0, 0, 0])\n MVHD(next_track_ID=2)\n\n MOOV = Container(type=b'moov')\n MOOV(children=[MVHD, TRAK])\n\n # Finally write\n self._write(Box.build(MOOV))\n\n\n","sub_path":"picamera/mp4.py","file_name":"mp4.py","file_ext":"py","file_size_in_byte":12494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"622211968","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2009-2016 Nicolas P. Rougier. 
All rights reserved.\n# Distributed under the (new) BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom glumpy import app, gl, glm, gloo\nfrom glumpy.geometry import colorcube\n\nvertex = \"\"\"\nuniform mat4 u_model; // Model matrix\nuniform mat4 u_view; // View matrix\nuniform mat4 u_projection; // Projection matrix\nattribute vec4 a_color; // Vertex color\nattribute vec3 a_position; // Vertex position\nvarying vec4 v_color; // Interpolated fragment color (out)\nvoid main()\n{\n v_color = a_color;\n gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);\n}\n\"\"\"\n\nfragment = \"\"\"\nvarying vec4 v_color; // Interpolated fragment color (in)\nvoid main()\n{\n gl_FragColor = v_color;\n}\n\"\"\"\n\n# window = app.Window(width=1024, height=1024, color=(1,1,1,1))\nwindow = app.Window(width=1024, height=1024, color=(0.30, 0.30, 0.35, 1.00))\n\n@window.event\ndef on_draw(dt):\n global phi, theta\n window.clear()\n\n # Filled cube\n cube.draw(gl.GL_TRIANGLES, I)\n \n # Rotate cube\n theta += 0.5 # degrees\n phi += 0.5 # degrees\n model = np.eye(4, dtype=np.float32)\n glm.rotate(model, theta, 0, 0, 1)\n glm.rotate(model, phi, 0, 1, 0)\n cube['u_model'] = model\n\n\n@window.event\ndef on_resize(width, height):\n cube['u_projection'] = glm.perspective(45.0, width / float(height), 2.0, 100.0)\n\n@window.event\ndef on_init():\n gl.glEnable(gl.GL_DEPTH_TEST)\n\n\nV = np.zeros(8, [(\"a_position\", np.float32, 3),\n (\"a_color\", np.float32, 4)])\nV[\"a_position\"] = [[ 1, 1, 1], [-1, 1, 1], [-1,-1, 1], [ 1,-1, 1],\n [ 1,-1,-1], [ 1, 1,-1], [-1, 1,-1], [-1,-1,-1]]\nV[\"a_color\"] = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],\n [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]\nV = V.view(gloo.VertexBuffer)\nI = np.array([0,1,2, 0,2,3, 0,3,4, 0,4,5, 0,5,6, 0,6,1,\n 1,6,7, 1,7,2, 7,4,3, 7,3,2, 4,7,6, 4,6,5], dtype=np.uint32)\nI = I.view(gloo.IndexBuffer)\n\ncube = gloo.Program(vertex, fragment)\ncube.bind(V)\n\ncube['u_model'] = np.eye(4, dtype=np.float32)\ncube['u_view'] = glm.translation(0, 0, -5)\nphi, theta = 40, 30\n\napp.run()\n","sub_path":"examples/tutorial/color-cube.py","file_name":"color-cube.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316674143","text":"#!python3\nfrom tkinter import *\n\nimport rockPaperScissors\nimport hangman\nimport pokerDice\n\nroot = Tk()\nroot.title(\"Jed's Micro and Mini Games Collection\")\n\nmainFrame = Frame(root, height = 200, width = 500)\nmainFrame.pack_propagate(0)\nmainFrame.pack(padx = 5, pady = 5)\n\nintro = Label(mainFrame, text=\"\"\"\nWelcome to my Games Collection!\nPlease select one of the following games to play:\n\"\"\")\nintro.pack(side = TOP)\n\nrpsButton = Button(mainFrame, text = \"Rock, Paper, Scissors\", command = rockPaperScissors.gui)\nrpsButton.pack()\n\nhmButton = Button(mainFrame, text = \"Hangman\", command = hangman.gui)\nhmButton.pack()\n\npdButton = Button(mainFrame, text = \"Poker Dice\", command = pokerDice.gui)\npdButton.pack()\n\nexitButton = Button(mainFrame,text=\"Quit\", command = root.destroy)\nexitButton.pack(side = BOTTOM)\n\nroot.mainloop()\n\n","sub_path":"myFirstPyGUI.py","file_name":"myFirstPyGUI.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"17956919","text":"#!/usr/bin/python\n\nimport sys\nimport re\nimport 
os\nfrom lxml import etree\nimport xml.etree.ElementTree as ET\nfrom collections import defaultdict\nimport pprint\n \n# keyword in C\nkeyword = ['auto', 'break', 'case', 'char', 'const', 'continue', 'default',\n 'do', 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto',\n 'if', 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',\n 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',\n 'volatile', 'while']\n\n# the solve function try to decide whether the token is valid\n# such as 018 is a constant(number), but not valid\n\ndef solve_separate(string, state, fix):\n # this function solve the separate\n # fix the offset\n fix[0] = 1\n return [string, \"separate\", True]\n\ndef solve_constant(string, state, fix):\n # solve the constant - string, number\n # number - character - 1\n # char - character - 0\n # string - character - 0\n if \"'\" in string or '\"' in string:\n fix[0] = 1\n # wrong case handle\n if len(string) == 3:\n return [string, \"WRONG\", False]\n if \"'\" in string and len(string) > 4:\n return [string, \"WRONG\", False]\n else:\n fix[0] = 2\n # the regrex string for check the wrong case of the number constant\n # 088, 0x-2(put in the solve_wrong function) \n check1 = re.compile('^0[0-7]*[8-9]+[0-7]*')\n if check1.findall(string) and '.' not in string:\n return [string, \"WRONG\", False]\n\n return [string, \"constant\", True]\n\ndef solve_name(string, state, fix):\n # this function solve the word: name or the keyword\n # need to check and decide whether the name or the keyword\n # keyword - 1, name - 2\n global keyword\n fix[0] = 2\n for i in keyword:\n if i in string and len(i) == len(string) - 2:\n return [i, \"keyword\", True]\n return [string, \"identifier\", True]\n\ndef solve_operator(string, state, fix):\n # solve the operator\n if state in [401, 405, 409, 412, 415, 418, 421, 424, 428, 433, 437, 443]:\n fix[0] = 2\n else:\n fix[0] = 1\n return [string, \"operator\", True]\n\ndef solve_wrong(string, state, fix):\n # the wrong case handle function\n fix[0] = 1\n return [string, \"WRONG\", False]\n\ndef init_table():\n # this function try to create the defaultdict of the DFA \n # and return to use\n\n # main_table save the state and the table or list [process, paramater]\n # table save the character and the state need to change, do not save the function\n\n main_table = defaultdict(dict)\n\n # state 0 \n table = defaultdict(int)\n table['^[1-9]$']= 1\n table['^0$'] = 8\n table[r'^\\'$'] = 12\n table[r'^\"$'] = 15\n table['^[a-zA-Z_]$'] = 600\n table[r'^[,;\\{\\}\\s#:]$'] = 200\n table['^[+]$'] = 400\n table['^[-]$'] = 404\n table['^[*]$'] = 408\n table['^[/]$'] = 411\n table['^[%]$'] = 414\n table['^[=]$'] = 417\n table['^[!]$'] = 420\n table['^[<]$'] = 423\n table['^[>]$'] = 427\n table['^[~]$'] = 431\n table['^[&]$'] = 432\n table['^[\\|]$'] = 436\n table['^[\\.]$'] = 440\n table['^[\\?]$'] = 441\n table['^[\\^]$'] = 442\n table['^[\\[\\]\\(\\)]$'] = 446\n main_table[0] = table\n\n # ----------- const (number / string) ----------\n # state 1\n table = defaultdict(int)\n table['^[0-9]$']= 1\n table[r'^[\\.]$']= 2\n table['^[eE]$'] = 4\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n main_table[1] = table\n\n # state 2\n table = defaultdict(int)\n table['^[0-9]$']= 3\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n main_table[2] = table\n\n # state 3\n table = defaultdict(int)\n table['^[0-9]$']= 3\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n table['^[eE]$'] = 4\n main_table[3] = table\n\n # state 4, the 
wrong case like: 2013.2e+a, Gcc cut into 2013.2e+a as a whole word\n table = defaultdict(int)\n table['^[0-9]$']= 6\n table['^[+\\-]$']= 5\n # different because +/- turn the state into 5 state\n table['^[\\s*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n main_table[4] = table\n\n # state 5\n table = defaultdict(int)\n table['^[0-9]$']= 6\n main_table[5] = table\n\n # state 6\n table = defaultdict(int)\n table['^[0-9]$']= 6\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n main_table[6] = table\n\n # state 7\n main_table[7] = [solve_constant, 1]\n\n # state 8\n table = defaultdict(int)\n table['^[8-9]$']= 1\n table['^[0-7]$']= 9\n table['^[xX]$'] = 10\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n table['^[\\.]$'] = 2\n main_table[8] = table\n\n # state 9\n table = defaultdict(int)\n table['^[0-7]$']= 9\n table['^[8-9]$']= 1\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n table['^[\\.]$'] = 2\n main_table[9] = table\n\n # state 10\n table = defaultdict(int)\n table['^[0-9a-fA-F]$'] = 11\n main_table[10] = table\n\n # state 11\n table = defaultdict(int)\n table['^[0-9a-fA-F]$'] = 11\n # careful of the case a[23], function(23)\n table['^[\\s+\\-*/%&\\|?;,:\\)\\]\\}\\^<>!=]$'] = 7\n main_table[11] = table\n\n # state 12\n table = defaultdict(int)\n table[r'^[^\\\\\\']$'] = 12\n table[r'^[\\\\]$']= 13\n table['^[\\']$'] = 14\n main_table[12] = table\n\n # state 13\n table = defaultdict(int)\n table['^.$'] = 12\n main_table[13] = table\n\n # state 14\n main_table[14] = [solve_constant, 0]\n \n # state 15\n table = defaultdict(int)\n table[r'^[^\\\\\\\"]$'] = 15\n table[r'^[\\\\]$']= 16\n table['^[\\\"]$'] = 17\n main_table[15] = table\n\n # state 16\n table = defaultdict(int)\n table['^.$'] = 15\n main_table[16] = table\n\n # state 17\n main_table[17] = [solve_constant, 0]\n\n # ---------- separate ----------\n # state 200\n main_table[200] = [solve_separate, None]\n\n # ---------- name ----------\n # state 600\n table = defaultdict(int)\n table['^[0-9a-zA-Z_]$'] = 600\n # carefule of the switch-case struct \n table['^[\\s+\\->]$']= 405\n table['^[-]$'] = 406\n table['^[=]$'] = 407\n table['^[>]$'] = 445\n main_table[404] = table\n\n # state 405\n main_table[405] = [solve_operator, None]\n\n # state 406\n main_table[406] = [solve_operator, None]\n\n # state 407\n main_table[407] = [solve_operator, None]\n\n # state 408\n table = defaultdict(int)\n table['^[^=]$'] = 409\n table['^[=]$'] = 410\n main_table[408] = table\n\n # state 409\n main_table[409] = [solve_operator, None]\n \n # state 410\n main_table[410] = [solve_operator, None]\n \n # state 411\n table = defaultdict(int)\n table['^[^=]$'] = 412\n table['^[=]$'] = 413\n main_table[411] = table\n\n # state 412\n main_table[412] = [solve_operator, None]\n \n # state 413\n main_table[413] = [solve_operator, None]\n \n # state 414\n table = defaultdict(int)\n table['^[^=]$'] = 415\n table['^[=]$'] = 416\n main_table[414] = table\n\n # state 415\n main_table[415] = [solve_operator, None]\n \n # state 416\n main_table[416] = [solve_operator, None]\n \n # state 417\n table = defaultdict(int)\n table['^[^=]$'] = 418\n table['^[=]$'] = 419\n main_table[417] = table\n\n # state 418\n main_table[418] = [solve_operator, None]\n \n # state 419\n main_table[419] = [solve_operator, None]\n\n # state 420\n table = defaultdict(int)\n table['^[^=]$'] = 421\n table['^[=]$'] = 422\n main_table[420] = table\n\n # state 421\n main_table[421] = [solve_operator, None]\n \n # state 422\n main_table[422] = [solve_operator, None]\n \n # state 423\n table 
= defaultdict(int)\n table['^[^<=]$']= 424\n table['^[<]$'] = 425\n table['^[=]$'] = 426\n main_table[423] = table\n\n # state 424\n main_table[424] = [solve_operator, None]\n\n # state 425\n main_table[425] = [solve_operator, None]\n\n # state 426\n main_table[426] = [solve_operator, None]\n \n # state 427\n table = defaultdict(int)\n table['^[^>=]$']= 428\n table['^[>]$'] = 429\n table['^[=]$'] = 430\n main_table[427] = table\n\n # state 428\n main_table[428] = [solve_operator, None]\n\n # state 429\n main_table[429] = [solve_operator, None]\n\n # state 430\n main_table[430] = [solve_operator, None]\n\n # state 431\n main_table[431] = [solve_operator, None]\n \n # state 432\n table = defaultdict(int)\n table['^[^&=]$']= 433\n table['^[&]$'] = 434\n table['^[=]$'] = 435\n main_table[432] = table\n\n # state 433\n main_table[433] = [solve_operator, None]\n\n # state 434\n main_table[434] = [solve_operator, None]\n\n # state 435\n main_table[435] = [solve_operator, None]\n \n # state 436\n table = defaultdict(int)\n table['^[^\\|=]$']= 437\n table['^[\\|]$'] = 438\n table['^[=]$'] = 439\n main_table[436] = table\n\n # state 437\n main_table[437] = [solve_operator, None]\n\n # state 438\n main_table[438] = [solve_operator, None]\n\n # state 439\n main_table[439] = [solve_operator, None]\n\n # state 440\n main_table[440] = [solve_operator, None]\n\n # state 441\n main_table[441] = [solve_operator, None]\n \n # state 442\n table = defaultdict(int)\n table['^[^=]$'] = 443\n table['^[=]$'] = 444\n main_table[442] = table;\n\n # state 443\n main_table[443] = [solve_operator, None]\n\n # state 444\n main_table[444] = [solve_operator, None]\n\n # state 445\n main_table[445] = [solve_operator, None]\n\n # state 446\n main_table[446] = [solve_operator, None]\n\n # state -1, wrong case handle\n main_table[-1] = [solve_wrong, None]\n\n # state -2, wrong case handle step back 1\n main_table[-2] = [solve_wrong, None]\n\n return main_table\n\ndef run(filename, main_table, keyword):\n # read the file\n # run the main algorithm\n\n # init the register\n char_register = None\n state_register = 0\n string_register = ''\n\n # the begin and the end is for the string_register\n begin = 0\n end = 0\n\n # counter\n count = 0\n collection = []\n\n # line counter\n l_count = 0\n\n # preprocess, get all lines\n with open(filename, 'r') as f:\n data = f.read()\n ll = []\n for index, i in enumerate(data):\n if i == '\\n':\n ll.append(index)\n\n with open(filename, 'r') as f:\n data = f.read()\n # fix the file end without any separate\n data += ' '\n length = len(data)\n while end < length:\n if state_register != -1:\n char_register = data[end]\n end += 1\n # renew the string_register\n string_register += char_register\n if isinstance(main_table[state_register], defaultdict):\n # the middle \n for i in main_table[state_register]:\n check = re.compile(i)\n if check.findall(char_register):\n # find the row in the sub table\n state_register = main_table[state_register][i]\n break\n else:\n # Error wrong case\n print(\"Scaner find wrong:\", \\\n \"string -\", string_register, \"/ char -\", char_register)\n # Once find the wrong case, only need to untread one step\n state_register = -1\n else:\n # these lines try to find and label the error like: 0xxyz\n # but can not find the error like: 0x-2\n check = re.compile('^[,;\\(\\)\\[\\]\\{\\}\\s#:+\\-*\\%^&\\|=!]$')\n if (state_register == -1 or state_register == -2) \\\n and len(check.findall(char_register)) == 0:\n state_register = -2\n continue\n\n # end process\n # only in 
this case, change the begin\n # back to one character age\n\n # how many character need to go back\n fix = [0]\n res = main_table[state_register][0](string_register, state_register, fix)\n\n # go back, untread\n string_register = string_register[0:-fix[0]]\n res[0] = string_register\n\n end -= fix[0]\n \n # add the line msg\n index = 0\n for line_number, line_index in enumerate(ll):\n if end <= line_index:\n index = line_number + 1\n break\n res.append(index)\n\n if res[0].strip():\n count += 1\n print(str(count) + '\\t' + res[0] + '\\t\\t\\t' + res[1])\n # add the token into the collection\n collection.append(res)\n\n state_register = 0\n char_register = None\n string_register = ''\n begin = end\n \n check = re.compile('^[\\s]$')\n if len(string_register) > 0 and not check.findall(string_register):\n # need to after process\n # just for the case like the file end with \"abcd and without \"\n count += 1\n print(str(count) + '\\t' + string_register + '\\t\\t\\t' + \"WRONG\")\n collection.append([string_register, \"WRONG\", False, len(ll)])\n\n return collection\n\ndef write_file(path, collections):\n # write the tokens into the file\n root = ET.Element('project')\n\n # fix the project name\n filename = os.path.split(path)[1]\n end = filename.index('.')\n filename = filename[0:end + 1]\n filename += 'c'\n\n root.set('name', filename)\n\n # tokens\n tokens = ET.SubElement(root, 'tokens')\n\n # add token into XML tree\n for index, value in enumerate(collections):\n token = ET.SubElement(tokens, 'token')\n \n number = ET.Element('number')\n number.text = str(index + 1)\n \n val = ET.Element('value')\n val.text = value[0]\n\n ty = ET.Element('type')\n ty.text = value[1]\n\n line = ET.Element('line')\n line.text = str(value[3])\n\n va = ET.Element('valid')\n va.text = str(value[2])\n\n token.append(number)\n token.append(val)\n token.append(ty)\n token.append(line)\n token.append(va)\n\n tree = ET.ElementTree(root)\n root = tree.getroot()\n v = ET.tostring(root)\n res = etree.tostring(etree.fromstring(v), pretty_print=True).decode()\n\n with open(path, 'w') as f:\n f.write('\\n')\n f.write(res)\n\nif __name__ == \"__main__\":\n '''\n main_table = init_table()\n # sys/argv[1]\n # test the test_file which I made\n print(\"Number\" + '\\t' + \"Key\" + '\\t\\t\\t' + \"Value\")\n print(\"-\" * 50)\n collection = run('./mix/misaka.c', main_table, keyword)\n write_file('./test.token.xml', collection)\n\n '''\n # using\n main_table = init_table()\n collection = run(sys.argv[1], main_table, keyword)\n write_file(sys.argv[2], collection)\n","sub_path":"run/script/gencode/scaner.py","file_name":"scaner.py","file_ext":"py","file_size_in_byte":15938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377520770","text":"\"\"\"\nhttptools based HTTP protocol.\n\"\"\"\nimport asyncio\nimport logging\nimport traceback\nimport warnings\n\nimport httptools\n\nfrom kyoukai.request import Request\nfrom kyoukai.response import Response\nfrom kyoukai.exc import HTTPException, exc_from\nfrom kyoukai.context import HTTPRequestContext\n\nCRITICAL_ERROR_TEXT = \"\"\"HTTP/1.0 500 INTERNAL SERVER ERROR\nServer: Kyoukai\nX-Powered-By: Kyoukai\nContent-Type: text/html; charset=utf-8\n\n\nCritical Server Error\n

</head>\n<body>\n<h1>Critical Server Error</h1>\n<p>
      An unrecoverable error has occurred within Kyoukai.\nIf you are the developer, please report this at the Kyoukai issue\ntracker.\n\"\"\".replace(\"\\n\", \"\\r\\n\")\n\n\nclass HTTPToolsHandler: # pragma: no cover\n \"\"\"\n A callback handler that works with the HTTPTools library.\n\n This class does some downright horrible things in order to be compatible with httptool's weird mix of callbacks\n and normal functions, involving asyncio events.\n \"\"\"\n\n def __init__(self, protocol: 'KyoukaiProtocol'):\n self.protocol = protocol\n\n # This defines the current request.\n self.current_request = None\n\n def reset(self):\n \"\"\"\n Resets the current request.\n Should be called after the message is complete.\n \"\"\"\n self.current_request = None\n\n def on_message_begin(self):\n \"\"\"\n Called when a message has begun.\n\n This creates the new Request.\n \"\"\"\n self.current_request = self.protocol.app.request_cls()\n\n def on_header(self, name: bytes, value: bytes):\n \"\"\"\n Called when a header is set.\n \"\"\"\n # Decode the name and the values to get the header.\n self.current_request.headers[name.decode()] = value.decode()\n\n def on_body(self, body: bytes):\n \"\"\"\n Called when the body is received.\n\n This sets self.current_request.body.\n \"\"\"\n self.current_request.body += body.decode()\n\n def on_url(self, url: bytes):\n \"\"\"\n Called when a URL is recieved.\n\n This is undocumented in the HTTPTools README.\n \"\"\"\n self.current_request.full_path = url\n\n def on_message_complete(self):\n \"\"\"\n Called when a message is complete.\n\n This calls the event set() to ensure the protocol continues on with parsing.\n \"\"\"\n self.protocol.parser_ready.set()\n\n\nclass KyoukaiProtocol(asyncio.Protocol): # pragma: no cover\n \"\"\"\n The Kyoukai protocol.\n \"\"\"\n\n def __init__(self, app, parent_context):\n self.app = app\n self._transport = None\n self.ip = None\n self.client_port = None\n\n self.logger = logging.getLogger(\"Kyoukai\")\n\n self.loop = asyncio.get_event_loop()\n\n # Asphalt contexts\n self.parent_context = parent_context\n\n # Request lock.\n # This ensures that requests are processed serially, and responded to in the correct order, as the lock is\n # released after processing a request completely.\n self.lock = asyncio.Lock()\n\n # Parser event.\n # Set when the HTTPTools parser is ready to hand over the new request to Kyoukai.\n self.parser_ready = asyncio.Event()\n\n # The parser itself.\n # This is created per connection.\n self.parser_obb = HTTPToolsHandler(self)\n self.parser = httptools.HttpRequestParser(self.parser_obb)\n\n # Define a waiter, that 'waits' on the event to clear.\n # Once the wait is over, it then delegates the request.\n self.waiter = None\n\n async def _safe_handle_error(self, context: HTTPRequestContext, exception: Exception):\n \"\"\"\n \"Safely\" handle a HTTP exception.\n\n This is **only** called when Kyoukai fails to process a HTTP error.\n ``delegate_request`` safely attempts to process errors properly. 
If an error within Kyoukai happens,\n it will pass back down to this layer, which is very very bad.\n\n This first calls ``app.handle_http_exception``.\n If that fails, it sends the critical error text.\n\n :param exception: The exception to send down the line.\n :return:\n \"\"\"\n self.logger.warning(\"Exception happened during HTTP parsing.\")\n self.logger.warning(\"This is not necessarily a bug.\")\n traceback.print_exc()\n # Convert the exception.\n new_e = exc_from(exception)\n try:\n await self.app.handle_http_error(new_e, self, context)\n except Exception:\n # Critical error.\n self.logger.critical(\"Unhandled exception inside Kyoukai, when processing a HTTP error!\")\n self.logger.critical(\"This is a bug. Please report it!\")\n self.logger.critical(\"\".join(traceback.format_exc()))\n self.write(CRITICAL_ERROR_TEXT.encode())\n self.close()\n\n def reset(self):\n \"\"\"\n Resets the HTTP parser.\n \"\"\"\n self.parser_obb.reset()\n # Reset the current protocol.\n self.parser = httptools.HttpRequestParser(self.parser_obb)\n\n async def _wait(self):\n \"\"\"\n Waits for the request to be ready.\n :return:\n \"\"\"\n await self.parser_ready.wait()\n # Remove the current waiter.\n self.waiter = None\n # Unset the event. We're ready to begin processing.\n self.parser_ready.clear()\n\n # Take in the request, and call parse_all().\n request = self.parser_obb.current_request\n # Set a handful of properties manually.\n request.version = self.parser.get_http_version()\n request.method = self.parser.get_method().decode()\n request.should_keep_alive = self.parser.should_keep_alive()\n # Set the IP and the port on the request.\n request.ip = self.ip\n request.port = self.client_port\n # Create the new HTTPRequestContext.\n ctx = HTTPRequestContext(request, self.app, self.parent_context)\n # Parse all fields in the Exception.\n try:\n request.parse_all()\n except HTTPException as e:\n # Handle the HTTP exception.\n await self._safe_handle_error(ctx, e)\n return\n except Exception as e:\n await self._safe_handle_error(ctx, e)\n return\n finally:\n self.reset()\n\n # Reset the parser.\n self.parser_obb.reset()\n\n # Create the delegate_request task.\n try:\n await self.app.delegate_request(self, ctx)\n except Exception as exc:\n await self._safe_handle_error(ctx, exc)\n finally:\n self.reset()\n\n def connection_made(self, transport: asyncio.Transport):\n \"\"\"\n Called when a connection is made, and is used to store the connection data.\n \"\"\"\n try:\n self.ip, self.client_port = transport.get_extra_info(\"peername\")\n except ValueError:\n # Sometimes socket.socket.getpeername() isn't available, so it tried to unpack a None.\n # Or, it returns None (wtf?)\n # So just provide some fake values.\n warnings.warn(\"getpeername() returned None, cannot provide transport information.\")\n self.ip, self.client_port = None, None\n self._transport = transport\n\n self.logger.debug(\"Recieved connection from {}:{}\".format(*transport.get_extra_info(\"peername\")))\n\n def connection_lost(self, exc):\n \"\"\"\n Called when a connection is lost.\n \"\"\"\n self._empty_state()\n\n def data_received(self, data: bytes):\n \"\"\"\n Called when data is received.\n\n This is the bulk of the processing.\n \"\"\"\n # Feed the data to the parser.\n try:\n self.parser.feed_data(data)\n except httptools.HttpParserInvalidMethodError as e:\n ctx = HTTPRequestContext(None, self.app, self.parent_context)\n # Transform it into a 405.\n exc = exc_from(e)\n exc.code = 405\n 
self.loop.create_task(self._safe_handle_error(ctx, exc))\n except httptools.HttpParserError as e:\n ctx = HTTPRequestContext(None, self.app, self.parent_context)\n # Transform it into a 400.\n exc = exc_from(e)\n exc.code = 400\n self.loop.create_task(self._safe_handle_error(ctx, exc))\n\n # Wait on the event.\n if self.waiter is None:\n self.waiter = self.loop.create_task(self._wait())\n return\n\n def handle_resp(self, response: Response):\n \"\"\"\n Shortcut for :meth:``write_response``.\n \"\"\"\n return self.write_response(response)\n\n def write_response(self, response: Response):\n \"\"\"\n Writes a :class:`Response` to the protocol\n\n :param response: The response to write.\n \"\"\"\n data = response.to_bytes()\n self.write(data)\n\n # Protocol level methods.\n def write(self, data: bytes):\n \"\"\"\n Writes to the transport stream.\n\n This is an **internal method.** This should not be used by the developer.\n\n .. versionadded:: 1.9\n\n :param data: The data to send, byte encoded.\n \"\"\"\n self._transport.write(data)\n\n def _empty_state(self):\n \"\"\"\n Closes locks and the waiter.\n \"\"\"\n # Turn off the waiter.\n if self.waiter is not None:\n self.waiter.cancel()\n\n self.waiter = None\n\n # Empty out the lock waiters by cancelling the tasks.\n for waiter in self.lock._waiters:\n waiter.cancel()\n\n def close(self):\n \"\"\"\n Closes the transport stream.\n\n This an **internal method.** This should not be used by the developer.\n\n .. versionadded:: 1.9\n \"\"\"\n # Empty the state.\n self._empty_state()\n\n # Then, close the transport.\n self._transport.close()\n","sub_path":"kyoukai/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":9966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292636908","text":"from __future__ import absolute_import\n\n\"\"\" Script to compile the spectrum shift data for COS FUV and NUV data.\n\n\"\"\"\n\nimport glob\nimport os\nimport shutil\nimport sys\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom astropy.time import Time\n\nimport scipy\nfrom scipy.stats import linregress\nfrom datetime import datetime\n\nfrom astropy.io import fits\nfrom astropy.table import Table\n\nfrom ..database.db_tables import open_settings, load_connection\nfrom ..utils import remove_if_there\n\n#-------------------------------------------------------------------------------\n\ndef fppos_shift(lamptab_name, segment, opt_elem, cenwave, fpoffset):\n lamptab = fits.getdata(os.path.join(os.environ['lref'], lamptab_name))\n\n if 'FPOFFSET' not in lamptab.names:\n return 0\n\n index = np.where((lamptab['segment'] == segment) &\n (lamptab['opt_elem'] == opt_elem) &\n (lamptab['cenwave'] == cenwave) &\n (lamptab['fpoffset'] == fpoffset))[0]\n\n offset = lamptab['FP_PIXEL_SHIFT'][index][0]\n\n return offset\n\n#-------------------------------------------------------------------------------\n\ndef pull_flashes(filename):\n \"\"\"Calculate lampflash values for given file\n\n Parameters\n ----------\n filename : str\n file to calculate lamp shifts from\n\n Returns\n -------\n out_info : dict\n dictionary of pertinent value\n\n \"\"\"\n\n with fits.open(filename) as hdu:\n out_info = {'date': hdu[1].header['EXPSTART'],\n 'rootname': hdu[0].header['ROOTNAME'],\n 'proposid': hdu[0].header['PROPOSID'],\n 'detector': hdu[0].header['DETECTOR'],\n 'segment': hdu[0].header['SEGMENT'],\n 
'opt_elem': hdu[0].header['OPT_ELEM'],\n 'cenwave': hdu[0].header['CENWAVE'],\n 'fppos': hdu[0].header.get('FPPOS', None),\n 'filetype': hdu[0].header.get('FILETYPE', None)}\n\n t = Time(out_info['date'], format='mjd')\n out_info['cal_date'] = t.iso\n\n if '_lampflash.fits' in filename:\n out_info['lamptab'] = hdu[0].header['LAMPTAB'].split('$')[-1]\n\n fpoffset = out_info['fppos'] - 3\n\n if not len(hdu[1].data):\n yield out_info\n else:\n for i, line in enumerate(hdu[1].data):\n out_info['flash'] = (i // 2) + 1\n out_info['x_shift'] = line['SHIFT_DISP'] - fppos_shift(out_info['lamptab'],\n line['segment'],\n out_info['opt_elem'],\n out_info['cenwave'],\n fpoffset)\n\n out_info['y_shift'] = line['SHIFT_XDISP']\n out_info['found'] = line['SPEC_FOUND']\n out_info['segment'] = line['SEGMENT']\n\n #-- don't need too much precision here\n out_info['x_shift'] = round(out_info['x_shift'], 5)\n out_info['y_shift'] = round(out_info['y_shift'], 5)\n\n yield out_info\n\n\n elif '_rawacq.fits' in filename:\n #-- Technically it wasn't found.\n out_info['found'] = False\n out_info['fppos'] = -1\n out_info['flash'] = 1\n out_info['segment'] = 'N/A'\n\n spt = fits.open(filename.replace('rawacq', 'spt'))\n\n if not spt[1].header['LQTAYCOR'] > 0:\n out_info['x_shift'] = None\n out_info['y_shift'] = None\n else:\n # These are in COS RAW coordinates, so shifted 90 degrees from\n # user and backwards\n out_info['x_shift'] = 1023 - spt[1].header['LQTAYCOR']\n out_info['y_shift'] = 1023 - spt[1].header['LQTAXCOR']\n\n yield out_info\n\n else:\n yield out_info\n\n#-------------------------------------------------------------------------------\n\ndef fit_data(xdata, ydata):\n stats = linregress(xdata, ydata)\n\n parameters = (stats[0], stats[1])\n err = 0\n fit = scipy.polyval(parameters, xdata)\n\n return fit, xdata, parameters, err\n\n#-------------------------------------------------------------------------------\n\ndef make_shift_table(connection_string):\n\n Session, engine = load_connection(connection_string)\n\n #-- this is a crude implementation, but it lets me use the rest of the\n #-- plotting code as-is\n data = []\n for i, row in enumerate(engine.execute(\"\"\"SELECT * FROM lampflash\n WHERE x_shift IS NOT NULL AND\n y_shift IS NOT NULL;\"\"\")):\n if not i:\n keys = row.keys()\n data.append(row.values())\n\n data = Table(rows=data, names=keys)\n\n return data\n\n#-------------------------------------------------------------------------------\n\ndef make_plots(data, out_dir):\n mpl.rcParams['figure.subplot.hspace'] = 0.05\n\n sorted_index = np.argsort(data['date'])\n data = data[sorted_index]\n\n G140L = np.where((data['opt_elem'] == 'G140L'))[0]\n G140L_A = np.where((data['opt_elem'] == 'G140L') &\n (data['segment'] == 'FUVA'))[0]\n G140L_B = np.where((data['opt_elem'] == 'G140L') &\n (data['segment'] == 'FUVB'))[0]\n\n G130M = np.where((data['opt_elem'] == 'G130M'))[0]\n G130M_A = np.where((data['opt_elem'] == 'G130M') &\n (data['segment'] == 'FUVA'))[0]\n G130M_B = np.where((data['opt_elem'] == 'G130M') &\n (data['segment'] == 'FUVB'))[0]\n\n G160M = np.where((data['opt_elem'] == 'G160M'))[0]\n G160M_A = np.where((data['opt_elem'] == 'G160M') &\n (data['segment'] == 'FUVA'))[0]\n G160M_B = np.where((data['opt_elem'] == 'G160M') &\n (data['segment'] == 'FUVB'))[0]\n\n G230L = np.where((data['opt_elem'] == 'G230L'))[0]\n G230L_A = np.where((data['opt_elem'] == 'G230L') &\n (data['segment'] == 'NUVA'))[0]\n G230L_B = np.where((data['opt_elem'] == 'G230L') &\n (data['segment'] == 'NUVB'))[0]\n G230L_C 
= np.where((data['opt_elem'] == 'G230L') &\n (data['segment'] == 'NUVC'))[0]\n\n G225M = np.where((data['opt_elem'] == 'G225M'))[0]\n G225M_A = np.where((data['opt_elem'] == 'G225M') &\n (data['segment'] == 'NUVA'))[0]\n G225M_B = np.where((data['opt_elem'] == 'G225M') &\n (data['segment'] == 'NUVB'))[0]\n G225M_C = np.where((data['opt_elem'] == 'G225M') &\n (data['segment'] == 'NUVC'))[0]\n\n G285M = np.where((data['opt_elem'] == 'G285M'))[0]\n G285M_A = np.where((data['opt_elem'] == 'G285M') &\n (data['segment'] == 'NUVA'))[0]\n G285M_B = np.where((data['opt_elem'] == 'G285M') &\n (data['segment'] == 'NUVB'))[0]\n G285M_C = np.where((data['opt_elem'] == 'G285M') &\n (data['segment'] == 'NUVC'))[0]\n\n G185M = np.where((data['opt_elem'] == 'G185M'))[0]\n G185M_A = np.where((data['opt_elem'] == 'G185M') &\n (data['segment'] == 'NUVA'))[0]\n G185M_B = np.where((data['opt_elem'] == 'G185M') &\n (data['segment'] == 'NUVB'))[0]\n G185M_C = np.where((data['opt_elem'] == 'G185M') &\n (data['segment'] == 'NUVC'))[0]\n\n NUV = np.where((data['opt_elem'] == 'G230L') |\n (data['opt_elem'] == 'G185M') |\n (data['opt_elem'] == 'G225M') |\n (data['opt_elem'] == 'G285M'))[0]\n\n #############\n\n fig = plt.figure( figsize=(16,8) )\n ax = fig.add_subplot(3,1,1)\n\n ax.plot( data['date'][G130M_A], data['x_shift'][G130M_A],'b.',label='G130M')\n ax.plot( data['date'][G130M_B], data['x_shift'][G130M_B],'b.')\n ax.xaxis.set_ticklabels( ['' for item in ax.xaxis.get_ticklabels()] )\n\n ax2 = fig.add_subplot(3,1,2)\n ax2.plot( data['date'][G160M_A], data['x_shift'][G160M_A],'g.',label='G160M')\n ax2.plot( data['date'][G160M_B], data['x_shift'][G160M_B],'g.')\n ax2.xaxis.set_ticklabels( ['' for item in ax2.xaxis.get_ticklabels()] )\n\n ax3 = fig.add_subplot(3,1,3)\n ax3.plot( data['date'][G140L_A], data['x_shift'][G140L_A],'y.',label='G140L')\n ax3.plot( data['date'][G140L_B], data['x_shift'][G140L_B],'y.')\n\n ax.legend(shadow=True, numpoints=1, loc='upper left')\n fig.suptitle('FUV SHIFT1[A/B]')\n ax.set_xlabel('MJD')\n ax.set_ylabel('SHIFT1[A/B] (pixels)')\n\n for axis,index in zip([ax,ax2,ax3],[G130M,G160M,G140L]):\n #axis.set_ylim(-300,300)\n axis.set_xlim( data['date'].min(),data['date'].max()+50 )\n axis.set_ylabel('SHIFT1[A/B/C] (pixels)')\n axis.axhline(y=0,color='r')\n axis.axhline(y=285,color='k',lw=3,ls='--',zorder=1,label='Search Range')\n axis.axhline(y=-285,color='k',lw=3,ls='--',zorder=1)\n fit,ydata,parameters,err = fit_data( data['date'][index],data['x_shift'][index] )\n axis.plot( ydata,fit,'k-',lw=3,label='%3.5fx'%(parameters[0]) )\n axis.legend(bbox_to_anchor=(1,1), loc='upper left', ncol=1, numpoints=1,shadow=True,prop={'size':10})\n\n remove_if_there(os.path.join(out_dir,'FUV_shifts.png'))\n fig.savefig(os.path.join(out_dir,'FUV_shifts.png'))\n plt.close(fig)\n os.chmod(os.path.join(out_dir,'FUV_shifts.png'),0o766)\n\n ##########\n\n fig = plt.figure(figsize=(16, 18))\n ax = fig.add_subplot(7, 1, 1)\n ax.plot(data['date'][G185M_A].data, data['x_shift'][G185M_A].data, 'bo', label='G185M')\n ax.plot(data['date'][G185M_B].data, data['x_shift'][G185M_B].data, 'bo', markeredgecolor='k')\n ax.plot(data['date'][G185M_C].data, data['x_shift'][G185M_C].data, 'bo', markeredgecolor='k')\n ax.axhline(y=0, color='red')\n\n #--second timeframe\n transition_fraction = (56500.0 - data['date'].min()) / \\\n (data['date'].max() - data['date'].min())\n\n ax.axhline(y=58, xmin=0, xmax=transition_fraction, color='k',\n lw=3, ls='--', zorder=1, label='Search Range')\n ax.axhline(y=-58, xmin=0, 
xmax=transition_fraction,\n color='k', lw=3, ls='--', zorder=1)\n\n ax.axhline(y=58 - 20, xmin=transition_fraction, xmax=1,\n color='k', lw=3, ls='--', zorder=1)\n ax.axhline(y=-58 - 20, xmin=transition_fraction,\n xmax=1, color='k', lw=3, ls='--', zorder=1)\n #--\n\n sigma = data['x_shift'][G185M_A].std()\n\n ax.xaxis.set_ticklabels(['' for item in ax.xaxis.get_ticklabels()])\n\n ax2 = fig.add_subplot(7, 1, 2)\n ax2.plot(data['date'][G225M_A], data['x_shift'][G225M_A], 'ro', label='G225M')\n ax2.plot(data['date'][G225M_B], data['x_shift'][G225M_B], 'ro', markeredgecolor='k')\n ax2.plot(data['date'][G225M_C], data['x_shift'][G225M_C], 'ro', markeredgecolor='k')\n ax2.axhline(y=0, color='red')\n\n #--second timeframe\n transition_fraction = (56500.0 - data['date'].min()) / \\\n (data['date'].max() - data['date'].min())\n\n ax2.axhline(y=58, xmin=0, xmax=transition_fraction, color='k', lw=3, ls='--', zorder=1, label='Search Range')\n ax2.axhline(y=-58, xmin=0, xmax=transition_fraction, color='k', lw=3, ls='--', zorder=1)\n\n ax2.axhline(y=58 - 10, xmin=transition_fraction, xmax=1,\n color='k', lw=3, ls='--', zorder=1)\n ax2.axhline(y=-58 - 10, xmin=transition_fraction,\n xmax=1, color='k', lw=3, ls='--', zorder=1)\n #--\n\n sigma = data['x_shift'][G225M_A].std()\n\n ax2.xaxis.set_ticklabels(['' for item in ax2.xaxis.get_ticklabels()])\n\n ax3 = fig.add_subplot(7, 1, 3)\n ax3.plot(data['date'][G285M_A], data['x_shift'][G285M_A], 'yo', label='G285M')\n ax3.plot(data['date'][G285M_B], data['x_shift']\n [G285M_B], 'yo', markeredgecolor='k')\n ax3.plot(data['date'][G285M_C], data['x_shift']\n [G285M_C], 'yo', markeredgecolor='k')\n ax3.axhline(y=0, color='red')\n ax3.axhline(y=58, color='k', lw=3, ls='--', zorder=1, label='Search Range')\n ax3.axhline(y=-58, color='k', lw=3, ls='--', zorder=1)\n\n sigma = data['x_shift'][G285M_A].std()\n\n ax3.xaxis.set_ticklabels(['' for item in ax3.xaxis.get_ticklabels()])\n\n ax4 = fig.add_subplot(7, 1, 4)\n ax4.plot(data['date'][G230L_A], data['x_shift'][G230L_A], 'go', label='G230L')\n ax4.plot(data['date'][G230L_B], data['x_shift']\n [G230L_B], 'go', markeredgecolor='k')\n ax4.plot(data['date'][G230L_C], data['x_shift']\n [G230L_C], 'go', markeredgecolor='k')\n\n ax4.axhline(y=0, color='red')\n\n #--second timeframe\n transition_fraction = (55535.0 - data['date'].min()) / \\\n (data['date'].max() - data['date'].min())\n\n ax4.axhline(y=58, xmin=0, xmax=transition_fraction, color='k',\n lw=3, ls='--', zorder=1, label='Search Range')\n ax4.axhline(y=-58, xmin=0, xmax=transition_fraction,\n color='k', lw=3, ls='--', zorder=1)\n\n ax4.axhline(y=58 - 40, xmin=transition_fraction, xmax=1,\n color='k', lw=3, ls='--', zorder=1)\n ax4.axhline(y=-58 - 40, xmin=transition_fraction,\n xmax=1, color='k', lw=3, ls='--', zorder=1)\n #--\n ax4.xaxis.set_ticklabels(['' for item in ax3.xaxis.get_ticklabels()])\n sigma = data['x_shift'][G230L_A].std()\n\n ax.set_title('NUV SHIFT1[A/B/C]')\n for axis, index in zip([ax, ax2, ax3, ax4], [G185M, G225M, G285M, G230L]):\n #axis.set_ylim(-110, 110)\n axis.set_xlim(data['date'].min(), data['date'].max() + 50)\n axis.set_ylabel('SHIFT1[A/B/C] (pixels)')\n fit, ydata, parameters, err = fit_data(\n data['date'][index], data['x_shift'][index])\n axis.plot(ydata, fit, 'k-', lw=3, label='%3.5fx' % (parameters[0]))\n axis.legend(bbox_to_anchor=(1,1), loc='upper left', ncol=1, numpoints=1, shadow=True, fontsize=12)\n\n ax4.set_xlabel('date')\n\n ax = fig.add_subplot(7, 1, 5)\n ax.plot(data['date'][NUV], data['x_shift'][NUV], '.')\n fit, 
ydata, parameters, err = fit_data(\n data['date'][NUV], data['x_shift'][NUV])\n ax.plot(ydata, fit, 'k-', lw=3, label='%3.5fx' % (parameters[0]))\n ax.legend(bbox_to_anchor=(1,1), loc='upper left', ncol=1,numpoints=1, shadow=True)\n ax.set_ylabel('All NUV')\n ax.xaxis.set_ticklabels(['' for item in ax.xaxis.get_ticklabels()])\n ax.set_xlim(data['date'].min(), data['date'].max() + 50)\n #ax.set_ylim(-110, 110)\n\n mirrora = np.where((data['opt_elem'] == 'MIRRORA')\n & (data['x_shift'] > 0))[0]\n ax = fig.add_subplot(7, 1, 6)\n ax.plot(data['date'][mirrora], data['x_shift'][mirrora], '.')\n fit, ydata, parameters, err = fit_data(\n data['date'][mirrora], data['x_shift'][mirrora])\n ax.plot(ydata, fit, 'k-', lw=3, label='%3.5fx' % (parameters[0]))\n ax.legend(bbox_to_anchor=(1,1), loc='upper left', ncol=1,numpoints=1, shadow=True)\n ax.set_xlim(data['date'].min(), data['date'].max() + 50)\n ax.set_ylabel('MIRRORA')\n ax.set_xlabel('date')\n #ax.set_ylim(460, 630)\n\n mirrorb = np.where((data['opt_elem'] == 'MIRRORB')\n & (data['x_shift'] > 0))[0]\n ax = fig.add_subplot(7, 1, 7)\n ax.plot(data['date'][mirrorb], data['x_shift'][mirrorb], '.')\n fit, ydata, parameters, err = fit_data(\n data['date'][mirrorb], data['x_shift'][mirrorb])\n ax.plot(ydata, fit, 'k-', lw=3, label='%3.5fx' % (parameters[0]))\n ax.legend(bbox_to_anchor=(1,1), loc='upper left', ncol=1,numpoints=1, shadow=True)\n ax.set_xlim(data['date'].min(), data['date'].max() + 50)\n ax.set_ylabel('MIRRORB')\n ax.set_xlabel('date')\n #ax.set_ylim(260, 400)\n\n remove_if_there(os.path.join(out_dir, 'NUV_shifts.png'))\n fig.savefig(os.path.join(out_dir, 'NUV_shifts.png'),\n bbox_inches='tight',\n pad_inches=.5)\n plt.close(fig)\n os.chmod(os.path.join(out_dir, 'NUV_shifts.png'),0o766)\n\n ##############\n\n for elem in ['MIRRORA', 'MIRRORB']:\n mirror = np.where((data['opt_elem'] == elem)\n & (data['x_shift'] > 0))[0]\n fig = plt.figure(figsize=(8, 4))\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['date'][mirror], data['x_shift'][mirror], '.')\n fit, ydata, parameters, err = fit_data(data['date'][mirror],\n data['x_shift'][mirror])\n ax.plot(ydata, fit, 'r-', lw=3, label='%3.5f +/- %3.5f' %\n (parameters[0], err))\n ax.legend(numpoints=1, shadow=True, loc='upper left')\n ax.set_xlim(data['date'].min(), data['date'].max() + 50)\n #ax.set_ylim(460, 630)\n remove_if_there(os.path.join(out_dir, '{}_shifts.png'.format(elem.upper())))\n fig.savefig(os.path.join(out_dir, '{}_shifts.png'.format(elem.upper())))\n plt.close(fig)\n os.chmod((os.path.join(out_dir, '{}_shifts.png'.format(elem.upper()))),0o766)\n\n\n for grating in list(set(data['opt_elem'])):\n fig = plt.figure()\n ax = fig.add_axes([.1, .1, .75, .8])\n ax.set_title(grating)\n for cenwave in list(set(data['cenwave'])):\n index = np.where((data['opt_elem'] == grating) &\n (data['cenwave'] == cenwave))[0]\n if not len(index):\n continue\n\n xdata = np.array(map(int, data['date'][index]))\n ydata = data['x_shift'][index]\n new_ydata = []\n new_xdata = []\n for day in range(xdata.min(), xdata.max() + 1):\n index = np.where(xdata == day)[0]\n #n_times = len(index)\n median = np.median(ydata[index])\n new_ydata.append(median)\n new_xdata.append(day)\n\n if cenwave < 1700:\n ms = 6\n ylim = (-140, 80)\n else:\n ms = 10\n ylim = (-80, 80)\n\n ax.plot(new_xdata, new_ydata, '.', ms=ms, alpha=.7, label='%d' %\n (cenwave))\n\n plt.legend(numpoints=1, shadow=True, bbox_to_anchor=(1.05, 1),\n loc='upper left', borderaxespad=0., prop={'size': 8})\n ax.set_xlim(data['date'].min(), 
data['date'].max() + 50)\n #ax.set_ylim(ylim[0], ylim[1])\n remove_if_there(os.path.join(out_dir, '%s_shifts_color.pdf' %\n (grating)))\n fig.savefig(os.path.join(out_dir, '%s_shifts_color.pdf' %\n (grating)))\n plt.close(fig)\n os.chmod(os.path.join(out_dir, '%s_shifts_color.pdf' %\n (grating)), 0o766)\n\n#----------------------------------------------------------\n\ndef make_plots_2(data, out_dir):\n \"\"\" Making the plots for the shift2 value\n \"\"\"\n\n sorted_index = np.argsort(data['date'])\n data = data[sorted_index]\n '''\n for cenwave in set(data['cenwave']):\n cw_index = np.where(data['cenwave'] == cenwave)\n all_segments = set(data[cw_index]['segment'])\n n_seg = len(all_segments)\n\n fig = plt.figure()\n fig.suptitle('Shift2/{}'.format(cenwave))\n\n for i, segment in enumerate(all_segments):\n print cenwave, segment\n index = np.where( (data['segment'] == segment) &\n (data['cenwave'] == cenwave) )\n\n ax = fig.add_subplot(n_seg, 1, i+1)\n ax.plot(data[index]['date'], data[index]['y_shift'], 'o')\n ax.set_xlabel('date')\n ax.set_ylabel('SHIFT2 {}'.format(segment))\n\n fig.savefig(os.path.join(out_dir, 'shift2_{}.png'.format(cenwave)))\n plt.close(fig)\n '''\n\n for cenwave in set(data['cenwave']):\n cw_index = np.where(data['cenwave'] == cenwave)\n all_segments = set(data[cw_index]['segment'])\n n_seg = len(all_segments)\n\n fig = plt.figure()\n fig.suptitle('Shift2 vs Shift1 {}'.format(cenwave))\n\n for i, segment in enumerate(all_segments):\n index = np.where( (data['segment'] == segment) &\n (data['cenwave'] == cenwave) )\n\n ax = fig.add_subplot(n_seg, 1, i+1)\n ax.plot(data[index]['x_shift'], data[index]['y_shift'], 'o')\n ax.set_xlabel('x_shift')\n ax.set_ylabel('y_shift')\n #ax.set_ylabel('SHIFT2 vs SHIFT1 {}'.format(segment))\n #ax.set_ylim(-20, 20)\n remove_if_there(os.path.join(out_dir, 'shift_relation_{}.png'.format(cenwave)))\n fig.savefig(os.path.join(out_dir, 'shift_relation_{}.png'.format(cenwave)))\n plt.close(fig)\n os.chmod(os.path.join(out_dir, 'shift_relation_{}.png'.format(cenwave)), 0o766)\n\n\n#----------------------------------------------------------\n\ndef fp_diff(data):\n index = np.where((data['detector'] == 'FUV'))[0]\n data = data[index]\n\n datasets = list(set(data['dataset']))\n datasets.sort()\n\n all_cenwaves = set(data['cenwave'])\n diff_dict = {}\n for cenwave in all_cenwaves:\n diff_dict[cenwave] = []\n\n ofile = open(os.path.join(out_dir, 'shift_data.txt'), 'w')\n for name in datasets:\n a_shift = None\n b_shift = None\n try:\n a_shift = data['x_shift'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVA'))[0]][0]\n b_shift = data['x_shift'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVB'))[0]][0]\n except IndexError:\n continue\n\n cenwave = data['cenwave'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVA'))[0]][0]\n opt_elem = data['opt_elem'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVA'))[0]][0]\n fppos = data['fppos'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVA'))[0]][0]\n mjd = data['date'][np.where((data['dataset'] == name) &\n (data['segment'] == 'FUVA'))[0]][0]\n diff = a_shift - b_shift\n\n diff_dict[cenwave].append((mjd, diff))\n ofile.write('%5.5f %s %d %d %3.2f %3.2f \\n' %\n (mjd, opt_elem, cenwave, fppos, a_shift, b_shift))\n\n for cenwave in diff_dict:\n all_diff = [line[1] for line in diff_dict[cenwave]]\n all_mjd = [line[0] for line in diff_dict[cenwave]]\n\n if not len(all_diff):\n continue\n\n plt.figure(figsize=(8, 5))\n 
plt.plot(all_mjd, all_diff, 'o', label='%s' % (cenwave))\n plt.xlabel('MJD')\n plt.ylabel('SHIFT1 difference (pixels)')\n plt.title(cenwave)\n plt.legend(shadow=True, numpoints=1, loc='upper left')\n remove_if_there(os.path.join(out_dir, 'difference_%s.pdf' % (cenwave)))\n plt.savefig(os.path.join(out_dir, 'difference_%s.pdf' % (cenwave)))\n plt.close()\n os.chmod(os.path.join(out_dir, 'difference_%s.pdf' % (cenwave)), 0o766)\n\n # for cenwave in diff_dict:\n # all_diff = diff_dict[cenwave]\n # print all_diff\n # if not len(all_diff): continue\n # plt.plot(all_diff,bins=100)\n # plt.ylabel('Frequency (counts)')\n # plt.xlabel('SHIFT1A difference (pixels)')\n # plt.title(cenwave)\n # plt.savefig('plot_%s.pdf'%(cenwave) )\n\n # plt.clf()\n\n#----------------------------------------------------------\n\ndef monitor():\n \"\"\"Run the entire suite of monitoring\n \"\"\"\n\n logger.info(\"starting monitor\")\n\n settings = open_settings()\n\n webpage_dir = os.path.join(settings['webpage_location'], 'shifts')\n monitor_dir = os.path.join(settings['monitor_location'], 'Shifts')\n\n for place in [webpage_dir, monitor_dir]:\n if not os.path.exists(place):\n logger.debug(\"creating monitor location: {}\".format(place))\n os.makedirs(place)\n\n flash_data = make_shift_table(settings['connection_string'])\n make_plots(flash_data, monitor_dir)\n make_plots_2(flash_data, monitor_dir)\n #fp_diff(flash_data)\n\n for item in glob.glob(os.path.join(monitor_dir, '*.p??')):\n remove_if_there(os.path.join(webpage_dir, os.path.basename(item)))\n shutil.copy(item, webpage_dir)\n\n logger.info(\"finish monitor\")\n\n#----------------------------------------------------------\n'''\ndef make_shift_table(files):\n data = []\n for filename in files:\n for flash in pull_flashes(filename):\n if not []:\n keys = flash.keys()\n data.append(flash)\n print(flash)\n\n data = Table(rows=data, names=keys)\n\n return data\n'''\n","sub_path":"cos_monitoring/osm/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":24951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228844727","text":"# 84. Largest Rectangle in Histogram\n# Given n non-negative integers representing the histogram's bar' \\\n# height where the width of each bar is 1, find the area of largest rectangle in the histogram.\n\n# Input: [2,1,5,6,2,3]\n# Output: 10\n\n# idea - divide and conquer\nclass Solution(object):\n def __init__(self):\n self.res=[]\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n if not heights:\n return 0\n\n def m(heights):\n if not heights:\n return\n r = min(heights) * len(heights)\n self.res.append(r)\n self.largestRectangleArea(heights[:heights.index(min(heights))])\n self.largestRectangleArea(heights[heights.index(min(heights)) + 1:])\n\n m(heights)\n return max(self.res) if self.res else []\n\n # idea - The stack maintain the indexes of buildings with ascending height.\n # Before adding a new building pop the building who is taller than the new one.\n # The building popped out represent the height of a rectangle with the new building\n # as the right boundary and the current stack top as the left boundary.\n # Calculate its area and update ans of maximum area. 
Boundary is handled using dummy buildings.\n def largestRectangleArea1(self, height):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n\n height.append(0)\n stack = [-1]\n ans = 0\n for i in xrange(len(height)):\n # greedily to find the highest column and then find the number how many columns taller than that one as width\n while height[i] < height[stack[-1]]:\n h = height[stack.pop()]\n w = i - stack[-1] - 1\n ans = max(ans, h * w)\n stack.append(i)\n # height.pop()\n return ans\n\n\nif __name__=='__main__':\n print(Solution().largestRectangleArea([2,1,5,6,2,3]))\n","sub_path":"Top_Question/largestrectangle.py","file_name":"largestrectangle.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583372023","text":"import os\nimport util\nfrom shutil import copyfile\n\nproject = 'calculus/'\nsub = '\\graphicspath{{/Users/telliott/Github/figures/}}'\n\npath = '/Users/telliott/Github/' + project + 'files'\nL = os.listdir(path)\nL.sort()\n\nfor fn in L:\n if fn.startswith('.'):\n continue\n pL = list()\n print(fn)\n data = util.load(path + '/' + fn)\n data = data.strip().split('\\n')\n for line in data:\n if line.startswith('\\graphicspath'):\n pL.append(sub)\n else:\n pL.append(line)\n fh = open(path + '/' + fn, 'w')\n fh.write('\\n'.join(pL))\n fh.close()\n\n","sub_path":"scripts/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107558367","text":"'''\n3\n3 1 2\n'''\nn = int(input())\na = [int(x) for x in input().split()]\ncnt = 1\nwhile True:\n for i in range(n):\n if a[i+1] a[x]:\n break\n \n\n","sub_path":"HackerEarth/April_circuit_18_1.py","file_name":"April_circuit_18_1.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203541784","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n nums = []\n current = self\n while current is not None:\n nums.append(str(current.val))\n current = current.next\n return ' -> '.join(nums)\n\n __repr__ = __str__\n\n\nclass Solution(object):\n\n \"\"\"\n You are given two linked lists representing two non-negative numbers.\n The digits are stored in reverse order and each of their nodes contain\n a single digit. 
Add the two numbers and return it as a linked list.\n\n Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n Output: 7 -> 0 -> 8\n \"\"\"\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n\n test:\n >>> a, a.next, a.next.next = ListNode(2), ListNode(4), ListNode(3)\n >>> b, b.next, b.next.next = ListNode(5), ListNode(6), ListNode(4)\n >>> Solution().addTwoNumbers(a, b)\n 7 -> 0 -> 8\n \"\"\"\n head = ListNode(0)\n current, carray = head, 0\n\n while l1 is not None or l2 is not None:\n val = carray\n if l1 is not None:\n val += l1.val\n l1 = l1.next\n if l2 is not None:\n val += l2.val\n l2 = l2.next\n carray, val = val/10, val % 10\n current.next = ListNode(val)\n current = current.next\n\n if carray == 1:\n current.next = ListNode(1)\n\n return head.next\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"Python/002_Add_Two_Numbers.py","file_name":"002_Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"216501202","text":"import keras\nfrom radical.radical_data_helpers import load_data\nfrom sklearn.metrics import confusion_matrix,classification_report\nimport pandas as pd\n\nif __name__ == '__main__':\n # model = keras.models.load_model('./checkpoint/cnn_sentence_weights.010-0.9652.hdf5')\n # model = keras.models.load_model('./checkpoint/cnn_word_pinyin_weights.009-0.9660.hdf5')\n # model = keras.models.load_model('./checkpoint/cnn_sentence_pinyin_weights.007-0.9593.hdf5')\n model = keras.models.load_model('./checkpoint/cnn_radical_weights_3.028-0.8901.hdf5')\n\n model.summary()\n x, y, x_eval, y_eval,sentence_raw,max_length = load_data()\n # x_pinyin, y, embeddings_matrix_2, x_pinyin_eval, y_eval = load_pinyin_data()\n # x, y, embeddings_matrix, x_eval, y_eval,sentence_raw = load_word_data()\n # loss_and_metric = model.evaluate(x_eval,y_e val,batch_size=64)\n prediction = model.predict(x_eval)\n y_pre = []\n for i in prediction:\n if i[0]>i[1]:\n y_pre.append(0)\n else:\n y_pre.append(1)\n evaluate = pd.DataFrame({'review':sentence_raw,'trueL':y_eval,'preL':y_pre})\n evaluate.to_csv('./radical_wubi_evaluate-pianpang.csv',header=True,index=False)\n report = classification_report(y_eval,y_pre,digits=3)\n print(report)\n print('#########################################')\n metric = confusion_matrix(y_eval,y_pre)\n print(\"混淆矩阵:\")\n print(metric)","sub_path":"radical/radical_eval.py","file_name":"radical_eval.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296896272","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom utils import gen_pdf\n\n\"\"\"\n# TODO: Need to learn how `norm.fit()` work -> maximum likelihood\n\nAutomatic sensor modeling flow:\n1. Move to a random position in space\n2. Take 200 samples (dataset)\n3. Using `norm.fit()` to estimate mu, std\n4. Do steps 1. to 3. for two more different random positions in space. (Now there's three mu values and std values)\n5. Get the avg. of all std values as the model parameter\n6. 
using this `std`, do `norm.fit()` again to refine the mu estimates.\n\n\"\"\"\n\nif __name__ == \"__main__\":\n np.random.seed(123)\n\n # ** Model the sensor **\n\n mu, sigma = 20, 1 # mean and standard deviation\n\n sigma = sigma\n\n s = np.random.normal(mu, sigma, 200)\n\n bins = np.arange(-3, 3 + 2) - 0.5\n bins += mu\n print(bins)\n\n x_pos = np.linspace(-3, 3, 100)\n x_pos += mu\n\n # dual\n\n fig, ax1 = plt.subplots()\n plt.title('Ideal model')\n\n color = 'blue'\n ax1.set_axisbelow(True)\n ax1.grid(axis='x')\n ax1.set_xlabel('Sample (cm)')\n ax1.set_ylabel('Frequency', color=color)\n count, bins, ignored = ax1.hist(s, bins, density=False, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n # ax2.set_axisbelow(True)\n ax2.grid(axis='y')\n\n pdf = 1/(sigma * np.sqrt(2 * np.pi)) * \\\n np.exp(- (x_pos - mu)**2 / (2 * sigma**2))\n\n color = 'red'\n # we already handled the x-label with ax1\n ax2.set_ylabel('Probability', color=color)\n ax2.plot(x_pos, pdf, color=color,\n label='Ideal model P() - $N(\\mu={},\\sigma={})$'.format(mu, sigma))\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.legend()\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.savefig('sm_ideal.pdf')\n # plt.show()\n plt.close()\n\n # ** acquire synthetic samples from the sensor\n n_samples = 200\n mesurement = 50\n std = sigma\n sensor_data = np.random.normal(mesurement, std, n_samples)\n\n # set sensor resolution to 1 cm\n sensor_data = np.round(sensor_data)\n\n bins = np.arange(-3, 3 + 2) - 0.5\n bins = bins\n bins += mesurement\n\n bin_pos = np.arange(-3, 3+1) + mesurement\n print(bin_pos)\n\n fig, axs = plt.subplots(1, 2)\n fig.suptitle('Sensor mesurements')\n ax = axs[0]\n\n count, bins, ignored = ax.hist(sensor_data, bins)\n\n print(np.min(sensor_data))\n\n ax.set_title('Frequency')\n\n ax.set_axisbelow(True)\n ax.grid()\n\n ax = axs[1]\n prob = count/n_samples\n ax.bar(bin_pos, prob)\n\n mu_est, sigma_est = norm.fit(sensor_data)\n mu_est = np.round(mu_est)\n sigma_est = np.round(sigma_est)\n\n x_pos = np.linspace(-3, 3, 100) + mesurement\n\n ax.plot(x_pos, gen_pdf(x_pos, mu_est, sigma_est), c='r',\n label='Ideal model P() - $N(\\mu={},\\sigma={})$'.format(mu_est, sigma_est))\n\n ax.legend()\n ax.set_axisbelow(True)\n ax.grid()\n ax.set_title('Probability')\n ax.set_xlabel('Z (cm)')\n\n plt.show()\n","sub_path":"MyCode/Bayes Filter/sensor_model.py","file_name":"sensor_model.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403030020","text":"import os\nimport numpy as np\nimport pandas as pd\n\n\ndef exhaust_possible_seeds(tourney_slots_df, seeds, max_depth_seeds = None):\n \"\"\"\n Recursive function to get all of the possible seeds for each slot.\n Can be used to add a \"possible_teams\" column to the tournament slots data\n\n Parameters\n ----------\n tourney_slots_df : DataFrame\n The provided tournament slots data.\n parent_seeds : list\n A list of seeds from each round. 
(When rd > 1 they are really the names of rounds).\n\n Returns\n -------\n list\n A list of all possible seeds that could reach that round.\n\n \"\"\"\n \n ## Get slots that seeds came from (earlier in the tourney)\n earlier_rd_rows = tourney_slots_df[tourney_slots_df['Slot'].isin(seeds)].copy()\n \n ## Get the round that these are still \"seeds\"\n seed_rd_rows = tourney_slots_df[(tourney_slots_df['StrongSeed'].isin(seeds)) |\n (tourney_slots_df['WeakSeed'].isin(seeds))].copy()\n ## Find the earliest round that the seed is in\n min_round_seeds = np.min(seed_rd_rows['round'])\n \n ## Find the ones in this dataframe that are new\n new_seeds = list((set(earlier_rd_rows['StrongSeed']) | set(earlier_rd_rows['WeakSeed'])) - set(seeds))\n if min_round_seeds == 1:\n seeds_reached_max_depth = list(set(seeds) - set(new_seeds))\n else:\n seeds_reached_max_depth = None\n \n if len(new_seeds) > 0:\n ## If there's another level deeper, add new seeds we got this time, and run again\n return exhaust_possible_seeds(tourney_slots_df, new_seeds, seeds_reached_max_depth)\n else:\n ## If this is the deepest level, don't run it again, just return the list\n if min_round_seeds == 0:\n possible_seeds = seeds\n if max_depth_seeds is not None:\n possible_seeds.extend(max_depth_seeds)\n else:\n possible_seeds = seeds\n \n possible_seeds = list(set([s for s in possible_seeds if s not in tourney_slots_df['Slot']])) ## dedup\n possible_seeds.sort()\n \n return possible_seeds\n\n\ndef get_round_met(tourney_slots_df, seed_1, seed_2):\n \"\"\"\n Get the round that two seeds meet in the slots dataframe\n\n Parameters\n ----------\n tourney_slots_df : DataFrame\n The provided tournament slots data.\n seed_1 : str\n Seed of team 1.\n seed_2 : str\n Seed of team 2.\n\n Returns\n -------\n rd : int\n Minimum round number that two teams can feasibly meet.\n \"\"\"\n \n teams_in_slot = tourney_slots_df.possible_teams.apply(lambda x: ((seed_1 in x) and \n (seed_2 in x)))\n slots_with_teams = tourney_slots_df[teams_in_slot]\n \n rd = np.min(slots_with_teams['round'])\n \n return rd\n\n\ndef find_round_prob(sub_df, probs_df, team_id, rnd):\n \"\"\"\n Get the probability that a given team reaches a round\n \n Parameters\n ----------\n sub_df : DataFrame\n Submission dataframe with round added.\n team_name : str\n Name of team.\n rnd : int\n Round of interest to get probability for.\n \n Returns\n -------\n rnd_prob : float\n Probability that a team reaches the round.\n \"\"\"\n ## Get all possible matchups for the team in prior round\n team_round_preds = sub_df[((sub_df['TeamID_1'] == team_id) | \n (sub_df['TeamID_2'] == team_id)) &\n (sub_df['round'] == rnd-1)].copy()\n \n ## Defining a dict of column names corresponding to the round before\n rd_cols = {0: 'Round0',\n 1: 'Round1',\n 2: 'Round2',\n 3: 'Sweet16',\n 4: 'Elite8',\n 5: 'Final4',\n 6: 'Final',\n 7: 'Champ'}\n \n if len(team_round_preds) == 1:\n ## only 1 matchup to worry about, just get the corresponding prob for that team\n if team_id in list(team_round_preds['TeamID_1']):\n rnd_prob = float(team_round_preds['Pred'])\n else:\n rnd_prob = 1-float(team_round_preds['Pred'])\n if rnd > 0:\n team_prob_reaching = float(probs_df[probs_df['TeamID'] == team_id][rd_cols[rnd-1]])\n rnd_prob = rnd_prob*team_prob_reaching\n elif len(team_round_preds) == 0:\n ## If they didn't have any games in the prior round, \n ### probability is 1 (applies to non-play-in teams making round 1)\n rnd_prob = 1\n else:\n ## Using a json-like structure containing probabilities that an opposing team 
would make that round\n ### and probability that the team of interest would beat that team\n conditional_team_probs = {}\n \n ## Keys for the dict: possible teams\n possible_teams = list((set(team_round_preds['TeamID_1']) | \n set(team_round_preds['TeamID_2'])) - set([team_id]))\n \n ## Used for lookup in value 1 below\n prob_reaching_df = (probs_df[probs_df['TeamID'].isin(possible_teams)]\n [['TeamID', rd_cols[rnd-1]]]\n .rename(columns = {rd_cols[rnd-1]: 'prob_reaching_rd'}))\n \n for t in possible_teams:\n within_dict = {}\n ## Value 1 (conditional part): chances that the opposing teams make the round\n within_dict['prob_reaching'] = float(prob_reaching_df[prob_reaching_df['TeamID'] == t]['prob_reaching_rd']) \n \n ## Value 2: win probability for the team of interest over the possible opposing team\n if team_id < t:\n win_prob = float(team_round_preds[team_round_preds['TeamID_2'] == t]['Pred'])\n else:\n win_prob = 1-float(team_round_preds[team_round_preds['TeamID_1'] == t]['Pred'])\n within_dict['win_prob'] = win_prob\n conditional_team_probs[t] = within_dict\n \n ### Get probability for the team for the prior round\n team_prob_reaching = float(probs_df[probs_df['TeamID'] == team_id][rd_cols[rnd-1]])\n \n ### Calculate probability of making to the round of interest\n rnd_games_probs = (np.sum([(t['prob_reaching']*t['win_prob'])\n for t in conditional_team_probs.values()]))\n \n rnd_prob = rnd_games_probs*team_prob_reaching\n \n return rnd_prob\n\n\ndef compute_conditional_probs(sub_filepath, league = 'men'):\n \"\"\"\n Function to take the submission file and calculate conditional probabilities for each team/round.\n\n :param sub_filepath (str): location of Kaggle data submission\n :param league (str): either 'men' or 'women'\n :return: DataFrame containing probabilities for each team to make each round\n\n \"\"\"\n \n ## Prefix according to league\n if league == 'men':\n prefix = 'M'\n else:\n prefix = 'W'\n \n ## Get the sample submission and break out the ID\n sub_df = pd.read_csv(sub_filepath)\n sub_df['Season'] = sub_df['ID'].apply(lambda x: x.split('_')[0]).astype(int)\n sub_df['TeamID_1'] = sub_df['ID'].apply(lambda x: x.split('_')[1]).astype(int)\n sub_df['TeamID_2'] = sub_df['ID'].apply(lambda x: x.split('_')[2]).astype(int)\n \n ## Get the seeds and slots\n tourney_seeds_df = pd.read_csv(f\"{prefix}NCAATourneySeeds.csv\")\n tourney_seeds_df = tourney_seeds_df[tourney_seeds_df['Season'] == 2021].copy()\n tourney_slots_df = pd.read_csv(f\"{prefix}NCAATourneySlots.csv\")\n if league == 'men':\n tourney_slots_df = tourney_slots_df[tourney_slots_df['Season'] == 2021].copy()\n \n ## Merge in seeds to submission\n tourney_seeds_df = tourney_seeds_df.rename(columns = {'TeamID': 'TeamID_1', 'Seed': 'Seed_1'})\n sub_df = sub_df.merge(tourney_seeds_df, on = ['TeamID_1', 'Season'])\n tourney_seeds_df = tourney_seeds_df.rename(columns = {'TeamID_1': 'TeamID_2', 'Seed_1': 'Seed_2'})\n sub_df = sub_df.merge(tourney_seeds_df, on = ['TeamID_2', 'Season'])\n \n ## Add a field to slots with the possible teams that could reach that round\n tourney_slots_df['round'] = tourney_slots_df['Slot'].apply(lambda x: int(x[1]) if x.startswith('R')\n else 0)\n tourney_slots_df['possible_teams'] = tourney_slots_df.apply(lambda x: \n exhaust_possible_seeds(tourney_slots_df,\n [x['StrongSeed'],\n x['WeakSeed']]), axis = 1)\n \n ## Add a column to tourney results with the round that the 2 teams meet\n sub_df['round'] = sub_df.apply(lambda x: get_round_met(tourney_slots_df, x['Seed_1'], x['Seed_2']),\n axis = 
1)\n \n ## Get the team names to merge in\n team_names_df = pd.read_csv(f\"{prefix}Teams.csv\")\n \n ## Merge team 1 name\n team_names_df = (team_names_df[['TeamID', 'TeamName']]\n .rename(columns={'TeamName': 'TeamName_1',\n 'TeamID': 'TeamID_1'}))\n sub_df = sub_df.merge(team_names_df, how='left', on='TeamID_1')\n\n ## Merge team 2 name\n team_names_df = team_names_df.rename(columns={'TeamName_1': 'TeamName_2',\n 'TeamID_1': 'TeamID_2'})\n sub_df = sub_df.merge(team_names_df, how='left', on='TeamID_2')\n \n ## For each team, go through each round and calculate the prob. that they'll be in the next round\n ## based on the submission.\n team_names = list(set(sub_df['TeamName_1']) | set(sub_df['TeamName_2']))\n team_names.sort()\n probs_df = pd.DataFrame({'TeamName': team_names})\n team_names_df = team_names_df.rename(columns={'TeamName_2': 'TeamName',\n 'TeamID_2': 'TeamID'})\n probs_df = probs_df.merge(team_names_df, on = 'TeamName')\n probs_df['Round0'] = 1\n \n ## Fill in probabilities, round by round\n probs_df['Round1'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 1))\n probs_df['Round2'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 2))\n probs_df['Sweet16'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 3))\n probs_df['Elite8'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 4))\n probs_df['Final4'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 5))\n probs_df['Final'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 6))\n probs_df['Champ'] = probs_df['TeamID'].apply(lambda x: find_round_prob(sub_df, probs_df, x, 7))\n \n probs_df = probs_df.drop(columns = ['Round0'])\n \n return probs_df","sub_path":"viz/bracket_builder/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":10875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91014342","text":"import math\nimport os\nimport pickle\nimport sys\nfrom itertools import product\nfrom os.path import join as pjoin, exists\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport random\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim import SGD\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nfrom torchvision.utils import make_grid\n\nfrom augment import FundusAOICrop, CompostImageAndLabel\nfrom model import mean_iou, Mrcnn\nfrom util.files import assert_exist, check_exist\nfrom util.logs import get_logger\nfrom util.npdraw import draw_bounding_box\nfrom util.segmentation2bbox import segmentation2bbox\nfrom model import restore_box_reg\nimport matplotlib.pyplot as plt\nlogger = get_logger('ma detection')\nlogWriter = SummaryWriter(logdir=f'log/fuck')\n\ndebug = os.getenv('DEBUG')\n\n\nclass VGG(nn.Module):\n def __init__(self, init_weights=True):\n super(VGG, self).__init__()\n self.features = self.make_layers(\n [64, 64, 'M', # 3, 5, 6\n 128, 128, 'M', # 10, 14 16\n 256, 256, 512, 'M', # 24, 32, 40, 44\n 512, 512, 512, 'M', # 60, 76, 92, 100\n # 512, 512, 512, 'M',\n ], # 132, 164, 196, 212\n batch_norm=True)\n\n self.rpn_sliding_window = nn.Conv2d(\n 512, 256, 1, 1, 0\n )\n self.box_classification = nn.Conv2d(256, 2 * 1, 1)\n self.box_regression = nn.Conv2d(256, 2 * 1, 1)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n rpn_feature = self.rpn_sliding_window(x)\n 
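        # Editor's note (added comments, not part of the original record): this
        # is a Faster R-CNN style RPN head -- a shared 1x1 "sliding window"
        # conv (rpn_feature) feeding two sibling 1x1 convs. With a single
        # anchor shape per cell (2 * 1 output channels each), the classifier
        # emits background/object scores and the regressor emits (row, col)
        # offsets, matching the (n_archer, 2, H, W) targets that BBloader
        # builds further down in this file.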
box_predict = self.box_classification(rpn_feature)\n box_regression = self.box_regression(rpn_feature)\n return box_predict, box_regression\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n @staticmethod\n def make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n dilation = 1\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'D':\n dilation = 2\n else:\n conv2d = nn.Conv2d(\n in_channels, v,\n kernel_size=3,\n padding=dilation,\n dilation=dilation)\n if batch_norm:\n layers += [\n conv2d, nn.BatchNorm2d(v),\n nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\nclass ChallengeDB:\n def __init__(self,\n root='/home/d/data/challenge/A. Segmentation/',\n split=None):\n self.split = split\n self.transform = [\n FundusAOICrop(),\n ]\n if split == 'train':\n self.dataFiles = tuple((\n assert_exist(pjoin(\n root,\n f'1. Original Images/a. Training Set/IDRiD_{i:02d}.jpg')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/a. Training Set/'\n f'1. Microaneurysms/IDRiD_{i:02d}_MA.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/a. Training Set/'\n f'2. Haemorrhages/IDRiD_{i:02d}_HE.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/a. Training Set/'\n f'3. Hard Exudates/IDRiD_{i:02d}_EX.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/a. Training Set/'\n f'4. Soft Exudates/IDRiD_{i:02d}_SE.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/a. Training Set/'\n f'5. Optic Disc/IDRiD_{i:02d}_OD.tif'))\n ) for i in range(1, 55))\n elif split == 'test':\n self.dataFiles = tuple((\n assert_exist(pjoin(\n root,\n f'1. Original Images/b. Testing Set/IDRiD_{i:02d}.jpg')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/'\n f'b. Testing Set/1. Microaneurysms/IDRiD_{i:02d}_MA.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/'\n f'b. Testing Set/2. Haemorrhages/IDRiD_{i:02d}_HE.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/b. Testing Set/'\n f'3. Hard Exudates/IDRiD_{i:02d}_EX.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/b. Testing Set/'\n f'4. Soft Exudates/IDRiD_{i:02d}_SE.tif')),\n check_exist(pjoin(\n root,\n f'2. All Segmentation Groundtruths/b. Testing Set/'\n f'5. 
Optic Disc/IDRiD_{i:02d}_OD.tif'))\n ) for i in range(55, 82))\n else:\n raise Exception(f'split ({split}) not recognized!')\n self.cacheTransform = CompostImageAndLabel(self.transform)\n self.cacheDir = 'runs/cache/'\n\n def _getCacheItem(self, index):\n cacheName = pjoin(\n self.cacheDir,\n f'ma_detection.{self.split}.{index}.pkl')\n if False and exists(cacheName):\n result = pickle.load(open(cacheName, 'rb'))\n return result\n else:\n logger.debug(f'miss {cacheName}')\n files = self.dataFiles[index]\n images = []\n for i in files:\n if i is None:\n images.append(np.zeros(images[-1].shape[:2], np.uint8))\n else:\n images.append(np.array(Image.open(i)))\n record = self.cacheTransform(*images)\n try:\n pickle.dump(record, open(cacheName, 'wb'))\n except Exception as e:\n os.remove(cacheName)\n raise e\n return record\n\n def __getitem__(self, index):\n logger.debug(f'getting {index}')\n if index >= len(self.dataFiles):\n raise IndexError()\n index = index % len(self.dataFiles)\n images = self._getCacheItem(index)\n image = images[0]\n xx = (np.zeros(images[1].shape, images[1].dtype), *images[1:])\n labels = np.array(xx)\n labels = labels.argmax(0)\n return image, labels\n\n def __len__(self):\n return len(self.dataFiles)\n\n\ndef slide_image(img, size, overlap=0.2):\n image_patch = []\n top_left_points = []\n stride = (1 - overlap) * size\n\n def calculate_start_p(stride, size):\n num_step = math.ceil(size / stride)\n stride = size / num_step\n for i in range(num_step + 1):\n yield math.floor(stride * i)\n\n for row, col in product(\n calculate_start_p(stride, img.shape[0] - size),\n calculate_start_p(stride, img.shape[1] - size)):\n row = min(img.shape[0] - size, row)\n col = min(img.shape[1] - size, col)\n image_patch.append(img[row:row + size, col:col + size, ::])\n top_left_points.append((row, col))\n return image_patch, top_left_points\n\n\nclass BBloader(Dataset):\n def __init__(self, split, archers=None):\n self.split = split\n if archers is None:\n archers = [\n (32, 32),\n # (48, 48),\n # (64, 64),\n ]\n self.archers = archers\n self.n_archer = len(self.archers)\n self.ratio = 16\n self.file_list = self._make_slices()\n self.thresh = 0.99\n self.smooth_factor = 100000\n\n def _make_slices(self):\n store_dir = f'runs/fundus_image_data/{self.split}'\n csv_file = pjoin(store_dir, f'list.csv')\n image_size = 512\n if exists(csv_file):\n dd = pd.read_csv(csv_file)\n return dd\n if not exists(store_dir):\n os.makedirs(store_dir, exist_ok=True)\n data = ChallengeDB(split=self.split)\n records = []\n for img, gt in data:\n logger.info(img.shape)\n logger.info(gt.shape)\n image_patches, cornels = slide_image(img, image_size)\n for p, c in zip(image_patches, cornels):\n idx = len(records)\n record_name = pjoin(store_dir, f'data{idx}.pickle')\n gtp = gt[\n c[0]:c[0] + image_size,\n c[1]:c[1] + image_size]\n all_bbox = []\n for lesionType in range(4):\n bbox = segmentation2bbox(gtp == lesionType + 1)\n bbox = list(map(\n lambda x: (*x, lesionType),\n bbox\n ))\n logger.info(bbox)\n all_bbox += bbox\n pickle.dump(\n (p, all_bbox),\n open(record_name, 'wb'))\n records.append(dict(\n file=record_name\n ))\n data_csv = pd.DataFrame.from_records(records)\n data_csv.to_csv(csv_file, index=False)\n return data_csv\n\n def get_bounding_box(self, index):\n if index >= self.__len__():\n raise IndexError\n image, bbox = pickle.load(open(self.file_list.file[index], 'rb'))\n return image, bbox\n\n def __getitem__(self, index):\n if index >= self.__len__():\n raise IndexError()\n image, bbox = 
pickle.load(open(self.file_list.file[index], 'rb'))\n image = image.astype(np.float)\n image = image.transpose((2, 0, 1)) / 255\n image = image.astype(np.float32)\n nchannel, nrow, ncol = image.shape\n\n arow, acol = nrow // self.ratio, ncol // self.ratio\n archor_reg = np.zeros((self.n_archer, 2, arow, acol), np.float32)\n arc_to_bbox_map = np.zeros((self.n_archer, 1, arow, acol), np.int) - 1\n\n center_rows = [self.ratio // 2 + i * self.ratio for i in range(arow)]\n center_cols = [self.ratio // 2 + i * self.ratio for i in range(acol)]\n iou_map = np.zeros((len(bbox), self.n_archer, 1, arow, acol))\n # mean_iou_map has shape of n_bbox, n_archer, 1, row, col\n for bbox_idx, label_box in enumerate(bbox):\n if label_box[-1] != 0:\n continue\n # iou_map = np.zeros((self.n_archer, 1, arow, acol), np.float)\n for irow, icol, iarc in product(\n range(arow),\n range(acol),\n range(self.n_archer)):\n abox = (\n center_rows[irow],\n center_cols[icol],\n *self.archers[iarc]\n )\n iou_map[bbox_idx, iarc, 0, irow, icol] = mean_iou(\n abox, label_box)\n # TODO deal with the fucking mean_iou thing\n if debug and len(bbox) > 0:\n show_img = np.max(iou_map, axis=1, keepdims=True)\n logger.info(show_img.shape)\n show_img = make_grid(torch.Tensor(iou_map[:, 0, :, :, :]))\n logWriter.add_image('iou_map', show_img, index)\n logger.info(show_img.shape)\n plt.figure()\n plt.imshow(show_img)\n plt.show()\n if len(bbox) == 0:\n positive = np.zeros(iou_map.shape[1:])\n negative = np.zeros(iou_map.shape[1:])\n else:\n # logger.info(iou_map.shape)\n iou_map = np.max(iou_map, axis=0)\n positive = iou_map > 0.5\n positive = positive.astype(np.float32)\n negative = iou_map < 0.2\n negative = negative.astype(np.float32)\n\n loss_area = ((positive) + (negative)) > 0\n n_postive = np.sum(positive)\n mask = positive > 0\n random_sample = np.random.rand(*mask.shape) * loss_area\n mask += random_sample >= min(self.thresh, np.max(random_sample) * 0.98)\n mask = mask > 0\n mask = mask.astype(np.float32)\n n_psample = np.sum((mask * positive) > 0)\n n_nsample = np.sum((mask * negative) > 0)\n self.thresh -= n_psample / self.smooth_factor\n self.thresh += n_nsample / self.smooth_factor\n # logger.info(f'{n_psample:6d} {n_nsample:6d} {self.thresh:.6f}')\n\n archor_cls = np.concatenate(\n (negative.astype(np.int), positive.astype(np.int)),\n axis=1)\n\n # logger.info(f'{arow}, {acol}, {self.n_archer}, {len(bbox)}')\n for irow, icol, iarc in product(\n range(arow),\n range(acol),\n range(self.n_archer)):\n if positive[iarc, 0, irow, icol] == 0:\n continue\n current_bbox = bbox[arc_to_bbox_map[iarc, 0, irow, icol]]\n t_row = (current_bbox[0] - center_rows[irow])\\\n / self.archers[iarc][0]\n t_col = (current_bbox[1] - center_cols[icol])\\\n / self.archers[iarc][1]\n archor_reg[iarc, :, irow, icol] = (\n t_row, t_col)\n archor_cls = archor_cls.astype(np.float32)\n return image, (archor_cls, archor_reg, mask)\n\n def __len__(self):\n return self.file_list.__len__()\n\n\nclass MaDetector:\n def __init__(\n self,\n device=None,\n model=None,\n train_data=None,\n test_data=None,\n ratio=None,\n net=None):\n self.device = torch.device(device) if device else torch.device('cpu')\n self.net = VGG() if net is None else net\n if model is not None:\n logger.info(f'loading form {model}')\n dd = torch.load(model)\n self.net.load_state_dict(dd)\n self.net.to(self.device)\n self.epoach = 0\n self.train_data = BBloader('train', [(30, 30)]) \n self.test_data = BBloader('train', [(30, 30)])\n if ratio is None:\n self.ratio = self.train_data.ratio\n 
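        # Editor's note (added comments, not part of the original record):
        # self.ratio is the feature-map stride. The VGG config above applies
        # four 2x2 max-pools, so one output cell covers a 16x16 input patch,
        # and BBloader centres one anchor on every such cell
        # (center = ratio // 2 + i * ratio).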
else:\n self.ratio = ratio\n self.train_loader = DataLoader(\n self.train_data,\n 3,\n True,\n num_workers=0)\n self.optm = SGD(\n self.net.parameters(),\n lr=0.0003,\n momentum=0.9,\n nesterov=True,\n weight_decay=0.00005\n )\n\n def train(self):\n self.net.train()\n tt = tqdm(self.train_loader, total=len(self.train_loader))\n for idx, (img, (cls, reg, mask)) in enumerate(tt):\n npmask = mask.numpy()\n mask = mask.to(self.device)\n\n img = img.to(self.device)\n cls = cls.to(self.device)\n reg = reg.to(self.device)\n mask = mask.to(self.device)\n pcls, preg = self.net(img)\n\n # Calculate Loss_cls\n pcls = pcls.reshape(cls.shape)\n pcls = torch.nn.functional.log_softmax(pcls, dim=2)\n pcls = pcls * cls\n pcls = pcls * mask\n L_cls = - pcls.sum() / mask.sum()\n\n # Calculate Loss_reg\n preg = preg.reshape(reg.shape)\n L_reg = torch.abs(preg - reg)\n L_reg = torch.where(L_reg < 1, 0.5 * L_reg ** 2, L_reg - 0.5)\n positive = cls[:, :, 1:, :, :]\n positive_sum = positive.sum()\n L_reg = L_reg * positive\n L_reg = L_reg.sum() / positive_sum\n\n self.optm.zero_grad()\n loss = L_cls + 0.1 * L_reg\n loss.backward()\n self.optm.step()\n\n loss = loss.detach().cpu()\n tt.set_postfix_str(str(loss))\n logWriter.add_scalar(\n 'train/loss',\n float(loss),\n (self.epoach - 1) * len(self.train_loader) + idx)\n\n def step(self, n=300):\n for i in tqdm(range(n), total=n):\n self.epoach += 1\n self.train()\n\n\ndef get_bbox_statics():\n bbox = BBloader(split='train')\n row_len = []\n col_len = []\n for i in range(bbox.__len__()):\n image, box = bbox.get_bounding_box(i)\n for crow, ccol, rrow, rcol, ltype in box:\n if ltype != 0:\n continue\n row_len.append(rrow)\n col_len.append(rcol)\n row_len = np.array(row_len)\n col_len = np.array(col_len)\n # fig, ax = plt.subplots(tight_layout=True)\n plt.hist2d(row_len, col_len, bins=30)\n plt.colorbar()\n plt.show()\n\nif __name__ == '__main__':\n get_bbox_statics()\n sys.exit(0)\n\n detector = MaDetector(\n device='cuda',\n net=VGG(),\n )\n detector.step()\n sys.exit(0)\n\n loader = BBloader(split='train')\n\n for i in range(100):\n idx = random.randint(0, len(loader))\n iimg, (classification, regression, mask) = loader[idx]\n logger.info(iimg.shape)\n\n plt.figure()\n plt.imshow(classification[0, 1, :, :])\n plt.colorbar()\n\n plt.figure()\n plt.imshow(iimg.transpose(1, 2, 0))\n\n plt.figure()\n plt.imshow(mask[0, 0, ::])\n plt.show()\n sys.exit(0)\n\n logger.info(iimg.shape)\n\n image = Image.open('/data/home/d/data/challenge/A. Segmentation/'\n '1. Original Images/a. 
Training Set/IDRiD_34.jpg')\n image = np.array(image)\n imgs, pos = slide_image(image, 512, 0.2)\n result_patchs = []\n\n heatmap = np.zeros((1, image.shape[0]//16, image.shape[1]//16))\n counter = np.zeros((1, image.shape[0]//16, image.shape[1]//16))\n for img_patchs, p in zip(imgs, pos):\n cls, reg = det.predict(img_patchs)\n p = tuple(i // 16 for i in p)\n hh = cls[:, 1, :, :]-cls[:, 0, :, :]\n heatmap[:, p[0]:p[0]+32, p[1]:p[1]+32] += hh\n counter[:, p[0]:p[0]+32, p[1]:p[1]+32] += 1\n heatmap /= counter\n for iarchor in range(15):\n plt.figure()\n plt.imshow(heatmap[iarchor, :, :])\n plt.colorbar()\n plt.figure()\n plt.imshow(counter[iarchor, :, :])\n plt.show()\n","sub_path":"ma_detection/.ipynb_checkpoints/MA_detection-checkpoint.py","file_name":"MA_detection-checkpoint.py","file_ext":"py","file_size_in_byte":18832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277071345","text":"import os\nimport sys\nfrom collections import defaultdict\n\n\ndef get_directory_path():\n if len(sys.argv) > 1:\n directory = sys.argv[1]\n else:\n return None\n if not os.path.isdir(directory):\n return None\n return directory\n\n\ndef get_uniqum_filenames_in_dir(directory):\n filenames_dict = defaultdict(list)\n for dirname, subdirnames, filenames in os.walk(directory):\n for filename in filenames:\n fullpath = os.path.join(os.path.abspath(dirname), filename)\n fullpath = os.path.realpath(fullpath)\n try:\n file_size = os.path.getsize(fullpath)\n except (OSError,):\n continue\n filenames_dict[(filename, file_size)].append(fullpath)\n return filenames_dict\n\n\ndef print_duplicates(files_dict):\n for (filename, size), paths in sorted(files_dict.items()):\n if len(paths) > 1:\n print('{}, size {} bytes'.format(filename, size))\n print(*paths, sep='\\n')\n print()\n\n\nif __name__ == '__main__':\n directory = get_directory_path()\n if not directory:\n exit('Folder path is incorrect or empty')\n files_dict = get_uniqum_filenames_in_dir(directory)\n print_duplicates(files_dict)\n","sub_path":"duplicates.py","file_name":"duplicates.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135467278","text":"# encoding: utf-8\r\n\r\nfrom test001.items import BookItem\r\nimport settings, datetime, scrapy, MySQLdb, jieba, os, re, gzip, collections, json, string, demjson, xpinyin, cn2an, logging, time, random, html, difflib, simhash\r\n\r\nclass CBookWalker(scrapy.Spider):\r\n\t# URI 头部,防止 403\r\n\theader = {\r\n\t\t\"Connection\": \"keep-alive\",\r\n\t\t\"Accept\": \"text/html, application/xhtml+xml, application/xml; q = 0.9, image/webp, */*; q = 0.8\",\r\n\t\t\"User-Agent\": \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC1)\",\r\n\t\t\"Accept-Encoding\": \"gzip, deflate, sdch\",\r\n\t\t\"Accept-Language\": \"zh-CN, zh; q = 0.8\",\r\n\t\t\"Cache-Control\": \"no-cache\",\r\n\t\t\"Content-Type\": \"application/x-www-form-urlencoded\",\r\n\t\t\"Referrer\": \"https://www.google.com/search?q=%e5%b0%8f%e8%af%b4%e4%b8%8b%e8%bd%bd\",\r\n\t}\r\n\t\r\n\t# 速度设置\r\n\tcustom_settings = {\r\n\t\t\"DOWNLOAD_DELAY\": 1\r\n\t}\r\n\t\r\n\t# 保存的路径\r\n\tpath_base = os.path.join(settings.BASE_DIR, \"../books\")\r\n\tdatabase = None\r\n\tbook_cache = {}\r\n\tlogger = None\r\n\tlast_mismatch = None\r\n\tcheck_complete_state = 1\r\n\tcached_anti_theft = {}\r\n\tcached_irrelevant_chapter = {}\r\n\ttime_of_start = datetime.datetime.now()\r\n\ttotal_books = 
0\r\n\tcurrent_books = 0\r\n\tnum_same_match = 3\r\n\t\r\n\t# 代理\r\n\t# proxy = \"http://127.0.0.1:7135\"\r\n\t\r\n\tBAD_TRIM = {\r\n\t\t\"zhang\" : \"章\", \"hui\" : \"回\", \"bu\" : \"部\", \"jie\" : \"节\", \"ji\" : \"集\", \"juan\" : \"卷\", \"ce\" : \"册\", \"mu\" : \"幕\", \"hua\" : \"话\", \"pian\" : \"篇\",\r\n\t}\r\n\t\r\n\tCN_NUMBERS = {\r\n\t\t# 拼音\r\n\t\t\"ling\": \"零\", \"yi\": \"一\", \"er\": \"二\", \"san\": \"三\", \"si\": \"四\", \"wu\": \"五\", \"liu\": \"六\", \"qi\": \"七\", \"ba\": \"八\", \"jiu\": \"九\",\r\n\t\t\"lu\": \"六\", \"liang\": \"二\", \"shi\": \"十\", \"bai\": \"百\", \"qian\": \"千\", \"wan\": \"万\", \"o\": \"零\", \"O\": \"零\", \"〇\": \"零\",\r\n\t\t\r\n\t\t# 奇奇怪怪的字符\r\n\t\t\"①\" : \"1\", \"②\" : \"2\", \"③\" : \"3\", \"④\" : \"4\", \"⑤\" : \"5\", \"⑥\" : \"6\", \"⑦\" : \"7\", \"⑧\" : \"8\", \"⑨\" : \"9\", \"⑩\" : \"10\", \r\n\t\t\"㈠\" : \"1\", \"㈡\" : \"2\", \"㈢\" : \"3\", \"㈣\" : \"4\", \"㈤\" : \"5\", \"㈥\" : \"6\", \"㈦\" : \"7\", \"㈧\" : \"8\", \"㈨\" : \"9\", \"㈩\" : \"10\",\r\n\t\t\"ⅰ\" : \"1\", \"ⅱ\" : \"2\", \"ⅲ\" : \"3\", \"ⅳ\" : \"4\", \"ⅴ\" : \"5\", \"ⅵ\" : \"6\", \"ⅶ\" : \"7\", \"ⅷ\" : \"8\", \"ⅸ\" : \"9\", \"ⅹ\" : \"10\",\r\n\t\t\"⒈\" : \"1\", \"⒉\" : \"2\", \"⒊\" : \"3\", \"⒋\" : \"4\", \"⒌\" : \"5\", \"⒍\" : \"6\", \"⒎\" : \"7\", \"⒏\" : \"8\", \"⒐\" : \"9\", \"⒑\" : \"10\",\r\n\t}\r\n\t\r\n\tCN_ENDLINE = [\r\n\t\t'。', '?', '!', '”', ' 章', '》', '※', \"”\", \"】\", \"」\", \"』\", \";\", \"…\",\r\n\t]\r\n\t\r\n\tKEYWORD_FINISHED = [\r\n\t\t\"大结局\",\r\n\t\t\"完本\",\r\n\t\t\"后记\",\r\n\t\t\"全书完\",\r\n\t\t\"最终章\",\r\n\t\t\"尾声\",\r\n\t\t\"新书\",\r\n\t\t\"完结\",\r\n\t\t\"结局\",\r\n\t\t\"番外\",\r\n\t\t\"(终)\",\r\n\t\t\"后续\",\r\n\t\t\"後记\",\r\n\t\t\"全文完\",\r\n\t\t\"全终\",\r\n\t\t\"(完)\",\r\n\t\t\"末章\",\r\n\t\t\"最终回\",\r\n\t]\r\n\t\r\n\tKEYWORD_IGNORE = [\r\n\t\t\"请假\",\r\n\t\t\"请个假\",\r\n\t\t\"明天更新\",\r\n\t\t\"上架感言\",\r\n\t\t\"节快乐\",\r\n\t\t\"单章\",\r\n\t\t\"恢复更新\",\r\n\t\t\"道歉\",\r\n\t\t\"抱歉\",\r\n\t\t\"停更\",\r\n\t\t\"鸽一天\",\r\n\t\t\"卡文\",\r\n\t\t\"练车\",\r\n\t\t\"通知\",\r\n\t\t\"假条\",\r\n\t\t\"渡劫\",\r\n\t\t\"无更\",\r\n\t\t\"该章节已被锁定\",\r\n\t\t\"断更\",\r\n\t\t\"生病\",\r\n\t\t\"推迟更\",\r\n\t\t\"整理思路\",\r\n\t\t\"咕一天\",\r\n\t\t\"咕咕咕\",\r\n\t\t\"停电\",\r\n\t\t\"住院\",\r\n\t\t\"新的一\",\r\n\t\t\"没有更新\",\r\n\t\t\"致歉\",\r\n\t\t\"更新延迟\",\r\n\t\t\"码字\",\r\n\t\t\"写不出来\",\r\n\t\t\"对不起\",\r\n\t\t\"没能码完\",\r\n\t\t\"没能写完\",\r\n\t\t\"延迟到\",\r\n\t\t\"推迟到\",\r\n\t\t\"出差\",\r\n\t\t\"更新\",\r\n\t\t\"新书\",\r\n\t\t\"推书\",\r\n\t\t\"修仙\",\r\n\t]\r\n\t\r\n\t# 关键字替换\r\n\tREPLACE_REGEX = {\r\n\t\t# ===格式整理===\r\n\t\t# \"\\\\(|\\\\[|\\\\{|(|【|{\":\"(\",\r\n\t\t# \"\\\\)|\\\\]|\\\\}|)|】|}\":\")\",\r\n\t\t\r\n\t\t# 需要?\r\n\t\t\",\": \",\",\r\n\t\t# \":\": \":\", \"\\\\?\":\"?\", # 会造成起点的图片无法替换\r\n\t\t\r\n\t\t\"\\\\*|*\": \"*\",\r\n\t\t\"[wWwW]{3}\": \"www\",\r\n\t\t\"w{3}(\\u3001|\\u3002)\": \"www.\",\r\n\t\t\"[cCcC][oOoO][mMmM]\": \"com\",\r\n\t\t\"[nNnN][eeEE][ttTT]\": \"net\",\r\n\t\t\"[cCcC][nNnN]\": \"cn\",\r\n\t\t\"(\\\\.|\\u3001|\\u3002)com\": \".com\",\r\n\t\t\"(\\\\.|\\u3001|\\u3002)net\": \".net\",\r\n\t\t\"(\\\\.|\\u3001|\\u3002)cn\": \".cn\",\r\n\t\t\"[pPpP][sSsS][::]\": \"ps:\",\r\n\t\t\"。{5,7}\": \"……\", \"~{2,50}\": \"——\", \"…{3,40}\": \"……\", \"-{3,20}\": \"——\",\r\n\t\t#\"。(,|,|。)\": \"。\",\r\n\t\t# \"?(,|,)\": \"?\",\r\n\t\t#\"”(,|,|。)\": \"”\",\r\n\t\t\"@{3,}\": \"\",\r\n\t\t\r\n\t\t# === 段末的多余的r ===\r\n\t\t\"\\\\\\\\r
      \": \"
      \",\r\n\t\t\r\n\t\t# === 一些特殊的替换 ===\r\n\t\t\"\\\\[+CP.*(http:#file.*\\\\.jpg)\\\\]+\": \"\",\r\n\t\t\"『(.)』\": \"\\\\1\", # \"『色』\": \"色\",\r\n\t\t\"\\\\((.)\\\\)\": \"\\\\1\", # \"(色)\": \"色\",\r\n\t\t\r\n\t\t# === 去广告 ===\r\n\t\t\"\\\\[搜索最新更新尽在[a-z\\\\.]+\\\\]\": \"\",\r\n\t\t\"手机用户请到m.qidian.com阅读。\": \"\",\r\n\t\t\".{2,4}中文网欢迎广大书友\": \"\",\r\n\t\t\"访问下载txt小说|◎雲來閣免费万本m.yunlaige.com◎\": \"\",\r\n\t\t\"〖∷更新快∷无弹窗∷纯文字∷.*?〗\": \"\",\r\n\t\t'超快稳定更新小说[,,]': '', \"本文由 。。 首发\": \"\",\r\n\t\t'”小说“小说章节更新最快': '',\r\n\t\t'如果觉得好看,请把本站网址推荐给您的朋友吧!': '',\r\n\t\t'本站手机网址:  请互相通知向您QQ群【微博/微信】论坛贴吧推荐宣传介绍!': '',\r\n\t\t\"fqXSw\\\\.com\": \"\", \"\\\\.5du|\\\\.5du5\\\\.\": \"\",\r\n\t\t\"\\\\[\\\\]\": \"\",\r\n\t\t\"如果您觉得网不错就多多分享本站谢谢各位读者的支持\": \"\",\r\n\t\t\"全文字无广告|\\\\(看书窝 看书窝 无弹窗全文阅读\\\\)\": \"\",\r\n\t\t\"。。+[\\\\s ]*看最新最全小说\": \"\",\r\n\t\t\"水印广告测试\": \"\",\r\n\t\t\"\\\\(平南文学网\\\\)\": \"\", \"讀蕶蕶尐說網\": \"\",\r\n\t\t\"比奇提示:如何快速搜自己要找的书籍\": \"\", \"《百度书名\\\\+比奇》即可快速直达\": \"\",\r\n\t\t\"~无~错~小~说\": \"\",\r\n\t\t\r\n\t\t\"\\\\(一秒记住小说界\\\\)|\\\\*一秒记住\\\\*\": \"\",\r\n\t\t\"uutxt\\\\.org|3vbook\\\\.cn|www\\\\.qbwx\\\\.com|WWw\\\\.YaNkuai\\\\.com|www\\\\.btzw\\\\.com|www\\\\.23uS\\\\.com\": \"\",\r\n\t\t\"txt53712/\": \"\",\r\n\t\t\"\\xa0{4,12}\": \"\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0\",\r\n\t\t\r\n\t\t# === 通用去广告\r\n\t\t\"[wwW]{1,3}[.\\\\.]23uS[.\\\\.](?:coM|com)\": \"\",\r\n\t\t\r\n\t\t# === 星号屏蔽字还原 ===\r\n\t\t# === 八九 ===\r\n\t\t\"十有(\\\\*{2})\": \"十有八九\",\r\n\t\t\"十有bā'九\": \"十有八九\",\r\n\t\t\"(\\\\*{2})不离十\": \"八九不离十\",\r\n\t\t\"(\\\\*{2})点\": \"八九点\",\r\n\t\t\"(\\\\*{2})个\": \"八九个\",\r\n\t\t\"(\\\\*{2})岁\": \"八九岁\",\r\n\t\t\"(\\\\*{2})成\": \"八九成\",\r\n\t\t\"(\\\\*{2})年\": \"八九年\",\r\n\t\t\"一八(\\\\*{2})\": \"一八八九\",\r\n\t\t\r\n\t\t# === SM ===\r\n\t\t\"G(\\\\*{2})\": \"GSM\",\r\n\t\t\r\n\t\t# === 情色 ===\r\n\t\t\"感(\\\\*{2})彩\": \"感情色彩\",\r\n\t\t\r\n\t\t# === 大法 ===\r\n\t\t\"强(\\\\*{2})u5B9D\": \"强大法宝\",\r\n\t\t\"强(\\\\*{2})宝\": \"强大法宝\",\r\n\t\t\"种魔(\\\\*{2})\": \"种魔大法\",\r\n\t\t\"巨(\\\\*{2})\": \"巨大法\",\r\n\t\t\"强(\\\\*{2})术\": \"强大法术\",\r\n\t\t\"(\\\\*{2})师\": \"大法师\",\r\n\t\t\r\n\t\t# === 肉体 ===\r\n\t\t\"(\\\\*{2})凡胎\": \"肉体凡胎\",\r\n\t\t\"夺取她的(\\\\*{2})\": \"夺取她的肉体\",\r\n\t\t\"夺取他的(\\\\*{2})\": \"夺取他的肉体\",\r\n\t\t\"(\\\\*{2})与精神\": \"肉体与精神\",\r\n\t\t\"(\\\\*{2})素材\": \"肉体素材\",\r\n\t\t\"(\\\\*{2})材料\": \"肉体材料\",\r\n\t\t\"在(\\\\*{2})上\": \"在肉体上\",\r\n\t\t\r\n\t\t# === 赤裸 ===\r\n\t\t\"(\\\\*{4})着\": \"赤裸着\",\r\n\t\t\"(\\\\*{2})裸\": \"赤裸裸\",\r\n\t\t\"浑身(\\\\*{2})\": \"浑身赤裸\",\r\n\t\t\r\n\t\t# === 射 ===\r\n\t\t\"枪(\\\\*{4})\": \"枪发射\",\r\n\t\t\"(\\\\*{4})而出\": \"喷射而出\",\r\n\t\t\"光(\\\\*{2})\": \"光四射\",\r\n\t\t\r\n\t\t# === 鱼水 ===\r\n\t\t\"(\\\\*{2})之欢\": \"鱼水之欢\",\r\n\t\t\r\n\t\t# === 国军 ===\r\n\t\t\"(\\\\*{2})队\": \"国军队\",\r\n\t\t\"(\\\\*{2})舰\": \"国军舰\",\r\n\t\t\"(\\\\*{2})方\": \"国军方\",\r\n\t\t\r\n\t\t# === 露阴 ===\r\n\t\t\"暴(\\\\*{2})谋\": \"暴露阴谋\",\r\n\t\t\r\n\t\t# === 欲望 ===\r\n\t\t\"的(\\\\*{2})是无止境的\": \"的欲望是无止境的\",\r\n\t\t\"邪恶的(\\\\*{2})\": \"邪恶的欲望\",\r\n\t\t\"被(\\\\*{2})支配\": \"被欲望支配\",\r\n\t\t\"掀桌的(\\\\*{2})\": \"掀桌的欲望\",\r\n\t\t\"控制不住(\\\\*{2})\": \"控制不住欲望\",\r\n\t\t\"求生的(\\\\*{2})\": \"求生的欲望\",\r\n\t\t\"求生(\\\\*{2})\": \"求生欲望\",\r\n\t\t\"购买(\\\\*{2})\": \"购买欲望\",\r\n\t\t\"永无止境的(\\\\*{2})\": \"永无止境的欲望\",\r\n\t\t\"(\\\\*{2})的发泄\": \"欲望的发泄\",\r\n\t\t\"发泄(\\\\*{2})\": \"发泄欲望\",\r\n\t\t\"杀戮(\\\\*{2})\": \"杀戮欲望\",\r\n\t\t\"(\\\\*{2})和本能\": \"欲望和本能\",\r\n\t\t\r\n\t\t# === 呻吟 ===\r\n\t\t\"不堪重负的(\\\\*{2})\": \"不堪重负的呻吟\",\r\n\t\t\"(\\\\*{2})声\": \"呻吟声\",\r\n\t\t\"颤抖(\\\\*{2})\": 
\"颤抖呻吟\",\r\n\t\t\"(\\\\*{2})颤抖\": \"呻吟颤抖\",\r\n\t\t\r\n\t\t# === 独立 ===\r\n\t\t\"宣布(\\\\*{2})\": \"宣布独立\",\r\n\t\t\"(\\\\*{2})空间\": \"独立空间\",\r\n\t\t\r\n\t\t# === 荡漾 ===\r\n\t\t\"波纹(\\\\*{2})\": \"波纹荡漾\",\r\n\t\t\r\n\t\t# === 喘息 ===\r\n\t\t\"(\\\\*{2})之机\": \"喘息之机\",\r\n\t\t\r\n\t\t# === 大波 ===\r\n\t\t\"一(\\\\*{2})\": \"一大波\",\r\n\t\t\r\n\t\t# === 上供 ===\r\n\t\t\"(\\\\*{2})奉\": \"上供奉\",\r\n\t\t\r\n\t\t# === 奸淫 ===\r\n\t\t\"(\\\\*{2})掳掠\": \"奸淫掳掠\",\r\n\t\t\r\n\t\t# === 失身 ===\r\n\t\t\"有(\\\\*{2})份\": \"有失身份\",\r\n\t\t\r\n\t\t# === 六合 ===\r\n\t\t\"(\\\\*{2})八荒\": \"六合八荒\",\r\n\t\t\r\n\t\t# === 人祸 ===\r\n\t\t\"天灾(\\\\*{2})\": \"天灾人祸\",\r\n\t\t\r\n\t\t# === 轮红 ===\r\n\t\t\"一(\\\\*{2})日\": \"一轮红日\",\r\n\t\t\r\n\t\t# === 西藏 ===\r\n\t\t\"东躲(\\\\*{2})\": \"东躲西藏\",\r\n\t\t\r\n\t\t# === 被操 ===\r\n\t\t\"(\\\\*{2})纵\": \"被操纵\",\r\n\t\t\r\n\t\t# === 穷屌 ===\r\n\t\t\"(\\\\*{2})丝\": \"穷屌丝\",\r\n\t\t\r\n\t\t# === 销魂 ===\r\n\t\t\"(\\\\*{2})滋味\": \"销魂滋味\",\r\n\t\t\r\n\t\t# === 色水 ===\r\n\t\t\"血(\\\\*{2})晶\": \"血色水晶\",\r\n\t\t\r\n\t\t# === 偷用 ===\r\n\t\t\"偷(\\\\*{2})\": \"偷偷用\",\r\n\t\t\r\n\t\t# === 乳交 ===\r\n\t\t\"水(\\\\*{2})融\": \"水乳交融\",\r\n\t\t\r\n\t\t# === 多字替换 ===\r\n\t\t\"cao之过急\": \"操之过急\", \"chunguang大泄\": \"春光大泄\",\r\n\t\t\"大公无si\": \"大公无私\",\r\n\t\t\"fu道人家\": \"妇道人家\", \"放sōng'xià来\": \"放松下来\",\r\n\t\t\"奸yin掳掠\": \"奸淫掳掠\",\r\n\t\t\"空dangdang\": \"空荡荡\",\r\n\t\t\"突发qing况\": \"突发情况\",\r\n\t\t\"yin奉阳违\": \"阴奉阳违\", \"一yin一阳\": \"一阴一阳\",\r\n\t\t\r\n\t\t# === 双字替换 ===\r\n\t\t\"暧m[eè][iì]\":\"暧昧\",\r\n\t\t\"bàn\\\\s*fǎ\":\"办法\", \"bucuo\":\"不错\", \"不liáng\":\"不良\", \"b[ěe]i(\\\\s| )*j[īi]ng\":\"北京\", \"bǐ\\\\s*shǒu\":\"匕首\", \"半shen\":\"半身\", \"b[ìi]j[ìi]ng\":\"毕竟\", \"报(了?)jing\":\"报\\\\1警\", \"bèi'pò\":\"被迫\", \"包yǎng\":\"包养\", \"(?:biǎo|婊\\\\\\\\?)子\":\"婊子\", \"biǎo\\\\s*xiàn\\\\s*\":\"表现\",\r\n\t\t\"chifan\":\"吃饭\", \"ch[oō]ngd[oò]ng\":\"冲动\", \"chong物\":\"宠物\", \"cao(练|作)\":\"操\\\\1\", \"出gui\":\"出轨\", \"chu\\\\s*xian\":\"出现\", \"缠mian\":\"缠绵\", \"成shu\":\"成熟\", \"(?:赤|chi)\\\\s*lu[oǒ]\":\"赤裸\", \"春guang\":\"春光\", \"chun风\":\"春风\", \"chuang伴\":\"床伴\", \"沉mi\":\"沉迷\", \"沉lun\":\"沉沦\", \"刺ji\":\"刺激\", \"chao红\":\"潮红\", \"初chun\":\"初春\", \""chi\\\\s*luo"\":\"赤裸\", \"cí\\\\s*zhí\":\"辞职\",\r\n\t\t\"dān\\\\s*xīn\":\"当心\", \"dang校\":\"党校\", \"da子\":\"鞑子\", \"大tui\":\"大腿\", \"dǎ\\\\s*suàn\":\"打算\", \"dá\\\\s*àn\":\"答案\", \"dài\\\\s*lǐ\":\"代理\", \"dengdai\":\"等待\", \"电huà\":\"电话\", \"diàn\\\\s*huà\":\"电话\", \"diàn\\\\s*yǐng\":\"电影\", \"diao丝\":\"屌丝\", \"d[úu](?:\\\\s| |
      )*l[ìi]\":\"独立\", \"d[uú]\\\\s{0,2}c[áa]i\":\"独裁\", \"d?[iì]f[āa]ng\":\"地方\", \"d[ìi]\\\\s*d[ūu]\":\"帝都\", \"di国|帝guo\":\"帝国\", \"du[oò]落\":\"堕落\", \"坠luò\":\"坠落\",\r\n\t\t\"f[ǎa]ngf[óo]\":\"仿佛\", \"fei踢\":\"飞踢\", \"fēi\\\\s*wén\":\"飞吻\", \"feng流\":\"风流\", \"风liu\":\"风流\", \"f[èe]nn[ùu]\":\"愤怒\", \"fǎn\\\\s*yīng\":\"反应\", \"fú\\\\s*wù\":\"服务\", \"fù\\\\s*chóu\":\"复仇\",\r\n\t\t\"gao潮\":\"高潮\", \"高氵朝\":\"高潮\", \"gāo\\\\s*xìng\\\\s*\":\"高兴\", \"干chai\":\"干柴\", \"勾yin\":\"勾引\", \"gu[oò]ch[ée]ng\":\"过程\", \"gu[āa]n\\\\s*x[iì]\":\"关系\", \"官\\\\s*fāng\":\"官方\", \"g[ǎa]nji[àa]o\":\"感觉\", \"国wu院\":\"国务院\", \"gù\\\\s*yì\\\\s*\":\"故意\", \"guofen\":\"过分\", \"guān\\\\s*fāng\":\"官方\",\r\n\t\t\"hā\\\\s*hā\\\\s*\":\"哈哈\", \"h[aǎ]ode\":\"好的\", \"hù士\":\"护士\", \"火qiang\":\"火枪\", \"huó\\\\s*dòng\":\"活动\", \"há'guó\":\"韩国\", \"han住\":\"含住\", \"hai洛因\":\"海洛因\", \"红fen\":\"红粉\", \"火yao\":\"火药\", \"h[ǎa]oxi[àa]ng\":\"好像\", \"hu[áa]ngs[èe]\":\"黄色\", \"皇d[ìi]\":\"皇帝\", \"昏昏yu睡\":\"昏昏欲睡\", \"回dang\":\"回荡\", \"huí\\\\s*qù\\\\s*\":\"回去\", \"hé\\\\s*shì\\\\s*\":\"合适\", \"hàn\\\\s*jiān\":\"汉奸\",\r\n\t\t\"jian(臣|细)\":\"奸\\\\1\", \"奸yin\":\"奸淫\", \"jiànmiàn\":\"见面\", \"jian货\":\"贱货\", \"jing察\":\"警察\", \"jǐng\\\\s*chá\":\"警察\", \"j[ìi]nháng\":\"进行\", \"jīng\\\\s*guò\":\"经过\", \"ji烈\":\"激烈\", \"j[iì](nv|女)\":\"妓女\", \"jirou\":\"鸡肉\", \"ji者\":\"记者\", \"jì\\\\s*xù\\\\s*\":\"继续\", \"ju花\":\"菊花\", \"j[īi]动\":\"激动\", \"jili[èe]\":\"激烈\", \"肌r[òo]u\":\"肌肉\", \"ji射\":\"激射\", \"ji[ēe]ch[uù]\":\"接触\", \"ji��\\\\s*shì\":\"就是\", \"j[ùu]li[èe]\":\"剧烈\", \"jǐng惕\":\"警惕\", \"节cao\":\"节操\", \"浸yin\":\"浸淫\", \"jù\\\\s*jué\\\\s*\":\"拒绝\", \"jue色\":\"角色\",\r\n\t\t\"k[ěe]n[ée]ng\":\"可能\", \"开bao\":\"开苞\", \"k[àa]o近\":\"靠近\", \"口wen\":\"口吻\", \"kankan\":\"看看\",\r\n\t\t\"ling辱\":\"凌辱\", \"luan蛋\":\"卵蛋\", \"脸sè\":\"脸色\", \"lu出\":\"露出\", \"流máng\":\"流氓\", \"lun理\":\"伦理\", \"lì\\\\s*qì\":\"力气\", \"lán\\\\s*jié\":\"拦截\", \"lìng\\\\s*lèi\":\"另类\", \"lè\\\\s*suǒ\":\"勒索\", \"lòudòng\":\"漏洞\",\r\n\t\t\"m[ǎa]ny[ìi]\":\"满意\", \"m[ǎa]sh[àa]ng\":\"马上\", \"m[ée]iy[oǒ]u\":\"没有\", \"mei国\":\"美国\", \"měi\\\\s*nǚ\":\"美女\", \"mèi\\\\s*mèi\":\"妹妹\", \"m[íi]ngb[áa]i\":\"明白\", \"迷huan\":\"迷幻\", \"mi茫\":\"迷茫\", \"mó\\\\s*yàng\":\"模样\", \"m[íi]n\\\\s{0,2}zh[ǔu]\":\"民主\", \"迷jian\":\"迷奸\", \"mimi糊糊\":\"迷迷糊糊\", \"mì\\\\s*shū\":\"秘书\", \"末(?:\\\\s|
      )*ì\":\"末日\", \"面se\":\"面色\", \"mengmeng\":\"蒙蒙\", \"màn\\\\s*huà\":\"漫画\",\r\n\t\t\"nàme\":\"那么\", \"n[ǎa]o\\\\s*d[àa]i\":\"脑袋\", \"n[ée]ngg[oò]u\":\"能够\", \"nán\\\\s{0,2}hǎi\":\"那会\", \"内jian\":\"内奸\", \"[内內]y[iī]\":\"内衣\", \"内ku\":\"内裤\",\r\n\t\t\"pi[áa]o客\":\"嫖客\", \"p[áa]ngbi[āa]n\":\"旁边\",\r\n\t\t\"q[íi]gu[àa]i\":\"奇怪\", \"qì\\\\s*chē\":\"汽车\", \"qing\\\\s*(ren|人)\":\"情人\", \"qin兽\":\"禽兽\", \"q[iī]ngch[uǔ]\":\"清楚\", \"què\\\\s*dìng\":\"确定\", \"球mi\":\"球迷\", \"青chun\":\"青春\", \"青lou\":\"青楼\", \"qingkuang\":\"情况\", \"qiang[ \\\\s]*jian\":\"强奸\",\r\n\t\t\"re\\\\s*nao\":\"热闹\", \"r[úu]gu[oǒ]\":\"如果\", \"r[oó]ngy[ìi]\":\"容易\", \"ru(房|白色)\":\"乳\\\\1\", \"rén员\":\"人员\", \"rén形\":\"人形\", \"人chao\":\"人潮\", \"renmen\":\"人名\", \"ruǎn\\\\s*jiàn\":\"软件\", \"rì\\\\s*běn\":\"日本\", \"日\\\\s*běn\":\"日本\",\r\n\t\t\"shàng\\\\s*mén\":\"上门\", \"上jiang\":\"上将\", \"she(门|术|手|程|击)\":\"射\\\\1\", \"sudu\":\"速度\", \"shú\\\\s*nǚ\":\"熟女\", \"shuijue\":\"睡觉\", \"shide\":\"是的\", \"sh[iì]ji[eè]\":\"世界\", \"sh[ií]ji[aā]n\":\"时间\", \"sh[ií]h[oò]u\":\"时候\", \"sh[ií]me\":\"什么\", \"si人\":\"私人\", \"shi女\":\"侍女\", \"shi身\":\"失身\", \"sh[ūu]j[ìi]\":\"书记\", \"shu女\":\"熟女\", \"shu[ \\\\s]?xiong\":\"酥胸\", \"(?:上|shang)chuang\":\"上床\", \"shǒu\\\\s*jī\":\"手机\", \"呻y[íi]n\":\"呻吟\", \"sh[ēe]ngzh[íi]\":\"生殖\", \"深gu\":\"深谷\", \"双xiu\":\"双修\", \"生r[ìi]\":\"生日\", \"si盐\":\"私盐\", \"shi卫\":\"侍卫\", \"si下\":\"私下\", \"sao扰\":\"骚扰\", \"shuang\\\\s*feng\":\"双峰\", \"shǎo\\\\s*fù\":\"少妇\", \"shì\\\\s*pín\":\"视频\", \"shè\\\\s*xiàng\":\"摄像\",\r\n\t\t\"t[uū]r[áa]n\":\"突然\", \"tiaojiao\":\"调教\", \"tí\\\\s*gòng\":\"提供\", \"偷qing\":\"偷情\", \"推dao\":\"推倒\", \"脱guang\":\"脱光\", \"t[èe]bi[ée]\":\"特别\", \"t[ōo]nggu[òo]\":\"通过\", \"同ju\":\"同居\", \"tian来tian去\":\"舔来舔去\",\r\n\t\t\"w[ēe]ixi[ée]\":\"威胁\", \"wèizh[ìi]\":\"位置\", \"wei员\":\"委员\", \"w[èe]nti\":\"问题\", \"wèi\\\\s*dào\\\\s*\":\"味道\", \"wú\\\\s*nài\":\"无奈\", \"wǔ\\\\s*qì\":\"武器\", \"weilai\":\"未来\",\r\n\t\t\"xiu长\":\"修长\", \"亵du\":\"亵渎\", \"xing福\":\"幸福\", \"xìng\\\\s*yùn\":\"幸运\", \"小bo\":\"小波\", \"小niū\":\"小妞\", \"xiong([^a-z])\":\"胸\\\\1\", \"小tui\":\"小腿\", \"xiang港\":\"香港\", \"xiàohuà\":\"笑话\", \"xiāo\\\\s*shòu\":\"销售\", \"xiàn\\\\'zhì\":\"限制\", \"xiàn\\\\s*jīn\":\"现金\", \"xiāng\\\\s*zǐ\":\"箱子\", \"xiōng\\\\s*dì\":\"兄弟\", \"选zé\":\"选择\", \"xìn\\\\s*hào\":\"信号\", \"xìng\\\\s*gǎn\":\"性感\", \"xiǎo\\\\s*jiě\":\"小姐\", \"xìn\\\\s*hào\":\"信号\", \"xià\\\\s*zhù\":\"下注\",\r\n\t\t\"yì\\\\s*wài\\\\s*\":\"意外\", \"yin(冷|暗|谋|险|沉|沟|癸派|后)\":\"阴\\\\1\", \"y[iī]y[àa]ng\":\"一样\", \"y[īi]di[ǎa]n\":\"一点\", \"yī\\\\s*zhèn\":\"一阵\", \"y[ǐi]j[īi]ng\":\"已经\", \"疑huo\":\"疑惑\", \"yí\\\\s*huò\":\"疑惑\", \"影mi\":\"影迷\", \"yin荡\":\"淫荡\", \"yin贼\":\"淫贼\", \"阳w[ěe]i\":\"阳痿\", \"yao头\":\"摇头\", \"yaotou\":\"摇头\", \"摇tou\":\"摇头\", \"yezhan\":\"野战\", \"you饵\":\"诱饵\", \"(?:you|诱)(?:惑|huo)\":\"诱惑\", \"you导\":\"诱导\", \"引you\":\"引诱\", \"you人\":\"诱人\", \"youshi\":\"有事\", \"you\\\\s*xiu\":\"优秀\", \"御yòng\":\"御用\", \"旖ni\":\"旖旎\", \"yu念\":\"欲念\", \"you敌深入\":\"诱敌深入\", \"影she\":\"影射\", \"牙qian\":\"牙签\", \"一yè情\":\"一夜情\", \"yīng\\\\s*yǔ\":\"英语\",\r\n\t\t\"z[iì]j[iǐ]\":\"自己\", \"z[ìi](?:\\\\s|
      | )*y[oó]u\":\"自由\", \"zh[iī]d?[àa]u?o\":\"知道\", \"zixin\":\"自信\", \"zhì'fú\":\"制服\", \"zhì\\\\s*fú\":\"制服\", \"zha药\":\"炸药\", \"zhan有\":\"占有\", \"zhào\\\\s*piàn\":\"照片\", \"zhè\\\\s*gè\":\"这个\", \"政f[ǔu]|zheng府\":\"政府\", \"zh[èe]ng\\\\s{0,2}f[uǔ]\":\"政府\", \"zong理\":\"总理\", \"zh[ōo]ngy[āa]ng\":\"中央\", \"中yang\":\"中央\", \"zu[oǒ]\\\\s*y[oò]u\":\"左右\", \"zhǔ\\\\s*dòng\":\"主动\", \"zh[oō]uw[ée]i\":\"周围\", \"zhōu\\\\s*nián\":\"周年\", \"中nan海\":\"中南海\", \"中j委\":\"中纪委\", \"中zu部\":\"中组部\", \"政zhi局\":\"政治局\", \"(昨|一|时|余)(?:
      | |\\\\s)*ì\":\"\\\\1日\", \"照she\":\"照射\", \"zhǔn\\\\s*bèi\\\\s*\":\"准备\", \"zhu义\":\"主义\",\r\n\t\t\r\n\t\t\"

      \\\\n

      \\\\s*ì\":\"日\",\r\n\t\t\r\n\t\t'曹艹': '曹操',\r\n\t\t'JI昂': '激昂',\r\n\t\t'□□无暇': '自顾无暇',\r\n\t\t'法律/界': '法律界',\r\n\t\t'人/类': '人类',\r\n\t\t'恐怖/主义': '恐怖主义',\r\n\t\t'颠/覆': '颠覆',\r\n\t\t'民.事.司.法.裁.判': '民事司法裁判',\r\n\t\t'南海/问题': '南海问题',\r\n\t\t'圈圈/功': '法轮功',\r\n\t\t'镇/压': '镇压',\r\n\t\t'赤.裸': '赤裸',\r\n\t\t'欲·望': '欲望',\r\n\t\t'nv真': '女真',\r\n\t\t'土gai': '土改',\r\n\t\t'狗·屎': '狗屎',\r\n\t\t'du立': '独立',\r\n\t\t'发sao': '发骚',\r\n\t\t'奸/夫/淫/妇': '奸夫淫妇',\r\n\t\t'爱qing': '爱情',\r\n\t\t'抚mo': '抚摸',\r\n\t\t'神qing': '神情',\r\n\t\t'公~务~员': '公务员',\r\n\t\t'原着': '原著',\r\n\t\t'□□部分': '高潮部分',\r\n\t\t'角□□面': '角色情面',\r\n\t\t'艹': '操',\r\n\t\t'淫/靡/香/艳': '淫靡香艳',\r\n\t\t'毒丨药': '毒药',\r\n\t\t'登6': '登陆',\r\n\t\t'天□□美': '天性爱美',\r\n\t\t'双丨飞': '双飞',\r\n\t\t'高chao': '高潮',\r\n\t\t'pi股': '屁股',\r\n\t\t'情/趣': '情趣',\r\n\t\t'情/欲': '情欲',\r\n\t\t'炸/弹': '炸弹',\r\n\t\t'赤/身': '赤身',\r\n\t\t'果/体': '裸体',\r\n\t\t'zhong国': '中国',\r\n\t\t'帝国#主义': '帝国主义',\r\n\t\t'形形□□': '形形色色',\r\n\t\t'yuwang': '欲望',\r\n\t\t'shuangtui': '双腿',\r\n\t\t'城/管': '城管',\r\n\t\t'调丨教': '调教',\r\n\t\t'银/行/卡': '银行卡',\r\n\t\t'裸/体': '裸体',\r\n\t\t'光/裸': '光裸',\r\n\t\t'嫩/女': '嫩女',\r\n\t\t'维/谷': '维谷',\r\n\t\t'开□□谈': '开始交谈',\r\n\t\t'破碎的□□': '破碎的呻吟',\r\n\t\t'pi霜': '砒霜',\r\n\t\t'ma醉': '麻醉',\r\n\t\t'麻zui': '麻醉',\r\n\t\t'nue杀': '虐杀',\r\n\t\t'后gong': '后宫',\r\n\t\t'林荫dao': '林荫道',\r\n\t\t'分/身': '分身',\r\n\t\t'克/隆': '克隆',\r\n\t\t'性/需要': '性需要',\r\n\t\t'黑/帮': '黑帮',\r\n\t\t'政-府': '政府',\r\n\t\t'八/九': '八九',\r\n\t\t'不~着~寸~缕': '不着寸缕',\r\n\t\t'肉~体': '肉体',\r\n\t\t'蹲□子': '蹲下身子',\r\n\t\t'ji情': '激情',\r\n\t\t'xie恶': '邪恶',\r\n\t\t'Z国': '中国',\r\n\t\t'创/世': '创世',\r\n\t\t'紫jin城': '紫禁城',\r\n\t\t'□□在外': '裸露在外',\r\n\t\t'光怪6离': '光怪陆离',\r\n\t\t'邪/教': '邪教',\r\n\t\t'粗bao': '粗暴',\r\n\t\t'yin邪': '淫邪',\r\n\t\t'小biao砸': '小婊砸',\r\n\t\t\r\n\t\t'牛1b': '牛b', '微1博': '微博', '内1衣': '内衣',\r\n\t\t\r\n\t\tr\"\\-\\xa0\\-\" : \"一\",\r\n\t\tr\" \\?\\?\\?\\?\" : r\" \",\r\n\t\tr\" \\?\\?\" : r\" \",\r\n\t}\r\n\t\r\n\t# 单字替换,可能会误替换,所以需要特殊处理\r\n\tREPLACE_ONCE = {\r\n\t\t\"Ai\" : \"爱\",\r\n\t\t\"b[āà]ng\":\"棒\",\"bào\":\"爆\",\"bà\":\"吧\",\"bī\":\"逼\",\"bō\":\"波\", \"biàn\":\"便\", \"相b\" : \"相比\", \"无b\" : \"比\", \"b迫\" : \"逼迫\", \"b视\" : \"鄙视\", \"b起\" : \"比起\", \"b向\" : \"逼向\", \"b如\" : \"比如\",\r\n\t\t\"cāo\":\"操\", \"cǎo\":\"草\", \"cào\":\"操\", \"chāng\":\"娼\", \"chang\":\"娼\", \"cháo\":\"潮\", \"chā\":\"插\", \"chéng\":\"成\", \"chōu\":\"抽\", \"chuáng\":\"床\", \"chún\":\"唇\", \"chūn\":\"春\", \"cuō\":\"搓\", \"cū\":\"粗\", \"cHa\" : \"插\", \"cH0U\" : \"抽\", \"ChUan\" : \"喘\", \"C作\" : \"操作\", \"C纵\" : \"操纵\", \"C心\" : \"操心\", \"cHeN\" : \"成\", \"rEn\" : \"人\", \"cia\" : \"擦\", \"cha\" : \"插\", \"chUaN\" : \"床\",\r\n\t\t\"dǎng\":\"党\", \"dàng\":\"荡\", \"dāo\":\"刀\", \"dòng\":\"洞\", \"diao\":\"屌\", \"diǎn\":\"点\",\r\n\t\t\"fǎ\":\"法\", \"féi\":\"肥\", \"fù\":\"妇\", \"FaNG\" : \"放\", \"dANg\" : \"荡\",\r\n\t\t\"guān\":\"官\", \"g净\" : \"干净\", \"GU\" : \"股\", \"Ga0\" : \"搞\", \"G0u\" : \"沟\", \"gUi\" : \"龟\", \"gan\" : \"感\", \"g0ng\" : \"宫\",\r\n\t\t\"hán\":\"含\", \"hóu\":\"喉\", \"hòu\":\"后\", \"h(u)?ā\":\"花\", \"huá\":\"华\", \"huì\":\"会\", \"huò\":\"惑\", \"hùn\":\"混\", \"hún\":\"魂\", \"h色\" : \"黄色\",\r\n\t\t\"jiǔ\":\"九\", \"j[īi]ng\":\"精\", \"jìn\":\"禁\", \"jǐng\":\"警\", \"jiāng\":\"江\", \"jiān\":\"奸\", \"jiāo\":\"交\", \"jūn\":\"军\", \"jū\":\"拘\", \"jú\":\"局\", \"jī\":\"激\", \"激ān\":\"奸\", \"jiàn\" : \"贱\", \"JiNg\" : \"精\", \"Jiao\" : \"娇\", \"小J\" : \"小鸡\", \"J皮\" : \"鸡皮\", \"花j\" : \"花茎\", \"J诈\" : \"奸诈\", \"J汤\" : \"鸡汤\",\r\n\t\t\"kù\":\"裤\", \"kàn\":\"看\", \"kuai\" : \"快\", \"K腿\" : \"裤腿\", \"K子\" : \"裤子\", \"KuI\" : 
\"窥\",\r\n\t\t\"[1l]àng\":\"浪\", \"liáo\":\"撩\", \"liú\":\"流\", \"lì\":\"莉\", \"liè\":\"烈\", \"[1l]uàn\":\"乱\", \"lún\":\"伦\", \"luǒ\":\"裸\", \"lòu\":\"露\", \"[l1]ù\":\"露\", \"lǜ\":\"绿\", \"liàn\":\"练\", \"露b\" : \"露比\", \"lAn\" : \"滥\", \"luoli\" : \"萝莉\",\r\n\t\t\"mǎi\":\"买\", \"mài\":\"卖\", \"máo\":\"毛\", \"mā\":\"妈\", \"méng\":\"蒙\", \"mén\":\"门\", \"miè\":\"灭\", \"mí\":\"迷\", \"mì\":\"蜜\", \"mō\":\"摸\", \"miàn\":\"面\", \"mī\" : \"咪\", \"m0\" : \"摸\",\r\n\t\t\"nǎi\":\"奶\", \"nèn\":\"嫩\", \"niào\":\"尿\", \"niē\":\"捏\", \"nòng\":\"弄\", \"nǚ\":\"女\", \"nVX\" : \"女性\", \"nV\" : \"女\", \"nEnG\" : \"嫩\",\r\n\t\t\"pào\":\"炮\", \"piàn\":\"片\", \"pò\":\"破\", \"pì\" : \"屁\", \"P股\" : \"屁股\", \"Pa0\" : \"炮\",\r\n\t\t\"qi[āa]ng\":\"枪\", \"qíng\":\"情\", \"qīn\":\"亲\", \"qiú\":\"求\", \"quán\":\"全\", \"qù\":\"去\",\r\n\t\t\"rén\":\"人\", \"r[ìi]\":\"日\", \"rǔ\":\"乳\", \"r0U\" : \"肉\", \"r0u\" : \"柔\",\r\n\t\t\r\n\t\t# s\r\n\t\t\"sǎ\":\"洒\", \"sāo\":\"骚\", \"sǎo\":\"骚\", \"sè\":\"色\", \"se\":\"色\", \"shā\":\"杀\", \"sE\" : \"色\", \"Sh\" : \"湿\", \"ShAnG\" : \"上\",\r\n\t\t\"shēn\":\"身\", # \"shēn\":\"呻\", # 2个重复的,误替换且是单字怎么办\r\n\t\t\"shén\":\"神\", \"shè\":\"射\", \"shǐ\":\"屎\", \"shì\":\"侍\", \"sǐ\":\"死\", \"sī\":\"私\", \"shǔn\":\"吮\", \"sǔn\":\"吮\", \"sū\":\"酥\", \"shào\":\"绍\", \"Si\" : \"死\", \"索X\" : \"索性\", \"sHE\" : \"射\", \"j1N\" : \"进\", \"SaO\" : \"骚\",\r\n\t\t\r\n\t\t\"tān\":\"贪\", \"tiǎn\":\"舔\", \"t[ǐi]ng\":\"挺\", \"tǐ\":\"体\", \"tǒng\":\"捅\", \"tōu\":\"偷\", \"tou\":\"偷\", \"tuǐ\":\"腿\", \"tūn\":\"吞\", \"tún\":\"臀\", \"tiáo\":\"调\", \"tài\":\"态\", \"tào\":\"套\", \"本T\" : \"本体\", \"身T\" : \"身体\", \"T内\" : \"体内\", \"T质\" : \"体质\", \"尸T\" : \"尸体\", \"T0Ng\" : \"捅\", \"T1aN\" : \"舔\", \"t0u\" : \"偷\",\r\n\t\t\"wēn\":\"温\", \"wěn\":\"吻\", \"waNg\" : \"望\", \"W蔑\" : \"诬蔑\", \"wUhuI\" : \"污秽\", \"W点\" : \"污点\", \"W染\" : \"污染\",\r\n\t\t\"xiǎo\":\"小\", \"xiào\":\"笑\", \"xìng\":\"性\", \"xing\":\"性\", \"xiōng\":\"胸\", \"xī\":\"吸\", \"xí\":\"习\", \"xì\":\"系\", \"xìn\":\"信\", \"xué\":\"穴\", \"xuè\":\"穴\", \"xùe\":\"穴\", \"xuan\":\"宣\", \"xiàng\":\"象\", \"x1\" : \"吸\", \"x口\" : \"胸口\", \"可能X\" : \"可能性\", \"x前\" : \"胸前\", \"实质X\" : \"实质性\", \"抗X\" : \"抗性\", \"x中\" : \"心中\", \"x膛\" : \"胸膛\",\r\n\t\t\"yāng\":\"央\", \"yàn\":\"艳\", \"yīn\":\"阴\", \"yào\":\"药\", \"yé\":\"爷\", \"yòu\":\"诱\", \"zàng\":\"脏\", \"y[ùu]\":\"欲\", \"yín\":\"淫\", \"yì\":\"意\", \"yà\":\"讶\", \"Y影\" : \"阴影\", \"y接\" : \"硬接\", \"Y唱\" : \"吟唱\", \"血Ye\" : \"血液\", \"Ye体\" : \"液体\", \"yAn\" : \"阴暗\", \"YAn\" : \"艳\", \"血Ye\" : \"血液\", \"夕y\" : \"夕阳\", \"Y谋\" : \"阴谋\", \"yU\" : \"欲\", \"Y霾\" : \"阴霾\",\r\n\t\t\"zhēn\":\"针\", \"zēn\":\"针\", \"zhà\":\"炸\", \"zhèng\":\"政\", \"zǒu\":\"走\", \"zuì\":\"罪\", \"zuò\":\"做\", \"zhōng\":\"中\", \"zhAYA0\" : \"炸药\",\r\n\t\t\r\n\t\t\"diàn\" : \"殿\", \"rè\" : \"热\", \"yǔn\" : \"允\", \"ài\" : \"爱\", \"ròu\" : \"肉\",\r\n\t\t\"後\" : \"后\", \"麽\" : \"么\", \"於\" : \"于\",\r\n\t}\r\n\t\r\n\tREPLACE_ONCE_FIX = {\r\n\t\t# ===误替换还原===\r\n\t\t\"碧欲\":\"碧玉\", \"美欲\":\"美玉\",\"欲石\":\"玉石\",\"惜欲\":\"惜玉\",\"宝欲\":\"宝玉\",\r\n\t\t\"品性\":\"品行\", \"德性\":\"德行\",\r\n\t\t\"波ok\":\"book\", \"波SS\":\"BOSS\",\r\n\t\t\r\n\t\t# ===其他修正===\r\n\t\t\"弥俩\":\"你俩\",\r\n\t\t\"妳\":\"你\",\r\n\t\t# \"圞|垩|卝|龘\":\"\",\r\n\t\t\"大6\":\"大陆\",\r\n\t\t\r\n\t\t\"\\\\(?呻\\\\)?体\" : \"身体\", \"\\\\(?呻\\\\)?上\" : \"身上\", \"\\\\(?呻\\\\)?子\" : \"身子\", \"下\\\\(?呻\\\\)?\" : \"下身\", \"\\\\(?呻\\\\)?下\" : \"身下\",\r\n\t\t\"\\\\(\\\\((.){1,2}\\\\)\\\\1\\\\)\" : \"\\\\1\", \"\\\\((.){1,2}\\\\)\" : \"\\\\1\",\r\n\t}\r\n\t\r\n\tREPLACE_REMOVE = [\r\n\t\t# 长文字替换\r\n\t\t# 
排序代码:newArr = arr.sort((a, b) => { var diff = a.charCodeAt(1) - b.charCodeAt(1); if (diff == 0) return b.length - a.length; return diff; })\r\n\t\t'本站域名已经更换为,老域名已经停用,请大家重新收藏,并使用新域名访问。',\r\n\t\t\"\\\\(跪求订阅、打赏、催更票、月票、鲜花的支持!\\\\)\",\r\n\t\t\"\\\\(?未完待续请搜索飄天文學,小说更好更新更快!\",\r\n\t\t\"\\\\(跪求订阅、打赏、催更票、月票、鲜花的支持!\",\r\n\t\t\"\\\\(看小说到网\\\\)\",\r\n\t\t\"\\\\(未完待续。\\\\)\",\r\n\t\t\"\\\\(本章完\\\\)\",\r\n\t\t\"16977小游戏每天更新好玩的小游戏,等你来发现!\",\r\n\t\t\"(800小说网 www.800Book.net 提供Txt免费下载)最新章节全文阅读-..-\",\r\n\t\t\"(800小说网 www.800Book.net 提供Txt免费下载)\",\r\n\t\t\"\\\\[800\\\\]\\\\[站页面清爽,广告少,\",\r\n\t\t\"\\\\[看本书最新章节请到求书 .\\\\]\",\r\n\t\t\"(\\\\s*君子聚义堂)\",\r\n\t\t\"readx;\",\r\n\t\t\"txt电子书下载/\",\r\n\t\t\"txt全集下载\",\r\n\t\t\"txt小说下载\",\r\n\t\t\"\\\\|优\\\\|优\\\\|小\\\\|说\\\\|更\\\\|新\\\\|最\\\\|快\\\\|www.uuxs.cc\\\\|\",\r\n\t\t\"\\\\|每两个看言情的人当中,就有一个注册过可°乐°小°说°网的账号。\",\r\n\t\t\"思ˊ路ˋ客,更新最快的!\",\r\n\t\t\"恋上你看书网 630bookla ,最快更新.*\",\r\n\t\t\",举报后维护人员会在两分钟内校正章节内容,请耐心等待,并刷新页面。\",\r\n\t\t\"追书必备\",\r\n\t\t\"-优-优-小-说-更-新-最-快-www.UUXS.CC-\",\r\n\t\t\"-优-优-小-说-更-新-最-快x\",\r\n\t\t\"来可乐网看小说\",\r\n\t\t\"纯文字在线阅读本站域名手机同步阅读请访问\",\r\n\t\t\"本文由  首发\",\r\n\t\t\"樂文小说\",\r\n\t\t'最快更新无错小说阅读,请访问 请收藏本站阅读最新小说!',\r\n\t\t\"最新章节全文阅读看书神器\\\\.yankuai\\\\.\",\r\n\t\t\"最新章节全文阅读(..首发)\",\r\n\t\t\"最新章节全文阅读【首发】\",\r\n\t\t\"最新章节全文阅读\",\r\n\t\t\"看本书最新章节请到800小说网(www.800book.net)\",\r\n\t\t\"(本章未完,请翻页)\",\r\n\t\t\"手机用户请浏览m.biqugezw.com阅读,更优质的阅读体验。\",\r\n\t\t\"手机用户请浏览阅读,更优质的阅读体验。\",\r\n\t\t\"阅读,更优质的阅读体验。\",\r\n\t\t\"手机最省流量无广告的站点。\",\r\n\t\t\"手机看小说哪家强手机阅\",\r\n\t\t\"如果你喜欢本站[〖]?一定要记住[】]?(?:网址|地址)哦\",\r\n\t\t\"看清爽的小说就到\",\r\n\t\t\"请用搜索引擎(?:搜索关键词)?.*?完美破防盗章节,各种小说任你观看\",\r\n\t\t\"完美破防盗章节,请用搜索引擎各种小说任你观看\",\r\n\t\t\"破防盗章节,请用搜索引擎各种小说任你观看\",\r\n\t\t\"(?:搜索引擎)?各种小说任你观看,破防盗章节\",\r\n\t\t\"章节错误,点此举报\\\\(免注册\\\\)\",\r\n\t\t\"热门小说最新章节全文阅读.。 更新好快。\",\r\n\t\t\"【阅读本书最新章节,请搜索800】\",\r\n\t\t\"亲,百度搜索眼&快,大量小说免费看。\",\r\n\t\t\"亲,眼&快,大量小说免费看。\",\r\n\t\t'下载免费阅读器!!',\r\n\t\t'笔趣阁 .,最快更新.*最新章节!',\r\n\t\t'请大家搜索(书迷楼)看最全!更新最快的小说',\r\n\t\t'更新快无广告。',\r\n\t\t'【鳳.{1,2}凰.{1,2}小说网 更新快 无弹窗 请搜索f.h.xiao.shuo.c.o.m】',\r\n\t\t'【可换源APP看书软件:书掌柜APP或直接访问官方网站shuzh.net】',\r\n\t\t'[●★▲]手机下载APP看书神器.*',\r\n\t\t\"m.?手机最省流量的站点。\",\r\n\t\t'm.?手机最省流量.无广告的站点。',\r\n\t\t'底部字链推广位',\r\n\t\t'us最快',\r\n\t\t'APPapp',\r\n\t\t'久看中文网首发',\r\n\t\t'顶点小说 23US.com更新最快',\r\n\t\t\"转载请注明出处:www.123du.cc 。\",\r\n\t\t\"转载请注明出处:www.123ds.org 。\",\r\n\t\t\"转载请注明出处:www.duzheba.cc 。\",\r\n\t\t\"本文来源:123读书网。\",\r\n\t\t\"本文来源:读`者~吧。\",\r\n\t\tr\"[a-zA-Z0-9\\u4e00-\\u9fa5]+你:看后求收藏123读书网,接着再看好方便。\",\r\n\t\tr\"[a-zA-Z0-9\\u4e00-\\u9fa5]+你:看后求收藏读.?者.?吧,接着再看好方便。\",\r\n\t\t\"本文来源:123读书网。\",\r\n\t\t\"天才一秒记住本站地址:.。手机版阅读网址\",\r\n\t\t\"本文来源:123读书网。\",\r\n\t\t\"支持:.*?,请把本站分享给你们的好友!手机端:.*?报错以及求更请留言。\",\r\n\t\t\"【.*?】提醒书友谨记:本站网址:.*?永不丢失!\",\r\n\t\t\"百度搜不到pkgg言情小说的建议使用360,搜狗去搜索,求书,报错以及求更请留言。\",\r\n\t\t\"pkgg言情小说小說网\",\r\n\t\tr\"《.*》无错章节将持续在搜更新,站内无任何广告,还请大家收藏和推荐搜!\",\r\n\t\tr\"喜欢.*请大家收藏:(.*) ?.*搜.*更新速度最快。\",\r\n\t\t\r\n\t\t# 复杂规则的替换\r\n\t\t'(看小说到|爱玩爱看就来|就爱上|喜欢)?(\\\\s|<|>|&| |[+@@=:;``%?》《〈︾-])?[乐樂](\\\\s|<|>|&| |[+@@=:;``%?》《〈︾-])?[文].*?[说說][网]?[|]?(.*(3w|[wωWw]{1,3}|[Mm]).*[mMm])?[}。\\\\s]?(乐文小说)?',\r\n\t\t'(本文由|小说)?(\\\\s| )?((3w|[wWw]{1,3}|[Mm]).)?\\\\s?[lしlL][wωWw][xχXx][sSs][55][22][00].*[mMm][|\\\\s]?(首发(哦亲)?)?',\r\n\t\t'([『【↑△↓@︾]+[\\u4E00-\\u9FA5]){2,6}[】|]',\r\n\t\t\r\n\t\t# 包含 \\P 的替换\r\n\t\t# '\\\\P{1,2}[顶頂].{1,3}[点小].*?o?[mw,]',\r\n\t\t# '\\\\P.?长.{1,2}风.{1,2}文.{1,2}学.*?[tx]',\r\n\t\t# '\\\\P无.错.*?[cC][oO][mM]',\r\n\t\t'[;\\\\(]顶.{0,2}点.小说',\r\n\t\t'2长2风2文2学,w¢$',\r\n\t\t'》长>风》',\r\n\t\t\r\n\t\t# 包含 .* 
的,可能有多余的替换
\r\n\t\t'看无防盗章节的小说,请用搜索引擎搜索关键词.*',
\r\n\t\t'(?:完美)?破防盗章节,请用搜索引擎搜索关键词.*',
\r\n\t\t'搜索引擎搜索关键词,各种任你观看,破防盗章节',
\r\n\t\t'破防盗完美章节,请用搜索引擎.*各种小说任你观看',
\r\n\t\t'如您已(?:閱讀|阅读)到此章节.*?敬请记住我们新的网址\\s*。',
\r\n\t\t'↗百度搜:.*?直达网址.*?↖',
\r\n\t\t"[:《〈|~∨∟∑]{1,2}长.{1,2}风.*?et",
\r\n\t\t'\\[限时抢购\\].*',
\r\n\t\t'支持网站发展.逛淘宝买东西就从这里进.*',
\r\n\t\t'ps[::]想听到更多你们的声音,想收到更多你们的建议,现在就搜索微信公众号“qdread”并加关注,给.*?更多支持!',
\r\n\t\t'(?:ps[::])?看《.*?》背后的独家故事.*?告诉我吧!',
\r\n\t\t'(?:天上掉馅饼的好活动.*?微信公众号!)?',
\r\n\t\t'(微信添加.*qdread微信公众号!)',
\r\n\t\t'jiemei如您已阅读到此章节,请移步到.*?\\[ads:本站换新网址啦,速记方法:,.\\]',
\r\n\t\t'先给自己定个小目标:比如收藏笔趣阁.*',
\r\n\t\t'请记住本书首发域名.*',
\r\n\t\t'记住手机版网址.*',
\r\n\t\t'.*关注微信公众号.*',
\r\n\t\t'一秒记住.*',
\r\n\t\t"【完本神站】手机阅读网址 m.(wanbentxt|xinwanben).com 喜欢就分享一下",
\r\n\t\tr"支持\\.\\\\\\^完\\*本\\*神\\*站\\*\\\\\\^\\.把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\t"支持↘完本♂看♂书↙",
\r\n\t\t"支持(綄本神站)把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\tr"【追书帮】提醒各位天才们谨记本站网址: [\\w\\.]+",
\r\n\t\t\r\n\t\t# 短文字替换
\r\n\t\t'\\[txt全集下载\\]',
\r\n\t\t'\\[\\s*超多好看小说\\]',
\r\n\t\t'⊙四⊙五⊙中⊙文☆→',
\r\n\t\t'\\[ads:本站换新网址啦,速记方法:.*?\\]',
\r\n\t\t'[》《|~]无(?:.|>)错(?:.|>)小说',
\r\n\t\t'`无`错`小说`www.``com', '+无+错+小说+3w++',
\r\n\t\t'\\|优\\|优\\|小\\|说\\|更\\|新\\|最\\|快X',
\r\n\t\t'▲∴', '8,ww←',
\r\n\t\t"/www.23+?[Ww][Xx].[Cc]om/ig",
\r\n\t\t"/热门推荐:、+/g",
\r\n\t\t"/h2>/g",
\r\n\t\t# '[《〈》>\\+|~[\\]]无\\1错\\1', '》无>错》',
\r\n\t\tr"谨记我们的网址,祝大家阅读愉快!别忘了多多宣传宣传。",
\r\n\t\tr"谨记我们的网址,祝大家阅读愉快!",
\r\n\t\tr"【提示】:如果觉得此文不错,请推荐给更多小伙伴吧!分享也是一种享受。",
\r\n\t\tr"【提示】:如果觉得此文不错,请推荐给更多小伙伴吧!",
\r\n\t\tr"【完本神站】手机阅读网址 m.(wanbentxt|xinwanben).com 喜欢就分享一下",
\r\n\t\tr"支持\\.\\\\\\^完\\*本\\*神\\*站\\*\\\\\\^\\.把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\tr"提示:浏览器搜索(书名)+(完 本 神 立占)可以快速找到你在本站看的书!",
\r\n\t\tr"支持↘完本♂看♂书↙",
\r\n\t\tr"支持(綄本神站)把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\tr"本↘书↘首↘发↘求.书.帮↘https?://[\\w\\.]+/",
\r\n\t\tr"♂浏♂览♂器♂搜索\\{\\\\一♂六♂八♂看♂书\\\\\\}:可以快速找到你看的文",
\r\n\t\t\r\n\t\t'女凤免费小说抢先看', '女凤小说网全文字 无广告',
\r\n\t\t'乐文小说网?', '《乐〈文《小说', '乐文移动网', '頂点小说', '頂點小說',
\r\n\t\t'追小说哪里快去眼快',
\r\n\t\t'\\[书库\\].\\[774\\]\\[buy\\].kuai',
\r\n\t\t'www.938xs.com',
\r\n\t\t'小說,.biquge5200.',
\r\n\t\t"看更多精品好书搜",
\r\n\t\tr"\\^\\- 完 \\-\\^",
\r\n\t\tr"\\^\\- 本 \\-\\^",
\r\n\t\tr"\\^\\- 神 \\-\\^",
\r\n\t\tr"\\^\\- 站 \\-\\^",
\r\n\t\tr"喜欢神站记得收藏哦,多多推荐给更多爱看书的朋友!",
\r\n\t\tr"\\?\\?\\?\\?",
\r\n\t\tr"支持【.*?】把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\tr"★看★最★新★章★节★百★度★搜★求★书★帮★",
\r\n\t\tr"★首★发★求★书★帮★",
\r\n\t\tr"免-费-首-发→【求】【书】【帮】",
\r\n\t\tr"「\\^求\\^书\\^帮\\^首\\~发」",
\r\n\t\tr"【最新首发】",
\r\n\t\tr"↘ 免费↙",
\r\n\t\tr"↘ 首发↙",
\r\n\t\tr"↘  ↙",
\r\n\t\tr"↘ 看 ↙",
\r\n\t\tr"↘ 书 ↙",
\r\n\t\tr"↘♂♂♂看♂书↙:\\.",
\r\n\t\tr"支持↘♂♂♂看♂书↙把本站分享那些需要的小伙伴!找不到书请留言!",
\r\n\t\tr"♂♂3",
\r\n\t\tr"提示:浏览器搜索(书名)\\+\\{一,六,八,看书\\}可以快速找到你在本站看的书!",
\r\n\t\t# r"【狂沙文学网】提醒书友谨记:",
\r\n\t\t# r"支持:狂沙文学网,请把本站分享给你们的好友!",
\r\n\t\tr"断、青、丝、小、说、网、首、发~",
\r\n\t\t# r"支持:狂沙文学网,请把本站分享给你们的好友!",
\r\n\t\tr"支持\\s*(.*?)\\s*把本站分享那些需要的小伙伴!找不到书请(?:首页)?留言!",
\r\n\t\tr"支持\\s*(.*?)\\s*把本站分享那些需要的小伙伴!找不到书请(?:首页)?留言",
\r\n\t\tr"支持.*?把本站分享那些需要的小伙伴!找不到书请(?:首页)?留言!",
\r\n\t\tr"您可以在百度里搜索\\s*“.*?”\\s*查找最新章节!",
\r\n\t\tr"支持\\s*(.*?)\\s*把本站分享那些需",
\r\n\t\tr"[a-zA-Z0-9_]+\\(.*?\\);?",
\r\n\t\tr"手机端:[:\\w\\./]*?,百度搜不到狂沙文学网的建议使用360,搜狗去搜索,求书,报错以及求更请留言。",
\r\n\t\tr"(www|m|wap)\\.okma\\.net",
\r\n\t\tr"(www|m|wap)\\.eqeq\\.net",
\r\n\t\tr"本站网址: 
[:\\w\\./]*\",\r\n\t\tr\"手机用户请到\\s*\",\r\n\t\tr\"\\s*阅读最新章节\",\r\n\t\tr\"为了方便下次阅读,你可以《加入书签》记录本次(.*)阅读记录,下次打开书架即可看到!请向你的朋友(QQ、博客、微信等方式)推荐本书\\s*\",\r\n\t\tr\"[^a-zA-Z0-9\\u4e00-\\u9fa5],谢谢您的支持!!([^a-zA-Z0-9\\u4e00-\\u9fa5]|$)\",\r\n\t\tr\"《.*》无错章节将持续在更新,站内无任何广告,还请大家收藏和推荐!\",\r\n\t\tr\"支持.*请留言。\",\r\n\t\tr\"\\w+://[/\\w\\.\\-]+\",\r\n\t\tr\"【.*】提醒书友谨记:本站网址:.*?一秒记住、永不丢失!\",\r\n\t\tr\"【.*?www\\.iqiwx\\.com.*?】\",\r\n\t\t\r\n\t\tr\"(推荐下|插一句|广个告),.*(,更新快|,离线朗读|手机都支持)!\",\r\n\t\tr\"插播一个app.*。\",\r\n\t\tr\"求助下,.*我的书吧。\",\r\n\t\tr\"推荐一个app.*!\",\r\n\t\tr\"书友们之前用的.*。\",\r\n\t\tr\".*【领红包】.*\",\r\n\t\tr\".*【看书领红包】.*\",\r\n\t\tr\".*【书友大本营】.*\",\r\n\t\tr\".*【书友福利】.*\",\r\n\t\tr\".*【书友红包】.*\",\r\n\t\tr\".*【看书福利】.*\",\r\n\t\tr\".*【看书红包】.*\",\r\n\t\tr\".*【送红包】.*\",\r\n\t\tr\".*领现金红包.*\",\r\n\t\tr\".*现金、点币.*\",\r\n\t\tr\".*888现金红包.*\",\r\n\t\tr\".*现金or点币.*\",\r\n\t\t\r\n\t\t\"/'ads_wz_txt;',|百度搜索|无弹窗小说网|更新快无弹窗纯文字|高品质更新|小说章节更新最快|\\(百度搜.\\)|全文字手打|“” 看|无.弹.窗.小.说.网|追书网|〖∷∷无弹窗∷纯文字∷ 〗/g\",\r\n\t\t\r\n\t\t# wanbentxt 专用\r\n\t\tr\"\\&nbp;\",\r\n\t\tr\"UG8dW\",\r\n\t\tr\"\",\r\n\t\tr\"手机直接访问:m\\.(wanbentxt|xinwanben)\\.com\",\r\n\t\tr\"记不住网址,可以:【完本神站】\",\r\n\t\tr\"手机访问[::][\\w\\.]*\",\r\n\t\tr\"电脑访问[::][\\w\\.]*\",\r\n\t]\r\n\t\r\n\tADVERT_KEYWORDS = [\r\n\t\tr\"手机阅读\",\r\n\t\tr\"手机用户\",\r\n\t\tr\"网址\",\r\n\t\tr\"本站\",\r\n\t\tr\"分享\", # 有可能误判\r\n\t\tr\"一秒记住\",\r\n\t\tr\"首发\",\r\n\t\tr\"小说\",\r\n\t\tr\"转载\",\r\n\t\tr\"笔趣阁\",\r\n\t\tr\"支持\",\r\n\t\tr\"留言\",\r\n\t\tr\".com\",\r\n\t\tr\".cc\",\r\n\t\tr\".cn\",\r\n\t\tr\".net\",\r\n\t\tr\".la\",\r\n\t\tr\".org\",\r\n\t\tr\".co\",\r\n\t\tr\".me\",\r\n\t\tr\"wap.\",\r\n\t\tr\"m.\",\r\n\t\tr\"www.\",\r\n\t\tr\"无错\",\r\n\t\tr\"收藏\",\r\n\t\tr\"《\",\r\n\t\tr\"》\",\r\n\t\tr\"m.\",\r\n\t\tr\"加入书签\",\r\n\t\tr\"最新章节\",\r\n\t\tr\"推荐本书\",\r\n\t\tr\"喜欢\",\r\n\t\tr\"广告\",\r\n\t\tr\"推荐\",\r\n\t\tr\"谨记\",\r\n\t\tr\"百度\",\r\n\t\tr\"搜狗\",\r\n\t\tr\"报错\",\r\n\t\tr\"app\",\r\n\t\tr\"关注\",\r\n\t\tr\"红包\",\r\n\t\tr\"福利\",\r\n\t\tr\"作者的话\",\r\n\t\tr\"下载地址\",\r\n\t\tr\"txt\",\r\n\t]\r\n\t\r\n\t# 准备工作\r\n\tdef start_requests(self):\r\n\t\t# 日志\r\n\t\thandler = logging.FileHandler(os.path.join(settings.BASE_DIR, \"../logs.log\"), encoding=\"utf-8\")\r\n\t\thandler.setLevel(logging.ERROR)\r\n\t\thandler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\r\n\t\tself.logger = logging.getLogger(self.name)\r\n\t\tself.logger.addHandler(handler)\r\n\t\tself.time_of_start = datetime.datetime.now()\r\n\t\t\r\n\t\t# 检查文件夹是否有效\r\n\t\tif not os.path.exists(self.path_base):\r\n\t\t\tos.mkdir(self.path_base)\r\n\t\t\r\n\t\tif not self.connect_db(False):\r\n\t\t\traise Exception(\"连接数据库失败\")\r\n\t\t\r\n\t\tdont_filter = False\r\n\t\tif hasattr(self, \"dont_filter\"):\r\n\t\t\tdont_filter = self.dont_filter\r\n\t\t\r\n\t\tstart_urls = []\r\n\t\tfor url in self.start_urls:\r\n\t\t\tif url and url not in start_urls:\r\n\t\t\t\tstart_urls.append(url)\r\n\t\t\r\n\t\tself.total_books = len(start_urls)\r\n\t\tprint(\"总计:{}丨可用:{}\".format(len(self.start_urls), self.total_books))\r\n\t\t\r\n\t\tif hasattr(self, \"proxy\") and self.proxy:\r\n\t\t\treturn [scrapy.Request(x, callback = self.parse, headers = self.header, meta = {\"proxy\" : self.proxy}, dont_filter = dont_filter) for x in start_urls if len(x) > 0]\r\n\t\t\r\n\t\treturn [scrapy.Request(x, callback = self.parse, headers = self.header, dont_filter = dont_filter) for x in start_urls]\r\n\t#end - start_requests\r\n\t\r\n\t# 处理书本信息\r\n\tdef proccess_book_info(self, item, url=None):\r\n\t\t# 清理多余内容\r\n\t\titem[\"book_title\"] 
= html.unescape(item[\"book_title\"].strip(\"\\r\\n\\t\\xA0\\u3000   \"))\r\n\t\tif item[\"book_author\"]:\r\n\t\t\titem[\"book_author\"] = html.unescape(item[\"book_author\"].strip(\"\\r\\n\\t\\xA0\\u3000   \"))\r\n\t\telse:\r\n\t\t\titem[\"book_author\"] = \"\"\r\n\t\tif item[\"book_description\"]:\r\n\t\t\titem[\"book_description\"] = html.unescape(item[\"book_description\"].strip(\"\\r\\n\\t\\xA0\\u3000   \"))\r\n\t\telse:\r\n\t\t\titem[\"book_description\"] = \"\"\r\n\t\t\r\n\t\t# 尝试创建书本信息\r\n\t\tbookId = None\r\n\t\tcursor = self.database.cursor()\r\n\t\tnumRows = 0\r\n\t\t# self.logger.debug(\"CALL create_book\")\r\n\t\t\r\n\t\ttry:\r\n\t\t\tnumRows = cursor.execute(\"CALL create_book('{}', '{}', '{}', '{}', '{}');\".format(\r\n\t\t\t\tself.escape(item[\"book_title\"]),\r\n\t\t\t\tself.escape(item[\"book_author\"] or \"\"),\r\n\t\t\t\tself.escape(item[\"book_description\"] or \"\"),\r\n\t\t\t\tself.escape(item[\"book_type\"] or \"\"),\r\n\t\t\t\tself.escape(self.name)\r\n\t\t\t))\r\n\t\texcept Exception as e:\r\n\t\t\tself.logger.error(\"错误:执行语句失败:{}\".format(\"CALL create_book('{}', '{}', '{}', '{}', '{}');\".format(\r\n\t\t\t\tself.escape(item[\"book_title\"]),\r\n\t\t\t\tself.escape(item[\"book_author\"] or \"\"),\r\n\t\t\t\tself.escape(item[\"book_description\"] or \"\"),\r\n\t\t\t\tself.escape(item[\"book_type\"] or \"\"),\r\n\t\t\t\tself.escape(self.name)\r\n\t\t\t)), exc_info = True)\r\n\t\t\ttime.sleep(random.randint(1, 3))\r\n\t\t\t\r\n\t\t\tif str(e).find(\"gone away\") > 0 or str(e).find(\"Lost Connection\") > 0:\r\n\t\t\t\tself.connect_db()\r\n\t\t\t\r\n\t\t\treturn self.proccess_book_info(item, url)\r\n\t\t#end - try\r\n\t\t\r\n\t\t# self.logger.debug(\"CALL create_book finished\")\r\n\t\t\r\n\t\tif numRows > 0:\r\n\t\t\tbookId = int(cursor.fetchone()[0])\r\n\t\t\r\n\t\tspider = None\r\n\t\ttry:\r\n\t\t\tcursor.execute(\"INSERT IGNORE INTO book_spider (book_id, url, spider) VALUES ('{}', '{}', '{}');\".format(\r\n\t\t\t\tbookId, self.escape(url), self.escape(self.name)\r\n\t\t\t))\r\n\t\t\tcursor.execute(\"UPDATE book_spider SET touch_time = CURRENT_TIMESTAMP WHERE book_id = '{}' AND url = '{}';\".format(\r\n\t\t\t\tbookId, self.escape(url)\r\n\t\t\t))\r\n\t\t\tself.database.commit()\r\n\t\t\t\r\n\t\t\tif cursor.execute(\"SELECT id FROM book_spider WHERE book_id = '{}' AND url = '{}';\".format(bookId, self.escape(url))) > 0:\r\n\t\t\t\tspider = int(cursor.fetchone()[0])\r\n\t\texcept Exception as e:\r\n\t\t\tself.logger.error(\"更新书本信息失败\", exc_info = True)\r\n\t\t\r\n\t\tunique = item[\"book_title\"] + \" | \" + item[\"book_author\"]\r\n\t\tself.book_cache[unique] = {\r\n\t\t\t\"item\" : item,\r\n\t\t\t\"book_id\" : bookId,\r\n\t\t\t\"file_name\" : os.path.join(self.path_base,\r\n\t\t\t\titem[\"book_title\"].replace(\"/\", \"-\").replace(\"\\\\\", \"-\") + \"丨\" +\r\n\t\t\t\titem[\"book_author\"].replace(\"/\", \"-\").replace(\"\\\\\", \"-\") + \".txt\"\r\n\t\t\t),\r\n\t\t\t\"ordered\" : self.is_real_chapter(bookId),\r\n\t\t\t\"spider\" : spider,\r\n\t\t}\r\n\t\t\r\n\t\tif not os.path.exists(self.book_cache[unique][\"file_name\"]):\r\n\t\t\tself.generate_txt_file(bookId, self.book_cache[unique][\"file_name\"])\r\n\t\t\r\n\t\tself.check_complete(bookId)\r\n\t\tprint(\"{}《{}》{}丨{}丨{}\".format(bookId, item[\"book_title\"], item[\"book_author\"], item.get(\"book_finish\"), item.get(\"book_lastupdate\")))\r\n\t\t\r\n\t\tself.current_books += 1\r\n\t\tos.system(\"title {} - {}/{}\".format(self.name, self.current_books, self.total_books))\r\n\t\t\r\n\t\treturn unique\r\n\t#end 
- proccess_book_info\r\n\t\r\n\ttimeout_pcl = 1\r\n\t\r\n\t# 处理章节列表\r\n\tdef proccess_chapter_list(self, unique, chapter_list, callee, referrer = \"\", chapter_name = []):\r\n\t\tif not chapter_list:\r\n\t\t\tself.logger.error(\"{} 没有章节列表\".format(referrer))\r\n\t\t\traise Exception(\"错误:没有章节列表\")\r\n\t\t\r\n\t\theader = self.header.copy()\r\n\t\theader[\"Referrer\"] = referrer\r\n\t\tbookId = self.book_cache[unique][\"book_id\"]\r\n\t\titem = self.book_cache[unique][\"item\"]\r\n\t\t\r\n\t\tself.book_cache[unique][\"metadata\"] = item[\"book_title\"] + \"丨\" + \\\r\n\t\t\titem[\"book_author\"] + \"\\n\\n\" + \\\r\n\t\t\titem[\"book_description\"].strip(\"\\r\\n\\t\\xA0\\u3000   \") + \"\\n\\n\\n\"\r\n\t\tself.book_cache[unique][\"num_chapter\"] = len(chapter_list)\r\n\t\tself.book_cache[unique][\"chapters\"] = collections.OrderedDict()\r\n\t\t\r\n\t\t# self.logger.debug(\"start html.unescape\")\r\n\t\tchapter_name = [ html.unescape(t.strip(\"\\r\\n\\t\\xA0\\u3000   \")) for t in chapter_name ]\r\n\t\t# self.logger.debug(\"start clear_number\")\r\n\t\tchapter_name_clean = [ self.unified_symbol(self.gen_title_expr(ch, False)[-1]) or ch for ch in chapter_name ]\r\n\t\t# self.logger.debug(\"start get_book_last_info\")\r\n\t\tlastNumChapters, lastChapterNo, lastChapterName = self.get_book_last_info(bookId, item, chapter_name_clean)\r\n\t\t\r\n\t\tif lastChapterNo or lastChapterName:\r\n\t\t\tprint(\"{}《{}》{}丨{}/{}/{}丨ch.{} {}\".format(bookId, item[\"book_title\"], item[\"book_author\"], lastNumChapters, len(chapter_list), len(chapter_name), lastChapterNo, lastChapterName))\r\n\t\telse:\r\n\t\t\tprint(\"{}《{}》{}丨{}/{}/{}\".format(bookId, item[\"book_title\"], item[\"book_author\"], lastNumChapters, len(chapter_list), len(chapter_name)))\r\n\t\t\r\n\t\tif lastNumChapters - len(chapter_list) > 50:\r\n\t\t\tself.logger.warning(\"警告:《{}》{} 的章节数量差异过大:{}~{}\".format(item[\"book_title\"], item[\"book_author\"], lastNumChapters, len(chapter_list)))\r\n\t\tif chapter_name and chapter_name.count(chapter_name[0]) > 1:\r\n\t\t\tself.logger.error(\"《{}》{} 错误:列表中存在防盗章(多个第一章):{}丨{}\".format(item[\"book_title\"], item[\"book_author\"], chapter_name[0], referrer))\r\n\t\t\r\n\t\t# 还没有更新\r\n\t\t# if lastNumChapters >= len(chapter_list):\r\n\t\t\t# return None\r\n\t\t\r\n\t\t# self.logger.debug(\"start update book_info\")\r\n\t\ttry:\r\n\t\t\tcursor = self.database.cursor()\r\n\t\t\tupdater = []\r\n\t\t\tif item[\"book_description\"]:\r\n\t\t\t\tupdater.append(\"description = '{}'\".format(self.escape(item[\"book_description\"].strip(\"\\r\\n\\t\\xA0\\u3000   \"))))\r\n\t\t\tif item[\"book_cover\"]:\r\n\t\t\t\tupdater.append(\"cover = '{}'\".format(self.escape(item[\"book_cover\"])))\r\n\t\t\tif cursor.execute(\"SELECT id FROM book_info WHERE id = {} AND url IS NULL;\".format(bookId)) > 0:\r\n\t\t\t\tupdater.append(\"url = '{}'\".format(self.escape(referrer)))\r\n\t\t\t\r\n\t\t\tif updater:\r\n\t\t\t\tcursor.execute(\"UPDATE book_info SET {} WHERE id = '{}';\".format(\", \".join(updater), bookId))\r\n\t\t\t\tself.database.commit()\r\n\t\texcept Exception as e:\r\n\t\t\tif str(e).find(\"timeout\") > 0:\r\n\t\t\t\tself.logger.error(\"更新书本信息失败\", exc_info = True)\r\n\t\t\t\ttime.sleep(self.timeout_pcl)\r\n\t\t\t\tself.timeout_pcl *= random.randint(1, 3)\r\n\t\t\t\treturn self.proccess_chapter_list(unique, chapter_list, callee, referrer, chapter_name)\r\n\t\t\t#end - if\r\n\t\t#end except\r\n\t\t\r\n\t\tself.timeout_pcl = 1\r\n\t\t\r\n\t\tresults = []\r\n\t\tchapter_list = [ self.fix_url(url, referrer) for url in 
chapter_list ]\r\n\t\t\r\n\t\t# 这个好慢...\r\n\t\t# self.logger.debug(\"start detect_chapter\")\r\n\t\tchapter_real = [ self.detect_chapter(ch) for ch in chapter_name ]\r\n\t\t\r\n\t\t# self.logger.debug(\"start find title same\")\r\n\t\tnearSameTitle = []\r\n\t\tfor i in range(len(chapter_name_clean)):\r\n\t\t\tcnc_gp = chapter_name_clean[max(i - self.num_same_match, 0) : min(i + self.num_same_match, len(chapter_name_clean))]\r\n\t\t\tif cnc_gp.count(chapter_name_clean[i]) > 1:\r\n\t\t\t\t# print(\"附近存在相同标题 {}→{}\".format(chapter_name_clean[i], cnc_gp))\r\n\t\t\t\tnearSameTitle.append(i)\r\n\t\t#end for\r\n\t\t\r\n\t\tdont_filter = False\r\n\t\tif hasattr(self, \"dont_filter\"):\r\n\t\t\tdont_filter = self.dont_filter\r\n\t\t\r\n\t\tsame_title = []\r\n\t\tfor i in range(len(chapter_name_clean)):\r\n\t\t\tv = int(i in nearSameTitle)\r\n\t\t\tif v and hasattr(self, \"base_same_title\"):\r\n\t\t\t\tv += 1\r\n\t\t\tsame_title.append(v)\r\n\t\t#end for\r\n\t\t\r\n\t\t# 补齐缺失的章节\r\n\t\tfor i in self.get_missing_chapter(bookId, chapter_name_clean, chapter_list, chapter_real, same_title)[1]:\r\n\t\t\tignore = False\r\n\t\t\tfor kw in self.KEYWORD_IGNORE:\r\n\t\t\t\t# if chapter_name[i].find(kw) > -1:\r\n\t\t\t\tif re.search(kw, chapter_name[i]):\r\n\t\t\t\t\tignore = True\r\n\t\t\t\t\tbreak\r\n\t\t\t#end - for\r\n\t\t\t\r\n\t\t\tif ignore and i <= lastChapterNo and not chapter_real[i]:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tmeta = {\r\n\t\t\t\t\"unique_id\": unique,\r\n\t\t\t\t\"book_title\": item[\"book_title\"],\r\n\t\t\t\t\"book_id\": str(bookId),\r\n\t\t\t\t\"chapter\": str(i),\r\n\t\t\t\t\"file_name\" : self.book_cache[unique].get(\"file_name\"),\r\n\t\t\t\t\r\n\t\t\t\t# 附近有相同标题,避免被覆盖\r\n\t\t\t\t\"same_title\" : ( i in nearSameTitle ),\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif i <= lastChapterNo:\r\n\t\t\t\tmeta[\"is_inserted\"] = \"1\"\r\n\t\t\tif hasattr(self, \"proxy\") and self.proxy:\r\n\t\t\t\tmeta[\"proxy\"] = self.proxy\r\n\t\t\tif hasattr(self, \"chapterRequiired\"):\r\n\t\t\t\tself.chapterRequiired.append(chapter_name[i])\r\n\t\t\t\r\n\t\t\tresults.append(scrapy.Request(\r\n\t\t\t\tchapter_list[i],\r\n\t\t\t\tcallback = callee,\r\n\t\t\t\theaders = header,\r\n\t\t\t\tmeta = meta,\r\n\t\t\t\tdont_filter = dont_filter\r\n\t\t\t))\r\n\t\t#end - for\r\n\t\t\r\n\t\treturn results\r\n\t#end - proccess_chapter_list\r\n\t\r\n\t# 处理章节\r\n\tdef proccess_chapter(self, title, context, meta, referrer = \"\"):\r\n\t\tif not title or not context:\r\n\t\t\tself.logger.error(\"{} 没有标题或内容\".format(referrer))\r\n\t\t\traise Exception(\"错误:没有标题或内容\")\r\n\t\t\r\n\t\tunique = meta[\"unique_id\"]\r\n\t\tcached = unique in self.book_cache\r\n\t\tif not cached:\r\n\t\t\tprint(\"错误:{} 缓存失踪\".format(unique))\r\n\t\t\t# return None\r\n\t\t\r\n\t\ttitle = html.unescape(title)\r\n\t\tcontext = html.unescape(context)\r\n\t\t\r\n\t\tif title.startswith(\"正文\"):\r\n\t\t\ttitle = title.replace(\"正文卷\", \"\").replace(\"正文\", \"\")\r\n\t\tif title.startswith(\"章节目录\"):\r\n\t\t\ttitle = title.replace(\"章节目录\", \"\")\r\n\t\ttitle = title.strip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t\r\n\t\toldTitle = title[:]\r\n\t\ttitle = self.unified_symbol(self.gen_title_expr(title, False)[-1] or title)\r\n\t\t\r\n\t\tchapterReal = self.detect_chapter(oldTitle)\r\n\t\tconsoleText = \"\"\r\n\t\t\r\n\t\tif cached:\r\n\t\t\tconsoleText = \"书名:{}丨章节:{}/{}丨标题:{}→{}丨索引:{}丨行数:{}\".format(\r\n\t\t\t\tmeta[\"book_title\"],\r\n\t\t\t\tint(meta[\"chapter\"]) + 1,\r\n\t\t\t\tself.book_cache[unique][\"num_chapter\"],\r\n\t\t\t\toldTitle, 
title,
\r\n\t\t\t\tchapterReal,
\r\n\t\t\t\tlen(context)
\r\n\t\t\t)
\r\n\t\telse:
\r\n\t\t\tconsoleText = "书名:{}丨章节:{}丨标题:{}→{}丨索引:{}丨行数:{}".format(
\r\n\t\t\t\tmeta["book_title"],
\r\n\t\t\t\tint(meta["chapter"]) + 1,
\r\n\t\t\t\toldTitle, title,
\r\n\t\t\t\tchapterReal,
\r\n\t\t\t\tlen(context)
\r\n\t\t\t)
\r\n\t\t#end - if
\r\n\t\t\r\n\t\tif isinstance(context, list):
\r\n\t\t\tcontext = "\n".join(context)
\r\n\t\t\r\n\t\tcontext = self.correct_content(context)
\r\n\t\t\r\n\t\tchapterText = ""
\r\n\t\tif isinstance(context, str):
\r\n\t\t\tcontext = context.replace("\n      ", "\\n").replace("\n\n      ", "\\n").replace("\n\n      ", "")
\r\n\t\t\tcontext = context.split("\n")
      \", \"\").replace(\"

      \", \"\").replace(\"

      \", \"\")\r\n\t\t\tif len(line) < 1:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# 处理未正确更新的情况\r\n\t\t\tterminator = line.find(\"正在手打中,请稍等片刻\")\r\n\t\t\tif terminator <= -1:\r\n\t\t\t\tterminator = line.find(\"内容更新后,请重新刷新页面\")\r\n\t\t\tif terminator <= -1:\r\n\t\t\t\tterminator = line.find(\"请稍后刷新访问\")\r\n\t\t\tif terminator <= -1:\r\n\t\t\t\tterminator = line.find(\"防采集\")\r\n\t\t\tif terminator <= -1:\r\n\t\t\t\tterminator = line.find(\"下一页继续阅读\")\r\n\t\t\tif terminator <= -1:\r\n\t\t\t\tterminator = line.find(\"章节正在入库\")\r\n\t\t\t\r\n\t\t\tif terminator > -1:\r\n\t\t\t\tif cached:\r\n\t\t\t\t\tprint(\"错误:《{}》{} 第 {} 章 {} 发现未完整标记\".format(meta[\"book_title\"], self.book_cache[unique][\"item\"][\"book_author\"], meta[\"chapter\"], title))\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"错误:《{}》 第 {} 章 {} 发现未完整标记\".format(meta[\"book_title\"], meta[\"chapter\"], title))\r\n\t\t\t\thasComplete = 0\r\n\t\t\t\t\r\n\t\t\t\t# 检查是否真的没有更新,有时候这破站会有 bug 的\r\n\t\t\t\tline = line[:terminator]\r\n\t\t\t\t# if len(line) < 99:\r\n\t\t\t\t\t# return None\r\n\t\t\t#end - if\r\n\t\t\t\r\n\t\t\twords = self.calc_num_words(line)\r\n\t\t\tif words >= 300:\r\n\t\t\t\tprint(\"警告:第 {} 章 {} 单行过长,进行强制折行\".format(meta[\"chapter\"], title))\r\n\t\t\t\tline = line.replace(\"。\", \"。\\n\\n  \")\r\n\t\t\t\r\n\t\t\t# 自然段留两个空格,结尾留一个空行\r\n\t\t\tchapterText += \"  \" + line + \"\\n\\n\"\r\n\t\t\tnumWordCount += words\r\n\t\t# end - for\r\n\t\t\r\n\t\titem = None\r\n\t\tif cached:\r\n\t\t\titem = self.book_cache[unique][\"item\"]\r\n\t\t\r\n\t\tconsoleText += \"丨字数:{}\".format(numWordCount)\r\n\t\t\r\n\t\tif numWordCount < 300:\r\n\t\t\thasComplete = 0\r\n\t\telif int(meta[\"chapter\"]) > 0 and (not chapterReal or chapterReal > 1) and self.check_anti_theft(meta[\"book_id\"], chapterText):\r\n\t\t\thasComplete = 0\r\n\t\t\tself.logger.error(\"《{}》{} 被检测到是防盗章 {}\".format(meta[\"book_title\"], title, referrer))\r\n\t\t\treturn None\r\n\t\telif self.check_relation_chapter(meta[\"book_id\"], int(meta[\"chapter\"]), chapterReal, title, chapterText):\r\n\t\t\tself.logger.error(\"《{}》{} 被检测到有防盗内容 {}\".format(meta[\"book_title\"], title, referrer))\r\n\t\t\treturn None\r\n\t\t\r\n\t\tif not hasComplete and self.check_irrelevant_chapter(meta[\"book_id\"], chapterReal):\r\n\t\t\thasComplete = 1\r\n\t\t\tself.logger.info(\"《{}》{} 也许是无关章节,停止更新 {}\".format(meta[\"book_title\"], title, chapterReal))\r\n\t\t\r\n\t\tif cached and self.book_cache[unique][\"ordered\"] and chapterReal == None and not hasComplete:\r\n\t\t\tself.logger.error(\"提示:《{}》第 {} 章 {} 或许还没有更新完整!\".format(meta[\"book_title\"], meta[\"chapter\"], title))\r\n\t\t\thasComplete = 1\r\n\t\telif not hasComplete:\r\n\t\t\tself.logger.error(\"警告:《{}》第 {} 章 {} 还没有更新完整!\".format(meta[\"book_title\"], meta[\"chapter\"], title))\r\n\t\t\r\n\t\tif len(chapterText.strip(\"\\r\\n\\t\\xA0\\u3000   \")) <= 0 or len(chapterText.replace(\"小美~\", \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \")) <= 0:\r\n\t\t\tprint(\"错误:《{}》章节 {} 的内容是空的\".format(meta[\"book_title\"], title))\r\n\t\t\tif cached:\r\n\t\t\t\tself.logger.error(\"错误:《{}》{} 第 {} 章 {} 的内容是空的\".format(meta[\"book_title\"], item[\"book_author\"], meta[\"chapter\"], title))\r\n\t\t\telse:\r\n\t\t\t\tself.logger.error(\"错误:《{}》 第 {} 章 {} 的内容是空的\".format(meta[\"book_title\"], meta[\"chapter\"], title))\r\n\t\t\treturn None\r\n\t\t\r\n\t\tsame_title = meta.get(\"same_title\")\r\n\t\tif same_title:\r\n\t\t\tsame_title = 1\r\n\t\t\tif hasattr(self, \"base_same_title\"):\r\n\t\t\t\tsame_title += 1\r\n\t\telse:\r\n\t\t\tsame_title = 0\r\n\t\t\r\n\t\tspider 
= 0\r\n\t\tif cached:\r\n\t\t\tspider = self.book_cache[unique].get(\"spider\")\r\n\t\t\r\n\t\tchapterId, updateFlags, lastChapterName = self.save_chapter(meta[\"book_id\"], title, chapterText, meta[\"chapter\"], chapterReal, referrer, hasComplete, numWordCount, same_title)\r\n\t\tif updateFlags == 1:\r\n\t\t\tconsoleText += \"丨更新:id.{} {}\".format(chapterId, lastChapterName)\r\n\t\telif updateFlags == 2:\r\n\t\t\tconsoleText += \"丨更新完整:id.{} {}\".format(chapterId, lastChapterName)\r\n\t\t\t\r\n\t\t\tif cached:\r\n\t\t\t\t# 需要重建 txt 文件\r\n\t\t\t\tself.book_cache[unique][\"rebuild\"] = True\r\n\t\t\t#end if\r\n\t\telif updateFlags == 3:\r\n\t\t\tconsoleText += \"丨未更新:id.{} {}\".format(chapterId, lastChapterName)\r\n\t\t#end - if\r\n\t\t\r\n\t\tif cached and not updateFlags and not meta.get(\"is_inserted\"):\r\n\t\t\tif chapterReal != None:\r\n\t\t\t\tself.book_cache[unique][\"chapters\"][int(\\\r\n\t\t\t\t\tmeta[\"chapter\"])] = \"第\" + str(chapterReal) + \"章 \" + title + \"\\n\\n\" +\\\r\n\t\t\t\t\tchapterText.replace(\" \", \"  \").replace(\" \", \"  \").replace(\"  \", \"  \").replace(r\"\\n\", \"\\n\") + \"\\n\"\r\n\t\t\telse:\r\n\t\t\t\tself.book_cache[unique][\"chapters\"][int(\\\r\n\t\t\t\t\tmeta[\"chapter\"])] = \"第\" + str(meta[\"chapter\"]) + \"章 \" + title + \"\\n\\n\" +\\\r\n\t\t\t\t\tchapterText.replace(\" \", \"  \").replace(\" \", \"  \").replace(\"  \", \"  \").replace(r\"\\n\", \"\\n\") + \"\\n\"\r\n\t\telif cached and not updateFlags:\r\n\t\t\t# 中间插入了章节,强制进行重建\r\n\t\t\tself.book_cache[unique][\"reflush\"] = True\r\n\t\t\tself.book_cache[unique][\"rebuild\"] = True\r\n\t\t#end - if\r\n\t\t\r\n\t\tif spider and cached and (updateFlags == 0 or updateFlags == 2):\r\n\t\t\ttry:\r\n\t\t\t\tif updateFlags == 2:\r\n\t\t\t\t\tself.database.cursor().execute(\"UPDATE book_spider SET num_chapter = num_chapter + 1, bad_chapter = IF(bad_chapter, bad_chapter - 1, bad_chapter), update_time = CURRENT_TIMESTAMP WHERE id = '{}';\".format(spider))\r\n\t\t\t\telif hasComplete:\r\n\t\t\t\t\tself.database.cursor().execute(\"UPDATE book_spider SET num_chapter = num_chapter + 1, num_words = num_words + {}, last_chapter = {}, update_time = CURRENT_TIMESTAMP WHERE id = '{}';\".format(numWordCount, chapterId, spider))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.database.cursor().execute(\"UPDATE book_spider SET bad_chapter = bad_chapter + 1, update_time = CURRENT_TIMESTAMP WHERE id = '{}';\".format(spider))\r\n\t\t\t\tself.database.commit()\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tself.logger.error(\"更新状态失败:{}\".format(e), exc_info=True)\r\n\t\t#end if\r\n\t\t\r\n\t\tprint(consoleText)\r\n\t\t\r\n\t\tsample_first, sample_last = self.get_chapter_sample(chapterText)\r\n\t\tif (updateFlags == 0 or updateFlags == 1 or updateFlags == 2) and len(sample_first) >= 2 and len(sample_last) >= 2:\r\n\t\t\ttry:\r\n\t\t\t\tself.database.cursor().execute((\r\n\t\t\t\t\t\"INSERT INTO book_relation \"\r\n\t\t\t\t\t\"(book_id, chapter_id, title, chapter_no, chapter_real, first_line, second_line, last_line, previous_line) VALUES \"\r\n\t\t\t\t\t\"('{}', '{}', '{}', {}, {}, '{}', '{}', '{}', '{}') ON DUPLICATE KEY UPDATE \"\r\n\t\t\t\t\t\"title = '{}', chapter_no = {}, chapter_real = {}, first_line = '{}', second_line = '{}', last_line = '{}', previous_line = '{}';\").format(\r\n\t\t\t\t\t\r\n\t\t\t\t\t# INSERT INTO ... 
VALUES\r\n\t\t\t\t\tmeta[\"book_id\"], chapterId,\r\n\t\t\t\t\tself.escape(title), meta[\"chapter\"], chapterReal or \"NULL\", self.escape(sample_first[0]), self.escape(sample_first[1]),\r\n\t\t\t\t\tself.escape(sample_last[0]), self.escape(sample_last[1]),\r\n\t\t\t\t\t\r\n\t\t\t\t\t# ON DUPLICATE KEY UPDATE\r\n\t\t\t\t\tself.escape(title), meta[\"chapter\"], chapterReal or \"NULL\", self.escape(sample_first[0]), self.escape(sample_first[1]),\r\n\t\t\t\t\tself.escape(sample_last[0]), self.escape(sample_last[1]),\r\n\t\t\t\t))\r\n\t\t\t\tself.database.commit()\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tself.logger.error(\"添加关系错误:{}丨内容:{}\".format(str(e), (\r\n\t\t\t\t\t\"INSERT INTO book_relation \"\r\n\t\t\t\t\t\"(book_id, chapter_id, title, chapter_no, chapter_real, first_line, second_line, last_line, previous_line) VALUES \"\r\n\t\t\t\t\t\"('{}', '{}', '{}', {}, {}, '{}', '{}', '{}', '{}') ON DUPLICATE KEY UPDATE \"\r\n\t\t\t\t\t\"title = '{}', chapter_no = {}, chapter_real = {}, first_line = '{}', second_line = '{}', last_line = '{}', previous_line = '{}';\").format(\r\n\t\t\t\t\t\r\n\t\t\t\t\t# INSERT INTO ... VALUES\r\n\t\t\t\t\tmeta[\"book_id\"], chapterId,\r\n\t\t\t\t\tself.escape(title), meta[\"chapter\"], chapterReal or \"NULL\", self.escape(sample_first[0]), self.escape(sample_first[1]),\r\n\t\t\t\t\tself.escape(sample_last[0]), self.escape(sample_last[1]),\r\n\t\t\t\t\t\r\n\t\t\t\t\t# ON DUPLICATE KEY UPDATE\r\n\t\t\t\t\tself.escape(title), meta[\"chapter\"], chapterReal or \"NULL\", self.escape(sample_first[0]), self.escape(sample_first[1]),\r\n\t\t\t\t\tself.escape(sample_last[0]), self.escape(sample_last[1]),\r\n\t\t\t\t)))\r\n\t\t#end if\r\n\t\t\r\n\t\tif self.last_mismatch and self.last_mismatch[\"book_id\"] != meta[\"book_id\"]:\r\n\t\t\tprint(\"生成:\" + self.last_mismatch[\"file_name\"])\r\n\t\t\tself.generate_txt_file(self.last_mismatch[\"book_id\"], self.last_mismatch[\"file_name\"])\r\n\t\t\tself.last_mismatch = None\r\n\t\t#end if\r\n\t\t\r\n\t\tif not cached and meta.get(\"file_name\") and meta.get(\"book_id\"):\r\n\t\t\tif not updateFlags:\r\n\t\t\t\tself.last_mismatch = { \"book_id\" : meta[\"book_id\"], \"file_name\" : meta[\"file_name\"] }\r\n\t\t\treturn None\r\n\t\t#end if\r\n\t\t\r\n\t\tif not cached:\r\n\t\t\tself.logger.warning(\"缓存失踪异常未处理:{}\".format(unique))\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# 直到完成才保存文件\r\n\t\tif int(meta[\"chapter\"]) < self.book_cache[unique][\"num_chapter\"] - 1:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# 保存到文件\r\n\t\tif self.book_cache[unique].get(\"reflush\") or self.book_cache[unique][\"chapters\"]:\r\n\t\t\tif self.book_cache[unique].get(\"rebuild\") or not os.path.exists(self.book_cache[unique][\"file_name\"]):\r\n\t\t\t\t# os.remove(self.book_cache[unique][\"file_name\"])\r\n\t\t\t\tself.generate_txt_file(self.book_cache[unique][\"book_id\"], self.book_cache[unique][\"file_name\"])\r\n\t\t\telif self.book_cache[unique][\"chapters\"]:\r\n\t\t\t\tfile = open(self.book_cache[unique][\"file_name\"], \"a+\", encoding=\"utf-8\")\r\n\t\t\t\tfor index, text in self.book_cache[unique][\"chapters\"].items():\r\n\t\t\t\t\tfile.write(text)\r\n\t\t\t\tfile.close()\r\n\t\t\telse:\r\n\t\t\t\tself.logger.warning(\"{} 可能需要写文件,但什么也没有发生 {}\".format(self.book_cache[unique][\"file_name\"], referrer))\r\n\t\t\t#end if\r\n\t\t#end if\r\n\t\t\r\n\t\tprint(\"书名:{}丨作者:{}丨章节数:{}\".format(\r\n\t\t\titem[\"book_title\"], item[\"book_author\"], self.book_cache[unique][\"num_chapter\"]\r\n\t\t))\r\n\t\t\r\n\t\tdel self.book_cache[unique]\r\n\t\treturn 
item\r\n\t#end - proccess_chapter\r\n\t\r\n\tdef closed(self, reason):\r\n\t\t# 处理未保存的内容\r\n\t\tfor unique, data in self.book_cache.items():\r\n\t\t\tprint(\"正在回收:{}\".format(unique))\r\n\t\t\tif data.get(\"reflush\") or data.get(\"chapters\"):\r\n\t\t\t\tif data.get(\"rebuild\") or not os.path.exists(data[\"file_name\"]):\r\n\t\t\t\t\tself.generate_txt_file(data[\"book_id\"], data[\"file_name\"])\r\n\t\t\t\telif data.get(\"chapters\"):\r\n\t\t\t\t\twith open(data[\"file_name\"], \"a+\", encoding=\"utf-8\") as f:\r\n\t\t\t\t\t\tfor index, text in data[\"chapters\"].items():\r\n\t\t\t\t\t\t\tf.write(text)\r\n\t\t\t\t\t#end with\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.logger.warning(\"{} 可能需要写文件,但什么也没有发生\".format(data[\"file_name\"]))\r\n\t\t\t#end if\r\n\t\t#end for\r\n\t\t\r\n\t\tself.book_cache = {}\r\n\t\ttimeleft = str(datetime.datetime.now() - self.time_of_start)\r\n\t\tself.logger.info(\"结束,耗时 {},原因 {}\".format(timeleft, reason))\r\n\t\tself.database = None\r\n\t\tself.logger = None\r\n\t#end - closed\r\n\t\r\n\tdef generate_txt_file(self, bookId, fileName, unordered = False):\r\n\t\tcursor = self.database.cursor()\r\n\t\tnumRows = cursor.execute(\"SELECT name, author, description, force_mode FROM book_info WHERE id = {};\".format(bookId))\r\n\t\tif numRows <= 0:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tmode = 0\r\n\t\trow = cursor.fetchone()\r\n\t\tforce_mode = int(row[3])\r\n\t\tcontent = \"《{}》{}\\n\\n{}\\n\\n\".format(row[0], row[1], row[2])\r\n\t\tchapters = []\r\n\t\t\r\n\t\t# 检查根据标题得到的章节号是否不重复\r\n\t\tif force_mode == 1 or (force_mode == -1 and not unordered and self.is_real_chapter(bookId)):\r\n\t\t\tprint(\"生成模式:真实章节序号\")\r\n\t\t\tmode = 3\r\n\t\t\t\r\n\t\t\t# 章节号是唯一的,不存在卷的情况下,优先使用章节顺序\r\n\t\t\tnumRows = cursor.execute(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} ORDER BY IF(ISNULL(chapter_real), chapter_no, chapter_real) ASC;\".format(bookId))\r\n\t\t\tif numRows > 0:\r\n\t\t\t\tchapters = cursor.fetchall()\r\n\t\telse:\r\n\t\t\tmode = 0\r\n\t\t\t\r\n\t\t\tif force_mode == 2:\r\n\t\t\t\t# 章节号非唯一,存在分卷,只能使用网站定义的顺序了\r\n\t\t\t\tprint(\"生成模式:预定义章节序号\")\r\n\t\t\t\tnumRows = cursor.execute(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} ORDER BY chapter_no ASC;\".format(bookId))\r\n\t\t\t\tif numRows > 0:\r\n\t\t\t\t\tchapters = cursor.fetchall()\r\n\t\t\telse:\r\n\t\t\t\t# 分卷查询\r\n\t\t\t\tprint(\"生成模式:分卷+真实章节序号\")\r\n\t\t\t\tnumRows = cursor.execute(\"SELECT chapter_no, chapter_real, title FROM book_data WHERE book_id = {} AND chapter_real = 1 ORDER BY chapter_no ASC;\".format(bookId))\r\n\t\t\t\tif numRows > 0:\r\n\t\t\t\t\tbegin = 0\r\n\t\t\t\t\tolder = \"\"\r\n\t\t\t\t\tfor i, volume in enumerate(cursor.fetchall()):\r\n\t\t\t\t\t\tnumRows = cursor.execute(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} AND (chapter_no BETWEEN {} AND {}) ORDER BY IF(ISNULL(chapter_real), chapter_no, chapter_real) ASC;\".format(bookId, begin, int(volume[0]) - 1))\r\n\t\t\t\t\t\t# print(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} AND (chapter_no BETWEEN {} AND {}) ORDER BY IF(ISNULL(chapter_real), chapter_no, chapter_real) ASC;\".format(bookId, begin, int(volume[0]) - 1))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# print(\"第{}卷:数量{},第{}章:{}\".format(i, numRows, volume[1], volume[2]))\r\n\t\t\t\t\t\tif difflib.SequenceMatcher(None, older, volume[2]).ratio() > 0.8:\r\n\t\t\t\t\t\t\tprint(\"警告:卷识别冲突:{}.{}≈{}.{}\".format(begin, older, volume[0], 
volume[2]))\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tbegin = volume[0]\r\n\t\t\t\t\t\tolder = volume[2]\r\n\t\t\t\t\t\tif numRows > 0:\r\n\t\t\t\t\t\t\tchapters += cursor.fetchall()\r\n\t\t\t\t\t#end for\r\n\t\t\t\t\t\r\n\t\t\t\t\tnumRows = cursor.execute(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} AND chapter_no >= {} ORDER BY IF(ISNULL(chapter_real), chapter_no, chapter_real) ASC;\".format(bookId, begin))\r\n\t\t\t\t\t# print(\"SELECT chapter_no, title, content, chapter_real FROM book_data WHERE book_id = {} AND chapter_no >= {} ORDER BY IF(ISNULL(chapter_real), chapter_no, chapter_real) ASC;\".format(bookId, begin))\r\n\t\t\t\t\tif numRows > 0:\r\n\t\t\t\t\t\tchapters += cursor.fetchall()\r\n\t\t\t\t#end if\r\n\t\t\t#end if\r\n\t\t#end - if\r\n\t\t\r\n\t\tif len(chapters) <= 0:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tfor chapter in chapters:\r\n\t\t\tchapterName = str(chapter[1])\r\n\t\t\t\r\n\t\t\t# chapter_real 一般不应该是 0 的,所以没毛病\r\n\t\t\tchapterId = chapter[3] or chapter[0]\r\n\t\t\t\r\n\t\t\t# 给没有章节号的章节添加章节号,以免 calibre/easypub 检测不到章节\r\n\t\t\tchapterName = \"第\" + str(chapterId) + \"章 \" + chapterName\r\n\t\t\t\r\n\t\t\tcontent += chapterName + \"\\n\\n\" + \\\r\n\t\t\t\tself.correct_content(str(chapter[2])).replace(\"      \", \"  \").replace(\" \", \"  \").replace(\r\n\t\t\t\t\"  \", \"  \").replace(\"    \", \"  \").replace(r\"\\n\", \"\\n\") + \"\\n\\n\"\r\n\t\t#end - for\r\n\t\t\r\n\t\tfile = open(fileName, \"w\", encoding=\"utf-8\")\r\n\t\tfile.seek(0)\r\n\t\tfile.write(content)\r\n\t\tfile.close()\r\n\t\treturn True\r\n\t#end - generate_txt_file\r\n\t\r\n\tdef check_complete(self, bookId):\r\n\t\tcursor = self.database.cursor()\r\n\t\tnumRows = cursor.execute(\"SELECT title, content FROM book_data WHERE id = ( SELECT last_chapter FROM book_info WHERE id = {} ) AND complete = 0;\".format(bookId))\r\n\t\tif numRows:\r\n\t\t\trow = cursor.fetchone()\r\n\t\t\tfor kw in self.KEYWORD_FINISHED:\r\n\t\t\t\t# if str(row[0]).find(kw) > -1:\r\n\t\t\t\tif re.search(kw, str(row[0])):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tcursor.execute(\"UPDATE book_info SET complete = 1 WHERE id = {} AND complete = 0;\".format(bookId))\r\n\t\t\t\t\t\t# print(\"标记为完本:\" + str(bookId))\r\n\t\t\t\t\t\tself.database.commit()\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tif str(e).find(\"gone away\") > 0 or str(e).find(\"Lost Connection\") > 0:\r\n\t\t\t\t\t\t\tself.connect_db()\r\n\t\t\t\t\tbreak\r\n\t\t#end - if\r\n\t#end - check_complete\r\n\t\r\n\tdef is_real_chapter(self, bookId):\r\n\t\tcursor = self.database.cursor()\r\n\t\t# numRows = cursor.execute(\"SELECT CAST(COUNT(DISTINCT chapter_no) AS DECIMAL) / COUNT(DISTINCT chapter_real) <= {} FROM book_data WHERE book_id = {};\".format(\r\n\t\t\t# settings.MAX_SAME_RATE, bookId\r\n\t\t# ))\r\n\t\t# return (numRows > 0 and cursor.fetchone()[0])\r\n\t\t\r\n\t\tnumRows = cursor.execute(\"SELECT check_real_mode('{}');\".format(bookId))\r\n\t\tif numRows > 0 and cursor.fetchone()[0]:\r\n\t\t\treturn True\r\n\t\treturn False\r\n\t#end - is_real_chapter\r\n\t\r\n\t# 删除标题非重要部分\r\n\tdef gen_title_expr(self, title, encode=True):\r\n\t\tresults = [ title ]\r\n\t\t\r\n\t\twhile True:\r\n\t\t\ttitle = self.clear_number(title)\r\n\t\t\tif title and title not in results:\r\n\t\t\t\tresults.append(title)\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\t\t#end while\r\n\t\t\r\n\t\tif encode:\r\n\t\t\treturn json.dumps(results, ensure_ascii=False)\r\n\t\t\r\n\t\treturn results\r\n\t#end - gen_title_expr\r\n\t\r\n\ttimeout_sc = 1\r\n\t\r\n\tdef 
save_chapter(self, bookId, title, context, no, real, url, complete, words, same_title=0):\r\n\t\tcursor = self.database.cursor()\r\n\t\tnumRows = 0\r\n\t\t# self.logger.debug(\"CALL save_chapter\")\r\n\t\t\r\n\t\ttry:\r\n\t\t\tnumRows = cursor.execute(\"CALL save_chapter('{}', '{}', '{}', '{}', {}, '{}', '{}', '{}', '{}');\".format(\r\n\t\t\t\tbookId, self.escape(title), self.escape(context),\r\n\t\t\t\tno, real or \"NULL\", self.escape(url), complete, words, same_title\r\n\t\t\t))\r\n\t\texcept Exception as e:\r\n\t\t\tself.logger.error(\"错误:执行语句失败:{}\".format(\"CALL save_chapter('{}', '{}', '{}', '{}', {}, '{}', '{}', '{}', '{}');\".format(\r\n\t\t\t\tbookId, self.escape(title), self.escape(context),\r\n\t\t\t\tno, real or \"NULL\", self.escape(url), complete, words, same_title\r\n\t\t\t)), exc_info = True)\r\n\t\t\ttime.sleep(self.timeout_sc)\r\n\t\t\tself.timeout_sc *= random.randint(1, 3)\r\n\t\t\t\r\n\t\t\tif str(e).find(\"gone away\") > 0 or str(e).find(\"Lost Connection\") > 0:\r\n\t\t\t\tself.connect_db()\r\n\t\t\t\r\n\t\t\tif str(e).find(\"Duplicate entry\") == -1:\r\n\t\t\t\treturn self.save_chapter(bookId, title, context, no, real, url, complete, words, same_title)\r\n\t\t#end - try\r\n\t\t\r\n\t\tself.timeout_sc = 1\r\n\t\t# self.logger.debug(\"CALL save_chapter finished\")\r\n\t\t\r\n\t\tif numRows > 0:\r\n\t\t\trow = cursor.fetchone()\r\n\t\t\treturn int(row[0]), int(row[1]), str(row[2])\r\n\t\t\r\n\t\treturn None, None, None\r\n\t#end - save_chapter\r\n\t\r\n\t# 获取上次最后章节序和上次章节数\r\n\tdef get_book_last_info(self, bookId, item, chapter_name):\r\n\t\tlastChapter = \"\"\r\n\t\tlastChapterNo = -1\r\n\t\tbookInfo = [ 0, 0 ]\r\n\t\tcursor = self.database.cursor()\r\n\t\t\r\n\t\t# 获取状态信息\r\n\t\tnumRows = cursor.execute(\"SELECT num_chapter, last_chapter FROM book_info WHERE id = '{}';\".format(bookId))\r\n\t\tif numRows > 0:\r\n\t\t\tbookInfo = cursor.fetchone()\r\n\t\t\t\r\n\t\t\t# 获取章节列表匹配顺序\r\n\t\t\tif bookInfo[1] and int(bookInfo[1]) > 0:\r\n\t\t\t\tnumRows = cursor.execute(\"SELECT title, chapter_no FROM book_data WHERE id = '{}';\".format(bookInfo[1]))\r\n\t\t\t\tif numRows > 0:\r\n\t\t\t\t\trow = cursor.fetchone()\r\n\t\t\t\t\tlastChapter = str(row[0]).strip()\r\n\t\t\t\t\t\r\n\t\t\t\t\tif row[1]:\r\n\t\t\t\t\t\tlastChapterNo = int(row[1])\r\n\t\t\t\t\t\r\n\t\t\t\t\tif lastChapterNo < len(chapter_name) and lastChapterNo >= 0:\r\n\t\t\t\t\t\tif chapter_name[lastChapterNo].strip() != lastChapter:\r\n\t\t\t\t\t\t\tlastChapterNo = 0\r\n\t\t\t\t\t\t\tself.logger.warning(\"警告:检查 {} 最新章节序号或章节名字不匹配 no.{} 【{}】【{}】\".format(bookId, lastChapterNo, lastChapter, chapter_name[lastChapterNo]))\r\n\t\t\t\t\t#end - if\r\n\t\t\t\t\t\r\n\t\t\t\t\t# 寻找上次位置\r\n\t\t\t\t\tif lastChapterNo <= 0 and lastChapter and chapter_name:\r\n\t\t\t\t\t\t# 逆序搜索效果更好\r\n\t\t\t\t\t\tfor i in reversed(range(len(chapter_name))):\r\n\t\t\t\t\t\t\t# list.index 不支持逆序\r\n\t\t\t\t\t\t\tif chapter_name[i].strip() == lastChapter:\r\n\t\t\t\t\t\t\t\tlastChapterNo = i\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\t#end - if\r\n\t\t\t\t\t\t#end - for\r\n\t\t\t\t\t#end - if\r\n\t\t\t\t\t\r\n\t\t\t\t\tif lastChapterNo <= 0:\r\n\t\t\t\t\t\tprint(\"警告:找不到上次的位置:\" + lastChapter)\r\n\t\t\t\t#end - if\r\n\t\t\t#end - if\r\n\t\t#end - if\r\n\t\t\r\n\t\treturn int(bookInfo[0]), lastChapterNo, lastChapter\r\n\t#end - get_book_info\r\n\t\r\n\t# 清理和修复内容\r\n\tdef correct_content(self, context):\r\n\t\tfor rule in self.REPLACE_REMOVE:\r\n\t\t\ttry:\r\n\t\t\t\tcontext = re.sub(rule, \"\", context)\r\n\t\t\texcept Exception as 
e:\r\n\t\t\t\tself.logger.error(\"替换移除错误:{}\".format(rule), exc_info=True)\r\n\t\tfor rule, replacor in self.REPLACE_REGEX.items():\r\n\t\t\ttry:\r\n\t\t\t\tcontext = re.sub(rule, replacor, context)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tself.logger.error(\"替换内容错误:{}\".format(rule), exc_info=True)\r\n\t\tfor rule, replacor in self.REPLACE_ONCE.items():\r\n\t\t\ttry:\r\n\t\t\t\tcontext = re.sub(rule, replacor, context)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tself.logger.error(\"替换单字错误:{}\".format(rule), exc_info=True)\r\n\t\tfor rule, replacor in self.REPLACE_ONCE_FIX.items():\r\n\t\t\ttry:\r\n\t\t\t\tcontext = re.sub(rule, replacor, context)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tself.logger.error(\"替换修复错误:{}\".format(rule), exc_info=True)\r\n\t\t#end - for\r\n\t\t\r\n\t\treturn self.content_format(context)\r\n\t#end - correct_content\r\n\t\r\n\t# 计算中文字数\r\n\tdef calc_num_words(self, text):\r\n\t\ttotal = 0\r\n\t\tfor c in text:\r\n\t\t\tif self.is_chinese_word(c):\r\n\t\t\t\ttotal += 1\r\n\t\treturn total\r\n\t#end - calc_num_words\r\n\t\r\n\tdef is_chinese_word(self, c):\r\n\t\treturn ( c not in string.ascii_letters and not c.isdigit() and not c.isspace() and c.isalpha() )\r\n\t#end - is_chinese_word\r\n\t\r\n\tdef clear_number(self, chapterName):\r\n\t\t# 删除没用的「正文」字样\r\n\t\tif chapterName.startswith(\"正文卷\"):\r\n\t\t\tchapterName = chapterName.replace(\"正文卷\", \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._·\\r\\n\\t\\xA0\\u3000   ??\")\r\n\t\tif chapterName.startswith(\"正文\"):\r\n\t\t\tchapterName = chapterName.replace(\"正文\", \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._·\\r\\n\\t\\xA0\\u3000   ??\")\r\n\t\tif chapterName.startswith(\"章节目录\"):\r\n\t\t\tchapterName = chapterName.replace(\"章节目录\", \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._·\\r\\n\\t\\xA0\\u3000   ??\")\r\n\t\t\r\n\t\t# 尝试清理后边的括号\r\n\t\tchapterName = self.unified_symbol(chapterName)\r\n\t\toriginal = chapterName[:]\r\n\t\twrepl = re.search(r\"([^()]*?(第?[一二三四五六七八九十0123456789]*(?:章|更|/[一二三四五六七八九十0123456789]*))[^()]*?)$\", chapterName)\r\n\t\tif wrepl:\r\n\t\t\tchapterName = chapterName.replace(wrepl.group(1), \"\").rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\twdrepl = re.search(r\"([^()]*?((?:[两仨二三四五六七八九十0-9]0*|[两仨二三四五六七八九十0-9][千万kKwW]|[千万])?(?:大章|章|字|字大章|字章))[^()]*?)$\", chapterName)\r\n\t\tif wdrepl:\r\n\t\t\tchapterName = chapterName.replace(wdrepl.group(1), \"\").rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\tqrepl = re.search(r\"([^()]*?((?:新书|月底|月初|月末)?求(?:保底)?(?:推荐|推荐票|订阅|收藏|支持|投资|票票|推荐票票|票|首订|月票)+(?:[\\s\\xA0\\u3000,、~~!和]*求?(?:保底)?(?:推荐|推荐票|订阅|收藏|支持|投资|票票|推荐票票|票|月票|首订)+)*[啊啦呐!~~]*)[^()]*?)$\", chapterName)\r\n\t\tif qrepl:\r\n\t\t\tchapterName = chapterName.replace(qrepl.group(1), \"\").rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\tmrepl = re.search(r\"([^()]*?([两仨二三四五六七八九十0-9]章?合[一1](?:大章|大)?[!~~]*)[^()]*?)$\", chapterName)\r\n\t\tif mrepl:\r\n\t\t\tchapterName = chapterName.replace(mrepl.group(1), \"\").rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\tjgrepl = re.search(r\"([^()]*?(补?[一两仨二三四五六七八九十0-9]*(?:打赏|万赏|千赏|月票|万票|千票|推荐票|推荐|万推|千推|收藏|首订|万收|千收|刀片|营养液|均定)加更[!~~]*)[^()]*?)$\", chapterName)\r\n\t\tif jgrepl:\r\n\t\t\tchapterName = chapterName.replace(jgrepl.group(1), \"\").rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\tsrepl = re.search(r\"([\\r\\n\\t\\xA0\\u3000   修改,、~~!]*)$\", chapterName)\r\n\t\tif srepl:\r\n\t\t\tchapterName = chapterName.replace(srepl.group(0), \"\")\r\n\t\telse:\r\n\t\t\tchapterName = original\r\n\t\t\r\n\t\t# 纯数字开头\r\n\t\t# redg = 
re.search(r\"^\\d+[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·,]?\", chapterName)\r\n\t\t# if redg:\r\n\t\t\t# return chapterName.replace(redg.group(0), \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—._\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t\r\n\t\t# 正常的章节开头\r\n\t\thaveVolPrefix = (\r\n\t\t\tre.search(\"第[\\r\\n\\t\\xA0\\u3000   ]*[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+[\\r\\n\\t\\xA0\\u3000   ]*卷.*第[\\r\\n\\t\\xA0\\u3000   ]*[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+[\\r\\n\\t\\xA0\\u3000   ]*[章回部节集卷册幕话篇]\", chapterName) or\r\n\t\t\tre.search(\"卷[\\r\\n\\t\\xA0\\u3000   ]*[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+[\\r\\n\\t\\xA0\\u3000   ]*.*第[\\r\\n\\t\\xA0\\u3000   ]*[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+[\\r\\n\\t\\xA0\\u3000   ]*[章回部节集卷册幕话篇]\", chapterName)\r\n\t\t) != None\r\n\t\t\r\n\t\trec = None\r\n\t\tif haveVolPrefix:\r\n\t\t\trec = re.match(r\"^([第卷]?(?:[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))][^(\\(【\\[\\{]*?(?:[第卷]?(?:[0123456789一二��四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;])).*$\", chapterName)\r\n\t\tif not rec:\r\n\t\t\trec = re.match(r\"^([第卷]?(?:[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]).*$\", chapterName)\r\n\t\tif rec:\r\n\t\t\treturn chapterName.replace(rec.group(1), \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._\\r\\n\\t\\xA0\\u3000   ??·\")\r\n\t\t\r\n\t\t# 不太正常的章节开头,有几率匹配到错误的内容\r\n\t\trecs = None\r\n\t\tif haveVolPrefix:\r\n\t\t\trecs = re.match(r\"^([第卷](?:[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]?[^(\\(【\\[\\{]*?(?:[第卷](?:[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;]?)).*$\", chapterName)\r\n\t\tif not recs:\r\n\t\t\trecs = re.match(r\"^([第卷](?:[0123456789一二三四五六七八九十零〇Oo百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+)[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]?).*$\", chapterName)\r\n\t\tif recs:\r\n\t\t\ttry:\r\n\t\t\t\tword = chapterName[chapterName.find(recs.group(1)) + len(recs.group(1))]\r\n\t\t\t\tpy = xpinyin.Pinyin().get_pinyin(word, \"\")\r\n\t\t\t\tif py in self.BAD_TRIM:\r\n\t\t\t\t\treturn chapterName.replace(recs.group(1) + word, \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._·\\r\\n\\t\\xA0\\u3000   ??·\")\r\n\t\t\texcept Exception as e:\r\n\t\t\t\t# self.logger.error(\"错误:检查章节名的错别字失败:{}丨{}丨{}\".format(chapterName, recs.group(1), chapterName.find(recs.group(1))), exc_info = True)\r\n\t\t\t\tpass\r\n\t\t\t# return chapterName.replace(recs.group(1), \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—_\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t#end - if\r\n\t\t\r\n\t\t# 只有数字那种,不好用,错误率太高了\r\n\t\t# redgc = re.search(r\"^[0123456789一二三四五六七八九十零〇百千两万亿壹贰叁肆伍陆柒捌玖貮拾佰仟萬億]+[\\xA0\\u3000\\s:、,。.;:—\\-\\._·,]\", chapterName)\r\n\t\t# if redgc:\r\n\t\t\t# return chapterName.replace(redgc.group(0), \"\").strip(\"\\r\\n\\t\\xA0\\u3000   \").lstrip(\":、,。.;:—-._·\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t\r\n\t\treturn chapterName\r\n\t#end - clear_number\r\n\t\r\n\tdef clear_space(self, chapterName):\r\n\t\treturn chapterName.replace(\"\\xA0\", \"\").replace(\"\\u3000\", \"\").replace(\" \", \"\")\r\n\t#end - clear_space\r\n\t\r\n\tdef fix_chapter_number(self, chapterName):\r\n\t\tif not chapterName:\r\n\t\t\treturn None\r\n\t\t\r\n\t\tstart = 0\r\n\t\tpy = xpinyin.Pinyin()\r\n\t\tif py.get_pinyin(chapterName[0], \"\") == \"di\" or chapterName[0] == \"卷\":\r\n\t\t\tstart += 
\r\n\t\t\r\n\t\t# "一" and "亿" share the same pinyin, so that pinyin is deliberately not mapped here
\r\n\t\tNUMBERS = "0123456789零一二三四五六七八九十百千万"
\r\n\t\t\r\n\t\tlength = len(chapterName)
\r\n\t\tfor i in range(start, length):
\r\n\t\t\tif chapterName[i] not in NUMBERS:
\r\n\t\t\t\tp = py.get_pinyin(chapterName[i], "")
\r\n\t\t\t\tif p in self.CN_NUMBERS:
\r\n\t\t\t\t\tchapterName = chapterName[:i] + self.CN_NUMBERS[p] + chapterName[i + 1:]
\r\n\t\t\t\telse:
\r\n\t\t\t\t\tstart = i
\r\n\t\t\t\t\tbreak
\r\n\t\t#end - for
\r\n\t\t\r\n\t\tif start > 1 and start < len(chapterName):
\r\n\t\t\tp = py.get_pinyin(chapterName[start], "")
\r\n\t\t\tif p in self.BAD_TRIM:
\r\n\t\t\t\tchapterName = chapterName[:start] + self.BAD_TRIM[p] + chapterName[start + 1:]
\r\n\t\t#end - if
\r\n\t\t\r\n\t\treturn chapterName
\r\n\t#end - fix_chapter_number
\r\n\t\r\n\ttimeout_gmc = 1
\r\n\t\r\n\t# Fetch chapters that are missing or not yet complete
\r\n\tdef get_missing_chapter(self, bookId, chapter_name, chapter_list, chapter_real, same_title=0):
\r\n\t\tcursor = self.database.cursor()
\r\n\t\tnumRows = 0
\r\n\t\t\r\n\t\tchapter_list_clone = chapter_list.copy()
\r\n\t\tif hasattr(self, "dont_check_urls") and self.dont_check_urls:
\r\n\t\t\tchapter_list_clone = [ "" for url in chapter_list_clone ]
\r\n\t\t\r\n\t\t# self.logger.debug("CALL fetch_missing_chapter({})".format(len(chapter_list)))
\r\n\t\ttry:
\r\n\t\t\tnumRows = cursor.execute("CALL fetch_missing_chapter('{}', '{}', '{}', '{}', '{}', '{}');".format(
\r\n\t\t\t\tbookId,
\r\n\t\t\t\tself.escape(json.dumps(chapter_name, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(chapter_list_clone, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(chapter_real, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(same_title, ensure_ascii=False)),
\r\n\t\t\t\tself.check_complete_state
\r\n\t\t\t))
\r\n\t\texcept Exception as e:
\r\n\t\t\tself.logger.error("CALL fetch_missing_chapter('{}', '{}', '{}', '{}', '{}', '{}');".format(
\r\n\t\t\t\tbookId,
\r\n\t\t\t\tself.escape(json.dumps(chapter_name, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(chapter_list_clone, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(chapter_real, ensure_ascii=False)),
\r\n\t\t\t\tself.escape(json.dumps(same_title, ensure_ascii=False)),
\r\n\t\t\t\tself.check_complete_state
\r\n\t\t\t), exc_info = True)
\r\n\t\t\ttime.sleep(self.timeout_gmc)
\r\n\t\t\tself.timeout_gmc *= random.randint(1, 3)
\r\n\t\t\t\r\n\t\t\tif str(e).find("gone away") > 0 or str(e).find("Lost Connection") > 0:
\r\n\t\t\t\tself.connect_db()
\r\n\t\t\t\r\n\t\t\t# Retry with the full argument list (chapter_real was previously dropped here by mistake)
\r\n\t\t\treturn self.get_missing_chapter(bookId, chapter_name, chapter_list, chapter_real, same_title)
\r\n\t\t#end - try
\r\n\t\t\r\n\t\tself.timeout_gmc = 1
\r\n\t\t# self.logger.debug("CALL fetch_missing_chapter({}) finished".format(len(chapter_list)))
\r\n\t\t\r\n\t\tif numRows > 0:
\r\n\t\t\trow = cursor.fetchone()
\r\n\t\t\tresults = ( demjson.decode(str(row[0])), demjson.decode(str(row[1])) )
\r\n\t\t\t\r\n\t\t\tif results[0] and results[1]:
\r\n\t\t\t\tself.logger.info(str(row[0]))
\r\n\t\t\t#end - if
\r\n\t\t\t\r\n\t\t\treturn results
\r\n\t\t#end - if
\r\n\t\t\r\n\t\treturn [], []
\r\n\t#end - get_missing_chapter
\r\n\t\r\n\t# Convert Chinese chapter numbers into numeric chapter numbers
\r\n\tdef detect_chapter(self, chapterName):
\r\n\t\tif chapterName is None:
\r\n\t\t\treturn None
\r\n\t\t\r\n\t\t# 第x卷 * 第y章 *
\r\n\t\tchapterInfo = re.match(
\r\n\t\t\tr"^(?:正文[\xA0\u3000\s]|正文卷[\xA0\u3000\s]|章节目录[\xA0\u3000\s])?"\\
\r\n\t\t\tr"(?:"\\
\r\n\t\t\t\tr"第[\xA0\u3000\s(【\(\[\{]*(?:[0123456789零一二三四五六七八九十百千万]+)[\xA0\u3000\s)】\)\]\}]*[部集卷册\xA0\u3000\s:、,。.;:—\-\._·, 
;\\))]\"\\\r\n\t\t\tr\"|\"\\\r\n\t\t\t\tr\"卷[\\xA0\\u3000\\s(【\\(\\[\\{]*(?:[0123456789零一二三四五六七八九十百千万]+)[\\xA0\\u3000\\s)】\\)\\]\\}]*[部集卷册\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]?\"\\\r\n\t\t\tr\").*?\"\\\r\n\t\t\tr\"[第]?[\\xA0\\u3000\\s(【\\(\\[\\{]*([0123456789零一二三四五六七八九十百千万]+)[\\xA0\\u3000\\s)】\\)\\]\\}]*[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]?\"\\\r\n\t\t\tr\".*$\",\r\n\t\t\tself.fix_chapter_number(chapterName)\r\n\t\t)\r\n\t\t\r\n\t\t# 第x章 *\r\n\t\tif not chapterInfo:\r\n\t\t\tchapterInfo = re.match(\r\n\t\t\t\tr\"^(?:正文[\\xA0\\u3000\\s]|正文卷[\\xA0\\u3000\\s]|章节目录[\\xA0\\u3000\\s])?\"\\\r\n\t\t\t\tr\"[第卷]?[\\xA0\\u3000\\s(【\\(\\[\\{]*([0123456789零一二三四五六七八九十百千万]+)[\\xA0\\u3000\\s)】\\)\\]\\}]*[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]?\"\\\r\n\t\t\t\tr\".*$\",\r\n\t\t\t\tself.fix_chapter_number(chapterName)\r\n\t\t\t)\r\n\t\t#end if\r\n\t\t\r\n\t\t# * 第x章 *\r\n\t\tif not chapterInfo:\r\n\t\t\tchapterInfo = re.match(\r\n\t\t\t\tr\"^(?:正文[\\xA0\\u3000\\s]|正文卷[\\xA0\\u3000\\s]|章节目录[\\xA0\\u3000\\s])?\"\\\r\n\t\t\t\tr\".*?\"\\\r\n\t\t\t\tr\"[第][\\xA0\\u3000\\s(【\\(\\[\\{]*([0123456789零一二三四五六七八九十百千万]+)[\\xA0\\u3000\\s)】\\)\\]\\}]*[章回部节集卷册幕话篇\\xA0\\u3000\\s:、,。.;:—\\-\\._·, ;\\))]\"\\\r\n\t\t\t\tr\".*$\",\r\n\t\t\t\tself.fix_chapter_number(chapterName)\r\n\t\t\t)\r\n\t\t#end if\r\n\t\t\r\n\t\t# 【x】 *\r\n\t\tif not chapterInfo:\r\n\t\t\tchapterInfo = re.match(\r\n\t\t\t\tr\"^(?:正文[\\xA0\\u3000\\s]|正文卷[\\xA0\\u3000\\s]|章节目录[\\xA0\\u3000\\s])?\"\\\r\n\t\t\t\tr\"[\\(\\[\\{<([{<【〖〈〔「『﹙﹛﹝‹«]([0123456789零一二三四五六七八九十百千万]+)[\\)\\]\\}>)]}>】〗〉〕」』﹚﹜﹞›»]\"\\\r\n\t\t\t\tr\".*$\",\r\n\t\t\t\tself.fix_chapter_number(chapterName)\r\n\t\t\t)\r\n\t\t#end if\r\n\t\t\r\n\t\tif not chapterInfo:\r\n\t\t\treturn None\r\n\t\t\r\n\t\tif chapterInfo.group(1).isdigit():\r\n\t\t\treturn int(chapterInfo.group(1))\r\n\t\t\r\n\t\ttry:\r\n\t\t\treturn cn2an.cn2an(chapterInfo.group(1), \"smart\")\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tCN_NUM = {\r\n\t\t\t\"〇\" : 0, \"一\" : 1, \"二\" : 2, \"三\" : 3, \"四\" : 4, \"五\" : 5, \"六\" : 6, \"七\" : 7, \"八\" : 8, \"九\" : 9, \"零\" : 0,\r\n\t\t\t\"壹\" : 1, \"贰\" : 2, \"叁\" : 3, \"肆\" : 4, \"伍\" : 5, \"陆\" : 6, \"柒\" : 7, \"捌\" : 8, \"玖\" : 9, \"貮\" : 2, \"两\" : 2,\r\n\t\t\t\"0\" : 0, \"1\" : 1, \"2\" : 2, \"3\" : 3, \"4\" : 4, \"5\" : 5, \"6\" : 6, \"7\" : 7, \"8\" : 8, \"9\" : 9,\r\n\t\t}\r\n\t\tCN_UNIT = {\r\n\t\t\t\"十\" : 10, \"拾\" : 10, \"百\" : 100, \"佰\" : 100, \"千\" : 1000, \"仟\" : 1000, \"万\" : 10000, \"萬\" : 10000,\r\n\t\t\t\"亿\" : 100000000, \"億\" : 100000000, \"兆\" : 1000000000000,\r\n\t\t}\r\n\t\t\r\n\t\tunit = 0 # current\r\n\t\tldig = [] # digest\r\n\t\tfor cndig in reversed(chapterInfo.group(1)):\r\n\t\t\tif cndig in CN_UNIT:\r\n\t\t\t\tunit = CN_UNIT.get(cndig)\r\n\t\t\t\tif unit == 10000 or unit == 100000000:\r\n\t\t\t\t\tldig.append(unit)\r\n\t\t\t\t\tunit = 1\r\n\t\t\telse:\r\n\t\t\t\tdig = CN_NUM.get(cndig)\r\n\t\t\t\tif dig == None:\r\n\t\t\t\t\traise Exception(\"未知数字字符:\" + cndig)\r\n\t\t\t\t\r\n\t\t\t\tif unit:\r\n\t\t\t\t\tdig *= unit\r\n\t\t\t\t\tunit = 0\r\n\t\t\t\tldig.append(dig)\r\n\t\t\r\n\t\tif unit == 10:\r\n\t\t\tldig.append(10)\r\n\t\t\r\n\t\tval, tmp = 0, 0\r\n\t\tfor x in reversed(ldig):\r\n\t\t\tif x == 10000 or x == 100000000:\r\n\t\t\t\tval += tmp * x\r\n\t\t\t\ttmp = 0\r\n\t\t\telif x != None:\r\n\t\t\t\ttmp += x\r\n\t\tval += tmp\r\n\t\treturn val\r\n\t\t\"\"\"\r\n\t\t\r\n\t\treturn None\r\n\t#end - detect_chapter\r\n\t\r\n\t# 删除多余换行\r\n\tdef content_format(self, content):\r\n\t\tif isinstance(content, 
str):\r\n\t\t\tcontent = content.split(\"\\n\")\r\n\t\t\r\n\t\tnl = False\r\n\t\tresults = \"\"\r\n\t\tfor line in content:\r\n\t\t\tline = line.rstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t\tif not line:\r\n\t\t\t\t# results += \"\\n\"\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif len(line) >= 40 or re.search(r\"^第\\d+章\", line) or re.search(r\"。\\d{1,2}$\", line):\r\n\t\t\t\tresults += line + \"\\n\"\r\n\t\t\t\tcontinue\r\n\t\t\t#end if\r\n\t\t\t\r\n\t\t\tchar = line[-1]\r\n\t\t\tnl = ((not self.is_chinese_word(char) and char != \",\") or char in self.CN_ENDLINE)\r\n\t\t\t# nl = not((self.is_chinese_word(char) or char == \",\") and char not in self.CN_ENDLINE)\r\n\t\t\t\r\n\t\t\tif nl:\r\n\t\t\t\tresults += line + \"\\n\\n\"\r\n\t\t\telif results.endswith(\"\\n\"):\r\n\t\t\t\tresults += line\r\n\t\t\telse:\r\n\t\t\t\tresults += line.lstrip(\"\\r\\n\\t\\xA0\\u3000   \")\r\n\t\t#end for\r\n\t\t\r\n\t\t# 将单个换行替换为多个换行\r\n\t\tresults = re.sub(r\"([^\\r\\n])\\n  \", r\"\\1\\n\\n  \", results)\r\n\t\t\r\n\t\t# 修复中间的空格\r\n\t\tresults = re.sub(r\"([^\\r\\n])  \", r\"\\1 \", results)\r\n\t\t\r\n\t\t# 修复换行不正常\r\n\t\tresults = results.replace(r\"\\n\", \"\\n\")\r\n\t\t\r\n\t\treturn results\r\n\t#end - content_format\r\n\t\r\n\tdef unified_symbol(self, content):\r\n\t\tSYMBOL = {\r\n\t\t\t\"/[\" : \"[\",\r\n\t\t\t\"/]\" : \"[\",\r\n\t\t\t\"/_\" : \"_\",\r\n\t\t\t\"/(\" : \"(\",\r\n\t\t\t\"/)\" : \")\",\r\n\t\t\t\"//\" : \"/\",\r\n\t\t\t\",\" : \",\",\r\n\t\t\t# \".\" : \".\",\r\n\t\t\t\" (\" : \"(\",\r\n\t\t\t\" (\" : \"(\",\r\n\t\t\t\"(\" : \"(\",\r\n\t\t\t\")\" : \")\",\r\n\t\t\t# \"[\" : \"【\",\r\n\t\t\t# \"]\" : \"】\",\r\n\t\t\t\":\" : \":\",\r\n\t\t\t# \"·\" : \"•\",\r\n\t\t\t\"!\" : \"!\",\r\n\t\t\t\"?\" : \"?\",\r\n\t\t\t# \"-\" : \"—\",\r\n\t\t\t# \"|\" : \"丨\",\r\n\t\t\t# \"。。。\" : \"...\",\r\n\t\t\t# \"*\" : \"*\",\r\n\t\t\t# \"+\" : \"+\",\r\n\t\t\t\"\\u3000\" : \" \",\r\n\t\t\t\"\\xA0\" : \" \",\r\n\t\t\t\"~\" : \"~\",\r\n\t\t\t\"*\" : \"*\",\r\n\t\t\t\"=\" : \"=\",\r\n\t\t\t\"+\" : \"+\",\r\n\t\t\t\"|\" : \"|\",\r\n\t\t\t\"。。。\" : \"...\",\r\n\t\t\t\"[\" : \"[\",\r\n\t\t\t\"]\" : \"]\",\r\n\t\t\t\".\" : \".\",\r\n\t\t}\r\n\t\t\r\n\t\tfor k, v in SYMBOL.items():\r\n\t\t\tcontent = content.replace(k, v)\r\n\t\t\r\n\t\tdig = re.search(r\"([\\xA0\\u3000\\s]([0123456789一二三四五六七八九十百零〇上中下终完改补]+))$\", content)\r\n\t\tif dig:\r\n\t\t\tcontent = content.replace(dig.group(1), \"(\" + dig.group(2) + \")\")\r\n\t\t\r\n\t\tif content.endswith(\".\") and not content.endswith(\"..\"):\r\n\t\t\tcontent = content[:-1] + \"。\"\r\n\t\t\r\n\t\treturn content\r\n\t#end - unified_symbol\r\n\t\r\n\tdef get_chapter_sample(self, content, num_sample=2, ignore_said=False):\r\n\t\tlines = [ s.strip(\"\\r\\n\\t\\xA0\\u3000   \") for s in content.strip(\"\\r\\n\\t\\xA0\\u3000   \").split(\"\\n\") if s.strip(\"\\r\\n\\t\\xA0\\u3000   \") ]\r\n\t\tlines = [ re.sub(r\"[^a-zA-Z0-9\\u4e00-\\u9fa5。?!”;、~》)’,]+[。;]([^a-zA-Z0-9\\u4e00-\\u9fa5]|$)\", \"\", s) for s in lines ]\r\n\t\t\r\n\t\tfirst = []\r\n\t\tlast = []\r\n\t\t\r\n\t\t# 正向取样\r\n\t\tfor text in lines:\r\n\t\t\t# 字太少不适合取样\r\n\t\t\twords = self.calc_num_words(text)\r\n\t\t\t\r\n\t\t\t# 避免对广告取样\r\n\t\t\tfor kw in self.ADVERT_KEYWORDS:\r\n\t\t\t\tif text.find(kw) > -1:\r\n\t\t\t\t\twords = 0\r\n\t\t\t\t\tbreak\r\n\t\t\t#end for\r\n\t\t\t\r\n\t\t\tif words < 5:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# 避免样本相同\r\n\t\t\tif first.count(text) > 0:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# 避免喊话内容\r\n\t\t\tif ignore_said and text.startswith(\"“\") and 
text.endswith(\"”\"):\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tfirst.append(text)\r\n\t\t\tif len(first) >= num_sample:\r\n\t\t\t\tbreak\r\n\t\t#end for\r\n\t\t\r\n\t\t# 反向取样\r\n\t\tfor text in reversed(lines):\r\n\t\t\t# 字太少不适合取样\r\n\t\t\twords = self.calc_num_words(text)\r\n\t\t\t\r\n\t\t\t# 避免对广告取样\r\n\t\t\tfor kw in self.ADVERT_KEYWORDS:\r\n\t\t\t\tif text.find(kw) > -1:\r\n\t\t\t\t\twords = 0\r\n\t\t\t\t\tbreak\r\n\t\t\t#end for\r\n\t\t\t\r\n\t\t\tif words < 5:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# 避免样本相同\r\n\t\t\tif last.count(text) > 0:\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\t# 避免喊话内容\r\n\t\t\tif ignore_said and text.startswith(\"“\") and text.endswith(\"”\"):\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tlast.append(text)\r\n\t\t\tif len(last) >= num_sample:\r\n\t\t\t\tbreak\r\n\t\t#end for\r\n\t\t\r\n\t\treturn first, last\r\n\t#end - get_chapter_sample\r\n\t\r\n\tdef check_relation_chapter(self, book_id, chapter_no, chapter_real, title, content):\r\n\t\tfirst_line, last_line = self.get_chapter_sample(content)\r\n\t\tif len(first_line) < 2 or len(last_line) < 2:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tcursor = self.database.cursor()\r\n\t\tnumRows = cursor.execute(\r\n\t\t\t\"SELECT title, chapter_no, chapter_real FROM book_relation WHERE book_id = '{}' AND ((first_line = '{}' AND second_line = '{}') OR (last_line = '{}' AND previous_line = '{}'));\".format(\r\n\t\t\tbook_id, self.escape(first_line[0]), self.escape(first_line[1]), self.escape(last_line[0]), self.escape(last_line[1])\r\n\t\t))\r\n\t\t\r\n\t\tif numRows > 0:\r\n\t\t\tfor row in cursor.fetchall():\r\n\t\t\t\tif row[0] == title or row[1] == chapter_no or row[2] == chapter_real:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\r\n\t\t\t\tprint(\"冲突:{},no={},real={}。f1:{}丨f2:{}丨b1:{}丨b2:{}丨来源:{},no={},real={}\".format(\r\n\t\t\t\t\trow[0], row[1], row[2],\r\n\t\t\t\t\tfirst_line[0], first_line[1], last_line[0], last_line[1],\r\n\t\t\t\t\ttitle, chapter_no, chapter_real\r\n\t\t\t\t))\r\n\t\t\t\treturn True\r\n\t\t#end if\r\n\t\t\r\n\t\treturn False\r\n\t#end - check_relation_chapter\r\n\t\r\n\t# 尝试检测防盗章/凑字数章\r\n\tdef check_anti_theft(self, bookId, content, num_sample=2):\r\n\t\tif bookId not in self.cached_anti_theft:\r\n\t\t\tcursor = self.database.cursor()\r\n\t\t\t\r\n\t\t\t# 尝试选择第一章(而不是序章/作品相关)\r\n\t\t\tnumRows = cursor.execute(\"SELECT content, chapter_no FROM book_data WHERE book_id = {} AND chapter_real = 1;\".format(bookId))\r\n\t\t\tif numRows != 1:\r\n\t\t\t\tnumRows = cursor.execute(\"SELECT content, chapter_real FROM book_data WHERE book_id = {} AND chapter_no = 0;\".format(bookId))\r\n\t\t\tif numRows <= 0:\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\trow = cursor.fetchone()\r\n\t\t\tfirst_line, last_line = self.get_chapter_sample(row[0], num_sample, True)\r\n\t\t\tself.cached_anti_theft[bookId] = { \"begin\" : first_line, \"end\" : last_line }\r\n\t\t\t# print(\"{} 样本信息:{},样本ID:{}\".format(bookId, str(self.cached_anti_theft[bookId]), row[1]))\r\n\t\t#end if\r\n\t\t\r\n\t\tif bookId not in self.cached_anti_theft:\r\n\t\t\treturn False\r\n\t\t\r\n\t\t# 获取有字的一行\r\n\t\tlines = [ s.strip(\"\\r\\n\\t\\xA0\\u3000   \") for s in content.strip(\"\\r\\n\\t\\xA0\\u3000   \").split(\"\\n\") if s.strip(\"\\r\\n\\t\\xA0\\u3000   \") ]\r\n\t\tlines = [ re.sub(r\"[^a-zA-Z0-9\\u4e00-\\u9fa5。?!”;、~》)’,]+[。;]([^a-zA-Z0-9\\u4e00-\\u9fa5]|$)\", \"\", s) for s in lines ]\r\n\t\tpair = self.cached_anti_theft[bookId]\r\n\t\t\r\n\t\tfirst_line, last_line = self.get_chapter_sample(content, num_sample, True)\r\n\t\t# print(\"{} 取样信息:{},{}\".format(bookId, 
str(first_line), str(last_line)))\r\n\t\tmsg = []\r\n\t\t\r\n\t\tif pair[\"begin\"] and first_line:\r\n\t\t\tnum_repeat = 0\r\n\t\t\tfor text in first_line:\r\n\t\t\t\tif lines.count(text) > 1:\r\n\t\t\t\t\tnum_repeat += 1\r\n\t\t\t\t\tmsg.append(\"前重复行:{}\".format(text))\r\n\t\t\tif num_repeat > 0 and num_repeat >= len(first_line):\r\n\t\t\t\tprint(str(msg))\r\n\t\t\t\treturn True\r\n\t\t\t\r\n\t\t\tnum_repeat = 0\r\n\t\t\tmax_repeat = min(len(pair[\"begin\"]), len(first_line))\r\n\t\t\tfor i in range(max_repeat):\r\n\t\t\t\tif difflib.SequenceMatcher(None, first_line[i], pair[\"begin\"][i]).ratio() > 0.9:\r\n\t\t\t\t\tnum_repeat += 1\r\n\t\t\t\t\tmsg.append(\"前相似行:{}≈{}\".format(first_line[i], pair[\"begin\"][i]))\r\n\t\t\tif num_repeat > 0 and num_repeat >= max_repeat:\r\n\t\t\t\tprint(str(msg))\r\n\t\t\t\treturn True\r\n\t\t#end if\r\n\t\t\r\n\t\tif pair[\"end\"] and last_line:\r\n\t\t\tnum_repeat = 0\r\n\t\t\tfor text in last_line:\r\n\t\t\t\tif lines.count(text) > 1:\r\n\t\t\t\t\tnum_repeat += 1\r\n\t\t\t\t\tmsg.append(\"后重复行:{}\".format(text))\r\n\t\t\tif num_repeat > 0 and num_repeat >= len(last_line):\r\n\t\t\t\tprint(str(msg))\r\n\t\t\t\treturn True\r\n\t\t\t\r\n\t\t\tnum_repeat = 0\r\n\t\t\tmax_repeat = min(len(pair[\"end\"]), len(last_line))\r\n\t\t\tfor i in range(max_repeat):\r\n\t\t\t\tif difflib.SequenceMatcher(None, last_line[i], pair[\"end\"][i]).ratio() > 0.9:\r\n\t\t\t\t\tnum_repeat += 1\r\n\t\t\t\t\tmsg.append(\"后相似行:{}≈{}\".format(last_line[i], pair[\"end\"][i]))\r\n\t\t\tif num_repeat > 0 and num_repeat >= max_repeat:\r\n\t\t\t\tprint(str(msg))\r\n\t\t\t\treturn True\r\n\t\t#end if\r\n\t\t\r\n\t\t# self.logger.warning(\"{} 的待检查内容是空的\".format(bookId))\r\n\t\treturn False\r\n\t#end - check_anti_theft\r\n\t\r\n\t# 尝试检测无关章节,避免重复更新\r\n\tdef check_irrelevant_chapter(self, bookId, haveNo):\r\n\t\tif bookId not in self.cached_irrelevant_chapter:\r\n\t\t\tcursor = self.database.cursor()\r\n\t\t\tnumRows = cursor.execute(\"SELECT COUNT(chapter_real) FROM book_data WHERE book_id = {} AND NOT ISNULL(chapter_real);\".format(bookId))\r\n\t\t\tif numRows <= 0:\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\tself.cached_irrelevant_chapter[bookId] = (int(cursor.fetchone()[0]) > 50)\r\n\t\t#end if\r\n\t\t\r\n\t\tif bookId not in self.cached_irrelevant_chapter:\r\n\t\t\treturn False\r\n\t\t\r\n\t\treturn (self.cached_irrelevant_chapter[bookId] and not haveNo)\r\n\t#end - check_irrelevant_chapter\r\n\t\r\n\tdef remove_page_mark(self, url):\r\n\t\t# 除了主页外,应该没有会在末尾用斜杠的吧\r\n\t\tm = re.search(r\"((\\d+)[_\\-]\\d+)(?:\\.(?:html|shtml|xhtml|htm))?$\", url)\r\n\t\tif m:\r\n\t\t\treturn url.replace(m.group(1), m.group(2))\r\n\t\t\r\n\t\treturn url\r\n\t#end - remove_page_mark\r\n\t\r\n\tdef escape(self, text, *args):\r\n\t\tif not text:\r\n\t\t\treturn \"\"\r\n\t\t\r\n\t\treturn self.database.escape_string(text).decode(\"utf-8\")\r\n\t#end - escape\r\n\t\r\n\tdef log(self, text, *args):\r\n\t\tf = open(os.path.join(settings.BASE_DIR, \"../logs.log\"), \"a+\", encoding=\"utf-8\")\r\n\t\tf.write(\"[\" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \"] \" + text.format(*args) + \"\\n\")\r\n\t\tf.close()\r\n\t#end - log\r\n\t\r\n\t# 补全短 URL\r\n\tdef fix_url(self, url, referrer):\r\n\t\tif not referrer or not url or url.startswith(\"http\") or len(referrer) <= 0:\r\n\t\t\treturn url\r\n\t\t\r\n\t\tif url.startswith(\"/\"):\r\n\t\t\treturn referrer[:referrer.find(\"/\", 8)] + url\r\n\t\tif url.startswith(\":\"):\r\n\t\t\treturn referrer[:referrer.find(\":\")] + url\r\n\t\t\r\n\t\treturn 
referrer[:referrer.rfind(\"/\") + 1] + url\r\n\t#end - fix_url\r\n\t\r\n\tdef connect_db(self, retry = True):\r\n\t\ttry:\r\n\t\t\tself.database = MySQLdb.connect(\r\n\t\t\t\thost = settings.MYSQL_HOST,\r\n\t\t\t\tport = settings.MYSQL_PORT,\r\n\t\t\t\tuser = settings.MYSQL_USERNAME,\r\n\t\t\t\tpasswd = settings.MYSQL_PASSWORD,\r\n\t\t\t\tdb = settings.MYSQL_DATABASE,\r\n\t\t\t\tcharset = \"utf8mb4\"\r\n\t\t\t)\r\n\t\texcept:\r\n\t\t\tself.database = None\r\n\t\t#end try\r\n\t\t\r\n\t\tif not self.database:\r\n\t\t\tif retry:\r\n\t\t\t\tprint(\"数据库连接失败,5 秒后重试\")\r\n\t\t\t\ttime.sleep(5)\r\n\t\t\t\treturn self.connect_db()\r\n\t\t\t\r\n\t\t\tprint(\"数据库连接失败\")\r\n\t\t\treturn False\r\n\t\t#end - if\r\n\t\t\r\n\t\tself.database.autocommit(True)\r\n\t\tself.database.cursor().execute(\"SET @NUM_MATCH_SIZE = {};\".format(self.num_same_match))\r\n\t\tself.database.commit()\r\n\t\treturn True\r\n\t#end - connect_db\r\n\t\r\n\tdef sql_exec(self, content, *argv):\r\n\t\tcursor = self.database.cursor()\r\n\t\tnum_rows = 0\r\n\t\tif len(argv) > 0:\r\n\t\t\tcontent = content.format(*argv)\r\n\t\t\r\n\t\ttry:\r\n\t\t\tnum_rows = cursor.execute(content)\r\n\t\texcept Exception as e:\r\n\t\t\tinfo = str(e)\r\n\t\t\tself.logger.error(\"执行语句失败:{},原因:{}\".format(content, info), exc_info=True)\r\n\t\t\t\r\n\t\t\t# 连接丢失(find 未命中返回 -1,必须显式与 -1 比较)\r\n\t\t\tif info.find(\"gone away\") > -1 or info.find(\"Lost Connection\") > -1:\r\n\t\t\t\tself.connect_db(True)\r\n\t\t\t\r\n\t\t\treturn self.sql_exec(content)\r\n\t\t\"\"\"\r\n\t\texcept MySQLdb.OperationalError as e:\r\n\t\t\tself.logger.error(\"[链接异常] 执行语句失败:{},原因:{},正在尝试重新连接\".format(content, e), exc_info=True)\r\n\t\t\tself.connect_db(True)\r\n\t\t\treturn self.sql_exec(content)\r\n\t\texcept MySQLdb.IntegrityError as e:\r\n\t\t\tself.logger.error(\"[约束异常] 执行语句失败:{},原因:{},无需处理\".format(content, e), exc_info=True)\r\n\t\t\treturn None\r\n\t\texcept MySQLdb.ProgrammingError as e:\r\n\t\t\tself.logger.error(\"[语法错误] 执行语句失败:{},原因:{},无法处理\".format(content, e), exc_info=True)\r\n\t\t\traise e\r\n\t\texcept MySQLdb.InterfaceError as e:\r\n\t\t\tself.logger.error(\"[库异常] 执行语句失败:{},原因:{},无法处理\".format(content, e), exc_info=True)\r\n\t\t\traise e\r\n\t\texcept MySQLdb.NotSupportedError as e:\r\n\t\t\tself.logger.error(\"[指令异常] 执行语句失败:{},原因:{},无法处理\".format(content, e), exc_info=True)\r\n\t\t\traise e\r\n\t\texcept MySQLdb.Warning as e:\r\n\t\t\tself.logger.warning(\"[警告] 执行语句警告:{},信息:{}\".format(content, e))\r\n\t\t\"\"\"\r\n\t\t#end try\r\n\t\t\r\n\t\tif num_rows > 1:\r\n\t\t\treturn list(cursor.fetchall())\r\n\t\t#end if\r\n\t\t\r\n\t\tif num_rows == 1:\r\n\t\t\trow = cursor.fetchone()\r\n\t\t\tif len(row) == 1:\r\n\t\t\t\treturn row[0]\r\n\t\t\treturn row\r\n\t\t#end if\r\n\t\t\r\n\t\treturn None\r\n\t#end - sql_exec\r\n\t\r\n\t# SWAR 方式统计 64 位整数中为 1 的比特数\r\n\t@staticmethod\r\n\tdef popcnt(v):\r\n\t\tv = v - ((v >> 1) & 0x5555555555555555)\r\n\t\tv = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333)\r\n\t\tv = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f\r\n\t\tv = v + (v >> 8)\r\n\t\tv = v + (v >> 16)\r\n\t\tv = v + (v >> 32)\r\n\t\treturn v & 0x7f\r\n\t#end - popcnt\r\n#end - CBookWalker\r\n","sub_path":"test001/spiders/include_book_walker.py","file_name":"include_book_walker.py","file_ext":"py","file_size_in_byte":96139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"88291221","text":"\nimport cv2\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join, basename\n\n\nrefs_dir = \"/home/vazqmig/Files/pok/env/src/images/references_ccards/gray/\"\nonlyfiles = [f for f in 
listdir(refs_dir) if isfile(join(refs_dir, f))]\nonlyfiles\n\nnumber_images_references = {}\nfor fn in onlyfiles:\n ima = cv2.imread(join(refs_dir, fn), cv2.IMREAD_GRAYSCALE)\n num = basename(fn)[:-4]\n number_images_references[num] = ima\n\n\n# ########################################\n# ###### SETUP ###########################\n\ndef write_references():\n # read references and thresdhold..\n refs_dir = \"/home/vazqmig/Files/pok/env/src/images/references_ccards/\"\n onlyfiles = [f for f in listdir(refs_dir) if isfile(join(refs_dir, f))]\n onlyfiles\n\n for fn in onlyfiles:\n ima = cv2.imread(join(refs_dir, fn))\n ima = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)\n # ret, ima = cv2.threshold(ima, 127, 255, 0)\n # cv2.imshow('Gray', ima)\n # cv2.waitKey(0)\n num = basename(fn)\n rdir = \"/home/vazqmig/Files/pok/env/src/images/references_ccards/gray/\"\n cv2.imwrite(join(rdir, num), ima)\n\n\n##################################################\n# template matching\n##################################################\n\ntable_fn = \"/home/vazqmig/Files/pok/env/src/images/7.PNG\"\nimg_rgb = cv2.imread(table_fn)\ntable_im = img_rgb\nimg_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\nret, thres = cv2.threshold(img_gray, 127, 255, 0)\n\nrdir = \"/home/vazqmig/Files/pok/env/src/images/references_ccards/gray/\"\n\nwt, ht = img_gray.shape[::-1]\nwt\nht\nnax = 386/865\nnay = 405/533\nnbx = 442/865\nnby = 462/533\nncx = 242/865\nncy = 188/533\nndx = 625/865\nndy = 238/533\n\nh_begin = (int(wt*nax), int(ht*nay))\nh_end = (int(wt*nbx), int(ht*nby))\ncc_begin = (int(wt*ncx), int(ht*ncy))\ncc_end = (int(wt*ndx), int(ht*ndy))\n\nh_begin\nh_end\ncc_begin\ncc_end\n\n\ndef is_point_in_rect(point, rect):\n return (point[0] >= rect[0][0] and point[0] <= rect[1][0]\n and point[1] >= rect[0][1] and point[1] <= rect[1][1])\n\n\n# im = img_gray\nim = img_gray\nimhero = im[h_begin[1]:h_end[1], h_begin[0]:h_end[0]]\ncv2.imshow('hero', imhero)\ncv2.waitKey(0)\n\nimcc = im[cc_begin[1]:cc_end[1], cc_begin[0]:cc_end[0]]\ncv2.imshow('ccards', imcc)\ncv2.waitKey(0)\n\n##################################################\n\n\nwrite_references()\nrefs_dir = \"/home/vazqmig/Files/pok/env/src/images/references_ccards/gray/\"\nonlyfiles = [f for f in listdir(refs_dir) if isfile(join(refs_dir, f))]\nonlyfiles\n\nnumber_images_references = {}\nfor fn in onlyfiles:\n ima = cv2.imread(join(refs_dir, fn), cv2.IMREAD_GRAYSCALE)\n num = basename(fn)[:-4]\n number_images_references[num] = ima\n\n\ntable_fn = \"/home/vazqmig/Files/pok/env/src/images/3.PNG\"\ntable_im = cv2.imread(table_fn)\nimg_gray = cv2.cvtColor(table_im, cv2.COLOR_BGR2GRAY)\nret, thres = cv2.threshold(img_gray, 127, 255, 0)\n\nimage = thres\nimage = img_gray\n\nfor key in number_images_references.keys():\n\n print('testing template', key)\n template = number_images_references[key]\n w, h = template.shape[::-1]\n # method = cv2.TM_SQDIFF_NORMED\n method = cv2.TM_CCOEFF_NORMED\n res = cv2.matchTemplate(image, template, method)\n threshold = 0.8\n loc = np.where(res >= threshold)\n hero_rect = (h_begin, h_end)\n for pt in zip(*loc[::-1]):\n x = pt[0]\n y = pt[1]\n print('found template', key, 'at', x, y)\n col = (0, 0, 255)\n cv2.rectangle(table_im, pt, (x + w, y + h), col, 2)\n hplace = is_point_in_rect(pt, hero_rect)\n if hplace:\n print('in hero cards')\n cv2.putText(table_im, key, (x, y-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)\n\n\ncv2.imshow('res', 
table_im)\ncv2.waitKey()\ncv2.destroyAllWindows()\n","sub_path":"template_find_cards.py","file_name":"template_find_cards.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228380225","text":"#word_level_align.py\n#first aligns a dataset to the words vectors and collapses other modalities (by taking average of them for the duration of the word). After this operation every modality will have the same frequency (same as word vectors). Then the code aligns based on opinion labels (note that collapse does not happen for this step.\n\nimport mmsdk\nfrom mmsdk import mmdatasdk\nimport numpy\n\n\ndef myavg(intervals,features):\n return numpy.average(features,axis=0)\n\ncmumosi_highlevel=mmdatasdk.mmdataset(mmdatasdk.cmu_mosi.highlevel,'cmumosi/')\ncmumosi_highlevel.align('glove_vectors',collapse_functions=[myavg])\ncmumosi_highlevel.add_computational_sequences(mmdatasdk.cmu_mosi.labels,'cmumosi/')\ncmumosi_highlevel.align('Opinion Segment Labels')\n\ndeploy_files={x:x for x in cmumosi_highlevel.computational_sequences.keys()}\n\ncmumosi_highlevel.deploy(\"./deployed\",deploy_files)\n\naligned_cmumosi_highlevel=mmdatasdk.mmdataset('./deployed')\n\n\n","sub_path":"examples/mmdatasdk_examples/word_level_align.py","file_name":"word_level_align.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617171865","text":"# -*- coding: utf-8 -*-\n\n# python imports\nimport sys\nimport struct\nfrom enum import Enum\n\nPY3 = sys.version_info > (3,)\n\n\nclass ECell(Enum):\n\tEmpty = 0\n\tFrontlineDelivery = 1\n\tMaterial = 2\n\tBacklineDelivery = 3\n\tMachine = 4\n\n\nclass MachineStatus(Enum):\n\tIdle = 0\n\tWorking = 1\n\tAmmoReady = 2\n\n\nclass MaterialType(Enum):\n\tPowder = 0\n\tIron = 1\n\tCarbon = 2\n\tGold = 3\n\tShell = 4\n\n\nclass AmmoType(Enum):\n\tRifleBullet = 0\n\tTankShell = 1\n\tHMGBullet = 2\n\tMortarShell = 3\n\tGoldenTankShell = 4\n\n\nclass UnitType(Enum):\n\tSoldier = 0\n\tTank = 1\n\tHeavyMachineGunner = 2\n\tMortar = 3\n\tGoldenTank = 4\n\n\nclass AgentType(Enum):\n\tWarehouse = 0\n\tFactory = 1\n\n\nclass Position(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Position'\n\n\n\tdef __init__(self, index=None):\n\t\tself.initialize(index)\n\t\n\n\tdef initialize(self, index=None):\n\t\tself.index = index\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.index\n\t\ts += b'\\x00' if self.index is None else b'\\x01'\n\t\tif self.index is not None:\n\t\t\ts += struct.pack('i', self.index)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.index\n\t\ttmp0 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp0:\n\t\t\tself.index = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.index = None\n\t\t\n\t\treturn offset\n\n\nclass Material(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Material'\n\n\n\tdef __init__(self, type=None, position=None, count=None, c_capacity=None):\n\t\tself.initialize(type, position, count, c_capacity)\n\t\n\n\tdef initialize(self, type=None, position=None, count=None, c_capacity=None):\n\t\tself.type = type\n\t\tself.position = position\n\t\tself.count = count\n\t\tself.c_capacity = c_capacity\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.type\n\t\ts += b'\\x00' if self.type is None else b'\\x01'\n\t\tif self.type is 
not None:\n\t\t\ts += struct.pack('b', self.type.value)\n\t\t\n\t\t# serialize self.position\n\t\ts += b'\\x00' if self.position is None else b'\\x01'\n\t\tif self.position is not None:\n\t\t\ts += self.position.serialize()\n\t\t\n\t\t# serialize self.count\n\t\ts += b'\\x00' if self.count is None else b'\\x01'\n\t\tif self.count is not None:\n\t\t\ts += struct.pack('i', self.count)\n\t\t\n\t\t# serialize self.c_capacity\n\t\ts += b'\\x00' if self.c_capacity is None else b'\\x01'\n\t\tif self.c_capacity is not None:\n\t\t\ts += struct.pack('i', self.c_capacity)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.type\n\t\ttmp1 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp1:\n\t\t\ttmp2 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\tself.type = MaterialType(tmp2)\n\t\telse:\n\t\t\tself.type = None\n\t\t\n\t\t# deserialize self.position\n\t\ttmp3 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp3:\n\t\t\tself.position = Position()\n\t\t\toffset = self.position.deserialize(s, offset)\n\t\telse:\n\t\t\tself.position = None\n\t\t\n\t\t# deserialize self.count\n\t\ttmp4 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp4:\n\t\t\tself.count = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.count = None\n\t\t\n\t\t# deserialize self.c_capacity\n\t\ttmp5 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp5:\n\t\t\tself.c_capacity = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_capacity = None\n\t\t\n\t\treturn offset\n\n\nclass Machine(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Machine'\n\n\n\tdef __init__(self, position=None, status=None, current_ammo=None, construction_rem_time=None):\n\t\tself.initialize(position, status, current_ammo, construction_rem_time)\n\t\n\n\tdef initialize(self, position=None, status=None, current_ammo=None, construction_rem_time=None):\n\t\tself.position = position\n\t\tself.status = status\n\t\tself.current_ammo = current_ammo\n\t\tself.construction_rem_time = construction_rem_time\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.position\n\t\ts += b'\\x00' if self.position is None else b'\\x01'\n\t\tif self.position is not None:\n\t\t\ts += self.position.serialize()\n\t\t\n\t\t# serialize self.status\n\t\ts += b'\\x00' if self.status is None else b'\\x01'\n\t\tif self.status is not None:\n\t\t\ts += struct.pack('b', self.status.value)\n\t\t\n\t\t# serialize self.current_ammo\n\t\ts += b'\\x00' if self.current_ammo is None else b'\\x01'\n\t\tif self.current_ammo is not None:\n\t\t\ts += struct.pack('b', self.current_ammo.value)\n\t\t\n\t\t# serialize self.construction_rem_time\n\t\ts += b'\\x00' if self.construction_rem_time is None else b'\\x01'\n\t\tif self.construction_rem_time is not None:\n\t\t\ts += struct.pack('i', self.construction_rem_time)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.position\n\t\ttmp6 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp6:\n\t\t\tself.position = Position()\n\t\t\toffset = self.position.deserialize(s, offset)\n\t\telse:\n\t\t\tself.position = None\n\t\t\n\t\t# deserialize self.status\n\t\ttmp7 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp7:\n\t\t\ttmp8 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\tself.status = 
MachineStatus(tmp8)\n\t\telse:\n\t\t\tself.status = None\n\t\t\n\t\t# deserialize self.current_ammo\n\t\ttmp9 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp9:\n\t\t\ttmp10 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\tself.current_ammo = AmmoType(tmp10)\n\t\telse:\n\t\t\tself.current_ammo = None\n\t\t\n\t\t# deserialize self.construction_rem_time\n\t\ttmp11 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp11:\n\t\t\tself.construction_rem_time = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.construction_rem_time = None\n\t\t\n\t\treturn offset\n\n\nclass FrontlineDelivery(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'FrontlineDelivery'\n\n\n\tdef __init__(self, ammos=None, delivery_rem_time=None, c_delivery_duration=None):\n\t\tself.initialize(ammos, delivery_rem_time, c_delivery_duration)\n\t\n\n\tdef initialize(self, ammos=None, delivery_rem_time=None, c_delivery_duration=None):\n\t\tself.ammos = ammos\n\t\tself.delivery_rem_time = delivery_rem_time\n\t\tself.c_delivery_duration = c_delivery_duration\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.ammos\n\t\ts += b'\\x00' if self.ammos is None else b'\\x01'\n\t\tif self.ammos is not None:\n\t\t\ttmp12 = b''\n\t\t\ttmp12 += struct.pack('I', len(self.ammos))\n\t\t\twhile len(tmp12) and tmp12[-1] == b'\\x00'[0]:\n\t\t\t\ttmp12 = tmp12[:-1]\n\t\t\ts += struct.pack('B', len(tmp12))\n\t\t\ts += tmp12\n\t\t\t\n\t\t\tfor tmp13 in self.ammos:\n\t\t\t\ts += b'\\x00' if tmp13 is None else b'\\x01'\n\t\t\t\tif tmp13 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp13.value)\n\t\t\t\ts += b'\\x00' if self.ammos[tmp13] is None else b'\\x01'\n\t\t\t\tif self.ammos[tmp13] is not None:\n\t\t\t\t\ts += struct.pack('i', self.ammos[tmp13])\n\t\t\n\t\t# serialize self.delivery_rem_time\n\t\ts += b'\\x00' if self.delivery_rem_time is None else b'\\x01'\n\t\tif self.delivery_rem_time is not None:\n\t\t\ts += struct.pack('i', self.delivery_rem_time)\n\t\t\n\t\t# serialize self.c_delivery_duration\n\t\ts += b'\\x00' if self.c_delivery_duration is None else b'\\x01'\n\t\tif self.c_delivery_duration is not None:\n\t\t\ts += struct.pack('i', self.c_delivery_duration)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.ammos\n\t\ttmp14 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp14:\n\t\t\ttmp15 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp16 = s[offset:offset + tmp15]\n\t\t\toffset += tmp15\n\t\t\ttmp16 += b'\\x00' * (4 - tmp15)\n\t\t\ttmp17 = struct.unpack('I', tmp16)[0]\n\t\t\t\n\t\t\tself.ammos = {}\n\t\t\tfor tmp18 in range(tmp17):\n\t\t\t\ttmp21 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp21:\n\t\t\t\t\ttmp22 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp19 = AmmoType(tmp22)\n\t\t\t\telse:\n\t\t\t\t\ttmp19 = None\n\t\t\t\ttmp23 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp23:\n\t\t\t\t\ttmp20 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp20 = None\n\t\t\t\tself.ammos[tmp19] = tmp20\n\t\telse:\n\t\t\tself.ammos = None\n\t\t\n\t\t# deserialize self.delivery_rem_time\n\t\ttmp24 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp24:\n\t\t\tself.delivery_rem_time = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 
4\n\t\telse:\n\t\t\tself.delivery_rem_time = None\n\t\t\n\t\t# deserialize self.c_delivery_duration\n\t\ttmp25 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp25:\n\t\t\tself.c_delivery_duration = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_delivery_duration = None\n\t\t\n\t\treturn offset\n\n\nclass Warehouse(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Warehouse'\n\n\n\tdef __init__(self, materials=None, materials_reload_rem_time=None, c_materials_reload_duration=None):\n\t\tself.initialize(materials, materials_reload_rem_time, c_materials_reload_duration)\n\t\n\n\tdef initialize(self, materials=None, materials_reload_rem_time=None, c_materials_reload_duration=None):\n\t\tself.materials = materials\n\t\tself.materials_reload_rem_time = materials_reload_rem_time\n\t\tself.c_materials_reload_duration = c_materials_reload_duration\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.materials\n\t\ts += b'\\x00' if self.materials is None else b'\\x01'\n\t\tif self.materials is not None:\n\t\t\ttmp26 = b''\n\t\t\ttmp26 += struct.pack('I', len(self.materials))\n\t\t\twhile len(tmp26) and tmp26[-1] == b'\\x00'[0]:\n\t\t\t\ttmp26 = tmp26[:-1]\n\t\t\ts += struct.pack('B', len(tmp26))\n\t\t\ts += tmp26\n\t\t\t\n\t\t\tfor tmp27 in self.materials:\n\t\t\t\ts += b'\\x00' if tmp27 is None else b'\\x01'\n\t\t\t\tif tmp27 is not None:\n\t\t\t\t\ts += tmp27.serialize()\n\t\t\t\ts += b'\\x00' if self.materials[tmp27] is None else b'\\x01'\n\t\t\t\tif self.materials[tmp27] is not None:\n\t\t\t\t\ts += self.materials[tmp27].serialize()\n\t\t\n\t\t# serialize self.materials_reload_rem_time\n\t\ts += b'\\x00' if self.materials_reload_rem_time is None else b'\\x01'\n\t\tif self.materials_reload_rem_time is not None:\n\t\t\ts += struct.pack('i', self.materials_reload_rem_time)\n\t\t\n\t\t# serialize self.c_materials_reload_duration\n\t\ts += b'\\x00' if self.c_materials_reload_duration is None else b'\\x01'\n\t\tif self.c_materials_reload_duration is not None:\n\t\t\ts += struct.pack('i', self.c_materials_reload_duration)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.materials\n\t\ttmp28 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp28:\n\t\t\ttmp29 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp30 = s[offset:offset + tmp29]\n\t\t\toffset += tmp29\n\t\t\ttmp30 += b'\\x00' * (4 - tmp29)\n\t\t\ttmp31 = struct.unpack('I', tmp30)[0]\n\t\t\t\n\t\t\tself.materials = {}\n\t\t\tfor tmp32 in range(tmp31):\n\t\t\t\ttmp35 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp35:\n\t\t\t\t\ttmp33 = Position()\n\t\t\t\t\toffset = tmp33.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp33 = None\n\t\t\t\ttmp36 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp36:\n\t\t\t\t\ttmp34 = Material()\n\t\t\t\t\toffset = tmp34.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp34 = None\n\t\t\t\tself.materials[tmp33] = tmp34\n\t\telse:\n\t\t\tself.materials = None\n\t\t\n\t\t# deserialize self.materials_reload_rem_time\n\t\ttmp37 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp37:\n\t\t\tself.materials_reload_rem_time = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.materials_reload_rem_time = None\n\t\t\n\t\t# deserialize self.c_materials_reload_duration\n\t\ttmp38 = struct.unpack('B', s[offset:offset + 
1])[0]\n\t\toffset += 1\n\t\tif tmp38:\n\t\t\tself.c_materials_reload_duration = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_materials_reload_duration = None\n\t\t\n\t\treturn offset\n\n\nclass BacklineDelivery(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'BacklineDelivery'\n\n\n\tdef __init__(self, materials=None, ammos=None):\n\t\tself.initialize(materials, ammos)\n\t\n\n\tdef initialize(self, materials=None, ammos=None):\n\t\tself.materials = materials\n\t\tself.ammos = ammos\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.materials\n\t\ts += b'\\x00' if self.materials is None else b'\\x01'\n\t\tif self.materials is not None:\n\t\t\ttmp39 = b''\n\t\t\ttmp39 += struct.pack('I', len(self.materials))\n\t\t\twhile len(tmp39) and tmp39[-1] == b'\\x00'[0]:\n\t\t\t\ttmp39 = tmp39[:-1]\n\t\t\ts += struct.pack('B', len(tmp39))\n\t\t\ts += tmp39\n\t\t\t\n\t\t\tfor tmp40 in self.materials:\n\t\t\t\ts += b'\\x00' if tmp40 is None else b'\\x01'\n\t\t\t\tif tmp40 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp40.value)\n\t\t\t\ts += b'\\x00' if self.materials[tmp40] is None else b'\\x01'\n\t\t\t\tif self.materials[tmp40] is not None:\n\t\t\t\t\ts += struct.pack('i', self.materials[tmp40])\n\t\t\n\t\t# serialize self.ammos\n\t\ts += b'\\x00' if self.ammos is None else b'\\x01'\n\t\tif self.ammos is not None:\n\t\t\ttmp41 = b''\n\t\t\ttmp41 += struct.pack('I', len(self.ammos))\n\t\t\twhile len(tmp41) and tmp41[-1] == b'\\x00'[0]:\n\t\t\t\ttmp41 = tmp41[:-1]\n\t\t\ts += struct.pack('B', len(tmp41))\n\t\t\ts += tmp41\n\t\t\t\n\t\t\tfor tmp42 in self.ammos:\n\t\t\t\ts += b'\\x00' if tmp42 is None else b'\\x01'\n\t\t\t\tif tmp42 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp42.value)\n\t\t\t\ts += b'\\x00' if self.ammos[tmp42] is None else b'\\x01'\n\t\t\t\tif self.ammos[tmp42] is not None:\n\t\t\t\t\ts += struct.pack('i', self.ammos[tmp42])\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.materials\n\t\ttmp43 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp43:\n\t\t\ttmp44 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp45 = s[offset:offset + tmp44]\n\t\t\toffset += tmp44\n\t\t\ttmp45 += b'\\x00' * (4 - tmp44)\n\t\t\ttmp46 = struct.unpack('I', tmp45)[0]\n\t\t\t\n\t\t\tself.materials = {}\n\t\t\tfor tmp47 in range(tmp46):\n\t\t\t\ttmp50 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp50:\n\t\t\t\t\ttmp51 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp48 = MaterialType(tmp51)\n\t\t\t\telse:\n\t\t\t\t\ttmp48 = None\n\t\t\t\ttmp52 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp52:\n\t\t\t\t\ttmp49 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp49 = None\n\t\t\t\tself.materials[tmp48] = tmp49\n\t\telse:\n\t\t\tself.materials = None\n\t\t\n\t\t# deserialize self.ammos\n\t\ttmp53 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp53:\n\t\t\ttmp54 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp55 = s[offset:offset + tmp54]\n\t\t\toffset += tmp54\n\t\t\ttmp55 += b'\\x00' * (4 - tmp54)\n\t\t\ttmp56 = struct.unpack('I', tmp55)[0]\n\t\t\t\n\t\t\tself.ammos = {}\n\t\t\tfor tmp57 in range(tmp56):\n\t\t\t\ttmp60 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp60:\n\t\t\t\t\ttmp61 = struct.unpack('b', 
s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp58 = AmmoType(tmp61)\n\t\t\t\telse:\n\t\t\t\t\ttmp58 = None\n\t\t\t\ttmp62 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp62:\n\t\t\t\t\ttmp59 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp59 = None\n\t\t\t\tself.ammos[tmp58] = tmp59\n\t\telse:\n\t\t\tself.ammos = None\n\t\t\n\t\treturn offset\n\n\nclass Factory(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Factory'\n\n\n\tdef __init__(self, machines=None, c_mixture_formulas=None, c_construction_durations=None, c_ammo_box_sizes=None):\n\t\tself.initialize(machines, c_mixture_formulas, c_construction_durations, c_ammo_box_sizes)\n\t\n\n\tdef initialize(self, machines=None, c_mixture_formulas=None, c_construction_durations=None, c_ammo_box_sizes=None):\n\t\tself.machines = machines\n\t\tself.c_mixture_formulas = c_mixture_formulas\n\t\tself.c_construction_durations = c_construction_durations\n\t\tself.c_ammo_box_sizes = c_ammo_box_sizes\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.machines\n\t\ts += b'\\x00' if self.machines is None else b'\\x01'\n\t\tif self.machines is not None:\n\t\t\ttmp63 = b''\n\t\t\ttmp63 += struct.pack('I', len(self.machines))\n\t\t\twhile len(tmp63) and tmp63[-1] == b'\\x00'[0]:\n\t\t\t\ttmp63 = tmp63[:-1]\n\t\t\ts += struct.pack('B', len(tmp63))\n\t\t\ts += tmp63\n\t\t\t\n\t\t\tfor tmp64 in self.machines:\n\t\t\t\ts += b'\\x00' if tmp64 is None else b'\\x01'\n\t\t\t\tif tmp64 is not None:\n\t\t\t\t\ts += tmp64.serialize()\n\t\t\t\ts += b'\\x00' if self.machines[tmp64] is None else b'\\x01'\n\t\t\t\tif self.machines[tmp64] is not None:\n\t\t\t\t\ts += self.machines[tmp64].serialize()\n\t\t\n\t\t# serialize self.c_mixture_formulas\n\t\ts += b'\\x00' if self.c_mixture_formulas is None else b'\\x01'\n\t\tif self.c_mixture_formulas is not None:\n\t\t\ttmp65 = b''\n\t\t\ttmp65 += struct.pack('I', len(self.c_mixture_formulas))\n\t\t\twhile len(tmp65) and tmp65[-1] == b'\\x00'[0]:\n\t\t\t\ttmp65 = tmp65[:-1]\n\t\t\ts += struct.pack('B', len(tmp65))\n\t\t\ts += tmp65\n\t\t\t\n\t\t\tfor tmp66 in self.c_mixture_formulas:\n\t\t\t\ts += b'\\x00' if tmp66 is None else b'\\x01'\n\t\t\t\tif tmp66 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp66.value)\n\t\t\t\ts += b'\\x00' if self.c_mixture_formulas[tmp66] is None else b'\\x01'\n\t\t\t\tif self.c_mixture_formulas[tmp66] is not None:\n\t\t\t\t\ttmp67 = b''\n\t\t\t\t\ttmp67 += struct.pack('I', len(self.c_mixture_formulas[tmp66]))\n\t\t\t\t\twhile len(tmp67) and tmp67[-1] == b'\\x00'[0]:\n\t\t\t\t\t\ttmp67 = tmp67[:-1]\n\t\t\t\t\ts += struct.pack('B', len(tmp67))\n\t\t\t\t\ts += tmp67\n\t\t\t\t\t\n\t\t\t\t\tfor tmp68 in self.c_mixture_formulas[tmp66]:\n\t\t\t\t\t\ts += b'\\x00' if tmp68 is None else b'\\x01'\n\t\t\t\t\t\tif tmp68 is not None:\n\t\t\t\t\t\t\ts += struct.pack('b', tmp68.value)\n\t\t\t\t\t\ts += b'\\x00' if self.c_mixture_formulas[tmp66][tmp68] is None else b'\\x01'\n\t\t\t\t\t\tif self.c_mixture_formulas[tmp66][tmp68] is not None:\n\t\t\t\t\t\t\ts += struct.pack('i', self.c_mixture_formulas[tmp66][tmp68])\n\t\t\n\t\t# serialize self.c_construction_durations\n\t\ts += b'\\x00' if self.c_construction_durations is None else b'\\x01'\n\t\tif self.c_construction_durations is not None:\n\t\t\ttmp69 = b''\n\t\t\ttmp69 += struct.pack('I', len(self.c_construction_durations))\n\t\t\twhile len(tmp69) and tmp69[-1] == b'\\x00'[0]:\n\t\t\t\ttmp69 = tmp69[:-1]\n\t\t\ts += struct.pack('B', 
len(tmp69))\n\t\t\ts += tmp69\n\t\t\t\n\t\t\tfor tmp70 in self.c_construction_durations:\n\t\t\t\ts += b'\\x00' if tmp70 is None else b'\\x01'\n\t\t\t\tif tmp70 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp70.value)\n\t\t\t\ts += b'\\x00' if self.c_construction_durations[tmp70] is None else b'\\x01'\n\t\t\t\tif self.c_construction_durations[tmp70] is not None:\n\t\t\t\t\ts += struct.pack('i', self.c_construction_durations[tmp70])\n\t\t\n\t\t# serialize self.c_ammo_box_sizes\n\t\ts += b'\\x00' if self.c_ammo_box_sizes is None else b'\\x01'\n\t\tif self.c_ammo_box_sizes is not None:\n\t\t\ttmp71 = b''\n\t\t\ttmp71 += struct.pack('I', len(self.c_ammo_box_sizes))\n\t\t\twhile len(tmp71) and tmp71[-1] == b'\\x00'[0]:\n\t\t\t\ttmp71 = tmp71[:-1]\n\t\t\ts += struct.pack('B', len(tmp71))\n\t\t\ts += tmp71\n\t\t\t\n\t\t\tfor tmp72 in self.c_ammo_box_sizes:\n\t\t\t\ts += b'\\x00' if tmp72 is None else b'\\x01'\n\t\t\t\tif tmp72 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp72.value)\n\t\t\t\ts += b'\\x00' if self.c_ammo_box_sizes[tmp72] is None else b'\\x01'\n\t\t\t\tif self.c_ammo_box_sizes[tmp72] is not None:\n\t\t\t\t\ts += struct.pack('i', self.c_ammo_box_sizes[tmp72])\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.machines\n\t\ttmp73 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp73:\n\t\t\ttmp74 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp75 = s[offset:offset + tmp74]\n\t\t\toffset += tmp74\n\t\t\ttmp75 += b'\\x00' * (4 - tmp74)\n\t\t\ttmp76 = struct.unpack('I', tmp75)[0]\n\t\t\t\n\t\t\tself.machines = {}\n\t\t\tfor tmp77 in range(tmp76):\n\t\t\t\ttmp80 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp80:\n\t\t\t\t\ttmp78 = Position()\n\t\t\t\t\toffset = tmp78.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp78 = None\n\t\t\t\ttmp81 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp81:\n\t\t\t\t\ttmp79 = Machine()\n\t\t\t\t\toffset = tmp79.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp79 = None\n\t\t\t\tself.machines[tmp78] = tmp79\n\t\telse:\n\t\t\tself.machines = None\n\t\t\n\t\t# deserialize self.c_mixture_formulas\n\t\ttmp82 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp82:\n\t\t\ttmp83 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp84 = s[offset:offset + tmp83]\n\t\t\toffset += tmp83\n\t\t\ttmp84 += b'\\x00' * (4 - tmp83)\n\t\t\ttmp85 = struct.unpack('I', tmp84)[0]\n\t\t\t\n\t\t\tself.c_mixture_formulas = {}\n\t\t\tfor tmp86 in range(tmp85):\n\t\t\t\ttmp89 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp89:\n\t\t\t\t\ttmp90 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp87 = AmmoType(tmp90)\n\t\t\t\telse:\n\t\t\t\t\ttmp87 = None\n\t\t\t\ttmp91 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp91:\n\t\t\t\t\ttmp92 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp93 = s[offset:offset + tmp92]\n\t\t\t\t\toffset += tmp92\n\t\t\t\t\ttmp93 += b'\\x00' * (4 - tmp92)\n\t\t\t\t\ttmp94 = struct.unpack('I', tmp93)[0]\n\t\t\t\t\t\n\t\t\t\t\ttmp88 = {}\n\t\t\t\t\tfor tmp95 in range(tmp94):\n\t\t\t\t\t\ttmp98 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\t\t\toffset += 1\n\t\t\t\t\t\tif tmp98:\n\t\t\t\t\t\t\ttmp99 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\t\t\toffset += 1\n\t\t\t\t\t\t\ttmp96 = 
MaterialType(tmp99)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttmp96 = None\n\t\t\t\t\t\ttmp100 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\t\t\toffset += 1\n\t\t\t\t\t\tif tmp100:\n\t\t\t\t\t\t\ttmp97 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\t\t\toffset += 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttmp97 = None\n\t\t\t\t\t\ttmp88[tmp96] = tmp97\n\t\t\t\telse:\n\t\t\t\t\ttmp88 = None\n\t\t\t\tself.c_mixture_formulas[tmp87] = tmp88\n\t\telse:\n\t\t\tself.c_mixture_formulas = None\n\t\t\n\t\t# deserialize self.c_construction_durations\n\t\ttmp101 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp101:\n\t\t\ttmp102 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp103 = s[offset:offset + tmp102]\n\t\t\toffset += tmp102\n\t\t\ttmp103 += b'\\x00' * (4 - tmp102)\n\t\t\ttmp104 = struct.unpack('I', tmp103)[0]\n\t\t\t\n\t\t\tself.c_construction_durations = {}\n\t\t\tfor tmp105 in range(tmp104):\n\t\t\t\ttmp108 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp108:\n\t\t\t\t\ttmp109 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp106 = AmmoType(tmp109)\n\t\t\t\telse:\n\t\t\t\t\ttmp106 = None\n\t\t\t\ttmp110 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp110:\n\t\t\t\t\ttmp107 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp107 = None\n\t\t\t\tself.c_construction_durations[tmp106] = tmp107\n\t\telse:\n\t\t\tself.c_construction_durations = None\n\t\t\n\t\t# deserialize self.c_ammo_box_sizes\n\t\ttmp111 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp111:\n\t\t\ttmp112 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp113 = s[offset:offset + tmp112]\n\t\t\toffset += tmp112\n\t\t\ttmp113 += b'\\x00' * (4 - tmp112)\n\t\t\ttmp114 = struct.unpack('I', tmp113)[0]\n\t\t\t\n\t\t\tself.c_ammo_box_sizes = {}\n\t\t\tfor tmp115 in range(tmp114):\n\t\t\t\ttmp118 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp118:\n\t\t\t\t\ttmp119 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp116 = AmmoType(tmp119)\n\t\t\t\telse:\n\t\t\t\t\ttmp116 = None\n\t\t\t\ttmp120 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp120:\n\t\t\t\t\ttmp117 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp117 = None\n\t\t\t\tself.c_ammo_box_sizes[tmp116] = tmp117\n\t\telse:\n\t\t\tself.c_ammo_box_sizes = None\n\t\t\n\t\treturn offset\n\n\nclass Agent(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Agent'\n\n\n\tdef __init__(self, type=None, position=None, materials_bag=None, c_materials_bag_capacity=None, ammos_bag=None, c_ammos_bag_capacity=None):\n\t\tself.initialize(type, position, materials_bag, c_materials_bag_capacity, ammos_bag, c_ammos_bag_capacity)\n\t\n\n\tdef initialize(self, type=None, position=None, materials_bag=None, c_materials_bag_capacity=None, ammos_bag=None, c_ammos_bag_capacity=None):\n\t\tself.type = type\n\t\tself.position = position\n\t\tself.materials_bag = materials_bag\n\t\tself.c_materials_bag_capacity = c_materials_bag_capacity\n\t\tself.ammos_bag = ammos_bag\n\t\tself.c_ammos_bag_capacity = c_ammos_bag_capacity\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.type\n\t\ts += b'\\x00' if self.type is None else b'\\x01'\n\t\tif self.type is not None:\n\t\t\ts += struct.pack('b', 
self.type.value)\n\t\t\n\t\t# serialize self.position\n\t\ts += b'\\x00' if self.position is None else b'\\x01'\n\t\tif self.position is not None:\n\t\t\ts += self.position.serialize()\n\t\t\n\t\t# serialize self.materials_bag\n\t\ts += b'\\x00' if self.materials_bag is None else b'\\x01'\n\t\tif self.materials_bag is not None:\n\t\t\ttmp121 = b''\n\t\t\ttmp121 += struct.pack('I', len(self.materials_bag))\n\t\t\twhile len(tmp121) and tmp121[-1] == b'\\x00'[0]:\n\t\t\t\ttmp121 = tmp121[:-1]\n\t\t\ts += struct.pack('B', len(tmp121))\n\t\t\ts += tmp121\n\t\t\t\n\t\t\tfor tmp122 in self.materials_bag:\n\t\t\t\ts += b'\\x00' if tmp122 is None else b'\\x01'\n\t\t\t\tif tmp122 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp122.value)\n\t\t\t\ts += b'\\x00' if self.materials_bag[tmp122] is None else b'\\x01'\n\t\t\t\tif self.materials_bag[tmp122] is not None:\n\t\t\t\t\ts += struct.pack('i', self.materials_bag[tmp122])\n\t\t\n\t\t# serialize self.c_materials_bag_capacity\n\t\ts += b'\\x00' if self.c_materials_bag_capacity is None else b'\\x01'\n\t\tif self.c_materials_bag_capacity is not None:\n\t\t\ts += struct.pack('i', self.c_materials_bag_capacity)\n\t\t\n\t\t# serialize self.ammos_bag\n\t\ts += b'\\x00' if self.ammos_bag is None else b'\\x01'\n\t\tif self.ammos_bag is not None:\n\t\t\ttmp123 = b''\n\t\t\ttmp123 += struct.pack('I', len(self.ammos_bag))\n\t\t\twhile len(tmp123) and tmp123[-1] == b'\\x00'[0]:\n\t\t\t\ttmp123 = tmp123[:-1]\n\t\t\ts += struct.pack('B', len(tmp123))\n\t\t\ts += tmp123\n\t\t\t\n\t\t\tfor tmp124 in self.ammos_bag:\n\t\t\t\ts += b'\\x00' if tmp124 is None else b'\\x01'\n\t\t\t\tif tmp124 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp124.value)\n\t\t\t\ts += b'\\x00' if self.ammos_bag[tmp124] is None else b'\\x01'\n\t\t\t\tif self.ammos_bag[tmp124] is not None:\n\t\t\t\t\ts += struct.pack('i', self.ammos_bag[tmp124])\n\t\t\n\t\t# serialize self.c_ammos_bag_capacity\n\t\ts += b'\\x00' if self.c_ammos_bag_capacity is None else b'\\x01'\n\t\tif self.c_ammos_bag_capacity is not None:\n\t\t\ts += struct.pack('i', self.c_ammos_bag_capacity)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.type\n\t\ttmp125 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp125:\n\t\t\ttmp126 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\tself.type = AgentType(tmp126)\n\t\telse:\n\t\t\tself.type = None\n\t\t\n\t\t# deserialize self.position\n\t\ttmp127 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp127:\n\t\t\tself.position = Position()\n\t\t\toffset = self.position.deserialize(s, offset)\n\t\telse:\n\t\t\tself.position = None\n\t\t\n\t\t# deserialize self.materials_bag\n\t\ttmp128 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp128:\n\t\t\ttmp129 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp130 = s[offset:offset + tmp129]\n\t\t\toffset += tmp129\n\t\t\ttmp130 += b'\\x00' * (4 - tmp129)\n\t\t\ttmp131 = struct.unpack('I', tmp130)[0]\n\t\t\t\n\t\t\tself.materials_bag = {}\n\t\t\tfor tmp132 in range(tmp131):\n\t\t\t\ttmp135 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp135:\n\t\t\t\t\ttmp136 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp133 = MaterialType(tmp136)\n\t\t\t\telse:\n\t\t\t\t\ttmp133 = None\n\t\t\t\ttmp137 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp137:\n\t\t\t\t\ttmp134 = struct.unpack('i', 
s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp134 = None\n\t\t\t\tself.materials_bag[tmp133] = tmp134\n\t\telse:\n\t\t\tself.materials_bag = None\n\t\t\n\t\t# deserialize self.c_materials_bag_capacity\n\t\ttmp138 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp138:\n\t\t\tself.c_materials_bag_capacity = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_materials_bag_capacity = None\n\t\t\n\t\t# deserialize self.ammos_bag\n\t\ttmp139 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp139:\n\t\t\ttmp140 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp141 = s[offset:offset + tmp140]\n\t\t\toffset += tmp140\n\t\t\ttmp141 += b'\\x00' * (4 - tmp140)\n\t\t\ttmp142 = struct.unpack('I', tmp141)[0]\n\t\t\t\n\t\t\tself.ammos_bag = {}\n\t\t\tfor tmp143 in range(tmp142):\n\t\t\t\ttmp146 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp146:\n\t\t\t\t\ttmp147 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp144 = AmmoType(tmp147)\n\t\t\t\telse:\n\t\t\t\t\ttmp144 = None\n\t\t\t\ttmp148 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp148:\n\t\t\t\t\ttmp145 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp145 = None\n\t\t\t\tself.ammos_bag[tmp144] = tmp145\n\t\telse:\n\t\t\tself.ammos_bag = None\n\t\t\n\t\t# deserialize self.c_ammos_bag_capacity\n\t\ttmp149 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp149:\n\t\t\tself.c_ammos_bag_capacity = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_ammos_bag_capacity = None\n\t\t\n\t\treturn offset\n\n\nclass Unit(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Unit'\n\n\n\tdef __init__(self, type=None, health=None, c_individual_health=None, c_individual_damage=None, c_damage_distribution=None, ammo_count=None, reload_rem_time=None, c_reload_duration=None):\n\t\tself.initialize(type, health, c_individual_health, c_individual_damage, c_damage_distribution, ammo_count, reload_rem_time, c_reload_duration)\n\t\n\n\tdef initialize(self, type=None, health=None, c_individual_health=None, c_individual_damage=None, c_damage_distribution=None, ammo_count=None, reload_rem_time=None, c_reload_duration=None):\n\t\tself.type = type\n\t\tself.health = health\n\t\tself.c_individual_health = c_individual_health\n\t\tself.c_individual_damage = c_individual_damage\n\t\tself.c_damage_distribution = c_damage_distribution\n\t\tself.ammo_count = ammo_count\n\t\tself.reload_rem_time = reload_rem_time\n\t\tself.c_reload_duration = c_reload_duration\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.type\n\t\ts += b'\\x00' if self.type is None else b'\\x01'\n\t\tif self.type is not None:\n\t\t\ts += struct.pack('b', self.type.value)\n\t\t\n\t\t# serialize self.health\n\t\ts += b'\\x00' if self.health is None else b'\\x01'\n\t\tif self.health is not None:\n\t\t\ts += struct.pack('i', self.health)\n\t\t\n\t\t# serialize self.c_individual_health\n\t\ts += b'\\x00' if self.c_individual_health is None else b'\\x01'\n\t\tif self.c_individual_health is not None:\n\t\t\ts += struct.pack('i', self.c_individual_health)\n\t\t\n\t\t# serialize self.c_individual_damage\n\t\ts += b'\\x00' if self.c_individual_damage is None else b'\\x01'\n\t\tif self.c_individual_damage is not None:\n\t\t\ts += struct.pack('i', 
self.c_individual_damage)\n\t\t\n\t\t# serialize self.c_damage_distribution\n\t\ts += b'\\x00' if self.c_damage_distribution is None else b'\\x01'\n\t\tif self.c_damage_distribution is not None:\n\t\t\ttmp150 = b''\n\t\t\ttmp150 += struct.pack('I', len(self.c_damage_distribution))\n\t\t\twhile len(tmp150) and tmp150[-1] == b'\\x00'[0]:\n\t\t\t\ttmp150 = tmp150[:-1]\n\t\t\ts += struct.pack('B', len(tmp150))\n\t\t\ts += tmp150\n\t\t\t\n\t\t\tfor tmp151 in self.c_damage_distribution:\n\t\t\t\ts += b'\\x00' if tmp151 is None else b'\\x01'\n\t\t\t\tif tmp151 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp151.value)\n\t\t\t\ts += b'\\x00' if self.c_damage_distribution[tmp151] is None else b'\\x01'\n\t\t\t\tif self.c_damage_distribution[tmp151] is not None:\n\t\t\t\t\ts += struct.pack('f', self.c_damage_distribution[tmp151])\n\t\t\n\t\t# serialize self.ammo_count\n\t\ts += b'\\x00' if self.ammo_count is None else b'\\x01'\n\t\tif self.ammo_count is not None:\n\t\t\ts += struct.pack('i', self.ammo_count)\n\t\t\n\t\t# serialize self.reload_rem_time\n\t\ts += b'\\x00' if self.reload_rem_time is None else b'\\x01'\n\t\tif self.reload_rem_time is not None:\n\t\t\ts += struct.pack('i', self.reload_rem_time)\n\t\t\n\t\t# serialize self.c_reload_duration\n\t\ts += b'\\x00' if self.c_reload_duration is None else b'\\x01'\n\t\tif self.c_reload_duration is not None:\n\t\t\ts += struct.pack('i', self.c_reload_duration)\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.type\n\t\ttmp152 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp152:\n\t\t\ttmp153 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\tself.type = UnitType(tmp153)\n\t\telse:\n\t\t\tself.type = None\n\t\t\n\t\t# deserialize self.health\n\t\ttmp154 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp154:\n\t\t\tself.health = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.health = None\n\t\t\n\t\t# deserialize self.c_individual_health\n\t\ttmp155 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp155:\n\t\t\tself.c_individual_health = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_individual_health = None\n\t\t\n\t\t# deserialize self.c_individual_damage\n\t\ttmp156 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp156:\n\t\t\tself.c_individual_damage = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_individual_damage = None\n\t\t\n\t\t# deserialize self.c_damage_distribution\n\t\ttmp157 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp157:\n\t\t\ttmp158 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp159 = s[offset:offset + tmp158]\n\t\t\toffset += tmp158\n\t\t\ttmp159 += b'\\x00' * (4 - tmp158)\n\t\t\ttmp160 = struct.unpack('I', tmp159)[0]\n\t\t\t\n\t\t\tself.c_damage_distribution = {}\n\t\t\tfor tmp161 in range(tmp160):\n\t\t\t\ttmp164 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp164:\n\t\t\t\t\ttmp165 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp162 = UnitType(tmp165)\n\t\t\t\telse:\n\t\t\t\t\ttmp162 = None\n\t\t\t\ttmp166 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp166:\n\t\t\t\t\ttmp163 = struct.unpack('f', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp163 = 
None\n\t\t\t\tself.c_damage_distribution[tmp162] = tmp163\n\t\telse:\n\t\t\tself.c_damage_distribution = None\n\t\t\n\t\t# deserialize self.ammo_count\n\t\ttmp167 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp167:\n\t\t\tself.ammo_count = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.ammo_count = None\n\t\t\n\t\t# deserialize self.reload_rem_time\n\t\ttmp168 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp168:\n\t\t\tself.reload_rem_time = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.reload_rem_time = None\n\t\t\n\t\t# deserialize self.c_reload_duration\n\t\ttmp169 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp169:\n\t\t\tself.c_reload_duration = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.c_reload_duration = None\n\t\t\n\t\treturn offset\n\n\nclass Base(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'Base'\n\n\n\tdef __init__(self, c_area=None, agents=None, frontline_deliveries=None, warehouse=None, backline_delivery=None, factory=None, units=None):\n\t\tself.initialize(c_area, agents, frontline_deliveries, warehouse, backline_delivery, factory, units)\n\t\n\n\tdef initialize(self, c_area=None, agents=None, frontline_deliveries=None, warehouse=None, backline_delivery=None, factory=None, units=None):\n\t\tself.c_area = c_area\n\t\tself.agents = agents\n\t\tself.frontline_deliveries = frontline_deliveries\n\t\tself.warehouse = warehouse\n\t\tself.backline_delivery = backline_delivery\n\t\tself.factory = factory\n\t\tself.units = units\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.c_area\n\t\ts += b'\\x00' if self.c_area is None else b'\\x01'\n\t\tif self.c_area is not None:\n\t\t\ttmp170 = b''\n\t\t\ttmp170 += struct.pack('I', len(self.c_area))\n\t\t\twhile len(tmp170) and tmp170[-1] == b'\\x00'[0]:\n\t\t\t\ttmp170 = tmp170[:-1]\n\t\t\ts += struct.pack('B', len(tmp170))\n\t\t\ts += tmp170\n\t\t\t\n\t\t\tfor tmp171 in self.c_area:\n\t\t\t\ts += b'\\x00' if tmp171 is None else b'\\x01'\n\t\t\t\tif tmp171 is not None:\n\t\t\t\t\ts += tmp171.serialize()\n\t\t\t\ts += b'\\x00' if self.c_area[tmp171] is None else b'\\x01'\n\t\t\t\tif self.c_area[tmp171] is not None:\n\t\t\t\t\ts += struct.pack('b', self.c_area[tmp171].value)\n\t\t\n\t\t# serialize self.agents\n\t\ts += b'\\x00' if self.agents is None else b'\\x01'\n\t\tif self.agents is not None:\n\t\t\ttmp172 = b''\n\t\t\ttmp172 += struct.pack('I', len(self.agents))\n\t\t\twhile len(tmp172) and tmp172[-1] == b'\\x00'[0]:\n\t\t\t\ttmp172 = tmp172[:-1]\n\t\t\ts += struct.pack('B', len(tmp172))\n\t\t\ts += tmp172\n\t\t\t\n\t\t\tfor tmp173 in self.agents:\n\t\t\t\ts += b'\\x00' if tmp173 is None else b'\\x01'\n\t\t\t\tif tmp173 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp173.value)\n\t\t\t\ts += b'\\x00' if self.agents[tmp173] is None else b'\\x01'\n\t\t\t\tif self.agents[tmp173] is not None:\n\t\t\t\t\ts += self.agents[tmp173].serialize()\n\t\t\n\t\t# serialize self.frontline_deliveries\n\t\ts += b'\\x00' if self.frontline_deliveries is None else b'\\x01'\n\t\tif self.frontline_deliveries is not None:\n\t\t\ttmp174 = b''\n\t\t\ttmp174 += struct.pack('I', len(self.frontline_deliveries))\n\t\t\twhile len(tmp174) and tmp174[-1] == b'\\x00'[0]:\n\t\t\t\ttmp174 = tmp174[:-1]\n\t\t\ts += struct.pack('B', len(tmp174))\n\t\t\ts += tmp174\n\t\t\t\n\t\t\tfor tmp175 in self.frontline_deliveries:\n\t\t\t\ts += b'\\x00' if 
tmp175 is None else b'\\x01'\n\t\t\t\tif tmp175 is not None:\n\t\t\t\t\ts += tmp175.serialize()\n\t\t\n\t\t# serialize self.warehouse\n\t\ts += b'\\x00' if self.warehouse is None else b'\\x01'\n\t\tif self.warehouse is not None:\n\t\t\ts += self.warehouse.serialize()\n\t\t\n\t\t# serialize self.backline_delivery\n\t\ts += b'\\x00' if self.backline_delivery is None else b'\\x01'\n\t\tif self.backline_delivery is not None:\n\t\t\ts += self.backline_delivery.serialize()\n\t\t\n\t\t# serialize self.factory\n\t\ts += b'\\x00' if self.factory is None else b'\\x01'\n\t\tif self.factory is not None:\n\t\t\ts += self.factory.serialize()\n\t\t\n\t\t# serialize self.units\n\t\ts += b'\\x00' if self.units is None else b'\\x01'\n\t\tif self.units is not None:\n\t\t\ttmp176 = b''\n\t\t\ttmp176 += struct.pack('I', len(self.units))\n\t\t\twhile len(tmp176) and tmp176[-1] == b'\\x00'[0]:\n\t\t\t\ttmp176 = tmp176[:-1]\n\t\t\ts += struct.pack('B', len(tmp176))\n\t\t\ts += tmp176\n\t\t\t\n\t\t\tfor tmp177 in self.units:\n\t\t\t\ts += b'\\x00' if tmp177 is None else b'\\x01'\n\t\t\t\tif tmp177 is not None:\n\t\t\t\t\ts += struct.pack('b', tmp177.value)\n\t\t\t\ts += b'\\x00' if self.units[tmp177] is None else b'\\x01'\n\t\t\t\tif self.units[tmp177] is not None:\n\t\t\t\t\ts += self.units[tmp177].serialize()\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.c_area\n\t\ttmp178 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp178:\n\t\t\ttmp179 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp180 = s[offset:offset + tmp179]\n\t\t\toffset += tmp179\n\t\t\ttmp180 += b'\\x00' * (4 - tmp179)\n\t\t\ttmp181 = struct.unpack('I', tmp180)[0]\n\t\t\t\n\t\t\tself.c_area = {}\n\t\t\tfor tmp182 in range(tmp181):\n\t\t\t\ttmp185 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp185:\n\t\t\t\t\ttmp183 = Position()\n\t\t\t\t\toffset = tmp183.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp183 = None\n\t\t\t\ttmp186 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp186:\n\t\t\t\t\ttmp187 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp184 = ECell(tmp187)\n\t\t\t\telse:\n\t\t\t\t\ttmp184 = None\n\t\t\t\tself.c_area[tmp183] = tmp184\n\t\telse:\n\t\t\tself.c_area = None\n\t\t\n\t\t# deserialize self.agents\n\t\ttmp188 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp188:\n\t\t\ttmp189 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp190 = s[offset:offset + tmp189]\n\t\t\toffset += tmp189\n\t\t\ttmp190 += b'\\x00' * (4 - tmp189)\n\t\t\ttmp191 = struct.unpack('I', tmp190)[0]\n\t\t\t\n\t\t\tself.agents = {}\n\t\t\tfor tmp192 in range(tmp191):\n\t\t\t\ttmp195 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp195:\n\t\t\t\t\ttmp196 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp193 = AgentType(tmp196)\n\t\t\t\telse:\n\t\t\t\t\ttmp193 = None\n\t\t\t\ttmp197 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp197:\n\t\t\t\t\ttmp194 = Agent()\n\t\t\t\t\toffset = tmp194.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp194 = None\n\t\t\t\tself.agents[tmp193] = tmp194\n\t\telse:\n\t\t\tself.agents = None\n\t\t\n\t\t# deserialize self.frontline_deliveries\n\t\ttmp198 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp198:\n\t\t\ttmp199 = struct.unpack('B', s[offset:offset + 
1])[0]\n\t\t\toffset += 1\n\t\t\ttmp200 = s[offset:offset + tmp199]\n\t\t\toffset += tmp199\n\t\t\ttmp200 += b'\\x00' * (4 - tmp199)\n\t\t\ttmp201 = struct.unpack('I', tmp200)[0]\n\t\t\t\n\t\t\tself.frontline_deliveries = []\n\t\t\tfor tmp202 in range(tmp201):\n\t\t\t\ttmp204 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp204:\n\t\t\t\t\ttmp203 = FrontlineDelivery()\n\t\t\t\t\toffset = tmp203.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp203 = None\n\t\t\t\tself.frontline_deliveries.append(tmp203)\n\t\telse:\n\t\t\tself.frontline_deliveries = None\n\t\t\n\t\t# deserialize self.warehouse\n\t\ttmp205 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp205:\n\t\t\tself.warehouse = Warehouse()\n\t\t\toffset = self.warehouse.deserialize(s, offset)\n\t\telse:\n\t\t\tself.warehouse = None\n\t\t\n\t\t# deserialize self.backline_delivery\n\t\ttmp206 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp206:\n\t\t\tself.backline_delivery = BacklineDelivery()\n\t\t\toffset = self.backline_delivery.deserialize(s, offset)\n\t\telse:\n\t\t\tself.backline_delivery = None\n\t\t\n\t\t# deserialize self.factory\n\t\ttmp207 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp207:\n\t\t\tself.factory = Factory()\n\t\t\toffset = self.factory.deserialize(s, offset)\n\t\telse:\n\t\t\tself.factory = None\n\t\t\n\t\t# deserialize self.units\n\t\ttmp208 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp208:\n\t\t\ttmp209 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp210 = s[offset:offset + tmp209]\n\t\t\toffset += tmp209\n\t\t\ttmp210 += b'\\x00' * (4 - tmp209)\n\t\t\ttmp211 = struct.unpack('I', tmp210)[0]\n\t\t\t\n\t\t\tself.units = {}\n\t\t\tfor tmp212 in range(tmp211):\n\t\t\t\ttmp215 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp215:\n\t\t\t\t\ttmp216 = struct.unpack('b', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp213 = UnitType(tmp216)\n\t\t\t\telse:\n\t\t\t\t\ttmp213 = None\n\t\t\t\ttmp217 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp217:\n\t\t\t\t\ttmp214 = Unit()\n\t\t\t\t\toffset = tmp214.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp214 = None\n\t\t\t\tself.units[tmp213] = tmp214\n\t\telse:\n\t\t\tself.units = None\n\t\t\n\t\treturn offset\n\n\nclass World(object):\n\n\t@staticmethod\n\tdef name():\n\t\treturn 'World'\n\n\n\tdef __init__(self, max_cycles=None, bases=None, total_healths=None):\n\t\tself.initialize(max_cycles, bases, total_healths)\n\t\n\n\tdef initialize(self, max_cycles=None, bases=None, total_healths=None):\n\t\tself.max_cycles = max_cycles\n\t\tself.bases = bases\n\t\tself.total_healths = total_healths\n\t\n\n\tdef serialize(self):\n\t\ts = b''\n\t\t\n\t\t# serialize self.max_cycles\n\t\ts += b'\\x00' if self.max_cycles is None else b'\\x01'\n\t\tif self.max_cycles is not None:\n\t\t\ts += struct.pack('i', self.max_cycles)\n\t\t\n\t\t# serialize self.bases\n\t\ts += b'\\x00' if self.bases is None else b'\\x01'\n\t\tif self.bases is not None:\n\t\t\ttmp218 = b''\n\t\t\ttmp218 += struct.pack('I', len(self.bases))\n\t\t\twhile len(tmp218) and tmp218[-1] == b'\\x00'[0]:\n\t\t\t\ttmp218 = tmp218[:-1]\n\t\t\ts += struct.pack('B', len(tmp218))\n\t\t\ts += tmp218\n\t\t\t\n\t\t\tfor tmp219 in self.bases:\n\t\t\t\ts += b'\\x00' if tmp219 is None else b'\\x01'\n\t\t\t\tif tmp219 is not None:\n\t\t\t\t\ttmp220 = b''\n\t\t\t\t\ttmp220 += 
struct.pack('I', len(tmp219))\n\t\t\t\t\twhile len(tmp220) and tmp220[-1] == b'\\x00'[0]:\n\t\t\t\t\t\ttmp220 = tmp220[:-1]\n\t\t\t\t\ts += struct.pack('B', len(tmp220))\n\t\t\t\t\ts += tmp220\n\t\t\t\t\t\n\t\t\t\t\ts += tmp219.encode('ISO-8859-1') if PY3 else tmp219\n\t\t\t\ts += b'\\x00' if self.bases[tmp219] is None else b'\\x01'\n\t\t\t\tif self.bases[tmp219] is not None:\n\t\t\t\t\ts += self.bases[tmp219].serialize()\n\t\t\n\t\t# serialize self.total_healths\n\t\ts += b'\\x00' if self.total_healths is None else b'\\x01'\n\t\tif self.total_healths is not None:\n\t\t\ttmp221 = b''\n\t\t\ttmp221 += struct.pack('I', len(self.total_healths))\n\t\t\twhile len(tmp221) and tmp221[-1] == b'\\x00'[0]:\n\t\t\t\ttmp221 = tmp221[:-1]\n\t\t\ts += struct.pack('B', len(tmp221))\n\t\t\ts += tmp221\n\t\t\t\n\t\t\tfor tmp222 in self.total_healths:\n\t\t\t\ts += b'\\x00' if tmp222 is None else b'\\x01'\n\t\t\t\tif tmp222 is not None:\n\t\t\t\t\ttmp223 = b''\n\t\t\t\t\ttmp223 += struct.pack('I', len(tmp222))\n\t\t\t\t\twhile len(tmp223) and tmp223[-1] == b'\\x00'[0]:\n\t\t\t\t\t\ttmp223 = tmp223[:-1]\n\t\t\t\t\ts += struct.pack('B', len(tmp223))\n\t\t\t\t\ts += tmp223\n\t\t\t\t\t\n\t\t\t\t\ts += tmp222.encode('ISO-8859-1') if PY3 else tmp222\n\t\t\t\ts += b'\\x00' if self.total_healths[tmp222] is None else b'\\x01'\n\t\t\t\tif self.total_healths[tmp222] is not None:\n\t\t\t\t\ts += struct.pack('i', self.total_healths[tmp222])\n\t\t\n\t\treturn s\n\t\n\n\tdef deserialize(self, s, offset=0):\n\t\t# deserialize self.max_cycles\n\t\ttmp224 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp224:\n\t\t\tself.max_cycles = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\toffset += 4\n\t\telse:\n\t\t\tself.max_cycles = None\n\t\t\n\t\t# deserialize self.bases\n\t\ttmp225 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp225:\n\t\t\ttmp226 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp227 = s[offset:offset + tmp226]\n\t\t\toffset += tmp226\n\t\t\ttmp227 += b'\\x00' * (4 - tmp226)\n\t\t\ttmp228 = struct.unpack('I', tmp227)[0]\n\t\t\t\n\t\t\tself.bases = {}\n\t\t\tfor tmp229 in range(tmp228):\n\t\t\t\ttmp232 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp232:\n\t\t\t\t\ttmp233 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp234 = s[offset:offset + tmp233]\n\t\t\t\t\toffset += tmp233\n\t\t\t\t\ttmp234 += b'\\x00' * (4 - tmp233)\n\t\t\t\t\ttmp235 = struct.unpack('I', tmp234)[0]\n\t\t\t\t\t\n\t\t\t\t\ttmp230 = s[offset:offset + tmp235].decode('ISO-8859-1') if PY3 else s[offset:offset + tmp235]\n\t\t\t\t\toffset += tmp235\n\t\t\t\telse:\n\t\t\t\t\ttmp230 = None\n\t\t\t\ttmp236 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp236:\n\t\t\t\t\ttmp231 = Base()\n\t\t\t\t\toffset = tmp231.deserialize(s, offset)\n\t\t\t\telse:\n\t\t\t\t\ttmp231 = None\n\t\t\t\tself.bases[tmp230] = tmp231\n\t\telse:\n\t\t\tself.bases = None\n\t\t\n\t\t# deserialize self.total_healths\n\t\ttmp237 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\toffset += 1\n\t\tif tmp237:\n\t\t\ttmp238 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\toffset += 1\n\t\t\ttmp239 = s[offset:offset + tmp238]\n\t\t\toffset += tmp238\n\t\t\ttmp239 += b'\\x00' * (4 - tmp238)\n\t\t\ttmp240 = struct.unpack('I', tmp239)[0]\n\t\t\t\n\t\t\tself.total_healths = {}\n\t\t\tfor tmp241 in range(tmp240):\n\t\t\t\ttmp244 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif 
tmp244:\n\t\t\t\t\ttmp245 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\t\toffset += 1\n\t\t\t\t\ttmp246 = s[offset:offset + tmp245]\n\t\t\t\t\toffset += tmp245\n\t\t\t\t\ttmp246 += b'\\x00' * (4 - tmp245)\n\t\t\t\t\ttmp247 = struct.unpack('I', tmp246)[0]\n\t\t\t\t\t\n\t\t\t\t\ttmp242 = s[offset:offset + tmp247].decode('ISO-8859-1') if PY3 else s[offset:offset + tmp247]\n\t\t\t\t\toffset += tmp247\n\t\t\t\telse:\n\t\t\t\t\ttmp242 = None\n\t\t\t\ttmp248 = struct.unpack('B', s[offset:offset + 1])[0]\n\t\t\t\toffset += 1\n\t\t\t\tif tmp248:\n\t\t\t\t\ttmp243 = struct.unpack('i', s[offset:offset + 4])[0]\n\t\t\t\t\toffset += 4\n\t\t\t\telse:\n\t\t\t\t\ttmp243 = None\n\t\t\t\tself.total_healths[tmp242] = tmp243\n\t\telse:\n\t\t\tself.total_healths = None\n\t\t\n\t\treturn offset\n","sub_path":"PythonServer/app/ks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":46114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"454654656","text":"#!/usr/bin/python\n\nimport numpy as np\nimport cv2\n\nname1 = '10334.jpg'\nname2 = '10335.jpg'\n\n\ndef nothing(x):\n pass\n\ndef rotate(img):\n #return cv2.flip(cv2.transpose(img),0)\n return img\n\n# Load an color image in grayscale\nimg1 = cv2.imread(name1,cv2.IMREAD_COLOR)\nimg1 = rotate(img1)\n\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\ncv2.createTrackbar('T','image',0,255,nothing)\ncv2.imshow('image',img1)\n\n\nimg2 = cv2.imread(name2,cv2.IMREAD_COLOR)\nimg2 = rotate(img2)\n\ncv2.namedWindow('image2', cv2.WINDOW_NORMAL)\ncv2.createTrackbar('T','image2',0,255,nothing)\ncv2.imshow('image2',img2)\n\n\n\nb1,g1,r1 = cv2.split(img1)\nb2,g2,r2 = cv2.split(img2)\n\ndef add_window(a,b,name ,blur, tr):\n cv2.namedWindow(name, cv2.WINDOW_NORMAL)\n img = cv2.absdiff(a,b)\n\n def d(t):\n if blur:\n img2 = cv2.GaussianBlur(img,(5,5),0)\n else:\n img2 = img\n r, img_t = cv2.threshold(img2, t, 255, tr) #)\n cv2.imshow(name,img_t)\n\n cv2.createTrackbar('T',name,0,255,d)\n cv2.imshow(name ,img)\n\n\nadd_window(g2,g1,'g: cv2.THRESH_BINARY+cv2.THRESH_OTSU',False, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nadd_window(r2,r1,'r: cv2.THRESH_BINARY+cv2.THRESH_OTSU',False, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nadd_window(b2,b1,'b: cv2.THRESH_BINARY+cv2.THRESH_OTSU',False, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\nrgb2 = b2>>2\nrgb2 +=r2>>2\nrgb2 +=g2>>2\nrgb1 = b1>>2\nrgb1 +=r1>>2\nrgb1 +=g1>>2\n\n\nadd_window(rgb2,rgb1,'rgb: cv2.THRESH_BINARY',False, cv2.THRESH_BINARY) #cv2.THRESH_TOZERO)\nadd_window(rgb2,rgb1,'rgb: cv2.THRESH_BINARY+cv2.THRESH_OTSU',False, cv2.THRESH_BINARY+cv2.THRESH_OTSU) #cv2.THRESH_TOZERO)\n\n\n\nimg = cv2.absdiff(rgb2,rgb1)\nr, img_t = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nimg_tt = img_t\n\ndef reduce_pix(a):\n retu = []\n cy = -1\n cx = -1\n cx2 = 0\n for i in a:\n x = i[1]\n y = i[0]\n if cy != y:\n if cy >= 0:\n# print cx,cy\n retu.append([(cx+cx2)/2,cy])\n\n cy = y\n cx = x\n cx2 = x\n elif x < cx2+5:\n cx2 = x\n else:\n# print '!', cx,cy\n retu.append([(cx+cx2)/2,cy])\n cx2 = x\n cx = x\n return retu\n\nnp.set_printoptions(threshold=np.inf)\na = np.argwhere(img_tt)\n\n#print a, '!'\n#a.view('int32,int32') #.sort(order=['f1'],axis=0)\nrp = reduce_pix(a)\n#print a\nname = 'points, rgb: cv2.THRESH_BINARY+cv2.THRESH_OTSU'\ncv2.namedWindow(name, cv2.WINDOW_NORMAL)\ncv2.imshow(name ,img_tt)\n\nimg2 = cv2.bitwise_not(img2,img2,mask = img_tt)\nfor i in rp:\n cv2.circle(img2, (i[0],i[1]), 1, (255,0,0), -1)\n\nname = 'image2+points'\ncv2.namedWindow(name, 
cv2.WINDOW_NORMAL)\ncv2.imshow(name,img2)\n\n\n\nwhile(1):\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n# t = cv2.getTrackbarPos('T','b')\n# r, img_t = cv2.threshold(img5, t, 0, cv2.THRESH_TOZERO)\n# cv2.imshow('b',img_t)\n\n\n#cv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"utils/line/print_img.py","file_name":"print_img.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1034940","text":"from csv import writer\nfrom datetime import datetime\nfrom nltk.classify.util import accuracy\nfrom nltk.corpus import stopwords\nfrom nltk.data import load\nfrom nltk.tokenize import TreebankWordTokenizer\nimport _pickle\nimport ujson\nstopWords = set(stopwords.words('english'))\ntokenizerFrases = load('tokenizers/punkt/english.pickle')\ntokenizerPalavras = TreebankWordTokenizer()\narquivoClassificador = open('classificador.pickle', 'rb')\nclassificador = _pickle.load(arquivoClassificador)\narquivoClassificador.close()\narquivoClassificados = open('classificados.json')\nclassificados = ujson.load(arquivoClassificados)\narquivoClassificados.close()\nsentimentos = []\ncomeco = datetime.now()\nfor resposta in classificados:\n\tfrases = tokenizerFrases.tokenize(resposta['corpo'])\n\tfeature = {}\n\tfor frase in frases:\n\t\tpalavras = tokenizerPalavras.tokenize(frase)\n\t\tpalavras = [palavra for palavra in palavras if palavra not in stopWords]\n\t\tfor palavra in palavras:\n\t\t\tfeature[palavra] = True\n\tsentimentos.append((feature, resposta, classificador.classify(feature)))\ntempo = datetime.now() - comeco\nfeaturesClassificados = []\nlinhas = [['Resposta', 'Pontos', 'Sentimento - Naive Bayes com Sentiwordnet', 'Sentimento - AlchemyAPI']]\nfor feature, resposta, sentimento in sentimentos:\n\ttexto = resposta['corpo']\n\tfrases = tokenizerFrases.tokenize(texto)\n\tsentimentoTemp = resposta['sentimento']\n\tfeaturesClassificados.append((feature, sentimentoTemp))\n\tlinhas.append([texto, resposta['pontos'], sentimento, sentimentoTemp])\narquivoMedicoes = open('medicoes_analise_sequencial.txt', 'w')\narquivoMedicoes.write('Tempo de Execução = ' + str(tempo) + '\\nPrecisão = {0:.2f}%'.format(accuracy(classificador, featuresClassificados) * 100))\narquivoMedicoes.close()\narquivoResultados = open('resultados_sem_stopwords.csv', 'w', newline='')\nw = writer(arquivoResultados, delimiter=',')\nw.writerows(linhas)\narquivoResultados.close()","sub_path":"Análise de Sentimento - SentiWordNet com Naive Bayes/analise_sentimento_sentiwordnet_bayes_sequencial.py","file_name":"analise_sentimento_sentiwordnet_bayes_sequencial.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152800969","text":"#!/usr/bin/env python\n\nimport sys\nimport collections\nimport itertools\n\nimport aoc\n\ninfilename = 'input.txt'\nif len(sys.argv) > 2 and sys.argv[1] == '-i':\n infilename = sys.argv[2]\n\nprint('Using input file: %s' % infilename)\n\nf = open(infilename, 'r')\ndata = f.readlines()\nf.close()\n\ndata = [line.strip() for line in data]\n\n\n\ndata = list(map(float, data))\nmass = sum(data)\nmass = int(mass/3) - 2\n\n\nmass_sum = 0\nfor d in data:\n mass_sum += int(d/3) - 2\nprint('Part 1:', mass_sum)\n\ndef fuel(x):\n return int(x/3) - 2\n\nmass_sum = 0\nfor d in data:\n f = fuel(d)\n while f > 0:\n mass_sum += f\n f = fuel(f)\n\nprint('Part 2:', 
mass_sum)\n","sub_path":"2019/01/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"408814432","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as se\r\nimport datetime\r\nimport math\r\nimport nltk\r\nnltk.download('vader_lexicon')\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\n\r\n# import data.\r\ndata = pd.read_csv(\"C:\\\\Users\\\\qxu5\\\\Desktop\\\\reddit\\\\GME_Post.csv\")\r\ndel data['Unnamed: 0']\r\ndata = data.sort_values(by = ['year', 'month', 'day', 'hour'])\r\ndata.index = range(len(data))\r\ndata = data.iloc[0:11183,:]\r\n\r\n\r\n# Add Title sentiment.\r\nsid = SentimentIntensityAnalyzer()\r\ntitle = []\r\nfor each in range(len(data)):\r\n if type(data.loc[each, 'title']) == str:\r\n title.append(sid.polarity_scores(data.loc[each, 'title'])['compound'])\r\n else:\r\n title.append(data.loc[6, 'title'])\r\ndata['Title_sentiment'] = title\r\n\r\n# Get comments sentiment.\r\ncom_score = []\r\ncom = []\r\nposttime = []\r\nID = []\r\ndata['Top_comments'] = data['Top_comments'].fillna('0')\r\nfor post1 in range(len(data)):\r\n if data['Top_comments'][post1] != '0':\r\n catch = data['Top_comments'][post1].split('], [')\r\n for each in catch:\r\n if each != '[]':\r\n crop = each.split('datetime.datetime')\r\n com_score.append(sid.polarity_scores(crop[0])['compound'])\r\n com.append(crop[0])\r\n d = each.split('datetime.datetime')[1]\r\n d = d.replace(']','')\r\n d = d.replace('(','')\r\n d = d.replace(')','')\r\n d = d.split(', ')\r\n time = d[1]+'/'+d[2]+'/'+d[0]+' '+d[3]+':'+d[4]\r\n posttime.append(datetime.datetime.strptime(time, '%m/%d/%Y %H:%M'))\r\n ID.append(data['ID'][post1])\r\ncomm = pd.DataFrame({\"comments\":com, \"Comment_sentiment\":com_score, 'ID':ID, 'Post_Time':posttime})\r\ncomm['year'] = comm['Post_Time'].dt.year\r\ncomm['month'] = comm['Post_Time'].dt.month\r\ncomm['day'] = comm['Post_Time'].dt.day\r\ncomm['hour'] = comm['Post_Time'].dt.hour\r\n\r\nnew = pd.merge(data, comm, on = ['year', 'month', 'day', 'hour'], how = 'outer')\r\nll = new[['year', 'month', 'day', 'hour', 'Title_sentiment', 'Comment_sentiment']].groupby(['year', 'month', 'day', 'hour']).mean()\r\nkk = ll.index.to_frame()\r\nkk['Title_sentiment'] = ll['Title_sentiment']\r\nkk['Comment_sentiment'] = ll['Comment_sentiment']\r\nkk.index = range(len(kk))\r\n\r\nfinal = pd.merge(kk,new[['year', 'month', 'day', 'hour', 'return']], on = ['year', 'month', 'day', 'hour'], how = 'outer')\r\nfinal = final.drop_duplicates()\r\nfinal['Title_sentiment'] = final['Title_sentiment'].fillna(0)\r\nfinal['Comment_sentiment'] = final['Comment_sentiment'].fillna(0)\r\nfinal['senti'] = final['Title_sentiment'] + final['Comment_sentiment']\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"impose_sentimeng.py","file_name":"impose_sentimeng.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357947501","text":"import os\nimport sys\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pidb.settings')\nimport django\ndjango.setup()\nfrom tabby.models import *\n\ndef main():\n with open('tags') as f:\n str = f.read()\n list = str.split('\\n')\n tag_list = []\n for tem in list:\n if len(tem) > 2:\n tag = {}\n tem_list = tem.split('$')\n print(tem_list)\n tag['tag_name'] = tem_list[0]\n tag['base_tag'] = tem_list[1]\n tag['description'] 
= tem_list[2]\n tag_list.append(tag)\n '''\n for tag in Category.objects.all():\n tag.delete()\n '''\n for tag in tag_list:\n if tag['tag_name'] == tag['base_tag']:\n tem_tag = Category(name=tag['tag_name'], description=tag['description'])\n else:\n tem_tag = Category(name=tag['tag_name'], description=tag['description'], base=Category.objects.get(name=tag['base_tag']))\n tem_tag.save()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231716147","text":"# -*- coding:utf8 -*-\n# @TIME :2019/6/19 11:06\n# @Author : 洪松\n# @File : __init__.py.py\nimport sys\nsys.setrecursionlimit(100000) # 例如这里设置为十万\n\ndic = {}\nwith open('D:\\藏汉词典\\藏汉词典.txt', 'r', encoding='utf-16') as f:\n for line in f.readlines():\n result = line.strip('\\n').split('\\t')\n # print(result)\n dic[result[0]] = result[1]\n# print(dic)\n\nby_key = sorted(dic.items(),key=lambda item:item[0])\n# print(by_key)\nkey = []\nfor i in by_key:\n # print(i)\n key.append(i[0])\nprint(key)\n\n# 有序列表的二叉搜索(假设列表按升序排列)\n\n\ndef binary_search(target, sortedLyst):\n left = 0\n right = len(sortedLyst) - 1\n while left <= right:\n midpoint = (left+right)//2\n if target == sortedLyst[midpoint]:\n return sortedLyst[midpoint]\n elif target < sortedLyst[midpoint]:\n right = midpoint - 1\n else:\n left = midpoint + 1\n return -1\n\nc = input('Please enter the word you want to find:')\nif c not in key:\n print('当前输入元素不在列表中!')\nelse:\n print('The position of the requried word in the list is:')\n answer = binary_search(c, key)\n print('{key}: {value}'.format(key=answer, value=dic[answer]))\n","sub_path":"Task_16/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351283498","text":"import argparse\nimport subprocess\nimport os\nfrom Bio.Blast.Applications import NcbiblastnCommandline\nimport re\nimport glob\nimport pandas as pd\n\n###############################################################################################################\n# HiPR-FISH : blast probes\n###############################################################################################################\n\n###############################################################################################################\n# Workflow functions\n###############################################################################################################\n\ndef blast_taxon_individual_probe(infile, blast_database, blast_output_filename):\n out_format = '6 qseqid sseqid pident qcovhsp length mismatch gapopen qstart qend sstart send evalue bitscore staxids qseq sseq'\n return_code = subprocess.check_call(['blastn', '-db', blast_database, '-query', infile, '-out', blast_output_filename, '-outfmt', out_format, '-task', 'blastn-short', '-max_hsps', '1', '-max_target_seqs', '100000', '-strand', 'minus', '-evalue', '100', '-num_threads', '1'])\n return(return_code)\n\ndef blast_taxon_probes(taxon_probe_directory, blast_database):\n probe_filenames = glob.glob('{}/*.fasta'.format(taxon_probe_directory))\n probe_blast_results = pd.DataFrame(index = probe_filenames, columns = ['blast_return_code'])\n for filename in probe_filenames:\n blast_output_filename = '{}.blast.out'.format(filename)\n return_code = blast_taxon_individual_probe(filename, blast_database, 
blast_output_filename)\n probe_blast_results.loc[filename, 'blast_return_code'] = return_code\n return(probe_blast_results)\n\n###############################################################################################################\n# main function\n###############################################################################################################\n\ndef main():\n parser = argparse.ArgumentParser('Blast FISH probes designed for a complex microbial community')\n parser.add_argument('input_probe_fasta_dir', type = str, help = 'Input file containing all probes designed by primer3')\n parser.add_argument('oriented_pacbio_filename', type = str, help = 'Input FASTA file containing full length 16S sequences of the complex microbial community')\n args = parser.parse_args()\n\n design_level_dir = os.path.split(args.input_probe_fasta_dir)[0]\n probes_dir, design_target = os.path.split(design_level_dir)\n blast_complete_filename = '{}_blast_complete.txt'.format(args.input_probe_fasta_dir)\n blast_return_code_filename = '{}_blast_return_code.csv'.format(args.input_probe_fasta_dir)\n probe_blast_results = blast_taxon_probes(args.input_probe_fasta_dir, args.oriented_pacbio_filename)\n probe_blast_results.to_csv(blast_return_code_filename, index = None)\n file = open(blast_complete_filename, 'w')\n file.write('Probe blast for taxon {} is done.'.format(design_target))\n file.close()\n return\n\nif __name__ == '__main__':\n main()\n","sub_path":"probe_design/scripts/HIPRFISH_strain_v1/hiprfish_blast_probes.py","file_name":"hiprfish_blast_probes.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211347743","text":"from setuptools import setup\nimport pocketcasts\n\ndef read_file(name):\n with open(name) as fd:\n return fd.read()\n\nsetup(\n name=\"pocketcasts-api\",\n version=pocketcasts.__version__,\n author=pocketcasts.__author__,\n description=pocketcasts.__doc__,\n url=pocketcasts.__url__,\n license='MIT',\n py_modules=['pocketcasts'],\n long_description=read_file('README.md')\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"547202410","text":"from __future__ import unicode_literals\n\nfrom copy import deepcopy\nfrom mezzanine.conf import settings\n\nfrom django.contrib import admin\nfrom mezzanine.core.admin import TeamOwnableAdmin\nfrom mezzanine_agenda.models import Event, EventLocation, EventPrice, EventCategory,\\\n ExternalShop, Season\nfrom mezzanine_agenda.forms import EventAdminForm\nfrom mezzanine.core.admin import DisplayableAdmin, OwnableAdmin,\\\n BaseTranslationModelAdmin\n\n\nclass EventAdminBase(admin.ModelAdmin):\n\n model = Event\n\n\nclass EventAdmin(TeamOwnableAdmin, DisplayableAdmin):\n \"\"\"\n Admin class for events.\n \"\"\"\n\n fieldsets = deepcopy(EventAdminBase.fieldsets)\n exclude = (\"short_url\", )\n list_display = [\"title\", \"start\", \"end\", \"user\", \"rank\", \"status\", \"admin_link\"]\n if settings.EVENT_USE_FEATURED_IMAGE:\n list_display.insert(0, \"admin_thumb\")\n list_filter = deepcopy(DisplayableAdmin.list_filter) + (\"location\",)\n ordering = ('-start',)\n form = EventAdminForm\n\n def save_form(self, request, form, change):\n \"\"\"\n Super class ordering is important here - user must get saved first.\n \"\"\"\n OwnableAdmin.save_form(self, request, form, change)\n return 
DisplayableAdmin.save_form(self, request, form, change)\n\n\nclass EventLocationAdmin(admin.ModelAdmin):\n    \"\"\"\n    Admin class for event locations. Hides itself from the admin menu\n    unless explicitly specified.\n    \"\"\"\n\n    fieldsets = (\n        (\n            None,\n            {\n                \"fields\": (\n                    \"title\",\n                    \"address\",\n                    \"postal_code\",\n                    \"city\",\n                    \"room\",\n                    \"mappable_location\",\n                    \"lat\",\n                    \"lon\",\n                    \"description\",\n                    \"link\"\n                )\n            }\n        ),\n    )\n\n    def in_menu(self):\n        \"\"\"\n        Hide from the admin menu unless explicitly set in ``ADMIN_MENU_ORDER``.\n        \"\"\"\n        for (name, items) in settings.ADMIN_MENU_ORDER:\n            if \"mezzanine_agenda.EventLocation\" in items:\n                return True\n        return False\n\n\nclass SeasonAdminBase(admin.ModelAdmin):\n\n    list_display = [\"title\", 'start', 'end']\n    model = Season\n\n\nclass ExternalShopAdmin(BaseTranslationModelAdmin):\n\n    model = ExternalShop\n\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(EventLocation, EventLocationAdmin)\nadmin.site.register(EventPrice)\nadmin.site.register(EventCategory)\nadmin.site.register(ExternalShop, ExternalShopAdmin)\nadmin.site.register(Season, SeasonAdminBase)\n","sub_path":"mezzanine_agenda/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"606711626","text":"import numpy as np\nimport pdb\nimport scipy.special as sp\n\nfrom .base import StructuredModel\nfrom .utils import crammer_singer_joint_feature\n\n\nclass GeneralizedMultiClassClf(StructuredModel):\n    \"\"\"Formulate linear multiclass SVM in C-S style in CRF framework.\n\n    Inputs x are simply feature arrays, labels y are 0 to n_classes.\n\n    Notes\n    ------\n    No bias / intercept is learned. It is recommended to add a constant one\n    feature to the data.\n\n    It is also highly recommended to use n_jobs=1 in the learner when using\n    this model. Trying to parallelize the trivial inference will slow\n    the inference down a lot!\n\n    Parameters\n    ----------\n    n_features : int\n        Number of features of inputs x.\n        If None, it is inferred from data.\n\n    n_classes : int, default=None\n        Number of classes in dataset.\n        If None, it is inferred from data.\n\n    class_weight : None, or array-like\n        Class weights. If an array-like is passed, it must have length\n        n_classes. 
None means equal class weights.\n\n rescale_C : bool, default=False\n Whether the class-weights should be used to rescale C (liblinear-style)\n or just rescale the loss.\n \"\"\"\n def __init__(self, n_features=None, n_classes=None, class_weight=None,\n rescale_C=False, Loss=None):\n # one weight-vector per class\n self.n_states = n_classes\n self.n_features = n_features\n self.rescale_C = rescale_C\n self.class_weight = class_weight\n self.inference_calls = 0\n self._set_size_joint_feature()\n self._set_class_weight()\n if Loss is None:\n self.Loss = np.dot(np.expand_dims(self.class_weight, 1), np.ones((1, self.n_states)))\n np.fill_diagonal(self.Loss, 0.0)\n else:\n self.Loss = Loss\n\n def _set_size_joint_feature(self):\n if None not in [self.n_states, self.n_features]:\n self.size_joint_feature = self.n_states * self.n_features\n\n def initialize(self, X, Y):\n n_features = X.shape[1]\n if self.n_features is None:\n self.n_features = n_features\n elif self.n_features != n_features:\n raise ValueError(\"Expected %d features, got %d\"\n % (self.n_features, n_features))\n\n n_classes = len(np.unique(np.hstack([y.ravel() for y in Y])))\n if self.n_states is None:\n self.n_states = n_classes\n elif self.n_states != n_classes:\n raise ValueError(\"Expected %d classes, got %d\"\n % (self.n_states, n_classes))\n self._set_size_joint_feature()\n self._set_class_weight()\n\n def output_embedding(self, X, Y):\n n_samples = Y.shape[0]\n mu = np.zeros((Y.shape[0], self.n_states))\n for i in range(Y.shape[0]):\n mu[i, Y[i]] = 1\n return mu\n\n def __repr__(self):\n return (\"%s(n_features=%d, n_classes=%d)\"\n % (type(self).__name__, self.n_features, self.n_states))\n\n def joint_feature(self, x, y, y_true=None):\n \"\"\"Compute joint feature vector of x and y.\n\n Feature representation joint_feature, such that the energy of the configuration\n (x, y) and a weight vector w is given by np.dot(w, joint_feature(x, y)).\n\n Parameters\n ----------\n x : nd-array, shape=(n_features,)\n Input sample features.\n\n y : int\n Class label. Between 0 and n_classes.\n\n y_true : int\n True class label. 
Needed if rescale_C==True.\n\n\n        Returns\n        -------\n        p : ndarray, shape (size_joint_feature,)\n            Feature vector associated with state (x, y).\n        \"\"\"\n        # put feature vector in the place of the weights corresponding to y\n        result = np.zeros((self.n_states, self.n_features))\n        result[y, :] = x\n        if self.rescale_C:\n            if y_true is None:\n                raise ValueError(\"rescale_C is true, but no y_true was passed\"\n                                 \" to joint_feature.\")\n            result *= self.class_weight[y_true]\n\n        return result.ravel()\n\n    def batch_joint_feature(self, X, Y, Y_true=None):\n        result = np.zeros((self.n_states, self.n_features))\n        if self.rescale_C:\n            if Y_true is None:\n                raise ValueError(\"rescale_C is true, but no y_true was passed\"\n                                 \" to joint_feature.\")\n            for l in range(self.n_states):\n                mask = Y == l\n                class_weight = self.class_weight[Y_true[mask]][:, np.newaxis]\n                result[l, :] = np.sum(X[mask, :] * class_weight, axis=0)\n        else:\n            # if we don't have class weights, we can use our efficient\n            # implementation\n            assert(X.shape[0] == Y.shape[0])\n            assert(X.shape[1] == self.n_features)\n            crammer_singer_joint_feature(X, Y, result)\n        return result.ravel()\n\n    def mean_joint_feature(self, x, q):\n        \"\"\"Compute mean joint feature vector of x and y.\n\n        Feature representation joint_feature, such that the energy of the configuration\n        (x, y) and a weight vector w is given by np.dot(w, joint_feature(x, y)).\n\n        Parameters\n        ----------\n        x : nd-array, shape=(n_features,)\n            Input sample features.\n\n        q : ndarray, shape=(n_classes)\n            Probability vector\n\n\n        Returns\n        -------\n        p : ndarray, shape (size_joint_feature,)\n            Mean feature vector.\n        \"\"\"\n        # put feature vector in the place of the weights corresponding to y\n        result = np.ones((self.n_states, self.n_features))\n        result = result * x.reshape(1, -1)\n        result = result * q.reshape(-1, 1)\n        if self.rescale_C:\n            raise NotImplementedError\n        return result.ravel()\n\n    def batch_mean_joint_feature(self, X, Q, Y_true=None):\n        \"\"\"\n        Returns\n        -------\n        result: ndarray, shape=(n_states * n_features)\n            Sum of mean joint features for all data points in the batch\n        \"\"\"\n        result = np.zeros(self.size_joint_feature)\n        if self.rescale_C:\n            raise NotImplementedError\n        else:\n            # if we don't have class weights, we can use our efficient\n            # implementation\n            assert(X.shape[0] == Q.shape[0])\n            assert(X.shape[1] == self.n_features)\n            for i in range(X.shape[0]):\n                result += self.mean_joint_feature(X[i], Q[i])\n        return result\n\n    def inference(self, x, w, relaxed=None, return_energy=False):\n        \"\"\"Inference for x using parameters w.\n\n        Finds argmax_y np.dot(w, joint_feature(x, y)), i.e. 
best possible prediction.\n\n        For an unstructured multi-class model (this model), this\n        can easily be done by enumerating all possible y.\n\n        Parameters\n        ----------\n        x : ndarray, shape (n_features,)\n            Input sample features.\n\n        w : ndarray, shape=(size_joint_feature,)\n            Parameters of the SVM.\n\n        relaxed : ignored\n\n        Returns\n        -------\n        y_pred : int\n            Predicted class label.\n        \"\"\"\n        self.inference_calls += 1\n        scores = np.dot(w.reshape(self.n_states, -1), x)\n        if return_energy:\n            return np.argmax(scores), np.max(scores)\n        return np.argmax(scores)\n\n    def batch_inference(self, X, w, relaxed=None):\n        scores = np.dot(X, w.reshape(self.n_states, -1).T)\n        return np.argmax(scores, axis=1)\n\n    def oracle(self, scores):\n        \"\"\"Computes the result of the oracle q_hat and the resulting energy given the scores.\n\n        Returns\n        -------\n        q_hat: ndarray, shape (n_classes)\n            Probability with highest error.\n        \"\"\"\n        ind, en = np.argsort(scores)[::-1], np.sort(scores)[::-1]\n        en = [en[:(j+1)].sum() / (j + 1) - 1 / (j + 1) for j in range(self.n_states)]\n        # en = [en[j] / (j + 1) - 1 / (j + 1) for j in range(self.n_states)]\n        maxx = np.argmax(en)\n        inds = ind[:(maxx + 1)]\n        val = np.sum(en[:(maxx + 1)]) + 1\n        q_hat = np.zeros_like(scores)\n        q_hat[inds] = 1 / (maxx.astype(float) + 1.0)\n        # pdb.set_trace()\n        return q_hat, val\n\n    def general_loss_oracle(self, scores, mu0, \n            max_iter = 10, check_dual_gap=False, eta=1.):\n        scores = np.expand_dims(scores, 1)\n        nu = np.expand_dims(mu0, 1)\n        y = np.argmin(np.dot(self.Loss, mu0), 0)\n        p = np.expand_dims(np.zeros(self.n_states), 1)\n        p[y] = 1\n        # pdb.set_trace()\n        # nu = np.ones((self.n_states, 1)) / self.n_states\n        # eta predicted by theory\n        # eta = 1 / (2 * np.linalg.norm(self.Loss, ord=2) * np.log(self.n_states))\n        mu_avg = np.zeros((self.n_states, 1))\n        q_avg = np.zeros((self.n_states, 1))\n        for k in range(max_iter):\n            q = sp.softmax(-eta * np.dot(self.Loss, nu) + np.log(p + 1e-3) + 1)\n            mu = sp.softmax(eta * np.dot(self.Loss.T, p) + eta * scores + np.log(nu + 1e-3) + 1)\n            p = sp.softmax(-eta * np.dot(self.Loss, mu) + np.log(q + 1e-3) + 1)\n            nu = sp.softmax(eta * np.dot(self.Loss.T, q) + eta * scores + np.log(mu + 1e-3) + 1)\n            q_avg = k * q_avg / (k+1) + q / (k+1) \n            mu_avg = k * mu_avg / (k+1) + mu / (k+1) \n            if k == (max_iter - 1):\n                m1 = np.max(np.dot(self.Loss.T, q_avg) + scores)\n                m2 = np.min(np.dot(self.Loss, mu_avg) \n                    + np.dot(scores.T, mu_avg))\n                dual_gap = m1 - m2\n                # print(\"dual gap %f\", dual_gap)\n        en = np.dot(q_avg.T, np.dot(self.Loss, mu_avg) + np.dot(scores.T, mu_avg))\n        return mu_avg.ravel(), np.asscalar(en), dual_gap\n\n    def loss_augmented_inference(self, x, mu, w, relaxed=None,\n                                 return_energy=False):\n        \"\"\"Loss-augmented inference for x and y using parameters w.\n\n        Minimizes over q_hat:\n            np.dot(np.dot(joint_feature(x, -), q_hat), w) + min_y np.dot(loss(y, -), q_hat))\n\n        Parameters\n        ----------\n        x : ndarray, shape (n_features,)\n            Unary evidence / input to augment.\n\n        y : int\n            Ground truth labeling relative to which the loss\n            will be measured. 
NOT USED IN THIS GENERALIZED CASE!\n\n        w : ndarray, shape (size_joint_feature,)\n            Weights that will be used for inference.\n\n        Returns\n        -------\n        q_hat : ndarray, shape (n_classes)\n            Probability with highest error\n        \"\"\"\n        self.inference_calls += 1\n        scores = np.dot(w.reshape(self.n_states, -1), x)\n        # other_classes = np.arange(self.n_states) != y\n        # else:\n        #     scores[other_classes] += self.class_weight[y]\n        # q_hat2, en1 = self.oracle(scores)\n        q_hat2, en2, err_oracle = self.general_loss_oracle(scores, mu)\n        # print(\"q_hat exact\", q_hat1)\n        # print(\"q_hat app\", q_hat2)\n        # print(\"en1 %f en2 %f\", (en1, en2))\n        # pdb.set_trace()\n        err_oracle = 0\n        return q_hat2, err_oracle\n\n\n    def batch_loss_augmented_inference(self, X, mu_hats, w, relaxed=None):\n        \"\"\"\n        Returns\n        -------\n        Q_hats: ndarray, shape (batch_size, n_classes)\n            Matrix of Probabilities with highest error.\n        \"\"\"\n        scores = np.dot(X, w.reshape(self.n_states, -1).T)\n        Q_hats = np.zeros((scores.shape[0], self.n_states))\n        for j in range(scores.shape[0]):\n            # q_hat, _ = self.oracle(scores[j])\n            q_hat, _ = self.loss_augmented_inference(X[j], mu_hats[j], w)\n            Q_hats[j] = q_hat\n        return Q_hats\n\n    def loss(self, y, y_hat):\n        return self.Loss[y, y_hat]\n        # return self.class_weight[y] * (y != y_hat)\n\n    def batch_loss(self, Y, Y_hat):\n        losses = [self.loss(Y[i], Y_hat[i]) for i in range(Y.shape[0])]\n        return np.array(losses)\n        # return self.class_weight[Y] * (Y != Y_hat)\n\n    def cond_loss(self, y, q):\n        cond_loss = np.dot(self.Loss, np.expand_dims(q, 1))\n        return cond_loss[y]\n        # return self.class_weight[y] * (1 - q[y])\n\n    def batch_cond_loss(self, Y, Q):\n        # not implemented for class_weight different\n        cond_losses = [self.cond_loss(Y[i], Q[i]) for i in range(Y.shape[0])]\n        return np.array(cond_losses)\n        # return self.class_weight[Y] * (1 - Q[np.arange(0, Y.shape[0]), Y.astype(int)])\n        # return 1 - Q[np.arange(0, Y.shape[0]), Y.astype(int)]\n\n    def Bayes_risk(self, q):\n        cond_loss = np.dot(self.Loss, np.expand_dims(q, 1))\n        opt = np.min(cond_loss)\n        return opt\n\n    def batch_Bayes_risk(self, Q):\n        bayes_risks = [self.Bayes_risk(Q[i]) for i in range(Q.shape[0])]\n        return np.array(bayes_risks)\n","sub_path":"pystruct/models/generalized_unstructured_svm.py","file_name":"generalized_unstructured_svm.py","file_ext":"py","file_size_in_byte":12907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"616526374","text":"# Leetcode 90. 
Subsets II\nclass Solution:\n def subsetsWithDup(self, nums):\n res = []\n nums.sort()\n def dfs(idx, path):\n res.append(path)\n for i in range(idx, len(nums)):\n if i > idx and nums[i] == nums[i-1]:\n continue\n dfs(i+1, path+[nums[i]])\n dfs(0, [])\n return res\nRun = Solution()\nRun.subsetsWithDup([1,2,2])","sub_path":"Questions/Permutations/Subsets_II.py","file_name":"Subsets_II.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240896685","text":"from django.shortcuts import render, get_object_or_404 , redirect\nfrom django.http import Http404\n# Create your views here.\nfrom .models import BlogPost\nfrom .forms import BlogPostModelForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\n\n\n\n\ndef blog_post_detail_page(request,slug):\n\n\t# querySet = BlogPost.objects.filter(slug=slug)\n\t# if querySet.count() == 0 :\n\t# \traise Http404\n\t# else:\n\t# \tobj = querySet.first()\n\t#obj = querySet.first()\n\tprint(\"Django Says\", request.method, request.path, request.user)\n\tobj = get_object_or_404(BlogPost,slug=slug)\n\ttemplate_name = 'blog/detail.html'\n\tcontext= {\"object\":obj}\n\treturn render(request, template_name, context)\n\n\ndef blog_post_list_view(request):\n\t#qs=BlogPost.objects.filter(title__icontains='hello')\n\tqs=BlogPost.objects.all().published()\n\tif request.user.is_authenticated:\n\t\tmy_qs = BlogPost.objects.filter(user=request.user)\n\t\tqs=(qs| my_qs).distinct()\n\ttemplate_name ='blog/list.html'\n\tcontext={\"object_list\":qs}\n\treturn render(request,template_name,context)\n\n\n\ndef blog_post_detail_view(request,slug):\n\tobj = get_object_or_404(BlogPost,slug=slug)\n\ttemplate_name ='blog/detail.html'\n\tcontext={\"object\":obj}\n\treturn render(request,template_name,context)\n\n@staff_member_required\n#@login_required\ndef blog_post_create_view(request):\n\t# if not request.user.is_authenticated:\n\t# \treturn render(request,\"not-a-user.html\",{})\n\tform = BlogPostModelForm(request.POST or None, request.FILES or None )\n\tif form.is_valid():\n\t\tobj=form.save(commit=False)\n\t\tobj.user=request.user\n\t\tobj.save()\n\t\t # print(form.cleaned_data)\n\t\t # title = form.cleaned_data['title']\n\t\t #obj=BlogPost.objects.create(**form.cleaned_data)\n\t\tform = BlogPostModelForm()\n\ttemplate_name ='form.html'\n\tcontext={'form':form}\n\treturn render(request,template_name,context)\n\n\ndef blog_post_update_view(request, slug):\n\tobj = get_object_or_404(BlogPost,slug=slug)\n\tform = BlogPostModelForm(request.POST or None, instance = obj)\n\tif form.is_valid():\n\t\tform.save()\n\ttemplate_name ='form.html'\n\tcontext={'form':form,\"title\":f\"Update {obj.title}\"}\n\treturn render(request,template_name,context)\n\n\ndef blog_post_delete_view(request, slug):\n\tobj = get_object_or_404(BlogPost,slug=slug)\n\ttemplate_name ='blog/delete.html'\n\tif request.method == 'POST':\n\t\tobj.delete()\n\t\treturn redirect(\"/blog\")\n\tcontext={\"object\":obj}\n\treturn render(request,template_name,context)","sub_path":"cfeproj/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325901037","text":"#!/Users/lyjsmac/opt/anaconda3/bin/python3.8\n# -*- encoding: utf-8 -*-\n'''\n@File : 爬楼梯.py\n@Time : 2021/1/17 11:08 下午\n@Author : fancycarp\n@Contact : 
woshiliyujian@gmail.com\n@Desc : None\n'''\n\n# here put the import lib\nclass Solution(object):\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 1:\n return n\n\n x = 0\n y = 1\n z = 1\n for i in range(2, n + 1):\n x, y = y, z\n z = x + y\n return z","sub_path":"Week_07/爬楼梯.py","file_name":"爬楼梯.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460814485","text":"#!/usr/bin/env python\nimport os,sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport argparse\n\nfrom multiagent.environment import MultiAgentEnv\nfrom multiagent.policy import InteractivePolicy\nimport multiagent.scenarios as scenarios\nfrom multiagent.speaker_listener import SpeakerListenerScenario\nimport copy\nimport numpy as np\nfrom multiagent.dqn.grid_env import Env\nfrom multiagent.dqn.dqn_agent import DQNAgent\nfrom multiagent.dqn.rule_based_policy import RuleBasedPolicy\nfrom multiagent.dqn.memory import Memory\n\nimport pylab\nfrom multiagent.grid.distribution import Distribution\nimport logging\n\nTOTAL_EPISODES = 15000\n\nif __name__ == \"__main__\":\n max_agent_count = 50\n max_victim_count = 50\n\n info = {\n \"env\": {\"Ny\": 5,\n \"Nx\": 5,\n \"Unit\": 80\n },\n \"agent\": {\"policy_mode\": \"epsgreedy\", # \"epsgreedy\", \"softmax\"\n \"eps\": 1.0,\n \"eps_decay\": 2.0 * np.log(10.0) / TOTAL_EPISODES},\n \"brain\": {\"discount\": 0.99,\n \"learning_rate\": 0.9},\n \"memory\": {}\n }\n\n env = Env(max_agent_count, max_victim_count, info)\n\n agent_count = 2\n victim_count = 2\n\n distribution = Distribution()\n\n volunteer_distribution = distribution.get_distribution_of_volunteers()\n agent_count = len(volunteer_distribution)\n\n for i in range(agent_count):\n agent = DQNAgent(actions=list(range(env.n_actions)), agent_id=i, env=env, options={'distributed': False})\n row_col = volunteer_distribution[i]\n if len(row_col) != 2:\n raise Exception('Invalid volunteer position')\n\n env.add_agent_at_row_col(agent, row_col[0], row_col[1])\n\n victim_distribution = distribution.get_distribution_of_vitims()\n victim_count = len(victim_distribution)\n for i in range(victim_count):\n row_col = victim_distribution[i]\n if len(row_col) != 3:\n raise Exception('Invalid victim position')\n\n env.add_victim_at_row_col(row_col[0], row_col[1], row_col[2])\n\n env.pack_canvas()\n\n memory = Memory(info)\n policy = RuleBasedPolicy(env, info)\n logger = Env.setup_custom_logger(\"app\", logging.INFO)\n q_table_logger = Env.setup_custom_logger(\"qtable\", logging.INFO, 'q_table.log')\n global_step = 0\n episodes = []\n scores = []\n episode_time_steps = []\n\n for episode in range(TOTAL_EPISODES):\n # state_n is position of each agent {agent_0: [r1, c1], agent_1: [r2, c2]}\n env.reset_n()\n counter = 0\n cumulative_reward = 0\n score = 0\n\n episode_time_step = 0\n\n while True:\n env.render()\n done = False\n # reward_n = np.zeros(agent_count)\n counter = counter + 1\n # take action and proceed one step in the environment\n global_step += 1\n episode_time_step += 1\n # next_state_n = copy.deepcopy(state_n)\n state_n = env.current_state()\n action_n = policy.get_action_n(state_n, episode=episode)\n next_state_n = []\n reward_n = []\n done_n = []\n\n for i in range(agent_count):\n agent = env.get_agent(i)\n action = action_n[i]\n state = state_n[i]\n next_state, reward, done = env.agent_step(agent, action)\n\n next_state_n.append(next_state)\n reward_n.append(reward)\n done_n.append(done)\n\n 
if state[0] != next_state[0] or state[1] != next_state[1]:\n agent.set_last_action(action)\n\n cumulative_reward += reward\n score += reward\n\n # action_n.append(action)\n\n policy.learn(state_n, action_n, reward_n, next_state_n, episode)\n\n if env.is_terminated() or sum(done_n) > 0:\n done = True\n\n # logger.info(\"state=\" + str(state_n) + \"; action=\" + str(action_n) + \"; reward=\" + str(\n # sum(reward_n)) + \"; next_state=\" + str(next_state_n))\n\n # state_n = copy.deepcopy(next_state_n)\n env.print_value_all(RuleBasedPolicy.Q_TABLE)\n\n # if episode ends, then break\n if done:\n scores.append(score)\n episodes.append(episode)\n episode_time_steps.append(episode_time_step)\n\n # print(\"episode:\", episode, \" score:\", score, \" episode time_step:\", episode_time_step, \" global time:\", global_step)\n\n logger.info(\"episode:\" + str(episode) + \" score:\" + str(score) + \" episode time_step:\" + str(episode_time_step) + \" global time:\" + str(global_step))\n break\n\n # Update model when episode finishes\n # policy.update(memory, env)\n if episode % 10 == 0:\n pylab.figure(1)\n pylab.plot(episodes, scores, 'b')\n pylab.savefig(\"./save_graph/q_policy_score.png\")\n\n pylab.figure(2)\n pylab.plot(episodes, episode_time_steps, 'b')\n pylab.savefig(\"./save_graph/q_policy_time_step.png\")\n\n # for log_r in policy.get_qtable():\n # q_table_logger.info(log_r)\n\n # Clear memory for next episode\n # memory.clear_memory()","sub_path":"multiagent/dqn/run_ruled_base.py","file_name":"run_ruled_base.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308352707","text":"import requests\nfrom urllib.parse import quote\n\nfrom lastfmhistory.track import Track\n\nclass APIWrapper():\n\t\"\"\"Send API calls for determining scrobbles\"\"\"\n\t\n\tdef __init__(self, api_key, username):\n\t\t\"\"\"Initialize API credentials and URL\"\"\"\n\t\tself.username = username\n\t\tself.api_key = api_key\n\t\tself.base_url = 'http://ws.audioscrobbler.com/2.0/?'\n\t\t\n\tdef get_all_artists(self):\n\t\t\"\"\"Get all played artists for account\"\"\"\t\t\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'limit': '1000',\n\t\t\t'method': 'library.getartists',\n\t\t\t'user': self.username,\n\t\t\t}\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\tartists_list = response['artists']['artist']\n\t\t\n\t\t# A single request is limited to the first 1000 artists,\n\t\t# so additional calls may be needed to populate entire list\n\t\ttotal_info = response['artists']['@attr']\n\t\twhile int(total_info['page']) < int(total_info['totalPages']):\n\t\t\trequest_params['page'] = str(int(total_info['page'])+1)\n\t\t\tresponse = self.send_api_request(request_params)\n\t\t\tartists_list += response['artists']['artist']\n\t\t\ttotal_info = response['artists']['@attr']\n\t\t\n\t\treturn artists_list\n\t\t\n\tdef get_all_artist_scrobbles(self, artist):\n\t\t\"\"\"Get all scrobbles for an individual artist\"\"\"\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'artist': artist,\n\t\t\t'limit': '200',\n\t\t\t'method': 'user.getartisttracks',\n\t\t\t'user': self.username,\n\t\t\t}\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\ttracks_list = response['artisttracks']['track']\n\t\t\n\t\t# A single request is limited to the first 200 tracks,\n\t\t# so additional calls may be needed to populate entire list\n\t\twhile len(response['artisttracks']['track']) > 
0:\n\t\t\tcurrent_page = response['artisttracks']['@attr']['page']\n\t\t\trequest_params['page'] = str(int(current_page)+1)\n\t\t\tresponse = self.send_api_request(request_params)\n\t\t\ttracks_list += response['artisttracks']['track']\n\t\t\t\n\t\treturn tracks_list\n\t\t\n\tdef get_all_recent_scrobbles(self):\n\t\t\"\"\"Get all recent scrobbles for a user\"\"\"\n\t\tprint('\\n - Searching through all recent scrobble history')\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'limit': '1000',\n\t\t\t'method': 'user.getrecenttracks',\n\t\t\t'user': self.username,\n\t\t\t}\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\ttracks_list = response['recenttracks']['track']\n\t\t\n\t\ttotal_pages = response['recenttracks']['@attr']['totalPages']\n\t\t\n\t\t# A single request is limited to the first 1000 tracks, so\n\t\t# additional calls may be needed to populate entire list\n\t\twhile len(response['recenttracks']['track']) > 0:\n\t\t\tcurrent_page = response['recenttracks']['@attr']['page']\n\t\t\trequest_params['page'] = str(int(current_page)+1)\n\t\t\tresponse = self.send_api_request(request_params)\n\t\t\ttracks_list += response['recenttracks']['track']\n\t\t\tprint(' - Page ' + current_page + ' out of ' + total_pages)\n\t\t\n\t\treturn tracks_list\n\t\t\n\tdef get_all_songs_by_artist(self, artist):\n\t\t\"\"\"Get top 100 songs by given artist\"\"\"\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'artist': artist,\n\t\t\t'limit': '100',\n\t\t\t'method': 'artist.gettoptracks',\n\t\t\t}\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\t# Return top tracks list by artist\n\t\ttop_tracks = response['toptracks']['track']\n\t\treturn top_tracks\n\t\t\n\tdef get_total_artist_scrobbles(self, artist):\n\t\t\"\"\"Get total scrobble count for an artist\"\"\"\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'artist': artist,\n\t\t\t'username': self.username,\n\t\t\t'method': 'artist.getinfo',\n\t\t\t}\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\ttry:\n\t\t\treturn int(response['artist']['stats']['userplaycount'])\n\t\texcept KeyError:\n\t\t\treturn 0\n\t\t\n\tdef get_total_track_scrobbles(self, track):\n\t\t\"\"\"Get total scrobbles for an individual track\"\"\"\n\t\t# Format API call and send request\n\t\trequest_params = {\n\t\t\t'username': self.username,\n\t\t\t'method': 'track.getInfo',\n\t\t\t}\n\t\t\t\n\t\t# Search by mbid if available, otherwise artist and track name\n\t\tif track.mbid:\n\t\t\trequest_params['mbid'] = track.mbid\n\t\telse:\n\t\t\trequest_params['artist'] = track.artist['#text']\n\t\t\trequest_params['track'] = track.song\n\t\t\t\n\t\tresponse = self.send_api_request(request_params)\n\t\t\n\t\t# Return playcount or 0 if it does not exist\n\t\ttry:\n\t\t\treturn int(response['track']['userplaycount'])\n\t\texcept KeyError:\n\t\t\treturn 0\n\t\t\n\tdef format_api_request(self, params_dict):\n\t\t\"\"\"Format API request\"\"\"\n\t\tbase_url = ('http://ws.audioscrobbler.com/2.0/?format=json'\n\t\t\t\t\t'&api_key=' + self.api_key)\n\t\tfor param, value in params_dict.items():\n\t\t\t# Percent encoding\n\t\t\tif param == 'artist':\n\t\t\t\tvalue = quote(value)\n\t\t\telif param == 'album':\n\t\t\t\tvalue = quote(value)\n\t\t\t\t\n\t\t\tbase_url += '&' + param + '=' + value\n\t\treturn base_url\n\t\t\n\tdef format_artist_tracks(self, unform_list):\n\t\t\"\"\"Format tracks to a dictionary with a list of plays\"\"\"\n\t\tform_list = []\n\t\t\n\t\t# Consolidate list of 
individual tracks and plays into track\n\t\t# objects with a play attribute\n\t\tfor track_info in unform_list:\n\t\t\t\n\t\t\t# Find track in list or None if not found\n\t\t\tname = track_info['name']\n\t\t\ttrack = next((x for x in form_list if x.song == name), None)\n\t\t\t\n\t\t\t# If track already in list add a new play, otherwise create\n\t\t\t# a new track\n\t\t\tif track:\n\t\t\t\ttrack.add_play(track_info['date'])\n\t\t\telse:\n\t\t\t\tartist = track_info['artist']\n\t\t\t\talbum = track_info['album']\n\t\t\t\tmbid = track_info['mbid']\n\t\t\t\tsong = track_info['name']\n\t\t\t\t\n\t\t\t\tnew_track = Track(artist, album, song, mbid)\n\t\t\t\tnew_track.add_play(track_info['date'])\n\t\t\t\t\n\t\t\t\tform_list.append(new_track)\n\t\t\t\t\n\t\treturn form_list\n\t\t\n\tdef send_api_request(self, params_dict):\n\t\t\"\"\"Send API request with given parameters\"\"\"\n\t\trequest_url = self.format_api_request(params_dict)\n\t\tr = requests.get(request_url)\n\t\tif r.status_code != 200:\n\t\t\tprint(\"Error with request\")\n\t\treturn r.json()\n","sub_path":"lastfmhistory/api_wrapper.py","file_name":"api_wrapper.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496655228","text":"\"\"\"application URLs\n\"\"\"\nfrom django.urls import path, include\nfrom rest_framework import routers\nfrom .views import HomePage, ClosestPointsViewSet\n\n# define the api router\nrouter = routers.DefaultRouter()\nrouter.register(r'closestpoints', ClosestPointsViewSet)\n\n# define the urlpatterns\napp_name = 'application'\nurlpatterns = [\n path('', HomePage.as_view(), name='homepage'), # home page\n path('api/', include((router.urls, 'application'), namespace='api')), # api\n]\n","sub_path":"application/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176732541","text":"import argparse\nimport sqlite3\nfrom visualization_utils import *\nfrom cvpr_utils import count_cvpr_publications_for_year\n\n\ndef cvpr_publications_by_years(time, show=False, save=\"\"):\n publications_timeline = {2019: 1300}\n total = 0\n for year in time:\n try:\n url = 'http://openaccess.thecvf.com/CVPR%s.py' % year\n publications_timeline[year] = count_cvpr_publications_for_year(url)\n print(\"In %s there was %s publications\" % (year, publications_timeline[year]))\n total += publications_timeline[year]\n except:\n print(\"For %s year there was not website\" % year)\n create_publications_timeline_plot(publications_timeline, \"CVPR Conference\", show=show, save=save)\n\n\ndef publications_by_categories(cursor, show=False, save=\"\"):\n sql_all = \"SELECT count(*) FROM PublicationsByCategories\"\n cursor.execute(sql_all)\n total_value = cursor.fetchall()[0][0]\n sql_by_category = \"SELECT Category, count(*) FROM PublicationsByCategories GROUP BY Category\"\n cursor.execute(sql_by_category)\n by_categories_value = cursor.fetchall()\n normalized_categories_count = [(category, (num * 100) / total_value) for category, num in by_categories_value]\n create_publications_categories_plot(normalized_categories_count, \"Categories\", show=show, save=save)\n return\n\n\ndef categories_by_years(cursor, show=False, save=\"\"):\n sql_category = \"SELECT Category FROM PublicationsByCategories GROUP BY Category\"\n sql_category_by_years = \"SELECT Publications.Year, count(*) \" \\\n \"FROM PublicationsByCategories JOIN 
Publications ON PublicationsByCategories.Publication = Publications.Title \" \\\n \"WHERE PublicationsByCategories.Category = ? GROUP BY Publications.Year\"\n cursor.execute(sql_category)\n list_categories = cursor.fetchall()\n categories = {}\n for category in list_categories:\n cursor.execute(sql_category_by_years, category)\n category_by_years = cursor.fetchall()\n categories[category[0]] = category_by_years\n print(category[0], category_by_years)\n create_categories_by_years_plot(categories, \"Categories dynamics by years\", show=show, save=save)\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Sort publications by categories')\n parser.add_argument('db', help='Path to database file')\n args = parser.parse_args()\n conn = sqlite3.connect(args.db)\n cursor = conn.cursor()\n\n # Build \"num publications by years\" plot\n time = range(2014, 2019)\n #cvpr_publications_by_years(time, show=True)\n\n # Build \"publications by categories\" plot\n publications_by_categories(cursor, show=True)\n\n # Build \"categories by years\" plot\n categories_by_years(cursor, show=True)\n\n conn.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596505326","text":"import pytest\nfrom flaky import flaky\nimport datetime\nfrom datetime import *\n\nfrom telegram import Update, User, Message, Chat, InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegram.callbackquery import CallbackQuery\nfrom ..owepaybot import newOrderSplitEvenly\nfrom ..HELPME.bot_sql_integration import *\nfrom ..HELPME.helperFunctions import *\n\n@pytest.fixture(scope='class')\ndef orderUpdate():\n return Update(\n update_id= 123,\n callback_query=CallbackQuery(\n id='123', \n message=Message(\n 234, \n text='testOrderName', \n chat=Chat(\n 345, \n 'groupname'\n ),\n from_user=User(\n 456, \n 'userfirstname', \n False,\n username='userusername'\n ),\n date=datetime.now()\n ),\n chat_instance='12345',\n from_user=User(\n 456, \n 'userfirstname', \n False,\n username='userusername'\n )\n )\n )\n\nclass tempContext:\n class bot:\n def editMessageText(chat_id, message_id, text):\n return Message(message_id, datetime.now(), chat=Chat(chat_id, 'groupname'), text=text)\n\n\nclass TestNewOrderSplitEvenly:\n\n @flaky(3, 1)\n def test_newOrderSplitEvenly(self, orderUpdate):\n \n massDelete(\"Users\")\n massDelete(\"Orders\")\n massDelete(\"TelegramGroups\")\n massDelete(\"UserGroupRelational\")\n\n assert addGroup((345, 'groupname')) == \"Group groupname 345 inserted\"\n assert addUser((456, 'userusername', 0, 'userfirstname')) == \"User 456 inserted\"\n assert addUserToGroup(456, 345) == \"User 456 added to Group 345\"\n assert updateUserStateNewOrder(456, 345) == \"User 456 in Group 345 has state 'neworder'\"\n assert updateMessageIDToUserGroupRelational(456, 345, 234) == \"User 456 in Group 345 has MessageID 234\"\n message = newOrderSplitEvenly(orderUpdate, tempContext)\n assert isinstance(message, Message)\n assert userStateSplitEvenly(456, 345) == True\n\n assert message.chat_id == 345\n assert message.message_id == 234\n assert message.text == \"Please send in the amount to split!\"\n\n massDelete(\"Users\")\n massDelete(\"Orders\")\n massDelete(\"TelegramGroups\")\n 
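# wipe the user-group relational join table last so the next test run starts from a clean database\n        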
massDelete(\"UserGroupRelational\")\n","sub_path":"test/test_newOrderSplitEvenly.py","file_name":"test_newOrderSplitEvenly.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225677629","text":"import os\nimport sys\nfrom hashlib import sha256\nfrom cgi import parse_qs\n\nendpoints = {}\n\ndef expose(endpoint):\n def decorator_maker(func):\n endpoints[endpoint] = func\n return func\n return decorator_maker\n\ndef application(env, start):\n if env['PATH_INFO'] in endpoints:\n return endpoints[env['PATH_INFO']](env, start)\n return error_404(env, start)\n\ndef error(env, start, status, message=None):\n if not message:\n message = status\n response_headers = [('Content-type', 'text/plain'),\n ('Content-Length', str(len(message)))]\n start(status, response_headers)\n return [message]\n\ndef error_404(env, start):\n return error(env, start, '404 Not Found', \n 'Not Found: %s' % env['PATH_INFO'])\n\n@expose('/update-site')\ndef update_site(env, start):\n status = '200 OK'\n output = 'TODO: Finish this!'\n response_headers = [('Content-type', 'text/plain'),\n ('Content-Length', str(len(output)))]\n start(status, response_headers)\n return [output]\n\ndef notify_recruitment(env, form_id, form_data):\n from smtplib import SMTP\n from pprint import pformat\n\n server = SMTP(env['smtp.server'])\n\n if 'smtp.password' in env:\n server.login(env['smtp.username'], env['smtp.password'])\n\n fromaddr = env['smtp.username']\n toaddrs = env['recruitment.notify'].split()\n\n submitter = repr(form_data.get('email', ['UNKNOWN'])[0])\n\n msg = (\"Subject: Recruitment form submitted by %s\\r\\n\"\n \"From: %s\\r\\n\"\n \"To: %s\\r\\n\"\n \"\\r\\n\"\n \"This is form ID %s.\\r\\n\\r\\n%s\" % (submitter,\n fromaddr,\n toaddrs[0],\n form_id,\n pformat(form_data)))\n\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()\n\n@expose('/recruit-me')\ndef recruit_me(env, start):\n MAX_SIZE = int(env['recruitment.max_size'])\n TURING_ANSWER = env['recruitment.turing_answer']\n OUTPUT_DIR = env['recruitment.output_dir']\n\n length = int(env.get('CONTENT_LENGTH', '0'))\n\n if env['REQUEST_METHOD'] != 'POST':\n return error(env, start, '405 Method Not Allowed')\n\n if length == 0:\n return error(env, start, '411 Length Required')\n\n if length > MAX_SIZE:\n return error(env, start, '413 Request Entity Too Large')\n\n raw_input = env['wsgi.input'].read(length)\n input = parse_qs(raw_input)\n\n if not ('turingAnswer' in input and\n input['turingAnswer'][0].lower().strip() == TURING_ANSWER):\n return error(env, start, '403 Forbidden',\n 'No spam-bots allowed!')\n\n status = '200 OK'\n filename = sha256(raw_input).hexdigest()\n f = open(os.path.join(OUTPUT_DIR, filename), 'w')\n f.write(raw_input)\n f.close()\n\n if 'recruitment.notify' in env:\n notify_recruitment(env, form_id=filename, form_data=input)\n\n output = 'Stored recruitment form with ID %s.' 
% filename\n\n response_headers = [('Content-type', 'text/plain'),\n ('Content-Length', str(len(output)))]\n start(status, response_headers)\n\n return [output]\n","sub_path":"wsgi-scripts/hackasaurus_dot_org.py","file_name":"hackasaurus_dot_org.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289236052","text":"import numpy\nimport dill as pickle\nfrom abstract.mcmc_sampler import mcmc_sampler, mcmc_sampler_settings_dict\nfrom adapt_util.tune_param_classes.tune_param_setting_util import *\nfrom experiments.experiment_obj import tuneinput_class\nfrom distributions.response_model import V_response_model\nfrom experiments.correctdist_experiments.prototype import check_mean_var_stan\nfrom abstract.util import wrap_V_class_with_input_data\nfrom input_data.convert_data_to_dict import get_data_dict\nfrom post_processing.get_diagnostics import energy_diagnostics,process_diagnostics,get_params_mcmc_tensor,get_short_diagnostics\n\nmcmc_meta = mcmc_sampler_settings_dict(mcmc_id=0,samples_per_chain=5000,num_chains=4,num_cpu=4,thin=1,tune_l_per_chain=1000,\n warmup_per_chain=1100,is_float=False,isstore_to_disk=False,allow_restart=False)\n\ninput_data = get_data_dict(\"1-PL\",standardize_predictor=False)\nV_generator = wrap_V_class_with_input_data(class_constructor=V_response_model,input_data=input_data)\n\ninput_dict = {\"v_fun\":[V_generator],\"epsilon\":[\"dual\"],\"second_order\":[False],\"cov\":[\"adapt\"],\n \"metric_name\":[\"diag_e\"],\"dynamic\":[True],\"windowed\":[False],\"criterion\":[\"gnuts\"]}\n\nep_dual_metadata_argument = {\"name\":\"epsilon\",\"target\":0.95,\"gamma\":0.05,\"t_0\":10,\n \"kappa\":0.75,\"obj_fun\":\"accept_rate\",\"par_type\":\"fast\"}\nadapt_cov_arguments = [adapt_cov_default_arguments(par_type=\"slow\",dim=V_generator(precision_type=\"torch.DoubleTensor\").get_model_dim())]\ndual_args_list = [ep_dual_metadata_argument]\nother_arguments = other_default_arguments()\ntune_settings_dict = tuning_settings(dual_args_list,[],adapt_cov_arguments,other_arguments)\ntune_dict = tuneinput_class(input_dict).singleton_tune_dict()\nsampler1 = mcmc_sampler(tune_dict=tune_dict,mcmc_settings_dict=mcmc_meta,tune_settings_dict=tune_settings_dict)\nstore_name = 'one_pl_sampler.pkl'\nsampled = False\nif sampled:\n sampler1 = pickle.load(open(store_name, 'rb'))\nelse:\n sampler1.start_sampling()\n with open(store_name, 'wb') as f:\n pickle.dump(sampler1, f)\nprint(\"overall diagnostics\")\nfull_mcmc_tensor = sampler1.get_samples(permuted=False)\n\nprint(get_short_diagnostics(full_mcmc_tensor))\n\nout = sampler1.get_diagnostics(permuted=False)\nprint(\"num divergent\")\nprocessed_diag = process_diagnostics(out,name_list=[\"divergent\"])\n\nprint(processed_diag.sum(axis=1))\nprint(\"num hit max tree depth\")\nprocessed_diag = process_diagnostics(out,name_list=[\"hit_max_tree_depth\"])\n\nprint(processed_diag.sum(axis=1))\n\nprint(\"average acceptance rate after warmup\")\nprocessed_diag = process_diagnostics(out,name_list=[\"accept_rate\"])\n\naverage_accept_rate = numpy.mean(processed_diag,axis=1)\n\nprint(average_accept_rate)\n\nprint(\"energy diagnostics\")\nprint(energy_diagnostics(diagnostics_obj=out))\n\n\nmixed_mcmc_tensor = sampler1.get_samples(permuted=True)\nprint(mixed_mcmc_tensor)\n\ntrue_cov = numpy.cov(mixed_mcmc_tensor,rowvar=False)\nsd_vec = numpy.diagonal(true_cov)\n\nprint(\"problem difficulty\")\n\nprint(max(sd_vec)/min(sd_vec)) # val = 
11.5","sub_path":"experiments/problem_difficulty_experiments/one_pl.py","file_name":"one_pl.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"151294130","text":"#!/usr/bin/env python\n\nfrom state_table import state_table\n\nclass tdtables(object):\n\ttdlearner = None\n\n\t# state tables\n\tutilities = None\n\tfrequencies = None\n\n\t# states\n\tlast_game_state = None\n\tthis_game_state = None\n\n\t# states as keys\n\tlast_state = None\n\tthis_state = None\n\n\tdef __init__(self, tdlearner, game_state):\n\t\tself.tdlearner = tdlearner\n\t\tself.this_game_state = game_state\n\n\t\tself.utilities = state_table()\n\t\tself.frequencies = state_table()\n\n\tdef update(self, game_state, board_chunk):\n\t\t# store states\n\t\tself.last_game_state = self.this_game_state\n\t\tself.this_game_state = game_state\n\n\t\t# get states as keys; the key stored on the previous update is\n\t\t# last_state (reading the board twice made last_state and\n\t\t# this_state always identical, which broke the TD update)\n\t\tself.last_state = self.this_state\n\t\tself.this_state = board_chunk.get_state()\n\t\tlast_state = self.last_state\n\t\tthis_state = self.this_state\n\n\t\t# update frequency table\n\t\tself.frequencies.increment(this_state)\n\n\t\t# update utility function\n\t\t# algorithm taken from \"Artificial Intelligence: A Modern Approach\"\n\t\t# second edition by Stuart Russell and Peter Norvig,\n\t\t# page 769, Figure 21.4:\n\t\t# U[s] <- U[s] + alpha(N[s]) * (r + gamma * U[s'] - U[s])\n\t\tif last_state in self.utilities.table:\n\t\t\tself.utilities.table[last_state] = \\\n\t\t\t\tself.utilities.get(last_state) + (\n\t\t\t\t\tself.tdlearner.get_learning_rate(\n\t\t\t\t\t\tself.frequencies.get(last_state)\n\t\t\t\t\t) * (\n\t\t\t\t\t\tself.this_game_state.last_reward + (\n\t\t\t\t\t\t\tself.tdlearner.discount_factor * \\\n\t\t\t\t\t\t\tself.utilities.get(this_state) - \\\n\t\t\t\t\t\t\tself.utilities.get(last_state)\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t)\n\t\telif last_state is not None:\n\t\t\tself.utilities.table[last_state] = self.this_game_state.this_reward\n","sub_path":"tdtables.py","file_name":"tdtables.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"365465527","text":"# There are others in sndhdr that don't have MIME types. :(\n# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??\nimport sndhdr\nfrom io import StringIO  # NOTE: for bytes input on Python 3, io.BytesIO would be needed\n\n# sndhdr name -> MIME subtype map, as defined in CPython's email.mime.audio\n_sndhdr_MIMEmap = {'aifc': 'x-aiff',\n                   'aiff': 'x-aiff',\n                   'au': 'basic',\n                   'wav': 'x-wav',\n                   }\n\ndef _whatsnd(data):\n    \"\"\"Try to identify a sound file type.\n    sndhdr.what() has a pretty cruddy interface, unfortunately. This is why\n    we re-do it here. 
It would be easier to reverse engineer the Unix 'file'\n command and use the standard 'magic' file, as shipped with a modern Unix.\n \"\"\"\n hdr = data[:512]\n fakefile = StringIO(hdr)\n for testfn in sndhdr.tests:\n res = testfn(hdr, fakefile)\n if res is not None:\n return _sndhdr_MIMEmap.get(res[0])\n return None\n","sub_path":"LIVE/dj_demo/mysite/test_segment_base/audio_0.py","file_name":"audio_0.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"318989475","text":"import argparse\nimport os\nimport os.path as op\nimport glob\nimport json\nfrom ipymd.converters import nb_to_markdown, markdown_to_nb\n\ndef _flatten(l):\n return [item for sublist in l for item in sublist]\n\ndef _to_skip(dirname):\n return (op.basename(dirname).startswith('.') or\n op.basename(dirname).startswith('_'))\n\ndef _expand_dirs_to_files(files_or_dirs):\n files = []\n for file_or_dir in files_or_dirs:\n if op.isdir(file_or_dir):\n # Skip dirnames starting with '.'\n if _to_skip(file_or_dir):\n continue\n # Recursively visit the directories and add the files.\n files.extend(_expand_dirs_to_files([op.join(file_or_dir, file)\n for file in os.listdir(file_or_dir)]))\n elif '*' in file_or_dir:\n files.extend(glob.glob(file_or_dir))\n else:\n files.append(file_or_dir)\n return files\n\ndef _file_has_extension(file, extensions):\n if not isinstance(extensions, list):\n extensions = [extensions]\n return any(file.endswith(extension) for extension in extensions)\n\ndef _filter_files_by_extension(files, extensions):\n return [file for file in files if _file_has_extension(file, extensions)]\n\ndef _converted_filename(file, convert_from):\n base, ext = op.splitext(file)\n if convert_from == 'ipynb':\n convert_ext = '.md'\n elif convert_from == 'md':\n convert_ext = '.ipynb'\n return ''.join((base, convert_ext))\n\ndef _read_md(file):\n with open(file, 'r') as f:\n return f.read()\n\ndef _write_md(file, contents):\n with open(file, 'w') as f:\n f.write(contents)\n\ndef _read_nb(file):\n with open(file, 'r') as f:\n return json.load(f)\n\ndef _write_nb(file, contents):\n with open(file, 'w') as f:\n return json.dump(contents, f, indent=2)\n\ndef main():\n parser = argparse.ArgumentParser(description=\n 'Convert between ipynb and md.')\n\n parser.add_argument('files_or_dirs', nargs='+',\n help=('list of ipynb or md files or directories '\n 'to convert'))\n\n parser.add_argument('--from', dest='convert_from', required=True,\n help='either \\'md\\' or \\'ipynb\\'')\n\n parser.add_argument('--type', dest='type',\n help='either markdown (default) or atlas')\n\n parser.add_argument('--overwrite', dest='overwrite', action='store_true',\n help=('overwrite target file if it exists '\n '(false by default)'))\n\n # Parse the CLI arguments.\n args = parser.parse_args()\n files_or_dirs = args.files_or_dirs\n md_type = args.type or 'markdown'\n overwrite = args.overwrite\n convert_from = args.convert_from\n\n # Find all files.\n files = _expand_dirs_to_files(files_or_dirs)\n\n # Filter as a function of --from.\n if convert_from == 'ipynb':\n files = _filter_files_by_extension(files, '.ipynb')\n convert = nb_to_markdown\n read = _read_nb\n write = _write_md\n elif convert_from in ('md', 'markdown'):\n files = _filter_files_by_extension(files, '.md')\n convert = markdown_to_nb\n read = _read_md\n write = _write_nb\n else:\n raise ValueError(\"'from' should be 'ipynb' or 'md'\")\n\n for file in files:\n print(\"Converting 
{0:s}...\".format(file))\n contents = read(file)\n converted = convert(contents)\n file_to = _converted_filename(file, convert_from)\n if op.exists(file_to) and not overwrite:\n print(\"The file already exists, please use --overwrite.\")\n continue\n else:\n write(file_to, converted)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ipymd/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168409200","text":"# -*- encoding: utf-8 -*-\n# @TIME : 2019/10/12 14:37\n# @Author : 成昭炜\n\nimport numpy as np\nimport torchvision\nimport os\nfrom PIL import Image\nimport collections\nimport sys\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\n\nCLASSES = [\"car\", \"bus\", \"bicycle\", \"motorbike\", \"aeroplane\", \"boat\", \"train\", \"chair\", \"sofa\",\n \"diningtable\", \"tvmonitor\", \"bottle\", \"pottedplant\", \"cat\", \"dog\", \"cow\", \"horse\", \"sheep\",\n \"bird\", \"person\"]\n\nCLS2IDX = dict([(cls, idx) for cls, idx in zip(CLASSES, range(len(CLASSES)))])\nIDX2CLS = dict([(idx, cls) for cls, idx in zip(CLASSES, range(len(CLASSES)))])\n\nBBOX = \"bndbox\"\n\nDATASET_YEAR_DICT = {\n \"2007\": {\n \"base_dir\": \"VOCdevkit/VOC2007\",\n },\n \"2012\": {\n \"base_dir\": \"VOCdevkit/VOC2012\",\n }\n}\n\n\ndef handle_voc_target(target):\n\n objects = target[\"annotation\"][\"object\"]\n if isinstance(objects, dict):\n objects = [objects]\n\n values = np.zeros((len(objects), 5), dtype=np.float32)\n\n for idx, obj in enumerate(objects):\n xmin = int(obj[BBOX][\"xmin\"])\n ymin = int(obj[BBOX][\"ymin\"])\n xmax = int(obj[BBOX][\"xmax\"])\n ymax = int(obj[BBOX][\"ymax\"])\n values[idx, :] += np.array([CLS2IDX[obj[\"name\"]], xmin, ymin, xmax, ymax])\n\n return values\n\n\nclass VOCDetection(torchvision.datasets.VisionDataset):\n\n def __init__(self,\n root,\n image_set=\"train\",\n transform=None,\n target_transform=None,\n transforms=None):\n super(VOCDetection, self).__init__(root, transforms, transform, target_transform)\n self.years = [\"2007\", \"2012\"]\n\n base_dirs = [DATASET_YEAR_DICT[year][\"base_dir\"] for year in self.years]\n voc_roots = [os.path.join(self.root, base_dir) for base_dir in base_dirs]\n image_dirs = [os.path.join(voc_root, \"JPEGImages\") for voc_root in voc_roots]\n annotation_dirs = [os.path.join(voc_root, \"Annotations\") for voc_root in voc_roots]\n\n if not os.path.isdir(voc_roots[0]) or \\\n not os.path.isdir(voc_roots[1]):\n raise RuntimeError(\"Dataset not found or corrupted.\")\n\n splits_dirs = [os.path.join(voc_root, \"ImageSets/Main\") for voc_root in voc_roots]\n split_fs = [os.path.join(splits_dir, image_set.rstrip(\"\\n\")+\".txt\")\n for splits_dir in splits_dirs]\n\n if not os.path.exists(split_fs[0]) or not os.path.exists(split_fs[1]):\n raise ValueError(\"Wrong image_set entered!\")\n\n with open(os.path.join(split_fs[0]), \"r\") as f:\n file2007_names = [x.strip() for x in f.readlines()]\n\n with open(os.path.join(split_fs[1]), \"r\") as f:\n file2012_names = [x.strip() for x in f.readlines()]\n\n self.images = []\n self.annotations = []\n for x in file2007_names:\n self.images.append(os.path.join(image_dirs[0], x + \".jpg\"))\n self.annotations.append(os.path.join(annotation_dirs[0], x + \".xml\"))\n\n for x in file2012_names:\n self.images.append(os.path.join(image_dirs[1], x + \".jpg\"))\n self.annotations.append(os.path.join(annotation_dirs[1], x + 
\".xml\"))\n\n assert (len(self.images) == len(self.annotations))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is a dictionary of the XML tree.\n \"\"\"\n img = Image.open(self.images[index]).convert('RGB')\n target = self.parse_voc_xml(\n ET.parse(self.annotations[index]).getroot())\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.images)\n\n def parse_voc_xml(self, node):\n voc_dict = {}\n children = list(node)\n if children:\n def_dic = collections.defaultdict(list)\n for dc in map(self.parse_voc_xml, children):\n for ind, v in dc.items():\n def_dic[ind].append(v)\n voc_dict = {\n node.tag:\n {ind: v[0] if len(v) == 1 else v\n for ind, v in def_dic.items()}\n }\n if node.text:\n text = node.text.strip()\n if not children:\n voc_dict[node.tag] = text\n return voc_dict\n\n\nif __name__ == \"__main__\":\n import sys\n import os\n sys.path.append(os.path.abspath(\"..\"))\n\n root_path = os.path.abspath(\"..\")\n val = VOCDetection(os.path.join(root_path, \"data\"), image_set=\"train\")\n # print(len(val))\n\n import torchvision\n sets1 = torchvision.datasets.VOCDetection(os.path.join(root_path, \"data\"),\n image_set=\"train\",\n year=\"2012\")\n sets2 = torchvision.datasets.VOCDetection(os.path.join(root_path, \"data\"),\n image_set=\"train\",\n year=\"2007\")\n print(len(sets1) + len(sets2))\n\n\n","sub_path":"datasets/voc.py","file_name":"voc.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519632545","text":"import torch\nimport torch.nn as nn\n\nfrom .istft import InverseSTFT\n\nfrom lib.modules.optimizations import *\n\nimport math\n\nfrom collections import OrderedDict\n\ndef str2act(txt, param= None):\n return {\"sigmoid\": nn.Sigmoid(), \"relu\": nn.ReLU(), \"none\": nn.Sequential() , \"lrelu\": nn.LeakyReLU(param), \"selu\": nn.SELU() }[txt.lower()]\n\nclass ConvGLU(nn.Module):\n def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None, batchnorm=False, act=\"sigmoid\", stride = None):\n super().__init__()\n if not padding:\n padding = (kernel_size[0] // 2 , kernel_size[1] // 2 )\n if stride is None:\n self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=padding)\n else:\n self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=padding, stride= stride)\n self.weight = self.conv.weight\n self.bias = self.conv.bias\n\n if batchnorm:\n self.conv = nn.Sequential(\n self.conv,\n nn.BatchNorm2d(out_ch * 2)\n )\n self.sigmoid = str2act(act)\n \n def forward(self, x):\n x = self.conv(x)\n ch = x.shape[1]\n x = x[:, :ch//2, ...] 
* self.sigmoid(x[:, ch//2:, ...])\n return x\n\n\nclass DeGLI_DEQ(nn.Module):\n def __init__(self, writer, wnorm: bool, num_branches: int, base_channels: int, ratio2head: int, fuse_method: str,\n droprate: float, final_multiplier: int, pretrain_steps:int, f_thres:int, b_thres:int, num_layers:int,\n ch_hidden: int, k1:int, k2:int, p2:int):\n\n super().__init__()\n\n ## parameters ---------------------------\n self.writer = writer\n self.wnorm = wnorm\n self.num_branches = num_branches\n self.num_blocks = [1] * self.num_branches\n\nclass DeGLI_DNN(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n k_x1, k_y1, k_x2 ,k_y2 = self.parse(**config)\n\n ch_hidden = self.ch_hidden\n\n self.convglu_first = ConvGLU(6, ch_hidden, kernel_size=(k_y1, k_x1), batchnorm=True, act=self.act)\n self.two_convglus = nn.Sequential(\n ConvGLU(ch_hidden, ch_hidden, batchnorm=True, act=self.act, kernel_size=(k_y2, k_x2)),\n ConvGLU(ch_hidden, ch_hidden, act=self.act, kernel_size=(k_y2, k_x2))\n )\n self.convglu_last = ConvGLU(ch_hidden, ch_hidden , act=self.act)\n\n\n self.conv = nn.Conv2d(ch_hidden, 2, kernel_size=(k_y2, k_x2), padding=( (k_y2-1)//2 , (k_x2-1)//2 ) )\n\n def forward(self, x, mag_replaced, consistent, train_step = -1):\n x = torch.cat([x, mag_replaced, consistent], dim=1)\n x = self.convglu_first(x)\n residual = x\n x = self.two_convglus(x)\n x += residual\n x = self.convglu_last(x)\n x = self.conv(x)\n return x\n\n def parse(self, k_x1: int = 11, k_y1: int = 11, k_x2: int = 7, k_y2: int = 7, num_channel: int=16, act = \"sigmoid\"):\n self.ch_hidden = num_channel\n\n self.act = act.lower()\n return (k_x1, k_y1, k_x2 ,k_y2)\n\n\nclass DeGLI_ED(nn.Module):\n def __init__(self, n_freq, config):\n super().__init__()\n\n self.parse(**config)\n\n layer_specs = [\n 6, # encoder_1: [batch, 128, 128, 1] => [batch, 128, 128, ngf]\n self.widening, # encoder_1: [batch, 128, 128, 1] => [batch, 128, 128, ngf]\n self.widening * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]\n self.widening * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]\n self.widening * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]\n self.widening * 8, # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]\n self.widening * 8, # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]\n self.widening * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]\n self.widening * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]\n ]\n\n layer_specs = layer_specs[0:self.n_layers+1]\n\n\n self.encoders = nn.ModuleList()\n\n conv, pad = self._gen_conv(layer_specs[0] ,layer_specs[1], convGlu = self.convGlu, rounding_needed = True)\n self.encoders.append(nn.Sequential(pad, conv))\n \n last_ch = layer_specs[1]\n\n for i,ch_out in enumerate(layer_specs[2:]):\n d = OrderedDict()\n d['act'] = str2act(self.act,self.lamb)\n gain = math.sqrt(2.0/(1.0+self.lamb**2))\n gain = gain / math.sqrt(2) ## for naive signal propagation with residual w/o bn\n\n conv, pad = self._gen_conv(last_ch ,ch_out, gain = gain, convGlu = self.convGlu, kernel_size = self.k_xy)\n if not pad is None:\n d['pad'] = pad\n d['conv'] = conv\n\n if self.use_batchnorm:\n d['bn'] = nn.BatchNorm2d(ch_out)\n\n encoder_block = nn.Sequential(d)\n self.encoders.append(encoder_block)\n last_ch = ch_out\n\n layer_specs.reverse()\n self.decoders = nn.ModuleList()\n kernel_size = 4\n for i,ch_out in enumerate(layer_specs[1:]):\n\n d = OrderedDict()\n d['act'] = 
str2act(self.act2,self.lamb)\n gain = math.sqrt(2.0/(1.0+self.lamb**2))\n gain = gain / math.sqrt(2) \n\n if i == len(layer_specs)-2:\n kernel_size = 5\n ch_out = 2\n conv = self._gen_deconv(last_ch, ch_out , gain = gain, k= kernel_size)\n d['conv'] = conv\n\n # if i < self.num_dropout and self.droprate > 0.0:\n # d['dropout'] = nn.Dropout(self.droprate)\n\n if self.use_batchnorm and i < self.n_layers-1:\n d['bn'] = nn.BatchNorm2d(ch_out)\n\n decoder_block = nn.Sequential(d)\n self.decoders.append(decoder_block)\n last_ch = ch_out * 2\n\n if self.use_linear_finalizer:\n init_alpha = 0.001\n self.linear_finalizer = nn.Parameter(torch.ones(n_freq) * init_alpha , requires_grad = True)\n\n def parse(self, layers:int, k_x:int, k_y:int, s_x:int, s_y:int, widening:int,use_bn: bool, lamb: float, linear_finalizer:bool, convGlu: bool, act: str, act2 : str, glu_bn:bool) :\n self.n_layers = layers\n self.k_xy = (k_y, k_x)\n self.s_xy = (s_y, s_x)\n self.widening = widening\n self.use_batchnorm = use_bn\n self.lamb = lamb\n self.use_linear_finalizer = linear_finalizer\n self.convGlu = convGlu\n self.act = act\n self.act2 = act2\n self.glu_bn = glu_bn\n def forward(self, x, mag_replaced, consistent, train_step = -1):\n \n \n ##import pdb; pdb.set_trace()\n x = torch.cat([x, mag_replaced, consistent], dim=1)\n\n encoders_output = []\n\n for i,encoder in enumerate(self.encoders):\n x = encoder(x)\n encoders_output.append(x)\n\n for i,decoder in enumerate(self.decoders[:-1]):\n x = decoder(x)\n x = torch.cat([x, encoders_output[-(i+2)]], dim=1)\n\n x = self.decoders[-1](x) \n\n if self.use_linear_finalizer:\n x_perm = x.permute(0,1,3,2)\n x = torch.mul(x_perm, self.linear_finalizer) \n x = x.permute(0,1,3,2)\n\n return x\n\n def _gen_conv(self, in_ch, out_ch, strides = (2, 1), kernel_size = (5,3), gain = math.sqrt(2), convGlu = False, rounding_needed= False):\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\n ky,kx = kernel_size\n p1x = (kx-1)//2\n p2x = kx-1 - p1x\n p1y = (ky-1)//2\n p2y = ky-1 - p1y\n\n if rounding_needed:\n pad_counts = (p1x,p2x,p1y-1 , p2y)\n pad = torch.nn.ReplicationPad2d(pad_counts)\n else:\n pad = None\n\n if convGlu:\n conv = ConvGLU(in_ch, out_ch, kernel_size=kernel_size, stride = strides, batchnorm=self.glu_bn , padding=(0,0), act= \"sigmoid\")\n else:\n if pad is None:\n conv = nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride = strides, padding = (p1y, p1x) )\n else:\n conv = nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride = strides , padding=0)\n\n w = conv.weight\n k = w.size(1) * w.size(2) * w.size(3)\n conv.weight.data.normal_(0.0, gain / math.sqrt(k) )\n nn.init.constant_(conv.bias,0.01)\n return conv, pad \n\n def _gen_deconv(self, in_ch, out_ch, strides = (2, 1), k = 4, gain = math.sqrt(2), p =1 ):\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\n\n conv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size= (k,3), stride = strides, padding_mode='zeros',padding = (p,1), dilation = 1)\n\n w = conv.weight\n k = w.size(1) * w.size(2) * w.size(3)\n conv.weight.data.normal_(0.0, gain / math.sqrt(k) )\n nn.init.constant_(conv.bias,0.01)\n\n return conv\n\ndef replace_magnitude(x, mag):\n phase = torch.atan2(x[:, 1:], x[:, :1]) # imag, real\n return torch.cat([mag * torch.cos(phase), mag * torch.sin(phase)], dim=1)\n\n\nclass DeGLI(nn.Module):\n def __init__(self, writer, model_config, model_type , n_freq:int, use_fp16:bool , n_fft: int, hop_length: int, depth:int, 
out_all_block:bool):\n        super().__init__()\n        self.n_fft = n_fft\n        self.hop_length = hop_length\n        self.out_all_block = out_all_block\n        self.window = nn.Parameter(torch.hann_window(n_fft), requires_grad=False)\n        self.istft = InverseSTFT(n_fft, hop_length=self.hop_length, window=self.window.data)\n\n        model_type = model_type.lower()\n        if model_type == \"vanilla\":\n            self.dnns = nn.ModuleList([DeGLI_DNN(model_config) for _ in range(depth)])\n        elif model_type == \"ed\":\n            self.dnns = nn.ModuleList([DeGLI_ED( n_freq ,model_config) for _ in range(depth)])\n\n        # self.use_fp16 = use_fp16\n\n        # if self.use_fp16:\n        #     for dnn in self.dnns:\n        #         dnn = dnn.half()\n\n    def stft(self, x):\n        return torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, window=self.window)\n\n    def forward(self, x, mag, max_length=None, repeat=1, train_step = -1):\n        if isinstance(max_length, torch.Tensor):\n            max_length = max_length.item()\n\n        out_repeats = []\n        for ii in range(repeat):\n            for dnn in self.dnns:\n                # B, 2, F, T\n                mag_replaced = replace_magnitude(x, mag)\n\n                # B, F, T, 2\n                waves = self.istft(mag_replaced.permute(0, 2, 3, 1), length=max_length)\n                consistent = self.stft(waves)\n\n                # B, 2, F, T\n                consistent = consistent.permute(0, 3, 1, 2)\n                # if self.use_fp16:\n                #     residual = dnn(x.half() , mag_replaced.half(), consistent.half(), train_step = train_step).float()\n                # else:\n                residual = dnn(x , mag_replaced, consistent, train_step = train_step)\n                \n                x = consistent - residual\n                if self.out_all_block:\n                    out_repeats.append(x)\n\n        if self.out_all_block:\n            out_repeats = torch.stack(out_repeats, dim=1)\n        else:\n            out_repeats = x.unsqueeze(1)\n\n        final_out = replace_magnitude(x, mag)\n\n        return out_repeats, final_out, residual\n\n    def plain_gla(self, x, mag, max_length=None, repeat=1, train_step = -1):\n        if isinstance(max_length, torch.Tensor):\n            max_length = max_length.item()\n\n        out_repeats = []\n        for _ in range(repeat):\n            for _ in self.dnns:\n                # B, 2, F, T\n                mag_replaced = replace_magnitude(x, mag)\n\n                # B, F, T, 2\n                waves = self.istft(mag_replaced.permute(0, 2, 3, 1), length=max_length)\n                consistent = self.stft(waves)\n\n                # B, 2, F, T\n                x = consistent.permute(0, 3, 1, 2)\n                if self.out_all_block:\n                    out_repeats.append(x)\n\n        if self.out_all_block:\n            out_repeats = torch.stack(out_repeats, dim=1)\n        else:\n            out_repeats = x.unsqueeze(1)\n\n        final_out = replace_magnitude(x, mag)\n        return out_repeats, final_out","sub_path":"model1/degli.py","file_name":"degli.py","file_ext":"py","file_size_in_byte":12403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"335231763","text":"right_number = 9\nguess_count = 0\nguess_limit = 3\nwhile guess_count < guess_limit:\n    guess = int(input('Guess the number is?'))\n    guess_count += 1\n    if guess == right_number:\n        print(\"you win!\")\n        break\n    else:\n        print(\"The number is wrong\")\nelse:\n    print(\"Sorry, you failed\")","sub_path":"200403 While loop_Guess the numnber.py","file_name":"200403 While loop_Guess the numnber.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"411444849","text":"\ndef stats_text_en(text):\n    ''' Count the frequency of English words in the text '''\n    text1 = text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','')\n    letters = ''  # renamed from 'list' to avoid shadowing the builtin\n    for i in text1: # keep only ASCII (English) characters\n        if i.isascii():\n            letters += i\n    text2 = letters.split() # convert the string into a list of words\n\n    d1 = {}\n    for i in text2:\n        count = text2.count(i)\n        r1 = {i:count}\n        d1.update(r1)\n    #print(d)\n    d2 = sorted(d1.items(),key = lambda x:x[1],reverse = True) # sort by the number of times each word appears\n    return d2\n#print(stats_text_en(text))\n\n\ndef stats_text_cn(text):\n    ''' Count the occurrences of Chinese characters '''\n    d3 = {}\n    for i in text:\n        if u'\\u4e00' <= i <= u'\\u9fa5': # select Chinese characters only\n            count = text.count(i)\n            r2 = {i:count}\n            d3.update(r2)\n    d4 = sorted(d3.items(),key = lambda x:x[1],reverse = True)\n    return d4\n#print(stats_text_cn(text))\n\ndef stats_text(text): \n    ''' Call stats_text_en and stats_text_cn and print the merged frequency results '''\n    print(stats_text_en(text)+stats_text_cn(text))\n\n\n    ","sub_path":"exercises/1901050059/d07/mymodule/stats_word.py","file_name":"stats_word.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"131716813","text":"import struct\n\n\ndef read(filename):\n    with open(filename, 'rb') as file:\n        entries = []\n\n        # RMAP should be the first entry.\n        if file.read(4) != b'RMAP':\n            return\n\n        # RMAP has a specific name.\n        raw_name = file.read(8)\n        if 0 in raw_name:\n            name = raw_name[0:raw_name.index(0)].decode('ascii')\n        else:\n            name = raw_name\n        if name != b'resource':\n            return\n\n        # Get the total length of the following resource headers.\n        index_size = struct.unpack('= 0) and (x.max() <= 1)\n    return x\n","sub_path":"lib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"180675660","text":"from flask import request\nfrom flask import render_template\nfrom app import app\n\n@app.route('/', methods=['GET'])\ndef index():\n    coordinates = [{ \n        'lat': 39.955688,\n        'lng': -75.202209,\n        'notes': 'Corner of 40th & Chestnut'\n    }, { \n        'lat': 39.955536, \n        'lng': -75.198481,\n        'notes': 'Sitar'\n    }]\n    return render_template('index.html', title=\"The Treasure Map\", coordinates=coordinates)\n\n@app.route('/health', methods=['GET'])\ndef get_health():\n    return '200 OK'\n\n@app.route('/coordinates', methods=['POST'])\ndef handle_coordinates():\n    # store coordinates\n    coordinates = list(request.args.get('coordinates'))\n    for c_data in coordinates:\n        # format = {'latitude': , 'longitude': , 'notes': }\n        coord = Coordinate(latitude=c_data['latitude'], longitude=c_data['longitude'], notes=c_data['notes'])\n        db.session.add(coord)\n        db.session.commit()\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"638380991","text":"# -*- encoding: utf-8 -*-\n# @Author: richman\n# @Date: 2017-10-23 20:47:21\n# @Last Modified by: Jiaqi guo\n# @Last Modified time: 2018-01-18 13:44:21\nimport os\nimport numpy as np\nimport torch\nfrom ..utils import kaldi_io\nimport itertools\nfrom torch.utils.data import TensorDataset\nimport torch.multiprocessing as multiprocessing\nfrom torch.utils.data import DataLoader\nimport logging\nfrom datetime import datetime\n\nlogger=multiprocessing.log_to_stderr()\nlogger.setLevel(logging.INFO)\n\n# Fetches a cache ( e.g. some frames from a stream ). 
Used as a background\n# process\ndef _fetch_cache(datastream, index_queue, data_queue, labels, nFinished, nj, batchsize, shuffle=False):\n while True:\n # Use None as breaking indicator for both queues\n cachesize = index_queue.get()\n #logger.debug(cachesize)\n if cachesize is None:\n data_queue.put(None)\n logger.error('get None cachesize')\n break\n st = datetime.now()\n try:\n if labels is None:\n feats, targets = zip(*[(v, np.array([1]*len(v))) for k, v in (next(datastream)\n for _ in range(cachesize))])\n else:\n feats, targets = zip(*[(v,labels[k]) for k, v in (next(datastream)\n for _ in range(cachesize)) if k in labels])\n # Finished iterating over the dataset, two cases:\n # 1. Last element has been reached and data is nonempty\n # 2. Last element has been reached and data is empty\n # The second case will be catched in ValueError\n\n except StopIteration as e:\n # Just return the data\n logger.error(e)\n nFinished.value = nFinished.value + 1\n logger.info(nFinished.value)\n break\n # Returns Value error, if zip fails (list is empty, thus no data)\n except ValueError as e:\n logger.error(e)\n nFinished.value = nFinished.value + 1\n logger.info(nFinished.value)\n if nFinished.value == nj:\n data_queue.put(None)\n break\n\n et = datetime.now()\n logger.debug('readdata took:'+ str((et-st).seconds))\n\n assert feats is not None, \"Check the labels!\"\n\n st = datetime.now()\n # No features to return, just tell the iterator its finished\n feats = np.concatenate(feats)\n # Assuming multiple labels for each feature, targets has size 2xDATA ->\n # DATAx2\n \n targets = np.concatenate(targets)\n tnetdataset = TensorDataset(torch.from_numpy(feats),\n torch.from_numpy(targets).long())\n\n dataloader = DataLoader(\n tnetdataset, batch_size=batchsize,\n shuffle=shuffle, drop_last=shuffle)\n data_queue.put(dataloader)\n et = datetime.now()\n logger.debug('package data took:'+str((et-st).seconds))\n\ndef parse_countsfile(countsfile):\n if isinstance(countsfile, str):\n with open(countsfile) as countsfileiter:\n res_dict = {k: int(v) for k, v in (\n l.rstrip('\\n').split() for l in countsfileiter)}\n else:\n res_dict = {k: int(v) for k, v in (\n l.rstrip('\\n').split() for l in countsfile)}\n return res_dict\n\ndef makefeatstring(splitroot, x, context=0, deltas=False):\n header=\"copy-feats scp,p:\"+splitroot\n featstring=os.path.join(header,str(x),'feats.scp')\n featstring+= \" ark:- |\"\n if context != 0:\n featstring += \" splice-feats --left-context={lc} --right-context={rc} ark:- ark:- |\".format(lc=context, rc=context)\n if deltas:\n featstring += \" add-deltas ark:- ark:- |\"\n return featstring\n\n\nclass KaldiStreamDataloader(object):\n \"\"\"docstring for KaldiStreamDataloader\"\"\"\n\n def __init__(self, splitdataroot, labels, num_outputs, cachesize=200, batchsize=64, shuffle=False):\n super(KaldiStreamDataloader, self).__init__()\n self.labels = labels\n self.cachesize = cachesize\n self.batchsize = batchsize\n self.shuffle = shuffle\n self.splitdataroot=splitdataroot\n self.num_outputs = num_outputs\n self.nj = int(splitdataroot[-1]) if splitdataroot[-1] != '/' else int(splitdataroot[-2])\n \n self.lengths={}\n self.num_caches=0\n for x in range(1, self.nj+1):\n countsfile=os.path.join(self.splitdataroot,str(x),\"counts.ark\")\n res_dict = parse_countsfile(countsfile)\n self.num_caches += int(\n max(np.ceil(1. 
* len(res_dict) / self.cachesize), 1))\n            self.lengths.update(res_dict)\n\n        \n        # At least one cache needs to be processed\n        self.cachestartidx = [self.cachesize *\n                              i for i in range(self.num_caches)]\n        self.nsamples = sum(self.lengths.values())\n        # Take the first sample from the data and use it as dim reference\n        # key, feat = next(kaldi_io.read_mat_ark(self.stream))\n        # self.inputdim = feat.shape[-1]\n\n        \n\n    def __iter__(self):\n        return itertools.chain.from_iterable(KaldiStreamIter(self))\n\n    def __len__(self):\n        return self.num_caches\n\n\nclass KaldiStreamIter(object):\n    \"\"\"\n    Stream iterator for Kaldi based features\n    This iterator needs the KaldiDataloader as its argument\n    \"\"\"\n\n    def __init__(self, loader):\n        super(KaldiStreamIter, self).__init__()\n        self.splitdataroot = loader.splitdataroot\n        self.lengths = loader.lengths\n        self.cachesize = loader.cachesize\n        self.nj = loader.nj\n        self.labels = loader.labels\n        self.shuffle = loader.shuffle\n        self.batchsize = loader.batchsize\n        self.nsamples = loader.nsamples\n        self.num_caches = len(loader)\n        self.cachestartidx = loader.cachestartidx\n        self.idx = 0\n        self.nFinished = multiprocessing.Value('i', 0)\n        self.startWork()\n\n    def _submitjob(self):\n        self.idx += 1\n        self.index_queue.put(self.cachesize)\n\n    def startWork(self):\n        self.data_queue = multiprocessing.SimpleQueue()\n        self.index_queue = multiprocessing.SimpleQueue()\n        \n        self.workers =[\n            multiprocessing.Process(\n                target=_fetch_cache, \n                args=(\n                    kaldi_io.read_mat_ark(\n                        makefeatstring(self.splitdataroot, i, context=5, deltas=True)), \n                    self.index_queue, self.data_queue, \n                    self.labels, self.nFinished, self.nj, self.batchsize, self.shuffle\n                )\n            ) \n            for i in range(1,self.nj+1)\n        ]\n        for i in range(self.nj):\n            # start worker and submit an empty slot for each \n            self.workers[i].start()\n            self._submitjob()\n\n    def _shutdown(self):\n        for i in range(self.nj):\n            self.index_queue.put(None)\n            self.workers[i].join()\n        for i in range(self.nj):\n            self.workers[i].terminate()\n        # Use -1 as flag for stopiteration\n        self.idx = -1\n\n    def __del__(self):\n        self._shutdown()\n\n    def __len__(self):\n        return self.nsamples\n\n    def __next__(self):\n        try:\n            # Queue is synchronized, thus will block\n            res = self.data_queue.get()\n            if not res:\n                raise StopIteration\n            self._submitjob()\n            # compare the shared counter's value, not the Value wrapper itself\n            if self.nFinished.value == self.nj:\n                # All fetching process complete\n                logger.info('all complete, shutting down')\n                self._shutdown()\n            return res\n        except KeyboardInterrupt:\n            self._shutdown()\n            raise StopIteration\n\n    next = __next__  # Python 2 compatibility\n\n    def __iter__(self):\n        return self\n","sub_path":"py_src/utils/kaldi_dataloader2.py","file_name":"kaldi_dataloader2.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"216013705","text":"import numpy as np\nimport pandas as pd\n\nfrom dataLoad import *\nfrom dataToGrades import dataToGrades\n\n\ndef dataCheck(data):\n    \n    # Define the column with student IDs as vector.\n    studentID = data[:,0]\n    \n    # Find and count the unique studentIDs and put them into vectors.\n    unique, uniquecnt = np.unique(studentID, return_counts = True)\n    # Fill the vector 'notunique' with the studentIDs from 'unique-vector' \n    # which appear more than once according to the 'uniquecnt'-vector.\n    notunique = unique[uniquecnt > 1]\n    \n    # Fill the list 'err' with information on the not unique student IDs.\n    err = [data[studentID == i] for i in notunique]\n    \n    # Guard against the no-duplicates case: np.concatenate([]) would raise.\n    if len(err) > 0:\n        err = np.concatenate(err, axis=0 )\n    \n    # Print error messages. \n    if len(notunique)>0:\n        print(\"The following student ID(s) were found to be duplicates: {}\".format(notunique))\n        print(\"We found the following information on the student ID(s): \\n{}\\n\".format(err),sep = \"\\n\")\n    \n\n\n    #Define a vector with the grading scale:\n    gradeScale = np.array([-3,0,2,4,7,10,12])\n    \n    #We call the function to convert the data into only grades.\n    grades = dataToGrades(data)\n    \n    #Define length of matrix containing all the grades \n    n = len(grades)\n    \n    errs = 0\n    #Create a for-loop that checks if the grade given is contained in the grading scale:\n    for i in range(n):\n        #Only checks the columns that contain grades (numcol-2)\n        for j in range(len(grades[0,:])):\n            #If the grade is not within the scale, report it\n\n            if not np.any(grades[i,j] == gradeScale):\n                #Prints the name and student ID of the student that has an incorrect grade according to the grading scale\n                #plus the assignment in which the mistake occurs. \n                print(\"{:s}, {:s} has an invalid grade in assignment {}.\".format(data[i,1],data[i,0],j+1))\n                errs += 1\n    \n    if errs == 0 and np.size(notunique) == 0:\n        print(\"We found no errors in either the student IDs or the grades of the loaded data.\\n\")\n    \n    \n    print(\"\")\n    ","sub_path":"Mandag/dataCheck.py","file_name":"dataCheck.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"555031414","text":"# =============================================================================\n# Ural Normalize CLI Action\n# =============================================================================\n#\n# Logic of the normalize CLI action enabling the user to normalize the urls of\n# a CSV file column.\n#\nimport csv\n\nfrom ural.cli.utils import custom_reader\nfrom ural import normalize_url\n\n\ndef normalize_action(namespace):\n    sort_query = not namespace.no_query_sort\n    strip_authentication = not namespace.keep_authentication\n    strip_trailing_slash = namespace.strip_trailing_slash\n    strip_index = not namespace.keep_index\n\n    headers, position, reader = custom_reader(namespace.file, namespace.column)\n\n    headers.append(namespace.column + \"_normalized\")\n    writer = csv.writer(namespace.output)\n    writer.writerow(headers)\n\n    for line in reader:\n        url = line[position]\n        line.append(normalize_url(url, sort_query=sort_query, strip_authentication=strip_authentication,\n                                  strip_trailing_slash=strip_trailing_slash, strip_index=strip_index))\n        writer.writerow(line)\n","sub_path":"ural/cli/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"522449075","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 7 00:40:02 2017\n\n@author: Kyle\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras.utils import np_utils\nfrom imblearn.under_sampling import NearMiss, AllKNN, RandomUnderSampler\nfrom imblearn.over_sampling import ADASYN, SMOTE, RandomOverSampler\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport sys\nif __name__ == '__main__':\n    sys.path.append(\"../model/\")\n    import soundNet\n    import waveCNN\nelse:\n    sys.path.append(\"../../model/\")\n    import soundNet\n    import waveCNN\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nimport os\nimport math\nimport 
seaborn as sns\nimport random\n\n#%% slice the matrix using discontinuous row index \ndef discontSliceRow( matrix, index ):\n outputMatrix = np.zeros( [ len( index ), matrix.shape[ 1 ] ] )\n outputIndex = 0\n for processLine in range( 0, len( matrix ) ):\n if processLine in index:\n outputMatrix[ outputIndex, : ] = matrix[ processLine, : ]\n outputIndex += 1\n return outputMatrix\n\n#%% slice the matrix using discontinuous column index\ndef discontSliceCol( matrix, index ):\n outputMatrix = np.zeros( [ matrix.shape[ 0 ], len( index ) ] )\n outputIndex = 0\n for processCol in range( 0, matrix.shape[1] ):\n if processCol in index:\n outputMatrix[ :, outputIndex ] = matrix[ :, processCol ]\n outputIndex += 1\n return outputMatrix\n\n#%%\ndef iter_loadtxt(filename, delimiter=',', skiprows=0, dtype= float):\n def iter_func():\n with open(filename, 'r') as infile:\n for _ in range(skiprows):\n next(infile)\n for line in infile:\n line = line.rstrip().split(delimiter)\n for item in line:\n yield dtype(item)\n iter_loadtxt.rowlength = len(line)\n\n data = np.fromiter(iter_func(), dtype=dtype)\n data = data.reshape((-1, iter_loadtxt.rowlength))\n return data\n\n#%%\ndef processData( dataSet, task = 'nonEmotion', balance = 'imbalance', dataType = 'waveform', dataset = 'IEMOCAP' ):\n \n if dataset == 'IEMOCAP':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 96000\n else:\n dataSize = 150 *64\n \n # for speaker and gender task, use all database\n if task != 'emotion':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed( seed = 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n emotionLabel = dataSet[ :, dataSize + 0 ]\n speakerLabel = dataSet[ :, dataSize + 1 ]\n genderLabel = dataSet[ :, dataSize + 2 ]\n \n emotionLabel = np_utils.to_categorical( emotionLabel )\n speakerLabel = np_utils.to_categorical( speakerLabel )\n genderLabel = np_utils.to_categorical( genderLabel )\n \n if task == 'speaker':\n return feature, speakerLabel\n elif task == 'gender':\n return feature, genderLabel\n \n # for emotion task, only select 4 classes ( 0, 1, 2, 3 ), label 4 means other emotion, should abandon\n if task == 'emotion':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed(seed= 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n # select only label with 0,1,2,3; 4 = others, not count\n emotionLabel = dataSet[ :, dataSize + 0 ]\n emotionIndices = [ i for i, x in enumerate( emotionLabel ) if x != 4]\n dataSet = discontSliceRow( dataSet, emotionIndices )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n emotionLabel = dataSet[ :, dataSize + 0 ]\n emotionLabel = np_utils.to_categorical( emotionLabel )\n \n # random oversampling\n if balance == 'balance':\n ros = RandomOverSampler( random_state= 7 )\n feature, emotionLabel = ros.fit_sample( feature, np.argmax( emotionLabel, 1 ) )\n numSamples = len( emotionLabel )\n emotionLabel = np.array( emotionLabel )\n emotionLabel.resize( [ numSamples, 1 ] )\n dataSet = np.concatenate( ( feature, emotionLabel ), axis = 1 ) \n np.random.shuffle( dataSet )\n feature = dataSet[ :, 0: dataSize ]\n emotionLabel = dataSet[ :, dataSize 
]\n emotionLabel = np_utils.to_categorical( emotionLabel )\n \n assert emotionLabel.shape[ 1 ] == 4\n \n return feature, emotionLabel\n \n if dataset == 'ESC':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 44100 *6\n else:\n dataSize = 150 *64\n \n # for speaker and gender task, use all database\n if task == 'scene':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed( seed = 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n sceneLabel = dataSet[ :, dataSize + 0 ]\n sceneLabel = np_utils.to_categorical( sceneLabel )\n #print( sceneLabel.shape[ 1 ] )\n #assert sceneLabel.shape[ 1 ] == 50\n \n return feature, sceneLabel\n \n if dataset == 'DCASE':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 44100 *10\n else:\n dataSize = 150 *64\n \n # for speaker and gender task, use all database\n if task == 'scene':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed( seed = 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n #feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n sceneLabel = dataSet[ :, dataSize + 0 ]\n sceneLabel = np_utils.to_categorical( sceneLabel )\n print( sceneLabel.shape[ 1 ] )\n #assert sceneLabel.shape[ 1 ] == 50\n \n return feature, sceneLabel\n \n if dataset == 'DCASE16000':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 16000 *10\n else:\n dataSize = 150 *64\n \n # for speaker and gender task, use all database\n if task == 'scene':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed( seed = 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n sceneLabel = dataSet[ :, dataSize + 0 ]\n sceneLabel = np_utils.to_categorical( sceneLabel )\n print( sceneLabel.shape[ 1 ] )\n #assert sceneLabel.shape[ 1 ] == 50\n \n return feature, sceneLabel\n \n if dataset == 'DCASE8000':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 8000 *10\n else:\n dataSize = 150 *64\n \n # for speaker and gender task, use all database\n if task == 'scene':\n dataSet = dataSet.astype( 'float32' )\n np.random.seed( seed = 7 )\n np.random.shuffle( dataSet )\n #np.savetxt( 'dataSetAfterShuffle.csv', dataSet, delimiter = ',' )\n print( dataSet[ 17, -17 ] )\n \n feature = dataSet[ :, 0: dataSize ]\n # normalize the data\n #feature = ( ( feature - np.mean( feature ) ) /math.sqrt( np.var( feature ) ) )\n \n sceneLabel = dataSet[ :, dataSize + 0 ]\n sceneLabel = np_utils.to_categorical( sceneLabel )\n print( sceneLabel.shape[ 1 ] )\n #assert sceneLabel.shape[ 1 ] == 50\n \n return feature, sceneLabel\n\n#%% \ndef train( testFeature, testLabel, trainFeature, trainLabel, newFolderName, iteration_num = 100, \\\n lr_decay = 0.1, batch_size = 32, learningRate = 0.0001, iterationNum = 100, \\\n modelT = soundNet.soundNet, init = 'lecun_uniform', saveSign = False, denseUnitNum = 64,\\\n dataType = 'waveform', visualSign = 0, dataset = 'IEMOCAP', task = 'emotion' ):\n \n # define 
data size, different size for waveform and spectrogram\n if dataset == 'IEMOCAP':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 96000\n else:\n dataSize = 150 *64\n if dataset == 'ESC':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 44100 *6\n else:\n dataSize = 150 *64\n if dataset == 'DCASE':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 44100 *10\n else:\n dataSize = 150 *64\n if dataset == 'DCASE16000':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 16000 *10\n else:\n dataSize = 150 *64\n if dataset == 'DCASE8000':\n if dataType == 'waveform' or dataType == 'toyWaveform':\n dataSize = 8000 *10\n else:\n dataSize = 150 *64\n \n # make folders \n os.mkdir( newFolderName + '/weight' )\n os.mkdir( newFolderName + '/models' )\n os.mkdir( newFolderName + '/figure' )\n os.mkdir( newFolderName + '/figure/TSNE' )\n os.mkdir( newFolderName + '/figure/convFilter' )\n os.mkdir( newFolderName + '/trainSummary' )\n os.mkdir( newFolderName + '/testSummary' )\n \n # initialize \n result = np.zeros( [ 2, iteration_num ] )\n diffResult = np.zeros( [ 8, iteration_num ] )\n class_num = testLabel.shape[ 1 ]\n train_datasize = trainFeature.shape[ 0 ]\n \n tf.set_random_seed( 7 )\n with tf.Graph().as_default() as g:\n with tf.Session( graph = g ) as sess:\n \n # changable learning rate \n global_step = tf.Variable(0) \n learning_rate = tf.train.exponential_decay( learningRate, global_step, int( iteration_num *(train_datasize/batch_size) ), lr_decay, staircase=False) \n \n # fix random index for reproducing result \n tf.set_random_seed( 17 )\n \n # define place holders \n input_x = tf.placeholder( tf.float32, shape = ( batch_size, dataSize ), name = 'inputx' )\n input_y = tf.placeholder( tf.float32, shape = ( batch_size, class_num ), name = 'inputy' )\n \n # define a set of tensors for training\n if task == 'scene' and dataset == 'DCASE':\n prediction = modelT( input_x, numClass = class_num, l2_reg = 0.5, init = init, denseUnitNum = denseUnitNum, task = task )\n if task == 'scene' and ( dataset == 'DCASE16000' or dataset == 'DCASE8000' ):\n prediction = modelT( input_x, numClass = class_num, l2_reg = 0.5, init = init, denseUnitNum = denseUnitNum, timeStep_num = 160 )\n else:\n prediction = modelT( input_x, numClass = class_num, l2_reg = 0.5, init = init, denseUnitNum = denseUnitNum )\n \n loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits = prediction, labels= input_y ) )\n train_step = tf.train.AdamOptimizer( learning_rate ).minimize( loss, global_step = global_step )\n #train_step = tf.train.GradientDescentOptimizer( learning_rate ).minimize( loss, global_step = global_step )\n correct_prediction = tf.equal( tf.argmax( prediction, 1 ), tf.argmax( input_y, 1 ) )\n accuracy = tf.reduce_mean( tf.cast( correct_prediction, tf.float32 ), name=\"acc_restore\" )\n with tf.name_scope( 'result' ):\n tf.summary.scalar('accuracy', accuracy)\n \n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter( newFolderName + '/trainSummary' , sess.graph)\n test_writer = tf.summary.FileWriter( newFolderName + '/testSummary' )\n \n # initialize the variables\n init_op = tf.global_variables_initializer( )\n sess.run( init_op )\n \n # initialize the model saver\n saver = tf.train.Saver( max_to_keep= 100 )\n \n # print the list of variables\n print( tf.trainable_variables() )\n \n # number of iterations\n for iteration in range( 0, iteration_num ):\n \n # np.random.seed( seed = 7 )\n # 
np.random.shuffle( trainFeature )\n # np.random.seed( seed = 7 )\n # np.random.shuffle( trainLabel )\n \n # each batch\n for i in range( 0, 1 *int( train_datasize / batch_size ) ):\n \n # prepare data for each train batch\n start = ( i * batch_size ) % train_datasize\n end = min( start + batch_size, train_datasize )\n inputTrainFeature = trainFeature[ start: end ]\n inputTrainLabel = trainLabel[ start: end ]\n \n # train the model\n _, lossShow = sess.run( [ train_step, loss ], feed_dict = { input_x: inputTrainFeature, input_y: inputTrainLabel } )\n #print( 'loss = ' + str( lossShow ) )\n \n # get accuracy on a small subset of test data (just several epoch), a very fast approximation of the performance \n # number of batches to test\n testBatchNum = math.floor( len( testLabel ) /batch_size )\n # initialize result recorder\n testSubsetResult = [ None ] *( batch_size *testBatchNum )\n testSubsetLabel = [ None ] *( batch_size *testBatchNum )\n outputBeforeDense = [ None ] *( batch_size *testBatchNum )\n outputDense1 = [ None ] *( batch_size *testBatchNum )\n \n # get intermediate tensor \n if visualSign >= 2:\n flattenOut = sess.graph.get_tensor_by_name( 'flatten/flattenOut:0' ) \n dense1Out = sess.graph.get_tensor_by_name( 'dense1/dense1Out:0' ) \n \n # start test\n for testBatch in range( 0, testBatchNum ): # 3*32=96 test samples\n # prepare input data \n start = testBatch * batch_size \n end = start + batch_size\n inputTestFeature = testFeature[ start: end, : ]\n inputTestLabel = testLabel[ start: end, : ] \n # run test\n if visualSign >= 2:\n tempTestResult, tempAccuracyTest, tempoutputBeforeDense, tempoutputDense1 = sess.run( [ prediction, accuracy, flattenOut, dense1Out ], feed_dict = { input_x: inputTestFeature, input_y: inputTestLabel } ) \n else:\n tempTestResult, tempAccuracyTest = sess.run( [ prediction, accuracy ], feed_dict = { input_x: inputTestFeature, input_y: inputTestLabel } ) \n # record result\n testSubsetLabel[ start :end ] = np.argmax( inputTestLabel, 1 )\n testSubsetResult[ start :end ] = np.argmax( tempTestResult, 1 ) \n \n if visualSign >= 2:\n outputBeforeDense[ start :end ] = tempoutputBeforeDense\n outputDense1[ start :end ] = tempoutputDense1\n \n # plot the t-SNE before the dense layer\n if visualSign >= 2:\n plotTSNE( outputBeforeDense, testSubsetLabel, newFolderName + '/figure/TSNE/tSNE_1_' + str( iteration ) + '.png' )\n plotTSNE( outputDense1, testSubsetLabel, newFolderName + '/figure/TSNE/tSNE_2_' + str( iteration ) + '.png' )\n \n if iteration == -1:\n plotALotTSNE( outputBeforeDense, testSubsetLabel, newFolderName + '/figure/TSNE/tSNE_3_' )\n plotALotTSNE( outputDense1, testSubsetLabel, newFolderName + '/figure/TSNE/tSNE_4_' )\n \n #np.savetxt( newFolderName + '/testResult.csv', testResult, delimiter = ',' )\n #np.savetxt( newFolderName + '/testLabel.csv', inputTestLabel, delimiter = ',' )\n accuracyTest = accuracy_score( testSubsetLabel, testSubsetResult )\n print( confusion_matrix( testSubsetLabel, testSubsetResult ) )\n result[ 0, iteration ] = accuracyTest\n print( 'Epoch:' + str( iteration + 1 ) + ' result on test: ' + str( accuracyTest ) )\n \n # get accuracy on a small subset of training data (just one epoch), a very fast approximation of the training loss/ overfitting \n inputTestTrainFeature = trainFeature[ 0: batch_size, : ]\n inputTestTrainLabel = trainLabel[ 0: batch_size, : ]\n testTrainResult, accuracyTrain = sess.run( [ prediction, accuracy ], feed_dict = { input_x: inputTestTrainFeature, input_y: inputTestTrainLabel } ) \n print( 'Epoch:' + 
str( iteration + 1 ) + ' result on train: ' + str( accuracyTrain ) )\n np.savetxt( newFolderName + '/testTrainResult.csv', testTrainResult, delimiter = ',' )\n np.savetxt( newFolderName + '/testTrainLabel.csv', inputTestTrainLabel, delimiter = ',' )\n result[ 1, iteration ] = accuracyTrain\n print( '-----------------------------' )\n #print( sess.run(global_step) ) \n #print( sess.run(learning_rate) )\n # record the accuracy of both test/ training error approximation on the small subset\n np.savetxt( newFolderName + '/accuracy.csv', result, delimiter = ',' )\n \n # print variable\n if visualSign == 1:\n if iteration == 0:\n lastState, _ = printVariable( sess, newFolderName = newFolderName )\n else:\n lastState, diffThisIter = printVariable( sess, lastState, iteration + 1, newFolderName = newFolderName )\n diffResult[ :, iteration - 1 ] = diffThisIter\n #np.savetxt( newFolderName + '/weightConv1' + str( iteration + 1 ) + '.csv', lastState, delimiter = ',' )\n \n # save model every 10 epoches\n #if ( iteration + 1 )%10 == 0 and saveSign == True:\n if ( ( iteration + 1 ) < 10 or ( iteration + 1 ) %10 == 0 ) and saveSign == True:\n save_path = saver.save( sess, newFolderName + '/models/' + str( iteration + 1 ) + '_.ckpt' )\n print(\"Model saved in file: %s\" % save_path)\n \n # plot the result\n resultOnTest = result[ 0, : ]\n resultOnTrain = result[ 1, : ]\n plt.plot( list( range( iteration_num ) ), resultOnTrain )\n plt.plot( list( range( iteration_num ) ), resultOnTest )\n plt.savefig( newFolderName + '/accuracy.png' )\n plt.close('all')\n \n if visualSign == 1:\n for diffLayerIndex in range( 0, 1 ):\n plt.plot( list( range( iteration_num ) ), diffResult[ diffLayerIndex, : ], label = 'conv_' + str( diffLayerIndex ) )\n plt.legend( 'upper right' )\n plt.savefig( newFolderName + '/diff.png' )\n plt.close( 'all' )\n np.savetxt( newFolderName + '/diff.csv', diffResult, delimiter = ',' )\n \n return resultOnTrain, resultOnTest\n\n#%%\ndef printVariable( sess, lastState = -1, iteration = 1, newFolderName = -1 ): \n #layerList = [ 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'conv6', 'conv7', 'conv8' ]\n layerList = [ 'conv1' ]\n diffList = layerList.copy( )\n currentState = [ 0 ] *len( layerList )\n \n for layerIndex in range( len( layerList ) ):\n \n # get all parameters of this layer\n allFilter = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope= layerList[ layerIndex ] )\n #print( allFilter )\n \n # for conv layers\n if layerIndex <= 7:\n \n # get the weight of this layer\n kernal = allFilter[ 0 ].eval( )\n if layerIndex == 0:\n plotConvFilters( kernal, newFolderName + '/figure/convFilter/' + layerList[ layerIndex ] + '_' + str( iteration ) + '.png' )\n else:\n plotRandomConvFilters( kernal, newFolderName + '/figure/convFilter/' + layerList[ layerIndex ] + '_' + str( iteration ) + '.png' )\n \n # save the weight of current layer\n currentState[ layerIndex ] = kernal\n \n # track the difference with last state\n if lastState != -1:\n diff = trackChange( currentState[ layerIndex ], lastState[ layerIndex ] )\n diffList[ layerIndex ] = diff\n print( layerList[ layerIndex ] + ' : ' + str( diff ) )\n #trackChangeEachChannel( currentState[ layerIndex ], lastState[ layerIndex ] )\n \n# if layerIndex <= 1:\n# print( trackChangeEachChannel( currentState[ layerIndex ], lastState[ layerIndex ] ) )\n \n return currentState, diffList\n\n#%% track change ( in percentile ) of different layers according to epochs\ndef trackChange( currentState, lastState ):\n difference = lastState - currentState \n 
diffInPercentile = difference / lastState\n absDiffInPercentile = np.abs( diffInPercentile )\n meanChange = np.mean( absDiffInPercentile )\n return meanChange *100\n\n#%% track change ( in percentile ) of different layers according to epochs for each channel\ndef trackChangeEachChannel( currentState, lastState ):\n difference = lastState - currentState \n diffInPercentile = difference / currentState\n absDiffInPercentile = np.abs( diffInPercentile )\n absDiff = np.abs( difference )\n \n input_channel_num = np.shape( difference )[ 2 ]\n output_channel_num = np.shape( difference )[ 3 ]\n #initialize an array to track the change of each channel\n diffPerChannel = np.zeros( [ input_channel_num, output_channel_num ] )\n # for each channel \n for input_channel in range( 0, input_channel_num ):\n for output_channel in range( 0, output_channel_num ):\n tempChannelDiff = absDiff[ 0, :, input_channel, output_channel ]\n #tempChannelDiff = absDiffInPercentile[ 0, :, input_channel, output_channel ]\n diffPerChannel[ input_channel, output_channel ] = np.mean( tempChannelDiff )\n \n print( 'mean: ' + str( np.mean( diffPerChannel ) *100 ))\n print( 'max: ' + str( np.max( diffPerChannel ) *100 ) )\n print( 'min: ' + str( np.min( diffPerChannel ) *100 ) )\n print( '-------------' )\n \n return diffPerChannel *100\n#%% load data, devide it into training/test set, and seperate out the laebls \n# normalize the feature to [0, 1]\n# for emotion tests, filter out value = 4 (other emotions)\n# folder list, i.e., IEMCOCAP has 5 sessions, speakers are independent between sessions, always use leave-one-session-out stragegy\ndef loadDataIEMOCAP( testTask, testFolder = 4, precision = 'original', sampleRate = 16000, dataType = 'toyWaveform' ):\n \n folderList = [ 0, 1, 2, 3, 4 ] \n trainFolderList = folderList.copy( )\n del trainFolderList[ testFolder ]\n \n if dataType == 'toyWaveform':\n dataFileFolder = '../../../processedData/backup/toyWaveform/' + str( sampleRate ) + '_' + precision + '/session_'\n elif dataType == 'waveform':\n dataFileFolder = '../../../processedData/backup/waveform/' + str( sampleRate ) + '_' + precision + '/session_'\n elif dataType == 'toySpectrogram':\n dataFileFolder = '../../../processedData/toySpectrogram/' + str( sampleRate ) + '_' + precision + '_64/session_'\n elif dataType == 'spectrogram':\n dataFileFolder = '../../../processedData/spectrogram/' + str( sampleRate ) + '_' + precision + '_64/session_'\n \n fold = [ 0, 0, 0, 0, 0 ]\n for i in folderList:\n fold[ i ] = eval( 'iter_loadtxt( dataFileFolder + str(' + str( i + 1 ) + ') + \".csv\" )' )\n \n # seperate training and testing data\n trainData = eval( 'np.concatenate( ( fold[ ' + str( trainFolderList[ 0 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 1 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 2 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 3 ] ) + ' ] ), axis=0 )' )\n testData = eval( 'fold[ ' + str( testFolder ) + ' ]' )\n \n if testTask == 'emotion':\n trainFeature, trainLabel = processData( trainData, task = testTask, balance = 'balance', dataType = dataType ) # emotion is not\n else:\n trainFeature, trainLabel = processData( trainData, task = testTask, dataType = dataType )\n testFeature, testLabel = processData( testData, task = testTask, dataType = dataType ) # note: don't balance the test set\n \n plotInputDistribution( trainFeature )\n #plotInputDistribution( testFeature[ 0, : ] )\n \n return trainFeature, trainLabel , testFeature, testLabel\n\n#%%\ndef loadDataESC( testTask, testFolder = 4, precision = 
'original', sampleRate = 16000, dataType = 'toyWaveform' ):\n \n folderList = [ 0, 1, 2, 3, 4 ] \n trainFolderList = folderList.copy( )\n del trainFolderList[ testFolder ]\n \n if dataType == 'toyWaveform':\n dataFileFolder = '../../../processedData/ESC/toyWaveform/' + 'folder'\n elif dataType == 'waveform':\n dataFileFolder = '../../../processedData/ESC/waveform/' + 'folder'\n elif dataType == 'toySpectrogram':\n dataFileFolder = '../../../processedData/ESC/toySpectrogram/' + 'folder'\n elif dataType == 'spectrogram':\n dataFileFolder = '../../../processedData/ESC/spectrogram/' + 'folder'\n \n fold = [ 0, 0, 0, 0, 0 ]\n for i in folderList:\n fold[ i ] = eval( 'iter_loadtxt( dataFileFolder + str(' + str( i + 1 ) + ') + \".csv\" )' )\n \n # seperate training and testing data\n trainData = eval( 'np.concatenate( ( fold[ ' + str( trainFolderList[ 0 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 1 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 2 ] ) + \\\n ' ], fold[ ' + str( trainFolderList[ 3 ] ) + ' ] ), axis=0 )' )\n testData = eval( 'fold[ ' + str( testFolder ) + ' ]' )\n \n if testTask == 'scene':\n trainFeature, trainLabel = processData( trainData, task = testTask, dataType = dataType, dataset = 'ESC' )\n testFeature, testLabel = processData( testData, task = testTask, dataType = dataType, dataset = 'ESC' ) # note: don't balance the test set\n \n plotInputDistribution( trainFeature )\n #plotInputDistribution( testFeature[ 0, : ] )\n \n return trainFeature, trainLabel , testFeature, testLabel\n\n\n#%%\ndef loadDataDCASE( testTask, testFolder = 3, precision = 'original', sampleRate = 16000, dataType = 'toyWaveform' ):\n \n if dataType == 'toyWaveform':\n dataFileFolder = '../../../processedData/DCASE/toyWaveform/' + 'folder'\n elif dataType == 'waveform':\n dataFileFolder = '../../../processedData/DCASE/waveform/' + 'folder'\n elif dataType == 'toySpectrogram':\n dataFileFolder = '../../../processedData/DCASE/toySpectrogram/' + 'folder'\n elif dataType == 'spectrogram':\n dataFileFolder = '../../../processedData/DCASE/spectrogram/' + 'folder'\n \n # seperate training and testing data\n trainData = iter_loadtxt( dataFileFolder + 'train_' + str( testFolder + 1 ) + '.csv' )\n testData = iter_loadtxt( dataFileFolder + 'evaluate_' + str( testFolder + 1 ) + '.csv' )\n \n if testTask == 'scene':\n trainFeature, trainLabel = processData( trainData, task = testTask, dataType = dataType, dataset = 'DCASE' )\n testFeature, testLabel = processData( testData, task = testTask, dataType = dataType, dataset = 'DCASE' ) # note: don't balance the test set\n \n plotInputDistribution( trainFeature )\n #plotInputDistribution( testFeature[ 0, : ] )\n \n return trainFeature, trainLabel , testFeature, testLabel\n\n#%%\ndef loadDataDCASE16000( testTask, testFolder = 3, precision = 'original', sampleRate = 16000, dataType = 'toyWaveform' ):\n \n if dataType == 'toyWaveform':\n dataFileFolder = '../../../processedData/DCASE16000/toyWaveform/' + 'folder'\n elif dataType == 'waveform':\n dataFileFolder = '../../../processedData/DCASE16000/waveform/' + 'folder'\n elif dataType == 'toySpectrogram':\n dataFileFolder = '../../../processedData/DCASE16000/toySpectrogram/' + 'folder'\n elif dataType == 'spectrogram':\n dataFileFolder = '../../../processedData/DCASE16000/spectrogram/' + 'folder'\n \n # seperate training and testing data\n trainData = iter_loadtxt( dataFileFolder + 'train_' + str( testFolder + 1 ) + '.csv' )\n testData = iter_loadtxt( dataFileFolder + 'evaluate_' + str( testFolder + 1 ) + '.csv' )\n \n if 
testTask == 'scene':\n trainFeature, trainLabel = processData( trainData, task = testTask, dataType = dataType, dataset = 'DCASE16000' )\n testFeature, testLabel = processData( testData, task = testTask, dataType = dataType, dataset = 'DCASE16000' ) # note: don't balance the test set\n \n plotInputDistribution( trainFeature )\n #plotInputDistribution( testFeature[ 0, : ] )\n \n return trainFeature, trainLabel , testFeature, testLabel\n\n#%%\ndef loadDataDCASE8000( testTask, testFolder = 3, precision = 'original', sampleRate = 16000, dataType = 'toyWaveform' ):\n \n if dataType == 'toyWaveform':\n dataFileFolder = '../../../processedData/DCASE8000/toyWaveform/' + 'folder'\n elif dataType == 'waveform':\n dataFileFolder = '../../../processedData/DCASE8000/waveform/' + 'folder'\n elif dataType == 'toySpectrogram':\n dataFileFolder = '../../../processedData/DCASE8000/toySpectrogram/' + 'folder'\n elif dataType == 'spectrogram':\n dataFileFolder = '../../../processedData/DCASE8000/spectrogram/' + 'folder'\n \n # seperate training and testing data\n trainData = iter_loadtxt( dataFileFolder + 'train_' + str( testFolder + 1 ) + '.csv' )\n testData = iter_loadtxt( dataFileFolder + 'evaluate_' + str( testFolder + 1 ) + '.csv' )\n \n if testTask == 'scene':\n trainFeature, trainLabel = processData( trainData, task = testTask, dataType = dataType, dataset = 'DCASE8000' )\n testFeature, testLabel = processData( testData, task = testTask, dataType = dataType, dataset = 'DCASE8000' ) # note: don't balance the test set\n \n plotInputDistribution( trainFeature )\n #plotInputDistribution( testFeature[ 0, : ] )\n \n return trainFeature, trainLabel , testFeature, testLabel\n\n\n#%% calculate the number of elements of an high-dimensional tensor\ndef countElements( inputM ):\n inputShape = inputM.shape\n dim = 1\n for i in inputShape:\n dim *= i\n return dim\n\n#%% plot the distribution of data, compatatbale with high-dimensional np arrays\ndef plotInputDistribution( inputM, saveFolder = '' ):\n output = np.reshape( inputM, [ countElements( inputM ) ] )\n fig1 = plt.figure( )\n ax1 = fig1.gca()\n binwidth = ( max( output ) - min( output ) )/1000\n ax1.hist( output, bins=np.arange( min( output ), max( output ) + binwidth, binwidth ) )\n if saveFolder != '':\n fig1.savefig( saveFolder + '/hist.png' )\n plt.close('all')\n\n#%% plot the filter for both waveform (1-D), and spectrogram (2-D)\n#def plotConvFilters( inputM, saveFolder = '' ):\n# # inputM in the shape of [ filter_height, filter_width, input_channel_num, output_channel_num ]\n# # , in which the total number of 1-D filter is input_channel_num *output_channle_num\n# input_channel_num = np.shape( inputM )[ 2 ]\n# output_channel_num = np.shape( inputM )[ 3 ]\n# fig, ax = plt.subplots( nrows= input_channel_num, ncols= output_channel_num )\n# for input_channel in range( 0, input_channel_num ):\n# for output_channel in range( 0, output_channel_num ):\n# tempFilter = inputM[ :, :, input_channel, output_channel ]\n# # if 1-D filter, waveform\n# if np.shape( tempFilter )[ 0 ] == 1:\n# tempFilter = tempFilter.reshape( np.shape( tempFilter )[ 1 ] )\n# if input_channel_num == 1:\n# ax[ output_channel ].set_ylim( [ -0.1, 0.1 ] )\n# ax[ output_channel ].plot( list( range( len( tempFilter ) ) ), tempFilter, linewidth = 0.5 )\n# else:\n# ax[ input_channel ][ output_channel ].set_ylim( [ -0.1, 0.1 ] )\n# ax[ input_channel ][ output_channel ].plot( list( range( len( tempFilter ) ) ), tempFilter, linewidth = 0.5 )\n# # if 2-D filter, spectrogram\n# elif np.shape( tempFilter 
)[ 0 ] != 1:\n# if input_channel_num == 1:\n# ax[ output_channel ].imshow( tempFilter )\n# else:\n# ax[ input_channel ][ output_channel ].imshow( tempFilter )\n# if input_channel_num != 1:\n# fig.set_size_inches( input_channel_num *2, output_channel_num *2 )\n# else: \n# fig.set_size_inches( output_channel_num *2, 2 )\n# fig.savefig( filename = saveFolder, dpi = 200 )\n# plt.close('all')\n \n#%% plot the filter for both waveform (1-D), and spectrogram (2-D) and its fft\ndef plotConvFilters( inputM, saveFolder = '' ):\n # inputM in the shape of [ filter_height, filter_width, input_channel_num, output_channel_num ]\n # , in which the total number of 1-D filter is input_channel_num *output_channle_num\n input_channel_num = np.shape( inputM )[ 2 ]\n output_channel_num = np.shape( inputM )[ 3 ]\n fig, ax = plt.subplots( nrows= input_channel_num, ncols= output_channel_num )\n figfft, axfft = plt.subplots( nrows= input_channel_num, ncols= output_channel_num )\n for input_channel in range( 0, input_channel_num ):\n for output_channel in range( 0, output_channel_num ):\n tempFilter = inputM[ :, :, input_channel, output_channel ]\n # if 1-D filter, waveform\n if np.shape( tempFilter )[ 0 ] == 1:\n tempFilter = tempFilter.reshape( np.shape( tempFilter )[ 1 ] )\n if input_channel_num == 1:\n ax[ output_channel ].set_ylim( [ -0.1, 0.1 ] )\n ax[ output_channel ].plot( list( range( len( tempFilter ) ) ), tempFilter, linewidth = 0.5 )\n axfft[ output_channel ].plot( list( range( len( tempFilter ) ) ), np.abs( np.fft.fft( tempFilter ) ), linewidth = 0.5 )\n np.savetxt( saveFolder[ 0: -4 ] + '_' + str( output_channel ) + '.csv', tempFilter, delimiter = ',' )\n else:\n ax[ input_channel ][ output_channel ].set_ylim( [ -0.1, 0.1 ] )\n ax[ input_channel ][ output_channel ].plot( list( range( len( tempFilter ) ) ), tempFilter, linewidth = 0.5 )\n # if 2-D filter, spectrogram\n elif np.shape( tempFilter )[ 0 ] != 1:\n if input_channel_num == 1:\n ax[ output_channel ].imshow( tempFilter )\n else:\n ax[ input_channel ][ output_channel ].imshow( tempFilter )\n if input_channel_num != 1:\n fig.set_size_inches( input_channel_num *2, output_channel_num *2 )\n else: \n fig.set_size_inches( output_channel_num *2, 2 )\n figfft.set_size_inches( output_channel_num *2, 2 )\n fig.savefig( filename = saveFolder, dpi = 200 )\n figfft.savefig( filename = saveFolder[ 0: -4 ] + '_fft.png', dpi = 200 )\n plt.close('all')\n \n#%% plot the filter for both waveform (1-D), and spectrogram (2-D), since the number is large, this function only select a few for each layer\ndef plotRandomConvFilters( inputM, saveFolder = '', plot_num = 16 ):\n # inputM in the shape of [ filter_height, filter_width, input_channel_num, output_channel_num ]\n # , in which the total number of 1-D filter is input_channel_num *output_channle_num\n input_channel_num = np.shape( inputM )[ 2 ]\n output_channel_num = np.shape( inputM )[ 3 ]\n random.seed( 7 )\n selectedFilter = random.sample( range( 0, min( input_channel_num, output_channel_num ) ), plot_num )\n fig, ax = plt.subplots( nrows= 1, ncols= plot_num )\n for randomIndex in range( 0, plot_num ):\n tempFilter = inputM[ :, :, selectedFilter[ randomIndex ], selectedFilter[ randomIndex ] ]\n # if 1-D filter, waveform\n if np.shape( tempFilter )[ 0 ] == 1:\n tempFilter = tempFilter.reshape( np.shape( tempFilter )[ 1 ] )\n ax[ randomIndex ].set_ylim( [ -0.1, 0.1 ] )\n ax[ randomIndex ].plot( list( range( len( tempFilter ) ) ), tempFilter, linewidth = 0.5 )\n # if 2-D filter, spectrogram\n elif np.shape( 
tempFilter )[ 0 ] != 1:\n ax[ randomIndex ].imshow( tempFilter )\n\n fig.set_size_inches( plot_num *2, 2 )\n fig.savefig( filename = saveFolder, dpi = 200 )\n plt.close('all')\n \n#%%\ndef plotALotTSNE( inputM, label, saveFolder ):\n perplexityList = [ 5, 10, 15, 20 ]\n lrList = [ 10 ]\n n_iterList = list( range( 500, 5000, 100 ) )\n for perplexity in perplexityList:\n for lr in lrList:\n for n_iter in n_iterList:\n fileName = saveFolder + str( perplexity ) + '_' + str( lr ) + '_' + str( n_iter ) + '.png'\n plotTSNE( inputM, label, fileName, perplexity = perplexity, n_iter = n_iter, learning_rate = lr )\n \n\n#%% plot TSNE (mainly for dense layer, but can also be used for (flattened) convulutional layers )\ndef plotTSNE( inputM, label, saveFolder, perplexity= 16.0, n_iter = 5000, learning_rate = 10 ):\n inputShape = np.shape( inputM )\n \n # if already dense layer, in shape [ n_samples, n_features ]\n if len( inputShape ) == 2:\n tsneResult = calculateTSNE( inputM, perplexity= perplexity, n_iter = n_iter, learning_rate = learning_rate )\n \n # if conv layers, need first flatten to [ n_samples, n_features ]\n elif len( inputShape ) == 4:\n # flatten inputM \n num_samples = inputShape[ 0 ]\n num_elements = countElements( inputM )\n outputM = np.reshape( inputM, [ num_samples, int( num_elements /num_samples ) ] )\n tsneResult = calculateTSNE( outputM, perplexity= perplexity, n_iter = n_iter, learning_rate = learning_rate )\n label = [ mapLabelToColor( elem ) for elem in label ] \n plt.scatter( x = tsneResult[ :,0 ], y = tsneResult[ :, 1 ], c = label )\n plt.savefig( filename = saveFolder, dpi = 100 )\n plt.close('all')\n return tsneResult\n\n#%% calculate t-SNE\ndef calculateTSNE( inputM, perplexity= 16.0, n_iter = 5000, learning_rate = 10 ):\n randomState = 7\n tsne = TSNE( random_state= randomState, perplexity= perplexity, n_iter_without_progress = n_iter, learning_rate = learning_rate )\n # if many dimensions, first use PCA than t-sne\n if np.shape( inputM )[ 1 ] >= 128:\n pca_50 = PCA( n_components = 128, random_state= randomState )\n pca_50_result = pca_50.fit_transform( inputM )\n tsneResult = tsne.fit_transform( pca_50_result )\n \n # if only a few dimensions, directly use t-sne\n else:\n tsneResult = tsne.fit_transform( inputM )\n# pca_50 = PCA( n_components = 2, random_state= randomState )\n# tsneResult = pca_50.fit_transform( inputM )\n return tsneResult\n\n#%% map label to color\ndef mapLabelToColor( label ):\n if label == 0:\n color = 'r'\n elif label == 1:\n color = 'b'\n elif label == 2:\n color = 'm'\n elif label == 3:\n color = 'k'\n return color\n\ndef sineInit( shape, dtype=None ):\n InitKernal = np.zeros( shape )\n for filterIndex in range( 0, shape[ 3 ] ):\n InitKernal[ 0, :, 0, filterIndex ] = genSineFilter( 200 *( filterIndex + 1 ) )\n InitKernal = InitKernal /64\n return InitKernal\n\n#%%\ndef genSineFilter( frequency, points = 64, sampleRate = 16000 ):\n Ts = 1 /sampleRate\n t = list( np.linspace( -points/2*Ts, points/2*Ts, num= points ) )\n #t = list( xrange( -points/2*Ts, points/2*Ts-Ts, Ts ) )\n sinFilter = [ math.sin( 2 * math.pi * frequency *elem) for elem in t ]\n #plt.plot( sinFilter )\n return sinFilter\n\n#%%\ndef variable_summaries( var ):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n 
tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)","sub_path":"code/experiment/dcase_simpleNetCNN/ex11_256debselargerLR/0.0003/expUtil.py","file_name":"expUtil.py","file_ext":"py","file_size_in_byte":42671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634617312","text":"import csv\nimport pickle\nimport requests\n\n\nowners = None\nwith open('owners.pkl', 'rb') as f:\n owners = pickle.load(f)\n\ns = requests.Session()\n\n# Add authorization headers\nauth = ('madmin', 'MPassWd')\ns.auth = auth\n\nurl = 'http://35.167.45.63/api/contacts/new'\n\nMAX_CONTACTS = 99000\nMAX_COLS = 5\nmax_domains_num = 0\n\nadded2mautic_file = 'added2mautic.csv'\nadded2mautic = []\nwith open(added2mautic_file, 'rt') as f:\n reader = csv.reader(f)\n added2mautic = list(reader)\n\n# emails = ['beknazar23@gmail.com', 'abdik@naver.com', 'earthcops@mail.ru', 'email@jackfitzgerald.com.au']\ni = 0\nadded_emails = []\nowners_count = len(owners)\nfor email, domains in owners.items():\n if i == MAX_CONTACTS:\n print('Done adding %d domains' % (MAX_CONTACTS))\n break\n if email in added2mautic:\n print('Skipping %s' % (email))\n continue\n\n i += 1\n\n domains_num = len(domains)\n\n # Skip too long cols\n if len(domains) > MAX_COLS:\n continue\n\n print('About to add %s. %d/%d' % (email, i, owners_count))\n data = {\n # 'firstname': 'John',\n 'email': email,\n 'tags': 'Whois Leads'\n }\n j = 1\n for domain in domains:\n data['domain_%d_domain' % (j)] = domain[0]\n data['domain_%d_price' % (j)] = domain[1]\n j += 1\n\n r = s.post(url, data)\n error = r.json().get('error')\n if not error:\n print('%s is successfully added' % (email))\n added_emails.append(email)\n with open(added2mautic_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow([email])\n\n else:\n print(error)\n print(email)\n continue\n\n# with open(added2mautic_file, 'a') as f:\n# writer = csv.writer(f)\n# for email in added_emails:\n# writer.writerow([email])\n\n# url = 'http://35.167.45.63/api/fields/contact/new'\n\n# # # Create fields\n# rng = range(30, 101)\n# k = 1\n# for i in rng:\n# print('%d / %d' % (k, len(rng)))\n# k += 1\n# data = {\n# 'label': 'Domain %d: Domain' % (i),\n# 'type': 'text'\n# }\n# r = s.post(url, data)\n# error = r.json().get('error')\n# if not error:\n# print('%s successfully created' % (data['label']))\n# else:\n# print(error)\n# break\n# data = {\n# 'label': 'Domain %d: Price' % (i),\n# 'type': 'number'\n# }\n# r = s.post(url, data)\n# error = r.json().get('error')\n# if not error:\n# print('%s successfully created' % (data['label']))\n# else:\n# print(error)\n# break\n\n# MAX_COLS = 5\n# rows = []\n# max_domains_num = 0\n# for registrant_email, domains in owners.items():\n# row = [registrant_email]\n# domains_num = len(domains)\n\n# # Skip too long cols\n# if len(domains) > MAX_COLS:\n# continue\n\n# # Keep track of max domains num per owner\n# if domains_num > max_domains_num:\n# max_domains_num = domains_num\n# for domain in domains:\n# row.extend([domain[0], domain[1]])\n\n# rows.append(row)\n\n# with open('out_with_whois_5.csv', 'wt') as f:\n# writer = csv.writer(f)\n# colnames = []\n# for i in range(max_domains_num):\n# domain_colname = 'domain %d' % (i+1)\n# colnames.extend([domain_colname, '%s price' % (domain_colname)])\n# writer.writerow(colnames)\n# 
writer.writerows(rows)\n","sub_path":"mautic.py","file_name":"mautic.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588246868","text":"# -*- coding: utf-8 -*-\n# (c) AbAKUS IT Solutions\nimport urllib\nimport base64\nimport tempfile\nfrom zipfile import ZipFile\nfrom string import maketrans\nfrom odoo import models, fields, api, _\n\n\nclass HrExpenseAttachmentsDownload(models.Model):\n _inherit = 'hr.expense.sheet'\n export_file = fields.Binary(\n attachment=True,\n help=\"This field holds the attachments export file.\",\n readonly=True)\n\n @api.model\n def get_valid_filename(self, filename):\n not_letters_or_digits = u'\\/*?:\"<>|'\n if isinstance(filename, unicode):\n translate_table = dict((ord(char), '_') for char in not_letters_or_digits)\n else:\n assert isinstance(filename, str)\n translate_table = maketrans(not_letters_or_digits, '_' * len(not_letters_or_digits))\n return filename.translate(translate_table)\n\n \"\"\"\n gather all attachments in an expense report\n \"\"\"\n @api.model\n def append_attachments(self, zip_file_object, prefix=''):\n temp_file = tempfile.mktemp(suffix='')\n existing_folders = {}\n for line in self.expense_line_ids:\n body = self.get_valid_filename(\"{}-{}#{}\".format(\n line.employee_id.name,\n line.name,\n line.attachment_number))\n base_folder_name = \"{}{}\".format(prefix, body)\n folder_name = base_folder_name\n counter = 1\n while folder_name in existing_folders:\n counter += 1\n folder_name = base_folder_name + \" - \" + str(counter)\n\n existing_folders[folder_name] = True\n attachment_data = self.env['ir.attachment'].search(\n [('res_model', '=', 'hr.expense'), ('res_id', '=', line.id)]\n )\n for f in attachment_data:\n fn = open(temp_file, 'wb')\n fn.write(base64.b64decode(f.datas))\n fn.close()\n zip_file_object.write(temp_file, folder_name + \"/\" + f.datas_fname)\n\n \"\"\"\n button action \n python 2.7 allows self.export_file = base64.encodestrings(fn.read()) to be called.\n python 3.x prefers self.export_file = base64.encodebytes(fn.read())\n \"\"\"\n\n @api.multi\n def download_hr_expense_attachments(self):\n self.ensure_one()\n temp_zip = tempfile.mktemp(suffix='.zip')\n zip_file_object = ZipFile(temp_zip, \"w\")\n self.append_attachments(zip_file_object)\n zip_file_object.close()\n fn = open(temp_zip, 'rb')\n self.export_file = base64.encodestring(fn.read())\n fn.close()\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/binary/download_document?' + urllib.urlencode({\n 'models': 'hr.expense.sheet',\n 'field': 'export_file',\n 'id': self.id,\n 'filename': self.get_valid_filename(self.name) + _(\" (Attachments).zip\")\n }),\n 'target': 'blank',\n }\n","sub_path":"hr_expense_attachments_download/models/hr_expense_attachments_download.py","file_name":"hr_expense_attachments_download.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260356720","text":"\"\"\"\nParser of a generic csv.\n\n@author Xiangyu Bu \n\"\"\"\n\nimport csv\nimport threading\n\nimport xlsxwriter\n\nfrom . import excelhelper\nfrom . 
import exceptions\n\n\nclass BaseCsvCollection:\n \n def __init__(self, name, suffix):\n self._lock = threading.Lock()\n self.name = name\n self.suffix = suffix\n self.all_data = dict()\n print('Created new netstat collection: \"%s\"' % name)\n\n def get_key(self, engine, ts, trace, nworker, args):\n return ts\n\n def add(self, key, data):\n self._lock.acquire()\n self.all_data[key] = data\n self._lock.release()\n\n def to_xlsx(self):\n with open('%s,%s.log' % (self.name, self.suffix), 'w') as f:\n print('Sample size: %d' % len(self.all_data), file=f)\n workbook = xlsxwriter.Workbook('%s,%s.xlsx' % (self.name, self.suffix), {'strings_to_numbers': True})\n summary_sheet = workbook.add_worksheet('Summary')\n sheet_names = []\n max_rowcount = 0\n max_colcount = 0\n for key in sorted(self.all_data.keys()):\n sheet_name = str(key)\n data = self.all_data[key]\n sheet = workbook.add_worksheet(sheet_name)\n sheet_names.append(sheet_name)\n print('Sheet name: %s' % sheet_name, file=f)\n print(' Records: %d' % len(data), file=f)\n if max_rowcount < len(data):\n max_rowcount = len(data)\n for rowid, row in enumerate(data):\n if max_colcount < len(row):\n max_colcount = len(row)\n for colid, col in enumerate(row):\n sheet.write(rowid, colid, col)\n for i in range(max_colcount):\n summary_sheet.write(0, i, '=%s!%s' % (sheet_names[0], excelhelper.excel_style(1, i+1)))\n # summary_sheet.set_column('{0}:{0}'.format(chr(ord('A') + i)), 10)\n for i in range(1, max_rowcount+1):\n for j in range(max_colcount):\n related_cells = []\n for s in sheet_names:\n related_cells.append('%s!%s' % (s, excelhelper.excel_style(i+1, j+1)))\n # print(related_cells)\n summary_sheet.write(i, j, '=MEDIAN(%s)' % ','.join(related_cells))\n workbook.close()\n print('Saved \"%s,%s.xlsx\"' % (self.name, self.suffix))\n\n\nclass BaseCsvParser:\n \n def __init__(self):\n pass\n\n def parse(self, path):\n \"\"\" CSV file is assumed to have header. \"\"\"\n data = []\n with open(path, 'rU') as f:\n reader = csv.reader(f)\n try:\n data.append(next(reader))\n except StopIteration:\n raise exceptions.NoContentException('File \"%s\" is empty.' 
% path)\n # data.append(header)\n for row in reader:\n data.append(row)\n return data\n","sub_path":"suricata/dataparser/csv2xlsx.py","file_name":"csv2xlsx.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"335730655","text":"# -*- coding: utf-8 -*-\n\"\"\"\ntest_parse_doi\n~~~~~~~~~~~~~~\n\nTest DOI parser.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport logging\nimport unittest\n\nfrom lxml import etree\n\nfrom chemdataextractor.doc.text import Sentence\nfrom chemdataextractor.parse.hrms import hrms\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\n\nclass TestParseHRMS(unittest.TestCase):\n maxDiff = None\n\n def do_parse(self, input, expected):\n s = Sentence(input)\n log.debug(s)\n log.debug(s.tagged_tokens)\n result = next(hrms.scan(s.tagged_tokens))[0]\n log.debug(etree.tostring(result, pretty_print=True, encoding='unicode'))\n self.assertEqual(expected, etree.tostring(result, encoding='unicode'))\n\n def test_hrms1(self):\n s = 'HRMS (ESI) calcd for C34H28N4OP 539.1995 [M + H]+, found 539.1997.'\n output = 'C34H28N4OP'\n self.do_parse(s, output)\n\n def test_hrms2(self):\n s = 'HRMS: 184.0767 [M + Na]+.'\n output = ''\n self.do_parse(s, output)\n\n def test_hrms3(self):\n s = 'HRMS-ESI (m/z): calcd. for C42H52NO9 [M + NH4]+ 714.3637, found 714.3633.'\n output = 'C42H52NO9'\n self.do_parse(s, output)\n\n def test_hrms4(self):\n s = 'MALDI-HRMS (matrix: HCCA) Calculated for C32H48N4O6: [M + H]+ m/z 585.3607, Found 585.3636.'\n output = 'C32H48N4O6'\n self.do_parse(s, output)\n\n def test_hrms5(self):\n s = 'HRMS (m/z): 827.6005 [M+Na]+ (calcd. for C48H84O9Na: 827.6013). '\n output = 'C48H84O9Na'\n self.do_parse(s, output)\n\n def test_hrms6(self):\n s = 'HRMS [M−H]+ m/z calcd. for C24H32N9+ 446.2781, found 446.2775.'\n output = 'C24H32N9+'\n self.do_parse(s, output)\n\n def test_hrms7(self):\n s = 'DCI-HRMS: m/z 289.0916 [M+H]+; (Calcd for C12H16O8, 288.0845)'\n output = 'C12H16O8'\n self.do_parse(s, output)\n\n def test_hrms8(self):\n s = 'ES-HRMS: m/z 115.0393 [M−H]−; (Calcd for C5H7O3, 116.0473).'\n output = 'C5H7O3'\n self.do_parse(s, output)\n\n def test_hrms9(self):\n s = 'HRMS (ESI) calcd for C27H24N4P 435.1733 [M + H]+, found 435.1738.'\n output = 'C27H24N4P'\n self.do_parse(s, output)\n\n def test_hrms10(self):\n s = 'HRMS (ESI): [M − H]−, found 344.8591. C11H5Br2O3− requires 344.8585.'\n output = 'C11H5Br2O3−'\n self.do_parse(s, output)\n\n def test_hrms11(self):\n s = 'HRMS (ESI): calcd. for C13H11BrO3Na+ [M + Na]+ 316.9789, found 316.9785.'\n output = 'C13H11BrO3Na+'\n self.do_parse(s, output)\n\n def test_hrms12(self):\n s = 'HR-ESI-MS [M − H]− m/z: 447.0854, Calcd. 
for C21H21O9P (M − H) 447.0923.'\n output = 'C21H21O9P'\n self.do_parse(s, output)\n","sub_path":"tests/test_parse_hrms.py","file_name":"test_parse_hrms.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241362343","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__all__ = ['baidu_download']\n\nfrom ..common import *\n\nfrom urllib import parse\n\ndef baidu_get_song_data(sid):\n data = json.loads(get_html('http://music.baidu.com/data/music/fmlink?songIds=%s' % sid, faker = True))['data']\n\n if data['xcode'] != '':\n # inside china mainland\n return data['songList'][0]\n else:\n # outside china mainland\n return None\n\ndef baidu_get_song_url(data):\n return data['songLink']\n\ndef baidu_get_song_artist(data):\n return data['artistName']\n\ndef baidu_get_song_album(data):\n return data['albumName']\n\ndef baidu_get_song_title(data):\n return data['songName']\n\ndef baidu_get_song_lyric(data):\n lrc = data['lrcLink']\n return None if lrc is '' else \"http://music.baidu.com%s\" % lrc\n\ndef baidu_download_song(sid, output_dir='.', merge=True, info_only=False):\n data = baidu_get_song_data(sid)\n if data is not None:\n url = baidu_get_song_url(data)\n title = baidu_get_song_title(data)\n artist = baidu_get_song_artist(data)\n album = baidu_get_song_album(data)\n lrc = baidu_get_song_lyric(data)\n file_name = \"%s - %s - %s\" % (title, album, artist)\n else:\n html = get_html(\"http://music.baidu.com/song/%s\" % sid)\n url = r1(r'data_url=\"([^\"]+)\"', html)\n title = r1(r'data_name=\"([^\"]+)\"', html)\n file_name = title\n\n type, ext, size = url_info(url, faker=True)\n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], file_name, ext, size, output_dir, merge=merge, faker=True)\n\n try:\n type, ext, size = url_info(lrc, faker=True)\n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([lrc], file_name, ext, size, output_dir, faker=True)\n except:\n pass\n\ndef baidu_download_album(aid, output_dir = '.', merge = True, info_only = False):\n html = get_html('http://music.baidu.com/album/%s' % aid, faker = True)\n album_name = r1(r'

      (.+?)<\\/h2>', html)\n artist = r1(r'', html)\n output_dir = '%s/%s - %s' % (output_dir, artist, album_name)\n ids = json.loads(r1(r'', html).replace('"', '').replace(';', '\"'))['ids']\n track_nr = 1\n for id in ids:\n song_data = baidu_get_song_data(id)\n song_url = baidu_get_song_url(song_data)\n song_title = baidu_get_song_title(song_data)\n song_lrc = baidu_get_song_lyric(song_data)\n file_name = '%02d.%s' % (track_nr, song_title)\n\n type, ext, size = url_info(song_url, faker = True)\n print_info(site_info, song_title, type, size)\n if not info_only:\n download_urls([song_url], file_name, ext, size, output_dir, merge = merge, faker = True)\n\n if song_lrc:\n type, ext, size = url_info(song_lrc, faker = True)\n print_info(site_info, song_title, type, size)\n if not info_only:\n download_urls([song_lrc], file_name, ext, size, output_dir, faker = True)\n\n track_nr += 1\n\ndef baidu_download(url, output_dir = '.', stream_type = None, merge = True, info_only = False):\n if re.match(r'http://pan.baidu.com', url):\n html = get_html(url)\n\n title = r1(r'server_filename=\"([^\"]+)\"', html)\n if len(title.split('.')) > 1:\n title = \".\".join(title.split('.')[:-1])\n\n real_url = r1(r'\\\\\"dlink\\\\\":\\\\\"([^\"]*)\\\\\"', html).replace('\\\\\\\\/', '/')\n type, ext, size = url_info(real_url, faker = True)\n\n print_info(site_info, title, ext, size)\n if not info_only:\n download_urls([real_url], title, ext, size, output_dir, merge = merge)\n\n elif re.match(r'http://music.baidu.com/album/\\d+', url):\n id = r1(r'http://music.baidu.com/album/(\\d+)', url)\n baidu_download_album(id, output_dir, merge, info_only)\n\n elif re.match('http://music.baidu.com/song/\\d+', url):\n id = r1(r'http://music.baidu.com/song/(\\d+)', url)\n baidu_download_song(id, output_dir, merge, info_only)\n\nsite_info = \"Baidu.com\"\ndownload = baidu_download\ndownload_playlist = playlist_not_supported(\"baidu\")\n","sub_path":"src/you_get/extractors/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450173104","text":"\"\"\"Support for HomeMatic binary sensors.\"\"\"\nfrom homeassistant.components.binary_sensor import (\n DEVICE_CLASS_BATTERY,\n DEVICE_CLASS_MOTION,\n DEVICE_CLASS_OPENING,\n DEVICE_CLASS_PRESENCE,\n DEVICE_CLASS_SMOKE,\n BinarySensorEntity,\n)\n\nfrom .const import ATTR_DISCOVER_DEVICES, ATTR_DISCOVERY_TYPE, DISCOVER_BATTERY\nfrom .entity import HMDevice\n\nSENSOR_TYPES_CLASS = {\n \"IPShutterContact\": DEVICE_CLASS_OPENING,\n \"IPShutterContactSabotage\": DEVICE_CLASS_OPENING,\n \"MaxShutterContact\": DEVICE_CLASS_OPENING,\n \"Motion\": DEVICE_CLASS_MOTION,\n \"MotionV2\": DEVICE_CLASS_MOTION,\n \"PresenceIP\": DEVICE_CLASS_PRESENCE,\n \"Remote\": None,\n \"RemoteMotion\": None,\n \"ShutterContact\": DEVICE_CLASS_OPENING,\n \"Smoke\": DEVICE_CLASS_SMOKE,\n \"SmokeV2\": DEVICE_CLASS_SMOKE,\n \"TiltSensor\": None,\n \"WeatherSensor\": None,\n \"IPContact\": DEVICE_CLASS_OPENING,\n \"MotionIPV2\": DEVICE_CLASS_MOTION,\n \"IPRemoteMotionV2\": DEVICE_CLASS_MOTION,\n}\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the HomeMatic binary sensor platform.\"\"\"\n if discovery_info is None:\n return\n\n devices = []\n for conf in discovery_info[ATTR_DISCOVER_DEVICES]:\n if discovery_info[ATTR_DISCOVERY_TYPE] == DISCOVER_BATTERY:\n devices.append(HMBatterySensor(conf))\n else:\n devices.append(HMBinarySensor(conf))\n\n 
add_entities(devices, True)\n\n\nclass HMBinarySensor(HMDevice, BinarySensorEntity):\n \"\"\"Representation of a binary HomeMatic device.\"\"\"\n\n @property\n def is_on(self):\n \"\"\"Return true if switch is on.\"\"\"\n if not self.available:\n return False\n return bool(self._hm_get_state())\n\n @property\n def device_class(self):\n \"\"\"Return the class of this sensor from DEVICE_CLASSES.\"\"\"\n # If state is MOTION (Only RemoteMotion working)\n if self._state == \"MOTION\":\n return DEVICE_CLASS_MOTION\n return SENSOR_TYPES_CLASS.get(self._hmdevice.__class__.__name__)\n\n def _init_data_struct(self):\n \"\"\"Generate the data dictionary (self._data) from metadata.\"\"\"\n # Add state to data struct\n if self._state:\n self._data.update({self._state: None})\n\n\nclass HMBatterySensor(HMDevice, BinarySensorEntity):\n \"\"\"Representation of an HomeMatic low battery sensor.\"\"\"\n\n _attr_device_class = DEVICE_CLASS_BATTERY\n\n @property\n def is_on(self):\n \"\"\"Return True if battery is low.\"\"\"\n return bool(self._hm_get_state())\n\n def _init_data_struct(self):\n \"\"\"Generate the data dictionary (self._data) from metadata.\"\"\"\n # Add state to data struct\n if self._state:\n self._data.update({self._state: None})\n","sub_path":"homeassistant/components/homematic/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384703165","text":"# -*- coding: utf-8 -*-\n\nimport click\nimport multiprocessing\n\nfrom alias_db import Alias, AliasDB\n\nfrom .. import cli\n\n\n@cli.command()\n@click.argument('dump_file', type=click.Path(exists=True))\n@click.argument('out_file', type=click.File(mode='w'))\n@click.argument('dictionary', type=click.File(mode='r'))\n@click.option('--tokenize-title', is_flag=True)\n@click.option('--parallel/--no-parallel', default=True)\n@click.option('--pool-size', default=multiprocessing.cpu_count())\n@click.option('--chunk-size', default=100)\ndef build_wikipedia_alias_db(dump_file, out_file, dictionary, **kwargs):\n from entity_vector.dictionary import Dictionary\n import wikipedia_alias_db_builder\n\n dictionary = Dictionary.load(dictionary)\n wikipedia_alias_db_builder.build(dump_file, out_file, dictionary, **kwargs)\n\n\n@cli.command()\n@click.argument('aida_means_file', type=click.Path(exists=True))\n@click.argument('wiki_label_jsonl_file', type=click.File())\n@click.argument('out_file', type=click.File(mode='w'))\ndef build_yago_alias_db(**kwargs):\n import yago_alias_db_builder\n\n yago_alias_db_builder.build(**kwargs)\n","sub_path":"web/entity-disambi/entity_disambi/alias_db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82804598","text":"import style\nstyle.setup()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys\nfrom glob import glob\nimport tables as tb\nimport pycurb.analysis as pa\nfrom scipy import stats\nimport itertools as it\nfrom scipy import optimize\n\ncmap = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628','#f781bf']\n\nfrom cycler import cycler\nplt.rc('axes', prop_cycle=(cycler('color', cmap)))\n\ndef main():\n #dpath = '/home/upgp/jruebsam/simulations/april16/week1/tcflow/long/'\n dpath = '/home/upgp/jruebsam/simulations/april16/week1/tcflow/gc'\n f, (ax, ax2) = plt.subplots(1, 2, figsize = style.figsize(1, 
hscale=0.6))\n\n modes = ['df', 'dffrac', 'vp',\\\n 'vpfrac', 'ip', 'ipzero' ]\n labels = ['DF', 'DF-Vol.Frac.', 'VP', 'VP-Vol.Frac.', 'IP', 'IP+DF']\n\n re = 100.\n pmax = 4./re\n\n pr = 1./re\n rrel = 0.4\n lx, ly = 1/rrel, 1/rrel\n\n rs = 96\n\n f, axes = plt.subplots(2, 3)\n\n for label, method, ax in zip(labels, modes, axes.flatten()):\n on = 'o2'\n\n #var_path = os.path.join(method, on)\n var_path = os.path.join(method, on, 'res_128')\n sim_path = os.path.join(dpath, os.path.dirname(__file__), \"data\", var_path)\n\n with tb.open_file(sim_path +\"/simulation.h5\") as d:\n rho = d.root.simdata.rho[-1, :, :, 1]\n\n cax = ax.imshow(rho, interpolation='nearest')\n\n plt.sca(ax)\n c = plt.colorbar(cax, pad=0., label=r'$\\rho$', format='%.3g')\n ax.axes.get_xaxis().set_ticks([])\n ax.axes.get_yaxis().set_ticks([])\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n\n ax.set_title('{} {}'.format(label, on))\n plt.savefig('rho.pdf')\n\n\nif __name__=='__main__':\n main()\n","sub_path":"gfx/immersed_boundary/tcflow/long/bu.py","file_name":"bu.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126676532","text":"\nfrom abstract.IXMLParse import IXMLParse\nimport xml.etree.ElementTree as ET\n\n\nclass XMLParse(IXMLParse):\n \"\"\"\n xmlをパースして返す\n \"\"\"\n\n def __init__(self, file_path=None, root_tag_name=None):\n self._file_path = file_path\n self._elm = None\n self._root_tag_name = root_tag_name\n\n @property\n def root_elm(self):\n if(self._file_path is not None):\n with open(self._file_path,\n 'rt', encoding='utf-8', newline=\"\\n\") as f:\n tree = ET.parse(f)\n return tree.getroot().find(self._root_tag_name) \\\n if self._file_path is not None else tree.getroot()\n if(self._elm is not None):\n return self._elm\n AssertionError\n\n @root_elm.setter\n def root_elm(self, elm):\n self._elm = elm\n\n def get_element(self, parent_elm, elm_str):\n \"\"\"\n 引数の要素から指定された子要素を返す\n 引数:\\n\n ・parent_elm:親要素\\n\n ・elm_str:対象のタグ名\n \"\"\"\n return parent_elm.find('{}'.format(elm_str))\n\n def get_list_in_atrr_dict_by_tag(self, parent_elm, child_tag_str):\n \"\"\"\n 親要素から子要素の属性の辞書を格納した配列を返す\n 引数:\\n\n ・parent_elm:親要素\\n\n ・child_tag:子要素タグ名\\n\n \"\"\"\n return [\n node.attrib\n for node in parent_elm\n ]\n\n def get_child_value(self, parent_elm, child_tag_str):\n \"\"\"\n 単一の子要素の要素を格納した辞書を返す\\n\n 引数:\\n\n ・parent_elm:親要素\\n\n \"\"\"\n result = {}\n child_elm = parent_elm.find(child_tag_str)\n _prosess_str = self._processing_str(child_elm.text)\n\n return self._cast_str(\n _prosess_str, child_elm.attrib['type'])\n\n def get_dict_by_tag_child(self, parent_elm):\n \"\"\"\n タグから子要素の要素を格納した辞書を返す\\n\n 要素内のテキストが空だった場合はdefault属性の値を取得\\n\n 引数:\\n\n ・parent_elm:親要素\\n\n ・child_tag_str:子要素タグ名\\n\n \"\"\"\n\n result = {}\n for node in parent_elm:\n _prosess_str = self._processing_str(node.text)\n\n _dict_value = node.attrib['default'] if 'default' in node.attrib \\\n and _prosess_str == '' else _prosess_str\n\n result = {**result, ** {node.tag: self._cast_str(\n _dict_value, node.attrib['type'])}}\n return result\n\n def get_dict_by_tag_grandson(self, parent_elm, grandson_tag_str, id=None):\n \"\"\"\n タグから孫要素の要素を格納した辞書を返す(id指定)\\n\n 要素内のテキストが空だった場合はdefault属性の値を取得\\n\n 引数:\\n\n ・parent_elm:親要素\\n\n ・grandson_tag_str:孫要素タグ名\\n\n\n ・id:親のid属性\\n\n \"\"\"\n for node in parent_elm:\n result = {}\n if(id is not None and node.attrib['id'] != id):\n continue\n for elm in node.find(grandson_tag_str):\n _prosess_str = 
self._processing_str(elm.text)\n\n _dict_value = elm.attrib['default'] if _prosess_str == '' \\\n else _prosess_str\n\n result = {**result, ** {elm.tag: self._cast_str(\n _dict_value, elm.attrib['type'])}}\n return result\n\n @staticmethod\n def _processing_str(_str):\n \"\"\"\n ・改行コード除外\\n\n ・タブコード除外\\n\n ・Trim\\n\n 引数:\\n\n ・_str:変換対象文字列\n \"\"\"\n return _str.rstrip('\\n').rstrip('\\t').strip()\n\n @staticmethod\n def _cast_str(_str, _type):\n \"\"\"\n ・キャストする\\n\n 引数:\\n\n ・_str:変換対象文字列\n ・_type:変換型\n \"\"\"\n if(_type == \"string\"):\n return str(_str)\n elif(_type == \"integer\"):\n return int(_str)\n else:\n return _str\n","sub_path":"src/Utils/XMLParse.py","file_name":"XMLParse.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10504315","text":"import media\r\nimport fresh_tomatoes\r\n\r\n\r\n#Add movies by adding Title, Duration, Poster link, Description, and trailer\r\ntoy_story = media.Movie(\r\n \"Toy Story\", \"120 minutes\",\r\n \"https://upload.wikimedia.org/wikipedia/commons/4/46/Toy_Story.svg\",\r\n \"A story of toys\",\r\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\r\n\r\navatar = media.Movie(\r\n \"Avatar\",\r\n \"120 minutes\",\r\n \"http://upload.wikimedia.org/wikipedia/id/b/b0/Avatar-Teaser-Poster.jpg\",\r\n \"A marine on an alien planet\",\r\n \"http://www.youtube.com/watch?v=-9ceBgWV8io\")\r\n\r\nschool_of_rock = media.Movie(\r\n \"School of Rock\",\r\n \"120 minutes\",\r\n \"http://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg\",\r\n \"Using Rock to Learn\", \"https://www.youtube.com/watch?v=3PsUJFEBC74\")\r\n\r\nratatoullie = media.Movie(\r\n \"Ratatoullie\",\r\n \"120 minutes\",\r\n \"http://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg\",\r\n \"A rat's a chef\",\r\n \"https://www.youtube.com/watch?v=c3sBBRxDAqk\")\r\n\r\nThe_fast_and_the_furious = media.Movie(\r\n \"The Fast and the Furious\",\r\n \"120 minutes\",\r\n \"https://goo.gl/vE7sdr\",\r\n \"An amazing story of street racers\",\r\n \"https://www.youtube.com/watch?v=ZsJz2TJAPjw\")\r\n\r\nSeinfeld = media.TvShow(\r\n \"Seinfeld\",\r\n \"30 minutes\",\r\n \"https://upload.wikimedia.org/wikipedia/commons/7/78/Seinfeld_logo.svg\",\r\n \"Season 1\",\r\n \"Episode 1\",\r\n \"https://www.youtube.com/watch?v=PaPxSsK6ZQA\")\r\n\r\nmovies = [ratatoullie, The_fast_and_the_furious, toy_story, avatar,\r\n school_of_rock, Seinfeld]\r\n\r\nfresh_tomatoes.open_movies_page(movies)\r\n","sub_path":"entertainment.py","file_name":"entertainment.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"611683458","text":"import telegram\nimport logging\nfrom telegram.ext import Updater\nimport cropper\nfrom telegram.ext import CommandHandler, BaseFilter\nfrom telegram.ext import MessageHandler, Filters, CallbackQueryHandler\nimport os\nimport time\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nTOKEN = ''\n\nbot = telegram.Bot(token=TOKEN)\nupdater = Updater(token=TOKEN)\ndispatcher = updater.dispatcher\n\ntry: # creats output folder if it doesn't exist\n os.mkdir('output')\nexcept FileExistsError:\n pass\ntry: # creats input folder if it doesn't exist\n os.mkdir('input')\nexcept FileExistsError:\n pass\n\ndef start(bot, updater):\n bot.send_message(chat_id=updater.message.chat_id, text=\"This is Sticker Creator bot made by 
CoolkaOS!\\nSend me your photos and I will crop them to be round!\")\n\n\ndef hmp(bot, updater):\n bot.send_message(chat_id=updater.message.chat.id, text='Reply to this message with photos', reply_markup=telegram.ForceReply())\n time.sleep(4)\n btnlist = [\n telegram.InlineKeyboardButton('Photos.', callback_data='photos'),\n telegram.InlineKeyboardButton('Files.', callback_data='files'),\n ]\n markup = telegram.InlineKeyboardMarkup(cropper.build_menu(btnlist, n_cols=2))\n bot.send_message(chat_id=updater.message.chat.id, text='What type of output do you prefer?',\n reply_markup=markup)\n\ndef download(bot, updater):\n if updater.message.photo:\n file_id = updater.message.photo[-1]\n else:\n file_id = updater.message.document\n newfile = bot.get_file(file_id)\n newfile.download('input/{}.png'.format(file_id['file_id'][-10:]))\n\n\ndef error(bot, updater):\n bot.send_message(chat_id=updater.message.chat.id, text='Error.')\n\n\ndef query_h(bot, updater):\n call = updater.callback_query\n if call.data == 'files':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=call.message.text)\n bot.send_chat_action(chat_id=call.message.chat.id, action=telegram.ChatAction.UPLOAD_PHOTO)\n cropper.crop_all()\n for file in os.listdir('output/'):\n bot.send_document(chat_id=call.message.chat.id, document=open('output/' + file, 'rb'))\n cropper.clear()\n if call.data == 'photos':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=call.message.text)\n bot.send_chat_action(chat_id=call.message.chat.id, action=telegram.ChatAction.UPLOAD_PHOTO)\n cropper.crop_all()\n for file in os.listdir('output/'):\n bot.send_photo(chat_id=call.message.chat.id, photo=open('output/' + file, 'rb'))\n cropper.clear()\n\n\ndispatcher.add_handler(CallbackQueryHandler(query_h))\ndispatcher.add_handler(CommandHandler('start', start))\ndispatcher.add_handler(CommandHandler('crop', hmp))\ndispatcher.add_handler(MessageHandler(Filters.reply, download))\ndispatcher.add_handler(MessageHandler(Filters.chat, error))\nupdater.start_polling()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450303149","text":"import os\nimport sys\n\n# NOTE: This\nsys.path.append(\n \"{}/packages/python\".format(\n os.environ[\"GitHub\"]\n )\n)\nimport galpak\n\nimport autofit as af\nimport autolens as al\n\nif al.__version__ in [\n \"0.45.0\"\n]:\n from autoastro import dimensions as dim\nif al.__version__ in [\n \"0.46.0\",\n \"0.46.1\",\n \"0.46.2\"\n]:\n from autogalaxy import dimensions as dim\n\nimport numpy as np\n\n\ndef cube_from_image(image, shape_3d):\n\n # NOTE:\n if image.in_2d.shape != shape_3d[1:]:\n raise ValueError(\"...\")\n\n return np.tile(\n A=image.in_2d, reps=(shape_3d[0], 1, 1)\n )\n\n\nclass EllipticalSersic(al.lp.EllipticalSersic):\n @af.map_types\n def __init__(\n self,\n centre: dim.Position = (0.0, 0.0),\n axis_ratio: float = 1.0,\n phi: float = 0.0,\n intensity: dim.Luminosity = 0.1,\n effective_radius: dim.Length = 0.6,\n sersic_index: float = 4.0,\n ):\n\n super(EllipticalSersic, self).__init__(\n centre=centre,\n axis_ratio=axis_ratio,\n phi=phi,\n intensity=intensity,\n effective_radius=effective_radius,\n sersic_index=sersic_index,\n )\n\n\n @property\n def analytic(self):\n return True\n\n\n def profile_cube_from_grid(self, grid, shape_3d, z_step_kms, grid_radial_minimum=None):\n\n return 
cube_from_image(\n image=self.profile_image_from_grid(\n grid=grid,\n grid_radial_minimum=grid_radial_minimum\n ),\n shape_3d=shape_3d\n )\n\n\n\nclass Kinematical(al.lp.LightProfile):\n @af.map_types\n def __init__(\n self,\n centre: dim.Position = (0.0, 0.0),\n z_centre: float = 0.0,\n intensity: float = 0.1,\n effective_radius: float = 1.0,\n inclination: float = 0.0,\n phi: float = 50.0,\n turnover_radius: float = 0.0,\n maximum_velocity: float = 200.0,\n velocity_dispersion: float = 50.0,\n ):\n\n super(Kinematical, self).__init__()\n\n self.centre = centre\n self.z_centre = z_centre\n self.intensity = intensity\n self.effective_radius = effective_radius\n self.inclination = inclination\n self.phi = phi\n self.turnover_radius = turnover_radius\n self.maximum_velocity = maximum_velocity\n self.velocity_dispersion = velocity_dispersion\n\n\n @property\n def analytic(self):\n return False\n\n\n def convert_centre_from_arcsec_to_pixels(self, value, pixel_scale, n_pixels):\n\n return value / pixel_scale + n_pixels / 2.0\n\n\n def convert_radius_from_arcsec_to_pixels(self, value, pixel_scale):\n\n return value / pixel_scale\n\n\n # NOTE: grid should be replaced with grid_3d (working on it)\n # NOTE: Maybe not the best way to do this ... (It is very fast the way it is)\n def convert_parameters(self, grid):\n #start = time.time()\n\n #galpak = (x, y)\n #autolens_centre = (y, x)\n\n # NOTE: \"converted_parameters\" can also be a numpy array.\n converted_parameters = []\n for i, (name, value) in enumerate(self.__dict__.items()):\n if name not in [\"id\", \"_assertions\", \"cls\"]:\n #print(name, value)\n if name == \"centre\":\n for (i, sign) in zip([1, 0], [1.0, -1.0]):\n converted_parameters.append(\n self.convert_centre_from_arcsec_to_pixels(\n value=sign * value[i],\n pixel_scale=grid.pixel_scale,\n n_pixels=grid.shape_2d[i]\n )\n )\n\n elif name.endswith(\"radius\"):\n converted_parameters.append(\n self.convert_radius_from_arcsec_to_pixels(\n value=value,\n pixel_scale=grid.pixel_scale\n )\n )\n else:\n converted_parameters.append(value)\n # end = time.time()\n # print(\n # \"It took t={} to convert parameters\".format(end - start)\n # )\n\n #print(\"parameters (converted):\", converted_parameters)\n\n return converted_parameters\n\n\n # NOTE: grid should be replaced with grid_3d, where in this case shape_3d will be deprecated.\n def profile_cube_from_grid(self, grid, shape_3d, z_step_kms):\n\n # NOTE: I dont want to initialize this every time.\n model = galpak.DiskModel(\n flux_profile='exponential',\n thickness_profile=\"gaussian\",\n rotation_curve='isothermal',\n dispersion_profile=\"thick\"\n )\n\n cube, _, _, _ = model._create_cube(\n galaxy=galpak.GalaxyParameters.from_ndarray(\n a=self.convert_parameters(\n grid=grid\n )\n ),\n shape=shape_3d,\n z_step_kms=z_step_kms,\n zo=self.z_centre\n )\n\n # NOTE: The returned object should be\n return cube.data\n","sub_path":"autofit/delete/tutorial_6_v0.1/src/model/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255031855","text":"#!/usr/bin/python\r\n# -*- coding: sjis -*-\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom transformers import BertModel, BertConfig\r\n\r\nimport numpy as np\r\nimport pickle\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# Data setting\r\n\r\nwith 
open('xtrain.pkl','br') as fr:\r\n    xtrain = pickle.load(fr)\r\n\r\nwith open('ytrain.pkl','br') as fr:\r\n    ytrain = pickle.load(fr)\r\n\r\n# Define model\r\n\r\nbert = BertModel.from_pretrained('cl-tohoku/bert-base-japanese')\r\n\r\nclass DocCls(nn.Module):\r\n    def __init__(self,bert):\r\n        super(DocCls, self).__init__()\r\n        self.bert = bert\r\n        self.cls=nn.Linear(768,9)\r\n    def forward(self,x):\r\n        bout = self.bert(x)\r\n        bs = len(bout[0])\r\n        h0 = [ bout[0][i][0] for i in range(bs)]\r\n        h0 = torch.stack(h0,dim=0)\r\n        return self.cls(h0)\r\n\r\n# model generate, optimizer and criterion setting\r\n\r\nnet = DocCls(bert).to(device)\r\noptimizer = optim.SGD(net.parameters(),lr=0.001)\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\n# Learn\r\n\r\nnet.train()\r\nfor ep in range(30):\r\n    lossK = 0.0\r\n    for i in range(len(xtrain)):\r\n        x = torch.LongTensor(xtrain[i]).unsqueeze(0).to(device)\r\n        y = torch.LongTensor([ ytrain[i] ]).to(device)\r\n        out = net(x)\r\n        loss = criterion(out,y)\r\n        lossK += loss.item()\r\n        if (i % 50 == 0):\r\n            print(ep, i, lossK)\r\n            lossK = 0.0\r\n        optimizer.zero_grad()\r\n        loss.backward()\r\n        optimizer.step()\r\n    outfile = \"doccls-\" + str(ep) + \".model\"\r\n    torch.save(net.state_dict(),outfile)\r\n","sub_path":"pytorch-nlp/Chapter5/BERT-doccls/doccls.py","file_name":"doccls.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436346910","text":"# mouse_button_click.py Small example about mouse clicable buttons\n\n# We need import this\nfrom tkinter import *\n\n# Create window\nmy_window_app = Tk()\n\n# Define 3 buttons, each for other click operation\ndef write_acction_1(Event):\n    print('This is acction after click on 1-st mouse button')\n\ndef write_acction_2(Event):\n    print('This is acction after click on 2-nd mouse button (scroll)')\n\ndef write_acction_3(Event):\n    print('This is acction after click on 3-th mouse button')\n\n# Other version with \"command\", better is to \"bind\" buttons and events like below\n#button_1 = Button(my_window_app, text = 'Click me!', command = wypisz)\n\nbutton_1 = Button(my_window_app, text = 'Click me (1)!')\nbutton_1.bind('<Button-1>', write_acction_1)\n\nbutton_2 = Button(my_window_app, text = 'Click me (2)!')\nbutton_2.bind('<Button-2>', write_acction_2)\n\nbutton_3 = Button(my_window_app, text = 'Click me (3)!')\nbutton_3.bind('<Button-3>', write_acction_3)\n\n# Pack buttons\nbutton_1.pack()\nbutton_2.pack()\nbutton_3.pack()\n\n# We always need this - mainloop!\nmy_window_app.mainloop()\n","sub_path":"Small_examples/mouse_button_click.py","file_name":"mouse_button_click.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194702607","text":"from django.urls import path\nfrom .views import ProductListView, RetrieveProduct, ListPosts, PostDetails\n\nurlpatterns = [\n    path('api/v1/products', ProductListView.as_view(), name='product_list'),\n    path('api/v1/products/<int:pk>/', RetrieveProduct.as_view(), name='product_list'),\n\n    path('api/v1/blog/post', ListPosts.as_view(), name='post_list'),\n    path('api/v1/blog/post/<int:pk>/', PostDetails.as_view(), name='post_details')\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336626806","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 
2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/swainn/projects/tethysdev/django-tethys_apps/tethys_apps/app_installation.py\n# Compiled at: 2014-09-22 18:21:43\nimport os, shutil, subprocess\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\ndef get_tethysapp_directory():\n    \"\"\"\n    Return the absolute path to the tethysapp directory.\n    \"\"\"\n    return os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tethysapp')\n\n\ndef _run_install(self):\n    \"\"\"\n    The definition of the \"run\" method for the CustomInstallCommand metaclass.\n    \"\"\"\n    tethysapp_dir = get_tethysapp_directory()\n    destination_dir = os.path.join(tethysapp_dir, self.app_package)\n    print ('Copying App Package: {0} to {1}').format(self.app_package_dir, destination_dir)\n    try:\n        shutil.copytree(self.app_package_dir, destination_dir)\n    except:\n        try:\n            shutil.rmtree(destination_dir)\n        except:\n            os.remove(destination_dir)\n\n        shutil.copytree(self.app_package_dir, destination_dir)\n\n    for dependency in self.dependencies:\n        subprocess.call(['pip', 'install', dependency])\n\n    install.run(self)\n\n\ndef _run_develop(self):\n    \"\"\"\n    The definition of the \"run\" method for the CustomDevelopCommand metaclass.\n    \"\"\"\n    tethysapp_dir = get_tethysapp_directory()\n    destination_dir = os.path.join(tethysapp_dir, self.app_package)\n    print ('Creating Symbolic Link to App Package: {0} to {1}').format(self.app_package_dir, destination_dir)\n    try:\n        os.symlink(self.app_package_dir, destination_dir)\n    except:\n        try:\n            shutil.rmtree(destination_dir)\n        except:\n            os.remove(destination_dir)\n\n        os.symlink(self.app_package_dir, destination_dir)\n\n    for dependency in self.dependencies:\n        subprocess.call(['pip', 'install', dependency])\n\n    develop.run(self)\n\n\ndef custom_install_command(app_package, app_package_dir, dependencies):\n    \"\"\"\n    Returns a custom install command class that is tailored for the app calling it.\n    \"\"\"\n    properties = {'app_package': app_package, 'app_package_dir': app_package_dir, \n       'dependencies': dependencies, \n       'run': _run_install}\n    return type('CustomInstallCommand', (install, object), properties)\n\n\ndef custom_develop_command(app_package, app_package_dir, dependencies):\n    \"\"\"\n    Returns a custom develop command class that is tailored for the app calling it.\n    \"\"\"\n    properties = {'app_package': app_package, 'app_package_dir': app_package_dir, \n       'dependencies': dependencies, \n       'run': _run_develop}\n    return type('CustomDevelopCommand', (develop, object), properties)","sub_path":"pycfiles/django-tethys_apps-0.9.0.tar/app_installation.py","file_name":"app_installation.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84384940","text":"from django.db.models.functions import datetime\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django import forms\nfrom django.contrib.auth.models import User\n\nfrom .forms import ClassForm, SignForm\nfrom .forms import TakeForm\nfrom .models import Take, Subject, Instructor\n\n\ndef SugangMain(request):\n    # Course registration main view\n    template_name='sugang/SugangMain.html'\n    # All subjects, all instructors, and the current user\n    subjects = Subject.objects.all()\n    instructors = Instructor.objects.all()\n    user = User.objects.get(id=request.user.id)\n\n    if Instructor.objects.filter(i_id=user.id).exists():\n        # If the user is an instructor, include the instructor info\n        instructor = Instructor.objects.get(i_id=user.id)\n        context = {'subjects': subjects, 
'user': user, 'instructors':instructors,'instructor': instructor}\n        return render(request, template_name, context)\n    else:\n        instructor = None\n        context = {'subjects': subjects, 'user': user, 'instructors':instructors,'instructor': instructor}\n        if request.method == \"POST\":\n            subject_id = request.POST['sign']\n            subject = Subject.objects.get(pk=subject_id)\n            # Check whether an enrollment record already exists\n            if Take.objects.filter(username_id=user.id, subject_id=subject.id).exists():\n                return HttpResponse('You have already registered for this course.')\n            else:\n                sign = Take(username_id=user.id, subject_id=subject.id)\n                sign.save()\n                return HttpResponse('Course registration complete')\n        else:\n            return render(request, template_name, context)\n\n\n\ndef thanks(request):\n    template_name='sugang/Thanks.html'\n    return render(request,template_name)\n\ndef delete(request,take_id):\n    template_name = 'sugang/delete.html'\n    take = Take.objects.get(pk=take_id)\n\n    if request.method == 'POST':\n        take.delete();\n        return render(request, template_name)\n    else:\n        return render(request, template_name)\n\ndef myclass(request):\n    template_name='sugang/Myclass.html'\n    user = User.objects.get(id=request.user.id)\n    subjects = Subject.objects.all()\n    takes = Take.objects.all()\n\n    # Distinguish instructors from regular users\n    if Instructor.objects.filter(i_id=user.id).exists():\n        instructor = Instructor.objects.get(i_id=user.id)\n        context = {'takes': takes,'subjects': subjects, 'user': user,'instructor': instructor}\n    else:\n        instructor = None\n        context = {'takes': takes, 'subjects':subjects, 'user':user,'instructor': instructor}\n\n    return render(request, template_name, context)\n\n\ndef SugangAddNewClass(request):\n    # Create a new class - accessible from the registration main screen when the user is an instructor\n    template_name='sugang/SugangAddNewClass.html'\n    users = User.objects.all()\n    user = User.objects.get(id=request.user.id)\n    instructor = Instructor.objects.get(i_id=user.id)\n\n    if request.method == 'POST':\n        form = ClassForm(request.POST)\n        if form.is_valid():\n            new_class = form.save(commit=False)\n            new_class.save()\n            return redirect('Thanks')\n    else:\n        form = ClassForm(initial={'i_name': instructor.id})\n    context = {'users':users, 'instructor':instructor,'form': form}\n\n    return render(request, template_name, context)\n\n\n\ndef SignInstructor(request):\n    template_name = 'sugang/SignInstructor.html'\n    #instructors = Instructor.objects.all()\n    user = User.objects.get(username=request.user)\n\n    if request.method == 'POST':\n        if Instructor.objects.filter(i_id=user.id).exists():\n            return HttpResponse('This member has already been converted to an instructor.')\n        else:\n            form = SignForm(request.POST)\n            if form.is_valid():\n                # id is set as an initial value in the form\n                # Verify the password\n                password = form.cleaned_data['i_pw']\n\n                if user.check_password(password):\n                    new_sign = form.save(commit=False)\n                    new_sign.save()\n                    return HttpResponse('Instructor conversion complete')\n                else :\n                    raise forms.ValidationError(\"wrong password\")\n\n    else:\n        # not POST initial set\n        form = SignForm(initial={'i_id': user.id})\n    context = {'form': form,'user':user}\n    return render(request, template_name,context)\n","sub_path":"sugang/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"261151544","text":"# Copyright (c) 2017-2022 Mathias Funk\n# This software is released under the MIT License.\n# http://opensource.org/licenses/mit-license.php\n\nfrom oocsi import OOCSI\nimport time\nfrom random import random\n\no = OOCSI('testsender', 'localhost')\n\nwhile 1: \n    message = {}\n    message['color'] = int(random() * 400)\n    message['position'] = 
int(random() * 255)\n\n o.send('testchannel', message)\n\n # wait and continue\n time.sleep(1)\n","sub_path":"examples/channelSend.py","file_name":"channelSend.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331369248","text":"from Individual import Individual\nfrom Terminal import Terminal\nfrom Function import Function\nimport sys\nimport copy\nimport random\nimport math\n\n\nclass BoardIndividual(Individual):\n\n def __init__(self, height, board):\n super(BoardIndividual, self).__init__(height)\n self.originalSudoku = board\n self.board = board\n self.gradeboard = [[{} for a in range(len(board))] for b in range(len(board))]\n subSquares = [Terminal.getSubSquare(row, col, board) for row in range(0, len(board), int(math.sqrt(len(board))))\n for col in range(0, len(board), int(math.sqrt(len(board))))]\n flatten = lambda l: [item for sublist in l for item in sublist]\n self.subSquaresBoard = [flatten(square) for square in subSquares]\n\n def __str__(self):\n buf = \"\"\n squareLength = math.sqrt(len(self.board))\n originalEmptyCell = self.countEmptyCellInOriginalSudoku()\n currentEmptyCell = self.countEmptyCellInSudoku()\n if originalEmptyCell == currentEmptyCell:\n buf += \"this individual property not played\\n\\n\"\n else:\n if currentEmptyCell < originalEmptyCell:\n buf += (\"Solve = \" + str(originalEmptyCell - currentEmptyCell) + \" / \" + str(originalEmptyCell) +\n \"\\nLeft \" + str(currentEmptyCell) + \"\\n\\n\")\n else:\n buf += \"Something Wrong in playing function\\n\"\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n buf+=str(self.board[i][j]) + \" \"\n if self.board[i][j] < 10:\n buf+=\" \"\n if (j + 1) % squareLength == 0:\n buf+=\" \"\n buf+=\"\\n\"\n if (i+1) % squareLength == 0:\n buf += \"\\n\"\n buf += super().__str__()\n return buf\n\n def isForward(self):\n flat_dict = [y for x in self.gradeboard for y in x]\n dics = [dic for dic in flat_dict if dic != {}]\n return len(dics) > 0\n\n def evaluateGradeboard(self):\n for i in range(len(self.gradeboard)):\n for j in range(len(self.gradeboard[i])):\n for key, value in self.gradeboard[i][j].items():\n val = self.run(i, j, key, self.board, self.gradeboard, self.subSquaresBoard)\n self.gradeboard[i][j][key] = val\n\n def play(self):\n fitness = self.countEmptyCellInSudoku()\n while self.isForward():\n self.evaluateGradeboard()\n min_val = sys.float_info.max\n min_row = -1\n min_col = -1\n min_key = 0\n for i in range(len(self.board)):\n for j in range(len(self.board)):\n if self.gradeboard[i][j] != {} and self.board[i][j] == 0:\n for key, value in self.gradeboard[i][j].items():\n tmp_min = value\n if tmp_min < min_val:\n min_key = key\n min_row = i\n min_col = j\n min_val = tmp_min\n if min_row != -1 and min_col != -1 and min_key != 0:\n self.board[min_row][min_col] = min_key\n # TODO: update subsquareboard\n # self.subSquaresBoard[min_row * len(self.board) + min_col] = min_key\n self.gradeboard[min_row][min_col] = {}\n fitness = fitness - 1\n self.initializeGradeboard()\n return fitness\n\n def initializeGradeboard(self):\n all_num = set(range(1, 1 + len(self.board)))\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == 0:\n row_set = set(self.board[i])\n col_set = set(list(map(list, zip(*self.board)))[j])\n square_set = set([y for x in Terminal.getSubSquare(i, j, self.board) for y in x])\n self.gradeboard[i][j] = {}\n for key in all_num - row_set - col_set 
- square_set:\n self.gradeboard[i][j][key] = None\n\n def countEmptyCellInSudoku(self):\n return Terminal.countEmptyCellInSudoku(self.board)\n\n def countEmptyCellInOriginalSudoku(self):\n return Terminal.countEmptyCellInSudoku(self.originalSudoku)\n\n def evaluate(self):\n return self.play()\n\n def clone(self):\n clone = copy.deepcopy(self)\n clone.setHeight(self.height)\n clone.tree = self.cloneFullTree()\n clone.fitness = Individual.NOT_PLAYED_YET\n clone.board = [[val for val in row] for row in self.originalSudoku]\n clone.initializeGradeboard()\n return clone\n\n def mutate(self):\n copy = self.clone()\n randNode = random.randint(1, copy.tree.getSize())\n if randNode == 1:\n copy.tree = self.createFullTree(random.randint(1, self.height))\n else:\n parent, removeNode = copy.tree.getParentNode(copy.tree, randNode)\n if parent.left == removeNode:\n parent.setLeft(self.createFullTree(random.randint(1, parent.height)))\n if parent.right == removeNode:\n parent.setRight(self.createFullTree(random.randint(1, parent.height)))\n copy.tree.setSize()\n copy.tree.findTreeHeight()\n return copy\n\n def crossover(self, object):\n copy = self.clone()\n copy.tree.setSize()\n copy.tree.findTreeHeight()\n object.tree.setSize()\n object.tree.findTreeHeight()\n if copy.tree.getSize() == 1 and object.tree.getSize() == 1:\n node = Function(random.choice(list(Function.functions.keys())))\n node.setRight(copy.tree)\n node.setLeft(object.tree)\n copy.tree = node\n copy.tree.setSize()\n copy.tree.findTreeHeight()\n return copy\n if copy.tree.getSize() == 1:\n copyParent = copy.tree\n copyRemoveNode = copy.tree\n else:\n copyRandNode = random.randint(2, copy.tree.getSize())\n copyParent, copyRemoveNode = copy.tree.getParentNode(copy.tree, copyRandNode)\n if object.tree.getSize() == 1:\n objectParent = object.tree\n objectRemoveNode = object.tree\n else:\n objectRandNode = random.randint(2, object.tree.getSize())\n objectParent, objectRemoveNode = object.tree.getParentNode(object.tree, objectRandNode)\n\n if objectParent.height > copyParent.height:\n if objectParent.left == objectRemoveNode:\n objectParent.setLeft(copyRemoveNode)\n if objectParent.right == objectRemoveNode:\n objectParent.setRight(copyRemoveNode)\n object.tree.setSize()\n object.tree.findTreeHeight()\n return object\n else:\n if copyParent.left == copyRemoveNode:\n copyParent.setLeft(objectRemoveNode)\n if copyParent.right == copyRemoveNode:\n copyParent.setRight(objectRemoveNode)\n copy.tree.setSize()\n copy.tree.findTreeHeight()\n return copy\n\n def testIfGoodDimensionBoard(self, board):\n realSquareLength = math.sqrt(len(board))\n squareLength = int(math.sqrt(len(board)))\n if realSquareLength != squareLength:\n raise ValueError('sqrt(N) is not a natural number')\n for i in range(len(board)):\n if len(board[i]) != len(board):\n ValueError('You should send sudoku board with NxN dimensions')\n\n def nextInc1ExcMax(self, max):\n return random.randint(1, max)\n\n","sub_path":"BoardIndividual.py","file_name":"BoardIndividual.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31148637","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if not headA or not headB:\n return None\n ha=headA\n hb=headB\n\n n=0\n while n<2:\n if not 
headA.next:\n headA=hb\n n=n+1\n else:\n headA=headA.next\n \n if not headB.next:\n headB=ha\n n=n+1\n else:\n headB=headB.next\n \n while headB!=headA:\n headB=headB.next\n headA=headA.next\n return headA","sub_path":"getIntersectionNode160.py","file_name":"getIntersectionNode160.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631533456","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/reports/Concise.py\n# Compiled at: 2012-04-20 03:23:31\n\n\ndef getExtension():\n return '.txt'\n\n\ndef getName():\n return __file__.split('.')[0]\n\n\ndef getDescription():\n return 'Reports the concise information needed to resolve vulnerabilities'\n\n\ndef getResult(rf):\n rpt = rf.rpts[0]\n msg = rf.getStatusMsg(rpt.name, 50)\n msg += 'Date: %s\\n' % rpt.scan_start\n msg += 'Hosts: %s\\n' % rpt.stats['targetsCount']\n msg += 'Critical: %s\\n' % rpt.stats['critCount']\n msg += 'Highs: %s\\n' % rpt.stats['highCount']\n for target in rpt.targets:\n if target.criticals or target.highs or target.mediums:\n msg += rf.getStatusMsg(target.get_name(), 50)\n if target.criticals:\n msg += rf.getStatusMsg('Criticals', 15, '-')\n for plugin_name in target.criticals:\n msg += plugin_name + '\\n'\n\n if target.highs:\n msg += rf.getStatusMsg('Highs', 15, '-')\n for plugin_name in target.highs:\n msg += plugin_name + '\\n'\n\n if target.mediums:\n msg += rf.getStatusMsg('Mediums', 15, '-')\n for plugin_name in target.mediums:\n msg += plugin_name + '\\n'\n\n msg += '\\n'\n\n return msg\n\n\ndef writeResult(rf, outputPath):\n path = outputPath + getExtension()\n rf.msWrite(getResult(rf), path)\n return path","sub_path":"pycfiles/Cernent-0.1.0-py2.7/Concise.py","file_name":"Concise.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237190524","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nfrom typing import List\n\n\nclass Solution:\n def plusOne(self, digits: List[int]) -> List[int]:\n s = int(''.join(map(str, digits))) + 1\n return list(map(int, str(s)))\n\n\nif __name__ == '__main__':\n s = Solution()\n digits = [1, 2, 3]\n print(s.plusOne(digits))\n","sub_path":"模拟面试/8-9/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"239272351","text":"#!/usr/bin/env python3\r\n\r\n\r\n# IMPORTS\r\n\r\n\r\nimport sys\r\nsys.path.append('../data_api')\r\nsys.path.append('../../../data')\r\n\r\nfrom data_api import DataApi\r\nfrom k_nearest_neighbor import KNN\r\nfrom edited_knn import EditedKNN\r\nfrom condensed_knn import CondensedKNN\r\nimport math\r\nfrom random import randint\r\nimport pandas as pd\r\nfrom scipy.spatial.distance import pdist, squareform\r\nimport numpy as np\r\nimport statistics as stats\r\n\r\n\r\n# CLASS\r\n\r\n'''\r\n This class handles all things k medoids clustering...\r\n'''\r\n\r\n\r\nclass KMedoidsClustering(KNN):\r\n\r\n\r\n def __init__(self):\r\n KNN.__init__(self)\r\n self.DEBUG = False\r\n self.data_api_impl = DataApi('../../data/')\r\n self.enn_impl = EditedKNN()\r\n self.cnn_impl = CondensedKNN()\r\n\r\n\r\n '''\r\n perform k-medoids-clustering against full_data_frame using k value as parameter\r\n\r\n INPUT:\r\n - 
full_data_frame: full data set\r\n - k: value for parameter k, i.e. the number of clusters to partition the data set into\r\n\r\n OUTPUT:\r\n - data structure holding k clusters and all data contained within each cluster\r\n '''\r\n def cluster(self, training_set, preprocessed_data_frame, parameter):\r\n\r\n #data_frame = full_data_frame.loc[:, full_data_frame.columns != 'CLASS']\r\n self.enn_impl.set_data_set(self.get_data_set())\r\n dist_matrix = self.get_distance_matrix(preprocessed_data_frame.loc[:, preprocessed_data_frame.columns != 'CLASS'])\r\n edited_data_frame = self.cnn_impl.get_condensed_training_set(training_set)\r\n #edited_data_frame = self.enn_impl.get_edited_training_set(training_set, dist_matrix, parameter)\r\n\r\n #if self.DEBUG:\r\n\r\n\r\n #data_frame = full_data_frame.loc[:, full_data_frame.columns != 'CLASS']\r\n\r\n edited_data_frame = edited_data_frame.reset_index(drop=True)\r\n \r\n #randomly choose k points\r\n #find the max number of data points\r\n print(\"Size of Edited Data Frame: \")\r\n print(edited_data_frame.shape[0])\r\n maxRange = edited_data_frame.shape[0]\r\n #print(maxRange)\r\n #this list holds the indices of the medoids in relation to the distance matrix\r\n medoid_indices = []\r\n #calculate k random numbers from (0, max data points)\r\n while len(medoid_indices) < parameter:\r\n randIndex = randint(0, maxRange - 1)\r\n #make sure we don't generate duplicate indicies\r\n if randIndex not in medoid_indices:\r\n medoid_indices.append(randIndex)\r\n\r\n # print(\"Medoid Indices: \")\r\n # print(medoid_indices)\r\n\r\n #this 2D list stores a list of indexes for points in a cluster\r\n #the first dimension corresponds to the index of medoid_indices\r\n #the second dimension stores a list of indicies of points in relation to the distance matrix\r\n #please note that the clusters[medoid_index][0] stores the lowest known score of distances\r\n clusters = []\r\n for x in range(parameter):\r\n for y in range(2):\r\n if y is 0:\r\n clusters.append([0])\r\n else:\r\n clusters[0].append(medoid_indices[x])\r\n #clusters = [[0 for x in range(k)] for y in range(edited_data_frame.size)] #[0:k-1][0:clusterSize]\r\n\r\n #some point to be placed in a cluster\r\n for x in range(len(dist_matrix)):\r\n #set this large so it will be overwritten\r\n smallest_distance = 1000000\r\n #this stores which medoid set we'll write to\r\n medoid_index = 0\r\n #we compare the distance from this point to each medoid point\r\n for y in range(len(medoid_indices)):\r\n #if we have a smaller distance, then save that cluster and the distance for future comparison\r\n if dist_matrix[x][y] < smallest_distance:\r\n smallest_distance = dist_matrix[x][y]\r\n medoid_index = y\r\n #This actually holds the lowest score for the current randomly generated medoid\r\n print(\"This is k: \" + str(parameter))\r\n print(\"This is medoid indices: \" )\r\n print(medoid_indices)\r\n print(\"This is clusters: \")\r\n print(clusters)\r\n print(\"This is the medoid index: \" + str(medoid_index))\r\n clusters[medoid_index][0] += smallest_distance\r\n #append the index of the point we are placing in a cluster\r\n clusters[medoid_index].append(x)\r\n\r\n #we must now find the best fit point to use as the final medoid set from each cluster\r\n\r\n #save the initial medoids because they are not in the clusters data structure\r\n initial_medoids = medoid_indices\r\n\r\n #go cluster by cluster\r\n print(\"The number of clusters we have is: \" + str(len(clusters)))\r\n print(clusters)\r\n\r\n\r\n for cluster in 
range(len(clusters)):\r\n #zero out score\r\n score = 0\r\n #now go point by point in the cluster\r\n for potential_medoid in range(2, len(clusters[cluster]) - 2):\r\n #compare it to each other point in the cluster\r\n if potential_medoid is not medoid_indices[cluster]:\r\n #print(\"The number of potential medoids of this cluster are: \" + str(len(clusters[cluster])))\r\n for cluster_point in range(2, len(clusters[cluster]) - 2):\r\n #increment the score as a sum total of all the distances\r\n score += dist_matrix[potential_medoid][cluster_point]\r\n #add the score of the initial medoids because they are actually not in the clusters data structure\r\n\r\n score += dist_matrix[potential_medoid][initial_medoids[cluster]]\r\n #if the score is lower, it's a better candidate\r\n if score < clusters[cluster][0]:\r\n #set the new potential medoid as the medoid for the cluster\r\n medoid_indices[cluster] = potential_medoid\r\n #save its score\r\n clusters[cluster][0] = score\r\n\r\n #create new data frame to store subset\r\n training_set_df = pd.DataFrame()\r\n #put the medoid rows in the data frame from the full_data_frame\r\n print(\"The medoid indices: \")\r\n print(medoid_indices)\r\n print(\"The edited data frame: \")\r\n print(edited_data_frame)\r\n for x in medoid_indices:\r\n training_set_df = training_set_df.append(edited_data_frame.loc[x])\r\n #return to sender\r\n print(training_set_df)\r\n return training_set_df\r\n #return self.do_knn(training_set_df, test_set, preprocessed_data_frame, parameter)\r\n\r\n\r\n def get_kmedoids_medoids_for_rbf_network(self, train_data, data_frame, k):\r\n return self.cluster(train_data, data_frame, k)\r\n\r\n # def get_distance_matrix(self, data_frame):\r\n # # for some reason this has to be done here even though it's done above...\r\n # feature_vectors_df = data_frame.loc[:, data_frame.columns != 'CLASS']\r\n # # get distance matrix (upper triangle) using distance metric\r\n # distances = pdist(feature_vectors_df.values, metric='euclidean')\r\n # # fill in lower triangle maintaining symmetry\r\n # dist_matrix = squareform(distances)\r\n # # return full distance matrix\r\n # return dist_matrix\r\n\r\n\r\n# EXECUTE SCRIPT\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n print('k medoids clustering...')\r\n k_medoids_clustering_impl = KMedoidsClustering()\r\n","sub_path":"project-2/scripts/algorithms/k_medoids_clustering.py","file_name":"k_medoids_clustering.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343046313","text":"import os\nimport datetime\nimport time\nimport webbrowser\n\nimport PySide.QtGui as QtGui\nimport PySide.QtCore as QtCore\n\nimport HComHoudiniClient\nimport HComHoudiniUtils\nreload(HComHoudiniUtils)\n\nHCOM_VERSION = \"0.9.0\"\n\nICONPATH = os.path.dirname(__file__) + \"\\\\HCom_Icons\\\\\"\nHISTORY_PATH = os.path.dirname(__file__) + \"\\\\HCom_History\"\n\n###########################################\n# THREADS\n###########################################\nclass RecieveDataThread(QtCore.QThread):\n '''\n Thread used to update message box when a client is writing a data.\n '''\n dataRecieved_signal = QtCore.Signal()\n \n def __init__(self):\n QtCore.QThread.__init__(self)\n \n self.dataDict = None\n self._sender = None\n self.settings = None\n\n def run(self):\n \n self.workFonc(self.dataDict, self._sender, self.settings)\n self.dataRecieved_signal.emit()\n \n def workFonc(self, *args, **kwargs):\n return\n \nclass 
SendingDataThread(QtCore.QThread):\n '''\n Thread used to update the progress bar when a client is sending a data to the server.\n '''\n dataSent_signal = QtCore.Signal(int)\n \n def __init__(self):\n \n QtCore.QThread.__init__(self)\n self.target_clientID = None\n self._sender = None\n self.tabTarget = None\n self.imagePath = None\n self.tabClientType = None\n \n def run(self):\n \n if self.imagePath:\n result = self.workFunc(self.target_clientID, self._sender, self.tabTarget, self.imagePath, tabClientType=self.tabClientType)\n else:\n result = self.workFunc(self.target_clientID, self._sender, self.tabTarget, tabClientType=self.tabClientType)\n \n if result:\n self.dataSent_signal.emit(1)\n else:\n self.dataSent_signal.emit(0)\n \n def workFunc(self, *args, **kwargs):\n pass\n\nclass UiUpdaterThread(QtCore.QThread):\n '''\n Thread used for all update made on main UI\n This include appening a message, receiving confirmation\n Change icons states \n '''\n # Type of UI changement\n update_ui_signal = QtCore.Signal(object)\n \n # Header, Message, tabTarget\n append_message_signal = QtCore.Signal(object)\n \n # sender, dataType, dataDict, tabTarget\n input_data_signal = QtCore.Signal(object)\n \n # Send a data received update\n data_received_update = QtCore.Signal(object)\n \n def __init__(self):\n QtCore.QThread.__init__(self)\n \n self.data = {}\n self.messageData = {}\n self.forceStop = False\n self.inputData = {}\n self.dataReceivedUpdate = False\n \n def run(self):\n \n while 1:\n time.sleep(0.1)\n \n if self.forceStop:\n break\n \n if len(self.data.keys()) > 0:\n self.update_ui_signal.emit(self.data)\n self.data = {}\n \n if len(self.messageData.keys()) > 0:\n self.append_message_signal.emit(self.messageData)\n self.messageData = {}\n \n if len(self.inputData.keys()) > 0:\n self.input_data_signal.emit(self.inputData)\n self.inputData = {}\n \n if self.dataReceivedUpdate:\n self.data_received_update.emit(self.dataReceivedUpdate)\n self.dataReceivedUpdate = False\n\n###########################################\n# WIDGETS\n###########################################\n\nclass UserChatTabWidget(QtGui.QWidget):\n '''\n Widget appended to the main tab widget when a user double click\n on a user name or when a user receive a message from a user\n '''\n \n def __init__(self, target, clientType, openChatRoom=False, parent=None):\n QtGui.QWidget.__init__(self, parent=parent)\n \n self.mainUI = parent\n self.connected = self.mainUI.connected\n self.clientType = clientType\n \n if openChatRoom:\n self.tabTargetID = target\n else:\n self.tabTargetID = self.mainUI.ID\n \n self.targetLabel = str(target).replace(\"[\", \"\").replace(\"]\", \"\")\n if not isinstance(target, list):\n target = [target]\n self.target = target\n \n self.widgetList = []\n \n self.centralLayout = QtGui.QVBoxLayout()\n self.centralLayout.setSpacing(10)\n \n # target ( placeholder )\n self.targetLayout = QtGui.QHBoxLayout()\n self.targetLayout.setSpacing(5)\n \n self.clearTabBtn = QtGui.QPushButton(\"\")\n self.clearTabBtn.clicked.connect(self.clearTab)\n self.clearTabBtn.setStyleSheet('''QPushButton#closebtn{ background-color: rgba(0,0,0,0); border: none; }''')\n self.clearTabBtn.setFlat(True)\n self.clearTabBtn.setToolTip(\"Clear messages\")\n self.clearTabBtn.setFixedSize(QtCore.QSize(32,32))\n self.clearTabBtn.setIcon(QtGui.QIcon(ICONPATH + \"clearmsg.png\"))\n self.targetLayout.addWidget(self.clearTabBtn)\n \n if not openChatRoom:\n \n self.closeBtn = QtGui.QPushButton()\n self.closeBtn.setObjectName(\"closebtn\")\n 
self.closeBtn.setStyleSheet('''QPushButton#closebtn{ background-color: rgba(0,0,0,0); border: none; }''')\n self.closeBtn.setFixedSize(QtCore.QSize(20,20))\n self.closeBtn.setIcon(QtGui.QIcon(ICONPATH + \"close.png\"))\n self.closeBtn.setIconSize(QtCore.QSize(16,16))\n self.closeBtn.clicked.connect(lambda: self.mainUI._removeUserTab(self.targetLabel))\n self.targetLayout.addWidget(self.closeBtn)\n \n self.targetLbl = QtGui.QLabel(\"Target: \" + str(self.target).replace(\"[\", \"\").replace(\"]\", \"\"))\n self.targetLbl.setDisabled(not self.connected)\n self.targetLayout.addWidget(self.targetLbl)\n \n self.widgetList.append(self.targetLbl)\n \n self.targetLayout.setAlignment(QtCore.Qt.AlignLeft)\n \n self.centralLayout.addItem(self.targetLayout)\n \n # Message widget\n self.messageScrollArea = QtGui.QScrollArea()\n self.messageScrollArea.setWidgetResizable(True)\n \n self.messageContent = QtGui.QWidget()\n self.messageScrollArea.setWidget(self.messageContent)\n \n self.messageOutLayout = QtGui.QVBoxLayout()\n self.messageOutLayout.setSpacing(2)\n self.messageOutLayout.setAlignment(QtCore.Qt.AlignTop)\n self.messageOutLayout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)\n \n self.messageContent.setLayout(self.messageOutLayout)\n \n self.centralLayout.addWidget(self.messageScrollArea)\n \n self.messageLayout = QtGui.QHBoxLayout()\n \n self.messageLayout.setSpacing(5)\n self.messageLine = InputMessageBox(self)\n self.messageLine.setMaximumHeight(50)\n self.messageLine.setDisabled(not self.connected)\n self.messageLayout.addWidget(self.messageLine)\n self.widgetList.append(self.messageLine)\n \n self.messageSendBtn = QtGui.QPushButton(\"Send Message\")\n self.messageSendBtn.clicked.connect(lambda: self.mainUI._sendMessage(self.target, str(self.messageLine.toPlainText().encode('latin-1')), self, self.tabTargetID))\n self.messageSendBtn.setDisabled(not self.connected)\n self.messageLayout.addWidget(self.messageSendBtn)\n self.widgetList.append(self.messageSendBtn)\n \n self.centralLayout.addItem(self.messageLayout)\n \n # Send data buttons\n self.actionButtonLayout = QtGui.QHBoxLayout()\n self.actionButtonLayout.setSpacing(5)\n \n sendLbl = \"Send:\"\n if openChatRoom:\n sendLbl = \"Send to all users:\"\n sendLblW = QtGui.QLabel(sendLbl)\n \n self.actionButtonLayout.addWidget(sendLblW)\n \n if self.clientType[0] == HComHoudiniUtils.CLIENT_TYPE.MAYA_HENGINE or self.clientType[0] == HComHoudiniUtils.CLIENT_TYPE.HOUDINI:\n self.sendotlBtn = QtGui.QPushButton(\"\")\n self.sendotlBtn.setToolTip(\"Send houdini node or digital asset\")\n self.sendotlBtn.setIconSize(QtCore.QSize(32,32))\n self.sendotlBtn.setFixedSize(40,40)\n self.sendotlBtn.setIcon(QtGui.QIcon(ICONPATH + \"digitalasset.png\"))\n self.sendotlBtn.clicked.connect(lambda: self.mainUI._sendOtl(self.target, self.tabTargetID, self, self.clientType))\n self.sendotlBtn.setDisabled(not self.connected)\n self.actionButtonLayout.addWidget(self.sendotlBtn)\n self.widgetList.append(self.sendotlBtn) \n\n if self.clientType[0] == HComHoudiniUtils.CLIENT_TYPE.HOUDINI:\n self.sendSettingsBtn = QtGui.QPushButton(\"\")\n self.sendSettingsBtn.setToolTip(\"Send houdini node or digital asset settings\")\n self.sendSettingsBtn.setIconSize(QtCore.QSize(32,32))\n self.sendSettingsBtn.setFixedSize(40,40)\n self.sendSettingsBtn.setIcon(QtGui.QIcon(ICONPATH + \"digitalasset_settings.png\"))\n self.sendSettingsBtn.clicked.connect(lambda: self.mainUI._sendSettings(self.target, self.tabTargetID, self))\n self.sendSettingsBtn.setDisabled(not self.connected)\n 
self.actionButtonLayout.addWidget(self.sendSettingsBtn)\n self.widgetList.append(self.sendSettingsBtn)\n \n if self.clientType[0] == HComHoudiniUtils.CLIENT_TYPE.HOUDINI:\n \n self.sendBgeoBtn = QtGui.QPushButton(\"\")\n self.sendBgeoBtn.setToolTip(\"Send bgeo mesh\")\n self.sendBgeoBtn.setIconSize(QtCore.QSize(32,32))\n self.sendBgeoBtn.setFixedSize(40,40)\n self.sendBgeoBtn.setIcon(QtGui.QIcon(ICONPATH + \"bgeo.png\"))\n self.sendBgeoBtn.setDisabled(not self.connected)\n self.sendBgeoBtn.clicked.connect(lambda: self.mainUI._sendBgeo(self.target, self.tabTargetID, self))\n self.actionButtonLayout.addWidget(self.sendBgeoBtn)\n self.widgetList.append(self.sendBgeoBtn)\n self.widgetList.append(self.sendBgeoBtn)\n \n self.sendObjBtn = QtGui.QPushButton(\"\")\n self.sendObjBtn.setToolTip(\"Send obj mesh\")\n self.sendObjBtn.setIconSize(QtCore.QSize(32,32))\n self.sendObjBtn.setFixedSize(40,40)\n self.sendObjBtn.setIcon(QtGui.QIcon(ICONPATH + \"obj.png\"))\n self.sendObjBtn.setDisabled(not self.connected)\n self.sendObjBtn.clicked.connect(lambda: self.mainUI._sendObjMesh(self.target, self.tabTargetID, self))\n self.actionButtonLayout.addWidget(self.sendObjBtn)\n self.widgetList.append(self.sendObjBtn)\n \n self.sendAlembicBtn = QtGui.QPushButton(\"\")\n self.sendAlembicBtn.setToolTip(\"Send alembic cache\")\n self.sendAlembicBtn.setIconSize(QtCore.QSize(32,32))\n self.sendAlembicBtn.setFixedSize(40,40)\n self.sendAlembicBtn.setIcon(QtGui.QIcon(ICONPATH + \"alembic.png\"))\n self.sendAlembicBtn.setDisabled(not self.connected)\n self.sendAlembicBtn.clicked.connect(lambda: self.mainUI._sendAlembic(self.target, self.tabTargetID, self))\n self.actionButtonLayout.addWidget(self.sendAlembicBtn)\n self.widgetList.append(self.sendAlembicBtn)\n \n self.sendPictureBtn = QtGui.QPushButton(\"\")\n self.sendPictureBtn.setToolTip(\"Send picture file\")\n self.sendPictureBtn.setIconSize(QtCore.QSize(32,32))\n self.sendPictureBtn.setFixedSize(40,40)\n self.sendPictureBtn.setIcon(QtGui.QIcon(ICONPATH + \"picture.png\"))\n self.sendPictureBtn.setDisabled(not self.connected)\n self.sendPictureBtn.clicked.connect(lambda: self.mainUI._sendPic(self.target, self.tabTargetID, self))\n self.actionButtonLayout.addWidget(self.sendPictureBtn)\n self.widgetList.append(self.sendPictureBtn)\n \n self.actionButtonLayout.setAlignment(QtCore.Qt.AlignLeft)\n self.centralLayout.addItem(self.actionButtonLayout)\n \n self.setLayout(self.centralLayout)\n \n def clearTab(self):\n \n nmsg = self.messageOutLayout.count()\n if nmsg == 0: return\n widgets = []\n for i in range(nmsg):\n w = self.messageOutLayout.itemAt(i)\n widgets.append(w.widget())\n \n for w in widgets:\n w.setParent(None)\n w.deleteLater()\n \n def appendMessage(self, header, message, fromMyself=False):\n \n if header:\n now = datetime.datetime.now()\n timeStamp = \"{1}:{2} {0}:\".format(header, str(now.hour).zfill(2), str(now.minute).zfill(2))\n \n if fromMyself:\n timeStamp = HComHoudiniUtils.coloredString(timeStamp, \"70738c\", italic=True)\n \n else:\n timeStamp = \"\"\n \n msbBox = MessageBox(timeStamp, message, fromMyself)\n self.messageOutLayout.addWidget(msbBox)\n self.messageOutLayout.update()\n \n self.messageScrollArea.ensureWidgetVisible(self.messageOutLayout.widget())\n \n def appendInputBox(self, _sender, dataType, data):\n \n now = datetime.datetime.now()\n timeStamp = \"{1}:{2} {0}:\".format(_sender, str(now.hour).zfill(2), str(now.minute).zfill(2))\n \n message = \"{0} wants to send you \".format(_sender)\n \n if dataType == \"otl\":\n message += \"a 
node.\\n => type: {0}, Name: {1}\".format(data[\"OTL_TYPE\"], data[\"OTL_NAME\"])\n \n elif dataType == \"mesh\":\n message += \"a mesh.\\n => type: {0}\".format(data[\"MESH_TYPE\"])\n \n elif dataType == \"settings\":\n message += \"a node settings.\\n => type: {0}\".format(data[\"OTL_TYPE\"])\n \n elif dataType == \"pic\":\n message += \"an image file.\\n => name: {0}\".format(data[\"IMAGE_NAME\"])\n \n elif dataType == \"alembic\":\n message += \"an alembic cache file.\\n => name: {0}\".format(data[\"NAME\"])\n \n data = [dataType, data]\n \n msbBox = MessageBox(timeStamp, message, data=data, isInputData=True, mainUi=self.mainUI, _sender=_sender)\n self.messageOutLayout.addWidget(msbBox)\n \n def appendDataSendBox(self, msg, targets, _sender, tabTarget, workFunc, imagePath=None, tabClientType=None):\n \n box = SendingDataMessageBox(msg, targets, _sender, tabTarget, workFunc, imagePath=imagePath, tabClientType=tabClientType, parent=self)\n self.messageOutLayout.addWidget(box)\n box.workerThread.start()\n \n def disableTab(self, toggle):\n \n for w in self.widgetList:\n w.setDisabled(toggle)\n\nclass UserListDockWidget(QtGui.QWidget):\n '''\n Widget use as container for the user list and user connection infos\n '''\n def __init__(self, hcc, session_ID, parent=None):\n QtGui.QWidget.__init__(self, parent=parent)\n \n self.hcc = hcc\n self.session_ID = session_ID\n \n self.mainUI = parent\n \n self.setObjectName(\"cw\")\n mainLayout = QtGui.QVBoxLayout()\n mainLayout.setSpacing(5)\n \n self.setWindowTitle(\"User Connected:\")\n \n self.userListW = UserListWidget(self.session_ID, clientType=self.mainUI.CLIENT_TYPE, mainUI=self.mainUI, parent=self)\n \n if self.hcc:\n try:\n users = self.hcc.root.getAllClients().keys()\n usersType = self.hcc.root.getAllClientTypes()\n except EOFError:\n pass\n else:\n for k in users:\n self.userListW._addUser(k, usersType[k])\n \n mainLayout.addItem(self.userListW)\n \n folderButtonsLayout = QtGui.QHBoxLayout()\n folderButtonsLayout.setSpacing(10)\n \n self.openReveicedFolder = QtGui.QPushButton()\n self.openReveicedFolder.setToolTip(\"Open 'HCom My Received files' folder\")\n self.openReveicedFolder.setFixedSize(QtCore.QSize(38,38))\n self.openReveicedFolder.setIconSize(QtCore.QSize(32,32))\n self.openReveicedFolder.setIcon(QtGui.QIcon(ICONPATH + \"folder.png\"))\n self.openReveicedFolder.clicked.connect(self._openReceivedFilesFolder)\n folderButtonsLayout.addWidget(self.openReveicedFolder)\n \n self.openHistoryBtn = QtGui.QPushButton()\n self.openHistoryBtn.setToolTip(\"Open history folder\")\n self.openHistoryBtn.setFixedSize(QtCore.QSize(38,38))\n self.openHistoryBtn.setIconSize(QtCore.QSize(32,32))\n self.openHistoryBtn.setIcon(QtGui.QIcon(ICONPATH + \"folder_hist.png\"))\n self.openHistoryBtn.clicked.connect(self._openHistoryFolder)\n folderButtonsLayout.addWidget(self.openHistoryBtn)\n \n self.openHelp = QtGui.QPushButton()\n self.openHelp.setToolTip(\"Open help\")\n self.openHelp.setFixedSize(QtCore.QSize(38,38))\n self.openHelp.setIconSize(QtCore.QSize(32,32))\n self.openHelp.setIcon(QtGui.QIcon(ICONPATH + \"help.png\"))\n self.openHelp.clicked.connect(self._showHelp)\n folderButtonsLayout.addWidget(self.openHelp)\n \n folderButtonsLayout.setAlignment(QtCore.Qt.AlignHCenter)\n mainLayout.addItem(folderButtonsLayout)\n \n self.setLayout(mainLayout)\n \n def _updateUserList(self, ID, action, clientType):\n \n if action == \"join\":\n self.userListW._addUser(ID, clientType)\n elif action == \"left\":\n self.userListW._removeUser(ID)\n \n def 
_openHistoryFolder(self):\n if os.path.exists(HISTORY_PATH):\n os.startfile(HISTORY_PATH)\n else:\n print(\"ERROR: History folder not found.\")\n \n def _showHelp(self):\n \n helpWin = HelpWindow(self)\n helpWin.exec_()\n \n def _openReceivedFilesFolder(self):\n \n RECEIVED_FILES_PATH = HComHoudiniUtils.fetchMyReceivedFilesFolder()\n \n if os.path.exists(RECEIVED_FILES_PATH):\n os.startfile(RECEIVED_FILES_PATH)\n else:\n print(\"ERROR: Received files folder not found.\")\n \nclass UserListWidget(QtGui.QVBoxLayout):\n '''\n User list connected, used only in UserListDockWidget\n '''\n def __init__(self, session_ID, clientType=\"None\", mainUI=None, parent=None):\n \n QtGui.QVBoxLayout.__init__(self)\n self.setSpacing(5)\n self.session_ID = session_ID\n self.clientType = clientType\n \n self.mainUi = mainUI\n \n self.ITEM_IDS = []\n \n splitter = QtGui.QSplitter(QtCore.Qt.Vertical)\n splitter.setStyleSheet(''' QSplitter::handle:vertical{height: 2px;}''')\n \n # user connected list\n self.userList = QtGui.QListWidget(parent=parent)\n self.userList.setSpacing(2)\n self.userList.itemDoubleClicked.connect(self._selItem)\n splitter.addWidget(self.userList)\n \n # user connection and deconnection infos\n self.outuserInfo = QtGui.QTextEdit(parent=parent)\n self.outuserInfo.setReadOnly(True)\n splitter.addWidget(self.outuserInfo)\n \n splitter.setSizes([400,50])\n \n self.addWidget(splitter)\n \n def _selItem(self):\n \n curItem = self.userList.currentItem()\n idSelected = curItem.text()\n if not \"(me)\" in idSelected:\n self.mainUi._addUserTab(str(idSelected), curItem.clientType, fromUserList=True)\n \n def _addUser(self, ID, clientType):\n \n IDtoAdd = ID\n \n if ID == self.session_ID:\n IDtoAdd = \"(me) \" + ID\n \n userObj = UserItem(IDtoAdd, clientType)\n \n if ID not in self.ITEM_IDS:\n self.ITEM_IDS.append(IDtoAdd)\n\n self.userList.addItem(userObj)\n\n def _removeUser(self, ID):\n \n if ID in self.ITEM_IDS:\n itemIndex = self.ITEM_IDS.index(ID)\n it = self.userList.takeItem(itemIndex)\n qIndex = self.userList.indexFromItem(it)\n model = self.userList.model()\n model.removeRow(qIndex.row())\n self.ITEM_IDS.remove(ID)\n \n def clearUserList(self):\n self.userList.clear()\n \nclass UserItem(QtGui.QListWidgetItem):\n \n def __init__(self, text, clientType=[None,None]):\n QtGui.QListWidgetItem.__init__(self)\n \n self.ID = text\n self.clientType = clientType\n self.setIcon(QtGui.QIcon(ICONPATH + str(self.clientType[0]).lower() + \".png\"))\n self.setToolTip(str(self.clientType[-1]))\n self.setText(text)\n\n\nclass HelpWindow(QtGui.QDialog):\n '''\n help / infos window\n '''\n def __init__(self, parent=None):\n QtGui.QDialog.__init__(self, parent=parent)\n \n mainLayout = QtGui.QVBoxLayout()\n mainLayout.setSpacing(10)\n \n self.versionString = QtGui.QLabel(\"HCom Version {0}\".format(HCOM_VERSION))\n mainLayout.addWidget(self.versionString)\n \n self.onlineHelpBtn = QtGui.QPushButton(\"Online Help\")\n self.onlineHelpBtn.clicked.connect(self.openOnlineHelp)\n mainLayout.addWidget(self.onlineHelpBtn)\n\n self.closeButton = QtGui.QPushButton(\"Close\")\n self.closeButton.clicked.connect(self.close)\n mainLayout.addWidget(self.closeButton)\n \n self.setLayout(mainLayout)\n \n def openOnlineHelp(self):\n webbrowser.open('http://guillaumejobst.blogspot.fr/p/hcom.html')\n\nclass SettingsWindow(QtGui.QDialog):\n '''\n The setting and options window\n '''\n def __init__(self, parent=None):\n QtGui.QDialog.__init__(self, parent=parent)\n \n initValues = HComHoudiniUtils.readIni()\n \n self.SETTINGS = 
initValues\n \n self.setWindowTitle(\"HCom Settings\")\n self.setWindowIcon(QtGui.QIcon(ICONPATH + \"\\\\settings.png\"))\n \n settingsLayout = QtGui.QVBoxLayout()\n settingsLayout.setSpacing(10)\n \n settingsLayout.addWidget(QtGui.QLabel(\"HCom version: {0}\".format(HCOM_VERSION)))\n \n serverAdresslayout = QtGui.QHBoxLayout()\n serverAdresslayout.setSpacing(10)\n serverAdresslayout.addWidget(QtGui.QLabel(\"Server Adress:\"))\n self.serverAdress = QtGui.QLineEdit(str(initValues[\"SERVER\"]))\n serverAdresslayout.addWidget(self.serverAdress)\n settingsLayout.addItem(serverAdresslayout)\n \n serverPortlayout = QtGui.QHBoxLayout()\n serverPortlayout.setSpacing(10)\n serverPortlayout.addWidget(QtGui.QLabel(\"Server Port:\"))\n self.serverPort = QtGui.QLineEdit(str(initValues[\"PORT\"]))\n serverPortlayout.addWidget(self.serverPort)\n settingsLayout.addItem(serverPortlayout)\n \n myReceivedFilesLayout = QtGui.QHBoxLayout()\n myReceivedFilesLayout.setSpacing(10)\n self.myReceivedFilesLbl = QtGui.QLabel(\"My Received Files Folder:\")\n myReceivedFilesLayout.addWidget(self.myReceivedFilesLbl)\n self.myReceivedFileLine = QtGui.QLineEdit(\"\")\n if initValues[\"MY_RECEIVED_FILES\"] == \"DEFAULT\":\n self.myReceivedFileLine.setText(str(os.path.dirname(__file__)) + \"\\\\HCom_Received_Files\")\n else:\n self.myReceivedFileLine.setText(str(initValues[\"MY_RECEIVED_FILES\"]))\n myReceivedFilesLayout.addWidget(self.myReceivedFileLine)\n self.myReceivedFileBtn = QtGui.QPushButton(\"Pick\")\n self.myReceivedFileBtn.clicked.connect(self.pickMyReceivedFilesFolder)\n myReceivedFilesLayout.addWidget(self.myReceivedFileBtn)\n settingsLayout.addItem(myReceivedFilesLayout)\n \n self.switchToManualMode = QtGui.QCheckBox(\"Auto Switch To Manual Update\")\n self.switchToManualMode.setChecked(initValues[\"SWITCH_TO_MANUAL_UPDATE\"])\n settingsLayout.addWidget(self.switchToManualMode)\n \n self.saveHistory = QtGui.QCheckBox(\"Save Conversation history\")\n self.saveHistory.setChecked(initValues[\"SAVE_HISTORY\"])\n settingsLayout.addWidget(self.saveHistory)\n \n self.playSounds = QtGui.QCheckBox(\"Play Sounds\")\n self.playSounds.setChecked(initValues[\"PLAY_SOUND\"])\n settingsLayout.addWidget(self.playSounds)\n \n self.returnToDefaultBtn = QtGui.QPushButton(\"Revert to Default\")\n self.returnToDefaultBtn.clicked.connect(self.returnToDefault)\n settingsLayout.addWidget(self.returnToDefaultBtn)\n \n buttonsLayout = QtGui.QHBoxLayout()\n buttonsLayout.setSpacing(10)\n \n self.validBtn = QtGui.QPushButton(\"Valid\")\n self.validBtn.clicked.connect(self.validSettings)\n buttonsLayout.addWidget(self.validBtn)\n \n self.cancelBtn = QtGui.QPushButton(\"Cancel\")\n self.cancelBtn.clicked.connect(self.cancelSettings)\n buttonsLayout.addWidget(self.cancelBtn)\n \n settingsLayout.addItem(buttonsLayout)\n \n settingsLayout.setAlignment(QtCore.Qt.AlignTop)\n self.setLayout(settingsLayout)\n \n def pickMyReceivedFilesFolder(self):\n \n f = QtGui.QFileDialog.getExistingDirectory(caption=\"Pick a folder\")\n if not f:\n return\n \n if not os.path.exists(f):\n return\n \n self.myReceivedFileLine.setText(str(f))\n \n def returnToDefault(self):\n \n self.serverAdress.setText(\"127.0.0.1\")\n self.serverPort.setText(\"5000\")\n self.switchToManualMode.setChecked(True)\n self.playSounds.setChecked(False)\n self.myReceivedFileLine.setText(os.path.dirname(__file__) + \"\\\\HCom_Received_Files\")\n \n def validSettings(self):\n \n self.SETTINGS[\"SERVER\"] = str(self.serverAdress.text())\n self.SETTINGS[\"PORT\"] = 
str(self.serverPort.text())\n self.SETTINGS[\"SWITCH_TO_MANUAL_UPDATE\"] = str(self.switchToManualMode.isChecked())\n self.SETTINGS[\"PLAY_SOUND\"] = str(self.playSounds.isChecked())\n if str(self.myReceivedFileLine.text()) == os.path.dirname(__file__) + \"\\\\HCom_Received_Files\":\n self.SETTINGS[\"MY_RECEIVED_FILES\"] = \"DEFAULT\"\n else:\n self.SETTINGS[\"MY_RECEIVED_FILES\"] = str(self.myReceivedFileLine.text())\n \n HComHoudiniUtils.writeIni(self.SETTINGS)\n self.close()\n \n def cancelSettings(self):\n self.close()\n\n\nclass SendingDataMessageBox(QtGui.QWidget):\n '''\n Widget added to the current tab when data are being sent by a user\n '''\n def __init__(self, msg, target_clientID, _sender, tabTarget, workFunc, imagePath=None, parent=None, tabClientType=None):\n QtGui.QWidget.__init__(self, parent=parent)\n \n self.workerThread = SendingDataThread()\n self.workerThread.workFunc = workFunc\n self.workerThread.target_clientID = target_clientID\n self.workerThread.tabTarget = tabTarget\n self.workerThread._sender = _sender\n self.workerThread.tabClientType = tabClientType\n self.workerThread.imagePath = imagePath\n self.workerThread.dataSent_signal.connect(self.dataSent)\n \n layout= QtGui.QVBoxLayout()\n layout.setSpacing(5)\n \n self.msg = QtGui.QLabel(msg)\n layout.addWidget(self.msg)\n \n self.progressBar = QtGui.QProgressBar()\n self.progressBar.setMinimum(0)\n self.progressBar.setMaximum(0)\n self.progressBar.setFixedHeight(4)\n self.progressBar.setTextVisible(False)\n layout.addWidget(self.progressBar)\n \n self.setLayout(layout)\n \n def dataSent(self, result):\n \n if result:\n mod = \" sent !\"\n else:\n mod = \" cancelled !\"\n \n tmp = str(self.msg.text()).replace(\"Sending \", \"\") + mod\n \n self.msg.setText(tmp)\n self.progressBar.setMinimum(0)\n self.progressBar.setMaximum(1)\n self.progressBar.setValue(1)\n if result:\n self.progressBar.setStyleSheet('''QProgressBar::chunk{background:green;}''')\n else:\n self.progressBar.setStyleSheet('''QProgressBar::chunk{background:red;}''')\n \nclass MessageBox(QtGui.QWidget):\n '''\n Widget added to the current tab when a user is receiving data from another client\n '''\n def __init__(self, header, message, fromMyself=False, mainUi=None, data=False, isInputData=False, _sender=\"\"):\n QtGui.QWidget.__init__(self)\n \n if data:\n self.dataType = data[0]\n self.dataDict = data[1]\n else:\n self.dataType = \"\"\n self.dataDict = {}\n \n self._sender = _sender\n self.mainUi = mainUi\n \n self.workThread = RecieveDataThread()\n self.workThread.dataRecieved_signal.connect(self.endJob)\n self.workThread.dataDict = self.dataDict\n self.workThread._sender = self._sender\n \n self.setObjectName(\"msgw\")\n \n self.mainLayout = QtGui.QVBoxLayout()\n self.mainLayout.setSpacing(5)\n \n if not isInputData:\n if header:\n self.headerMsg = QtGui.QLabel(header)\n self.mainLayout.addWidget(self.headerMsg)\n \n self.msg = QtGui.QLabel(message)\n self.msg.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n if fromMyself:\n self.msg.setStyleSheet('''QLabel{background-color:rgba(128,128,128,0); border:None}''')\n else:\n self.msg.setStyleSheet('''QLabel{background-color:rgba(100,110,140,0); border:None}''')\n\n \n self.mainLayout.addWidget(self.msg)\n \n if isInputData:\n \n self.activityBar = QtGui.QProgressBar()\n self.activityBar.setMinimum(0)\n self.activityBar.setVisible(False)\n self.activityBar.setFixedHeight(4)\n \n self.buttonLayout = QtGui.QHBoxLayout()\n self.buttonLayout.setSpacing(5)\n \n 
self.mainLayout.addWidget(self.activityBar)\n            \n            self.acceptBtn = QtGui.QPushButton(\"Accept\")\n            self.acceptBtn.setFixedWidth(75)\n            self.acceptBtn.clicked.connect(self.acceptInput)\n            self.cancelBtn = QtGui.QPushButton(\"Cancel\")\n            self.cancelBtn.setFixedWidth(75)\n            self.cancelBtn.clicked.connect(self.cancelInput)\n            \n            self.buttonLayout.addWidget(self.acceptBtn)\n            self.buttonLayout.addWidget(self.cancelBtn)\n            \n            self.buttonLayout.setAlignment(QtCore.Qt.AlignRight)\n            \n            self.mainLayout.addItem(self.buttonLayout)\n        \n        self.setLayout(self.mainLayout)\n        \n    def cancelInput(self):\n        \n        self.dataDict = None\n        \n        self.acceptBtn.setDisabled(True)\n        self.cancelBtn.setDisabled(True)\n        \n        HComHoudiniClient.sendDataReceivedInfo(self._sender, self.mainUi.ID, [False, self.dataType], self.mainUi.ID)\n        \n    def acceptInput(self):\n        \n        settings = HComHoudiniUtils.readIni()\n        self.activityBar.setMaximum(0)\n        self.activityBar.setTextVisible(False)\n        self.activityBar.setVisible(True)\n        \n        self.workThread.settings = settings\n        \n        # Send a setting of parms for the given node selection type\n        if self.dataType == \"settings\":\n            \n            self.workThread.workFonc = HComHoudiniUtils.setOtlSettings\n            self.workThread.start()\n        \n        # Send an otl or a node\n        elif self.dataType == \"otl\":\n            \n            self.workThread.workFonc = HComHoudiniUtils.createOtl\n            self.workThread.start()\n            \n        # Bgeo mesh\n        elif self.dataType == \"mesh\":\n            \n            self.workThread.workFonc = HComHoudiniUtils.createMesh\n            self.workThread.start()\n        \n        # Pictures\n        elif self.dataType == \"pic\":\n            \n            self.workThread.workFonc = HComHoudiniUtils.createPic\n            self.workThread.start()\n            \n        # Alembic\n        elif self.dataType == \"alembic\":\n            \n            self.workThread.workFonc = HComHoudiniUtils.createAlembic\n            self.workThread.start()\n        \n        self.acceptBtn.setDisabled(True)\n        self.cancelBtn.setDisabled(True)\n        \n        \n    def endJob(self):\n        \n        HComHoudiniClient.sendDataReceivedInfo(self._sender, self.mainUi.ID, [True, self.dataType], self.mainUi.ID)\n        self.activityBar.setMaximum(1)\n        self.activityBar.setValue(1)\n        self.activityBar.setStyleSheet('''QProgressBar::chunk{background:green;}''')\n        self.dataDict = None\n        \n\nclass InputMessageBox(QtGui.QTextEdit):\n    '''\n        Custom message text field\n    '''\n    def __init__(self, parent):\n        super(self.__class__, self).__init__()\n        \n        self.parent = parent\n        \n    def keyPressEvent(self, event):\n        \n        mod = QtGui.QApplication.keyboardModifiers()\n\n        if (event.key() == QtCore.Qt.Key_Enter or event.key() == QtCore.Qt.Key_Return) and mod == QtCore.Qt.ShiftModifier:\n            self.append(\"\")\n            \n        elif (event.key() == QtCore.Qt.Key_Enter or event.key() == QtCore.Qt.Key_Return) and mod != QtCore.Qt.ShiftModifier:\n            self.parent.mainUI._sendMessage(self.parent.target, str(self.toPlainText().encode('latin-1')), self.parent, self.parent.tabTargetID)\n            \n        else:\n            super(self.__class__, self).keyPressEvent(event)\n        \nclass FrameRangeSelection(QtGui.QDialog):\n    \n    def __init__(self, start=0, end=100, parent=None):\n        QtGui.QDialog.__init__(self, parent=parent)\n        \n        self.frameRange = [start, end]\n        self.VALID = False\n        \n        mainLayout = QtGui.QVBoxLayout()\n        mainLayout.setSpacing(10)\n        \n        mainLayout.addWidget(QtGui.QLabel(\"Enter a frame range:\"))\n        \n        frameLayout = QtGui.QHBoxLayout()\n        frameLayout.setSpacing(10)\n        \n        frameLayout.addWidget(QtGui.QLabel(\"Start Frame:\"))\n        self.startValue = QtGui.QDoubleSpinBox()\n        self.startValue.setMinimum(-999999.9)\n        self.startValue.setMaximum(999999.9)\n        self.startValue.setValue(start)\n        frameLayout.addWidget(self.startValue)\n        \n        
frameLayout.addWidget(QtGui.QLabel(\"End Frame:\"))\n self.endValue = QtGui.QDoubleSpinBox()\n self.endValue.setMinimum(-999999.9)\n self.endValue.setMaximum(999999.9)\n self.endValue.setValue(end)\n frameLayout.addWidget(self.endValue)\n \n mainLayout.addItem(frameLayout)\n \n buttonsLayout = QtGui.QHBoxLayout()\n buttonsLayout.setSpacing(5)\n \n acceptBtn = QtGui.QPushButton(\"Accept\")\n acceptBtn.clicked.connect(self.validFrameRange)\n buttonsLayout.addWidget(acceptBtn)\n \n closeBtn = QtGui.QPushButton(\"Cancel\")\n closeBtn.clicked.connect(self.close)\n buttonsLayout.addWidget(closeBtn)\n \n mainLayout.addItem(buttonsLayout)\n \n self.setLayout(mainLayout)\n \n def validFrameRange(self):\n \n start = self.startValue.value()\n end = self.endValue.value()\n self.frameRange = [start, end]\n self.VALID= True\n self.close()\n","sub_path":"HComHoudini/HComHoudiniWidgets.py","file_name":"HComHoudiniWidgets.py","file_ext":"py","file_size_in_byte":35608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326727238","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 26 00:59:42 2020\r\n\r\n@author: Souhardya\r\n\"\"\"\r\n\r\nclass Node:\r\n def __init__(self,data):\r\n self.data=data\r\n self.next=None\r\nclass LinkedList:\r\n def __init__(self):\r\n self.head=None\r\n def push(self,new_data):\r\n new_node=Node(new_data)\r\n new_node.next=self.head\r\n \r\n curr=self.head\r\n if self.head !=None:\r\n while (curr.next!=self.head):\r\n curr=curr.next\r\n curr.next=new_node\r\n else:\r\n new_node.next=new_node\r\n self.head=new_node\r\n def printList(self):\r\n curr=self.head\r\n if self.head!=None:\r\n while True:\r\n print(curr.data,end=' ')\r\n curr=curr.next\r\n if curr==self.head:\r\n break\r\nll=LinkedList()\r\nll.push(10)\r\nll.push(8)\r\nll.push(7)\r\nll.push(5)\r\nll.push(2)\r\nprint('Circular LinkedList is: ')\r\nll.printList()\r\n \r\n ","sub_path":"Circular Linked List Traversal & Insert at start.py","file_name":"Circular Linked List Traversal & Insert at start.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"204826218","text":"#! 
/usr/bin/python\nfrom collections import defaultdict\n\n\nclass Viterbi(object):\n def __init__(self, input_file, output_file, transactor, emission):\n # This class implements the Viterbi algo\n self.input_file = input_file\n self.output_file = output_file\n self.transactor = transactor\n self.emission = emission\n\n def run(self):\n with open(self.input_file, \"r\") as f:\n l = f.readline()\n sentence = []\n while l:\n word = l.strip()\n if word: # Nonempty line\n sentence.append(word)\n l = f.readline()\n continue\n res = self.handler(sentence)\n for single_word, tag, prob in res:\n self.output_file.write(\"{w} {t} {prob}\\n\".format(w=single_word, t=tag, prob=prob))\n self.output_file.write(\"\\n\")\n sentence = []\n l = f.readline()\n\n def handler(self, words_list):\n \"\"\"\n Dynamic Programming\n :param words_list:\n :return: list[(word, tag, probability)]\n \"\"\"\n n = len(words_list)\n S_1 = [\"*\"]\n S0 = list(S_1)\n S = ['I-LOC', 'B-ORG', 'I-PER', 'O', 'I-MISC', 'B-MISC', 'I-ORG', 'B-LOC']\n # special case when len(word_list) == 1\n if n == 1:\n word = words_list[0]\n prob = -1e10\n tag = ''\n for v in S:\n pi_w = 1\n q = self.transactor.get_transaction(\"*\", \"*\", v)\n e = self.emission.get_emission(word, v)\n temp = pi_w + q + e\n if temp > prob:\n prob, tag = temp, v\n return [(word, tag, prob)]\n dp_pi = [defaultdict(dict) for _ in range(n+1)]\n dp_pi[0][\"*\"][\"*\"] = 1\n dp_bp = [defaultdict(dict) for _ in range(n+1)]\n # w -> u -> v\n for k in range(1, n+1):\n if k == 1:\n u_set = S0\n else:\n u_set = S\n v_set = S\n for u in u_set:\n for v in v_set:\n opt_val = float(\"-inf\")\n opt_w = ''\n if k == 1:\n w_set = S_1\n elif k == 2:\n w_set = S0\n else:\n w_set = S\n for w in w_set:\n pi_w = dp_pi[k-1][w][u]\n q = self.transactor.get_transaction(w, u, v)\n e = self.emission.get_emission(words_list[k-1], v)\n temp_val = pi_w + q + e\n if temp_val > opt_val:\n opt_val = temp_val\n opt_w = w\n dp_pi[k][u].setdefault(v, opt_val)\n dp_bp[k][u].setdefault(v, opt_w)\n if n == 1:\n set_n_1 = S0\n set_n = S\n else:\n set_n_1 = S\n set_n = S\n opt_u = \"\"\n opt_v = \"\"\n opt_val = float(\"-inf\")\n for u in set_n_1:\n for v in set_n:\n q = self.transactor.get_transaction(u, v, \"STOP\")\n temp_val = q + dp_pi[n][u][v]\n if temp_val > opt_val:\n opt_val = temp_val\n opt_u = u\n opt_v = v\n # back pointing\n tags = [opt_u, opt_v]\n probs = [-1e10] * n\n fst, sec = opt_u, opt_v\n for k in range(n-2, 0, -1):\n temp = dp_bp[k+2][fst][sec]\n tags.insert(0, temp)\n probs[k+1] = dp_pi[k+2][fst][sec]\n sec, fst = fst, temp\n probs[1] = dp_pi[2][tags[0]][tags[1]]\n probs[0] = dp_pi[1][\"*\"][tags[0]]\n return zip(words_list, tags, probs)\n\n\n","sub_path":"COMS4705/tagger_utils/viterbi_model.py","file_name":"viterbi_model.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615164399","text":"# # x pattern\n\n# def xpattern(row):\n# for i in range(row):\n# for j in range(row):\n# if i == j or i + j == row-1:\n# print(\"*\",end = \" \")\n# else:\n# print(\" \", end= \" \")\n# print()\n# xpattern(5)\n\n\n# # quiz question\n# b = 1\n# for a in range(1,10,3):\n# b+= a + 1\n# print(b)\n\n# x = 50 \n# y = 10 \n# z = y if y > x else x \n# print(z)\n\n\nn = 6\nfor i in range(n):\n for j in range(n):\n if i == 0 or j == (n-1) or i == j:\n print(\"*\", end = \"\")\n else:\n print(end = \" \")\n print()\n 
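# ---------------------------------------------------------------------------
# Annotation (not part of the original record): in the loop above, the test
# `i == 0 or j == (n-1) or i == j` marks the top row, the rightmost column and
# the main diagonal of an n x n grid, i.e. a right-triangle outline. A hedged,
# self-contained sketch with the condition factored out (the `triangle` name
# is illustrative):
def triangle(n):
    """Print the outline of a right triangle on an n x n grid."""
    for i in range(n):
        print("".join("*" if (i == 0 or j == n - 1 or i == j) else " " for j in range(n)))

# triangle(6) prints a full top row, then one star on the diagonal and one on
# the right edge of each following row.
# ---------------------------------------------------------------------------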
","sub_path":"Questions/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"259148182","text":"from google.appengine.ext import endpoints, ndb\nfrom google.appengine.api import memcache\nfrom google.appengine.datastore.datastore_query import Cursor\nfrom protorpc import remote\nfrom models import Artist, ShowRecording\nfrom messages import (ArtistsRequest, ArtistsResponse,\n SearchRequest, SearchResponse, ShowRecordingResponse)\n\nCLIENT_ID = 'live-music-archive'\n\n@endpoints.api(name='music', version='v1',\n description='Live Music Archive API',\n package_path='com/micronixsolutions/api',\n allowed_client_ids=[CLIENT_ID, endpoints.API_EXPLORER_CLIENT_ID])\nclass Music(remote.Service):\n '''Defines the music api v1'''\n\n @endpoints.method(ArtistsRequest, ArtistsResponse,\n path='artists', http_method='GET',\n name='music.artists')\n def artists(self, request):\n '''API endpoint to query for artists'''\n next_page = request.next_page or 'first_artists_page'\n cache = memcache.get(next_page)\n if cache:\n return ArtistsResponse(artists=cache[0], next_page=cache[1])\n query = Artist.query()\n if next_page is 'first_artists_page':\n artists, cursor, more = query.fetch_page(300) \n else:\n artists, cursor, more = query.fetch_page(300, \n start_cursor=Cursor.from_websafe_string(next_page))\n artists = [artist.to_message() for artist in artists]\n memcache.add(next_page, (artists, cursor.to_websafe_string()))\n return ArtistsResponse(artists=artists, next_page=cursor.to_websafe_string())\n\n\n @endpoints.method(SearchRequest, SearchResponse,\n path='search', http_method='GET',\n name='search.all')\n @ndb.synctasklet\n def search_all(self, request):\n '''API endpoint to search for everything'''\n query_bits = request.query.split(' ')\n artists = yield self.create_search_query(Artist, Artist.search_fields, query_bits)\n shows = yield self.create_search_query(ShowRecording, ShowRecording.search_fields, query_bits)\n #songs_query = Songs.query()\n artists = [artist.to_message() for artist in artists]\n shows = [show.to_message() for show in shows]\n raise ndb.Return(SearchResponse(artists=artists, shows=shows))\n\n @ndb.tasklet\n def create_search_query(self, klass, attr, bits):\n '''Helper method to create query objects that do string\n comparison'''\n futures = []\n for bit in bits:\n futures.append(klass.query(ndb.AND(attr >= bit, attr < bit+u'\\ufffd')).fetch_async())\n \n results = []\n for future in futures:\n results.append(set(future.get_result()))\n raise ndb.Return(set.intersection(*results))\n\n","sub_path":"server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12323504","text":"import math\n\nclass Continent():\n def ___init___(self,img):\n self.name = \"\"\n self.pos = 0\n self.heading = 0\n self.rate = 0\n self.img = img\n self.orient = 0\n self.rot = 0\n \n def change_heading(self,heading):\n self.heading = heading\n\n def change_rate(self,rate):\n self.rate = rate\n\n def change_rot(self,rot):\n self.rot=rot\n z=self.orient+self.rot\n self.orient=z\n def move(self):\n\n x=self.pos[0]\n y=self.pos[1]\n \n rate = self.rate\n heading = self.heading\n x=x+self.rate*math.cos(heading* (math.pi / 180))\n y=y-self.rate*math.sin(heading* (math.pi / 180))\n \n self.pos=[x,y]\n z=self.orient+self.rot\n self.orient=z\n \n 
\n","sub_path":"meth.py","file_name":"meth.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130623256","text":"\"\"\"\nLeaf Package Manager\n\n@author: Legato Tooling Team \n@copyright: Sierra Wireless. All rights reserved.\n@contact: Legato Tooling Team \n@license: https://www.mozilla.org/en-US/MPL/2.0/\n\"\"\"\n\nimport shutil\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom tarfile import TarFile\n\nfrom leaf.api.remotes import RemoteManager\nfrom leaf.core.constants import LeafConstants, LeafFiles, LeafSettings\nfrom leaf.core.download import download_and_verify_file\nfrom leaf.core.error import InvalidPackageNameException, LeafException, LeafOutOfDateException, NoPackagesInCacheException, PrereqException\nfrom leaf.core.lock import LockFile\nfrom leaf.core.utils import fs_check_free_space, fs_compute_total_size, get_cached_artifact_name, mark_folder_as_ignored, rmtree_force\nfrom leaf.model.dependencies import DependencyUtils\nfrom leaf.model.environment import Environment\nfrom leaf.model.modelutils import check_leaf_min_version, find_manifest, is_latest_package\nfrom leaf.model.package import IDENTIFIER_GETTER, AvailablePackage, InstalledPackage, LeafArtifact, PackageIdentifier\nfrom leaf.model.steps import StepExecutor, VariableResolver\nfrom leaf.rendering.formatutils import sizeof_fmt\n\n\nclass PackageManager(RemoteManager):\n\n \"\"\"\n Main API for using Leaf package manager\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n RemoteManager.__init__(self)\n self.__download_cache_folder = self.cache_folder / LeafFiles.CACHE_DOWNLOAD_FOLDERNAME\n self.__application_lock = LockFile(self.find_configuration_file(LeafFiles.LOCK_FILENAME))\n self.__check_cache_folder_size()\n\n @property\n def application_lock(self):\n return self.__application_lock\n\n @property\n def download_cache_folder(self):\n self.__download_cache_folder.mkdir(parents=True, exist_ok=True)\n return self.__download_cache_folder\n\n def __check_cache_folder_size(self):\n # Check if it has been checked recently\n if self.is_file_outdated(self.download_cache_folder):\n # Compute the folder total size\n totalsize = fs_compute_total_size(self.download_cache_folder)\n if totalsize > LeafConstants.CACHE_SIZE_MAX:\n # Display a message\n self.logger.print_error(\"You can save {size} by cleaning the leaf cache folder\".format(size=sizeof_fmt(totalsize)))\n self.print_hints(\"to clean the cache, you can run: 'rm -r {folder}'\".format(folder=self.download_cache_folder))\n # Clean the cache\n if not LeafSettings.NON_INTERACTIVE.as_boolean() and LeafSettings.CACHE_AUTOCLEAN.as_boolean():\n if self.print_with_confirm(question=\"Do you want to clean the cache?\"):\n shutil.rmtree(str(self.download_cache_folder))\n # Update the mtime\n self.download_cache_folder.touch()\n\n def list_available_packages(self, force_refresh=False) -> dict:\n \"\"\"\n List all available package\n \"\"\"\n out = OrderedDict()\n self.fetch_remotes(force_refresh=force_refresh)\n\n for remote in self.list_remotes(only_enabled=True).values():\n if remote.is_fetched:\n for ap in remote.available_packages:\n if ap.identifier not in out:\n out[ap.identifier] = ap\n else:\n ap2 = out[ap.identifier]\n if ap.hashsum != ap2.hashsum:\n self.logger.print_error(\n \"Package {ap.identifier} is available in several remotes with same version but different content!\".format(ap=ap)\n )\n raise LeafException(\"Package {ap.identifier} has 
multiple artifacts for the same version\".format(ap=ap))\n ap2.add_duplicate(ap)\n # Keep tags\n for t in ap.tags:\n if t not in ap2.tags:\n ap2.tags.append(t)\n\n if len(out) == 0:\n raise NoPackagesInCacheException()\n return out\n\n def __download_ap(self, ap: AvailablePackage) -> LeafArtifact:\n \"\"\"\n Download given available package and returns the files in cache folder\n @return LeafArtifact\n \"\"\"\n cachedfile = self.__download_cache_folder / get_cached_artifact_name(ap.filename, ap.hashsum)\n # Select best candidate\n candidate = ap.best_candidate\n self.logger.print_verbose(\"Downloading {ap.identifier} from {ap.remote.alias}: {ap.url}\".format(ap=candidate))\n download_and_verify_file(candidate.url, cachedfile, logger=self.logger, hashstr=ap.hashsum)\n return LeafArtifact(cachedfile)\n\n def __extract_artifact(self, la: LeafArtifact, env: Environment, ipmap: dict, keep_folder_on_error: bool = False) -> InstalledPackage:\n \"\"\"\n Install a leaf artifact\n @return InstalledPackage\n \"\"\"\n if la.identifier in ipmap:\n raise LeafException(\"Package is already installed: {la.identifier}\".format(la=la))\n\n target_folder = self.install_folder / str(la.identifier)\n if target_folder.is_dir():\n raise LeafException(\"Folder already exists: {folder}\".format(folder=target_folder))\n\n # Check leaf min version\n min_version = check_leaf_min_version([la])\n if min_version:\n raise LeafOutOfDateException(\"You need to upgrade leaf to v{version} to install {la.identifier}\".format(version=min_version, la=la))\n\n # Create folder\n target_folder.mkdir(parents=True)\n\n try:\n # Extract content\n self.logger.print_verbose(\"Extract {la.path} in {dest}\".format(la=la, dest=target_folder))\n with TarFile.open(str(la.path)) as tf:\n tf.extractall(str(target_folder))\n # Execute post install steps\n out = InstalledPackage(target_folder / LeafFiles.MANIFEST)\n ipmap[out.identifier] = out\n self.__execute_steps(out.identifier, ipmap, StepExecutor.install, env=env)\n # Touch folder to trigger FS event\n target_folder.touch(exist_ok=True)\n return out\n except BaseException as e:\n self.logger.print_error(\"Error during installation:\", e)\n if keep_folder_on_error:\n target_folder = mark_folder_as_ignored(target_folder)\n self.logger.print_verbose(\"Mark folder as ignored: {folder}\".format(folder=target_folder))\n else:\n self.logger.print_verbose(\"Remove folder: {folder}\".format(folder=target_folder))\n rmtree_force(target_folder)\n raise e\n\n def __install_prereq(self, mflist: list, ipmap: dict, env: Environment = None, keep_folder_on_error: bool = False):\n \"\"\"\n Install given prereg packages and sync them after\n \"\"\"\n # First, install missing prereq packages\n ip_to_sync = []\n for mf in mflist:\n if isinstance(mf, InstalledPackage):\n self.logger.print_verbose(\"Prereq package {ip.identifier} is already installed\".format(ip=mf))\n ip_to_sync.append(mf)\n elif isinstance(mf, AvailablePackage):\n # Install package\n self.logger.print_verbose(\"Prereq package {ap.identifier} is being installed\".format(ap=mf))\n prereqla = self.__download_ap(mf)\n prereqip = self.__extract_artifact(prereqla, env, ipmap, keep_folder_on_error=keep_folder_on_error)\n ip_to_sync.append(prereqip)\n elif isinstance(mf, LeafArtifact):\n # Install package\n self.logger.print_verbose(\"Prereq package {la.identifier} is being installed\".format(la=mf))\n prereqla = mf\n prereqip = self.__extract_artifact(prereqla, env, ipmap, keep_folder_on_error=keep_folder_on_error)\n ip_to_sync.append(prereqip)\n 
else:\n raise ValueError()\n # Then, sync package sorted alphabetically\n for ip in sorted(ip_to_sync, key=IDENTIFIER_GETTER):\n # Sync package\n self.__execute_steps(ip.identifier, ipmap, StepExecutor.sync, env=env)\n\n def install_packages(self, items: list, env: Environment = None, keep_folder_on_error: bool = False):\n \"\"\"\n Compute dependency tree, check compatibility, download from remotes and extract needed packages\n @return: InstalledPackage list\n \"\"\"\n with self.application_lock.acquire():\n ipmap = self.list_installed_packages()\n apmap = self.list_available_packages()\n pilist = []\n for item in items:\n if isinstance(item, PackageIdentifier):\n # Package identifier is given\n pilist.append(item)\n elif PackageIdentifier.is_valid_identifier(item):\n # Package identifier string given\n pilist.append(PackageIdentifier.parse(item))\n else:\n # If leaf artifacts are given, add/replace identifiers of available packages\n la = LeafArtifact(Path(item))\n pilist.append(la.identifier)\n apmap[la.identifier] = la\n out = []\n\n # Build env to resolve dynamic dependencies\n if env is None:\n env = Environment.build(self.build_builtin_environment(), self.build_user_environment())\n\n ap_to_install = DependencyUtils.install(pilist, apmap, ipmap, env=env)\n\n # Check leaf min version\n min_version = check_leaf_min_version(ap_to_install)\n if min_version:\n raise LeafOutOfDateException(\n \"You need to upgrade leaf to v{version} to install {text}\".format(\n version=min_version, text=\", \".join([str(ap.identifier) for ap in ap_to_install])\n )\n )\n\n # Check nothing to do\n if len(ap_to_install) == 0:\n self.logger.print_default(\"All packages are installed\")\n else:\n # Check available size\n download_totalsize = 0\n download_count = 0\n for ap in [ap for ap in ap_to_install if isinstance(ap, AvailablePackage)]:\n download_count += 1\n if ap.size is not None:\n download_totalsize += ap.size\n fs_check_free_space(self.download_cache_folder, download_totalsize)\n\n # Confirm\n text = \", \".join([str(ap.identifier) for ap in ap_to_install])\n self.logger.print_quiet(\"Packages to install: {packages}\".format(packages=text))\n if download_totalsize > 0:\n self.logger.print_default(\"Total size:\", sizeof_fmt(download_totalsize))\n self.print_with_confirm(raise_on_decline=True)\n\n # Install prereq\n prereq_to_install = DependencyUtils.prereq([ap.identifier for ap in ap_to_install], apmap, ipmap, env=env)\n\n if len(prereq_to_install) > 0:\n try:\n self.__install_prereq(prereq_to_install, ipmap, env=env, keep_folder_on_error=keep_folder_on_error)\n except BaseException as e:\n raise PrereqException(e)\n\n # Download ap list\n self.logger.print_default(\"Downloading {size} package(s)\".format(size=download_count))\n la_to_install = []\n for mf in ap_to_install:\n if isinstance(mf, AvailablePackage):\n la_to_install.append(self.__download_ap(mf))\n elif isinstance(mf, LeafArtifact):\n la_to_install.append(mf)\n\n # Check the extracted size\n extracted_totalsize = 0\n for la in la_to_install:\n if la.final_size is not None:\n extracted_totalsize += la.final_size\n else:\n extracted_totalsize += la.get_total_size()\n fs_check_free_space(self.install_folder, extracted_totalsize)\n\n # Extract la list\n for la in la_to_install:\n self.logger.print_default(\"[{current}/{total}] Installing {la.identifier}\".format(current=(len(out) + 1), total=len(la_to_install), la=la))\n ip = self.__extract_artifact(la, env, ipmap, keep_folder_on_error=keep_folder_on_error)\n out.append(ip)\n\n return 
out\n\n def uninstall_packages(self, pilist: list):\n \"\"\"\n Remove given package\n \"\"\"\n with self.application_lock.acquire():\n ipmap = self.list_installed_packages()\n\n iplist_to_remove = DependencyUtils.uninstall(pilist, ipmap, logger=self.logger)\n\n if len(iplist_to_remove) == 0:\n self.logger.print_default(\"No package to remove\")\n else:\n # Confirm\n text = \", \".join([str(ip.identifier) for ip in iplist_to_remove])\n self.logger.print_quiet(\"Packages to uninstall: {packages}\".format(packages=text))\n self.print_with_confirm(raise_on_decline=True)\n for ip in iplist_to_remove:\n if ip.read_only:\n raise LeafException(\"Cannot uninstall system package {ip.identifier}\".format(ip=ip))\n self.logger.print_default(\"Removing {ip.identifier}\".format(ip=ip))\n self.__execute_steps(ip.identifier, ipmap, StepExecutor.uninstall)\n self.logger.print_verbose(\"Remove folder: {ip.folder}\".format(ip=ip))\n rmtree_force(ip.folder)\n del ipmap[ip.identifier]\n\n self.logger.print_default(\"{count} package(s) removed\".format(count=len(iplist_to_remove)))\n\n def sync_packages(self, pilist: list, env: Environment = None):\n \"\"\"\n Run the sync steps for all given packages\n \"\"\"\n ipmap = self.list_installed_packages()\n for pi in pilist:\n self.logger.print_verbose(\"Sync package {pi}\".format(pi=pi))\n self.__execute_steps(pi, ipmap, StepExecutor.sync, env=env)\n\n def __execute_steps(self, pi: PackageIdentifier, ipmap: dict, se_func: callable, env: Environment = None):\n # Find the package\n ip = find_manifest(pi, ipmap)\n # The environment\n if env is None:\n env = Environment.build(self.build_builtin_environment(), self.build_user_environment())\n # build the dependencies\n deps = DependencyUtils.installed([pi], ipmap, env=env, ignore_unknown=True)\n # Update env\n env.append(self.build_packages_environment(deps))\n # Fix PREREQ_ROOT\n env.set_variable(\"LEAF_PREREQ_ROOT\", self.install_folder)\n # The Variable resolver\n vr = VariableResolver(ip, ipmap.values())\n # Execute steps\n se = StepExecutor(self.logger, ip, vr, env=env)\n se_func(se)\n\n def build_packages_environment(self, items: list, ipmap=None):\n \"\"\"\n Get the env vars declared by given packages\n @param items: a list of InstalledPackage or PackageIdentifier\n \"\"\"\n ipmap = ipmap or self.list_installed_packages()\n out = Environment()\n for item in items:\n ip = None\n if isinstance(item, InstalledPackage):\n ip = item\n elif isinstance(item, PackageIdentifier):\n ip = None\n if is_latest_package(item):\n ip = find_manifest(item, ipmap)\n else:\n ip = ipmap.get(item)\n if ip is None:\n raise InvalidPackageNameException(item)\n else:\n raise InvalidPackageNameException(item)\n vr = VariableResolver(ip, ipmap.values())\n out.append(ip.build_environment(vr=vr.resolve))\n return out\n","sub_path":"src/leaf/api/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":16350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"309657184","text":"from flask_script import Manager, Shell\nfrom flask_migrate import Migrate, MigrateCommand\nfrom app.exts import db\nfrom app.models import *\nfrom app import create_app\n\napp = create_app()\nmanager = Manager(app)\n\n# Use Migrate to bind the app and the db\nmigrate = Migrate(app, db)\n\n\ndef make_shell_context():\n return dict(app=app, db=db, Follow=Follow, UserTag=UserTag, ArticleTag=ArticleTag, QuestionTag=QuestionTag,\n FavoriteArticle=FavoriteArticle, FavoriteQuestion=FavoriteQuestion, User=User, Admin=Admin,\n
Question=Question, Answer=Answer, Article=Article, Draft=Draft, Tag=Tag, ArticleComment=ArticleComment,\n AnswerComment=AnswerComment, Notification=Notification)\n\n\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\n\n# Add the db migration commands to the manager\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == \"__main__\":\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365827305","text":"from cornice import Service\nimport uuid\n\nfrom tempus_api.models import Group, Project, Task\nfrom tempus_api.models.groups import Group, Groups\nfrom tempus_api.models.projects import Project, ProjectsByGroup\nfrom tempus_api.models.tempus import TempusByGroupProject\nfrom datetime import datetime\nfrom tempus_api.security import authenticated_user, send_to_group\nimport transaction\n\n\nproject_collection_info = Service(\n name='projects collection',\n path='/g/{grup_id}/p/',\n description=\"Project Collection API\")\n\nproject_info = Service(\n name='projects',\n path='/g/{grup_id}/p/{project_id}',\n description=\"Project API\")\n\ncheck_project = Service(\n name='search project',\n path='/checkproject/{grup_id}/{project_id}',\n description=\"Project Search\")\n\nsearch_project = Service(\n name='search projects by name',\n path='/searchprojects/',\n description=\"Search projects\",\n cors_enabled=True,\n cors_origins='*')\n\n\n@search_project.get(validators=authenticated_user)\ndef search_projects(request):\n \"\"\"\n Search for a project\n \"\"\"\n cadena = request.GET['term']\n group = request.GET['group']\n user = request.user\n if not Groups(request.dm).userInGroup(user.email, group):\n return {'total': 0, 'projects': []}\n projects = ProjectsByGroup(request.dm, group)\n resultat = []\n for p in projects.searchProject(group, cadena):\n resultat.append(p.getPublicDict())\n return {'total': len(resultat), 'projects': resultat}\n\n\n@check_project.get()\ndef check_project_exist(request):\n group_id = request.matchdict['grup_id']\n project_id = request.matchdict['project_id']\n if project_id in ProjectsByGroup(request.dm, group_id):\n return True\n else:\n return False\n\n\n@project_collection_info.get(validators=authenticated_user)\ndef get_all_info(request):\n user = request.user\n try:\n grup_name = request.matchdict['grup_id']\n except:\n return {'success': False, 'message': 'No group id'}\n\n if not Groups(request.dm).userInGroup(user.email, grup_name):\n return {'success': False, 'message': 'No user in group'}\n\n projects = ProjectsByGroup(request.dm, grup_name)\n\n resultat_json = []\n for project in projects.values():\n project_json = project.getPublicDict()\n # tasks = Task.getActivByProject(request, projecte.identifier)\n # project_json['tasks'] = [task.getPublicDict() for task in tasks]\n resultat_json.append(project_json)\n return {'total': len(projects), 'projectes': resultat_json}\n\n\n@project_info.get(validators=authenticated_user)\ndef get_info(request):\n \"\"\"Returns the information about a project.\n \"\"\"\n\n user = request.user\n if not user:\n return {'success': False, 'message': 'User does not exist'}\n\n grup_name = request.matchdict['grup_id']\n project_name = request.matchdict['project_id']\n\n if not Groups(request.dm).userInGroup(user.email, grup_name):\n return {'success': False, 'message': 'No user in group'}\n\n projects = ProjectsByGroup(request.dm, grup_name)\n if project_name not in 
projects:\n return {'success': False, 'message': 'No valid project'}\n\n project = projects[project_name]\n\n project_json = project.getPublicDict()\n # tasks = Task.getActivByProject(request, project.identifier)\n # project_json['tasks'] = [task.getPublicDict() for task in tasks]\n return {'success': True, 'project': project_json}\n\n\n@project_info.put(validators=authenticated_user)\ndef set_info(request):\n \"\"\"\n Edit a project!!\n \"\"\"\n user = request.user\n if not user:\n return {'success': False, 'message': 'User does not exist'}\n\n grup_id = request.matchdict['grup_id']\n project_id = request.matchdict['project_id']\n\n if not Groups(request.dm).userInGroup(user.email, grup_id):\n return {'success': False, 'message': 'No user in group'}\n\n projects = ProjectsByGroup(request.dm, grup_id)\n if project_id in projects:\n projects[project_id].update(**request.json_body)\n return {\n 'success': True,\n 'message': 'Saved changes',\n 'project': projects[project_id].getPublicDict()\n }\n else:\n p = Project(**request.json_body)\n p.creator = user.email\n p.group = grup_id\n request.dm.insert(p)\n missatge = \"%s: Afegit projecte %s \" % (p.group, p.name)\n send_to_group(request, user.email, p.group, missatge)\n return {\n 'success': True,\n 'message': 'Added project',\n 'project': p.getPublicDict()\n }\n\n\n@project_info.delete(validators=authenticated_user)\ndef del_info(request):\n \"\"\"\n Delete a project!!\n \"\"\"\n user = request.user\n if not user:\n return {'success': False, 'message': 'User does not exist'}\n\n grup_id = request.matchdict['grup_id']\n project_id = request.matchdict['project_id']\n\n if not Groups(request.dm).userInGroup(user.email, grup_id):\n return {'success': False, 'message': 'No user in group'}\n\n projects_coll = ProjectsByGroup(request.dm, grup_id)\n if project_id in projects_coll:\n request.dm.remove(projects_coll[project_id])\n transaction.commit()\n return{'success': True, 'message': 'Project deleted'}\n else:\n return{'success': False, 'message': 'No project'}\n","sub_path":"tempus_api/views/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"621628337","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os\nimport zmq\nimport struct\nimport binascii\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nrouter_back = {}\nrouter_up = {}\n\ndef send_up(up, identity, message = b\"\"):\n logger.info(\"SEND up %s '%d'\" % (binascii.hexlify(identity), len(message)))\n\n up.send(identity, zmq.SNDMORE)\n up.send(message)\n\ndef send_to_backend(backend, identity, message = b\"\"):\n logger.info(\"SEND backend %s '%d'\" % (binascii.hexlify(identity), len(message)))\n\n backend.send(identity, zmq.SNDMORE)\n backend.send(message)\n\ndef read_up(up, backend):\n global router_up, router_back\n\n identity = up.recv()\n message = up.recv()\n\n logger.info(\"RECV up %s '%d'\" % (binascii.hexlify(identity), len(message)))\n\n if ((identity not in router_back) and (len(message) == 0)):\n logger.info(\"Connecting to backend\")\n backend.connect(\"tcp://localhost:8445\")\n\n bid = backend.recv()\n message = backend.recv()\n \n logger.info(\"RECV backend %s '%d'\" % (binascii.hexlify(bid), len(message)))\n \n router_back[identity] = bid\n router_up[bid] = identity\n\n logger.info(\"%s %s\" % 
(router_back, router_up))\n elif ((identity in router_back) and (len(message) == 0)):\n logger.info(\"Disconnecting backend\")\n send_to_backend(backend, router_back[identity])\n del router_up[router_back[identity]]\n del router_back[identity]\n elif ((identity in router_back) and (len(message) > 0)):\n logger.info(\"Forwarding to backend\")\n send_to_backend(backend, router_back[identity], message)\n else:\n logger.info(\"Unknown client\")\n send_up(up, identity)\n\ndef read_backend(up, backend):\n global router_up, router_back\n\n identity = backend.recv()\n message = backend.recv()\n\n logger.info(\"RECV backend %s '%d'\" % (binascii.hexlify(identity), len(message)))\n\n if ((identity in router_up) and (len(message) == 0)):\n logger.info(\"Backend closed connection\")\n send_up(up, identity)\n del router_back[router_up[identity]]\n del router_up[identity]\n elif (identity not in router_up):\n logger.info(\"Unknown backend\")\n send_to_backend(backend, identity)\n elif ((identity in router_up) and (len(message) != 0)):\n logger.info(\"Forwarding to client\")\n send_up(up, router_up[identity], message)\n\ndef main():\n ctx = zmq.Context()\n\n backend = ctx.socket(zmq.STREAM)\n \n up = ctx.socket(zmq.DEALER)\n up.connect(\"tcp://127.0.0.1:8444\")\n \n poller = zmq.Poller()\n poller.register(up, flags = zmq.POLLIN)\n poller.register(backend, flags = zmq.POLLIN)\n\n while 1:\n socks = dict(poller.poll())\n \n if ((up in socks) and (socks[up] == zmq.POLLIN)):\n read_up(up, backend)\n if ((backend in socks) and (socks[backend] == zmq.POLLIN)):\n read_backend(up, backend)\n\n up.close()\n backend.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"downstream.py","file_name":"downstream.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"533772992","text":"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under both the MIT license found in the\n# LICENSE-MIT file in the root directory of this source tree and the Apache\n# License, Version 2.0 found in the LICENSE-APACHE file in the root directory\n# of this source tree.\n\nload(\"@prelude//cxx:cxx_toolchain_types.bzl\", \"CxxToolchainInfo\")\nload(\n \"@prelude//linking:link_info.bzl\",\n \"LinkedObject\", # @unused Used as a type\n)\nload(\"@prelude//linking:strip.bzl\", \"strip_shared_library\")\nload(\n \"@prelude//utils:types.bzl\",\n \"unchecked\", # @unused Used as a type\n)\n\nSharedLibrary = record(\n lib = field(LinkedObject.type),\n stripped_lib = field([\"artifact\", None]),\n can_be_asset = field(bool.type),\n for_primary_apk = field(bool.type),\n label = field(\"label\"),\n)\n\nSharedLibraries = record(\n # A mapping of shared library SONAME (e.g. `libfoo.so.2`) to the artifact.\n # Since the SONAME is what the dynamic loader uses to uniquely identify\n # libraries, using this as the key allows easily detecting conflicts from\n # dependencies.\n libraries = field({str.type: SharedLibrary.type}),\n)\n\n# T-set of SharedLibraries\nSharedLibrariesTSet = transitive_set()\n\n# Shared libraries required by top-level packaging rules (e.g. 
shared libs\n# for Python binary, symlink trees of shared libs for C++ binaries)\nSharedLibraryInfo = provider(fields = [\n \"set\", # [SharedLibrariesTSet.type, None]\n])\n\ndef create_shared_libraries(\n ctx: \"context\",\n libraries: {str.type: LinkedObject.type}) -> SharedLibraries.type:\n \"\"\"\n Take a mapping of dest -> src and turn it into a mapping that will be\n passed around in providers. Used for both srcs, and resources.\n \"\"\"\n cxx_toolchain = getattr(ctx.attrs, \"_cxx_toolchain\", None)\n return SharedLibraries(\n libraries = {name: SharedLibrary(\n lib = shlib,\n stripped_lib = strip_shared_library(\n ctx,\n cxx_toolchain[CxxToolchainInfo],\n shlib.output,\n cmd_args([\"--strip-unneeded\"]),\n ) if cxx_toolchain != None else None,\n can_be_asset = getattr(ctx.attrs, \"can_be_asset\", False) or False,\n for_primary_apk = getattr(ctx.attrs, \"used_by_wrap_script\", False),\n label = ctx.label,\n ) for (name, shlib) in libraries.items()},\n )\n\n# We do a lot of merging library maps, so don't use O(n) type annotations\ndef _merge_lib_map(\n dest_mapping: unchecked({str.type: SharedLibrary.type}),\n mapping_to_merge: unchecked({str.type: SharedLibrary.type})) -> None:\n \"\"\"\n Merges a mapping_to_merge into `dest_mapping`. Fails if different libraries\n map to the same name.\n \"\"\"\n for (name, src) in mapping_to_merge.items():\n existing = dest_mapping.get(name)\n if existing != None and existing.lib != src.lib:\n error = (\n \"Duplicate library {}! Conflicting mappings:\\n\" +\n \"{} from {}\\n\" +\n \"{} from {}\"\n )\n fail(\n error.format(\n name,\n existing.lib,\n existing.label,\n src.lib,\n src.label,\n ),\n )\n dest_mapping[name] = src\n\n# Merge multiple SharedLibraryInfo. The value in `node` represents a set of\n# SharedLibraries that is provided by the target being analyzed. It's optional\n# because that might not always exist, e.g. a Python library can pass through\n# SharedLibraryInfo but it cannot produce any. The value in `deps` represents\n# all the inherited shared libraries for this target.\ndef merge_shared_libraries(\n actions: \"actions\",\n node: [\"SharedLibraries\", None] = None,\n deps: [\"SharedLibraryInfo\"] = []) -> \"SharedLibraryInfo\":\n kwargs = {}\n\n children = filter(None, [dep.set for dep in deps])\n if children:\n kwargs[\"children\"] = children\n if node:\n kwargs[\"value\"] = node\n\n set = actions.tset(SharedLibrariesTSet, **kwargs) if kwargs else None\n return SharedLibraryInfo(set = set)\n\ndef traverse_shared_library_info(\n info: \"SharedLibraryInfo\"): # -> {str.type: SharedLibrary.type}:\n libraries = {}\n if info.set:\n for libs in info.set.traverse():\n _merge_lib_map(libraries, libs.libraries)\n return libraries\n","sub_path":"buck-build/prelude/linking/shared_libraries.bzl","file_name":"shared_libraries.bzl","file_ext":"bzl","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373122100","text":"\"\"\"changed stroke model\n\nRevision ID: 280b558606ec\nRevises: 14cd07cd9faf\nCreate Date: 2015-06-26 00:19:29.835517\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '280b558606ec'\ndown_revision = '14cd07cd9faf'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column(u'messages_drawing_move', sa.Column('mid_x_percentage', sa.Float(), nullable=True))\n op.add_column(u'messages_drawing_move', sa.Column('mid_y_percentage', sa.Float(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(u'messages_drawing_move', 'mid_y_percentage')\n op.drop_column(u'messages_drawing_move', 'mid_x_percentage')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/280b558606ec_changed_stroke_model.py","file_name":"280b558606ec_changed_stroke_model.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301980431","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by server on 14-8-13 at 3:44 PM.\n\"\"\"\nfrom app.game.component.baseInfo.slot_base_info import SlotBaseInfoComponent\nfrom app.game.core.fight.skill import Skill\nfrom app.game.core.fight.skill_helper import SkillHelper\n\n\nclass EquipmentSlotComponent(SlotBaseInfoComponent):\n \"\"\"Line-up equipment slots\n 1: helmet\n 2: weapon\n 3: clothes\n 4: diamond\n 5: necklace\n 6: ring\n \"\"\"\n\n def __init__(self, owner, equ_slot_no, activation=False, equipment_id=None, base_name=''):\n super(EquipmentSlotComponent, self).__init__(owner, equ_slot_no, base_name, activation)\n\n self._equipment_id = equipment_id # equipment\n\n @property\n def equipment_id(self):\n \"\"\"Equipment ID\n \"\"\"\n return self._equipment_id\n\n @equipment_id.setter\n def equipment_id(self, equipment_id):\n\n if equipment_id == '0':\n equipment_id = None\n\n self._equipment_id = equipment_id\n\n @property\n def equipment_obj(self):\n \"\"\"Get the equipment object\n \"\"\"\n # player_obj = self.owner.owner.owner # -^_^- PlayerCharacter obj\n # equipment_obj = player_obj.equipment_component.equipments_obj.get(self._equipment_id, None)\n # return equipment_obj\n return self.owner.get_equipment_obj(self._equipment_id) if self._equipment_id else None\n\n @property\n def suit(self):\n \"\"\"Suit (equipment set) info\n \"\"\"\n equ_no_list = self.owner.equipment_nos # all equipped item numbers\n\n equ_obj = self.equipment_obj\n if not equ_obj:\n return {'num': 0, 'suit_no': 0}\n suit_conf = equ_obj.suit_conf\n if not suit_conf:\n return {'num': 0, 'suit_no': 0}\n suit_intersection = list(set(equ_no_list).intersection(set(suit_conf.suitMapping))) # intersection of the two lists\n return {'num': len(suit_intersection), 'suit_no': suit_conf.id} # activated count, suit number\n\n @property\n def suit_attr(self):\n \"\"\"Suit attribute values\n \"\"\"\n skills = []\n for skill_id in self.suit_skills:\n skill = Skill(skill_id)\n skill.init_attr()\n skills.append(skill)\n\n if not skills:\n return None\n\n skill_helper = SkillHelper(skills)\n skill_helper.init_attr()\n attr = skill_helper.parse_buffs()\n return attr\n\n @property\n def suit_skills(self):\n \"\"\"Extra skills granted by the suit\n \"\"\"\n skills = []\n suit = self.suit # suit data\n equ_obj = self.equipment_obj\n if not equ_obj:\n return skills\n suit_conf = equ_obj.suit_conf # suit configuration\n if not suit_conf:\n return skills\n num = suit.get('num', 0) # number of activated suit pieces\n for i in range(3):\n skill_cof = getattr(suit_conf, 'attr%s' % (i+1)) # suit attribute configuration\n if num >= skill_cof[0]:\n skills.append(skill_cof[1])\n return skills\n","sub_path":"app/game/component/line_up/equipment_slot.py","file_name":"equipment_slot.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384172273","text":"import pytest\n\nfrom toolkit.settings import SettingsLoader\nfrom pytest_apistellar import prop_alias\n\nfrom 
blog.blog.article.service import ArticleService\narti_ser = prop_alias(\"blog.blog.article.service.ArticleService\")\n\n\n@pytest.mark.usefixtures(\"mock\")\n@pytest.mark.asyncio\nclass TestService(object):\n pytestmark = [\n arti_ser(\"settings\", ret_val=SettingsLoader().load(\"settings\")),\n ]\n\n @arti_ser(\"code\", ret_val=\"111111\")\n @pytest.mark.env(NEED_CODE=\"True\")\n async def test_check_code_on_True(self):\n assert ArticleService().check_code(\"111111\") is True\n\n @arti_ser(\"code\", ret_val=\"22222\")\n @pytest.mark.env(NEED_CODE=\"True\")\n async def test_check_code_on_False(self):\n assert ArticleService().check_code(\"111111\") is False\n\n @arti_ser(\"code\", ret_val=\"3333\")\n @pytest.mark.env(NEED_CODE=\"False\")\n async def test_check_code_off(self):\n assert ArticleService().check_code(\"111111\") is True","sub_path":"tests/test_article/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405059068","text":"#!/usr/bin/env python3\n\n# LIFO\nclass stack(object):\n def __init__(self, n):\n self.sp = 0\n self.floor = 0\n self.top = n - 1\n self.stack = [None] * n\n\n def isEmpty(self):\n return self.sp <= self.floor\n\n def isFull(self):\n return self.sp > self.top\n\n def push(self, value):\n if self.isFull():\n return False\n else:\n self.stack[self.sp] = value\n self.sp = self.sp + 1\n\n def pop(self):\n if self.isEmpty():\n return False\n else:\n self.sp = self.sp - 1\n\n def getTop(self):\n if self.isEmpty():\n return None\n else:\n return self.stack[self.sp - 1]\n\n def popTop(self):\n ret = self.getTop()\n self.pop()\n return ret\n\n\ndef calc(a, b, o):\n if o == \"+\":\n return a + b\n elif o == \"-\":\n return a - b\n elif o == \"*\":\n return a * b\n else:\n return a / b\n\n\ndef calcPostfix(p, n):\n theStack = stack(n)\n\n for i in range(0, n):\n if type(p[i]) is int:\n theStack.push(p[i])\n elif p[i] in [ \"+\", \"-\", \"*\", \"/\" ]:\n x = theStack.popTop()\n y = theStack.popTop()\n theStack.push(calc(y, x, p[i]))\n\n return theStack.popTop()\n\n\ndef infix2Postfix(a, n):\n theStack = stack(n)\n p = [None] * n\n j = 0\n\n for i in range(0, n):\n if type(a[i]) is int:\n p[j] = a[i]\n j += 1\n elif a[i] in [ \"+\", \"-\", \"*\", \"/\" ]:\n theStack.push(a[i])\n elif a[i] == \")\":\n p[j] = theStack.popTop()\n j += 1\n\n while not theStack.isEmpty():\n p[j] = theStack.popTop()\n j += 1\n\n return p, j\n\n\ndef testLIFO():\n a = [ \"(\", 3, \"*\", 2, \")\", \"+\", \"(\", 1, \"*\", \"(\", 3, \"+\", 2, \")\", \")\" ]\n p, n = infix2Postfix(a, len(a))\n print(p)\n\n theStack = stack(10)\n\n print(\"isEmpty: \", theStack.isEmpty())\n print(\"isFull: \", theStack.isFull())\n theStack.push(1)\n theStack.push(2)\n\n print(\"getTop: \", theStack.getTop())\n print(\"popTop: \", theStack.popTop())\n print(\"getTop: \", theStack.getTop())\n\n print(\"isEmpty: \", theStack.isEmpty())\n print(\"isFull: \", theStack.isFull())\n\n theStack.pop()\n theStack.pop()\n print(\"isEmpty: \", theStack.isEmpty())\n print(\"isFull: \", theStack.isFull())\n\n\n\n# FIFO\nclass queue(object):\n def __init__(self, n):\n self.start = 0\n self.end = 0\n self.maximum = n\n self.full = False\n self.queue = [None] * n\n\n def isEmpty(self):\n return self.start == self.end and not self.full\n\n def isFull(self):\n return self.full\n\n def put(self, value):\n if self.isFull():\n return False\n else:\n self.queue[self.end] = value\n self.end = (self.end + 
1) % self.maximum\n\n if self.start == self.end:\n self.full = True\n\n def get(self):\n if self.isEmpty():\n return None\n else:\n self.full = False\n value = self.queue[self.start]\n self.start = (self.start + 1) % self.maximum\n return value\n\n def front(self):\n if self.isEmpty():\n return None\n else:\n return self.queue[self.start]\n\n\ndef testFIFO():\n pass\n\n\n\ndef strike(Q, n):\n i = 1\n\n while True:\n z = Q.get()\n if i < n:\n Q.put(z)\n i += 1\n else:\n i = 1\n\n z = Q.front()\n\n if z == 1 or Q.isEmpty():\n return Q\n\n\ndef sumseq(Q):\n s = 0\n\n while True:\n z = Q.get()\n s += z\n Q.put(s)\n z = Q.front()\n\n if z == 1:\n return Q\n\n\n\ndef polyaSieve(n):\n theQueue = queue(n)\n\n for i in range(1, n + 1):\n theQueue.put(i)\n\n theQueue = strike(theQueue, 3)\n theQueue = sumseq(theQueue)\n theQueue = strike(theQueue, 2)\n theQueue = sumseq(theQueue)\n\n while not theQueue.isEmpty():\n print(theQueue.get())\n\n\n\nif __name__ == \"__main__\":\n testLIFO()\n testFIFO()\n polyaSieve(200)\n","sub_path":"python/w10-prak-stack_und_queue.py","file_name":"w10-prak-stack_und_queue.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135308943","text":"import queue\n\n\nclass TreeNode:\n \"\"\" Minimal data structure: a single node\"\"\"\n\n def __init__(self, val=None):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass BinaryTree:\n \"\"\"how to use the node to create a binary tree\"\"\"\n\n # use the queue to store the node\n def __init__(self, value_list=[]):\n self.root = None\n self.traverse_list = []\n for n in value_list:\n self.insert(n)\n\n def insert(self, value):\n\n if self.root is None:\n self.root = TreeNode(value)\n return\n else:\n # Use the FIFO property of a queue to build a complete binary tree\n # Initialize an empty queue and enqueue the root node first\n # Dequeue a node and enqueue its children (an iterative level-order traversal); attach the new node at the first missing child and stop\n q = queue.Queue()\n q.put(self.root)\n while not q.empty():\n node = q.get() # dequeue\n if node.left is None:\n node.left = TreeNode(value)\n return\n else:\n q.put(node.left)\n if node.right is None:\n node.right = TreeNode(value)\n return\n else:\n q.put(node.right)\n\n # Pre-, in- and post-order traversals share the same code; only the position of the append differs\n def preorder_traverse(self, root=None):\n # Traverse recursively with a DFS strategy\n if root:\n self.traverse_list.append(root.val)\n if root.left:\n self.preorder_traverse(root.left)\n if root.right:\n self.preorder_traverse(root.right)\n\n def postorder_order_traverse(self, root_node):\n if root_node:\n if root_node.left:\n self.postorder_order_traverse(root_node.left)\n if root_node.right:\n self.postorder_order_traverse(root_node.right)\n self.traverse_list.append(root_node.val)\n return\n else:\n return\n\n def middle_order_traverse(self, root_node):\n if root_node:\n if root_node.left:\n self.middle_order_traverse(root_node.left)\n self.traverse_list.append(root_node.val)\n\n if root_node.right:\n self.middle_order_traverse(root_node.right)\n return\n else:\n return\n\n def delete(self, value):\n pass\n\n def search(self, value):\n pass\n\n\nif __name__ == '__main__':\n value_list = list(range(1, 10))\n print(value_list)\n b = BinaryTree(value_list)\n\n b.preorder_traverse(b.root)\n\n print(b.traverse_list)\n b.traverse_list = []\n b.postorder_order_traverse(b.root)\n print(b.traverse_list)\n b.traverse_list = []\n b.middle_order_traverse(b.root)\n 
print(b.traverse_list)\n","sub_path":"Tree/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322658734","text":"# -*- coding: iso-8859-1 -*-\n# Copyright (C) 2005 France Telecom R&D\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\"\"\"querier test cases\"\"\"\n\nimport unittest\nimport sha\n\nfrom logilab.common.testlib import MockConnection\nfrom logilab.common.db import get_connection\nfrom maay.querier import MaayQuerier, normalizeText, FutureDocument, Document, FileInfo\n\n\n\nclass QuerierTC(unittest.TestCase):\n def setUp(self):\n self.cnx = get_connection(driver='mysql', host='localhost',\n database='maay_test', user='maay',\n password='maay')\n self.querier = MaayQuerier(connection=self.cnx)\n self.nodeId = '0'*40\n self.querier.registerNode(self.nodeId, \"127.0.0.1\", 6789, 10)\n\n def tearDown(self):\n cursor = self.cnx.cursor()\n for table in ('document_providers', 'document_scores', 'documents',\n 'files', 'node_interests', 'nodes', 'words'):\n cursor.execute('DELETE FROM %s' % table)\n cursor.close()\n self.querier.close()\n\n \n def test_execute(self):\n answ = self.querier._execute('SELECT * from documents')\n self.assertEquals(list(answ), [])\n\n def testIndexDocument(self):\n text = u\"\"\"Le tartuffe, de Jean-Baptiste Poquelin, dit Molière.\n\nLe petit chat est mort.\"\"\"\n text = normalizeText(text)\n digest = sha.sha(text).hexdigest()\n cursor = self.cnx.cursor()\n # At this point, database should be empty, so no document\n # should match \n title = 'Le Tartuffe'\n matchingDocs = Document.selectWhere(cursor, document_id=digest)\n self.assertEquals(len(matchingDocs), 0)\n self.querier.indexDocument('0'*40, FutureDocument(\n filename='/tmp/Tartuffe.txt',\n title=title,\n text=text,\n fileSize=len(text),\n lastModificationTime=30000,\n content_hash=digest,\n mime_type='text',\n state=Document.PUBLISHED_STATE,\n file_state=FileInfo.CREATED_FILE_STATE))\n matchingDocs = Document.selectWhere(cursor, document_id=digest)\n self.assertEquals(len(matchingDocs), 1)\n self.assertEquals(matchingDocs[0].text, '%s %s' % (title, text))\n \n\n def test_normalizeText(self):\n self.assertEquals(normalizeText(u\"ÉtùïÄç\"), \"etuiac\")\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tags/release-0.2.1/maay/test/test_querier.py","file_name":"test_querier.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365906484","text":"t = input()\nn = int(t)\nhomeJersey = []\nawayJersey = []\ncount = 0\nfor i in range(n):\n color = input()\n temp = color.split()\n homeJersey.append(temp[0])\n awayJersey.append(temp[1])\n\nfor i in range(n):\n for j in range(n):\n if homeJersey[i] == 
awayJersey[j]:\n count += 1\nprint(count)\n","sub_path":"CodeForces/A. Games.py","file_name":"A. Games.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591331270","text":"import requests, os\r\nfrom bs4 import BeautifulSoup\r\n# Download the description and image of xkcd's latest comic\r\n\r\nr = requests.get(\"https://xkcd.com/\")\r\n\r\nsoup = BeautifulSoup(r.content, \"html.parser\")\r\n\r\ng_da = soup.find_all(\"img\")\r\n\r\n#get the title of the comic\r\nimgTitle = g_da[2].get('title')\r\nprint(imgTitle)\r\n\r\n#get the image name and extension\r\nimgURL = g_da[2].get('src')\r\n\r\n# make the URL absolute and fetch the image\r\nr = requests.get('https:'+imgURL)\r\n\r\n# make and save directory\r\nos.makedirs('xkcd', exist_ok=True)\r\nimageFile = open(os.path.join('xkcd', os.path.basename(imgURL)), 'wb')\r\n\r\n# write image, then close the file once the loop is done\r\nfor chunk in r.iter_content(100000):\r\n imageFile.write(chunk)\r\nimageFile.close()\r\n","sub_path":"ScrapperProjects/xkdc_comic_scrapper.py","file_name":"xkdc_comic_scrapper.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
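# ---------------------------------------------------------------------------
# Annotation (not part of the original record): the download loop in the last
# record is easier to get right with a streamed response and a context
# manager, so the file handle is closed exactly once even if a write fails.
# A hedged, self-contained sketch; the `download` helper name is illustrative:
import os
import requests

def download(url, dest_dir="xkcd"):
    """Stream `url` into dest_dir/<basename> and return the saved path."""
    os.makedirs(dest_dir, exist_ok=True)
    path = os.path.join(dest_dir, os.path.basename(url))
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(path, "wb") as fh:
        for chunk in r.iter_content(100000):
            fh.write(chunk)
    return path
# ---------------------------------------------------------------------------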