diff --git "a/3455.jsonl" "b/3455.jsonl" new file mode 100644--- /dev/null +++ "b/3455.jsonl" @@ -0,0 +1,765 @@ +{"seq_id":"470959523","text":"import os\nimport sys\nimport numpy as np\nfrom math import erf\nfrom itertools import product\nfrom scipy.spatial.distance import cdist\n\ntry:\n cwd = sys.argv[1]\nexcept:\n cwd = os.getcwd()\n\nclass Angular_Fingerprint(object):\n \"\"\" comparator for construction of angular fingerprints\n \"\"\"\n\n def __init__(self, atoms, Rc1=4.0, Rc2=4.0, binwidth1=0.1, Nbins2=30, sigma1=0.2, sigma2=0.10, nsigma=4, eta=1, gamma=3, use_angular=True):\n \"\"\" Set a common cut of radius\n \"\"\"\n self.Rc1 = Rc1\n self.Rc2 = Rc2\n self.binwidth1 = binwidth1\n self.Nbins2 = Nbins2\n self.binwidth2 = np.pi / Nbins2\n self.sigma1 = sigma1\n self.sigma2 = sigma2\n self.nsigma = nsigma\n self.eta = eta\n self.gamma = gamma\n self.use_angular = use_angular\n\n self.pbc = atoms.get_pbc()\n self.cell = atoms.get_cell()\n self.n_atoms = atoms.get_number_of_atoms()\n self.num = atoms.get_atomic_numbers()\n self.atomic_types = sorted(list(set(self.num)))\n self.atomic_count = {type:list(self.num).count(type) for type in self.atomic_types}\n self.volume = atoms.get_volume()\n self.dim = 3\n\n # parameters for the binning:\n self.m1 = self.nsigma*self.sigma1/self.binwidth1 # number of neighbour bins included.\n self.smearing_norm1 = erf(1/np.sqrt(2) * self.m1 * self.binwidth1/self.sigma1) # Integral of the included part of the gauss\n self.Nbins1 = int(np.ceil(self.Rc1/self.binwidth1))\n\n self.m2 = self.nsigma*self.sigma2/self.binwidth2 # number of neighbour bins included.\n self.smearing_norm2 = erf(1/np.sqrt(2) * self.m2 * self.binwidth2/self.sigma2) # Integral of the included part of the gauss\n self.binwidth2 = np.pi/Nbins2\n\n Nelements_2body = self.Nbins1\n Nelements_3body = self.Nbins2\n\n if use_angular:\n self.Nelements = Nelements_2body + Nelements_3body\n else:\n self.Nelements = Nelements_2body\n\n def get_feature(self, atoms):\n \"\"\"\n \"\"\"\n\n cell = self.cell\n n_atoms = self.n_atoms\n pos = atoms.get_positions().tolist()\n num = atoms.get_atomic_numbers()\n atomic_count = self.atomic_count\n\n \n return pos\n\n def __angle(self, vec1, vec2):\n \"\"\"\n Returns angle with convention [0,pi]\n \"\"\"\n norm1 = np.linalg.norm(vec1)\n norm2 = np.linalg.norm(vec2)\n arg = np.dot(vec1,vec2)/(norm1*norm2)\n # This is added to correct for numerical errors\n if arg < -1:\n arg = -1.\n elif arg > 1:\n arg = 1.\n return np.arccos(arg), arg\n\n def __f_cutoff(self, r, gamma, Rc):\n \"\"\"\n Polinomial cutoff function in the, with the steepness determined by \"gamma\"\n gamma = 2 resembels the cosine cutoff function.\n For large gamma, the function goes towards a step function at Rc.\n \"\"\"\n if not gamma == 0:\n return 1 + gamma*(r/Rc)**(gamma+1) - (gamma+1)*(r/Rc)**gamma\n else:\n return 1\n\n def __f_cutoff_grad(self, r, gamma, Rc):\n if not gamma == 0:\n return gamma*(gamma+1)/Rc * ((r/Rc)**gamma - (r/Rc)**(gamma-1))\n else:\n return 0\n\n def angle2_grad(self, RijVec, RikVec):\n Rij = np.linalg.norm(RijVec)\n Rik = np.linalg.norm(RikVec)\n\n a = RijVec/Rij - RikVec/Rik\n b = RijVec/Rij + RikVec/Rik\n A = np.linalg.norm(a)\n B = np.linalg.norm(b)\n D = A/B\n\n RijMat = np.dot(RijVec[:,np.newaxis], RijVec[:,np.newaxis].T)\n RikMat = np.dot(RikVec[:,np.newaxis], RikVec[:,np.newaxis].T)\n\n a_grad_j = -1/Rij**3 * RijMat + 1/Rij * np.identity(3)\n b_grad_j = a_grad_j\n\n a_sum_j = np.sum(a*a_grad_j, axis=1)\n b_sum_j = np.sum(b*b_grad_j, axis=1)\n\n grad_j = 
2/(1+D**2) * (1/(A*B) * a_sum_j - A/(B**3) * b_sum_j)\n\n\n\n a_grad_k = 1/Rik**3 * RikMat - 1/Rik * np.identity(3)\n b_grad_k = -a_grad_k\n\n a_sum_k = np.sum(a*a_grad_k, axis=1)\n b_sum_k = np.sum(b*b_grad_k, axis=1)\n\n grad_k = 2/(1+D**2) * (1/(A*B) * a_sum_k - A/(B**3) * b_sum_k)\n\n\n a_grad_i = -(a_grad_j + a_grad_k)\n b_grad_i = -(b_grad_j + b_grad_k)\n\n a_sum_i = np.sum(a*a_grad_i, axis=1)\n b_sum_i = np.sum(b*b_grad_i, axis=1)\n\n grad_i = 2/(1+D**2) * (1/(A*B) * a_sum_i - A/(B**3) * b_sum_i)\n\n return grad_i, grad_j, grad_k\n","sub_path":"Platinum_clusters_Project/globalOptim_program/featureCalculators_multi/angular_fingerprintFeature_cy_test.py","file_name":"angular_fingerprintFeature_cy_test.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"296864427","text":"import importlib\nfrom os import listdir\nfrom os.path import isfile, join\nimport os\nimport re\nonlyfiles = [f for f in listdir(\"../mut\") if isfile(join(\"../mut\", f))]\nprint(onlyfiles)\nos.chdir(\"../mut\")\nfor i in onlyfiles:\n module = \"mut.\" + re.sub(\".py\",\"\",i)\n blee = importlib.import_module(module)\n print(blee.operator())\nos.chdir(\"../test_package\")\nprint(os.getcwd())\n\nwith open(\"tests.py\") as file:\n f = file.readlines()\n lines = list()\n # for j in onlyfiles:\n # j= re.sub('.py', \"\",j)\n for i in f:\n if re.search('from test_package',i):\n i = re.sub('from test_package' , 'from mut', i)\n if re.search('foo',i):\n i = re.sub(\"foo\",'M0test',i)\n lines.append(i)\n\nwith open('tests1.py', \"w\") as file:\n for line in lines:\n file.write(line)\n\n","sub_path":"test_package/bleee.py","file_name":"bleee.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"67411024","text":"import pandas as pd\n\ndef totalAtoms(file_path):\n file=open(file_path,'r')\n line=file.readline()\n atoms=0\n while line!='':\n line=file.readline()\n if '$' in line:\n break\n atoms+=1\n file.close()\n return atoms\n\ndef processLineCords(line):\n line=line.strip().split()\n line=line[:4]\n line[0]=float(line[0])\n line[1]=float(line[1])\n line[2]=float(line[2])\n return line\n\ndef getCords(file_path):\n data={'atom':[],'atom_no':[],'x':[],'y':[],'z':[]}\n atoms=totalAtoms(file_path)\n file=open(file_path,'r')\n line=file.readline() \n for i in range(atoms):\n line=file.readline()\n line=processLineCords(line)\n data['atom'].append(line[3])\n data['atom_no'].append(i)\n data['x'].append(line[0])\n data['y'].append(line[1])\n data['z'].append(line[2])\n df=pd.DataFrame.from_dict(data)\n return df\n\n","sub_path":"read_file_opt.py","file_name":"read_file_opt.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"250940570","text":"from tkinter import *\n\nclass Gui(Tk):\n\n def __init__(self):\n super().__init__()\n \n \t # load resources\n self.correct_image = PhotoImage(file=\"correct.gif\")\n self.wrong_image = PhotoImage(file=\"wrong.gif\")\n self.neutral_image = PhotoImage(file=\"neutral.gif\")\n\n # set window attributes\n self.title(\"Newsletter\")\n self.configure(pady=10,\n padx=10,\n background=\"#eee\")\n \n # add components\n self.add_main_frame()\n self.add_heading_label()\n self.add_instructional_label()\n self.add_email_frame()\n self.add_email_label()\n self.add_email_entry()\n self.add_subscribe_button()\n \n\n def 
add_main_frame(self):\n self.main_frame = Frame()\n self.main_frame.pack()\n \n def add_heading_label(self):\n self.heading_label = Label(self.main_frame)\n self.heading_label.grid(row=0, column=0)\n self.heading_label.configure(text=\"RECEIVE OUR NEWSLETTER\",\n font=\"Arial 14\",\n pady=10) \n\n def add_instructional_label(self):\n self.instructional_label = Label(self.main_frame,anchor=\"w\")\n self.instructional_label.grid(row=1, column=0)\n self.instructional_label.configure(text=\"Please enter your email below to receive our newsletter.\",\n pady=10,\n padx=10)\n\n def add_email_frame(self):\n self.email_frame = Frame(self.main_frame)\n self.email_frame.grid(row=2, column=0) \n\n def add_email_label(self):\n self.email_label = Label(self.email_frame)\n self.email_label.grid(row=0, column=0)\n self.email_label.configure(text=\"Email:\",\n padx=10,\n pady=10)\n\n\n def add_email_entry(self):\n self.email_entry = Entry(self.email_frame)\n self.email_entry.grid(row=0, column=1)\n self.email_entry.configure(width=30,\n border=2,\n fg=\"#f00\")\n\n def add_subscribe_button(self):\n self.subscribe_button = Button(self.main_frame)\n self.subscribe_button.grid(row=3, column=0)\n self.subscribe_button.configure(text=\"Subscribe\",\n background=\"#fee\",\n width=45)\n self.subscribe_button.bind(\"<Button-1>\", self.subscribe_button_clicked)\n\n def subscribe_button_clicked(self, event):\n from tkinter import messagebox # messagebox is a submodule, not pulled in by \"from tkinter import *\"\n messagebox.showinfo(\"Newsletter\", \"Subscribed!\")\n\n# Create an object of the Gui class when this module is executed\nif (__name__ == \"__main__\"):\n gui = Gui()\n gui.mainloop()\n","sub_path":"2-guis/Mock-Practice/1-Graphical-User-Interface/part_a.py","file_name":"part_a.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"621740195","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 26 10:09:07 2019\r\n\r\n@author: 86156\r\n\"\"\"\r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import transforms\r\nimport torch.nn.functional as F\r\nimport shutil\r\nimport numpy as np\r\nimport sys\r\nfrom model import ImgTxtModel\r\nfrom dataset import UrbanDataset\r\ncuda_device_id = '0'\r\n\r\nclass AverageMeter(object):\r\n \"\"\"\r\n Computes and stores the average and current value\r\n Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py\r\n \"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n \r\nclass FocalLoss(torch.nn.Module):\r\n def __init__(self, gamma=2):\r\n super().__init__()\r\n self.gamma = gamma\r\n \r\n def forward(self, input, target):\r\n target = torch.zeros_like(input).scatter_(1, target.view(-1,1), 1)\r\n if not (target.size() == input.size()):\r\n raise ValueError(\"Target size ({}) must be the same as input size ({})\"\r\n .format(target.size(), input.size()))\r\n\r\n max_val = (-input).clamp(min=0)\r\n loss = input - input * target + max_val + \\\r\n ((-max_val).exp() + (-input - max_val).exp()).log()\r\n\r\n invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))\r\n loss = (invprobs * self.gamma).exp() * loss\r\n \r\n return loss.sum(dim=1).mean()\r\n \r\ndef train(dataloader, model, criterion, optimazer, epoch, grad_cilp=True):\r\n losses = AverageMeter()\r\n accuracy = AverageMeter()\r\n \r\n 
model.train()\r\n torch.set_grad_enabled(True) \r\n\r\n i_step = 50\r\n total_step = len(dataloader)\r\n \r\n for i, (input, target) in enumerate(dataloader):\r\n if i == len(dataloader)-1:\r\n break\r\n \r\n device = torch.device(\"cuda:\"+cuda_device_id if torch.cuda.is_available() else \"cpu\")\r\n input_var = [x.to(device) for x in input]\r\n batch_size = input_var[0].size(0)\r\n target = target.to(device)\r\n \r\n model.txt_model.hidden_state = [x.to(device) for x in model.txt_model.init_hidden()]\r\n \r\n output = model(input_var)\r\n #penalization AAT - I\r\n# attT = att.transpose(1,2)\r\n# identity = torch.eye(att.size(1)).to(device)\r\n# identity = identity.unsqueeze(0).expand(batch_size,att.size(1),att.size(1))\r\n# penal = model.l2_matrix_norm(att@attT - identity)\r\n# loss = criterion(output, target) + (1.0 * penal/batch_size)\r\n \r\n loss = criterion(output, target)\r\n loss.backward()\r\n if grad_cilp:\r\n torch.nn.utils.clip_grad_norm_(model.parameters(),0.5)\r\n optimazer.step()\r\n optimazer.zero_grad()\r\n losses.update(loss.item(),batch_size)\r\n \r\n _, pred = torch.max(output, 1)\r\n accuracy.update(torch.sum(pred == target.data).item()/batch_size, batch_size)\r\n \r\n if (i+1) % i_step == 0:\r\n print('Train --- epoch: {:02d}, step: [{:04d}/{:04d}], loss: {:.4f},\\\r\n acc: {:.2f}'.format(epoch, i, total_step, losses.avg, accuracy.avg))\r\n \r\ndef view_bar(message, num, total):\r\n rate = num / total\r\n rate_num = int(rate * 40)\r\n rate_nums = np.ceil(rate * 100)\r\n r = '\\r%s:[%s%s]%d%%\\t%d/%d' % (message, \">\" * rate_num, \" \" * (40 - rate_num), rate_nums, num, total,)\r\n sys.stdout.write(r)\r\n sys.stdout.flush()\r\n \r\ndef validation(dataloader, model, criterion, epoch):\r\n model.eval() \r\n torch.set_grad_enabled(False)\r\n \r\n loss_sum,acc_sum = 0,0\r\n total_step = len(dataloader)\r\n\r\n for i, (input, target) in enumerate(dataloader):\r\n if i == len(dataloader)-1:\r\n break \r\n \r\n device = torch.device(\"cuda:\"+cuda_device_id if torch.cuda.is_available() else \"cpu\")\r\n input_var = [x.to(device) for x in input]\r\n target = target.to(device)\r\n batch_size = input_var[0].size(0)\r\n \r\n output = model(input_var)\r\n loss = criterion(output, target)\r\n loss_sum += loss.item() * batch_size\r\n \r\n _, pred = torch.max(output, 1)\r\n acc_sum += torch.sum(pred == target.data)\r\n \r\n view_bar('val:',i,total_step)\r\n \r\n loss_avg = loss_sum / (total_step * batch_size)\r\n acc_avg = acc_sum.double() / (total_step * batch_size)\r\n print('\\n')\r\n print('Val --- epoch: loss: {:.4f}, acc: {:.2f}'.format(loss_avg, acc_avg)) \r\n return acc_avg\r\n\r\n\r\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\r\n filename = 'snapshot/__' + str(state['epoch']) + '__' + filename\r\n torch.save(state, filename)\r\n if is_best:\r\n best_name = 'snapshot/__' + 'model_best.pth.tar'\r\n shutil.copyfile(filename, best_name)\r\n \r\n \r\n \r\ndef main(run_epoch, train_list_file, val_list_file, val_frequency, resume, batch_size,\\\r\n input_mean=[0.468, 0.537, 0.621], input_std=[0.173, 0.157, 0.139], mode='train'):\r\n\r\n model = ImgTxtModel()\r\n device = torch.device(\"cuda:\"+cuda_device_id if torch.cuda.is_available() else \"cpu\")\r\n model = model.to(device)\r\n image_size = model.input_image_size\r\n # setup dataloads\r\n \r\n train_loader = DataLoader(UrbanDataset(train_list_file,\r\n image_transfrom = transforms.Compose([\r\n transforms.RandomResizedCrop(image_size),\r\n transforms.RandomHorizontalFlip(),\r\n 
transforms.ToTensor(),\r\n transforms.Normalize(input_mean,input_std)])),\r\n batch_size=batch_size,shuffle=True, num_workers=0)\r\n \r\n val_loader = DataLoader(UrbanDataset(val_list_file,\r\n image_transfrom = transforms.Compose([\r\n transforms.CenterCrop(image_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(input_mean,input_std)])),\r\n batch_size=batch_size,shuffle=True, num_workers=0) \r\n \r\n if resume:\r\n check_point = torch.load('snapshot/__model_best.pth.tar')\r\n best_acc = check_point['best_acc']\r\n start_epoch = check_point['epoch']\r\n model.load_state_dict(check_point['state_dict'])\r\n print('-'*20)\r\n print('load check_point __model_best.pth.tar successful !')\r\n print('-'*20)\r\n else:\r\n best_acc = 0\r\n start_epoch = 0\r\n \r\n # judge validation or train\r\n if mode == 'train':\r\n # build loss function and optimazer\r\n# criterion = torch.nn.CrossEntropyLoss().to(device)\r\n criterion = FocalLoss().to(device)\r\n optimazer = torch.optim.SGD(model.parameters(), lr=0.008, momentum=0.9)\r\n exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimazer, step_size=3, gamma=0.9)\r\n print('----- start training -----')\r\n for epoch in range(start_epoch, start_epoch+run_epoch):\r\n train(train_loader, model, criterion, optimazer, epoch)\r\n exp_lr_scheduler.step()\r\n# print('current lr is : {:4f}'.format(optimazer.param_groups[0]['lr']))\r\n\r\n if (epoch+1) % val_frequency == 0:\r\n model.eval()\r\n acc = validation(val_loader, model, criterion, epoch)\r\n \r\n is_best = acc > best_acc\r\n # save model\r\n save_checkpoint({\r\n 'epoch': epoch + 1,\r\n 'state_dict': model.state_dict(),\r\n 'best_acc': best_acc,\r\n 'lr': optimazer.param_groups[-1]['lr'],\r\n }, is_best) \r\n \r\n else:\r\n model.eval()\r\n validation(val_loader, model, criterion, 0)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n train_root_path = 'data_list/train_list.txt'\r\n val_root_path = 'data_list/val_list.txt'\r\n start_epoch = 0\r\n run_epoch = 30\r\n val_frequency = 1\r\n batch_size = 16\r\n main(run_epoch, train_root_path, val_root_path, val_frequency, resume=False, batch_size=batch_size)\r\n ","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"640073561","text":"from django import forms\nfrom django.core.validators import RegexValidator\nfrom .models import User, Student, Teacher, Schedule, sTime, tTime\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\n\nclass LoginForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'password']\n\t\twidgets = {\n\t\t\t'password': forms.PasswordInput(attrs={'placeholder': 'Password'}),\n\t\t\t'username':forms.TextInput(attrs={'placeholder': 'Username'}),\n\n\t\t}\n\nclass StudentForm(forms.ModelForm):\n\n\taddress = forms.CharField(widget=forms.Textarea)\n\n\tclass Meta:\n\t\tmodel = Student\n\t\tfields = ['lastname', 'firstname', 'middlename', 'nickname','birthdate','gender','picture',\n\t\t\t\t\t\t 'phone_num','tel_num','email',\n 'address','student_num','school',\n 'school_level','date_enrolled', 'status', 'kumon_levelM','kumon_levelR',\n 'guardian_name','relation','guardian_num','guardian_email'\n ]\n\t\twidgets = {\n 'birthdate': DateInput(),\n\t\t\t'date_enrolled': DateInput(),\n }\n\nclass EditStudentForm(forms.ModelForm):\n\n\taddress = forms.CharField(widget=forms.Textarea)\n\n\tclass Meta:\n\t\tmodel = Student\n\t\tfields = 
['nickname','picture',\n\t\t\t\t\t\t 'phone_num','tel_num','email',\n 'address','student_num','school',\n 'school_level','date_enrolled', 'status', 'kumon_levelM','kumon_levelR',\n 'guardian_num','guardian_email']\n\n\nclass TeacherForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Teacher\n\t\tfields = ['lastname', 'firstname', 'middlename','phone_num','tel_num','email','birthdate','age','gender','address','subject','t_picture']\n\t\t\n\t\twidgets = {\n\t\t\t'birthdate':DateInput(),\n\t\t}\n\t\t\nclass EditTeacherForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Teacher\n\t\tfields = ['phone_num','tel_num','email','address','subject','t_picture']\n\n\nclass ScheduleForm(forms.ModelForm):\n\t\n\tclass Meta:\n\t\tmodel = Schedule\n\t\tfields = ['teacher','day','timeslots']\n\n\nclass sTimeForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = sTime\n\t\tfields = ['student']\n\n\t\twidgets = {\n\t\t\t\n\t\t}\n","sub_path":"INTROSE/KumonSite/kumon/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"206816762","text":"'''\n\ntracing_digits Flask server\n\n'''\n\nimport requests\nfrom flask import Flask, render_template, request\n\nfrom file_ops import recordSession\ntry:\n import scroll_printer\nexcept Exception as e:\n e_message = '** Device not active! **'\n\n print ('\\n', e_message)\n print (e, '\\n')\n\n extra = {}\n extra['value1'] = e_message\n extra['value2'] = e\n # requests.post('https://maker.ifttt.com/trigger/digital_traces/with/key/dbGvvRtbul5OU_NDdAHz26', data=extra)\n\n\nAPP = Flask(__name__)\n\n\n@APP.route('/', methods=['GET', 'POST'])\ndef serve_trace():\n\n if request.method == 'GET':\n return render_template('index.html')\n\n elif request.method == 'POST':\n\n data = request.get_json()\n recordSession(data)\n\n try:\n result = scroll_printer.printResult(data)\n except Exception as e:\n error_message = '** Could not print! **'\n\n print ('\\n', error_message)\n print (e, '\\n')\n\n extra = {}\n extra['value1'] = error_message\n extra['value2'] = e\n # requests.post('https://maker.ifttt.com/trigger/digital_traces/with/key/dbGvvRtbul5OU_NDdAHz26', data=extra)\n\n return 'Could not print! 
:('\n\n return result\n\n\nif __name__ == '__main__':\n APP.run(debug=True, host='0.0.0.0', port='5000')\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326711683","text":"import os\nimport math\nimport numpy as np\nfrom time import sleep\nfrom PyQt4 import QtCore\nfrom PyQt4.QtGui import *\nfrom urllib.error import HTTPError, URLError\n\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.widget import OWWidget\nfrom Orange.widgets import gui\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.nyt import NYT, parse_record_json\nfrom Orange.data import Domain, DiscreteVariable\n\n\ndef _i(name, icon_path=\"icons\"):\n widget_path = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(widget_path, icon_path, name)\n\nclass Output:\n CORPUS = \"Corpus\"\n\nclass OWNYT(OWWidget):\n name = \"NY Times\"\n description = \"Load data from the New York Times article search API.\"\n icon = \"icons/NYTimes.svg\"\n priority = 20\n\n outputs = [(Output.CORPUS, Corpus)]\n want_main_area = False\n\n output_corpus = None\n # Response info.\n all_hits = 0\n num_retrieved = 0\n # NYT info:\n nyt_api = None\n api_key = \"\"\n api_key_is_valid = False\n query_running = False\n\n # Settings.\n recent_queries = Setting([])\n recent_api_keys = Setting([])\n year_from = Setting(\"\")\n year_to = Setting(\"\")\n # Text includes checkboxes.\n includes_headline = Setting(True)\n includes_lead_paragraph = Setting(True)\n includes_snippet = Setting(False)\n includes_abstract = Setting(False)\n includes_keywords = Setting(False)\n\n def __init__(self):\n super().__init__()\n\n # Refresh recent queries.\n self.recent_queries = [query for query in self.recent_queries]\n\n # To hold all the controls. 
Makes access easier.\n self.nyt_controls = []\n\n # Root box.\n parameter_box = gui.widgetBox(self.controlArea, addSpace=True)\n\n # API key box.\n api_key_box = gui.widgetBox(parameter_box, orientation=0)\n # Valid API key feedback.\n self.api_key_valid_label = gui.label(api_key_box, self, \"\")\n if self.api_key_is_valid:\n self.api_key_valid_label.setPixmap(QPixmap(_i(\"valid.svg\"))\n .scaled(15, 15, QtCore.Qt.KeepAspectRatio))\n else:\n self.api_key_valid_label.setPixmap(QPixmap(_i(\"invalid.svg\"))\n .scaled(15, 15, QtCore.Qt.KeepAspectRatio))\n self.api_key_valid_label.setMaximumSize(self.api_key_valid_label.sizeHint())\n # Set API key button.\n self.open_set_api_key_dialog_button = gui.button(api_key_box, self, 'Article API key',\n callback=self.open_set_api_key_dialog,\n tooltip=\"Set the API key for this widget.\")\n\n # Query box.\n query_box = gui.widgetBox(parameter_box, orientation=0)\n q_label = gui.label(query_box, self, \"Query:\")\n q_label.setMaximumSize(q_label.sizeHint())\n # Drop-down for recent queries.\n self.query_combo = QComboBox(query_box)\n self.query_combo.setMinimumWidth(150)\n self.query_combo.setEditable(True)\n query_box.layout().addWidget(self.query_combo)\n self.query_combo.activated[int].connect(self.select_query)\n self.nyt_controls.append(self.query_combo)\n\n # Year box.\n # TODO Add calendar widget and month+day support.\n year_box = gui.widgetBox(parameter_box, orientation=0)\n # Inputs for years.\n gui.label(year_box, self, \"From year\")\n self.year_from_input = gui.lineEdit(year_box, self, \"year_from\")\n gui.label(year_box, self, \"to year\")\n self.year_to_input = gui.lineEdit(year_box, self, \"year_to\")\n self.nyt_controls.append(self.year_from_input)\n self.nyt_controls.append(self.year_to_input)\n\n # Text includes box.\n self.text_includes_box = gui.widgetBox(self.controlArea,\n \"Text includes\", addSpace=True)\n self.headline_chbox = gui.checkBox(self.text_includes_box, self,\n \"includes_headline\", \"Headline\")\n self.lead_paragraph_chbox = gui.checkBox(self.text_includes_box, self,\n \"includes_lead_paragraph\", \"Lead Paragraph\")\n self.snippet_chbox = gui.checkBox(self.text_includes_box, self,\n \"includes_snippet\", \"Snippet\")\n self.abstract_chbox = gui.checkBox(self.text_includes_box, self,\n \"includes_abstract\", \"Abstract\")\n self.keywords_chbox = gui.checkBox(self.text_includes_box, self,\n \"includes_keywords\", \"Keywords\")\n self.nyt_controls.append(self.headline_chbox)\n self.nyt_controls.append(self.lead_paragraph_chbox)\n self.nyt_controls.append(self.snippet_chbox)\n self.nyt_controls.append(self.abstract_chbox)\n self.nyt_controls.append(self.keywords_chbox)\n\n # Run query button.\n self.run_query_button = gui.button(self.controlArea, self, 'Run query',\n callback=self.run_initial_query,\n tooltip=\"Run the chosen NYT article query.\")\n self.nyt_controls.append(self.run_query_button)\n h_line = QFrame()\n h_line.setFrameShape(QFrame.HLine)\n h_line.setFrameShadow(QFrame.Sunken)\n self.controlArea.layout().addWidget(h_line)\n\n # Query info.\n query_info_box = gui.widgetBox(self.controlArea, addSpace=True)\n self.query_info_label = gui.label(query_info_box, self,\n \"Records: {}\\nRetrieved: {}\".format(\"/\", \"/\"))\n\n # Retrieve other records.\n self.retrieve_other_button = gui.button(self.controlArea, self, 'Retrieve remaining records',\n callback=self.retrieve_remaining_records,\n tooltip=\"Retrieve the remaining records obtained in the query.\")\n self.retrieve_other_button.setEnabled(False)\n\n # 
Load the most recent queries.\n self.set_query_list()\n\n # Check the API key and enable controls accordingly.\n if self.recent_api_keys:\n self.api_key = self.recent_api_keys[0]\n self.check_api_key(self.api_key)\n\n self.enable_controls()\n\n def set_query_list(self):\n self.query_combo.clear()\n if not self.recent_queries:\n # Sample queries.\n self.recent_queries.append(\"slovenia\")\n self.recent_queries.append(\"text mining\")\n self.recent_queries.append(\"orange data mining\")\n self.recent_queries.append(\"bioinformatics\")\n for query in self.recent_queries:\n self.query_combo.addItem(query)\n\n def select_query(self, n):\n if n < len(self.recent_queries):\n name = self.recent_queries[n]\n del self.recent_queries[n]\n self.recent_queries.insert(0, name)\n\n if len(self.recent_queries) > 0:\n self.set_query_list()\n\n def run_initial_query(self):\n self.warning(1)\n self.error(1)\n # Only execute if the NYT object is present(safety lock).\n # Otherwise this method cannot be called anyway.\n if self.nyt_api:\n # Query keywords.\n qkw = self.query_combo.currentText()\n\n # Text fields.\n text_includes_params = [self.includes_headline, self.includes_lead_paragraph, self.includes_snippet,\n self.includes_abstract, self.includes_keywords]\n\n if True not in text_includes_params:\n self.warning(1, \"You must select at least one text field.\")\n return\n\n # Set the query url.\n self.nyt_api.set_query_url(qkw, self.year_from, self.year_to, text_includes_params)\n\n # Execute the query.\n res, cached, error = self.nyt_api.execute_query(0)\n\n if res:\n # Construct a corpus for the output.\n documents, metas, meta_vars, class_values = parse_record_json(res, text_includes_params)\n class_vars = [DiscreteVariable(\"section_name\", values=list(set(class_values)))]\n Y = np.array([class_vars[0].to_val(cv) for cv in class_values])[:, None]\n Y[np.isnan(Y)] = 0\n domain = Domain([], class_vars=class_vars, metas=meta_vars)\n\n self.output_corpus = Corpus(documents, None, Y, metas, domain)\n self.send(Output.CORPUS, self.output_corpus)\n\n # Update the response info.\n self.all_hits = res[\"response\"][\"meta\"][\"hits\"]\n self.num_retrieved = len(res[\"response\"][\"docs\"])\n info_label = \"Records: {}\\nRetrieved: {}\".format(self.all_hits, self.num_retrieved)\n if self.all_hits > 1000:\n info_label += \" (max 1000)\"\n self.query_info_label.setText(info_label)\n\n # Enable 'retrieve remaining' button.\n if self.num_retrieved < min(self.all_hits, 1000):\n self.retrieve_other_button.setText('Retrieve remaining records ({})'\n .format(min(self.all_hits, 1000)-self.num_retrieved))\n self.retrieve_other_button.setEnabled(True)\n self.retrieve_other_button.setFocus()\n else:\n self.retrieve_other_button.setText('All records retrieved')\n self.retrieve_other_button.setEnabled(False)\n\n # Add the query to history.\n if qkw not in self.recent_queries:\n self.recent_queries.insert(0, qkw)\n else:\n if error:\n if isinstance(error, HTTPError):\n self.error(1, \"An error occurred(HTTP {})\".format(error.code))\n elif isinstance(error, URLError):\n self.error(1, \"An error occurred(URL {})\".format(error.reason))\n\n def retrieve_remaining_records(self):\n self.error(1)\n # If a query is running, stop it.\n if self.query_running:\n self.query_running = False\n return\n\n if self.nyt_api:\n num_steps = min(math.ceil(self.all_hits/10), 100)\n\n # Update buttons.\n self.retrieve_other_button.setText('Stop retrieving')\n self.open_set_api_key_dialog_button.setEnabled(False)\n 
self.run_query_button.setEnabled(False)\n\n # Accumulate remaining results in these lists.\n remaining_docs = []\n num_metas = len(self.output_corpus.domain.metas)\n remaining_metas = np.empty((0, num_metas), dtype=object)\n remaining_classes = []\n\n self.query_running = True\n self.progressBarInit()\n for i in range(int(self.num_retrieved/10), num_steps):\n # Stop querying if the flag is not set.\n if not self.query_running:\n break\n\n # Update the progress bar.\n self.progressBarSet(100.0 * (i/num_steps))\n\n res, cached, error = self.nyt_api.execute_query(i)\n\n if res:\n docs, metas, meta_vars, class_values = parse_record_json(res, self.nyt_api.includes_fields)\n remaining_docs += docs\n remaining_metas = np.vstack((remaining_metas, np.array(metas)))\n remaining_classes += class_values\n\n # Update the info label.\n self.num_retrieved += len(res[\"response\"][\"docs\"])\n info_label = \"Records: {}\\nRetrieved: {}\".format(self.all_hits, self.num_retrieved)\n if self.all_hits > 1000:\n info_label += \" (max 1000)\"\n self.query_info_label.setText(info_label)\n\n if not cached: # Only wait if an actual request was made.\n sleep(1)\n else:\n if error:\n if isinstance(error, HTTPError):\n self.error(1, \"An error occurred(HTTP {})\".format(error.code))\n elif isinstance(error, URLError):\n self.error(1, \"An error occurred(URL {})\".format(error.reason))\n break\n self.progressBarFinished()\n self.query_running = False\n\n # Update the corpus.\n self.output_corpus.extend_corpus(remaining_docs, remaining_metas, remaining_classes, meta_vars)\n self.send(Output.CORPUS, self.output_corpus)\n\n if self.num_retrieved == min(self.all_hits, 1000):\n self.retrieve_other_button.setText('All available records retrieved')\n self.retrieve_other_button.setEnabled(False)\n else:\n self.retrieve_other_button.setText('Retrieve remaining records ({})'\n .format(min(self.all_hits, 1000)-self.num_retrieved))\n self.retrieve_other_button.setFocus()\n\n self.open_set_api_key_dialog_button.setEnabled(True)\n self.run_query_button.setEnabled(True)\n\n def check_api_key(self, api_key):\n nyt_api = NYT(api_key)\n self.api_key = api_key\n self.api_key_updated(nyt_api.check_api_key())\n\n def api_key_updated(self, is_valid):\n self.api_key_is_valid = is_valid\n self.enable_controls()\n\n if is_valid:\n self.api_key_valid_label.setPixmap(QPixmap(_i(\"valid.svg\"))\n .scaled(15, 15, QtCore.Qt.KeepAspectRatio))\n self.nyt_api = NYT(self.api_key) # Set the NYT API object, if key is valid.\n else:\n self.api_key_valid_label.setPixmap(QPixmap(_i(\"invalid.svg\"))\n .scaled(15, 15, QtCore.Qt.KeepAspectRatio))\n\n def enable_controls(self):\n for control in self.nyt_controls:\n control.setEnabled(self.api_key_is_valid)\n if not self.api_key_is_valid:\n self.open_set_api_key_dialog_button.setFocus()\n elif self.api_key_is_valid:\n self.run_query_button.setFocus()\n\n def open_set_api_key_dialog(self):\n api_dlg = APIKeyDialog(self, \"New York Times API key\")\n api_dlg.exec_()\n\n\nclass APIKeyDialog(QDialog):\n def __init__(self, parent, windowTitle=\"New York Times API key\"):\n super().__init__(parent, windowTitle=windowTitle)\n\n self.parent = parent\n\n self.setLayout(QVBoxLayout())\n self.layout().setMargin(10)\n self.mainArea = gui.widgetBox(self)\n self.layout().addWidget(self.mainArea)\n\n # Combo box.\n self.api_key_combo = QComboBox(self.mainArea)\n self.api_key_combo.setEditable(True)\n self.api_key_combo.activated[int].connect(self.select_api_key)\n self.mainArea.layout().addWidget(self.api_key_combo)\n\n # 
Buttons\n self.button_box = gui.widgetBox(self.mainArea, orientation=\"horizontal\")\n gui.button(self.button_box, self, \"Check\", self.check_api_key)\n gui.button(self.button_box, self, \"OK\", self.accept_changes)\n gui.button(self.button_box, self, \"Cancel\", self.reject_changes)\n\n # Label\n self.label_box = gui.widgetBox(self, orientation=\"horizontal\")\n self.api_key_check_label = gui.label(self.label_box, self, \"\")\n if self.parent.api_key_is_valid:\n self.api_key_check_label.setText(\"API key is valid.\")\n else:\n self.api_key_check_label.setText(\"API key NOT validated!\")\n\n # Load the most recent API keys.\n self.set_key_list()\n\n def set_key_list(self):\n self.api_key_combo.clear()\n for key in self.parent.recent_api_keys:\n self.api_key_combo.addItem(key)\n\n def select_key(self, n):\n if n < len(self.parent.recent_api_keys):\n key = self.parent.recent_api_keys[n]\n del self.parent.recent_api_keys[n]\n self.parent.recent_api_keys.insert(0, key)\n\n if len(self.parent.recent_api_keys) > 0:\n self.set_key_list()\n\n def check_api_key(self):\n self.parent.check_api_key(self.api_key_combo.currentText())\n if self.parent.api_key_is_valid:\n self.api_key_check_label.setText(\"API key is valid.\")\n else:\n self.api_key_check_label.setText(\"API key NOT valid!\")\n\n def accept_changes(self):\n self.parent.check_api_key(self.api_key_combo.currentText()) # On OK check the API key also.\n\n if self.api_key_combo.currentText() not in self.parent.recent_api_keys:\n self.parent.recent_api_keys.append(self.api_key_combo.currentText())\n self.parent.api_key = self.api_key_combo.currentText()\n QDialog.accept(self)\n\n def reject_changes(self):\n QDialog.reject(self)\n\n def select_api_key(self, n):\n if n < len(self.parent.recent_api_keys):\n key = self.parent.recent_api_keys[n]\n del self.parent.recent_api_keys[n]\n self.parent.recent_api_keys.insert(0, key)\n\n if len(self.parent.recent_api_keys) > 0:\n self.set_key_list()\n","sub_path":"orangecontrib/text/widgets/ownyt.py","file_name":"ownyt.py","file_ext":"py","file_size_in_byte":17438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"135688159","text":"# 290. 
Word Pattern\n# Given a pattern and a string str, find if str follows the same pattern.\n\n# Here follow means a full match,\n# such that there is a bijection between a letter in pattern\n# and a non-empty word in str.\n\n# Example 1:\n\n# Input: pattern = \"abba\", str = \"dog cat cat dog\"\n# Output: true\n# Example 2:\n\n# Input:pattern = \"abba\", str = \"dog cat cat fish\"\n# Output: false\n# Example 3:\n\n# Input: pattern = \"aaaa\", str = \"dog cat cat dog\"\n# Output: false\n# Example 4:\n\n# Input: pattern = \"abba\", str = \"dog dog dog dog\"\n# Output: false\n# Notes:\n# You may assume pattern contains only lowercase letters, and str contains lowercase letters that may be separated by a single space.\n\n\nclass Solution:\n def wordPattern(self, pattern: str, str: str) -> bool:\n # check if it is legal\n hash_table = dict()\n strings = str.split()\n if len(pattern) != len(strings):\n return False\n for i, v in enumerate(strings):\n if hash_table.get(pattern[i], None):\n if hash_table[pattern[i]] != v:\n return False\n else:\n if v in hash_table.values():\n return False\n hash_table[pattern[i]] = v\n return True\n\n\nif __name__ == '__main__':\n from util import Test\n s = Solution()\n t = Test(s.wordPattern)\n pattern = \"aaa\"\n string = \"aa aa aa aa\"\n t.equal(False, pattern, string)\n pattern = \"abba\"\n string = \"dog cat cat dog\"\n t.equal(True, pattern, string)\n string = \"dog cat cat fish\"\n t.equal(False, pattern, string)\n string = \"dog dog dog dog\"\n t.equal(False, pattern, string)","sub_path":"Python/290.py","file_name":"290.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"172332397","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom time import sleep\n\nimport telebot\nimport logging\n\nimport config\nimport utils\nfrom SQLighter import SQLighter\n\nlogging.basicConfig(format = u'%(levelname)-8s [%(asctime)s] %(message)s',\n level = logging.DEBUG, filename = config.botlog_name)\nlogging.info(u'PomniBot started')\n\nbot = telebot.TeleBot(config.token)\n\n# Обработка команды старт\n@bot.message_handler(commands=['start'])\ndef start(message):\n try:\n db_worker = SQLighter(config.database_name)\n if db_worker.select_single(message.chat.id):\n markup = utils.generate_markup(['Забудь про меня'])\n bot.send_message(message.chat.id, 'Не волнуйся, я тебя не забыл. Ты у меня уже на заметке, '\n 'напоминаю каждый день про отчет:)', reply_markup=markup)\n else:\n markup = utils.generate_markup(['Да. То, что нужно!'])\n bot.send_message(message.chat.id, 'Привет! Я бот, который поможет тебе не забыть писать ежедневный отчет! '\n 'Напоминать тебе об этом?', reply_markup=markup)\n except Exception as e:\n logging.error(e)\n\n@bot.message_handler(func=lambda message: True, content_types=['text'])\ndef message_handle(message):\n try:\n db_worker = SQLighter(config.database_name)\n\n #убрать после того, как имена всех существующих обновятся\n x = db_worker.select_single(message.chat.id)\n if x:\n username = '\"' + message.chat.username.encode('utf-8') + '\"' if message.chat.username else 'null'\n if username:\n db_worker.upd_col('chat_username', username, message.chat.id)\n first_name = '\"' + message.chat.first_name.encode('utf-8') + '\"' if message.chat.first_name else 'null'\n if first_name:\n db_worker.upd_col('chat_first_name', first_name, message.chat.id)\n #---------------------------------------------------------\n\n if message.text == u'Да. 
То, что нужно!':\n username = '\"' + message.chat.username.encode('utf-8') + '\"' if message.chat.username else 'null'\n first_name = '\"' + message.chat.first_name.encode('utf-8') + '\"' if message.chat.first_name else 'null'\n db_worker.add_row(message.chat.id, username, first_name)\n bot.send_message(message.chat.id, 'Больше тебе приседать не придется:) Я буду ежедневно напоминать тебе '\n 'об отчете каждый час, начиная с 20:00. '\n 'До связи!', reply_markup=utils.hide_markup())\n elif message.text == u'+':\n db_worker.upd_col('done', 1, message.chat.id)\n bot.send_message(message.chat.id, 'Отлично! встретимся завтра:)')\n elif message.text == u'Спроси в другой раз':\n bot.send_message(message.chat.id, 'Окей, напомню через час.')\n elif message.text == u'Забудь про меня':\n db_worker.delete_row(message.chat.id)\n bot.send_message(message.chat.id, 'Хорошо, больше не буду тебе надоедать. '\n 'Если передумаешь подай команду /start')\n elif message.text == u'Беру на себя всю ответственность':\n bot.send_message(message.chat.id, 'Смотри, так до приседаний недалеко))')\n else:\n bot.send_message(message.chat.id, 'Извини, такой команды я не знаю. '\n 'Я пока примитивный глуповатый бот:( Но я учусь')\n except Exception as e:\n logging.error(e)\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462190791","text":"\nclass Word:\n def __init__(self, text, part):\n self.text = text\n self.part = part\n\nclass Sentence:\n def __init__(self, data, content):\n self.data = data\n self.content = content\n def show(self):\n output = [self.data[n].text for n in self.content]\n result = \"\"\n for n in output:\n result = f\"{result} {n}\"\n return result\n def show_parts(self):\n output = [self.data[n].part for n in self.content]\n output = set(output)\n result = \"\"\n for n in output:\n result = f\"{result} {n}\"\n return result\n\n\n\n\n\n","sub_path":"2.OOP_in_Python/OOP/someone/Task6 VE/Task5.py","file_name":"Task5.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"288283613","text":"import numpy as np\nimport random\n\n\ndistance_matrix = [ [0, 12, 3, 23, 1, 5, 23, 56, 12, 11],\n\t\t\t\t [12, 0, 9, 18, 3, 41, 45, 5, 41, 27],\n\t\t\t\t [3, 9, 0, 89, 56, 21, 12, 48, 14, 29],\n\t\t\t\t [23, 18, 89, 0, 87, 46, 75, 17, 50, 42],\n\t\t\t\t [1, 3, 56, 87, 0, 55, 22, 86, 14, 33],\n\t\t\t\t [5, 41, 21, 46, 55, 0, 21, 76, 54, 81],\n\t\t\t\t [23, 45, 12, 75, 22, 21, 0, 11, 57, 48],\n\t\t\t\t [56, 5, 48, 17, 86, 76, 11, 0, 63, 24],\n\t\t\t\t [12, 41, 14, 50, 14, 54, 57, 63, 0, 9],\n\t\t\t\t [11, 27, 29, 42, 33, 81, 48, 24, 9, 0] ]\n\n\nfirst_city = 0\np = 0.99 \t#parameter to regulate the reduction of T\nalpha = 1\nbeta = 1\nQ = 1\ninitial_pheromones = 0.1\n\nn_ants = 9\nn_iterations = 50\nn_cities = 10\ne = 5\n\nglobal_index = 0\n\n\npheromone_matrix = np.zeros(( n_cities, n_cities ))\nvisibility_matrix = np.zeros(( n_cities, n_cities ))\n\n\ncities = [ 'A','B','C','D','E','F','G','H','I','J' ]\n\ndef print_matrix( matrix, text ):\n\tprint( text )\n\tfor i in range( n_cities ):\n\t\tif(i==0):\n\t\t\tprint(\"\\tA\\tB\\tC\\tD\\tE\\tF\\tG\\tH\\tI\\tJ\" )\n\t\tfor j in range( n_cities ):\n\t\t\tif(j==0):\n\t\t\t\tprint(cities[i], end='\\t')\n\t\t\tprint( \"{:.3f}\".format(matrix[i][j]), end='\\t')\n\t\tprint()\n\ndef 
initialize_pheromone_matrix():\n\tfor i in range( n_cities ):\n\t\tfor j in range( n_cities ):\n\t\t\tif(i!=j):\n\t\t\t\tpheromone_matrix[i][j] = initial_pheromones\n\ndef initialize_visibility_matrix():\n\tfor i in range( n_cities ):\n\t\tfor j in range( n_cities ):\n\t\t\tif(i!=j):\n\t\t\t\tvisibility_matrix[i][j] = 1.0 / distance_matrix[i][j]\n\ndef next_city( m_prob, random_number):\n\tprobabilty_sum = 0\n\tfor i in range( len(m_prob) ):\n\t\tif( m_prob[i] != -1 ):\n\t\t\tprobabilty_sum += m_prob[i]\n\t\t\tif( random_number <= probabilty_sum ):\n\t\t\t\treturn i\n\ndef path_cost( path ):\n\tm_sum = 0.\n\tfor i in range( len( path )-1 ):\n\t\tm_sum += distance_matrix[path[i]][path[i+1]]\n\treturn m_sum\n\ndef print_ant_results( path_list ):\n\n\tprint(\"\\nResultado\")\n\tcosts_lists = []\n\t\n\tfor j in range( len( path_list ) ):\n\t\tprint(\"Hormiga \"+str(j)+\": \", end='') \n\t\t\n\t\t#Print the paths\n\t\tfor i in range( n_cities ):\n\t\t\tif( i == n_cities-1 ):\n\t\t\t\tprint( cities[path_list[j][i]], end=' ')\n\t\t\telse:\n\t\t\t\tprint( cities[ path_list[j][i]] + \"-\", end='')\n\t\t#---------------------------\n\t\tcosts_lists.append( path_cost(path_list[j]) ) \n\t\tprint( \"Costo: \", costs_lists[j])\n\tindex_ant = costs_lists.index(min(costs_lists))\n\t\n\t#print best paths\n\tprint(\"------------------------------------------------------\")\n\tprint(\"Mejor Hormiga: \", end='')\n\t\n\tfor i in range( n_cities ):\n\t\tif( i == n_cities-1 ):\n\t\t\tprint( cities[path_list[index_ant][i]], end=' ')\n\t\telse:\n\t\t\tprint( cities[ path_list[index_ant][i]] + \"-\", end='')\n\tprint(\"Costo: \", costs_lists[index_ant])\n\tprint(\"------------------------------------------------------\")\n\n\treturn costs_lists\n\n\ndef send_ants():\n\tglobal first_city\n\tpath_list = []\n\n\tfor j in range( n_ants ):\n\t\tprint(\"Hormiga \", j)\n\t\tprint(\"Ciudad Inicial: \", cities[first_city])\n\t\t\n\t\tcurrent_city = first_city\n\t\tpath = []\n\t\tpath.append( current_city )\n\t\t\n\t\twhile( len(path) < n_cities ):\n\t\t\tm_sum = 0\n\t\t\tsums_list = []\n\t\t\t\n\t\t\tfor k in range( n_cities ):\n\t\t\t\tif( k not in path ):\n\t\t\t\t\tt = (pheromone_matrix[current_city][k]) ** alpha\n\t\t\t\t\tn = (visibility_matrix[current_city][k]) ** beta\n\t\t\t\t\ttn = t*n\n\t\t\t\t\t\n\t\t\t\t\tsums_list.append( tn )\n\t\t\t\t\t\n\t\t\t\t\tm_sum += tn\n\t\t\t\t\t\n\t\t\t\t\tprint( cities[current_city] + \"-\" + cities[k], end=' ' )\n\t\t\t\t\tprint( \"t = \", t, end=' ' )\n\t\t\t\t\tprint( \"n = \", n, end=' ' )\n\t\t\t\t\tprint( \"t*n = \", tn )\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tsums_list.append(-1)\n\t\t\t\t\t\n\t\t\tm_prob = []\n\t\t\t\n\t\t\tfor k in range( n_cities ):\n\t\t\t\tif( k not in path ):\n\t\t\t\t\tm_prob.append( sums_list[k] / m_sum )\n\t\t\t\t\t\n\t\t\t\t\tprint( cities[current_city] + \"-\" + cities[k], end=' ' )\n\t\t\t\t\tprint( \"Prob = \", sums_list[k] / m_sum)\n\t\t\t\telse:\n\t\t\t\t\tm_prob.append(-1)\n\t\t\t\t\t\n\t\t\trandom_number = random.random()\n\t\t\tprint( \"Numero Aleatorio: \", random_number )\n\t\t\tn_index = next_city( m_prob, random_number )\n\t\t\tprint(\"Siguiente Ciudad: \", cities[n_index] )\n\t\t\tcurrent_city = n_index\n\t\t\tpath.append( n_index )\n\t\t\n\t\tprint(\"Hormiga \"+str(j)+\": \", end='')\n\t\t\n\t\tfor i in range( n_cities ):\n\t\t\tif( i == n_cities-1 ):\n\t\t\t\tprint( cities[path[i]])\n\t\t\telse:\t\n\t\t\t\tprint( cities[path[i]] + \"-\", end='')\n\n\t\tpath_list.append( path )\n\treturn path_list\n\ndef get_delta(path_list, 
costs_lists, i , j):\n\ts = 0\n\tsum_log = []\n\tfor k in range( len( path_list )):\n\t\tfor l in range( n_cities -1 ):\n\t\t\tif( (path_list[k][l] == i and path_list[k][l+1] == j) or \\\n\t\t\t\t(path_list[k][l] == j and path_list[k][l+1] == i) ):\n\t\t\t\tdd = Q / costs_lists[k]\n\t\t\t\tsum_log.append( dd )\n\t\t\t\ts += dd\n\treturn s, sum_log\n\ndef update_pheromone_matrix(path_list, costs_lists):\n\tglobal p\n\tfor r_0 in range( n_cities ):\n\t\tfor r_1 in range( n_cities ):\n\t\t\tif( r_0 != r_1):\n\t\t\t\ttmp, dd = get_delta(path_list, costs_lists, r_0, r_1)\n\t\t\t\tpheromone_matrix[r_0][r_1] = pheromone_matrix[r_0][r_1] * p + tmp\n\n\ndef as_algorithm():\n\tinitialize_pheromone_matrix()\n\tinitialize_visibility_matrix()\n\t\n\n\tfor i in range( n_iterations ):\n\t\tprint(\"Iteration: \", i)\t\t\n\t\tpath_list = send_ants()\n\t\tcost_list = print_ant_results( path_list )\n\t\tupdate_pheromone_matrix( path_list, cost_list )\n\nif __name__ == \"__main__\":\n\tas_algorithm()\n","sub_path":"Laboratorios/LAB6/LL/AntSystem.py","file_name":"AntSystem.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"318764630","text":"\"\"\"\nA function outputs the first number of Fibonacci number\n\"\"\"\ndef fibonacci(index):\n number1 = 0\n number2 = 1\n counter = 0\n \n if index <= 0:\n print(\"Enter a value greater than 0\")\n elif index == 1:\n print(number1)\n else:\n while counter < index:\n print(number1)\n numberNext = number1 + number2\n number1 = number2\n number2 = numberNext\n counter += 1\n \nfibonacci(12)\n\n\"\"\"\nOutput:\n\n0\n1\n1\n2\n3\n5\n8\n13\n21\n34\n55\n89\n\"\"\"","sub_path":"CIS 40/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"189553674","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nfashion_mnist = keras.datasets.fashion_mnist\n# a set to train the algorithm and one to test it\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n# making a model\n# relu: returning positive values, else is filtered\n# softmax: picks biggest number in a set\nmodel = keras.Sequential([\n # input is the image size\n keras.layers.Flatten(input_shape=(28, 28)),\n # 128 functions\n keras.layers.Dense(128, activation=tf.nn.relu),\n # the output shape is the number of categories\n keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\n# nn initialized with random numbers\n# optimizer generates new parameters\nmodel.compile(optimizer=tf.compat.v1.train.AdamOptimizer(),\n loss='sparse_categorical_crossentropy',\n metrics='accuracy')\n\n# fitting training data\nmodel.fit(train_images, train_labels, epochs=100)\n\n# evaluating the results\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\n\n# predictions = model.predict(my_images)","sub_path":"Artificial Intelligence/TensorFlow Snippets/basic-image-ml.py","file_name":"basic-image-ml.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"505460586","text":"__G__ = \"(G)bd249ce4\"\n\nfrom ..logger.logger import verbose, verbose_flag, verbose_timeout\nfrom r2pipe import open as r2open\nfrom re import search\nfrom ast import literal_eval\n\n#this module needs some optimization\n#only apis for now, will change this in future\n#added a little hack for handling 
errors \n#(check returns 0 because of error and flags=['-2'])\n#Similar to objdump, still needs to optimize\n \nclass QBD3generator:\n @verbose(True,verbose_flag,verbose_timeout,\"Starting QBD3generator\")\n def __init__(self):\n pass\n\n @verbose(True,verbose_flag,verbose_timeout,None)\n def check_func(self,func,str) -> bool:\n '''\n check if functions are not sub or sym \n '''\n if func.startswith(\"sub.\"):\n if func[4:].split(\"_\")[1].lower() in str.lower():\n return False\n elif func.startswith(\"sym.\"):\n if func[4:].lower() in str.lower():\n return False\n if search(r'\\b{}\\b'.format(func), str) is not None:\n return False\n return True\n\n\n @verbose(True,verbose_flag,verbose_timeout,\"Making symbol xrefs\")\n def create_d3_ref(self,data):\n '''\n get cross references from file using radare2 \n '''\n data[\"XREFS\"] = { \"GRAPH\":{\"nodes\":[],\"links\":[]},\n \"TEXT\":[],\n \"_TEXT\":[\"From\",\"To\"]}\n r2p = r2open(data[\"Location\"][\"File\"],flags=['-2'])\n r2p.cmd(\"e anal.timeout = 10\")\n r2p.cmd(\"aaaa\")\n x = r2p.cmd(\"axtj@@ sym.*\")\n x = \"[\"+(x.replace('\\n','').replace(\"][\",\"],[\"))+\"]\"\n sym = ' '.join(r2p.cmd(\"is~[6]\").split())\n x = literal_eval(x)\n _node = []\n _links = []\n _list = []\n _temp = []\n for funcs in x:\n for func in funcs:\n if \"opcode\" in func and \"fcn_name\" in func:\n match = search(r'\\[(.*?)\\]', func[\"opcode\"])\n if match is not None:\n if len(r2p.cmd(\"pd 1 @ \"+match.group(1))) > 0:\n _list.append({\"From\":func[\"fcn_name\"],\"To\":match.group(1)})\n else:\n funcfromopcode = ''.join(func[\"opcode\"].split(' ')[-1:])\n _list.append({\"From\":func[\"fcn_name\"],\"To\":funcfromopcode})\n\n for xfunc in _list:\n if self.check_func(xfunc[\"From\"],sym):\n if xfunc[\"From\"] not in _temp:\n _temp.append(xfunc[\"From\"])\n _node.append({\"func\":xfunc[\"From\"]})\n if xfunc[\"To\"] not in _temp:\n _temp.append(xfunc[\"To\"])\n _node.append({\"func\":xfunc[\"To\"]})\n\n for xfunc in _list:\n try:\n S = _temp.index(xfunc[\"From\"])\n T = _temp.index(xfunc[\"To\"])\n if next((item for item in _links if item[\"source\"] == S and item[\"target\"] == T), False) == False:\n _links.append({\"source\":S,\"target\":T})\n except:\n pass\n\n if len(_node) > 0 and len(_links) > 0:\n data[\"XREFS\"][\"GRAPH\"][\"nodes\"] = _node\n data[\"XREFS\"][\"GRAPH\"][\"links\"] = _links\n data[\"XREFS\"][\"TEXT\"] = _list\n\n @verbose(True,verbose_flag,verbose_timeout,\"Making artifacts xrefs\")\n def create_d3_artifacts(self,data) -> bool:\n '''\n get artifacts from data and generate d3\n '''\n\n data[\"REFS\"] = { \"GRAPH\":{\"nodes\":[],\"links\":[]},\n \"TEXT\":[],\n \"_TEXT\":[\"From\",\"To\"]}\n\n _node = []\n _links = []\n _list = []\n _temp = []\n\n try: \n for item in data[\"Strings\"][\"IPS\"]:\n _list.append({\"From\":\"File\",\"To\":item[\"IP\"]})\n except:\n pass\n\n try: \n for item in data[\"Strings\"][\"EMAILs\"]:\n _list.append({\"From\":\"File\",\"To\":item[\"EMAIL\"]})\n except:\n pass\n\n for item in _list:\n if item[\"From\"] not in _temp:\n _temp.append(item[\"From\"])\n _node.append({\"func\":item[\"From\"]})\n if item[\"To\"] not in _temp:\n _temp.append(item[\"To\"])\n _node.append({\"func\":item[\"To\"]})\n\n for item in _list:\n try:\n S = _temp.index(item[\"From\"])\n T = _temp.index(item[\"To\"])\n if next((item for item in _links if item[\"source\"] == S and item[\"target\"] == T), False) == False:\n _links.append({\"source\":S,\"target\":T})\n except:\n pass\n\n if len(_node) > 0 and len(_links) > 0:\n 
data[\"REFS\"][\"GRAPH\"][\"nodes\"] = _node\n data[\"REFS\"][\"GRAPH\"][\"links\"] = _links\n data[\"REFS\"][\"TEXT\"] = _list","sub_path":"intell/qbd3generator.py","file_name":"qbd3generator.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"615969022","text":"# Copyright (c) 2008-2018 LG Electronics, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport re\nimport json\nimport string\nfrom SequenceCases import *\n\nclass SequenceFactory:\n @staticmethod\n def try_create(msg):\n try:\n ls_msg = LSMessage.parse_message(msg['msg']['message'])\n if ls_msg:\n msg['msg']['message'] = ls_msg\n if ls_msg.direction == LSMessage.Direction.OUTGOING:\n def check_create_load():\n if 'uri' in ls_msg.payload:\n return LoadSequence(msg)\n return None\n def check_create_play():\n if ls_msg.payload.keys() == ['mediaId']:\n return PlaySequence(msg)\n return None\n def check_create_pause():\n if ls_msg.payload.keys() == ['mediaId']:\n return PauseSequence(msg)\n return None\n def check_create_unload():\n if ls_msg.payload.keys() == ['mediaId']:\n return UnloadSequence(msg)\n return None\n def check_create_acquire():\n if 'resources' in ls_msg.payload:\n return AcquireSequence(msg)\n return { LoadSequence.METHOD : check_create_load(),\n PlaySequence.METHOD : check_create_play(),\n PauseSequence.METHOD : check_create_pause(),\n UnloadSequence.METHOD : check_create_unload(),\n AcquireSequence.METHOD: check_create_acquire()\n }.get(ls_msg.method)\n return None\n except Exception as e:\n return None\n\nclass LogBuffer:\n def __init__(self):\n self.buffer = []\n def push(self, log):\n idx = len(self.buffer)\n for i in xrange(len(self.buffer) - 1, -1, -1):\n if self.buffer[i]['msg']['info']['TIMESTAMP'] < log['msg']['info']['TIMESTAMP']:\n idx = i + 1\n break\n self.buffer.insert(idx, log)\n def pop(self):\n return self.buffer.pop(0)\n def len(self):\n return len(self.buffer)\n\nclass Detector:\n __buffer_depth = 20\n def __init__(self):\n self.sequence_kinds = dict()\n self.buffer = LogBuffer()\n def __process(self, message):\n # first try to feed message to existing sequences\n for kind, sequences in self.sequence_kinds.iteritems():\n for sequence in sequences:\n if sequence.handle(message):\n return True\n # try to create new sequence\n sequence = SequenceFactory.try_create(message)\n if sequence:\n if sequence.kind not in self.sequence_kinds:\n self.sequence_kinds[sequence.kind] = [sequence]\n return True\n self.sequence_kinds[sequence.kind].insert(0,sequence)\n return True\n # message skipped\n return False\n def process(self, message):\n self.buffer.push(message)\n if (self.buffer.len() < Detector.__buffer_depth):\n return True\n return self.__process(self.buffer.pop())\n def finalize(self):\n # process rest of the buffer\n while (self.buffer.len()):\n self.__process(self.buffer.pop())\n sequence_kinds = dict()\n for kind in self.sequence_kinds:\n sequences = 
[]\n for sequence in self.sequence_kinds[kind]:\n if sequence.complete():\n sequences.append(sequence)\n sequence_kinds[kind] = sequences\n self.sequence_kinds = sequence_kinds\n def summary(self):\n csv = open('./report.csv', 'w')\n summary = ''\n for kind, sequences in self.sequence_kinds.iteritems():\n stages = []\n for sequence in sequences:\n csv.write(sequence.to_csv())\n\n if len(stages):\n for idx in range(len(sequence.stages)):\n stage = sequence.stages[idx]\n stages[idx]['elapsed'] += stage.elapsed\n else:\n for idx in range(len(sequence.stages)):\n stage = sequence.stages[idx]\n procname = stage.process.name\n if 'server' in procname:\n procname = 'server'\n elif not 'luna' == procname:\n procname = 'pipeline'\n stages.append({'process' : procname, 'descr' : stage.name,\n 'elapsed' : stage.elapsed})\n summary += '============ ' + {Sequence.Kind.LOAD : ' LOAD: (',\n Sequence.Kind.PLAY : ' PLAY: (',\n Sequence.Kind.PAUSE : ' PAUSE: (',\n Sequence.Kind.UNLOAD : ' UNLOAD: (',\n Sequence.Kind.ACQUIRE: 'ACQUIRE: ('\n }.get(kind) + str(len(sequences)) + ') ============\\n'\n for stage in stages:\n summary += string.ljust(stage['process'] + ':', 10)\n summary += string.ljust(stage['descr'] + ':', 20)\n summary += '%0.6f\\n' % (stage['elapsed'] / len(sequences))\n\n return summary.rstrip()\n","sub_path":"test/python/framework/src/detector/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273372636","text":"import re\nimport sys\nimport click\nfrom murmuration import kms_wrapped\nfrom .param_bunch import ParamBunch\nfrom . import version\n\n\n@click.group(name='waddle')\n@click.version_option(version)\ndef main():\n \"cli for managing waddle config files\"\n\n\ndef is_secret(key):\n matcher = re.compile('.*(key|secret|token|password).*')\n return matcher.match(key)\n\n\n@main.command(name='add-secret')\n@click.argument('key', metavar='db.password')\n@click.option('-f', '--filename', metavar='/path/to/config_file.yml',\n type=click.Path(exists=True), required=True)\ndef add_secret(filename, key):\n \"\"\"\n Adds an encrypted secret to the specified configuration file\n\n Example:\n waddle add-secret -f conf/dev.yml db.password\n \"\"\"\n x = ParamBunch()\n x.from_file(filename=filename, decrypt=False, handle_tags=False)\n kms_key = x.get('meta.kms_key')\n if not kms_key:\n print(f'{filename} does not have a kms key specified.')\n return\n tty = sys.stdin.isatty()\n if tty: # pragma: no-cover\n print(f'Enter value for [{key}]: ', end='', file=sys.stderr)\n sys.stderr.flush()\n # stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)\n plain_text = sys.stdin.readline().rstrip()\n # plain_text = plain_text.decode('utf-8').rstrip()\n region = x.get('meta.region')\n profile = x.get('meta.profile')\n x[key] = kms_wrapped.encrypt(plain_text, kms_key, region, profile)\n x.save(filename)\n\n\n@main.command(name='encrypt')\n@click.option('-f', '--filename', metavar='/path/to/config_file.yml',\n type=click.Path(exists=True), required=True)\ndef encrypt(filename):\n \"\"\"\n Encrypts values for any key that that has the following keywords in it:\n * key\n * password\n * token\n * secret\n\n Example:\n waddle encrypt -f conf/dev.yml\n \"\"\"\n x = ParamBunch()\n x.load(filename=filename, decrypt=True)\n kms_key = x.get('meta.kms_key')\n if not kms_key:\n print(f'{filename} does not have a kms key specified.')\n return\n region = x.get('meta.region')\n profile = 
x.get('meta.profile')\n values = []\n for key, value in x.items():\n values.append([key, value])\n for key, value in values:\n if is_secret(key) and not key.startswith('meta.'):\n value = kms_wrapped.encrypt(\n value, kms_key, region=region, profile=profile)\n x[key] = value\n x.save(filename)\n\n\n@main.command(name='deploy')\n@click.option('-f', '--filename', metavar='/path/to/config_file.yml',\n type=click.Path(exists=True), required=True)\n@click.option('-e', '--encrypted', is_flag=True)\ndef deploy(filename, encrypted):\n \"\"\"\n Deploys a locally stored config file to aws:\n\n Example:\n waddle deploy -f conf/dev.yml\n\n Use the --encrypt flag to push all values as kms encrypted to\n parameter store.\n \"\"\"\n x = ParamBunch(filename=filename)\n x.to_aws(force_encryption=encrypted)\n\n\n@main.command(name='undeploy')\n@click.option('-f', '--filename', metavar='/path/to/config_file.yml',\n type=click.Path(exists=True), required=True)\ndef undeploy(filename):\n \"\"\"\n Deploys a locally stored config file to aws:\n\n Example:\n waddle deploy -f conf/dev.yml\n \"\"\"\n x = ParamBunch(filename=filename)\n x.delete_from_aws()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"waddle/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"636953523","text":"\"\"\"themovieroom URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . 
import views\nfrom .admin import *\n\nurlpatterns = [\n url(r'^admin_site/', admin_site.urls, name='admin'),\n url(r'^$', views.index, name='index'),\n url(r'^search/$', views.SearchListView, name='SearchListView'),\n url(r'^person/(?P[0-9]+)/$', views.persons, name='persons'),\n url(r'^reviews/', include('films.urls')),\n url(r'^features/', views.features_view, name='features'),\n url(r'^features/(?P[0-9]+)/$', views.feature_view, name='feature'),\n url(r'^tinymce/', include('tinymce.urls')),\n\n]\nhandler404 = 'themovieroom.views.page_not_found'\nhandler500 = 'themovieroom.views.page_not_found'\n\n\nfrom django.conf import settings\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"themovieroom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"498860810","text":"from __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nimport pandas as pd\nimport sys\nimport os\nimport math\n\n# Import helper functions\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, dir_path + \"/../utils\")\nfrom data_operation import mean_squared_error\nfrom data_manipulation import train_test_split, polynomial_features\nfrom loss_functions import SquareLoss\nfrom optimization import GradientDescent\n\n\n\nclass LinearRegression(object):\n \"\"\"Linear model for doing regression.\n Parameters:\n -----------\n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, n_iterations=1500, learning_rate=0.001, gradient_descent=True):\n self.w = None\n self.n_iterations = n_iterations\n self.learning_rate = learning_rate\n self.gradient_descent = gradient_descent # Opt. method. 
If False => Least squares\n self.square_loss = SquareLoss()\n\n def fit(self, X, y):\n # Insert constant ones as first column (for bias weights)\n X = np.insert(X, 0, 1, axis=1)\n # Get weights by gradient descent opt.\n if self.gradient_descent:\n n_features = np.shape(X)[1]\n # Initial weights randomly [0, 1]\n self.w = np.random.random((n_features, ))\n # Do gradient descent for n_iterations\n for _ in range(self.n_iterations):\n # Gradient of squared loss w.r.t the weights\n grad_w = self.square_loss.gradient(y, X, self.w)\n # Move against the gradient to minimize loss\n self.w -= self.learning_rate * grad_w\n # Get weights by least squares (by pseudoinverse)\n else:\n U, S, V = np.linalg.svd(X.T.dot(X))\n S = np.diag(S)\n X_sq_inv = V.dot(np.linalg.pinv(S)).dot(U.T)\n self.w = X_sq_inv.dot(X.T).dot(y)\n\n def predict(self, X):\n # Insert constant ones for bias weights\n X = np.insert(X, 0, 1, axis=1)\n y_pred = X.dot(self.w)\n return y_pred\n\n\nclass PolynomialRegression(LinearRegression):\n def __init__(self, degree, n_iterations=3000, learning_rate=0.001, gradient_descent=True):\n self.degree = degree\n super(PolynomialRegression, self).__init__(n_iterations, learning_rate, gradient_descent)\n\n def fit(self, X, y):\n X_transformed = polynomial_features(X, degree=self.degree)\n super(PolynomialRegression, self).fit(X_transformed, y)\n\n def predict(self, X):\n X_transformed = polynomial_features(X, degree=self.degree)\n return super(PolynomialRegression, self).predict(X_transformed)\n\n\ndef main():\n\n # Load temperature data\n data = pd.read_csv('data/TempLinkoping2016.txt', sep=\"\\t\")\n\n time = np.atleast_2d(data[\"time\"].as_matrix()).T\n temp = np.atleast_2d(data[\"temp\"].as_matrix()).T\n\n X = time # fraction of the year [0, 1]\n y = temp\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)\n\n\n clf = PolynomialRegression(degree=6, n_iterations=100000)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n mse = mean_squared_error(y_test, y_pred)\n\n\n # Generate data for prediction line\n X_pred_ = np.arange(0, 1, 0.001).reshape((1000, 1))\n y_pred_ = clf.predict(X=X_pred_)\n\n # Print the mean squared error\n print (\"Mean Squared Error:\", mse)\n\n # Color map\n cmap = plt.get_cmap('viridis')\n\n # Plot the results\n m1 = plt.scatter(366 * X_train, y_train, color=cmap(0.9), s=10)\n m2 = plt.scatter(366 * X_test, y_test, color=cmap(0.5), s=10)\n p = plt.plot(366 * X_pred_, y_pred_, color=\"black\", linewidth=2, label=\"Prediction\")\n plt.suptitle(\"Polynomial Regression\")\n plt.title(\"MSE: %.2f\" % mse)\n plt.xlabel('Days')\n plt.ylabel('Temperature in Celcius')\n plt.legend(loc='lower right')\n plt.legend((m1, m2), (\"Training data\", \"Test data\"), loc='lower right')\n\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"supervised_learning/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354828172","text":"from flask.ext.migrate import MigrateCommand\nfrom flask.ext.script import Manager, Server, Shell\n\nfrom jekylledit import app, db, model\n\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nmanager.add_command('runserver', Server(host='0.0.0.0', port=8000))\nmanager.add_command('shell', Shell())\n\n\n@manager.shell\ndef shell_context():\n return {\n 'app': app,\n 'db': db,\n 'model': model,\n }\n\n\ndef main():\n manager.run()\n\n\nif 
__name__ == '__main__':\n main()\n","sub_path":"app/jekylledit/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"634097465","text":"#!/usr/bin/env python3.4\nimport os\nimport sys\nimport html\nimport html.parser\nimport urllib\nimport urllib.request\nimport curses\nimport filecmp\nimport argparse\n\n\n##----- Lyrics Retrieval and Saving -----##\nclass NoLyricsFound(Exception):\n pass\n\nclass LyricsNotLicensed(Exception):\n pass\n\nclass LyricsPageParser(html.parser.HTMLParser):\n def __init__(self):\n super().__init__(self, convert_charrefs=True)\n self.found_lyricbox = False\n self.recording = False\n self.lyrics = ''\n\n def handle_starttag(self, tag, attrs):\n if ('class', 'lyricbox') in attrs:\n self.found_lyricbox = True\n elif tag == 'br' and self.recording:\n self.lyrics += '\\n'\n elif tag in {'a'} and self.recording:\n if not attrs[0][0] == 'href':\n self.recording = False\n elif tag not in {'br', 'i', 'b', 'u'} and self.recording:\n self.recording = False\n\n def handle_data(self, data):\n if self.recording:\n self.lyrics += data\n\n def handle_comment(self, data):\n if self.recording:\n self.recording = False\n\n def handle_endtag(self, tag):\n if tag in {'span', 'script'} and self.found_lyricbox:\n self.recording = True\n\n def get_lyrics(self):\n return self.lyrics.strip()\n\n\nclass LyricsFinder:\n def __init__(self, artist, song, lyrics_dir):\n self.artist = artist\n self.song = song.replace('/', '-')\n self.lyrics = ''\n\n self.lyrics_dir = lyrics_dir\n self.artist_dir = os.path.join(self.lyrics_dir, artist)\n self.lyrics_file_path = os.path.join(self.artist_dir, self.song)\n\n\n def get_lyrics(self):\n self.find_lyrics()\n return self.lyrics\n\n\n def find_lyrics(self):\n if os.path.isfile(self.lyrics_file_path):\n self.lyrics = self.retrieve_file_lyrics()\n else:\n self.get_web_lyrics()\n\n\n def get_web_lyrics(self):\n self.lyrics = self.retrieve_web_lyrics()\n if \"we are not licensed\" in self.lyrics:\n raise LyricsNotLicensed\n elif self.lyrics.strip() == '':\n raise NoLyricsFound\n else:\n self.save_lyrics_file()\n\n\n def retrieve_web_lyrics(self):\n # Set up safe address for retrieval first\n address = \"http://lyrics.wikia.com/\" + self.artist + \":\" + self.song\n address = address.replace(' ', '_')\n address = urllib.parse.quote(address, safe=\"/:!()?-'\")\n\n # Retrieve text from web\n headers = {'User-agent':'Mozilla/5.0'}\n request = urllib.request.Request(address, None, headers)\n response = urllib.request.urlopen(request)\n page = response.read().decode('utf-8')\n\n # And finally parse it for the lyrics\n parser = LyricsPageParser()\n parser.feed(page)\n return parser.get_lyrics()\n\n\n def retrieve_file_lyrics(self):\n with open(self.lyrics_file_path, 'r') as lyrics_file:\n return lyrics_file.read()\n\n\n def save_lyrics_file(self):\n if not os.path.isdir(self.artist_dir):\n os.makedirs(self.artist_dir)\n with open(self.lyrics_file_path, 'w') as save_file:\n save_file.write(self.lyrics)\n self.link_redundant_files()\n\n\n def link_redundant_files(self):\n all_lyrics = self.find_all_lyrics_files()\n for song_path in all_lyrics:\n if (filecmp.cmp(self.lyrics_file_path, song_path) and\n (self.lyrics_file_path != song_path)\n ):\n self.link_files(self.lyrics_file_path, song_path)\n\n\n def link_files(self, path_1, path_2):\n symlink_dir, _ = os.path.split(path_1)\n os.remove(path_1)\n artist_dir, song = os.path.split(path_2)\n 
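# build a relative '../artist/song' target, presumably so the link still resolves if the whole lyrics dir is moved\n        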
_, artist = os.path.split(artist_dir)\n symlink = os.path.join('..', artist, song)\n os.chdir(symlink_dir)\n os.symlink(symlink, path_1)\n\n\n def find_all_lyrics_files(self):\n all_song_paths = []\n for artist in os.listdir(self.lyrics_dir):\n artist_dir = os.path.join(self.lyrics_dir, artist)\n for song in os.listdir(artist_dir):\n song_path = os.path.join(artist_dir, song)\n if not os.path.islink(song_path):\n all_song_paths.append(song_path)\n return all_song_paths\n\n\n def force_update(self):\n self.get_web_lyrics()\n\n\n##----- Ncurses Main Window -----##\nclass MainWindow:\n def __init__(self, stdscr, lyrics_dir=None,\n artist=None, song=None, song_first=False):\n self.stdscr = stdscr\n self.lyrics_dir = lyrics_dir\n self.artist = artist\n self.song = song\n if self.lyrics_dir is None:\n self.lyrics_dir = os.path.expanduser('~/Scripts/Data/Lyrics')\n\n if song_first is False:\n if self.artist is None:\n self.get_artist()\n if self.song is None:\n self.get_song()\n else:\n if self.song is None:\n self.get_song()\n if self.artist is None:\n self.get_artist()\n self.stdscr.clear()\n lyrics_finder = LyricsFinder(self.artist, self.song,\n self.lyrics_dir)\n lyrics = lyrics_finder.get_lyrics()\n try:\n DisplayLyrics(self.stdscr, lyrics)\n except ForceUpdate:\n lyrics_finder.force_update()\n lyrics = lyrics_finder.get_lyrics()\n DisplayLyrics(self.stdscr, lyrics)\n\n def get_artist(self):\n if self.song is None:\n artists = self.get_artists()\n else:\n artists = self.get_song_artists()\n self.artist = DisplayItems(self.stdscr, artists,\n 'Available Artists: ').get_item()\n\n\n def get_artists(self):\n artists = [dir for dir in os.listdir(self.lyrics_dir)\\\n if os.path.isdir(os.path.join(self.lyrics_dir,dir))\n ]\n return artists\n\n\n def get_song_artists(self):\n artists = self.get_artists()\n valid_artists = []\n for artist in artists:\n songs = self.get_songs(artist)\n\n for song in songs:\n if song == self.song:\n valid_artists.append(artist)\n return valid_artists\n\n\n def get_song(self):\n if self.artist is None:\n songs = []\n for artist in self.get_artists():\n songs += self.get_songs(artist)\n else:\n try:\n songs = self.get_songs(self.artist)\n except FileNotFoundError:\n songs = []\n songs = list(set(songs))\n self.song = DisplayItems(self.stdscr, songs,\n 'Available Songs: ').get_item()\n\n\n def get_songs(self, artist=None):\n if artist == None:\n artist = self.artist\n artist_dir = os.path.join(self.lyrics_dir, artist)\n songs = [song for song in os.listdir(artist_dir)\\\n if os.path.isfile(os.path.join(artist_dir, song))\n ]\n return songs\n\n\n def display_lyrics(self):\n lyrics_file_path = os.path.join(self.lyrics_dir, self.artist,\n self.song)\n with open(lyrics_file_path, 'r') as lyrics_file:\n lyrics_string = lyrics_file.read()\n DisplayLyrics(lyrics_string)\n\n\nclass EscKey(Exception):\n pass\n\nclass EnterKey(Exception):\n pass\n\n\n##----- Ncurses Display Lists -----##\nclass DisplayItems:\n def __init__(self, stdscr, items, top_display=''):\n self.stdscr = stdscr\n self.items = items\n self.lexographical_sort()\n self.top_display = top_display\n\n self.top = 0 # Top item of items currently displayed\n\n self.left = 0 # Index of leftmost item with prefix self.current_string\n self.right = len(items)\n\n self.highlighted = 0\n self.current_string = ''\n\n curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.curs_set(0)\n\n 
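# blocks until the user confirms an item (Enter) or quits (Esc)\n        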
self.main_loop()\n\n def get_item(self):\n return self.current_string\n\n def lexographical_sort(self):\n self.items.sort()\n self.items.sort(key=lambda v: (v.upper(), not v[0].islower()))\n\n\n def term_width(self):\n return os.get_terminal_size()[0]-1\n\n def term_height(self):\n return os.get_terminal_size()[1]-1\n\n def display_item(self, line, item):\n term_width = self.term_width()\n display_item = item[:term_width-1]\n if item == self.items[self.highlighted]:\n self.stdscr.addstr(line, 0, display_item,\n curses.A_REVERSE)\n else:\n self.stdscr.addstr(line, 0, display_item)\n\n if item == self.items[self.highlighted]:\n self.stdscr.chgat(line, 0, len(self.current_string),\n curses.A_REVERSE|curses.color_pair(1))\n else:\n self.stdscr.chgat(line, 0, len(self.current_string),\n curses.color_pair(1))\n\n\n def update_display(self):\n self.stdscr.clear()\n term_height = self.term_height()\n start = self.top\n end = min(start+term_height, self.right)\n\n self.stdscr.addstr(0, 0, self.top_display,\n curses.A_BOLD|curses.color_pair(2))\n for line, item in enumerate(self.items[start:end],\n start=1):\n if line + 1 > term_height:\n break\n else:\n self.display_item(line, item)\n if start == end:\n term_width = self.term_width()\n notice_string = 'Press Enter/Return to submit item as is'\n right_chr = min(len(notice_string), term_width)\n display_string = notice_string[:right_chr]\n line_no = term_height//2\n left_chr = max((term_width-len(notice_string))//2, 0)\n self.stdscr.addstr(line_no, left_chr, display_string,\n curses.A_BOLD|curses.color_pair(3))\n\n rightmost_character = max(0,\n len(self.current_string)-self.term_width()\n )\n self.stdscr.addstr(self.term_height(), 0,\n self.current_string[rightmost_character:],\n curses.A_BOLD|curses.color_pair(1)\n )\n\n def main_loop(self):\n while True:\n self.update_display()\n key = self.stdscr.getch()\n try:\n self.key_handler(key)\n except EscKey:\n sys.exit()\n except EnterKey:\n if not self.left == self.right:\n self.current_string = self.items[self.highlighted]\n else:\n self.current_string = self.current_string.strip()\n break\n\n\n def move_lines(self, no_lines):\n self.top = min(max(self.left, self.top+no_lines),\n self.right-1)\n self.highlighted = min(max(self.left,\n self.highlighted+no_lines),\n self.right-1)\n\n\n def get_longest_prefix(self):\n left = self.left\n right = self.right\n longest_prefix = os.path.commonprefix(self.items[left:right])\n if len(self.current_string) < len(longest_prefix):\n return longest_prefix\n else:\n return self.current_string\n\n\n def key_handler(self, key):\n term_height = self.term_height()\n if key in {27, curses.KEY_BREAK, curses.KEY_END}: # 27 -> ESC key\n raise EscKey\n elif key in {curses.KEY_ENTER, ord('\\n')}:\n raise EnterKey\n elif key in {curses.KEY_LEFT, curses.KEY_RIGHT,\n curses.KEY_IC, curses.KEY_DC, curses.KEY_RESIZE\n }:\n pass\n elif key in {curses.KEY_UP}:\n self.move_lines(-1)\n elif key in {curses.KEY_DOWN}:\n self.move_lines(1)\n elif key in {curses.KEY_NPAGE}:\n self.move_lines(term_height-2)\n elif key in {curses.KEY_PPAGE}:\n self.move_lines(-term_height+2)\n elif key in {curses.KEY_BACKSPACE, curses.KEY_DC}:\n self.current_string = self.current_string[:-1]\n self.update_range()\n elif key in {ord('\\t')}:\n self.current_string = self.get_longest_prefix()\n self.update_range()\n else:\n self.current_string += chr(key)\n self.update_range()\n\n\n def update_range(self):\n current_lower = self.current_string.lower()\n for i, item in enumerate(self.items):\n item_lower = 
item.lower()\n if item_lower.startswith(current_lower):\n self.left = i\n break\n else:\n self.left = 0\n for i, item in reversed(list(enumerate(self.items))):\n item_lower = item.lower()\n if item_lower.startswith(current_lower):\n self.right = i + 1\n break\n else:\n self.right = 0\n self.top = self.left\n self.highlighted = self.left\n\nclass RepeatWindow(Exception):\n pass\n\nclass ForceUpdate(Exception):\n pass\n\nclass DisplayLyrics:\n def __init__(self, stdscr, lyrics):\n self.stdscr = stdscr\n self.lyrics = lyrics\n self.lyrics_lines = ['-'*1000] + lyrics.split('\\n') + ['-'*1000]\n self.no_lines = len(self.lyrics_lines)\n\n self.top = 0\n\n self.main_loop()\n\n\n def main_loop(self):\n while True:\n self.update_display()\n key = self.stdscr.getch()\n try:\n self.key_handler(key)\n except EscKey:\n sys.exit()\n except EnterKey:\n raise RepeatWindow\n\n\n def term_width(self):\n return os.get_terminal_size()[0]\n\n\n def term_height(self):\n return os.get_terminal_size()[1]\n\n\n def display_item(self, line, item):\n term_width = self.term_width()\n display_item = item[:term_width-1]\n self.stdscr.addstr(line, 0, display_item)\n\n\n def update_display(self):\n self.stdscr.clear()\n term_height = self.term_height()\n start = self.top\n end = min(start + term_height, self.no_lines)\n\n for line, item in enumerate(self.lyrics_lines[start:end],\n start=0):\n if line > term_height:\n break\n else:\n self.display_item(line, item)\n\n def fits_in_screen(self):\n return self.term_height() > self.no_lines\n\n\n def move_lines(self, no_lines):\n if not self.fits_in_screen():\n self.top = min(max(0, self.top+no_lines),\n self.no_lines-self.term_height()\n )\n\n\n def key_handler(self, key):\n term_height = self.term_height()\n if key in {27, curses.KEY_BREAK, curses.KEY_END,\n ord('q'), ord('Q')}: # 27 -> ESC key\n raise EscKey\n elif key in {curses.KEY_ENTER, ord('\\n')}:\n raise EnterKey\n elif key in {curses.KEY_LEFT, curses.KEY_RIGHT,\n curses.KEY_IC, curses.KEY_DC\n }:\n pass\n elif key in {ord('r'), ord('u')}:\n raise ForceUpdate\n elif key in {curses.KEY_UP}:\n self.move_lines(-1)\n elif key in {curses.KEY_DOWN}:\n self.move_lines(1)\n elif key in {curses.KEY_NPAGE}:\n self.move_lines(term_height-2)\n elif key in {curses.KEY_PPAGE}:\n self.move_lines(-term_height+2)\n else:\n pass\n\ndef argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('song')\n parser.add_argument('artist', nargs='?')\n parser.add_argument('-d', '--dump', action='store_true')\n parser.add_argument('-r', '--reversed', action='store_true')\n args = parser.parse_args(sys.argv[1:])\n return args\n\n\ndef main():\n os.environ['ESCDELAY'] = '25'\n try:\n reverse = False\n if len(sys.argv) > 1:\n if sys.argv[1] == '-r':\n reverse = True\n while True:\n try:\n curses.wrapper(MainWindow, song_first=reverse)\n except RepeatWindow:\n pass\n except NoLyricsFound:\n print('No lyrics found')\n except urllib.error.HTTPError:\n print('No lyrics found')\n except LyricsNotLicensed:\n print('Lyrics not licensed')\n\nmain()\n","sub_path":"testing/old_version.py","file_name":"old_version.py","file_ext":"py","file_size_in_byte":16814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"394968989","text":"from flask import Flask, render_template, Markup, abort, jsonify, Markup\nimport MySQLdb\nimport MySQLdb.cursors\nimport markdown\nfrom datetime import datetime\nfrom config import Config\n\n# UTF-8 Fix\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\napp 
= Flask(__name__)\n\ndatabase = MySQLdb.connect(host=Config.database_host, user=Config.database_user, passwd=Config.database_password, db=Config.database_name, cursorclass=MySQLdb.cursors.DictCursor)\ndatabase.autocommit(True)\nc = database.cursor()\n\n\n@app.route(\"/\")\ndef hello():\n articles = []\n\n c.execute(\"SELECT * FROM articles\")\n data = c.fetchall()\n\n for item in data:\n content_p = Markup(markdown.markdown(item[\"content\"]))\n articles.append({\"title\": item[\"title\"], \"content\": content_p, \"link\": item[\"uri\"]})\n\n return render_template(\"index.html\", articles=articles)\n\n@app.route(\"/article/\")\ndef show_article(uri):\n c.execute(\"SELECT * FROM articles WHERE `uri`='%s'\" % uri)\n article = c.fetchone()\n if article:\n content_p = Markup(markdown.markdown(article[\"content\"]))\n return render_template(\"article.html\", article={\"title\": article[\"title\"], \"content\": content_p})\n else:\n abort(404)\n\n@app.errorhandler(404)\ndef error_404(e):\n return render_template(\"404.html\"), 404\n\napp.debug = True\napp.run(host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"117901386","text":"# coding=utf-8\n# Author: Jianghan LI\n# Question: 133.Clone_Graph\n# Date: 2017-05-10\n\n\n# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n\n def cloneGraph(self, node):\n if not node:\n return None\n find = {}\n seen = set()\n stack = [node]\n while stack:\n cur = stack.pop()\n if cur.label in seen:\n continue\n if cur.label not in find:\n find[cur.label] = UndirectedGraphNode(cur.label)\n for i in cur.neighbors:\n if i.label not in find:\n find[i.label] = UndirectedGraphNode(i.label)\n find[cur.label].neighbors.append(find[i.label])\n stack.append(i)\n seen.add(cur.label)\n return find[node.label]\n\n def cloneGraph(self, node):\n def getNode(label):\n if label not in nodes:\n nodes[label] = UndirectedGraphNode(label)\n return nodes[label]\n if not node:\n return None\n nodes, seen, stack = {}, set(), [node]\n while stack:\n cur = stack.pop()\n if cur.label in seen:\n continue\n for i in cur.neighbors:\n getNode(cur.label).neighbors.append(getNode(i.label))\n stack.append(i)\n seen.add(cur.label)\n return getNode(node.label)\n","sub_path":"problems/133.Clone_Graph/li.py","file_name":"li.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"264663830","text":"\"\"\"\na simple test of given web site URL for online\nIt is important to give web site url as: http://www.google.com\nelse a schema error is dumped.\n\n\"\"\"\nimport sys\nimport requests\nimport time\n\n# defults and globals\nHORIZONTAL_DASH = 60\nALLOW_REDIRECTS = False\nTIME_PLACES = 3\n\n\ndef check_web():\n # input must have at least 2 inputs.\n if len(sys.argv) < 2:\n print(\"Usage: {} site1 site2 site2...\".format(sys.argv[0]))\n print(\"Verify input web sites are online.\")\n sys.exit(0)\n\n print(\"Checking web site status for:\")\n print(\" \".join(sys.argv[1:]))\n\n print(\"-\" * HORIZONTAL_DASH)\n try:\n for site in sys.argv[1:]:\n tc1 = time.clock()\n r = requests.head(site, allow_redirects=ALLOW_REDIRECTS)\n tc2 = time.clock()\n print(\"Site URL:\", site)\n 
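# r.ok is simply status_code < 400, so redirect responses still count as online here\n            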
print(\"Returned results:\")\n print(\"return.ok:\", r.ok)\n print(\"return.status_code:\", r.status_code)\n print(\"Web site response time:\", round(tc2 - tc1, TIME_PLACES), \"secs\")\n print(\"-\" * HORIZONTAL_DASH)\n except requests.ConnectionError as ce:\n print(\"Error: ConnectionError: {}\".format(ce))\n sys.exit(1)\n except requests.exceptions.MissingSchema as ms:\n print(\"Error: MissingSchema: {}\".format(ms))\n sys.exit(1)\n except Exception as e:\n print(\"Error: Exception: {}\".format(e))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n check_web()","sub_path":"web_site_tester.py","file_name":"web_site_tester.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108250708","text":"import MySQLdb\nimport datetime\nimport requests\n\n\ndef addhisto(cursorDebian, userID, cursorLocal, dbLocal, table):\n cursorDebian.execute(\"select * from \"+table+\" where us_id = \"+userID+\" ;\")\n # Fetch all row.\n data = cursorDebian.fetchall()\n #Add to local bdd\n print(data)\n for row in data:\n cursorLocal.execute(\"insert into \"+table+\" values \"+row+\";\")\n dbLocal.commit()\n\ndef checkIfExists(id, table, name):\n db = MySQLdb.connect(\"localhost\", \"usrRemMeds\", \"azerty\", \"remmeds\")\n cursor = db.cursor()\n\n #print(\"checkIfExists\")\n\n cursor.execute(\"select * from \" + table + \" where \" + name + \" = \" + str(id))\n data = cursor.fetchone()\n\n #print(data)\n #print(\"\\n\\n\")\n\n if(data):\n #print(\"False\")\n return False\n else:\n #print(\"True\")\n return True\n\n\ndef synchroBDD(userID):\n dbLocal = MySQLdb.connect(\"localhost\", \"usrRemMeds\", \"azerty\", \"remmeds\")\n cursorLocal = dbLocal.cursor()\n\n userID = str(userID)\n\n #-------------------------------------------------------------------------------------------------------------\n #For User\n\n # Fetch all row from user.\n r = requests.get(\"http://212.73.217.202:15020/raspberry/get_user/\" + str(userID))\n result = r.json()\n data = result[\"user\"][0]\n print(data)\n print(data[\"user_id\"])\n print(\"\\n\")\n\n #if(checkIfExists(data[\"user_id\"], \"rm_user\", \"us_id\")):\n liste = []\n for i in data:\n #print (i)\n if(i != None):\n liste.append(str(i))\n else:\n liste.append(\"\")\n #print(\"\\n\\n\")\n #print(data[0])\n #print(\"\\n\\n\")\n #print(liste)\n\n data = str(data[\"user_id\"])+\",\"+str(data[\"lastname\"])+\",\"+str(data[\"firstname\"])+\",\"+str(data[\"mail\"])+\",\" \\\n \"\"+str(data[\"pref_breakfast\"])+\",\"+str(data[\"pref_lunch\"])+\",\"+str(data[\"pref_dinner\"])+\",\"+str(data[\"pref_bedtime\"])+\");\"\n print(data)\n\n cursorLocal.execute(\"\"\"insert into rm_user (us_id, us_lastname, us_firstname, us_mail, us_prefbreakfast,\n us_preflunch, us_prefdinner, us_prefbedtime)\n values (%s, %s, %s, %s, %s, %s, %s, %s);\"\"\", data)\n dbLocal.commit()\n\n # -------------------------------------------------------------------------------------------------------------\n\n\n # -------------------------------------------------------------------------------------------------------------\n\n return \"Synchro\";\n\ndef resetBDD():\n dbLocal = MySQLdb.connect(\"localhost\", \"usrRemMeds\", \"azerty\", \"remmedsTest\")\n cursor = dbLocal.cursor()\n cursor.execute(\"DELETE FROM rm_comp_preset;\")\n cursor.execute(\"DELETE FROM rm_compartment;\")\n cursor.execute(\"DELETE FROM rm_connect;\")\n cursor.execute(\"DELETE FROM rm_historic;\")\n cursor.execute(\"DELETE FROM rm_repertory;\")\n 
cursor.execute(\"DELETE FROM rm_user;\")\n dbLocal.commit()\n dbLocal.close()\n return \"RESET\";\n\ndef checkHour(numComp):\n date = datetime.datetime.now()\n hour = str(date.hour) # + \":\" + str(date.minute)\n dbLocal = MySQLdb.connect(\"localhost\", \"usrRemMeds\", \"azerty\", \"remmeds\")\n cursor = dbLocal.cursor()\n\n numComp = str(numComp)\n cursor.execute(\"select com_id from rm_compartment where com_num = \" + numComp)\n\n idComp = cursor.fetchone()\n\n idComp = str(idComp[0])\n\n cursor.execute(\"select cpe_hour from rm_comp_preset where com_id = \" + idComp)\n\n\n result = cursor.fetchall()\n bool = False\n\n for row in result:\n row = row[0]\n print(row)\n if(row == hour):\n bool = True\n break\n\n if(bool):\n return True\n else:\n return False\n\n\n#print(resetBDD())\n\nprint(synchroBDD(18))\n\n\n","sub_path":"Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"151926281","text":"\n\n#calss header\nclass _ALCOVE():\n\tdef __init__(self,): \n\t\tself.name = \"ALCOVE\"\n\t\tself.definitions = [u'a small space in a room, formed by one part of a wall being further back than the parts on each side: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_alcove.py","file_name":"_alcove.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"50169901","text":"import os\nimport time\nimport argparse\n\n\ndef get_gpu_mem():\n _gpu_mem = None\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\n # [100, 200, 3000, 4000]\n _gpu_mem = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\n return _gpu_mem\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Parse script.')\n parser.add_argument('--script', type=str, help='script to run')\n parser.add_argument('--gpu_mem', type=int, default=2000,\n help='desired free gpu mem')\n\n # script = './experiments/ctdet_VID_dla_1x_4gpus.sh'\n args = parser.parse_args()\n script = args.script\n mem = args.gpu_mem\n print(\"script is: {}, desired mem is {}.\".format(script, mem))\n\n run_flag = False\n while not run_flag:\n gpu_mem = get_gpu_mem()\n if gpu_mem[0] > mem and gpu_mem[1] > mem and gpu_mem[2] > mem and gpu_mem[3] > mem:\n run_flag = True\n\n print(\"gpu free mem:\", gpu_mem)\n time.sleep(30)\n\n os.system(script)\n","sub_path":"queue_gpu.py","file_name":"queue_gpu.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353592153","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n''' \n@version: 1.0\n@author: lihui\n@file: templates.py\n@time: 2016/5/30 16:23\n'''\n\nfrom services import linux\n\nclass BaseTemplate(object):\n def __init__(self):\n self.name = 'YourTemplateName'\n self.group_name = 'YourGroupName'\n self.hosts = []\n self.services = []\n\nclass LinuxTemplate(BaseTemplate):\n def __init__(self):\n super(LinuxTemplate,self).__init__()\n self.name = 'LinuxTemplate'\n self.services = [\n linux.cpu,\n linux.memory,\n 
]\n\n\n","sub_path":"test/Monitor/server/conf/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"477409076","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n This software is governed by the CeCILL-B license under French law and\r\n abiding by the rules of distribution of free software. You can use,\r\n modify and/ or redistribute the software under the terms of the CeCILL-B\r\n license as circulated by CEA, CNRS and INRIA at the following URL\r\n \"http://www.cecill.info\".\r\n As a counterpart to the access to the source code and rights to copy,\r\n modify and redistribute granted by the license, users are provided only\r\n with a limited warranty and the software's author, the holder of the\r\n economic rights, and the successive licensors have only limited\r\n liability.\r\n In this respect, the user's attention is drawn to the risks associated\r\n with loading, using, modifying and/or developing or reproducing the\r\n software by the user in light of its specific status of free software,\r\n that may mean that it is complicated to manipulate, and that also\r\n therefore means that it is reserved for developers and experienced\r\n professionals having in-depth computer knowledge. Users are therefore\r\n encouraged to load and test the software's suitability as regards their\r\n requirements in conditions enabling the security of their systems and/or\r\n data to be ensured and, more generally, to use and operate it in the\r\n same conditions as regards security.\r\n The fact that you are presently reading this means that you have had\r\n knowledge of the CeCILL-B license and that you accept its terms.\r\n\"\"\"\r\n\r\nimport os\r\nimport h5py\r\nimport numpy as np\r\nfrom operator import add\r\n\r\n\r\ndef store2hdf53D(filename, datas, labels, create=True, startloc=None, chunksz=256):\r\n '''\r\n Store patches in format HDF5\r\n Adapted from 2D stor2hdf5 of Matcaffe (Caffe for matlab)\r\n Caffe supports data's form : N*C*H*W*D (numberOfBatches,channels,heigh,width,depth)\r\n\r\n ----------\r\n datas, labels : 5D array,\r\n formed N*C*H*W*D matrix of images should be normalized (e.g. 
to lie between 0 and 1) beforehand\r\n create : True or False (default: true)\r\n specifies whether to create file newly or to append to previously created file,\r\n useful to store information in batches when a dataset is too big to be held in memory\r\n startloc : (point at which to start writing data).\r\n By default,\r\n if create=1 (create mode), startloc.data=[1, 1, 1, 1, 1], and startloc.lab=[1, 1, 1, 1, 1];\r\n if create=0 (append mode), startloc.data=[K+1, 1, 1, 1, 1], and startloc.lab = [K+1, 1, 1, 1, 1];\r\n where K is the current number of samples stored in the HDF\r\n chunksz : integer (default: 256)\r\n (used only in create mode):\r\n specifies number of samples to be stored per chunk (see HDF5 documentation on chunking)\r\n for creating HDF5 files with unbounded maximum size - TLDR;\r\n higher chunk sizes allow faster read-write operations\r\n\r\n '''\r\n # verify that format is right\r\n dat_dims = datas.shape\r\n lab_dims = labels.shape\r\n num_samples = dat_dims[0]\r\n if (num_samples != lab_dims[0]):\r\n raise AssertionError('Number of samples should be matched between data and labels')\r\n\r\n # Check Create mode and Startloc\r\n if create == True:\r\n if os.path.isfile(filename):\r\n print ('Warning: replacing existing file' + filename + '\\n')\r\n os.remove(filename)\r\n\r\n file = h5py.File(filename, \"w\")\r\n dset = file.create_dataset(\"data\", shape=dat_dims, dtype='float32', maxshape=(None,) + tuple(dat_dims[1:]),\r\n chunks=(np.long(chunksz),) + tuple(dat_dims[1:]))\r\n lset = file.create_dataset(\"label\", shape=lab_dims, dtype='float32', maxshape=(None,) + tuple(lab_dims[1:]),\r\n chunks=(np.long(chunksz),) + tuple(lab_dims[1:]))\r\n dset[...] = datas\r\n lset[...] = labels\r\n if startloc is None:\r\n startloc = {'dat': 0, 'lab': 0}\r\n startloc['dat'] = (0,) + tuple(np.zeros(len(dat_dims) - 1, dtype='int'))\r\n startloc['lab'] = (0,) + tuple(np.zeros(len(lab_dims) - 1, dtype='int'))\r\n file.close()\r\n\r\n else: # append mode\r\n if startloc is None:\r\n file = h5py.File(filename, \"r\")\r\n prev_dat_sz = file[list(file.keys())[0]].shape\r\n prev_lab_sz = file[list(file.keys())[1]].shape\r\n if (prev_dat_sz[1:] != dat_dims[1:]):\r\n raise AssertionError('Data dimensions must match existing dimensions in dataset')\r\n if (prev_lab_sz[1:] != lab_dims[1:]):\r\n raise AssertionError('Label dimensions must match existing dimensions in dataset')\r\n startloc = {'dat': 0, 'lab': 0}\r\n startloc['dat'] = (prev_dat_sz[0],) + tuple(np.zeros(len(dat_dims) - 1, dtype='int'))\r\n startloc['lab'] = (prev_lab_sz[0],) + tuple(np.zeros(len(lab_dims) - 1, dtype='int'))\r\n file.close()\r\n\r\n # Writing data\r\n if (datas.size) or (labels.size):\r\n file = h5py.File(filename, \"r+\")\r\n dset = file['/data']\r\n lset = file['/label']\r\n dset.resize(map(add, dat_dims, startloc['dat']))\r\n lset.resize(map(add, lab_dims, startloc['lab']))\r\n dset[startloc['dat'][0]:startloc['dat'][0] + dat_dims[0], :, :, :, :] = datas\r\n lset[startloc['lab'][0]:startloc['lab'][0] + lab_dims[0], :, :, :, :] = labels\r\n else:\r\n assert 'store2hdf5 need datas'\r\n curr_dat_sz = file[list(file.keys())[0]].shape\r\n curr_lab_sz = file[list(file.keys())[1]].shape\r\n file.close()\r\n return [curr_dat_sz, curr_lab_sz]","sub_path":"MultiscaleSRModel/utils/store2hdf5.py","file_name":"store2hdf5.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"627595512","text":"# Author: Robert Guthrie\n\nimport 
torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport json\nimport os\nimport numpy as np\n\ntorch.manual_seed(1)\n\nwith open(os.path.join(opt.dataset_path, 'json_dataset', 'objects.json'), 'r') as f:\n\tobjects = json.load(f)\n\nword_to_ix = {}\nfor i, obj in enumerate(objects):\n\tword_to_ix[obj] = i\n\nglove = {}\nvocab = len(objects)\nmatrix_len = vocab\nweights_matrix = np.zeros((matrix_len, 300))\nwith open(opt.glove_path, 'rb') as f:\n\tfor l in f:\n\t\tline = l.decode().split()\n\t\tword = line[0]\n\t\tvect = np.array(line[1:]).astype(np.float)\n\t\tglove[word] = vect\nfor i, obj in enumerate(objects):\n\ttry:\n\t\tweights_matrix[word_to_ix[obj]] = glove[obj]\n\texcept KeyError:\n\t\tweights_matrix[word_to_ix[obj]] = np.random.normal(\n\t\t\tscale=0.6, size=(300, ))\nweights_matrix = torch.Tensor(weights_matrix)\n\n\nword_to_ix = {\"hello\": 0, \"world\": 1}\nembeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings\nlookup_tensor = torch.tensor([word_to_ix[\"hello\"]], dtype=torch.long)\nhello_embed = embeds(lookup_tensor)\nprint(hello_embed)","sub_path":"word_embedding.py","file_name":"word_embedding.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"162318750","text":"import numpy as np\nimport csv\n\ndef read_ipl_data_csv():\n ipl_matches_arrays = (','.join(i) for i in csv.reader(open('data/ipl_matches_small.csv','r'))) # ipl_matches_array=np.genfromtxt('/Users/saravanan/Documents/GIT/GreayAtom-Assignment/sara_Sheet1.csv',\n arrays=[]\n for row in ipl_matches_arrays:\n list1=row.split(',');\n arrays.append(list1)\n arrays=np.array(arrays)[1:]\n return arrays\n\n\ndef get_unique_teams_set():\n matchInfo=read_ipl_data_csv()\n teams1=np.unique(matchInfo[:,4:5])\n teams2=np.unique(matchInfo[:,5:6])\n uteams=np.union1d(teams1,teams2)\n #print(uteams)\n teams=set()\n teams.add('Kolkata Knight Riders'.encode('ASCII'))\n for x in set(uteams) :\n teams.add(x.encode('ASCII'))\n \n return teams\n\n","sub_path":"q07_get_unique_teams_set/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"357239479","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 22 04:00:31 2019\r\n\r\n@author: Salim\r\n\"\"\"\r\n\r\n#Break ve continue\r\n\r\n#break içinde bulundugu döngüyü sonlandırır\r\ni=0\r\nwhile(i<10):\r\n if i==5:\r\n break\r\n print(i)\r\n i+=1\r\n \r\n#%%\r\nliste=list(range(1,10))\r\nfor i in liste:\r\n if i==3 or i==5 : #3 ve 5 gelince döngü atlicak bu yüzden 3 ve 5 yazılmaz\r\n continue\r\n print(\"i:\",i)\r\n \r\n ","sub_path":"break ve contınue.py","file_name":"break ve contınue.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"411014893","text":"# Copyright 2018 The Cornac Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\nimport cornac as cn\n\nml_100k = cn.datasets.movielens.load_100k()\nratio_split = cn.eval_methods.RatioSplit(data=ml_100k, test_size=0.2,\n rating_threshold=4.0, verbose=True)\n\nbo = cn.models.BaselineOnly(max_iter=30, learning_rate=0.01, lambda_reg=0.02, verbose=True)\nsvd = cn.models.SVD(k=10, max_iter=30, learning_rate=0.01, lambda_reg=0.02, verbose=True)\n\nmae = cn.metrics.MAE()\nrmse = cn.metrics.RMSE()\n\ncn.Experiment(eval_method=ratio_split,\n models=[bo, svd],\n metrics=[mae, rmse]).run()\n","sub_path":"examples/svd_example.py","file_name":"svd_example.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"101511486","text":"import pandas as pd\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom parsl.monitoring.web_app.app import app, get_db, close_db\nfrom parsl.monitoring.web_app.utils import dropdown\nfrom parsl.monitoring.web_app.apps import workflow_details, tasks_details\n\n\ndef display_workflow(workflow_name):\n sql_conn = get_db()\n df_workflows = pd.read_sql_query('SELECT workflow_name, time_began, rundir, run_id FROM workflows WHERE workflow_name=(?)',\n sql_conn, params=(workflow_name, ))\n close_db()\n return html.Div(children=[\n html.H2(id='workflow_name', children=df_workflows['workflow_name'][0]),\n dropdown(id='run_number_dropdown', dataframe=df_workflows.sort_values(by='time_began', ascending=False), field='rundir'),\n dcc.Tabs(id=\"tabs\", value='workflow', children=[\n dcc.Tab(label='Workflow', value='workflow'),\n dcc.Tab(label='Tasks', value='tasks'),\n ]),\n html.Div(id='tabs-content')\n ])\n\n\n@app.callback(Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'workflow':\n return workflow_details.layout\n elif tab == 'tasks':\n return tasks_details.layout\n","sub_path":"parsl/monitoring/web_app/apps/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"533746295","text":"import sys\n\ndef File( fname, start, size=0x200, width=16 ):\n fp = open( fname, \"rb\" )\n fp.seek(start)\n row = start % width # 열\n col = (start / width) * width # 행\n r_size = 0\n line_start = row\n while True :\n if (r_size + (width-line_start) < size) :\n r_char = (width-line_start) # 읽어야할 문자 수\n r_size += (width-line_start)\n else :\n r_char = size - r_size\n r_size = size\n \n # print line_start, r_char\n line = fp.read(r_char)\n if len(line) == 0 :\n break\n # 주소 값\n output = \"{:08x} : \".format(int(col))\n # Hex 값 \n for c in line: \n output += line_start * \" \" \\\n + \"\".join( \"{:02x}\".format(c))\n output += \" \" \\\n + (width - (line_start + r_char) ) * \" \"\n # 문자 값\n output += 
line_start * \" \"\n \n output += \"\".join([chr(c) if IsPrint(c) else '.' for c in line])\n print(output)\n col += width\n line_start = 0\n if r_size == size :\n break\n fp.close()\n\ndef Buffer( buf, start, size=0x200, width=16 ):\n # 주어진 버퍼의 크기가 size보다 작다면 size값을 조정\n if len ( buf ) < size :\n size = len ( buf )\n row = start % width # 열\n col = (start // width) # 행\n # [row ... width*col]\n # [width*col ... width * (col+1)]\n r_size = 0\n line_start = row + ( col * width )\n # print hex(line_start), hex(width*(col+1))\n # print hex(row), hex(col)\n\n while True :\n line = buf[line_start:width * (col + 1)]\n \n if len(line) == 0 :\n break\n if ( ( r_size + len ( line ) ) < size ) :\n pass\n else :\n #print hex(line_start), hex(line_start + (size - r_size))\n line = line[0:(size - r_size)]\n r_size = size - len ( line )\n # 주소 값\n output = \"{:08x}: \".format( int(( line_start / width ) * width) )\n # Hex 값 \n for c in line: \n output += row * \" \" \\\n + \"\".join( \"{:02x} \".format(c))\n output += \" \" \\\n + (width - (row + len(line)) ) * \" \"\n # 문자 값\n output += row * \" \"\n output += \"\".join( [chr(c) if IsPrint(c) else '.' for c in line] )\n print(output)\n line_start = width * ( col + 1 )\n col += 1\n row = 0\n r_size += len( line )\n if r_size == size :\n break\n\ndef Dump ( buf, start, size=0x200):\n \n return buf[start: start+ size]\n\ndef IsPrint ( char ) :\n c = ord(chr(char))\n if c >= 0x20 and c < 0x80 :\n return True\n else :\n return False\n\nif __name__ == '__main__':\n File( sys.argv[1], \n int( sys.argv[2] ),\n int( sys.argv[3] ),\n int( sys.argv[4] ))","sub_path":"hexdump.py","file_name":"hexdump.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"603144296","text":"\"\"\"untitled14 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom text import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^login_action',views.login_action),\n url(r'^guest_manage',views.guest_manage),\n url(r'^event_manage',views.event_manage),\n url(r'^search_name',views.search_name),\n url(r'^event_edit/(\\d+)', views.event_edit),\n url(r'^event_del/(\\d+)', views.event_del),\n url(r'^event_add', views.event_add),\n url(r'^guest_edit/(\\d+)', views.guest_edit),\n url(r'^guest_add', views.guest_add),\n url(r'^guest_del/(\\d+)', views.guest_del),\n url(r'^logout', views.logout),\n\n\n]\n","sub_path":"untitled14/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"522846902","text":"'''\nCreated on Oct 19 16:45 2018\n@author :lilu\n'''\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 23 09:56:26 2018\n\n@author: 10724\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport utils\nimport input\n\ntf.reset_default_graph()\n\ntf.app.flags.DEFINE_float('alpha',0.001,'weight to adjust the ace loss')\ntf.app.flags.DEFINE_integer('batchsize',1024,'batch size')\ntf.app.flags.DEFINE_integer('epoches',50,'poches')\nFLAGS = tf.app.flags.FLAGS\n\nalpha = FLAGS.alpha\nbatch_size = FLAGS.batchsize\nepoches = FLAGS.epoches\n\ninput_x = tf.placeholder(tf.float32,shape=None)\ninput_y = tf.placeholder(tf.float32,shape=None)\nphase = tf.placeholder(tf.bool, name='phase')\n\ndef drop(data,rate=0.4):\n result = data.copy()\n [m,n] = data.shape\n for i in range(m):\n for j in range(n):\n if result[i,j] ==1:\n result[i,j] = np.random.binomial(1,1-rate)\n return result\n\ndata_x = input.readx('/data/users/lulusee/nus/Low_Level_Features','Train')\ndata_y = input.ready('/data/users/lulusee/nus/Tags/Train_Tags1k.dat')\ntest_x = input.readx('/data/users/lulusee/nus/Low_Level_Features','Test')\ntest_y = input.ready('/data/users/lulusee/nus/Tags/Test_Tags1k.dat')\nnum_samples = len(data_x)\ndata_y = drop(data_y)\n\nd = data_x.shape[1] # the feature dimention of input_x\nl = data_y.shape[1] # the num of labels\n\ny_countx = tf.convert_to_tensor(np.dot(data_y.T,data_y)*(1-np.diag(np.ones(l))),dtype=tf.float32)\ny_count = tf.convert_to_tensor(np.dot(data_y.T,data_y)*(np.diag(np.ones(l))),dtype=tf.float32)\n\n\n#initial parameters\nlayers = 4\ndims = [50,50,50,50]\nwith tf.variable_scope('input_x'):\n w = [None]*layers\n b = [None]*layers\n for i in range(layers):\n if i==0:\n weights = tf.random_uniform([d,dims[0]],minval = -np.sqrt(6)/(np.sqrt(d+dims[0])), \\\n maxval=np.sqrt(6)/(np.sqrt(d+dims[0])))\n else:\n weights = tf.random_uniform([dims[i-1],dims[i]],minval = -np.sqrt(6)/(np.sqrt(dims[i-1]+dims[i])), \\\n maxval=np.sqrt(6)/(np.sqrt(dims[i-1]+dims[i])))\n w[i] = tf.Variable(weights)\n b[i] = tf.Variable(tf.constant(0.001,shape=[dims[i]]))\n\nwith tf.variable_scope('input_y'):\n ytabel = tf.Variable(tf.random_uniform([l,dims[-1]],minval = -np.sqrt(6)/(np.sqrt(l+dims[-1])), \\\n maxval=np.sqrt(6)/(np.sqrt(l+dims[-1]))))\n biase = tf.get_variable('y_biase',initializer=0.001*tf.ones(l))\n\n#build computation graph\nfeax = [input_x]\nfor i in range(layers):\n tmp = tf.matmul(feax[i],w[i])+b[i]\n tmp = utils.batch_norm(tmp, phase)\n tmp = tf.nn.relu(tmp)\n feax.append(tmp)\nlogits = tf.matmul(feax[-1],ytabel,transpose_b=True)+biase\nlogloss = 
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=input_y,logits=logits))\n\nytable_inner = tf.matmul(ytabel,ytabel,transpose_b=True)\ncorrloss = -1*tf.reduce_sum(tf.multiply(ytable_inner,y_countx))/tf.reduce_sum(y_countx)\ny_cov = tf.matmul(tf.matmul(tf.transpose(ytabel),y_count),ytabel)/tf.cast(tf.reduce_sum(y_count) - 1, tf.float32)\ntraceloss = 0.5 * tf.reduce_sum(tf.reshape(y_cov, [-1]) * tf.reshape(y_cov, [-1]))\naceloss = corrloss+traceloss\ntotalloss = (1-alpha)*logloss+alpha*aceloss\n\noptimizer = tf.train.AdamOptimizer(0.001)\ntrain_step = optimizer.minimize(totalloss)\n\n\nprediction = tf.nn.sigmoid(logits)\ndef evaluation(pre,truth):\n pre[range(len(pre)),np.argmax(pre,axis=1)] = 1\n pre[pre<1] = 0\n precision = np.sum(pre*truth)/np.sum(pre)\n recall = np.sum(pre*truth)/np.sum(truth)\n fscore = 2*precision*recall/(precision+recall)\n acc = np.sum(pre==truth)/(truth.shape[0]*truth.shape[1])\n return acc,precision,fscore\n\ntestfscore = []\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n steps_per_epoch = int(num_samples/batch_size)\n for i in range(epoches):\n idx = list(range(len(data_x)))\n np.random.shuffle(idx)\n data_x = data_x[idx]\n data_y = data_y[idx]\n for j in range(steps_per_epoch):\n rand_x = data_x[j*batch_size:min((j+1)*batch_size,num_samples)]\n rand_y = data_y[j*batch_size:min((j+1)*batch_size,num_samples)]\n sess.run(train_step,feed_dict={input_x:rand_x,input_y:rand_y,phase:True})\n loglossv,acelossv = sess.run([logloss,aceloss],feed_dict={input_x:data_x,input_y:data_y,phase:False})\n trainlossv = sess.run(totalloss,feed_dict={input_x:data_x,input_y:data_y,phase:False})\n testlossv = sess.run(totalloss,feed_dict={input_x:test_x,input_y:test_y,phase:False})\n predictionv = sess.run(prediction,feed_dict={input_x:test_x,input_y:test_y,phase:False})\n acc,precision,fscore = evaluation(predictionv,test_y)\n print('epoch{}---trainloss{},logloss{},aceloss{}'.format(i,trainlossv,loglossv,acelossv))\n print('epoch{}---testloss{},testacc{},testprecision{},testfscore{}'.format(i,testlossv,acc,precision,fscore))\n testfscore.append(fscore)\navgfscore = np.mean(np.array(testfscore[-10:]))\nprint('average fsoce:%f'%(avgfscore))\n","sub_path":"nuswide/modelv1.py","file_name":"modelv1.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"204202216","text":"# Excel\nimport openpyxl\nwb = openpyxl.Workbook()\nsheet = wb.active\n\n# Variables globales\nVariacionesTemperatura = (2700, 4000)\nsistema = {\"Programa\": True}\nfinishings = { \"0\":\"Salir\", \"666\":\"Lista\", \"4\":\"Blanco\",\"2\":\"Negro\", \"98\":\"Aluminio Anodizado\",\"42\":\"Blanco/Negro\",\"22\":\"Negro/Negro\", \"3\": \"Negro\", \"5\":\"Aluminio\",\n \"6\":\"Cromo\", \"7\":\"Blanco\", \"8\":\"Negro Texturizado\", \"9\":\"Rojo\", \"10\":\"Rojo Brillante\", \"11\":\"Blanco Texturizado\", \"12\":\"Gris Texturizado\",\n \"14\": \"Gris Aluminio\", \"15\":\"Gris\", \"16\":\"Gris\", \"25\":\"Metal Satinado\", \"26\":\"Metal Satinado\", \"81\":\"Blanco\", \"82\":\"Negro\", \"86\":\"Dorado\",\n \"88\":\"Fibra de Carbono\", \"89\":\"Rojo\", \"92\":\"Negro\", \"94\":\"Blanco\", \"96\":\"Bronce Anodizado\", \"99\":\"Aluminio\", \"TR\":\"Transparente\", \"44\":\"Blanco/Blanco\",\n \"142\":\"Gris Aluminio/Negro\",\"144\":\"Gris/Blanco\",\"254\":\"Metal Satinado/Blanco\",\"252\":\"Metal Satinado/Negro\",\"24\":\"Negro/Blanco\",\"212\":\"Negro/Gris\"}\n\n#FUNCIONES (Bucle para codigos)\ndef 
loop(variable, descripcion_accion):\n while True:\n try:\n temporal = int(input(descripcion_accion + \"\\n\"))\n if temporal == 0:\n break\n elif temporal == 666:\n for ListaDeAcabado in finishings:\n print(ListaDeAcabado + finishings[ListaDeAcabado])\n continue\n elif variable == \"acabado\" :\n manejo_error = finishings[str(temporal)]\n variable.append(str(temporal))\n continue\n else:\n variable.append(str(temporal))\n continue\n except Exception as e:\n print(\"Introduce un valor numerico que se encuentre en la codificacion!\")\n continue\n\ndef SeleccionSimple(familia, variable, pregunta):\n while True:\n try:\n respuesta = input(pregunta + \"\\n\")\n if respuesta == \"N\":\n familia[variable] = False\n break\n elif respuesta != \"Y\" and respuesta != \"N\":\n print(\"Introduce una respuesta valida! (Solo Y/N en Uppercase)\")\n continue\n except Exception as e:\n print(\"Introduce una respuesta valida! (Solo Y/N en Uppercase)\")\n continue\n else:\n break\n\ndef CargarResultados(codigo,acabado):\n matriz_codigo.append(codigo)\n matriz_color.append(acabado)\n #INICIO DEL PROGRAMA\n\n#Programa\nwhile sistema[\"Programa\"] == True:\n try:\n\n #Varibles del programa\n variables = {\"Optica\": True, \"Longitud\": True, \"Potencia\": True, }\n codigo = str()\n matriz_codigo = []\n matriz_color = []\n optica = []\n acabado = []\n longitud = []\n\n #Inicio del programa\n print(\"\\n\\n\" + (\" \"*40) + \"AUTOCOMPLETACION DE TABLA DESIGN IT\"+ (\" \"*40) +\"\\n\")\n input(\"Para responder a las preguntas Y/N, procurar utilizar letras en mayusculas\"+ (\"\\n\"*26) + \"Presiona Enter para continuar\")\n print(\"\\n\"*32)\n #Introduccion de codigo base\n codigo = str(input( \"Introduce codigo base (separado por puntos):\"))\n\n #OPTICA\n SeleccionSimple(variables, \"Optica\", \"Posee optica variable? Y/N\")\n while variables[\"Optica\"] == True:\n print(\"\\n\\nVALORES DE OPTICA\\n\")\n input(\"INSTRUCIONES \\n1.- Colocar valores numericos sin simbolo de grado ° \\nEjemplo: 32 = 32 grados de apertura \\n2.-Introducir valor 0 para finalizar sección)\" + (\"\\n\"*25) + \"Presiona ENTER para continuar\")\n loop(optica, \"Introduce grados de optica\")\n break\n print(\"\\n\"*32)\n\n #Longitud variable\n if variables[\"Optica\"] == False:\n SeleccionSimple(variables, \"Longitud\", \"Posee longitud variable? 
Y/N\")\n while variables[\"Optica\"] == False and variables[\"Longitud\"] == True :\n print(\"\\n\\nVALORES DE LONGITUD\")\n input(\"INSTRUCIONES \\n1.- Colocar valores numericos de longitud en MM sin indicar unidad\\nEjemplo: 500 = 500 milimetros de longitud\\n2.-Introducir valor 0 para finalizar sección\"+ (\"\\n\"*25) +\"Presiona ENTER para continuar\")\n loop(longitud, \"Introduce longitud\")\n break\n print(\"\\n\"*32)\n\n #ACABADO\n print(\"\\n\\nCOLOR DE ACABADO\\n\")\n input(\"INSTRUCIONES:\\n1.-(Opcional) Introduciendo el numero '666', puedes revisar la codificacion de los colores mas utilizados\\n2.-Colocar valores numericos que simbolizan color de acabado (Introducir valor 0 para finalizar sección)\\n3.-Introducir valor 0 para finalizar sección\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nPresiona ENTER para continuar\")\n loop(acabado, \"Introduce codigo color del acabado\")\n print(\"\\n\"*32)\n\n #Calcular cadigos\n if variables[\"Optica\"] == True:\n for o in optica:\n if o == \"0\" or \"\":\n break\n else:\n for a in acabado:\n if a == \"0\":\n break\n else:\n resultado_codigo = codigo + \".\" + o + \".\" + a\n CargarResultados(resultado_codigo, finishings[a])\n for t in range(0, len(VariacionesTemperatura)):\n resultado_codigo = codigo + \".\" + str(VariacionesTemperatura[t]) + \".\" + o + \".\" + a\n CargarResultados(resultado_codigo, finishings[a])\n\n\n elif variables[\"Longitud\"] == True:\n for l in longitud:\n if l == \"0\" or \"\":\n break\n else:\n for a in acabado:\n if a == \"0\":\n break\n else:\n resultado_codigo = codigo + \".\" + l + \".\" + a\n CargarResultados(resultado_codigo, finishings[a])\n for t in range(0, len(VariacionesTemperatura)):\n resultado_codigo = codigo + \".\" + str(VariacionesTemperatura[t]) + \".\" + l + \".\" + a\n CargarResultados(resultado_codigo, finishings[a])\n\n else:\n for a in acabado:\n if a == \"0\":\n break\n else:\n resultado_codigo = codigo + \".\" + a\n CargarResultados(resultado_codigo, finishings[a])\n for t in range(0, len(VariacionesTemperatura)):\n resultado_codigo = codigo + \".\" + str(VariacionesTemperatura[t]) + \".\" + str(a)\n CargarResultados(resultado_codigo, finishings[a])\n\n #Seleccion de nombre y pasar resultados a EXCEL\n\n while True:\n try:\n NombreExcel = str(input(\"Introduce nombre del Excel donde deseas copiar los resultados:\\n\"))\n if NombreExcel == \"\":\n print(\"Introduce un nombre valido!\")\n continue\n except Exception as e:\n print(\"Ha ocurrido un error con el nombre seleccionado: \", e,\"\\nPor favor vuelve a introducir un nombre valido\")\n else:\n for a in range(0, len(matriz_codigo)):\n c1 = sheet[\"A\"+str(int(a+1))]\n c1.value = matriz_codigo[a]\n for a in range(0, len(matriz_color)):\n c1 = sheet[\"B\"+str(int(a+1))]\n c1.value = matriz_color[a]\n wb.save(\"C:\\\\Users\\\\DGLA-11\\\\github\\\\Database Design IT\\\\{}.xlsx\".format(NombreExcel))\n print(\"Codigos copiados en EXCEL con el nombre: \", NombreExcel)\n break\n\n\n\n except Exception as e:\n print(\"Ha ocurrido un error inesperado!\\nEl codigo del error es: \", e)\n\n #Continuar programa\n finally:\n SeleccionSimple(sistema,\"Programa\", \"Deseas continuar? 
Y/N\")\n","sub_path":"Python_Code.py","file_name":"Python_Code.py","file_ext":"py","file_size_in_byte":8323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"467822336","text":"import unittest\nimport sys\nimport numpy as np\nsys.path.append(\"../\")\n\ndef randwalk():\n import ludopy\n import numpy as np\n\n g = ludopy.Game()\n there_is_a_winner = False\n\n while not there_is_a_winner:\n (dice, move_pieces, player_pieces, enemy_pieces, player_is_a_winner,\n there_is_a_winner), player_i = g.get_observation()\n\n if len(move_pieces):\n piece_to_move = move_pieces[np.random.randint(0, len(move_pieces))]\n else:\n piece_to_move = -1\n\n _, _, _, _, _, there_is_a_winner = g.answer_observation(piece_to_move)\n\n print(\"Saving history to numpy file\")\n g.save_hist(\"game_history.npy\")\n print(\"Saving game video\")\n g.save_hist_video(\"randomwalk_game_video.mp4\")\n\n return True\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n self.assertEqual(True, randwalk())\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/randomwalk.py","file_name":"randomwalk.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"435031967","text":"from classes import Cliente, Aluno, Pessoa, ClienteVIP\n\n# Observar que, como os endereços estão associados por composição à classe Cliente,\n# ao deletar um cliente, os endereços são deletados juntos.\n\nc1 = Cliente('Luiz', 32)\nc1.falar()\nc1.comprar()\nprint()\n\nc2 = ClienteVIP('Rose', 32, 'Miranda')\nc2.falar()\nc2.comprar()\nprint()\n\na1 = Aluno('Maria', 54)\na1.falar()\na1.estudar()\nprint()\n\np1 = Pessoa('João', 43)\np1.falar()\nprint()\n\nprint('#########################################')\n","sub_path":"aula064-sobreposicao-de-membros/aula64.py","file_name":"aula64.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"253266829","text":"#!/usr/bin/env python\nimport gsm_modem as GSM_M\n\n#запуск программы\nif __name__ == '__main__':\n config = {}\n exec(open((\"config.py\"), encoding='utf8').read(), config)\n\n sms = GSM_M.GSMModem(config)\n #sms.connectPhone('COM5')\n #sms.sendMessage()\n #sms.getLevelSignal()\n sms.receiveSMS()\n #sms.delete_SMS()\n #sms.section_SMS()\n\n #sms.auto_finder_ports()\n\n sms.disconnectPhone()\n","sub_path":"NikitaKastyrya/gsm_modem/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"525466339","text":"#!/usr/bin/python3\n# 卓球個人世界ランキング上位50位の国別総数\nimport requests, bs4, collections\nimport ittfWRwhichOpen\nimport pie_grap\n\n\ndef number_of_people():\n # ITTFのサイトから男子か女子の個人世界ランクのページを選択取得\n url = ittfWRwhichOpen.men_women()\n res = requests.get(url, stream=True, verify=False)\n res.raise_for_status()\n soup = bs4.BeautifulSoup(res.text, 'lxml')\n\n # データから国名をリストで取り出す\n elem = soup.select('table td img.fabrikLightBoxImage')\n countrys = [imgtag.get('title') for imgtag in elem]\n\n # dictの国別の人数を降順にソート\n cnt_countrys = collections.Counter(countrys)\n sort_dic = collections.OrderedDict(sorted(cnt_countrys.items(), key=lambda x: x[1], reverse=True))\n\n return sort_dic\n\n\ndef display(dic):\n # pie_grap用データ\n label = []\n data = []\n for country, num in dic.items():\n label.append(country)\n data.append(num)\n\n # displayと円グラフで出力\n for 
country, i in dic.items():\n print('{}: {}名'.format(country, i))\n pie_grap.pie(label, data)\n\n\nif __name__ == '__main__':\n display(number_of_people())\n","sub_path":"kadai01/countrys_pie.py","file_name":"countrys_pie.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"278710978","text":"# encoding: utf-8\n\n\nfrom TaskInstance import TaskInstanceStatus\nfrom TaskExecution import TaskExecution\nfrom TaskExecution import TaskExecutionResult\nfrom JobExecution import JobExecutionResult\n\n\nclass RunningJob(object):\n\n def __init__(self, jobExecution):\n self.__jobExecution = jobExecution\n self.__failureCounter = 0\n self.__waitredoCounter = 0\n\n def getJobExecution(self):\n return self.__jobExecution\n\n def availableTasks(self):\n taskTop = self.__jobExecution.getTaskTop()\n tasks = taskTop.available()\n if len(tasks) == 0:\n return tasks\n result = []\n for task in tasks:\n instance = task.getTaskInstance()\n status = instance.getStatus()\n if status == TaskInstanceStatus.SUCCESS:\n taskTop.markup(task, True)\n elif status == TaskInstanceStatus.UNMEET or status == TaskInstanceStatus.FAILURE:\n taskTop.markup(task, False)\n self.__failureCounter = self.__failureCounter + 1\n else:\n context = self.__jobExecution.getContext().clone()\n taskExecution = TaskExecution(task, context)\n result.append(taskExecution)\n\n if len(result) != 0:\n return result\n return self.availableTasks()\n\n def feedback(self, taskExecution, result):\n task = taskExecution.getTask()\n taskTop = self.__jobExecution.getTaskTop()\n if result == TaskExecutionResult.SUCCESS:\n taskTop.markup(task, True)\n self.__jobExecution.getContext().merge(taskExecution.getContext())\n elif result == TaskExecutionResult.FAILURE:\n self.__failureCounter = self.__failureCounter + 1\n taskTop.markup(task, False)\n elif result == TaskExecutionResult.UNMEET:\n taskTop.markup(task, False)\n else:\n self.__waitredoCounter = self.__waitredoCounter + 1\n taskTop.markup(task, False)\n\n def hasProcessing(self):\n return self.__jobExecution.getTaskTop().hasProcessing()\n\n def getJobExecutionResult(self):\n if self.__waitredoCounter > 0:\n return JobExecutionResult.WAITREDO\n if self.__failureCounter > 0:\n return JobExecutionResult.FAILURE\n return JobExecutionResult.SUCCESS\n","sub_path":"RunningJob.py","file_name":"RunningJob.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"552057102","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 14:33:08 2017\n\n@author: admin\n\"\"\"\n\n#FitzHugh-Nagumo model\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom FHNneuron import FNNeuron\n\ndef main():\n neu = FNNeuron(simtime=200,numneu=1,Iext=1)\n \n for i in range(0, int(neu.cycle-1)):\n neu.propagation()\n\n\n t = np.arange(0, neu.simtime, neu.timestep)\n\n #print(neu.x[0])\n\n \n #グラフを左右に表示\n fig, (axL, axR) = plt.subplots(ncols = 2, figsize = (10,4))\n #menbrane potential\n axL.plot(t, neu.x[0])\n axL.set_title('FHN VOLTAGE')\n axL.set_xlabel('t')\n axL.set_ylabel('x')\n #nullcline\n axR.plot(neu.x[0], neu.y[0])\n axR.set_title('topological space')\n axR.set_xlabel('x')\n axR.set_ylabel('y')\n \n \n #fig.show()\n plt.show()\n\n\nif __name__ == '__main__':\n 
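# Illustrative alternative, assuming the same scraped country-name list as
# number_of_people() above: Counter.most_common() already yields
# (item, count) pairs in descending count order, replacing the manual
# OrderedDict sort.
from collections import Counter

def count_countries(countries):
    return Counter(countries).most_common()

print(count_countries(["JPN", "CHN", "JPN", "GER", "CHN", "CHN"]))
# [('CHN', 3), ('JPN', 2), ('GER', 1)]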
main()","sub_path":"FHNmodel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"458841366","text":"import collections\n\ndef read_file(filename):\n bots = collections.defaultdict(list)\n navodila = collections.defaultdict(tuple)\n\n for vrstica in open(filename):\n n = vrstica.split()\n if vrstica.startswith('value'):\n st = int(n[1])\n bot = int(n[-1])\n bots[bot].append(st)\n elif vrstica.startswith('bot'):\n kdo = int(n[1])\n p1, st1 = n[5], int(n[6])\n p2, st2 = n[-2], int(n[-1])\n navodila[kdo] = (p1, st1), (p2, st2)\n return bots, navodila\n\ndef highest_output(instructions):\n naj = 0\n for (n1, st1), (n2, st2) in instructions.values():\n if n1 == 'output':\n naj = max(st1, naj)\n if n2 == 'output':\n naj = max(st2, naj)\n return naj\n\ndef execute(bots, outputs, bot, instructions):\n if len(bots[bot]) != 2:\n return False\n\n (m1, m2), (v1, v2) = instructions[bot]\n\n if m1 == 'bot':\n bots[m2].append(min(bots[bot]))\n elif m1 == 'output':\n outputs[m2] = min(bots[bot])\n if v1 == 'bot':\n bots[v2].append(max(bots[bot]))\n elif v1 == 'output':\n outputs[v2] = max(bots[bot])\n bots[bot] = []\n return True\n\ndef run(bots, instructions):\n n = 0\n for (p1, p2), (p3, p4) in instructions.values():\n if p1 == 'output':\n n += 1\n if p3 == 'output':\n n += 1\n\n outputs = [None] * n\n\n test = list(instructions.keys())\n\n while test:\n for k in instructions:\n vrne = execute(bots, outputs, k, instructions)\n if vrne is True:\n test.remove(k)\n return outputs\n\ndef count_paths(bot, instructions):\n st = 0\n pregled = [bot]\n\n while pregled:\n (n1, v1), (n2, v2) = instructions[pregled[0]]\n\n if n1 == 'output':\n st += 1\n elif n1 == 'bot':\n pregled.append(v1)\n if n2 == 'output':\n st += 1\n elif n2 == 'bot':\n pregled.append(v2)\n del pregled[0]\n return st\n\ndef bots_on_path(output, instructions):\n roboti_na_poti = set()\n\n for bot, ((n1, st1), (n2, st2)) in instructions.items():\n if n1 == 'output' and st1 == output:\n roboti_na_poti.add(bot)\n if n2 == 'output' and st2 == output:\n roboti_na_poti.add(bot)\n\n pregled = list(roboti_na_poti)\n\n while pregled:\n robot = pregled[0]\n for bot, ((n1, st1), (n2, st2)) in instructions.items():\n\n if n1 == 'bot' and st1 == robot:\n pregled.append(bot)\n if n2 == 'bot' and st2 == robot:\n pregled.append(bot)\n roboti_na_poti |= set(pregled)\n del pregled[0]\n pregled = list(set(pregled))\n\n return roboti_na_poti\n\nimport unittest\n\nclass Test06(unittest.TestCase):\n def test_read_file(self):\n bots, instructions = read_file(\"example.txt\")\n self.assertEqual(bots[1], [3])\n self.assertEqual(set(bots[2]), {2, 5})\n self.assertEqual(instructions,\n {2: ((\"bot\", 1), (\"bot\", 0)),\n 1: ((\"output\", 1), (\"bot\", 0)),\n 0: ((\"output\", 2), (\"output\", 0))})\n\n bots, instructions = read_file(\"input.txt\")\n self.assertEqual(bots[206], [43])\n self.assertEqual(instructions[106], ((\"bot\", 83), (\"bot\", 82)))\n self.assertEqual(instructions[48], ((\"bot\", 190), (\"bot\", 156)))\n self.assertEqual(instructions[53], ((\"output\", 0), (\"bot\", 6)))\n\n bots, instructions = read_file(\"example2.txt\")\n self.assertEqual(instructions,\n {1: (('bot', 3), ('bot', 4)),\n 2: (('bot', 4), ('output', 0)),\n 3: (('output', 5), ('bot', 5)),\n 4: (('bot', 5), ('bot', 6)),\n 5: (('output', 1), ('bot', 7)),\n 6: (('bot', 7), ('output', 4)),\n 7: (('output', 2), ('output', 3))}\n )\n bots = {k: sorted(v) for k, v in 
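# The FHNneuron module imported by the FitzHugh-Nagumo script above is not
# shown; as an assumed sketch, one forward-Euler propagation step of the
# standard FitzHugh-Nagumo system looks like this (a, b, tau are common
# textbook values, not necessarily the script's):
def fhn_step(x, y, Iext, dt=0.01, a=0.7, b=0.8, tau=12.5):
    dx = x - x**3 / 3.0 - y + Iext    # fast membrane variable
    dy = (x + a - b * y) / tau        # slow recovery variable
    return x + dt * dx, y + dt * dy

x, y = -1.0, 1.0
for _ in range(20000):
    x, y = fhn_step(x, y, Iext=1.0)
print(round(x, 3), round(y, 3))  # sustained oscillation for this drive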
bots.items()}\n self.assertEqual(bots, {1: [1, 2], 2: [3, 4], 3: [5], 6: [6]})\n\n def test_highest_output(self):\n self.assertEqual(highest_output({2: ((\"bot\", 1), (\"bot\", 0)),\n 1: ((\"output\", 1), (\"bot\", 0)),\n 0: ((\"output\", 2), (\"output\", 0))}), 2)\n self.assertEqual(highest_output({0: ((\"output\", 1), (\"output\", 0))}), 1)\n self.assertEqual(highest_output({0: ((\"output\", 4), (\"output\", 0))}), 4)\n self.assertEqual(highest_output({0: ((\"output\", 0), (\"output\", 4))}), 4)\n self.assertEqual(highest_output({2: ((\"bot\", 1), (\"bot\", 0)),\n 1: ((\"output\", 1), (\"bot\", 0)),\n 0: ((\"output\", 42), (\"output\", 0))}), 42)\n self.assertEqual(highest_output({2: ((\"bot\", 1), (\"bot\", 0)),\n 1: ((\"output\", 1), (\"bot\", 0)),\n 0: ((\"output\", 0), (\"output\", 42))}), 42)\n\n\nclass Test07(unittest.TestCase):\n def test_execute(self):\n from copy import deepcopy\n from collections import defaultdict\n initial = defaultdict(list)\n initial.update({0: [42], 1: [13, 60], 2: [61, 15], 3: [], 4: [16, 62]})\n instructions = {0: ((\"output\", 1), (\"bot\", 3)),\n 1: ((\"output\", 2), (\"bot\", 0)),\n 2: ((\"bot\", 3), (\"bot\", 0)),\n 3: ((\"output\", 0), (\"output\", 3)),\n 4: ((\"bot\", 6), (\"bot\", 5))}\n outputs = [None] * 4\n bots = deepcopy(initial)\n self.assertFalse(execute(bots, outputs, 0, instructions))\n self.assertEqual(bots, initial)\n self.assertEqual(outputs, [None] * 4)\n\n self.assertFalse(execute(bots, outputs, 3, instructions))\n self.assertEqual(bots, initial)\n self.assertEqual(outputs, [None] * 4)\n\n self.assertTrue(execute(bots, outputs, 1, instructions))\n bots = {k: sorted(v) for k, v in bots.items()}\n self.assertEqual(bots, {0: [42, 60], 1: [], 2: [15, 61], 3: [], 4: [16, 62]})\n self.assertEqual(outputs, [None, None, 13, None])\n\n bots = deepcopy(initial)\n outputs = [None] * 4\n self.assertTrue(execute(bots, outputs, 2, instructions))\n bots = {k: sorted(v) for k, v in bots.items()}\n self.assertEqual(bots, {0: [42, 61], 1: [13, 60], 2: [], 3: [15], 4: [16, 62]})\n self.assertEqual(outputs, [None] * 4)\n\n bots = deepcopy(initial)\n outputs = [None] * 4\n self.assertTrue(execute(bots, outputs, 4, instructions))\n bots = {k: sorted(v) for k, v in bots.items()}\n self.assertEqual(bots, {0: [42], 1: [13, 60], 2: [15, 61], 3: [], 4: [], 5: [62], 6: [16]})\n self.assertEqual(outputs, [None] * 4)\n\n\nclass Test08(unittest.TestCase):\n def test_run(self):\n bots, instructions = read_file(\"example.txt\")\n self.assertEqual(run(bots, instructions), [5, 2, 3])\n\n bots, instructions = read_file(\"example2.txt\")\n self.assertEqual(run(bots, instructions), [4, 2, 3, 5, 6, 1])\n\n bots, instructions = read_file(\"input.txt\")\n self.assertEqual(run(bots, instructions), [7, 53, 37, 29, 13, 59, 3, 47, 23, 73, 41, 5, 61, 19, 43, 17, 2, 67, 11, 31, 71])\n\n\nclass Test09(unittest.TestCase):\n def test_count_paths(self):\n bots, instructions = read_file(\"example.txt\")\n self.assertEqual(count_paths(0, instructions), 2)\n self.assertEqual(count_paths(1, instructions), 3)\n self.assertEqual(count_paths(2, instructions), 5)\n\n bots, instructions = read_file(\"example2.txt\")\n self.assertEqual(count_paths(7, instructions), 2)\n self.assertEqual(count_paths(6, instructions), 3)\n self.assertEqual(count_paths(5, instructions), 3)\n self.assertEqual(count_paths(4, instructions), 6)\n self.assertEqual(count_paths(3, instructions), 4)\n self.assertEqual(count_paths(2, instructions), 7)\n self.assertEqual(count_paths(1, instructions), 10)\n\n\nclass 
Test10(unittest.TestCase):\n def test_bots_on_path(self):\n bots, instructions = read_file(\"example2.txt\")\n self.assertEqual(bots_on_path(4, instructions), {1, 2, 4, 6})\n\n bots, instructions = read_file(\"input.txt\")\n self.assertEqual(bots_on_path(0, instructions),\n {0, 129, 2, 3, 132, 136, 11, 140, 12, 141, 16, 17, 148,\n 21, 150, 151, 22, 153, 25, 27, 154, 29, 158, 152, 32,\n 161, 155, 36, 165, 37, 39, 40, 168, 43, 172, 45, 46,\n 176, 49, 50, 52, 53, 182, 57, 187, 188, 189, 191, 64,\n 65, 197, 201, 73, 205, 206, 207, 77, 209, 84, 85, 86,\n 87, 89, 91, 93, 97, 101, 103, 105, 114, 117, 119, 121,\n 123}\n )\n\n'''\nclass TestZaZdolgocasene(unittest.TestCase):\n def test_count_paths(self):\n bots, instructions = read_file(\"input.txt\")\n self.assertEqual(count_paths(2, instructions), 1771605360)\n self.assertEqual(count_paths(97, instructions), 24466267020)\n'''\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Python/DN11-Boti/naloga.py","file_name":"naloga.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"455273764","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\nimport pypyodbc\r\nfrom utils import *\r\n\r\n\r\nclass ExchangeDialog(QDialog):\r\n\r\n def __init__(self, parent=None):\r\n QDialog.__init__(self, parent)\r\n self.setWindowTitle('兑换奖品')\r\n self.resize(300, 200)\r\n\r\n main_layout = QVBoxLayout()\r\n main_layout.setSpacing(8)\r\n\r\n search_layout = QHBoxLayout()\r\n id_label = QLabel('会员卡号: ')\r\n id_lineEdit = QLineEdit()\r\n prize_label = QLabel('奖品名称: ')\r\n prize_lineEdit = QLineEdit()\r\n ok_btn = QPushButton('确定')\r\n\r\n search_layout.addWidget(id_label)\r\n search_layout.addWidget(id_lineEdit)\r\n\r\n content_layout = QHBoxLayout()\r\n content_layout.addWidget(prize_label)\r\n content_layout.addWidget(prize_lineEdit)\r\n\r\n btn_layout = QHBoxLayout()\r\n btn_layout.addStretch()\r\n btn_layout.addWidget(ok_btn)\r\n ok_btn.clicked.connect(lambda: self.exchange_prize(id_lineEdit, prize_lineEdit))\r\n main_layout.addLayout(search_layout)\r\n main_layout.addLayout(content_layout)\r\n main_layout.addLayout(btn_layout)\r\n\r\n self.setLayout(main_layout)\r\n\r\n @pyqtSlot()\r\n def exchange_prize(self, id_lineEdit, prize_lineEdit):\r\n if id_lineEdit.text() == '' or prize_lineEdit.text() == '':\r\n toastWarning(self, '不能为空')\r\n return\r\n # 兑换奖品\r\n conn = pypyodbc.connect(\r\n 'driver={SQL Server};server=localhost;database=market;UID=sa;PWD=1234')\r\n cur = conn.cursor()\r\n credit = 0\r\n need_credit = 0\r\n\r\n cur.execute('''select Count(*) from card where 会员卡号 = ?''', [id_lineEdit.text()])\r\n for row in cur.fetchall():\r\n if row[0] == 0:\r\n toastWarning(self, '会员卡不存在')\r\n return False\r\n\r\n cur.execute('''select Count(*) from prize where 奖品名称 = ?''', [prize_lineEdit.text()])\r\n for row in cur.fetchall():\r\n if row[0] == 0:\r\n toastWarning(self, '奖品不存在')\r\n return False\r\n cur.execute('''select 积分 from card where 会员卡号 = ?''', [id_lineEdit.text()])\r\n for row in cur.fetchall():\r\n credit = row[0]\r\n\r\n cur.execute('''select 兑换积分 from prize where 奖品名称 = ?''', [prize_lineEdit.text()])\r\n for row in cur.fetchall():\r\n need_credit = row[0]\r\n\r\n if need_credit > credit:\r\n toastWarning(self, '积分不足!')\r\n return\r\n\r\n cur.execute('''insert into record values(?, ?)''', [id_lineEdit.text(), prize_lineEdit.text()])\r\n cur.commit() \r\n cur.execute('''update 
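# Condensed sketch of the chip-routing rule that execute()/run() above
# implement: any bot holding two chips hands the min/max to the targets
# named in its instruction (data shapes assumed to match read_file above).
from collections import defaultdict

def step(bots, outputs, instructions):
    for bot, chips in list(bots.items()):
        if len(chips) == 2:
            (k_lo, d_lo), (k_hi, d_hi) = instructions[bot]
            (bots if k_lo == "bot" else outputs)[d_lo].append(min(chips))
            (bots if k_hi == "bot" else outputs)[d_hi].append(max(chips))
            bots[bot] = []
            return True
    return False

bots = defaultdict(list, {1: [3, 5], 2: [2]})
outputs = defaultdict(list)
instructions = {1: (("bot", 2), ("output", 0)),
                2: (("output", 1), ("output", 2))}
while step(bots, outputs, instructions):
    pass
print(dict(outputs))  # {0: [5], 1: [2], 2: [3]}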
card set 积分 = ? where 会员卡号 = ?''', [int(credit) - int(need_credit), id_lineEdit.text()])\r\n cur.commit()\r\n # 成功\r\n self.accept()\r\n","sub_path":"exchange_prize.py","file_name":"exchange_prize.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"585679240","text":"from itertools import permutations as c\r\nl=[]\r\nwhile 1:\r\n try:\r\n l.extend(input().split())\r\n except:\r\n break\r\nk=list(c(l,2))\r\np=[]\r\nfor i,j in k:\r\n p.append(i+j)\r\nfor i in sorted(set(p)):\r\n print(i)","sub_path":"compoundwords.py","file_name":"compoundwords.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"76711380","text":"import cv2\nimport PIL\nimport numpy as np\nimport os\nfrom cv2 import aruco\nimport math\nimport imutils\nimport argparse\nfrom main_functions import *\n\n\nclass Persons():\n def __init__(self, id, mod):\n self.x = 0\n self.y = 0\n self.id = id\n self.count = 0\n self.mod = mod\n\n def draw(self, frame):\n cv2.circle(frame, (self.x, self.y + self.mod), 4,\n (255, 255, 255), thickness=1, lineType=8, shift=0)\n cv2.putText(frame, \"{}\".format(self.id), (self.x, self.y + self.mod - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 1, cv2.LINE_AA)\n\n\ndef draw(human, frame_markers):\n for (x, y, w, h) in human:\n cv2.rectangle(frame_markers, (x, y), (x + w, y + h), (0, 0, 255), 3)\n\n\ndef Draw_map(room_map, room_x, room_y, Cam_x, mod):\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n cv2.rectangle(room_map, (20, 20),\n (room_x - mod, room_y - mod), (255, 255, 255), 2)\n\n cv2.putText(room_map, 'Cam', (Cam_x, 13), font, 0.5,\n (255, 255, 255), 1, cv2.LINE_AA)\n\n\ndef find_dist(x1, x2, y1, y2):\n '''Finds Euclidean distance between the two points'''\n return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)\n\n\ndef Compute_Depth(focal, dist_pix, dist_real):\n return (dist_real * focal) / dist_pix\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"This script detects faces from web cam input, \"\n \"and estimates age and gender for the detected faces.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"--width\", type=int, required=True,\n help=\"width the room \")\n parser.add_argument(\"--breadth\", type=int, required=True,\n help=\"depth of the room\")\n\n parser.add_argument(\"--cam\", type=int, default=5,\n help=\"width of the camera\")\n\n args = parser.parse_args()\n return args\n\n\ndef Foot_to_cms(val):\n return int(30.48 * val)\n","sub_path":"main_functions.py","file_name":"main_functions.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"3407848","text":"\"\"\"\nGiven a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.\n\nYou may assume the array's length is at most 10,000.\n\nExample:\n\nInput:\n[1,2,3]\n\nOutput:\n2\n\nExplanation:\nOnly two moves are needed (remember each move increments or decrements one element):\n\n[1,2,3] => [2,2,3] => [2,2,2]\n\"\"\"\n\n\nclass Solution(object):\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n average = nums[int(len(nums)/2)]\n result = sum([abs(num - average) for num in nums])\n return result\n\n\nif __name__ == 
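# Worked example of the pinhole relation behind Compute_Depth() above: an
# object of real width W that spans w pixels under a focal length of f
# pixels sits at depth Z = W * f / w.
def compute_depth(focal_px, width_px, width_real):
    return width_real * focal_px / width_px

print(compute_depth(focal_px=700, width_px=100, width_real=0.5))  # 3.5 (m)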
'__main__':\n solution = Solution().minMoves2([4,2,3])\n print(solution)","sub_path":"package_401_500/package_461_480/462. Minimum Moves to Equal Array Elements II.py","file_name":"462. Minimum Moves to Equal Array Elements II.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"474063821","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# Version: 1.0\n# Author: jtahstu\n# Contact: root@jtahstu.com\n# Site: blog.jtahstu.com\n# Software: PyCharm\n# Time: 2018/8/23 18:09\n\nclass Solution:\n def rotate(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n tmp = nums[:]\n length = len(nums)\n for i in range(0, k):\n nums[i] = tmp[length - k + i]\n for j in range(k, length):\n nums[j] = tmp[j - k]\n return nums\n\n k = k % len(nums)\n nums[:] = nums[-k:] + nums[:-k]\n\n\ndef init():\n nums = [1, 2, 3, 4, 5, 6, 7]\n k = 8 % 7\n print(Solution().rotate(nums, k))\n\n\nif __name__ == '__main__':\n init()\n","sub_path":"2018/leetcode/rotate-array.py","file_name":"rotate-array.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"552742316","text":"import numpy as np\ndef shell_sorted(alist):\n\n '''\n 1. 初始化参数定义,间隔\n 2.\n '''\n length = len(alist)\n increment = length // 2\n\n while increment != 0:\n indexs = []\n i = 0\n while i < increment:\n\n k = 0\n child_indexs = []\n indexs.append(child_indexs)\n while i + increment * k <= length - 1:\n\n child_indexs.append(i + increment * k)\n k += 1\n\n i += 1\n # 已提取到本次各个列表的索引号\n new = [1] * length\n for index in indexs:\n\n length_child = len(index)\n for i in range(2, length_child + 1):\n\n for j in range(0, length_child - i + 1):\n\n if alist[index[j]] > alist[index[j + 1]]:\n alist[index[j]], alist[index[j + 1]] = alist[index[j + 1]], alist[index[j]]\n\n increment -= 1\n return alist\n\n\nif __name__ == '__main__':\n n_samples = 9\n alist = np.random.randint(-30, 30, size=n_samples)\n print('排序前:', alist)\n l = shell_sorted(alist)\n\n # l = [1] * 10\n print('排序后:', l)","sub_path":"数据结构与算法/排序算法/shell_sorted.py","file_name":"shell_sorted.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"218917288","text":"import bpy\nimport bmesh\nimport math\nfrom random import random\nfrom rendering.BaseRenderer import BaseRenderer\nfrom rendering.utils import get_selected_shit, get_average\nfrom rendering.wave import wave_for_position\n\ndef get_mesh(\n name=\"test\",\n material_name=\"bluelight\",\n location=None,\n size=1,\n resolution=6,\n scale=(1,1,1)\n ):\n if location is None: location = [0,0,0]\n bpy.ops.mesh.primitive_uv_sphere_add(\n segments=resolution,\n ring_count=resolution,\n size=size,\n location=location)\n bpy.context.scene.objects.active.name = name\n mesh = bpy.data.objects[name].data\n bpy.data.objects[name].select = True\n bpy.ops.transform.resize(value=scale)\n bpy.ops.object.mode_set(mode='EDIT')\n if material_name is not None:\n mat = bpy.data.materials.get(material_name)\n mesh.materials.append(mat)\n return mesh\n\nclass Urchin(BaseRenderer):\n def __init__(\n self,\n name,\n location,\n size,\n spikes_factor=0,\n material_name=None,\n resolution=6,\n rotation=(0, 0, 0),\n scale=(1,1,1),\n real_urchin=False):\n super(Urchin, self).__init__(name, location)\n 
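# The rotate() method above returns before its trailing slice lines can
# ever run; the slice version alone is the idiomatic in-place rotation.
def rotate(nums, k):
    k %= len(nums)
    nums[:] = nums[-k:] + nums[:-k]  # assign through the slice so callers see it

nums = [1, 2, 3, 4, 5, 6, 7]
rotate(nums, 8 % 7)
print(nums)  # [7, 1, 2, 3, 4, 5, 6]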
self.size = size\n self.spikes_factor = spikes_factor\n self.material_name = material_name\n self.resolution=resolution\n self.rotation=rotation\n self.scale=scale\n self.real_urchin=real_urchin\n\n def create_model(self):\n mesh = get_mesh(\n name=self.name,\n size=self.size,\n location=self.location,\n material_name=self.material_name,\n resolution=self.resolution,\n scale=self.scale,\n )\n bm = self.get_bm(mesh)\n\n self.make_spikes(bm)\n\n bmesh.update_edit_mesh(mesh)\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.context.scene.objects.active.rotation_euler = self.rotation\n bpy.context.scene.objects.active.name = self.name\n\n def make_spikes(self, bm):\n selected_vertices, selected_face, selected_edges = get_selected_shit(bm)\n sphere_center = get_average(selected_vertices)\n for i in range(len(selected_vertices)):\n v = selected_vertices[i]\n if self.real_urchin:\n if i%2 != 0 and math.floor(i/self.resolution)%2 != 0:\n v.co = v.co.lerp(sphere_center, self.spikes_factor)\n else:\n if i%2 != 0:\n v.co = v.co.lerp(sphere_center, self.spikes_factor)\n","sub_path":"rendering/Urchin.py","file_name":"Urchin.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"137653315","text":"import logging\nfrom typing import Any, Dict\n\nimport fastjsonschema\nimport jmespath\nfrom jmespath.exceptions import LexerError\n\nfrom .exceptions import InvalidEnvelopeExpressionError, InvalidSchemaFormatError, SchemaValidationError\nfrom .jmespath_functions import PowertoolsFunctions\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate_data_against_schema(data: Dict, schema: Dict):\n \"\"\"Validate dict data against given JSON Schema\n\n Parameters\n ----------\n data : Dict\n Data set to be validated\n schema : Dict\n JSON Schema to validate against\n\n Raises\n ------\n SchemaValidationError\n When schema validation fails against data set\n InvalidSchemaFormatError\n When JSON schema provided is invalid\n \"\"\"\n try:\n fastjsonschema.validate(definition=schema, data=data)\n except fastjsonschema.JsonSchemaException as e:\n message = f\"Failed schema validation. Error: {e.message}, Path: {e.path}, Data: {e.value}\" # noqa: B306, E501\n raise SchemaValidationError(message)\n except (TypeError, AttributeError) as e:\n raise InvalidSchemaFormatError(f\"Schema received: {schema}. Error: {e}\")\n\n\ndef unwrap_event_from_envelope(data: Dict, envelope: str, jmespath_options: Dict) -> Any:\n \"\"\"Searches data using JMESPath expression\n\n Parameters\n ----------\n data : Dict\n Data set to be filtered\n envelope : str\n JMESPath expression to filter data against\n jmespath_options : Dict\n Alternative JMESPath options to be included when filtering expr\n\n Returns\n -------\n Any\n Data found using JMESPath expression given in envelope\n \"\"\"\n if not jmespath_options:\n jmespath_options = {\"custom_functions\": PowertoolsFunctions()}\n\n try:\n logger.debug(f\"Envelope detected: {envelope}. JMESPath options: {jmespath_options}\")\n return jmespath.search(envelope, data, options=jmespath.Options(**jmespath_options))\n except (LexerError, TypeError, UnicodeError) as e:\n message = f\"Failed to unwrap event from envelope using expression. 
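# bpy/bmesh only exist inside Blender, but make_spikes() above reduces to a
# linear interpolation of alternating vertices toward the centre; the same
# idea in plain numpy (a sketch, not the Blender implementation):
import numpy as np

verts = np.array([[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0], [-1.0, 0, 0]])
center = verts.mean(axis=0)
spikes_factor = 0.6
spiked = verts.copy()
spiked[1::2] += (center - spiked[1::2]) * spikes_factor  # v.co.lerp(center, f)
print(spiked.round(3))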
Error: {e} Exp: {envelope}, Data: {data}\" # noqa: B306, E501\n raise InvalidEnvelopeExpressionError(message)\n","sub_path":"aws_lambda_powertools/utilities/validation/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"408563156","text":"\"\"\"Problem 43 - Sub-string divisibility\n\nSuper clear amiright... Aparet from being very unclear also quite slow!\n\"\"\"\nfrom itertools import permutations\n\nfrom eulerlib import get_primes\n\n\nprimes = get_primes(17, sort=True)\nanswer = 0\nfor perm in permutations(range(10), 10):\n if all([int(\"\".join([str(n) for n in perm[i + 1:i + 1 + 3]])) % primes[i] == 0 for i in range(7)]):\n answer += int(\"\".join([str(n) for n in perm]))\nprint(answer)\n","sub_path":"python/p043.py","file_name":"p043.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"19892758","text":"import ujson as json\nimport urequests as requests\nimport time\nimport dht\nimport machine\nfrom machine import Pin\nfrom machine import SPI\nimport ssd1306\nfrom machine import I2C\nfrom upy_rfm9x import RFM9x\n\nTIMEOUT = .2\nimport time\n\n#radio\nsck=Pin(25)\nmosi=Pin(33)\nmiso=Pin(32)\ncs = Pin(26, Pin.OUT)\nresetNum=27\nspi=SPI(1,baudrate=5000000,sck=sck,mosi=mosi,miso=miso)\nrfm9x = RFM9x(spi, cs, resetNum, 915.0)\n\n# set up the display\ni2c = I2C(-1, Pin(14), Pin(2))\noled = ssd1306.SSD1306_I2C(128, 64, i2c)\n\n# set up the 'done' pin\ndone_pin=Pin(22,Pin.OUT)\ndone_pin.value(0)\n\n# indicate that we're starting up\noled.fill(0)\noled.text(\"Starting up ...\",0,0)\noled.show()\n\n# set up the DHT22 temp + humidity sensor\nd = dht.DHT22(machine.Pin(18))\n\n# set up FARMOS params\nbase_url='https://wolfesneck.farmos.net/farm/sensor/listener/'\npublic_key='0dd7e8cbc2db252e801cb83bee9868f4'\nprivate_key='3f9f85b99317b343bc54c919c81a3073'\nurl = base_url+public_key+'?private_key='+private_key\nheaders = {'Content-type':'application/json', 'Accept':'application/json'}\n\ngeoduck_url = \"http://192.168.0.113:3002\"\ngeoduck_headers = {'Content-type':'application/json', 'Accept':'application/json'}\n\n\n# wifi parameters\n#WIFI_NET = 'Artisan\\'s Asylum'\n#WIFI_PASSWORD = 'I won\\'t download stuff that will get us in legal trouble.'\n\nWIFI_NET = 'TP-LINK_2.4GHz_9372CD'\nWIFI_PASSWORD = '15783332'\n\n# function for posting data\ndef post_farmos():\n try:\n r = requests.post(url,data=json.dumps(payload),headers=headers)\n except Exception as e:\n print(e)\n #r.close()\n return \"timeout\"\n else:\n r.close()\n print('Status', r.status_code)\n return \"posted\"\n\ndef post_geoduck():\n try:\n r = requests.post(geoduck_url,data=json.dumps(payload),headers=geoduck_headers)\n except Exception as e:\n print(e)\n #r.close()\n return \"timeout\"\n else:\n r.close()\n print('Status', r.status_code)\n return \"posted\"\n\n# function for connecting to wifi\ndef do_connect():\n import network\n sta_if = network.WLAN(network.STA_IF)\t\n if not sta_if.isconnected():\n print('connecting to network...')\n sta_if.active(False)\n sta_if.active(True)\n sta_if.connect(WIFI_NET, WIFI_PASSWORD)\n while not sta_if.isconnected():\n pass\n print('network config:', sta_if.ifconfig())\n\nindex=0\n\n# main loop\n\n\noled.fill(0)\noled.text(\"Connecting \"+str(index),0,20)\noled.show()\n \n \ndo_connect()\n \nwhile True:\n # make measurements\n \n \n #print(batt,temp,humid)\n \n \n 
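# Quick illustration of the envelope unwrapping performed by
# unwrap_event_from_envelope() above: jmespath extracts a sub-document from
# a dict with a path expression (requires the jmespath package).
import jmespath

event = {"Records": [{"body": {"id": 42}}]}
print(jmespath.search("Records[0].body", event))  # {'id': 42}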
rfm9x.receive(timeout=TIMEOUT)\n\n if rfm9x.packet is not None:\n print(rfm9x.packet)\n try:\n txt=str(rfm9x.packet,'ascii')\n print(txt.split(\",\"))\n items=txt.split(\",\")\n \n if len(items)==5:\n count=items[0]\n batt=float(items[1])/100.\n fence=int(items[2])\n #temp_1=float(items[3])/100.\n #temp_2=float(items[4])/100.\n #temp_2=float(items[4].split('\\x')[0])\n #print(count,batt,light,temp_1,temp_2)\n #oled.fill(0)\n #oled.text(str(count),0,0)\n #oled.show()\n \n payload ={\"count\":count,\"batt\":batt,\"fence\":fence}\n \n print(payload)\n \n # post the data\n #if DISPLAY==True:\n oled.text(\"Posting to FarmOS...\",0,30)\n oled.show()\n #print(\"posting to farmos\")\n #post_farmos()\n \n oled.text(\"Posting to Geoduck ...\",0,40)\n oled.show()\n post_geoduck()\n \n \n oled.text(\"Posted.\",0,50)\n oled.show() \n \n time.sleep(2)\n \n except Exception as e:\n print(e) \n","sub_path":"quahog/fence_geoduck.py","file_name":"fence_geoduck.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"251201571","text":"from datetime import datetime\n#from playsound import playsound\nalarm_time = input(\"Enter alarm time in HH:MM:SS\\n\")\nalarm_hour = alarm_time[0:2]\nalarm_minute = alarm_time[3:5]\nalarm_second = alarm_time[6:8]\nalarm_period = alarm_time[9:11].upper()\nprint(\"Setting up alarm..\")\nwhile True:\n now =datetime.now()\n current_hour=now.strftime(\"%I\")\n current_minute = now.strftime(\"%M\")\n current_seconds = now.strftime(\"%S\")\n current_period = now.strftime(\"%p\")\n if(alarm_hour == current_period):\n if(alarm_hour == current_hour):\n if(alarm_minute == current_hour):\n if(alarm_second == current_seconds):\n print(\"Wake up!\")\n #playsound('audio.mp3')\n break","sub_path":"Alarm.py","file_name":"Alarm.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"12270742","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport random\nimport re\nimport numpy as np\nimport pandas as pd\nimport torch as th\n\n# subj_file = \"/home/hadoop/rotten_imdb/subj.data\"\n# obj_file = \"/home/hadoop/rotten_imdb/obj.data\"\n# tr, dev, te = load_data(subj_file, obj_file)\n\ndef load_data(subj_file, obj_file): # 5000 obj - 5000 subj | 8500 train - 500 dev - 1000 test\n def TextFile2Dataset(filepath, label):\n sents = open(filepath, encoding=\"latin\")\n instances = [(label, line) for line in sents]\n return instances\n subj_set = TextFile2Dataset(subj_file, 0)\n obj_set = TextFile2Dataset(obj_file, 1)\n total_set = []\n total_set.extend(subj_set)\n total_set.extend(obj_set)\n total_set = random.sample(total_set, len(total_set))\n train_set = total_set[:8500]\n test_set = total_set[8500:9500]\n dev_set = total_set[9500:]\n return train_set, dev_set, test_set\n\ndef LabelSmooth(label, epsilon):\n cls_num = len(label[0])\n label = label + (epsilon)/(1.0*(cls_num-1))\n for i in range(len(label)):\n for j in range(len(label[0])):\n if label[i][j]>1:\n label[i][j] -= 2*epsilon\n return label\n\nclass SubjObjReader:\n def __init__(self, dataset, batchsize, tokenizer=None):\n length = len(dataset)\n self.length = (length//batchsize)*batchsize\n \n self.label = np.zeros([self.length, 2])\n [self.label.itemset((idx, int(item[0])), 1) for (idx,item) in enumerate(dataset[:self.length])]\n \n# self.label = LabelSmooth(self.label, 0.2)\n \n if tokenizer is not None:\n self.words = np.array([ 
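# The matching chain in the alarm loop above compares mismatched fields
# (hour against period, minute against hour); a corrected check compares
# the whole formatted timestamp at once:
from datetime import datetime

def alarm_matches(alarm_time):  # alarm_time like "07:30:00 AM"
    return datetime.now().strftime("%I:%M:%S %p") == alarm_time.upper()

print(alarm_matches("07:30:00 AM"))  # True only at that exact second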
\n tokenizer.encode(item[1], add_special_tokens=True)\n for item in dataset[:self.length]\n ])\n else:\n self.words = np.array([self.text2words(item[0]) for item in dataset[:self.length]])\n self.words_num = np.array([len(text) for text in self.words])\n self.max_sent_len = max(self.words_num)\n ##################convert into data batchs#################\n self.words = self.words.reshape(-1, batchsize)\n self.label = self.label.reshape(-1, batchsize, 2)\n self.words_num = self.words_num.reshape(-1, batchsize)\n def text2words(self, text):\n rep_dic = {'1':'one ', \n '2':'two ', \n '3':'three ', \n '4':'four ', \n '5':'fine ', \n '6':'six ', \n '7':'seven ', \n '8':'eight ', \n '9':'nine ', \n '0':'zero '}\n for k, v in rep_dic.items():\n text = text.replace(k, v)\n words = re.split('(?:[^a-zA-Z]+)', text.lower().strip() )\n return words\n \n def reset_batchsize(self, new_batch_size):\n self.words = self.words.reshape(-1, new_batch_size)\n self.label = self.label.reshape(-1, new_batch_size, 2)\n self.words_num = self.words_num.reshape(-1, new_batch_size)\n\n def sample(self):\n batches, _, _ = self.label.shape\n batch_idx = random.randint(0, batches-1)\n return self.words[batch_idx], self.label[batch_idx], self.words_num[batch_idx]\n \n def iter(self):\n for x, y, l in zip(self.words, self.label, self.words_num):\n yield x, y, l \n\n","sub_path":"SubjObjLoader.py","file_name":"SubjObjLoader.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"256344742","text":"# -*- coding: utf-8 -*-\n\n\nSERVER_ERROR = {'code': 50000, 'msg': 'Server error.'}\nNOT_FOUND = {'code': 50001, 'msg': 'Not found.'}\nPARAM_NOT_FOUND = {'code': 50002, 'msg': 'Requested parameter not found.'}\nPARAM_INVALID = {'code': 50003, 'msg': 'Parameter invalid.'}\nALREADY_OAUTH = {'code': 50004, 'msg': 'Already authorized.'}\nSIGNATURE_INVALID = {'code': 50005, 'msg': 'Signature invalid.'}\nNOT_OAUTH = {'code': 50006, 'msg': 'Not authorized.'}\nREMOTE_SERVER_ERROR = {'code': 50007, 'msg': 'Remote server error.'}\n\nSETTINGS_DATA_NOT_FOUND = {'code': 50008, 'msg': 'Settings data not found.'}\n\nCUSTOMER_ID_NOT_FOUND = {'code': 50009, 'msg': 'Customer id is required.'}\nALGORITHM_NOT_FOUND = {'code': 50010, 'msg': 'Algorithm is required.'}\nALGORITHM_NAME_ERROR = {'code': 50011, 'msg': 'Algorithm must be one of 3sigma or LSTM.'}\n\nTHRESHOLD_TYPE_NOT_FOUND = {'code': 50012, 'msg': 'Threshold type is required.'}\nTHRESHOLD_TYPE_ERROR = {'code': 50012, 'msg': 'Threshold type is invalid'}\nTHRESHOLD_TIMES_NOT_FOUND = {'code': 50013, 'msg': 'Threshold times is required.'}\nTHRESHOLD_TIMES_RANGE = {'code': 50014, 'msg': 'Threshold times must be a number greater than 0 and in the range 0-100.'}\n\nUPLINK_VALIDATE_ERROR_MSG = {'code': 50015, 'msg': 'Uplink must be a positive number and support K/M/G suffix format.'}\n\nCUSTOMER_PROFILE_NOT_FOUND = {'code': 50016, 'msg': 'Customer profile not found.'}\n\n","sub_path":"common/result_code.py","file_name":"result_code.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"485101789","text":"from math import sqrt\nfrom random import randint\n\n\ndef primality(num1):\n\tif (num1==1):\n\t\treturn False\n\tif (num1%2==0):\n\t\treturn False\n\tif (num1%3==0):\n\t\treturn False\n\tk=1\n\twhile (6*k)-1 <= sqrt(num1):\n\t\tif num1%((6*k)-1)==0:\n\t\t\treturn False\n\t\tif num1%((6*k)+1)==0:\n\t\t\treturn 
False\n\t\tk = k+1\n\treturn True\n\ndef fermatPrimality(n, k):\n\t#n must be greater than 3\n\twhile k > 0:\n\t\ta = randint(1, n)\n\t\tif (pow(a, n-1, n) != 1):\n\t\t\treturn False\n\t\tk = k-1\n\treturn True\n\ndef randPrime(bitLength):\n\t#k=1\n\twhile True:\n\t\t#print str(k) + '\\n'\n\t\tnum1 = randint(2**(bitLength-1), (2**bitLength)-1)\n\t\tif (fermatPrimality(num1,10000)==True):\n\t\t\treturn num1\n\t\t#k = k+1\n\ndef coprimeTest(num1, num2):\n\t# Euclidean Algorithm\n\twhile True:\n\t\tif (num1 > num2):\n\t\t\tnum1 = num1-num2\n\t\telif (num2 > num1):\n\t\t\tnum2 = num2-num1\n\t\telse:\n\t\t\treturn False\n\t\tif ((num1 == 1) or (num2 == 1)):\n\t\t\treturn True\n\t\tif (num1 == 0 or num2 == 0):\n\t\t\treturn False\n\ndef coprimeTest2(num1, num2): # too slow\n\twhile (num2 != 0):\n\t\tt = num2\n\t\tnum2 = num1%num2\n\t\tnum1 = num2\n\treturn num1\n\ndef egcd(a, b):\t\t# from internet source\n if a == 0:\n return (b, 0, 1)\n else:\n g, y, x = egcd(b % a, a)\n return (g, x - (b // a) * y, y)\n\ndef modinv(a, m):\t# from internet source\n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m\n\ndef message2block(message):\n\tblock = \"1\"\n\tfor k in message:\n\t\tif ord(k) < 100:\n\t\t\tblock = block+\"0\"+str(ord(k))\n\t\telse:\n\t\t\tblock = block+str(ord(k))\n\treturn int(block)\n\ndef block2message(block):\n\tblock = str(block)\n\tk = 0\n\tmessage = \"\"\n\twhile k < len(block):\n\t\tif (k==0):\n\t\t\tk = k+1\n\t\telse:\n\t\t\tletter = int(block[k:k+3])\n\t\t\tmessage = message + chr(letter)\n\t\t\tk=k+3\n\treturn message\n\ndef message2blockList(message, chrPerBlock):\n\twhile (len(message)%chrPerBlock != 0):\n\t\tmessage = message + \" \"\n\tk = 0\n\tmessageList = []\n\twhile k < len(message):\n\t\tmessageList.append(message[k:k+chrPerBlock])\n\t\tk = k+chrPerBlock\n\tblockList = []\n\tk = 0\n\twhile k < len(messageList):\n\t\tblockList.append(message2block(messageList[k]))\n\t\tk = k+1\n\treturn blockList\n\ndef blockList2message(blockList):\n\tmessage = \"\"\n\tk = 0\n\twhile k < len(blockList):\n\t\tmessage = message + block2message(blockList[k])\n\t\tk = k+1\n\treturn message\n\ndef RSAkeyGenerate(primeSize):\n\tp = randPrime(primeSize)\n\twhile True:\n\t\tq = randPrime(primeSize)\n\t\tif q != p:\n\t\t\tbreak\n\tn = p*q\n\tphi = n-(p+q-1)\n\n\t#test = 0\n\ttest = False\n\t#while (test!=1):\n\twhile (test == False):\n\t\te = randint(2,phi-1)\n\t\ttest = coprimeTest(e, phi)\n\t\t#(test, blah1, blah2) = egcd(e, phi)\n\td = modinv(e, phi)\n\treturn (e, d, n)\n\ndef RSAencrypt(block, e, n):\n\treturn pow(block, e, n)\n\ndef RSAdecrypt(block, d, n):\n\treturn pow(block, d, n)\n\ndef RSAencryptBlockList(blockList, e, n):\n\tk = 0\n\tencryptedBlockList = []\n\twhile k < len(blockList):\n\t\tencryptedBlockList.append(pow(blockList[k], e, n))\n\t\tk = k+1\n\treturn encryptedBlockList\n\ndef RSAdecryptBlockList(encryptedBlockList, d, n):\n\tk = 0\n\tblockList = []\n\twhile k < len(encryptedBlockList):\n\t\tblockList.append(pow(encryptedBlockList[k], d, n))\n\t\tk = k+1\n\treturn blockList\n\n\n\n\n# Function Tests\n#print primality(1000004099) # test primality\n#print randPrime(32)\n#print coprimeTest(15, 22065621)\n#print message2block(\"Hello World!\")\n\n\n\n#(e, d, n) = RSAkeyGenerate(512)\n#print e\n#print d\n#print n\n#message = raw_input(\">>> \")\n#block = message2blockList(message, 32)\n#block = RSAencryptBlockList(block, e, n)\n#print block\n#block = RSAdecryptBlockList(block, d, n)\n#message2 = 
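# Since Python 3.8, pow() computes modular inverses natively, so the
# egcd()/modinv() pair above can be replaced by a one-liner (textbook RSA
# numbers below are purely illustrative):
e, phi = 17, 3120        # p=61, q=53 -> n=3233, phi=3120
d = pow(e, -1, phi)
print(d, (e * d) % phi)  # 2753 1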
blockList2message(block)\n#print message2\n","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"170862055","text":"import csv\nimport time\nimport pandas as pd\nfrom selenium import webdriver\n\ndef main():\n chrome_driver = '/home/bobik/projects/tvoe_auto/avto/chromedriver'\n chrome_options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(executable_path=chrome_driver, options=chrome_options)\n driver.get('https://www.drive2.ru/cars/?all')\n name = driver.find_elements_by_xpath(\"//a[@class='c-link c-link--text']\")\n text_name = elem_to_text(name)[:6]\n car_brand_list = [i.text.split('\\n') for i in name]\n model_list = []\n print(text_name)\n for i in text_name:\n driver.get('https://www.drive2.ru/cars/' + i)\n model = driver.find_elements_by_xpath(\"//nav[@class='is-short c-makes c-makes--no-top-margin is-animated']\")\n for i in model:\n model_list.append(i.text.split('\\n'))\n with open('car_brend.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow('Brand'.split('\\n'))\n for row in car_brand_list:\n writer.writerow(row)\n\n\n print(model_list, len(model_list))\n\n\ndef elem_to_text(stri):\n text = []\n for elem in stri:\n elem = elem.text\n if elem == 'Mercedes-Benz':\n text.append('mercedes')\n elif elem == 'Лада':\n text.append('lada')\n else:\n text.append(elem.lower().replace(' ', ''))\n return text\n\nif __name__== \"__main__\":\n main()","sub_path":"avto/parser_drive2.py","file_name":"parser_drive2.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"547376711","text":"# %%writefile streamlit_app.py\n\nimport streamlit as st\nimport streamlit.components.v1 as components\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nimport os\n\nst.set_page_config(layout='wide')\nst.config.get_option(\"server.enableCORS\")\n\ndef formatfunc(*args, **kwargs):\n value = args[0]\n if value >= 0:\n return '${:,.2f}'.format(value)\n else:\n return '-${:,.2f}'.format(abs(value))\n\ndf = pd.read_csv('anon_trades_26_10_21.csv', error_bad_lines=False)\nmy_df = df[df.trade_time.str.startswith('2')]\nclosed = my_df.dropna(subset=['buy_timestamp'])\nclosed.twitter_screen_name = '@'+closed.twitter_screen_name\nmean_group = closed.groupby('twitter_screen_name')['twitter_screen_name', 'percent_pnl'].agg('mean')\nsum_group = closed.groupby('twitter_screen_name')['twitter_screen_name', 'dollar_pnl'].agg('sum')\n\ndisp_df = mean_group.join(sum_group)\ndisp_df.columns = ['Avg PnL', 'Total PnL']\ndisp_df.index.name = 'Twitter Handle'\ndisp_df = disp_df[~disp_df.index.isin(['@ArbitrageDaddy','@JPottuy','@Tradermayne','@LazyTradeBot','@thescalpingpro','@Bitstamp','@cryptonews'])]\ndisp_df.sort_values(by='Total PnL', ascending=False, inplace=True)\n\nimages = os.listdir('images')\n\n# st.title(\"Leaderboard\")\nst.subheader('Crypto Twitter Leaderboard')\nst.write(\"Leaderboard based on realised PnL of each account's followers executing trades based on their tweets\")\n\ncol1, col2, col3 = st.columns([1,1,1])\n\ncol2.dataframe(disp_df.style.format(formatter={'Avg PnL': \"{:.2f}%\", 'Total PnL': formatfunc}), width=800, height=1000)\n\nst.subheader('Historical Signals')\nall_users = sorted(set(['@'+'_'.join(i.split('_')[:-2]) for i in images if 
i.endswith('USDT.html')]),key=lambda x: x.lower())\nall_users = tuple([i for i in all_users if i not in ['@ArbitrageDaddy', '@JPottuy']])\n\nusers = st.multiselect('Choose users to track ', all_users, default=['@elonmusk'])\n\nfor user in users:\n# coin_list = ['1INCH','ADA','ATOM','AXS','BNB','BTC','DOGE','DOT','ETH','LINK','LTC','LUNA','RUNE','SOL','SUSHI''UNI','XRP']\n\n anti_coins = ['OMG','OG','HNT','ICX','MKR','CVC']\n \n all_coins = sorted([i[len(user):].split('_')[0] for i in list(filter(lambda x : x.startswith(user[1:]) and x.endswith('USDT.html'), images))])\n all_coins = [i for i in all_coins if i not in anti_coins]\n coins = st.multiselect('Choose coins to plot', all_coins, default=['BTC' if 'BTC' in all_coins else all_coins[0]])\n\n for coin in coins:\n _, mid, _ = st.columns([1,1,1])\n mid.subheader('%s %s' % (user, coin))\n# st.image('images/%s_%s_BTC.png' % (user[1:], coin))\n \n HtmlFile = open('images/%s_%s_USDT.html' % (user[1:], coin), 'r', encoding='utf-8')\n source_code_2 = HtmlFile.read()\n components.html(source_code_2, height=700)\n","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"214482387","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import declarative_base, sessionmaker\nfrom sqlalchemy import Column, Integer, String\n\n\nengine = create_engine(\n \"mysql+mysqlconnector://root:secret@127.0.0.1:3306/mydatabase\",\n echo=True,\n)\nSession = sessionmaker(bind=engine)\nSESSION = Session()\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n first_name = Column(String(50))\n last_name = Column(String(50))\n\n\nBase.metadata.create_all(engine)\n\n\ndef add_user(first_name, last_name):\n new_user = User(first_name=first_name, last_name=last_name)\n SESSION.add(new_user)\n SESSION.commit()\n return {\n \"id\": new_user.id,\n \"first_name\": new_user.first_name,\n \"last_name\": new_user.last_name\n }\n\n\ndef get_users():\n users = []\n users_in_db = SESSION.query(User).all()\n for user in users_in_db:\n new_user = {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name\n }\n users.append(new_user)\n return users\n\n\ndef get_user_by_id(id):\n user = SESSION.query(User).filter_by(id=id).first()\n if user:\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name\n }\n\n\ndef delete_user(id):\n user = SESSION.query(User).filter_by(id=id).first()\n if user:\n print(f\"Found user with ID: {id}\")\n SESSION.delete(user)\n SESSION.commit()\n else:\n print(f\"User with ID: {id} not found.\")\n\n\ndef update_user(id, first_name=None, last_name=None):\n user = SESSION.query(User).filter_by(id=id).first()\n if user:\n if first_name:\n user.first_name = first_name\n if last_name:\n user.last_name = last_name\n SESSION.commit()\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name\n }\n","sub_path":"main_orm.py","file_name":"main_orm.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"319870855","text":"import os\nimport csv\nimport imageio\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom sklearn.manifold import TSNE\nimport sis_utils\n\nrun_clustering = False\nimg_dir, task_dir = 
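# The CRUD helpers above are wired to one specific MySQL URL; the same
# declarative pattern runs against an in-memory SQLite engine, which makes
# it easy to exercise locally (a sketch, not the module's own setup):
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, sessionmaker

engine = create_engine("sqlite:///:memory:")
Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))

Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(first_name="Ada", last_name="Lovelace"))
session.commit()
print([(u.id, u.first_name) for u in session.query(User).all()])  # [(1, 'Ada')]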
sis_utils.get_task_img_folder()\nnpy_file_name = os.path.join(task_dir, 'encoded_res50_2_sp_deeplab.npy')\ncity_order = {'Fre': 0, 'Mod': 1, 'Sto': 2}\ncity_list = ['Fresno', 'Modesto', 'Stockton']\nnp.random.seed(1004)\n\nif run_clustering:\n file_name = os.path.join(task_dir, 'temp', 'res50_fc1000_sp_deeplab.csv')\n features = []\n with open(file_name, 'r') as f:\n csv_reader = csv.reader(f)\n for row in csv_reader:\n features.append(row)\n print(len(features))\n feature_encode = TSNE(n_components=2, perplexity=25, verbose=True).fit_transform(features)\n np.save(npy_file_name, feature_encode)\nelse:\n feature_encode = np.load(npy_file_name)\nfeature_encode = np.array(feature_encode)\n\nrandom_idx = np.random.binomial(1, 0.2, feature_encode.shape[0])\npatch_ids = np.arange(feature_encode.shape[0])\nfeature_encode = feature_encode[random_idx == 1, :]\npatch_ids = patch_ids[random_idx == 1]\n\npatch_name_fname = os.path.join(task_dir, 'temp', 'res50_fc1000_sp_deeplab.txt')\nwith open(patch_name_fname, 'r') as f:\n patch_name_list = f.readlines()\n\npatch_percent_list = [patch_name_list[i] for i in range(len(patch_name_list)) if random_idx[i] == 1]\npatch_percent = np.zeros(len(patch_percent_list))\npatchDir = r'/hdd/uab_datasets/Results/PatchExtr/spca/chipExtrReg_cSz321x321_pad0'\nfor cnt, patch_name in enumerate(tqdm(patch_percent_list)):\n gt = imageio.imread(os.path.join(patchDir, '{}_GT.png'.format(patch_name.strip())))\n patch_percent[cnt] = np.sum(gt)/(gt.shape[0] * gt.shape[1])\n\npatch_name_list = [city_order[a[:3]] for a in patch_name_list]\n\npatch_name_code = []\nfor i in patch_name_list:\n patch_name_code.append(i)\npatch_name_code = np.array(patch_name_code)\npatch_name_code = patch_name_code[random_idx == 1]\n\ncmap = plt.get_cmap('Set1').colors\npatch_id = np.arange(feature_encode.shape[0])\nplt.figure(figsize=(15, 8))\n'''for i in range(3):\n plt.scatter(feature_encode[patch_name_code == i, 0], feature_encode[patch_name_code == i, 1], color=cmap[i],\n label=city_list[i], edgecolors='k')'''\nplt.scatter(feature_encode[:, 0], feature_encode[:, 1], c=patch_percent, cmap=plt.get_cmap('bwr'))\nplt.colorbar()\n\n#for i in range(feature_encode.shape[0]):\n# plt.text(feature_encode[i, 0], feature_encode[i, 1], patch_ids[i])\nplt.xlabel('Feature 1')\nplt.ylabel('Feature 2')\nplt.title('TSNE Projection Result')\nplt.legend()\nplt.tight_layout()\n# plt.savefig(os.path.join(img_dir, 'res50_tsne_proj_deeplab_spca.png'))\nplt.show()\n","sub_path":"]tasks/2018.03.02.res_gan/res50_patches_view_deeplab_spca.py","file_name":"res50_patches_view_deeplab_spca.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"242774438","text":"import socket\r\nimport sys\r\nimport time\r\nimport threading\r\nimport json\r\nimport logging\r\nimport hashlib\r\nimport hmac\r\nimport binascii\r\n\r\nfrom PyQt5.QtCore import pyqtSignal, QObject\r\nfrom common.utils import get_message, send_message\r\nfrom common.errors import ServerError, IncorrectDataRecivedError\r\nfrom common.variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, PUBLIC_KEY, RESPONSE, ERROR, DATA, \\\r\n RESPONSE_511, MESSAGE, SENDER, DESTINATION, MESSAGE_TEXT, GET_CONTACTS, LIST_INFO, PUBLIC_KEY_REQUEST, \\\r\n ADD_CONTACT, REMOVE_CONTACT, EXIT, USERS_REQUEST\r\n\r\nsys.path.append('../')\r\n\r\n# Инициализация логгера клиента\r\nLOGGER = logging.getLogger('client')\r\n# Объект блокировки сокета и работы с базой данных\r\nsock_lock = 
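# Shape-level sketch of the t-SNE projection performed above, run on
# synthetic features instead of the saved ResNet-50 activations:
import numpy as np
from sklearn.manifold import TSNE

features = np.random.rand(200, 64)
embedded = TSNE(n_components=2, perplexity=25).fit_transform(features)
print(embedded.shape)  # (200, 2)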
threading.Lock()\r\n\r\n\r\nclass ClientTransport(threading.Thread, QObject):\r\n \"\"\"\r\n Класс - Транспорт, отвечает за взаимодействие с сервером.\r\n Реализует транспортную подсистему клиентского модуля.\r\n \"\"\"\r\n # Сигналы новое сообщение и потеря соединения\r\n new_message = pyqtSignal(str)\r\n message_205 = pyqtSignal()\r\n connection_lost = pyqtSignal()\r\n\r\n def __init__(self, port, ip_address, database, username, passwd, keys):\r\n threading.Thread.__init__(self)\r\n QObject.__init__(self)\r\n\r\n self.database = database\r\n # Имя пользователя\r\n self.username = username\r\n # Сокет для работы с сервером\r\n self.transport = None\r\n # Пароль\r\n self.password = passwd\r\n # Набор ключей для шифрования\r\n self.keys = keys\r\n # Устанавливаем соединение:\r\n self.connection_init(port, ip_address)\r\n # Обновление таблиц известных пользователей и контактов\r\n try:\r\n self.user_list_request()\r\n self.contacts_list_request()\r\n except OSError as e:\r\n if e.errno:\r\n LOGGER.critical(f'Соединение с сервером потеряно')\r\n raise ServerError(f'Соедниение с сервером потеряно')\r\n LOGGER.error('Таймаут соединения при обновлении списков пользователей')\r\n except json.JSONDecodeError:\r\n LOGGER.critical(f'Соединение с сервером потеряно')\r\n raise ServerError(f'Соединение с сервером потеряно')\r\n self.running = True\r\n\r\n def connection_init(self, port, ip):\r\n \"\"\"Функция инициализации соединения с сервером.\"\"\"\r\n # Инициализация сокета и сообщение серверу о нашем появлении\r\n self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.transport.settimeout(5)\r\n\r\n # Попытка соединения\r\n connected = False\r\n for i in range(5):\r\n LOGGER.info(f'Попытка соединения с сервером №{i+1}')\r\n try:\r\n self.transport.connect((ip, port))\r\n except (OSError, ConnectionRefusedError):\r\n pass\r\n else:\r\n connected = True\r\n break\r\n time.sleep(1)\r\n\r\n # Если соединиться с сервером не удалось\r\n if not connected:\r\n LOGGER.critical('Не удалось установить соединение с сервером')\r\n raise ServerError('Не удалось установить соединение с сервером')\r\n\r\n LOGGER.debug('Запускаем авторизацию пользователя')\r\n # Запускаем процедуру авторизации\r\n # Получаем хэш пароля\r\n passwd_bytes = self.password.encode('utf-8')\r\n salt = self.username.lower().encode('utf-8')\r\n passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)\r\n passwd_hash_string = binascii.hexlify(passwd_hash)\r\n\r\n # Получаем публичный ключ и декодируем его из байтов\r\n pubkey = self.keys.publickey().export_key().decode('ascii')\r\n\r\n # Авторизируемся на сервере\r\n with sock_lock:\r\n presense = {\r\n ACTION: PRESENCE,\r\n TIME: time.time(),\r\n USER: {\r\n ACCOUNT_NAME: self.username,\r\n PUBLIC_KEY: pubkey\r\n }\r\n }\r\n LOGGER.debug(f'Приветственное сообщение: {presense}')\r\n # Посылаем серверу приветственное сообщение и получаем ответ что всё нормально или получаем исключение.\r\n try:\r\n send_message(self.transport, presense)\r\n ans = get_message(self.transport)\r\n LOGGER.debug(f'Ответ сервера: {ans}')\r\n if RESPONSE in ans:\r\n if ans[RESPONSE] == 400:\r\n raise ServerError(ans[ERROR])\r\n elif ans[RESPONSE] == 511:\r\n # Если всё нормально, то продолжаем процедуру авторизации.\r\n ans_data = ans[DATA]\r\n hash = hmac.new(passwd_hash_string, ans_data.encode('utf-8'))\r\n digest = hash.digest()\r\n my_ans = RESPONSE_511\r\n my_ans[DATA] = binascii.b2a_base64(\r\n digest).decode('ascii')\r\n send_message(self.transport, my_ans)\r\n 
self.process_server_ans(get_message(self.transport))\r\n except (OSError, json.JSONDecodeError) as e:\r\n LOGGER.critical(f'Потеряно соединение с сервером.', exc_info=e)\r\n raise ServerError('Сбой соединения с сервером в процессе авторизации.')\r\n # Сообщение об удачной установке соединения.\r\n LOGGER.debug('Соединение с сервером установлено успешно')\r\n\r\n def create_presence(self):\r\n \"\"\"Функция генерирует запрос о присутствии клиента.\"\"\"\r\n out = {\r\n ACTION: PRESENCE,\r\n TIME: time.time(),\r\n USER: {\r\n ACCOUNT_NAME: self.username\r\n }\r\n }\r\n LOGGER.debug(f'Сформировано {PRESENCE} сообщение для пользователя {self.username}')\r\n return out\r\n\r\n def process_ans(self, message):\r\n \"\"\"Функция разбирает ответ сервера. Генерирует исключение при ошибке\"\"\"\r\n LOGGER.debug(f'Разбор сообщения от сервера: {message}')\r\n if RESPONSE in message:\r\n if message[RESPONSE] == 200:\r\n return\r\n elif message[RESPONSE] == 400:\r\n raise ServerError(f'400 : {message[ERROR]}')\r\n elif message[RESPONSE] == 205:\r\n self.user_list_request()\r\n self.contacts_list_request()\r\n self.message_205.emit()\r\n else:\r\n LOGGER.debug('Неизвестный код подтверждения')\r\n\r\n # Если сообщение от пользователя, добавляем в базу\r\n elif ACTION in message and message[ACTION] == MESSAGE and SENDER in message and DESTINATION in message \\\r\n and MESSAGE_TEXT in message and message[DESTINATION] == self.username:\r\n print(f'Получено сообщение от пользователя {message[SENDER]}: {message[MESSAGE_TEXT]}')\r\n LOGGER.debug(f'Получено сообщение от пользователя {message[SENDER]}: {message[MESSAGE_TEXT]}')\r\n\r\n self.new_message.emit(message)\r\n\r\n def contacts_list_request(self):\r\n \"\"\"Функция генерирует запрос и обновление контакт листа с сервера\"\"\"\r\n self.database.contacts_clear()\r\n LOGGER.debug(f'Запрос контакт листа для пользователся {self.name}')\r\n req = {\r\n ACTION: GET_CONTACTS,\r\n TIME: time.time(),\r\n USER: self.username\r\n }\r\n LOGGER.debug(f'Сформирован запрос {req}')\r\n with sock_lock:\r\n send_message(self.transport, req)\r\n ans = get_message(self.transport)\r\n LOGGER.debug(f'Получен ответ {ans}')\r\n if RESPONSE in ans and ans[RESPONSE] == 202:\r\n for contact in ans[LIST_INFO]:\r\n self.database.add_contact(contact)\r\n else:\r\n LOGGER.error('Не удалось обновить список контактов')\r\n\r\n def user_list_request(self):\r\n \"\"\"Функция запроса списка известных пользователей\"\"\"\r\n LOGGER.debug(f'Запрос списка известных пользователей {self.username}')\r\n req = {\r\n ACTION: USERS_REQUEST,\r\n TIME: time.time(),\r\n ACCOUNT_NAME: self.username\r\n }\r\n with sock_lock:\r\n send_message(self.transport, req)\r\n ans = get_message(self.transport)\r\n if RESPONSE in ans and ans[RESPONSE] == 202:\r\n self.database.add_users(ans[LIST_INFO])\r\n else:\r\n LOGGER.error('Не удалось обновить список известных пользователей')\r\n\r\n def key_request(self, user):\r\n \"\"\"Функция запрашивающая с сервера публичный ключ пользователя.\"\"\"\r\n LOGGER.debug(f'Запрос публичного ключа для {user}')\r\n req = {\r\n ACTION: PUBLIC_KEY_REQUEST,\r\n TIME: time.time(),\r\n ACCOUNT_NAME: user\r\n }\r\n with sock_lock:\r\n send_message(self.transport, req)\r\n ans = get_message(self.transport)\r\n if RESPONSE in ans and ans[RESPONSE] == 511:\r\n return ans[DATA]\r\n else:\r\n LOGGER.error(f'Не удалось получить ключ собеседника {user}.')\r\n\r\n def add_contact(self, contact):\r\n \"\"\"Функция добавления пользователя в контакт лист\"\"\"\r\n LOGGER.debug(f'Создание контакта 
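# Both ends of the login handshake above must derive the same digest: the
# server issues random data and each side HMACs it with the stored PBKDF2
# password hash. digestmod is passed explicitly here because hmac.new()
# requires it on current Python versions (the client code above relies on
# the old implicit MD5 default):
import binascii, hashlib, hmac, os

passwd_hash = binascii.hexlify(
    hashlib.pbkdf2_hmac("sha512", b"secret", b"alice", 10000))
challenge = binascii.hexlify(os.urandom(32))
client_digest = hmac.new(passwd_hash, challenge, hashlib.md5).digest()
server_digest = hmac.new(passwd_hash, challenge, hashlib.md5).digest()
print(hmac.compare_digest(client_digest, server_digest))  # True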
{contact}')\r\n        req = {\r\n            ACTION: ADD_CONTACT,\r\n            TIME: time.time(),\r\n            USER: self.username,\r\n            ACCOUNT_NAME: contact\r\n        }\r\n        with sock_lock:\r\n            send_message(self.transport, req)\r\n            self.process_ans(get_message(self.transport))\r\n\r\n    def remove_contact(self, contact):\r\n        \"\"\"Function removes a user from the contact list\"\"\"\r\n        LOGGER.debug(f'Removing contact {contact}')\r\n        req = {\r\n            ACTION: REMOVE_CONTACT,\r\n            TIME: time.time(),\r\n            USER: self.username,\r\n            ACCOUNT_NAME: contact\r\n        }\r\n        with sock_lock:\r\n            send_message(self.transport, req)\r\n            self.process_ans(get_message(self.transport))\r\n\r\n    def transport_shutdown(self):\r\n        \"\"\"Function closes the connection after sending an exit message\"\"\"\r\n        self.running = False\r\n        message = {\r\n            ACTION: EXIT,\r\n            TIME: time.time(),\r\n            ACCOUNT_NAME: self.username\r\n        }\r\n        with sock_lock:\r\n            try:\r\n                send_message(self.transport, message)\r\n            except OSError:\r\n                pass\r\n        LOGGER.debug('Shutting down the transport')\r\n        time.sleep(0.5)\r\n\r\n    def send_message(self, to_user, message):\r\n        \"\"\" Function sends a message to the server \"\"\"\r\n        message_dic = {\r\n            ACTION: MESSAGE,\r\n            SENDER: self.username,\r\n            DESTINATION: to_user,\r\n            TIME: time.time(),\r\n            MESSAGE_TEXT: message\r\n        }\r\n        LOGGER.debug(f'Formed message: {message_dic}')\r\n        # Wait for the socket to send the message\r\n        with sock_lock:\r\n            send_message(self.transport, message_dic)\r\n            self.process_ans(get_message(self.transport))\r\n            LOGGER.info(f'Message sent to client {to_user}')\r\n\r\n    def run(self):\r\n        \"\"\"Function containing the main loop of the transport thread.\"\"\"\r\n        LOGGER.debug('Started the process for receiving messages from the server')\r\n        while self.running:\r\n            time.sleep(1)\r\n            with sock_lock:\r\n                try:\r\n                    self.transport.settimeout(0.5)\r\n                    message = get_message(self.transport)\r\n                except OSError as e:\r\n                    if e.errno:\r\n                        LOGGER.critical(f'Lost the connection to the server')\r\n                        self.running = False\r\n                        self.connection_lost.emit()\r\n                except IncorrectDataRecivedError:\r\n                    LOGGER.error(f'Failed to decode the received message')\r\n                except (ConnectionError, ConnectionAbortedError, ConnectionResetError, json.JSONDecodeError, TypeError):\r\n                    LOGGER.critical(f'Lost the connection to the server')\r\n                    self.running = False\r\n                    self.connection_lost.emit()\r\n                finally:\r\n                    self.transport.settimeout(1)\r\n\r\n            # If a message was received, call the handler function:\r\n            if message:\r\n                LOGGER.debug(f'Received a message from the server: {message}')\r\n                self.process_ans(message)\r\n","sub_path":"client/client/client/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":14833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"564952826","text":"\"\"\"\nCPSC 5520, Seattle University\nThis is free and unencumbered software released into the public domain.\n:Authors: Kevin Lundeen\n:Version: f19-02\n\"\"\"\nimport pickle\nimport selectors\nimport socket\nimport sys\nfrom datetime import datetime\nfrom enum import Enum\n\nBUF_SZ = 4096 # default socket.recv buffer size\nBACKLOG = 100 # socket.listen(BACKLOG) for our server\nASSUME_FAILURE_TIMEOUT = 2.0 # seconds without response assumes failure of peer\nCHECK_INTERVAL = 0.2 # seconds between checking for failure timeout\nPEER_DIGITS = 100 # modulo of peer port number for log (100 is useful for diagnosis)\n\n\nclass State(Enum):\n    \"\"\"\n    Enumeration of states a peer can be in for the Lab2 class.\n    \"\"\"\n    QUIESCENT = 'QUIESCENT' # Erase any 
memory of this peer\n\n # Outgoing message is pending\n SEND_ELECTION = 'ELECTION'\n SEND_VICTORY = 'COORDINATOR'\n SEND_OK = 'OK'\n\n # Incoming message is pending\n WAITING_FOR_OK = 'WAIT_OK' # When I've sent them an ELECTION message\n WAITING_FOR_VICTOR = 'WHO IS THE WINNER?' # This one only applies to myself\n WAITING_FOR_ANY_MESSAGE = 'WAITING' # When I've done an accept on their connect to my server\n\n def is_incoming(self):\n \"\"\"Categorization helper.\"\"\"\n return self not in (State.SEND_ELECTION, State.SEND_VICTORY, State.SEND_OK)\n\n\nclass Lab2(object):\n \"\"\"\n Class to perform the specified behavior for Lab 2.\n \"\"\"\n\n def __init__(self, gcd_address, next_birthday, su_id):\n \"\"\"\n Constructs a Lab2 object to talk to the given Group Coordinator Daemon.\n\n :param gcd_address: host name and port of the GCD\n :param next_birthday: datetime of next birthday\n :param su_id: SeattleU id number\n \"\"\"\n self.gcd_address = (gcd_address[0], int(gcd_address[1]))\n days_to_birthday = (next_birthday - datetime.now()).days\n self.pid = (days_to_birthday, int(su_id))\n self.members = {}\n self.states = {}\n self.bully = None # None means election is pending, otherwise this will be pid of leader\n self.selector = selectors.DefaultSelector()\n self.listener, self.listener_address = self.start_a_server()\n\n def run(self):\n \"\"\"\n Do the work of a group member as specified for Lab 2.\n \"\"\"\n print('STARTING WORK for pid {} on {}'.format(self.pid, self.listener_address))\n self.join_group()\n self.selector.register(self.listener, selectors.EVENT_READ)\n self.start_election('at startup')\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n if key.fileobj == self.listener:\n self.accept_peer()\n elif mask & selectors.EVENT_READ:\n self.receive_message(key.fileobj)\n else:\n self.send_message(key.fileobj)\n self.check_timeouts()\n\n def accept_peer(self):\n \"\"\"Accept new TCP/IP connections from a peer.\"\"\"\n try:\n peer, _addr = self.listener.accept() # should be ready since selector said so\n print('{}: accepted [{}]'.format(self.pr_sock(peer), self.pr_now()))\n self.set_state(State.WAITING_FOR_ANY_MESSAGE, peer)\n except Exception as err:\n print('accept failed {}'.format(err))\n\n def send_message(self, peer):\n \"\"\"\n Send the queued message to the given peer (based on its current state).\n\n :param peer: socket connected to peer process\n \"\"\"\n state = self.get_state(peer)\n print('{}: sending {} [{}]'.format(self.pr_sock(peer), state.value, self.pr_now()))\n try:\n self.send(peer, state.value, self.members) # should be ready, but may be a failed connect instead\n\n except ConnectionError as err:\n # a ConnectionError means it will never succeed\n print('closing: {}'.format(err))\n self.set_quiescent(peer)\n return\n\n except Exception as err:\n # other exceptions we assume are due to being non-blocking; we expect them to succeed in future\n print('failed {}: {}'.format(err.__class__.__name__, err))\n if self.is_expired(peer):\n # but if we've kept trying and trying to no avail, then give up\n print('timed out')\n self.set_quiescent(peer)\n return\n\n # check to see if we want to wait for response immediately\n if state == State.SEND_ELECTION:\n self.set_state(State.WAITING_FOR_OK, peer, switch_mode=True)\n else:\n self.set_quiescent(peer)\n\n def receive_message(self, peer):\n \"\"\"\n Receive a message from a peer and do the corresponding state transition.\n\n :param peer: socket connected to peer process\n \"\"\"\n 
try:\n message_name, their_idea = self.receive(peer)\n print('{}: received {} [{}]'.format(self.pr_sock(peer), message_name, self.pr_now()))\n\n except ConnectionError as err:\n # a ConnectionError means it will never succeed\n print('closing: {}'.format(err))\n self.set_quiescent(peer)\n return\n\n except Exception as err:\n # other exceptions we assume are due to being non-blocking; we expect them to succeed in future\n print('failed {}'.format(err))\n if self.is_expired(peer):\n # but if we've kept trying and trying to no avail, then give up\n print('timed out')\n self.set_quiescent(peer)\n return\n\n self.update_members(their_idea) # add any new members\n\n # handle state transition\n if message_name == 'ELECTION':\n self.set_state(State.SEND_OK, peer)\n if not self.is_election_in_progress():\n self.start_election('Got a VOTE card from a lower-pid peer')\n\n elif message_name == 'COORDINATOR':\n # election is over (though I might not even have known there was one!)\n self.set_leader('somebody else') # FIXME - should change the protocol to indicate WHO is the new leader\n self.set_quiescent(peer)\n self.set_quiescent()\n\n elif message_name == 'OK':\n # if I have yet to get any oks since starting election and I get an OK, I change to expecting COORDINATOR\n if self.get_state() == State.WAITING_FOR_OK:\n self.set_state(State.WAITING_FOR_VICTOR) # subsequent OKs are ignored since already waiting\n self.set_quiescent(peer)\n\n def check_timeouts(self):\n \"\"\"Check to see if we need to do a state transition because of peer failures.\"\"\"\n if self.is_expired(): # we only need to check ourselves\n if self.get_state() == State.WAITING_FOR_OK:\n self.declare_victory('timed out waiting for any OK message')\n else: # my_state == State.WAITING_FOR_VICTOR:\n self.start_election('timed out waiting for COORDINATION message')\n\n def get_connection(self, member):\n \"\"\"\n Get a socket for given member.\n Note that the connection will be done in non-blocking mode, so may not be available immediately--use select\n to pick it up when it becomes writable.\n\n :param member: process id of peer, i.e., (daystobd, suid)\n :return: socket (or None if failed)\n \"\"\"\n listener = self.members[member] # look up address to connect to\n peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n peer.setblocking(False)\n try:\n peer.connect(listener)\n\n except BlockingIOError:\n pass # connection still in progress -- will pick it up in select\n\n except Exception as err:\n print('FAILURE: get connection failed: {}'.format(err))\n return None\n\n return peer\n\n def is_election_in_progress(self):\n \"\"\"Check if we are currently in an election (signified by bully of None).\"\"\"\n return self.bully is None\n\n def is_expired(self, peer=None, threshold=ASSUME_FAILURE_TIMEOUT):\n \"\"\"\n Check if given peer's state was set more than threshold seconds ago.\n\n :param peer: socket connected to peer process (None means self)\n :param threshold: seconds to wait since last state transition\n :return: True if elapsed time is past threshold, False otherwise (or if state is quiescent)\n \"\"\"\n my_state, when = self.get_state(peer, detail=True)\n if my_state == State.QUIESCENT:\n return False\n waited = (datetime.now() - when).total_seconds()\n return waited > threshold\n\n def set_leader(self, new_leader):\n \"\"\"\n Set the leader (handy place to put a log print).\n\n :param new_leader: whatever you want self.bully to be set to\n \"\"\"\n self.bully = new_leader\n print('Leader is now 
{}'.format(self.pr_leader()))\n\n def get_state(self, peer=None, detail=False):\n \"\"\"\n Look up current state in state table.\n\n :param peer: socket connected to peer process (None means self)\n :param detail: if True, then the state and timestamp are both returned\n :return: either the state or (state, timestamp) depending on detail (not found gives (QUIESCENT, None))\n \"\"\"\n if peer is None:\n peer = self\n status = self.states[peer] if peer in self.states else (State.QUIESCENT, None)\n return status if detail else status[0]\n\n def set_state(self, state, peer=None, switch_mode=False):\n \"\"\"\n Set/change the state for the given process.\n\n :param State state: new state\n :param peer: socket connected to peer process (None means self)\n :param switch_mode: True if we want to move from read to write or vice versa FIXME: autodetect this?\n \"\"\"\n print('{}: {}'.format(self.pr_sock(peer), state.name))\n if peer is None:\n peer = self\n\n # figure out if we need to register for read or write in selector\n if state.is_incoming():\n mask = selectors.EVENT_READ\n else:\n mask = selectors.EVENT_WRITE\n\n # setting to quiescent means delete it from self.states\n if state == State.QUIESCENT:\n if peer in self.states:\n if peer != self:\n self.selector.unregister(peer)\n del self.states[peer]\n if len(self.states) == 0:\n print('{} (leader: {})\\n'.format(self.pr_now(), self.pr_leader()))\n return\n\n # note the new state and adjust selector as necessary\n if peer != self and peer not in self.states:\n peer.setblocking(False)\n self.selector.register(peer, mask)\n elif switch_mode:\n self.selector.modify(peer, mask)\n self.states[peer] = (state, datetime.now())\n\n # try sending right away (if currently writable, the selector will never trigger it)\n if mask == selectors.EVENT_WRITE:\n self.send_message(peer)\n\n def set_quiescent(self, peer=None):\n \"\"\"Shortcut for set_state(QUIESCENT).\"\"\"\n self.set_state(State.QUIESCENT, peer)\n\n def start_election(self, reason):\n \"\"\"\n Sends an ELECTION to all the greater group members.\n If there are no greater members, then we declare victory.\n Otherwise we wait for OKs.\n\n :param reason: text to put out to log as explanation\n \"\"\"\n print('Starting an election {}'.format(reason))\n self.set_leader(None) # indicates that election is in progress\n self.set_state(State.WAITING_FOR_OK)\n i_am_biggest_bully = True\n for member in self.members:\n if member > self.pid:\n peer = self.get_connection(member)\n if peer is None:\n continue\n self.set_state(State.SEND_ELECTION, peer)\n i_am_biggest_bully = False\n if i_am_biggest_bully:\n self.declare_victory('no other bullies bigger than me')\n\n def declare_victory(self, reason):\n \"\"\"\n Tell all the other members that I have decided that I am the biggest bully.\n\n :param reason: text to put out to the log as explanation\n \"\"\"\n print('Victory by {} {}'.format(self.pid, reason))\n self.set_leader(self.pid) # indicates election is over (note: I may not have known an election was happening)\n for member in self.members:\n if member != self.pid:\n peer = self.get_connection(member)\n if peer is None:\n continue\n self.set_state(State.SEND_VICTORY, peer)\n self.set_quiescent()\n\n def update_members(self, their_idea_of_membership):\n \"\"\"\n Pick up any new members from peer.\n\n :param their_idea_of_membership: the membership of group per the peer\n \"\"\"\n if their_idea_of_membership is not None:\n for member in their_idea_of_membership:\n self.members[member] = 
their_idea_of_membership[member]\n\n @classmethod # uses the class to know which receive method to call\n def send(cls, peer, message_name, message_data=None, wait_for_reply=False, buffer_size=BUF_SZ):\n \"\"\"\n Pickles and sends the given message to the given socket and unpickles the returned value and returns it.\n\n :param peer: socket to send/recv\n :param message_name: text message name 'ELECTION', 'OK', etc.\n :param message_data: message contents (anything pickle-able)\n :param wait_for_reply: if a synchronous response is wanted\n :param buffer_size: if wait_for_reply, this is the receive buffer size\n :return: if wait_for_reply, the received response, otherwise None\n :raises: whatever pickle.dumps, socket.sendall, and receive could raise\n \"\"\"\n message = message_name if message_data is None else (message_name, message_data)\n peer.sendall(pickle.dumps(message))\n if wait_for_reply:\n return cls.receive(peer, buffer_size)\n\n @staticmethod\n def receive(peer, buffer_size=BUF_SZ):\n \"\"\"\n Receives and unpickles an incoming message from the given socket.\n\n :param peer: socket to recv from\n :param buffer_size: buffer size of socket.recv\n :return: the unpickled data received from peer\n :raises: whatever socket.recv or pickle.loads could raise\n \"\"\"\n packet = peer.recv(buffer_size)\n if not packet:\n raise ValueError('socket closed')\n data = pickle.loads(packet)\n if type(data) == str:\n data = (data, None) # turn a simple text message into a pair: (text, None)\n return data\n\n @staticmethod\n def start_a_server():\n \"\"\"\n Start a socket bound to 'localhost' at a random port.\n\n :return: listening socket and its address\n \"\"\"\n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.bind(('localhost', 0)) # use any free socket\n listener.listen(BACKLOG)\n listener.setblocking(False)\n return listener, listener.getsockname()\n\n def join_group(self):\n \"\"\"\n Join the group via the GCD.\n\n :raises TypeError: if we didn't get a reasonable response from GCD\n \"\"\"\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as gcd:\n message_data = (self.pid, self.listener_address)\n print('JOIN {}, {}'.format(self.gcd_address, message_data))\n gcd.connect(self.gcd_address)\n self.members = self.send(gcd, 'JOIN', message_data, wait_for_reply=True)\n if type(self.members) != dict:\n raise TypeError('got unexpected data from GCD: {}'.format(self.members))\n\n @staticmethod\n def pr_now():\n \"\"\"Printing helper for current timestamp.\"\"\"\n return datetime.now().strftime('%H:%M:%S.%f')\n\n def pr_sock(self, sock):\n \"\"\"Printing helper for given socket.\"\"\"\n if sock is None or sock == self or sock == self.listener:\n return 'self'\n return self.cpr_sock(sock)\n\n @staticmethod\n def cpr_sock(sock):\n \"\"\"Static version of helper for printing given socket.\"\"\"\n l_port = sock.getsockname()[1] % PEER_DIGITS\n try:\n r_port = sock.getpeername()[1] % PEER_DIGITS\n except OSError:\n r_port = '???'\n return '{}->{} ({})'.format(l_port, r_port, id(sock))\n\n def pr_leader(self):\n \"\"\"Printing helper for current leader's name.\"\"\"\n return 'unknown' if self.bully is None else ('self' if self.bully == self.pid else self.bully)\n\n\nif __name__ == '__main__':\n if not 4 <= len(sys.argv) <= 5:\n print(\"Usage: python lab2.py GCDHOST GCDPORT SUID [DOB]\")\n exit(1)\n if len(sys.argv) == 5:\n print('here one ')\n # assume ISO format for DOB, e.g., YYYY-MM-DD\n pieces = sys.argv[4].split('-')\n now = datetime.now()\n next_bd = datetime(now.year, 
int(pieces[1]), int(pieces[2]))\n        if next_bd < now:\n            next_bd = datetime(next_bd.year + 1, next_bd.month, next_bd.day)\n    else:\n        next_bd = datetime(2020, 1, 1)\n    print('Next Birthday:', next_bd)\n    su_id = int(sys.argv[3])\n    print('SeattleU ID:', su_id)\n    lab2 = Lab2(sys.argv[1:3], next_bd, su_id)\n    lab2.run()\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":17473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"380707674","text":"# eht project URL Configuration\n\nfrom django.contrib import admin\nfrom django.urls import path, include, re_path\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include('landing.urls')),\n    # The cms app URL patterns used here are encoded in basE91\n    # for fun and profit\n    path('4XVK/', include('cms.urls')),\n    re_path(r'^.*/', include('shorterl.urls')),\n]\n","sub_path":"eht/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"209029661","text":"from __future__ import print_function\nfrom axlAuth import *\nfrom risport70Auth import *\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport ipaddress\nimport smtplib\n\ndevicePool = 'DP-SITENAME' # Enter the name of the device pool being queried.\nresp = client.service.executeSQLQuery(\"\"\"select * from devicepool where devicepool.name = '%s'\"\"\" % (devicePool)) # Performs a query for phones in the specified devicePool variable, filters for devices that start with \"SEP\".\ndevicePoolDetail = resp[1]['return']['row'] # Pulls the output from the sql query and prepares it for pulling specific details.\ndevicePoolPKID = devicePoolDetail[0].pkid # Pulls the PKID of the device pool, needed for the following SQL statement.\ndevicePoolName = devicePoolDetail[0].name # Stores the cosmetic name of the device pool. Handy for troubleshooting.\nrangeStart = 0 # Used to pull groups of 1000 devices (needed because of AXLAPI throttling).\nrangeEnd = 999 # Used to pull groups of 1000 devices (needed because of AXLAPI throttling).\ncount = 1 # Used for counting listed devices in the IF statement.\ntotalCountOfDevices = client.service.executeSQLQuery(\"\"\"select count(*) from device where fkdevicepool='%s' and device.name MATCHES 'SEP*'\"\"\" % (devicePoolPKID)) # Number needed to determine how many total devices starting with SEP exist in a given device pool.\ntotalCountOfDevicesIntTest = totalCountOfDevices[1]['return']['row'] # Iterate through the output of totalCountOfDevices.\ntotalCountOfDevicesInt = totalCountOfDevicesIntTest[0].count # Yield the number of devices. This is used in the WHILE statement to determine if the loop should continue relative to the COUNT value.\n\nwhile count < int(totalCountOfDevicesInt): # Determines how many times the loop should run based on how many total devices are found in count\n    resp2 = client.service.executeSQLQuery(\"\"\"select * from (select skip %s first 1000 * from device where fkdevicepool='%s' and device.name MATCHES 'SEP*' order by pkid asc)\"\"\" % (rangeStart, devicePoolPKID)) # Ask the device table for all devices that belong to the devicepool PKID, determined with devicePoolPKID, and filter for devices whose names start with \"SEP\" (phones). 
Pull the results in groups of 1000 because of AXLAPI throttling.\n\n    rangeStart += 1000\n    rangeEnd += 1000\n    phoneDetail = resp2[1]['return']['row'] # Pulls the output from the sql query and prepares it for pulling data.\n    phoneDetailLength = len(phoneDetail) # Determines how many items are found in the query. This is needed for the while loop below.\n\n    #print(phoneDetail)\n\n    x = 0 # Our starting point for the while loop, this will increment until it reaches the value of phoneDetailLength.\n    while x < phoneDetailLength: # Execute loop until we reach the end of the items in the device pool.\n        phoneDetailName = phoneDetail[x].name\n        if phoneDetailName.startswith('SEP'): # Check if the item starts with SEP (a phone). We're only pulling phones in this script.\n            phoneDetailName = phoneDetail[x].name\n            phoneDetailPKID = phoneDetail[x].pkid\n            phoneDetailDescription = phoneDetail[x].description\n\n            # We use the fkcallingsearchspace value from our device information in phoneDetail to get the PKID of the device calling search space. A device may not have a calling search space (like a Syn-Apps VIP device). We use an IF statement to determine if a value for phoneDetail[x].fkcallingsearchspace is returned. If yes, then use it to get the name of the device's calling search space in phoneDetailDeviceCallingSearchSpaceQuery. If not, then set the variable \"phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name = None\".\n\n            if phoneDetail[x].fkcallingsearchspace != \"\":\n                phoneDetailDeviceCallingSearchSpaceQuery = client.service.executeSQLQuery(\"\"\"select * from callingsearchspace where callingsearchspace.pkid = '%s'\"\"\" % (phoneDetail[x].fkcallingsearchspace))\n\n                # This IF structure determines if the string pulled from our device pool matches a string pulled from our device's calling search space. 
If it doesn't, it will set a variable that will be called upon during the email notification piece.\n\n                #if (devicePool[2]+devicePool[3]+devicePool[4]+devicePool[5]) == (phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[3]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[4]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[5]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[6]):\n                #    pass\n                #else:\n                #    phoneCallingSearchSpaceMessage = \"Phone Device Calling Search Space is incorrect.\"\n\n            else:\n                phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name = None\n\n            # We authenticate to the Risport70 service and request information based on the phoneDetailName variable.\n            clientRisport70 = Client(wsdl,location=location, username=username, password=password, plugins=[ImportDoctor(imp)])\n            resultRisport70 = clientRisport70.service.SelectCmDevice('',{'SelectBy':'Name', 'Status':'Registered', 'Class':'Phone','SelectItems':{'SelectItem':{'Item':phoneDetailName}}})\n            #print(resultRisport70)\n            print(\"Phone PKID \" + str(count) + \" = \" + phoneDetailPKID)\n            print(\"Phone Name \" + str(count) + \" = \" + phoneDetailName)\n            print(\"Phone Description \" + str(count) + \" = \" + phoneDetailDescription)\n            if phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name != None:\n                print(\"Phone Device Calling Search Space \" + str(count) + \" = \" + phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name) # Print the device's calling search space.\n            else:\n                #phoneDetailDeviceCallingSearchSpaceQuery = \"None\"\n                print(\"Phone Device Calling Search Space = \" + str(phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name))\n\n            # We iterate through the resultRisport70 output for this pass to yield the phone's IP address. Phones that are not registered will not have an IP, so this IF statement evaluates whether they are registered. If they are, print the IP address. If they're not, then print \"Not Registered\".\n            if resultRisport70['SelectCmDeviceResult']['CmNodes'] != None:\n                for node in resultRisport70['SelectCmDeviceResult']['CmNodes']:\n                    for device in node['CmDevices']:\n                        print(\"Phone IP Address \" + str(count) + \" = \" + device['IpAddress'] + \"\\n\")\n                        if ipaddress.ip_address(device['IpAddress']) in ipaddress.ip_network(u'X.X.X.X/24'): # Determine if an IP address is in the acceptable range.\n\n                            # This IF statement verifies whether a CSS has been configured on the device at all. If it has, we can take the value and assign it to the phoneDetailDeviceCallingSearchSpaceQuery variable for further evaluation. 
If a device CSS has not been assigned, we're going to hard code a value of None to phoneDetailDeviceCallingSearchSpaceQueryString.\n if phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name != None:\n phoneDetailDeviceCallingSearchSpaceQueryString = phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[3]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[4]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[5]+phoneDetailDeviceCallingSearchSpaceQuery[1]['return']['row'][0].name[6]\n else:\n phoneDetailDeviceCallingSearchSpaceQueryString = None\n\n if devicePool[2]+devicePool[3]+devicePool[4]+devicePool[5] == phoneDetailDeviceCallingSearchSpaceQueryString:\n pass\n else:\n if (phoneDetailDescription != \"addAnExceptionHereIfNeeded\"):\n phoneCallingSearchSpaceMessage = \"Phone Device Calling Search Space is incorrect.\"\n fromaddr = \"sender@address.com\"\n toaddr = \"receiver@address.com\"\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Telecom Alert - Incorrectly configured phone \\\"\" + phoneDetailDescription + \"\\\" (\" + phoneDetailName + \") at \" + device['IpAddress'] + \". Please correct.\"\n body = \"Incorrect calling search space for phone \" + phoneDetailName + \". Phone is in the \" + devicePool + \" device pool, the device calling search space has been set to \" + str(phoneDetailDeviceCallingSearchSpaceQueryString) + \".\"\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP('mail.domain.com', 25)\n server.ehlo()\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n else:\n fromaddr = \"sender@address.com\"\n toaddr = \"receiver@address.com\"\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Telecom Alert - Incorrectly configured phone \\\"\" + phoneDetailDescription + \"\\\" (\" + phoneDetailName + \") at \" + device['IpAddress'] + \". Please correct.\"\n body = \"Incorrect Device Pool for phone \" + phoneDetailName + \". 
Please verify that the device pool assignment and calling search space are correct for the phone's subnet.\"\n                                    msg.attach(MIMEText(body, 'plain'))\n                                    server = smtplib.SMTP('mail.domain.com', 25)\n                                    server.ehlo()\n                                    text = msg.as_string()\n                                    server.sendmail(fromaddr, toaddr, text)\n\n                    else:\n                        print(\"Phone IP Address \" + str(count) + \" = Not Registered \\n\")\n            count += 1\n        x += 1\n","sub_path":"dp-siteName.py","file_name":"dp-siteName.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"620472127","text":"from api import Api\nimport json\nimport numpy\nimport pandas\n\nkey = 'LOL_API_KEY'\n\napi = Api('br1', key)\nmatches = api.get_random_matches()\n\nmatch_list = []\ncolumns_list = []\n\nfor match in matches:\n    item = []\n    item_name = []\n    teams = match['teams']\n    participants = match['participants']\n\n    # Create variables for the match\n    win, firstBlood, firstTower, firstInhibitor, firstBaron, firstDragon, firstRiftHerald = 0, 0, 0, 0, 0, 0, 0\n\n    # Loop over the list of teams\n    for i in range(2):\n        if teams[i]['win'] == 'Win':\n            win = i+1\n        if teams[i]['firstBlood'] == True:\n            firstBlood = i+1\n        if teams[i]['firstTower'] == True:\n            firstTower = i+1\n        if teams[i]['firstInhibitor'] == True:\n            firstInhibitor = i+1\n        if teams[i]['firstBaron'] == True:\n            firstBaron = i+1\n        if teams[i]['firstDragon'] == True:\n            firstDragon = i+1\n        if teams[i]['firstRiftHerald'] == True:\n            firstRiftHerald = i+1\n\n    # Add the variables to the list\n    item.append(win)\n    item.append(firstBlood)\n    item.append(firstTower)\n    item.append(firstInhibitor)\n    item.append(firstBaron)\n    item.append(firstDragon)\n    item.append(firstRiftHerald)\n    item_name.append(\"win\")\n    item_name.append(\"first_blood\")\n    item_name.append(\"first_tower\")\n    item_name.append(\"first_inhibitor\")\n    item_name.append(\"first_baron\")\n    item_name.append(\"first_dragon\")\n    item_name.append(\"first_rift_herald\")\n\n    for i in range(2):\n        teamId = str(teams[i]['teamId'])\n        item.append(teams[i][\"towerKills\"])\n        item.append(teams[i][\"inhibitorKills\"])\n        item.append(teams[i][\"baronKills\"])\n        item.append(teams[i][\"dragonKills\"])\n        item.append(teams[i][\"vilemawKills\"])\n        item.append(teams[i][\"riftHeraldKills\"])\n\n        item_name.append(\"tower_kills\" + \"_team_\" + teamId[0])\n        item_name.append(\"inhibitor_kills\" + \"_team_\" + teamId[0])\n        item_name.append(\"baron_kills\" + \"_team_\" + teamId[0])\n        item_name.append(\"dragon_kills\" + \"_team_\" + teamId[0])\n        item_name.append(\"vilemaw_kills\" + \"_team_\" + teamId[0])\n        item_name.append(\"rift_herald_kills\" + \"_team_\" + teamId[0])\n\n    part_keys = [\n        \"kills_top_team_1\",\n        \"assists_top_team_1\",\n        \"deaths_top_team_1\",\n        \"gold_earned_20m_top_team_1\",\n        \"cs_20m_top_team_1\",\n        \"xp_20m_top_team_1\",\n        \"damege_taken_20m_top_team_1\",\n        \"kills_middle_team_1\",\n        \"assists_middle_team_1\",\n        \"deaths_middle_team_1\",\n        \"gold_earned_20m_middle_team_1\",\n        \"cs_20m_middle_team_1\",\n        \"xp_20m_middle_team_1\",\n        \"damege_taken_20m_middle_team_1\",\n        \"kills_jungle_team_1\",\n        \"assists_jungle_team_1\",\n        \"deaths_jungle_team_1\",\n        \"gold_earned_20m_jungle_team_1\",\n        \"cs_20m_jungle_team_1\",\n        \"xp_20m_jungle_team_1\",\n        \"damege_taken_20m_jungle_team_1\",\n        \"kills_bottom_duo_support_team_1\",\n        \"assists_bottom_duo_support_team_1\",\n        \"deaths_bottom_duo_support_team_1\",\n        \"gold_earned_20m_bottom_duo_support_team_1\",\n        \"cs_20m_bottom_duo_support_team_1\",\n        
\"xp_20m_bottom_duo_support_team_1\",\n \"damege_taken_20m_bottom_duo_support_team_1\",\n \"kills_bottom_duo_carry_team_1\",\n \"assists_bottom_duo_carry_team_1\",\n \"deaths_bottom_duo_carry_team_1\",\n \"gold_earned_20m_bottom_duo_carry_team_1\",\n \"cs_20m_bottom_duo_carry_team_1\",\n \"xp_20m_bottom_duo_carry_team_1\",\n \"damege_taken_20m_bottom_duo_carry_team_1\",\n \"kills_top_team_2\",\n \"assists_top_team_2\",\n \"deaths_top_team_2\",\n \"gold_earned_20m_top_team_2\",\n \"cs_20m_top_team_2\",\n \"xp_20m_top_team_2\",\n \"damege_taken_20m_top_team_2\",\n \"kills_middle_team_2\",\n \"assists_middle_team_2\",\n \"deaths_middle_team_2\",\n \"gold_earned_20m_middle_team_2\",\n \"cs_20m_middle_team_2\",\n \"xp_20m_middle_team_2\",\n \"damege_taken_20m_middle_team_2\",\n \"kills_jungle_team_2\",\n \"assists_jungle_team_2\",\n \"deaths_jungle_team_2\",\n \"gold_earned_20m_jungle_team_2\",\n \"cs_20m_jungle_team_2\",\n \"xp_20m_jungle_team_2\",\n \"damege_taken_20m_jungle_team_2\",\n \"kills_bottom_duo_support_team_2\",\n \"assists_bottom_duo_support_team_2\",\n \"deaths_bottom_duo_support_team_2\",\n \"gold_earned_20m_bottom_duo_support_team_2\",\n \"cs_20m_bottom_duo_support_team_2\",\n \"xp_20m_bottom_duo_support_team_2\",\n \"damege_taken_20m_bottom_duo_support_team_2\",\n \"kills_bottom_duo_carry_team_2\",\n \"assists_bottom_duo_carry_team_2\",\n \"deaths_bottom_duo_carry_team_2\",\n \"gold_earned_20m_bottom_duo_carry_team_2\",\n \"cs_20m_bottom_duo_carry_team_2\",\n \"xp_20m_bottom_duo_carry_team_2\",\n \"damege_taken_20m_bottom_duo_carry_team_2\"\n ]\n\n part_list = []\n\n # Loop para lista de participants\n for p in participants:\n timeline = p['timeline']\n stats = p['stats']\n teamId = str(p['teamId'])\n\n lane = timeline[\"lane\"].casefold()\n\n if timeline[\"lane\"] == \"BOTTOM\":\n lane = lane + \"_\" + timeline[\"role\"].casefold()\n\n part = {}\n\n key = \"kills_\" + lane + \"_team_\" + teamId[0]\n part[key] = stats[\"kills\"]\n\n key = \"assists_\" + lane + \"_team_\" + teamId[0]\n part[key] = stats[\"assists\"]\n\n key = \"deaths_\" + lane + \"_team_\" + teamId[0]\n part[key] = stats[\"deaths\"]\n\n key = \"gold_earned_20m_\" + lane + \"_team_\" + teamId[0]\n part[key] = timeline['goldPerMinDeltas']['10-20'] if 'goldPerMinDeltas' in timeline else 0\n\n key = \"cs_20m_\" + lane + \"_team_\" + teamId[0]\n part[key] = timeline['creepsPerMinDeltas']['10-20'] if 'goldPerMinDeltas' in timeline else 0\n\n key = \"xp_20m_\" + lane + \"_team_\" + teamId[0]\n part[key] = timeline['xpPerMinDeltas']['10-20'] if 'goldPerMinDeltas' in timeline else 0\n\n key = \"damege_taken_20m_\" + lane + \"_team_\" + teamId[0]\n part[key] = timeline['damageTakenPerMinDeltas']['10-20'] if 'goldPerMinDeltas' in timeline else 0\n\n part_list.append(part)\n\n for key in part_keys:\n item_name.append(key)\n for part in part_list:\n if key in part:\n item.append(part[key])\n\n if len(item) == len(item_name):\n match_list.append(item)\n columns_list.append(item_name)\n\ndataset = pandas.DataFrame(match_list)\ndataset.sample(3)\ndataset.columns = columns_list[0]\ndataset.index = pandas.RangeIndex(len(dataset.index))\ndataset.to_csv(\"random-games\",index=False)","sub_path":"random-games.py","file_name":"random-games.py","file_ext":"py","file_size_in_byte":6706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"557960204","text":"# 2. 
There is a little monkey that picked a lot of peaches,\n# On day 1 it ate half of all the peaches, and one more because it was still hungry\n# On day 2 it ate half of the remaining peaches, and one more because it was still hungry\n# And so on...\n# On day 10, only one peach was left\n# Question: how many peaches were picked on day 1?\n\nx = 1 # day 10\n# x = (x + 1) * 2 # day 9\n# x = (x + 1) * 2 # day 8\nfor day in range(9, 0, -1):\n    x = (x + 1) * 2\n    print(\"Peaches on day %d: %d\" % (day, x))\n","sub_path":"02-PythonBase/day08/day07_exercise/monkey.py","file_name":"monkey.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"422479298","text":"from unittest import skipIf\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import LineString, Point\nfrom django.test import TestCase\n\nfrom geotrek.land.tests.test_filters import LandFiltersTest\n\nfrom geotrek.core.factories import PathFactory, TrailFactory\nfrom geotrek.core.filters import PathFilterSet, TopologyFilter\nfrom geotrek.core.models import Topology\n\nfrom geotrek.trekking.factories import TrekFactory\nfrom geotrek.trekking.filters import TrekFilterSet\n\n\n@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')\nclass PathFilterLandTest(LandFiltersTest):\n\n    filterclass = PathFilterSet\n\n\nclass TestFilter(TopologyFilter):\n    model = Topology\n\n\nclass TopologyFilterTest(TestCase):\n    def test_values_to_edges(self):\n        topology = TestFilter()\n\n        with self.assertRaises(NotImplementedError):\n            topology.values_to_edges(['Value'])\n\n\n@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')\nclass TopologyFilterTrailTest(TestCase):\n    def setUp(self):\n        self.path = PathFactory()\n        self.trail = TrailFactory(paths=[(self.path, 0, 1)])\n\n    def test_trail_filters(self):\n        PathFactory()\n        qs = PathFilterSet().qs\n        self.assertEqual(qs.count(), 2)\n\n        data = {'trail': [self.trail]}\n        qs = PathFilterSet(data=data).qs\n        self.assertEqual(qs.count(), 1)\n\n\nclass ValidTopologyFilterTest(TestCase):\n    def setUp(self):\n        if settings.TREKKING_TOPOLOGY_ENABLED:\n            self.path = PathFactory()\n            self.trek = TrekFactory.create(name=\"Crossed\", paths=[(self.path, 0, 1)])\n        else:\n            self.trek = TrekFactory.create(geom=LineString((0, 0), (5, 5)))\n\n    @skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')\n    def test_trek_filters_not_valid(self):\n        trek = TrekFactory.create(name=\"Not crossed\", paths=[(self.path, 0, 0.5)])\n        TrekFactory.create(paths=[])\n        qs = TrekFilterSet().qs\n        self.assertEqual(qs.count(), 3)\n\n        data = {'is_valid': True}\n        qs = TrekFilterSet(data=data).qs\n        self.assertIn(self.trek, qs)\n        self.assertEqual(qs.count(), 2)\n\n        data = {'is_valid': False}\n        qs = TrekFilterSet(data=data).qs\n        self.assertEqual(qs.count(), 1)\n\n        geom = LineString(Point(700100, 6600000), Point(700000, 6600100), srid=settings.SRID)\n        PathFactory.create(geom=geom)\n        self.trek.reload()\n        trek.reload()\n        data = {'is_valid': True}\n        qs = TrekFilterSet(data=data).qs\n        self.assertNotIn(self.trek, qs)\n        self.assertIn(trek, qs)\n        self.assertEqual(qs.count(), 1)\n\n        data = {'is_valid': False}\n        qs = TrekFilterSet(data=data).qs\n        self.assertIn(self.trek, qs)\n        self.assertNotIn(trek, qs)\n        self.assertEqual(qs.count(), 2)\n\n    @skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')\n    def test_trek_filters_not_valid_nds(self):\n        TrekFactory.create(name=\"Empty\", geom='SRID=2154;LINESTRING EMPTY')\n        qs = TrekFilterSet().qs\n        self.assertEqual(qs.count(), 2)\n\n        data = {'is_valid': True}\n        qs = TrekFilterSet(data=data).qs\n        self.assertIn(self.trek, qs)\n        self.assertEqual(qs.count(), 
1)\n\n        data = {'is_valid': False}\n        qs = TrekFilterSet(data=data).qs\n        self.assertEqual(qs.count(), 1)\n","sub_path":"geotrek/core/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"317451798","text":"while True:\n    correct_num = [4, 8]\n    num = input(\"Guess the number.\")\n    if num == \"q\":\n        break\n    try:\n        if int(num) in correct_num:\n            print(\"Correct\\n\")\n        else:\n            print(\"Incorrect\\n\")\n    except:\n        print(\"Enter a number, or q to quit\\n\")\n\n    \n","sub_path":"ch7/challenge4.py","file_name":"challenge4.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"513735970","text":"import pandas as pd\r\nimport os\r\n\r\ndirectory = r'F:\\PATH'\r\nresultPath = r'F:\\PATH'\r\nfor filename in os.listdir(directory):\r\n    if not filename.startswith(\"E\"):\r\n        #Load Channels and Events datasets\r\n        df = pd.read_csv(os.path.join(directory,filename))\r\n        dfEvent = pd.read_csv(os.path.join(directory,'E_'+filename))\r\n        \r\n        #Create a new column that contains the Draw Number\r\n        df['Draw Number']=0\r\n        \r\n        #There are 14 draws so we loop from 1 to 14 to find each\r\n        for i in range(1,15):\r\n            #Extract the start and end of each draw from Events dataset \r\n            startDraw = int(dfEvent.loc[dfEvent['Desc'].str.contains('Starting Draw # '+str(i)+' ')].iloc[0][1])\r\n            endDraw = int(dfEvent.loc[dfEvent['Desc'].str.contains('Draw Complete')].iloc[i-1][1])\r\n            \r\n            #Find the equivalent index from the Channels dataset\r\n            #PS1: TimeStamps floats in Events and Channels datasets don't match, so it's better to work with ints instead\r\n            #PS2: Since the timeStamps are not the same then even when converting to int we might need to (+1) to find a match \r\n            if startDraw in df['TimeStamp (sec)'].values.astype(int):\r\n                startDrawIndex = df.index[df['TimeStamp (sec)'].astype(int)==startDraw][0]\r\n            else:\r\n                startDrawIndex = df.index[df['TimeStamp (sec)'].astype(int)==startDraw+1][0]\r\n            if endDraw in df['TimeStamp (sec)'].values.astype(int):\r\n                endDrawIndex = df.index[df['TimeStamp (sec)'].astype(int)==endDraw][0]\r\n            else:\r\n                endDrawIndex = df.index[df['TimeStamp (sec)'].astype(int)==endDraw+1][0]\r\n            \r\n            #Assigning each row to its matching draw if it has\r\n            df.loc[startDrawIndex:endDrawIndex,'Draw Number'] = i\r\n        df.to_csv(os.path.join(resultPath,filename), index = False)\r\n    \r\n    else:\r\n        continue\r\n    \r\n#Maybe need to start TimeStamps from 0 for each or not\r\n#Note to self: Maybe need to optimize the code but it works just fine (for now)","sub_path":"splitOnAllDrawsScript.py","file_name":"splitOnAllDrawsScript.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"417006401","text":"\"\"\"\n    The animations handler gives back pictures from animations depending on time and directions of props/characters\n    It also creates subsurfaces from pictures through the Create_animation() function\n\"\"\"\nimport time\nimport pygame\nfrom pygame.locals import *\n\nclass Animator:\n\n    def __init__(self,tickTime, animationsTuple = None):\n        self.tickTime = tickTime\n        self.animationsTuple = animationsTuple\n        self.startTime = time.time()\n        self.animationStep = 1\n        self.lastDirection = 0\n\n    def Get_pictures(self, direction = 0):\n        self.direction = direction\n        self.currentTime = time.time()\n        self.timeSum = 
self.currentTime - self.startTime\n\n\n        if self.timeSum > self.tickTime:\n            self.animationStep = (self.animationStep + 1) % len(self.animationsTuple[self.direction])\n            self.startTime = time.time()\n        \n        \n        if self.lastDirection != direction:\n            self.animationStep %= len(self.animationsTuple[self.direction])\n            self.lastDirection = direction\n        \n        return self.animationsTuple[self.direction][self.animationStep]\n\n\ndef Create_animation(URL,width,height,picAmount):\n\n    basePic = pygame.image.load(URL)\n    spriteList = []\n\n    for i in range(picAmount):\n        spriteCurrentX = i * width\n        temp = basePic.subsurface((spriteCurrentX,0), (width,height))\n        spriteList.append(temp)\n\n\n    return spriteList\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/palt/Animation.py","file_name":"Animation.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"358238923","text":"#002 - Product Manager\n\nfrom Ex001 import Produto\n\nclass GerProd():\n\n    def __init__(self):\n        self.__listaProd = []\n    \n    def addProd(self, novo):\n        self.__listaProd.append(novo)\n    \n    def removerProd(self, cod):\n        removido = 0\n        for prod in self.__listaProd:\n            if prod.getProd() == cod:\n                break\n            removido += 1\n        self.__listaProd.pop(removido)\n\n    def imprimirProd(self):\n        for prod in self.__listaProd:\n            print(prod)\n\ngProd = GerProd()\nBiscoito = Produto(1, 2.50)\nMacarrao = Produto(2, 1.20)\nSuco = Produto(3, 1.80)\ngProd.addProd(Biscoito)\ngProd.addProd(Macarrao)\ngProd.addProd(Suco)\ngProd.removerProd(1)\ngProd.imprimirProd()","sub_path":"Revisões - UFRPE/#9: Orientação a Objetos - Slide[Parte 2]/Ex002.py","file_name":"Ex002.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"20108189","text":"def bubble_sort(arr):\n\tfor i in range(len(arr)):\n\t\tfor j in range(0, len(arr)-1 -i):\n\t\t\tif arr[j] > arr[j+1]:\n\t\t\t\tarr[j], arr[j+1] = arr[j+1], arr[j]\n\treturn arr\n\n\nif __name__ == '__main__':\n\tprint (\"Enter the elements of the array\")\n\tarr = list(map(int, input().split()))\n\tprint (\"The array before sort :\", end='' )\n\tprint (arr)\n\tprint (\"The array after sort : \",bubble_sort(arr))\t","sub_path":"sorting/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"442872570","text":"\"\"\"\niq_test(\"2 4 7 8 10\") => 3 # Third number is odd, while the rest of the numbers are even\n\niq_test(\"1 2 1 1\") => 2 # Second number is even, while the rest of the numbers are odd\n\nCareful: index must start at 1\n\"\"\"\nnumbers =(\"2 4 7 8 10\")\nnumbers = numbers.split()\neven = 0\nodd = 0\nindex = None\noddNumber = 0\nevenNumber = 0\nfor number in numbers:\n    number= int(number)\n    division = number % 2\n    if division == 0:\n        even +=1\n        evenNumber = number\n    else:\n        odd +=1\n        oddNumber = number\nif even < odd:\n    print(str(evenNumber))\n    index = numbers.index(str(evenNumber))\n    print(index+1)\nelse:\n    index = numbers.index(str(oddNumber))\n    print(index+1)","sub_path":"IQTest.py","file_name":"IQTest.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"185082379","text":"from . 
import opmllib\n\n\ndef export_opml(subscriptions):\n outlines = []\n\n for sub in subscriptions:\n feed = sub.feed\n attr = {\n 'type': 'rss',\n 'text': feed.title,\n 'title': feed.title,\n 'xmlUrl': feed.url,\n 'htmlUrl': feed.site_url,\n }\n outlines.append(attr)\n\n return opmllib.export_opml(outlines)\n","sub_path":"feedvest/models/opml.py","file_name":"opml.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"368782889","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\nfrom django.http import HttpResponse, JsonResponse\nimport json\nfrom iotClient.models import Sensor\n\ndef index(request):\n print(\"Client Index\")\n return HttpResponse(\"Client Index\")\n\n@csrf_exempt\ndef submitData(request):\n if request.method == \"GET\":\n sensor_data = Sensor.objects.all()\n label = sensor_data[0].sensor\n payload = [el.data for el in sensor_data]\n return JsonResponse({'label': label, 'data': payload })\n elif request.method == \"POST\":\n print(\"successful post method\")\n print(request.body)\n json_data = json.loads(request.body)\n print(json_data)\n print(json_data['sensor'])\n print(json_data['data'])\n s = Sensor(sensor=json_data['sensor'], data=json_data['data'])\n s.save()\n return HttpResponse(\"success\")\n","sub_path":"iotClient/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"216832452","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom schedule.models import Event, Occurrence, CalendarPluginModel\n\n\nclass SpanForm(forms.ModelForm):\n start = forms.DateTimeField(label=_(\"start\"),\n widget=forms.SplitDateTimeWidget)\n end = forms.DateTimeField(label=_(\"end\"),\n widget=forms.SplitDateTimeWidget,\n help_text=_(u\"The end time must be later than start time.\"))\n\n def clean(self):\n if 'end' in self.cleaned_data and 'start' in self.cleaned_data:\n if self.cleaned_data['end'] <= self.cleaned_data['start']:\n raise forms.ValidationError(_(u\"The end time must be later than start time.\"))\n return self.cleaned_data\n\n\nclass EventForm(SpanForm):\n def __init__(self, *args, **kwargs):\n super(EventForm, self).__init__(*args, **kwargs)\n\n end_recurring_period = forms.DateTimeField(label=_(u\"End recurring period\"),\n help_text=_(u\"This date is ignored for one time only events.\"),\n required=False)\n\n class Meta:\n model = Event\n exclude = ('creator', 'created_on', 'calendar')\n\n\nclass OccurrenceForm(SpanForm):\n class Meta:\n model = Occurrence\n exclude = ('original_start', 'original_end', 'event', 'cancelled')\n\n\nclass CalendarPluginForm(forms.ModelForm):\n class Meta:\n model = CalendarPluginModel\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n # Limit the calendar queryset to ones that have a site same as the page it's being put on\n self.request = kwargs.pop('request', None)\n super(CalendarPluginForm, self).__init__(*args, **kwargs)\n try:\n page_site = self.request.user.userprofile.site\n except:\n page_site = None\n if page_site:\n self.fields['calendar'].queryset = self.fields['calendar'].queryset.filter(site=page_site)\n if len(self.fields['calendar'].queryset) == 1:\n self.fields['calendar'].initial = 
self.fields['calendar'].queryset[0]\n","sub_path":"schedule/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"262624436","text":"import psycopg2, time\r\n\r\n\r\n\r\nclass psql_connector:\r\n def __init__(self, host, user, password, database, table_main_name, table_main_columns:dict):\r\n self.host = host\r\n self.database = database\r\n self.user = user\r\n self.password = password\r\n self.database = database\r\n self.table_main_name = table_main_name\r\n self.table_main_columns = table_main_columns\r\n self.columns = ''.join([str(item) + ', ' for item in list(self.table_main_columns.keys())]).strip(', ')\r\n self.time_idle = 0\r\n self.connection = None\r\n\r\n #if self.is_table_exists(self.table_main_name, self.table_main_columns) == False:\r\n # self.create_table(self.table_main_name)\r\n \r\n def create_connection(self):\r\n try:\r\n connection = psycopg2.connect(dbname=self.database, user=self.user, \r\n password=self.password, host=self.host)\r\n self.connection = connection\r\n self.connection.autocommit = False\r\n return connection\r\n except (Exception, psycopg2.DatabaseError) as e:\r\n return e\r\n\r\n def close_connection(self):\r\n try:\r\n self.connection.close()\r\n except (Exception, psycopg2.DatabaseError) as e:\r\n return e\r\n\r\n def is_db_available(self):\r\n try:\r\n conn = psycopg2.connect(dbname=self.database, user=self.user, \r\n password=self.password, host=self.host)\r\n cursor = conn.cursor()\r\n cursor.close()\r\n conn.close()\r\n return True\r\n except Exception as e:\r\n return e\r\n\r\n def test_query(self):\r\n try:\r\n self.create_connection()\r\n test_query = 'select 1;'\r\n cursor = self.connection.cursor()\r\n cursor.execute(test_query)\r\n cursor.close()\r\n self.connection.commit()\r\n self.close_connection()\r\n return True\r\n except (Exception, psycopg2.DatabaseError, AttributeError ) as e:\r\n return e\r\n\r\n def query_execute(self, query):\r\n try:\r\n self.create_connection()\r\n cursor = self.connection.cursor()\r\n cursor.execute(query)\r\n cursor.close()\r\n self.connection.commit()\r\n self.close_connection()\r\n return True\r\n except (Exception, psycopg2.DatabaseError, AttributeError) as e:\r\n self.connection.rollback()\r\n return e\r\n\r\n def query_fetch_all(self, query):\r\n try:\r\n self.create_connection()\r\n cursor = self.connection.cursor()\r\n cursor.execute(query)\r\n fetch = cursor.fetchall()\r\n cursor.close()\r\n self.connection.commit()\r\n self.close_connection()\r\n return fetch\r\n except (Exception, psycopg2.DatabaseError, AttributeError) as e:\r\n self.connection.rollback()\r\n return e\r\n\r\n def query_fetch_one(self, query):\r\n try:\r\n self.create_connection()\r\n cursor = self.connection.cursor()\r\n cursor.execute(query)\r\n fetch = cursor.fetchone()\r\n cursor.close()\r\n self.connection.commit()\r\n self.close_connection()\r\n return fetch\r\n except (Exception, psycopg2.DatabaseError, AttributeError) as e:\r\n self.connection.rollback()\r\n return e\r\n\r\n def create_table(self, table_name, table_columns):\r\n \"\"\" create table in the PostgreSQL database\"\"\"\r\n columns = str(table_columns).replace(':','').replace(\"'\",'').strip('}').strip('{').replace(',',',\\n')\r\n query = f\"\"\"\r\n CREATE TABLE {table_name} (\r\n {columns}\r\n );\r\n \"\"\"\r\n q = self.query_execute(query)\r\n if q == True:\r\n return True\r\n else:\r\n return q\r\n\r\n def is_table_name_exists(self, 
table_name):\r\n '''Checks if table name exists'''\r\n query = f'''\r\n select * from {table_name};\r\n '''\r\n q = self.query_execute(query)\r\n if q == True:\r\n return True\r\n else:\r\n return q\r\n\r\n def is_table_exists(self, table_name, table_columns):\r\n '''Checks if table exists'''\r\n query = f'''\r\n select {self.columns} from {table_name} limit 1;\r\n '''\r\n if self.query_execute(query) == True:\r\n return True\r\n else:\r\n return False\r\n","sub_path":"services/filecloud_hot/project/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"291022584","text":"from flask import Flask, request, render_template, redirect\nimport json\nfrom collections import defaultdict\nimport pandas as pd\nimport boto3\nfrom dynamodb_json import json_util\nfrom datetime import datetime\nfrom decimal import Decimal\n\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n dynamodb = boto3.resource('dynamodb', region_name='us-east-2')\n table = dynamodb.Table('nlp-ranking-logs')\n\n # journal\n CL = float(request.form['CL'])\n TACL = float(request.form['TACL'])\n\n # conference\n ACL_C = float(request.form['ACL-C'])\n NAACL_C = float(request.form['NAACL-C'])\n EMNLP_C = float(request.form['EMNLP-C'])\n CoNLL_C = float(request.form['CoNLL-C'])\n EACL_C = float(request.form['EACL-C'])\n COLING = float(request.form['COLING'])\n IJCNLP = float(request.form['IJCNLP'])\n\n # workshop or demo\n WKSPDEMO = float(request.form['WKSPDEMO'])\n\n\n startYear = int(request.form['start-year'])\n endYear = int(request.form['end-year'])\n\n num_uni = int(request.form['num_uni'])\n num_author = int(request.form['num_author'])\n\n table.put_item(\n Item={\n 'UTCtime': str(datetime.utcnow()),\n 'startYear': startYear,\n 'endYear': endYear,\n 'num_uni': num_uni,\n 'num_author': num_author,\n 'CL': Decimal(CL),\n 'TACL': Decimal(TACL),\n 'ACL_C': Decimal(ACL_C),\n 'NAACL_C': Decimal(NAACL_C),\n 'EMNLP_C': Decimal(EMNLP_C),\n 'CoNLL_C': Decimal(CoNLL_C),\n 'EACL_C': Decimal(EACL_C),\n 'COLING': Decimal(COLING),\n 'IJCNLP': Decimal(IJCNLP),\n 'WKSPDEMO': Decimal(WKSPDEMO),\n }\n )\n\n\n\n authors,maxYear = get_author_dict(\n CL, TACL, ACL_C, NAACL_C, EMNLP_C, CoNLL_C, EACL_C, COLING, IJCNLP, WKSPDEMO\n )\n\n rank1,rank2,uni_authors = ranking(authors, startYear, endYear, num_author)\n rank1 = rank1.head(n=num_uni)\n rank1.index = rank1.index + 1\n\n weights = [CL, TACL, ACL_C, NAACL_C, EMNLP_C, CoNLL_C, EACL_C, COLING, IJCNLP, WKSPDEMO]\n\n years = list(range(2010, maxYear+1))\n\n return render_template('home.html', ranking1=rank1, ranking2=rank2, year=[startYear, endYear],\n years=years, weights=list(map(int, weights)), num=[num_uni, num_author], uni_authors=uni_authors)\n\n else:\n\n authors,maxYear = get_author_dict(3,3,3,3,3,2,2,2,2,1)\n\n rank1,rank2,uni_authors = ranking(authors, 2010, maxYear, 100)\n rank1 = rank1.head(n=100)\n rank1.index = rank1.index + 1\n\n years = list(range(2010, maxYear+1))\n\n return render_template('home.html', ranking1=rank1, ranking2=rank2, year=[2010, maxYear], years=years,\n weights=[3,3,3,3,3,2,2,2,2,1], num=[100, 100], uni_authors=uni_authors)\n\n\n@app.route('/methodology/')\ndef methodology():\n return render_template(\"methodology.html\");\n\n\n\ndef get_author_dict(CL,TACL,ACL_C,NAACL_C,EMNLP_C,CoNLL_C,EACL_C,COLING,IJCNLP,WKSPDEMO):\n\n university = 
pd.read_json('../data-collection/university.json', orient='records')\n bibmap = json.load(open('../data-collection/bibmap.json'))\n\n venue_pub = {} # only look at publications with authors from an university institution\n for pub_id in [y[0] for x in university['publications'].values.tolist() if x for y in x]:\n venue = find_venue(pub_id)\n if venue in venue_pub.keys():\n venue_pub[venue].append(pub_id)\n else:\n venue_pub[venue] = [pub_id]\n\n\n # scoring each venue type\n score = {'journal': 3, 'conference': 3, 'workshop': 1, 'demonstration': 1}\n\n # authors = {author_id: {university1_domain: {2019: [score, num_pub], 2018: [score, num_pub]}}}\n authors = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: [0,0])))\n maxYear = 2010 # take the latest year in the data (initialize at 2010, which is the earliest year)\n\n for k, v in venue_pub.items():\n bib = next((y for y in bibmap if y['id'] == k), None)\n with open('../data-collection/pub_json/' + k + '.json') as p:\n json_file = json.load(p)\n\n # customized scoring weights\n if 'J' in k:\n venue_score = CL\n elif 'Q' in k:\n venue_score = TACL\n elif 'P' in k and bib['type'] == 'conference':\n venue_score = ACL_C\n elif 'N' in k and bib['type'] == 'conference':\n venue_score = NAACL_C\n elif 'D' in k and bib['type'] == 'conference':\n venue_score = EMNLP_C\n elif 'K' in k and bib['type'] == 'conference':\n venue_score = CoNLL_C\n elif 'E' in k and bib['type'] == 'conference':\n venue_score = EACL_C\n elif 'C' in k:\n venue_score = COLING\n elif 'I' in k:\n venue_score = IJCNLP\n elif bib['type'] in ['workshop', 'demonstration'] or 'W' in k:\n venue_score = WKSPDEMO\n else:\n venue_score = score[bib['type']]\n\n for pub in [x for x in json_file if x['id'] in v]:\n for i in range(len(pub['authors'])):\n if '.edu' == pub['emails'][i][-4:]:\n author_id = pub['author_id'][i]\n year = pub['year']\n if int(year) > maxYear:\n maxYear = int(year)\n uni_domain = parse_email(pub['emails'][i].split('@')[1])\n if uni_domain == 'uw.edu': # university of washington has 2 domains (washington.edu & uw.edu)\n uni_domain = 'washington.edu'\n if uni_domain == 'iub.edu': # indiana university bloomington (2 domains)\n uni_domain = 'indiana.edu'\n\n if bib['type'] not in ['workshop', 'demonstration']:\n # score\n authors[author_id][uni_domain][year][0] += 1 / len(pub['authors']) * venue_score\n # num of publications\n authors[author_id][uni_domain][year][1] += 1\n else:\n try:\n pages = pub['pages'].split('--')\n num_pages = int(pages[1]) - int(pages[0]) + 1\n except: # pages with roman numbers or single page\n continue\n\n if num_pages > 4:\n # score\n authors[author_id][uni_domain][year][0] += 1 / len(pub['authors']) * venue_score\n # num of publications\n authors[author_id][uni_domain][year][1] += 1\n\n return authors,maxYear\n\n\n\n\n\ndef ranking(authors, startYear, endYear, top_k):\n\n uni_score = {} # university score rank\n author_score = {} # author score rank\n # uni_authors = {uni: {author1: [score, num_pub, last_year_pub, href], author2: [score, num_pub, last_year_pub, href]}}\n uni_authors = defaultdict(lambda: defaultdict(lambda: [0, 0, 0, 0])) # authors in each university, score + num_pub\n for author, institutions in authors.items():\n for institution, years in institutions.items():\n for year,value in years.items():\n if int(year) in range(startYear, endYear+1):\n\n if institution in uni_score.keys():\n uni_score[institution] += value[0]\n else:\n uni_score[institution] = value[0]\n\n if author in author_score.keys():\n 
author_score[author] += value[0]\n else:\n author_score[author] = value[0]\n\n uni_authors[institution][author][0] += value[0] # score\n uni_authors[institution][author][1] += value[1] # num_pub\n uni_authors[institution][author][2] = int(max(years.keys())) # latest publication year\n\n\n # author rank\n author_info = pd.read_json('../data-collection/author.json', orient='records')\n author_info['firstname'] = author_info.firstname.fillna('')\n author_info['name'] = author_info['firstname'] + ' ' + author_info['lastname']\n author_names = dict(zip(author_info.author_id, author_info.name))\n\n author_rank = pd.DataFrame(sorted(list(author_score.items()), key=lambda x: x[1], reverse=True)[:top_k],\n columns=['author_id', 'Score'])\n author_rank['url'] = author_rank['author_id'].str[0] + '/' + author_rank['author_id']\n author_rank['Author'] = author_rank['author_id'].map(author_names)\n author_rank = author_rank.round(2)\n\n\n # uni rank\n us_universities = pd.read_csv('../data-collection/us_universities.tsv', sep='\\t', names=['name', 'domain', 'city', 'state'])\n\n us_name = dict(zip(us_universities.domain, us_universities.name))\n\n rank = pd.DataFrame(sorted(list(uni_score.items()), key=lambda x: x[1], reverse=True), columns=['domain', 'Score'])\n rank = rank[rank['domain'].isin(us_name.keys())]\n\n rank['Institution'] = rank['domain'].map(us_name)\n rank = rank.round(2)\n rank = rank.reset_index(drop=True)\n\n # rounding & sorting by individual author scores\n for u,authors in uni_authors.items():\n for author,v in authors.items():\n v[0] = round(v[0], 2)\n v[3] = author[0] + '/' + author # author aclweb url\n authors = dict((author_names[id], val) for id, val in authors.items())\n uni_authors[u] = sorted(list(authors.items()), key=lambda k_v: (k_v[1][0], k_v[1][2], k_v[1][1]), reverse=True)\n\n\n rank['authors'] = rank['domain'].map(uni_authors)\n\n\n return rank, author_rank, uni_authors\n\n\n\ndef parse_email(domain):\n if '.edu' in domain:\n d = domain.split('.')\n return '.'.join(d[d.index('edu')-1:])\n else:\n return domain\n\n\ndef find_venue(pub_id):\n if 'W' in pub_id:\n return pub_id[:-2]\n else:\n return pub_id[:-3]\n\n\n\nif __name__ == '__main__':\n # authors,maxYear = get_author_dict(3,3,3,3,3,2,2,2,2,1)\n # print(authors['asdfkjhaelf'])\n # print(len(authors))\n # rank1,rank2,list1 = ranking(authors, 2010, 2019, 6000)\n # print(len(rank2))\n # print(len(rank1.Institution.unique()))\n # print(len(rank1['Institution']))\n # author_ranking(authors, 2010, 2019, 100)\n app.run(debug=True, use_debugger=False, use_reloader=False, passthrough_errors=True)","sub_path":"website/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"557758048","text":"from flask import Flask, request\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport flasgger\nfrom flasgger import Swagger\n\napp = Flask(__name__)\nSwagger(app)\n\npickle_in=open('classifier.pkl','rb')\nclassifier =pickle.load(pickle_in)\n\n\n\n@app.route('/')\ndef welcome():\n return \"welcome\"\n\n@app.route('/predict',methods=['Get'])\ndef predict_note_auth():\n\n \"\"\"authenticate bank notes.\n ---\n parameters:\n - name: variance\n in : query\n type: number\n required: True\n\n - name: skewness\n in : query\n type: number\n required: True\n\n - name: curtosis\n in : query\n type: number\n required: True \n\n - name: entropy\n in : query\n type: number\n required: True \n\n\n responses: \n 
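# map of HTTP status codes to their response descriptions (flasgger parses this docstring as the Swagger spec)\n        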
200:\n description: The output values\n\n\n \"\"\"\n\n variance = request.args.get('variance')\n skewness = request.args.get('skewness')\n curtosis = request.args.get('curtosis')\n entropy = request.args.get('entropy')\n\n prediction = classifier.predict([[variance,skewness,curtosis, entropy]])\n print(prediction)\n return \"the prediction is : \" + str(prediction)\n\n@app.route('/predict_file',methods=['POST'])\ndef predict_file():\n\n \"\"\"authenticate bank notes.\n ---\n parameters:\n - name: file\n in : formData\n type: file\n required: True\n\n responses: \n 200:\n description: The output values\n\n\n \"\"\"\n\n df_test = pd.read_csv(request.files.get(\"file\"))\n prediction= classifier.predict(df_test)\n return \"the prediction is : \" + str(list(prediction))\n\n\n\n\nif __name__ == '__main__':\n print(\"hello python file is executed\")\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"147315791","text":"from django.core.management import BaseCommand\n\nfrom ...importers.coppermine import CoppermineImporter\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n '-p', '--path',\n default='/',\n help='Target path in Edegal (defaults to gallery root)',\n )\n parser.add_argument(\n '-c', '--root-category-id',\n type=int,\n default=0,\n help='Root category ID in Coppermine (defaults to gallery root)',\n )\n parser.add_argument(\n '-d', '--database-connection-name',\n default='coppermine',\n help=(\n 'Database connection name for Coppermine. Needs to be '\n 'configured in settings.py.'\n ),\n )\n parser.add_argument(\n '-m', '--media-root',\n default='',\n help='Will be appended to all file paths from Coppermine.',\n )\n parser.add_argument(\n '-s', '--skip-previews',\n action='store_true',\n help='Skip creating previews. 
For development only.',\n )\n\n def handle(self, *args, **options):\n CoppermineImporter(\n path=options['path'],\n root_category_id=options['root_category_id'],\n connection_name=options['database_connection_name'],\n create_previews=not options['skip_previews'],\n media_root=options['media_root'],\n ).run()\n","sub_path":"backend/edegal/management/commands/import_coppermine.py","file_name":"import_coppermine.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"481886388","text":"# Displays relevant records, awards and references for a given player based on his/her information\r\n\r\nimport pickle\r\nfrom jinja2 import Environment, FileSystemLoader\r\nimport pandas as pd\r\nimport ast\r\n\r\ndef getData(row):\r\n\t# Translation/Transliteration of records, references and awards of a player's row\r\n\tplayer_name = row.Player_Name_Telugu.values[0]\r\n\tall_records = row.records_telugu.values[0]\r\n\ttest_records = row.test_records_telugu.values[0]\r\n\todi_records = row.odi_records_telugu.values[0]\r\n\tt20i_records = row.t20i_records_telugu.values[0]\r\n\tgender = row.Gender.values[0]\r\n\treferences = ast.literal_eval(row.References.values[0])\r\n\tawards = str(row.awards_telugu.values[0])\r\n\r\n\tif(all_records !='[]'):\r\n\t\ttry:\r\n\t\t\tall_records = ast.literal_eval(all_records)\r\n\t\texcept:\r\n\t\t\tall_records = ast.literal_eval(row.records.values[0])\r\n\tif(test_records != '[]'):\r\n\t\ttry:\r\n\t\t\ttest_records = ast.literal_eval(test_records)\r\n\t\texcept:\r\n\t\t\ttest_records = ast.literal_eval(row.test_records.values[0])\r\n\tif(odi_records != '[]'):\r\n\t\ttry:\r\n\t\t\todi_records = ast.literal_eval(odi_records)\r\n\t\texcept:\r\n\t\t\todi_records = ast.literal_eval(row.odi_records.values[0])\t\r\n\tif(t20i_records != '[]'):\r\n\t\ttry:\r\n\t\t\tt20i_records = ast.literal_eval(t20i_records)\r\n\t\texcept:\r\n\t\t\tt20i_records = ast.literal_eval(row.t20i_records.values[0])\r\n\tif(awards != 'nan'):\r\n\t\ttry:\r\n\t\t\tawards = awards.split(',, ')\r\n\t\texcept:\r\n\t\t\tawards = ast.literal_eval(row.AWARDS.values[0])\r\n\r\n\t# Data dictionary \r\n\tdata = {\r\n\t\t\r\n\t\t'player_name':player_name,\r\n\t\t'all_records': all_records, \r\n\t\t'test_records': test_records, \r\n\t\t'odi_records': odi_records,\r\n\t\t't20i_records': t20i_records,\r\n\t\t'gender': gender,\r\n\t\t'awards': awards,\r\n\t\t'references': references\r\n\t }\r\n\r\n\treturn data\r\n\r\n# Returns a url corresponding to details related to given player's records\r\ndef get_matches_ref(matches_ref, player_name):\r\n\trequired_ref = [r for r in matches_ref if \"records\" in r]\r\n\tif len(required_ref) == 0:\r\n\t\treturn ''\r\n\treturn \"[\" + required_ref[0] + \" \" + player_name + \" రికార్డులు]\"\r\n\r\ncricketDF = pd.DataFrame()\r\nwith open('../data_collection/data/final_cricket_players_translated_dataset_with_images.pkl', 'rb') as f:\r\n cricketDF = pickle.load(f)\r\n cricketDF.fillna(value=\"nan\", inplace=True)\r\n\r\n# Takes a player id as argument and returns corresponding template string associated with records, awards, references for that player\r\ndef main5(_id):\r\n\tfile_loader = FileSystemLoader('./')\r\n\tenv = Environment(loader=file_loader)\r\n\ttemplate = env.get_template('records.j2')\r\n\r\n\tfunc_dict = {\r\n\t\t\"get_matches_ref\": get_matches_ref\r\n\t}\r\n\ttemplate.globals.update(func_dict)\r\n\t\r\n\tcricketDF.rename(columns={'Records' : 'records'}, 
inplace=True)\r\n\tcricketDF.rename(columns={'Test Records' : 'test_records'}, inplace=True)\r\n\tcricketDF.rename(columns={'ODI Records' : 'odi_records'}, inplace=True)\r\n\tcricketDF.rename(columns={'T20I Records' : 't20i_records'}, inplace=True)\r\n\trow = cricketDF.loc[cricketDF['Cricinfo_id'] == _id]\r\n\treturn template.render(getData(row))\r\n\r\n","sub_path":"final_templates/render_records.py","file_name":"render_records.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"277209162","text":"N = int(input())\n\nsmall = 0\nfor i in range(0, N):\n tmp = 0\n nlist = list(map(int, str(i)))\n tmp += i + sum(nlist)\n if tmp == N:\n small = i\n break\nif small == 0:\n print(0)\nelse:\n print(small)\n","sub_path":"12주차 부르트포스,위클리/백준/분해합/김재환.py","file_name":"김재환.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"319572078","text":"\n# ************************************************************************\n# File: scrapeyelp.py\n# Purpose: scrape yelp business infos, and takes use of ''yelpreviews.py'' mainly\n# Author: Houdong Hu\n# ************************************************************************\n\n\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\nfrom xml.sax.saxutils import unescape\nfrom unicodedata import normalize\nfrom yelpreviews import *\nimport json\nimport sys\nimport os\nimport gzip\nimport time\nTIME_FORMAT = '%a %b %d %H:%M:%S %Y'\nimport logging\nlogging.basicConfig(format = \"[%(asctime)s] %(message)s\",\n datefmt='%a %b %d %H:%M:%S %Y',\n level = logging.DEBUG)\n\nclass Logger:\n def __init__(self, writer=sys.stderr):\n self._writer = writer\n\n def __del__(self):\n self._writer.close()\n\n def info(self, *args):\n print >>self._writer, \"[{0}] {1}\".format(\n time.strftime(TIME_FORMAT),\n \" \".join([str(msg) for msg in args]))\n\ndef loadConfig(confFile):\n with open(confFile) as f:\n lines = f.readlines()\n pairs = [line.strip().split(\":\",1) for line in lines]\n strJSON = \",\\n\".join(['\"{0}\": {1}'.format(\n pair[0].replace('\"','').replace(\"'\",''),\n pair[1].replace(\"'\",'\"')) for pair in pairs])\n conf = json.loads(\"{\" + strJSON + \"}\")\n\n return conf\n\ndef getyelplinks(conf):\n\n if \"links_folder\" not in conf:\n logging.info(\"no links_folder provided\")\n exit(1)\n else:\n links_folder = conf[\"links_folder\"]\n if not os.path.exists(links_folder):\n os.makedirs(links_folder)\n \n dirname=conf[\"links_folder\"]\n os.chdir(dirname)\n if not os.path.exists(\"logs\"):\n os.makedirs(\"logs\")\n \n logFile = \"logs/yelp_reviews_\" + time.strftime(\"%Y.%m.%d_%H.%M.%S\") + \".log.gz\"\n logger = Logger(gzip.open(logFile,\"wb\"))\n\n filename=\"{0}_page_{1}_{2}\".format(conf[\"links_folder\"],conf[\"getlinks_pagestart\"],conf[\"getlinks_pageend\"])\n\n with open(filename,'w') as outputfile:\n logging.info(\"Create file \"+filename)\n print >> outputfile, \"bizID|yelpurl\"\n biz=getlinks(conf,logger,outputfile)\n biz.getbizlinks()\n\n logger.info(\"Done\")\n\ndef getyelpreviews(conf):\n\n if \"reviews_folder\" not in conf:\n logging.info(\"no reviews_folder provided\")\n exit(1)\n else:\n reviews_folder = conf[\"reviews_folder\"]\n if not os.path.exists(reviews_folder):\n os.makedirs(reviews_folder)\n \n dirname=conf[\"reviews_folder\"]\n os.chdir(dirname)\n if not os.path.exists(\"logs\"):\n os.makedirs(\"logs\")\n \n logFile = \"logs/yelp_reviews_\" 
+ time.strftime(\"%Y.%m.%d_%H.%M.%S\") + \".log.gz\"\n    logger = Logger(gzip.open(logFile,\"wb\"))\n\n    with open(\"../\"+conf[\"links_file\"]) as links:\n\n        count=0 \n        outputfile = None \n\n        links.readline()\n        for i in range(conf[\"getreviews_bizstart\"]-1):\n            links.readline()\n\n        for link in links:\n            items=link.rstrip().split(\"|\")\n            bizID=items[0]\n            link=items[1]\n            reviews=getreviews(link,logger,bizID)\n            reviews.getbizinfo()\n            reviews.getbizreviews()\n            \n            if count >= conf['reviews_venue_per_file']:\n                count = 0\n                if outputfile:\n                    outputfile.close()\n\n            count += 1\n\n            if count == 1:\n                outputfile = gzip.open(\"{0}_{1:010d}.gz\".format(dirname, int(bizID)), \"wb\")\n                logging.info(\"Create file \"+\"{0}_{1:010d}.gz\".format(dirname, int(bizID)))\n                print >> outputfile,\"bizID|rating|date|review\"\n            \n            reviews.dumpbizreviews(outputfile)\n\n        outputfile.close()\n    \n    logger.info(\"Done\")\n\ndef getyelpinfos(conf):\n\n    if \"infos_folder\" not in conf:\n        logging.info(\"no infos_folder provided\")\n        exit(1)\n    else:\n        infos_folder = conf[\"infos_folder\"]\n        if not os.path.exists(infos_folder):\n            os.makedirs(infos_folder)\n\n    dirname=conf[\"infos_folder\"]\n    os.chdir(dirname)\n    if not os.path.exists(\"logs\"):\n        os.makedirs(\"logs\")\n    \n    logFile = \"logs/yelp_infos_\" + time.strftime(\"%Y.%m.%d_%H.%M.%S\") + \".log.gz\"\n    logger = Logger(gzip.open(logFile,\"wb\"))\n\n    with open(\"../\"+conf[\"links_file\"]) as links:\n\n        count=0 \n        outputfile = None \n\n        links.readline()\n        for i in range(conf[\"getinfos_bizstart\"]-1):\n            links.readline()\n\n        for link in links:\n\n            items=link.rstrip().split(\"|\")\n            bizID=items[0]\n            link=items[1]\n            infos=getinfos(link,logger,bizID,conf)\n            infos.getbizinfos()\n            \n            if count >= conf['infos_venue_per_file']:\n                count = 0\n                if outputfile:\n                    outputfile.close()\n\n            count += 1\n            if count == 1:\n                outputfile = gzip.open(\"{0}_{1:010d}.gz\".format(dirname, int(bizID)), \"wb\")\n                logging.info(\"Create file \"+\"{0}_{1:010d}.gz\".format(dirname, int(bizID)))\n                print >> outputfile, \"bizID|yelpurl|name|numreview|rating|street|city|state|zip|phone|url|categories\"\n            \n            infos.dumpbizinfos(outputfile)\n\n        outputfile.close()\n    \n    logger.info(\"Done\")\n\n\nif __name__ == '__main__':\n\n    if len(sys.argv) < 2:\n        print >>sys.stderr, \"Function name and configuration file are not given.\"\n        exit(1)\n\n    if len(sys.argv) < 3:\n        print >>sys.stderr, \"Configuration file not given.\"\n        exit(1)\n\n    conf = loadConfig(sys.argv[2])\n\n    funcName = sys.argv[1]\n    allVars = locals()\n    if funcName in allVars:\n        allVars[funcName](conf)\n    else:\n        logging.info(\"Can't find function `{0}'\".format(funcName))\n    \n","sub_path":"scrapeyelp.py","file_name":"scrapeyelp.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"525785143","text":"\n@nrp.MapSpikeSource(\"input_neuron\", nrp.brain.neurons[0], nrp.dc_source)\n@nrp.MapRobotSubscriber('model_states', Topic('/gazebo/model_states', gazebo_msgs.msg.ModelStates))\n@nrp.Robot2Neuron()\ndef set_neuron_rate(t, input_neuron, model_states):\n    states = model_states.value\n    try:\n        if t > 1.3:\n            input_neuron.voltage = 10\n        clientLogger.info(\"Neuron Input potential: {}\".format(input_neuron.voltage))\n    except ValueError:\n        input_neuron.voltage = 0.0\n","sub_path":"motion_challenge/old/robot2brain.py","file_name":"robot2brain.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
+{"seq_id":"90332782","text":"import json\nimport datetime\nimport unittest\n\nimport sqlalchemy as sa\nfrom marshmallow.utils import isoformat\n\nfrom tests import factories\nfrom tests.common import ApiBaseTest\n\nfrom webservices import rest\nfrom webservices import schemas\nfrom webservices.rest import api\nfrom webservices.rest import CandidateNameSearch\nfrom webservices.rest import CommitteeNameSearch\nfrom webservices.resources.totals import TotalsView\nfrom webservices.resources.reports import ReportsView\nfrom webservices.resources.candidates import CandidateList\n\n\nclass OverallTest(ApiBaseTest):\n # Candidate\n def test_header_info(self):\n response = self._response(api.url_for(CandidateList))\n self.assertIn('api_version', response)\n self.assertIn('pagination', response)\n\n def test_full_text_search(self):\n candidate = factories.CandidateFactory(name='Josiah Bartlet')\n factories.CandidateSearchFactory(\n id=candidate.candidate_id,\n fulltxt=sa.func.to_tsvector('Josiah Bartlet'),\n )\n rest.db.session.flush()\n results = self._results(api.url_for(CandidateList, q='bartlet'))\n self.assertEqual(len(results), 1)\n self.assertIn('josiah', results[0]['name'].lower())\n\n def test_full_text_search_with_whitespace(self):\n candidate = factories.CandidateFactory(name='Josiah Bartlet')\n factories.CandidateSearchFactory(\n id=candidate.candidate_id,\n fulltxt=sa.func.to_tsvector('Josiah Bartlet'),\n )\n rest.db.session.flush()\n results = self._results(api.url_for(CandidateList, q='bartlet josiah'))\n self.assertEqual(len(results), 1)\n self.assertIn('josiah', results[0]['name'].lower())\n\n def test_full_text_no_results(self):\n results = self._results(api.url_for(CandidateList, q='asdfasdf'))\n self.assertEquals(results, [])\n\n def test_cycle_filter(self):\n factories.CandidateFactory(cycles=[1986, 1988])\n factories.CandidateFactory(cycles=[2000, 2002])\n results = self._results(api.url_for(CandidateList, cycle=1988))\n self.assertEqual(len(results), 1)\n for result in results:\n self.assertIn(1988, result['cycles'])\n results = self._results(api.url_for(CandidateList, cycle=[1986, 2002]))\n self.assertEqual(len(results), 2)\n cycles = set([1986, 2002])\n for result in results:\n self.assertTrue(cycles.intersection(result['cycles']))\n\n def test_per_page_defaults_to_20(self):\n [factories.CandidateFactory() for _ in range(40)]\n results = self._results(api.url_for(CandidateList))\n self.assertEquals(len(results), 20)\n\n def test_per_page_param(self):\n [factories.CandidateFactory() for _ in range(20)]\n results = self._results(api.url_for(CandidateList, per_page=5))\n self.assertEquals(len(results), 5)\n\n def test_invalid_per_page_param(self):\n results = self.app.get(api.url_for(CandidateList, per_page=-10))\n self.assertEquals(results.status_code, 422)\n results = self.app.get(api.url_for(CandidateList, per_page=34.2))\n self.assertEquals(results.status_code, 422)\n results = self.app.get(api.url_for(CandidateList, per_page='dynamic-wombats'))\n self.assertEquals(results.status_code, 422)\n\n def test_page_param(self):\n [factories.CandidateFactory() for _ in range(20)]\n page_one_and_two = self._results(api.url_for(CandidateList, per_page=10, page=1))\n page_two = self._results(api.url_for(CandidateList, per_page=5, page=2))\n self.assertEqual(page_two[0], page_one_and_two[5])\n for itm in page_two:\n self.assertIn(itm, page_one_and_two)\n\n @unittest.skip('We are just showing one year at a time, this would be a good feature '\n 'for /candidate/ but it is not a priority 
right now')\n def test_multi_year(self):\n # testing search\n response = self._results('/candidate?candidate_id=P80003338&year=2012,2008')\n # search listing should aggregate years\n self.assertIn('2008, 2012', response)\n # testing single resource\n response = self._results('/candidate/P80003338?year=2012,2008')\n elections = response[0]['elections']\n self.assertEquals(len(elections), 2)\n\n @unittest.skip('This is not a great view anymore')\n def test_multiple_cmtes_in_detail(self):\n response = self._results('/candidate/P80003338/committees')\n self.assertEquals(len(response[0]), 11)\n self.assertEquals(response['pagination']['count'], 11)\n\n def test_totals_house_senate(self):\n committee = factories.CommitteeFactory(committee_type='H')\n committee_id = committee.committee_id\n factories.CommitteeHistoryFactory(committee_id=committee_id, committee_type='H')\n [\n factories.TotalsHouseSenateFactory(committee_id=committee_id, cycle=2008),\n factories.TotalsHouseSenateFactory(committee_id=committee_id, cycle=2012),\n ]\n response = self._results(api.url_for(TotalsView, committee_id=committee_id))\n self.assertEqual(len(response), 2)\n self.assertEqual(response[0]['cycle'], 2012)\n self.assertEqual(response[1]['cycle'], 2008)\n\n def _check_reports(self, committee_type, factory, schema):\n committee = factories.CommitteeFactory(committee_type=committee_type)\n factories.CommitteeHistoryFactory(\n committee_id=committee.committee_id,\n committee_key=committee.committee_key,\n committee_type=committee_type,\n )\n end_dates = [datetime.datetime(2012, 1, 1), datetime.datetime(2008, 1, 1)]\n committee_id = committee.committee_id\n committee_key = committee.committee_key\n rest.db.session.flush()\n [\n factory(\n committee_id=committee_id,\n committee_key=committee_key,\n coverage_end_date=end_date,\n )\n for end_date in end_dates\n ]\n response = self._results(api.url_for(ReportsView, committee_id=committee_id))\n self.assertEqual(len(response), 2)\n self.assertEqual(response[0]['coverage_end_date'], isoformat(end_dates[0]))\n self.assertEqual(response[1]['coverage_end_date'], isoformat(end_dates[1]))\n assert response[0].keys() == schema().fields.keys()\n\n # TODO(jmcarp) Refactor as parameterized tests\n def test_reports(self):\n self._check_reports('H', factories.ReportsHouseSenateFactory, schemas.CommitteeReportsHouseSenateSchema)\n self._check_reports('S', factories.ReportsHouseSenateFactory, schemas.CommitteeReportsHouseSenateSchema)\n self._check_reports('P', factories.ReportsPresidentialFactory, schemas.CommitteeReportsPresidentialSchema)\n self._check_reports('X', factories.ReportsPacPartyFactory, schemas.CommitteeReportsPacPartySchema)\n\n def test_reports_committee_not_found(self):\n resp = self.app.get(api.url_for(ReportsView, committee_id='fake'))\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data.decode('utf-8'))\n self.assertIn('not found', data['message'].lower())\n\n @unittest.skip(\"Not implementing for now.\")\n def test_total_field_filter(self):\n results_disbursements = self._results('/committee/C00347583/totals?fields=disbursements')\n results_recipts = self._results('/committee/C00347583/totals?fields=total_receipts_period')\n\n self.assertIn('disbursements', results_disbursements[0]['totals'][0])\n self.assertIn('total_receipts_period', results_recipts[0]['reports'][0])\n self.assertNotIn('reports', results_disbursements[0])\n self.assertNotIn('totals', results_recipts[0])\n\n def 
test_totals_committee_not_found(self):\n resp = self.app.get(api.url_for(TotalsView, committee_id='fake'))\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(resp.content_type, 'application/json')\n data = json.loads(resp.data.decode('utf-8'))\n self.assertIn('not found', data['message'].lower())\n\n def test_total_cycle(self):\n committee = factories.CommitteeFactory(committee_type='P')\n committee_id = committee.committee_id\n history = factories.CommitteeHistoryFactory(committee_id=committee_id, committee_type='P')\n receipts = 5\n totals = factories.TotalsPresidentialFactory(cycle=2012, committee_id=committee_id, receipts=receipts)\n\n results = self._results(api.url_for(TotalsView, committee_id=committee.committee_id))\n self.assertEqual(results[0]['receipts'], totals.receipts)\n\n # Typeahead name search\n def test_typeahead_candidate_search(self):\n [\n factories.CandidateSearchFactory(\n name='Bartlet {0}'.format(idx),\n fulltxt=sa.func.to_tsvector('Bartlet for America {0}'.format(idx)),\n )\n for idx in range(30)\n ]\n rest.db.session.flush()\n results = self._results(api.url_for(CandidateNameSearch, q='bartlet'))\n self.assertEqual(len(results), 20)\n ids = [r['id'] for r in results if r['id']]\n self.assertEqual(len(ids), len(set(ids)))\n for each in results:\n self.assertIn('bartlet', each['name'].lower())\n\n def test_typeahead_committee_search(self):\n [\n factories.CommitteeSearchFactory(\n name='Bartlet {0}'.format(idx),\n fulltxt=sa.func.to_tsvector('Bartlet for America {0}'.format(idx)),\n )\n for idx in range(30)\n ]\n rest.db.session.flush()\n results = self._results(api.url_for(CommitteeNameSearch, q='bartlet'))\n self.assertEqual(len(results), 20)\n ids = [r['id'] for r in results if r['id']]\n self.assertEqual(len(ids), len(set(ids)))\n for each in results:\n self.assertIn('bartlet', each['name'].lower())\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":10029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"211888687","text":"# Pony Entities constants.\n\nTABLES_MODULE = 'mysql_data_entities'\n\nAPPS_TABLE_CLASS_NAME = 'Application'\nUSERS_TABLE_CLASS_NAME = 'UserInformation'\nEMAILS_TABLE_CLASS_NAME = 'Email'\nREDIS_CHANGES_CLASS_NAME = 'ChangesTable'\nTRAINING_DATA_TABLE_CLASS_NAME = 'TrainingData'\nPOLICY_DATA_TABLE_CLASS_NAME = 'Policy'\nDEVICE_INFORMATION_TABLE_CLASS_NAME = 'DeviceInformation'\nPGP_KEYS_DATA_TABLE_CLASS_NAME = 'PgpKeysData'\nWEB_RESOURCE_TABLE_CLASS_NAME = 'WebResource'\nPROVIDER_USERS_TABLE_CLASS_NAME = 'ProviderUser'\n\nMYSQL_APPS_TABLE_NAME = 'Applications'\nMYSQL_EMAILS_TABLE_NAME = 'Emails'\nMYSQL_PGP_KEYS_TABLE_NAME = 'PgpKeysData'\nMYSQL_USERS_TABLE_NAME = 'Profiles'\nMYSQL_TRAINING_DATA_TABLE_NAME = 'TrainingData'\nMYSQL_CHANGES_TABLE_NAME = 'UILog'\nMYSQL_POLICIES_TABLE_NAME = 'Policies'\nMYSQL_DEVICE_INFORMATION_TABLE_NAME = 'UserServices'\nMYSQL_WEB_RESOURCES_TABLE_NAME = 'WebResources'\nMYSQL_PROVIDER_USERS_TABLE_NAME = 'ProviderUsers'\n# Identification Data Tables\nMYSQL_IDENTIFICATION_USER_HASH_TABLE_NAME = 'IdentificationUsersBucketsData'\nMYSQL_IDENTIFICATION_HASH_DATA_TABLE_NAME = 'IdentificationHashData'\n\n# Redis Constants\nREDIS_BIOMIO_GENERAL_CHANNEL = 'biomio_general:%s'\nREDIS_APP_AUTH_KEY = REDIS_BIOMIO_GENERAL_CHANNEL % 'auth:%s'\nREDIS_APP_CONNECTION_KEY = 'connection:%s'\nREDIS_APPLICATION_KEY = 'application:%s'\nREDIS_USER_KEY = 'user:%s'\nREDIS_EMAILS_KEY = 'email:%s'\nREDIS_PGP_DATA_KEY = 
'pgp_data:%s'\nREDIS_SESSION_KEY = 'token:%s'\nREDIS_JOB_RESULT_KEY = REDIS_BIOMIO_GENERAL_CHANNEL % 'job_result:%s:%s'\nREDIS_DO_NOT_STORE_RESULT_KEY = 'do_not_store:%s'\nREDIS_PROBE_RESULT_KEY = REDIS_BIOMIO_GENERAL_CHANNEL % 'probe_result:%s'\nREDIS_RESULTS_COUNTER_KEY = 'job_results_counter:%s'\nREDIS_PARTIAL_RESULTS_KEY = 'job_results_gatherer:%s'\nREDIS_JOB_RESULTS_ERROR = 'job_results_error:%s'\nREDIS_ACTIVE_DEVICES_KEY = 'active_devices_relations:%s'\nREDIS_UPDATE_TRAINING_KEY = 'update_training:%s'\nREDIS_RESOURCES_KEY = 'device_resources:%s'\nREDIS_USER_POLICY_KEY = 'policy:%s'\nREDIS_USER_CONDITION_KEY = 'auth_condition:%s'\nREDIS_DEVICE_INFORMATION_KEY = 'device_info:%s'\nREDIS_WEB_RESOURCE_KEY = 'web_resource:%s'\nREDIS_PROVIDER_USER_KEY = 'provider_user:%s'\n\nREDiS_TRAINING_RETRIES_COUNT_KEY = 'training_retries_count:%s'\nREDIS_VERIFICATION_RETIES_COUNT_KEY = 'verification_retries_count:%s'\n\nREDIS_ACTIVE_PROBE_DEVICES = 'active_probes_list'\nREDIS_ACTIVE_CLIENT_CONNECTIONS = 'active_clients_list'\n\nREDIS_IDENTIFICATION_USERS_DATA_KEY = 'identification_users_data:%s'\nREDIS_IDENTIFICATION_HASH_DATA_KEY = 'identification_hash_data:%s'\n\n# Other constants\nREDIS_CONFIG_MAX_MEMORY_OPTION_KEY = 'maxmemory'\nREDIS_CONFIG_MEMORY_SAMPLES_OPTION_KEY = 'maxmemory-samples'\nREDIS_CONFIG_EVICTION_POLICY_OPTION_KEY = 'maxmemory-policy'\n\nMODULES_CLASSES_BY_TABLE_NAMES = {\n MYSQL_APPS_TABLE_NAME: dict(\n module_name='biomio.protocol.data_stores.application_data_store',\n class_name='ApplicationDataStore'\n ),\n MYSQL_PGP_KEYS_TABLE_NAME: dict(\n module_name='biomio.protocol.data_stores.pgp_keys_data_store',\n class_name='PgpKeysDataStore'\n )\n}\n\n# REST Commands\nREST_VERIFY_COMMAND = 'verify_service/%s/%s'\nREST_CREATE_EMAIL_KEYS = 'get_user/%s'\nREST_REGISTER_BIOMETRICS = 'register_biometrics/%s/%s'\nREST_BIOAUTH_LOGIN = 'bioauth/%s/%s'\n\nTRAINING_FACE_TYPE = 'face'\nTRAINING_FINGER_TYPE = 'fingerprints'\nTRAINING_VOICE_TYPE = 'voice'\n\nTRAINING_GATE_AI_TYPES_MAP = {\n TRAINING_FACE_TYPE: 'face',\n TRAINING_FINGER_TYPE: 'fingerprints',\n TRAINING_VOICE_TYPE: 'voice'\n}\n\nAUTH_CANCELED_STATUS = 'canceled'\nAUTH_CANCELED_MESSAGE = 'Authentication was canceled'\nAUTH_FAILED_STATUS = 'failed'\nAUTH_FAILED_MESSAGE = 'Authentication failed'\nAUTH_MAX_RETRIES_STATUS = AUTH_FAILED_STATUS\nAUTH_MAX_RETRIES_MESSAGE = 'Maximum number of auth retries reached'\n\nTRAINING_CANCELED_STATUS = 'canceled'\nTRAINING_CANCELED_MESSAGE = 'Training was canceled'\nTRAINING_FAILED_STATUS = 'failed'\nTRAINING_FAILED_MESSAGE = 'Training failed. 
Try to change your location or your ' \\\n                               'device position'\nTRAINING_MAX_RETRIES_STATUS = TRAINING_FAILED_STATUS\nTRAINING_MAX_RETRIES_MESSAGE = 'Maximum number of training retries reached. Try to change your location or your ' \\\n                               'device position'\nTRAINING_STARTED_STATUS = 'in-progress'\nTRAINING_STARTED_MESSAGE = 'Training started'\nTRAINING_RETRY_STATUS = 'retry'\nTRAINING_RETRY_MESSAGE = 'Additional data is required, please check your device'\nTRAINING_SUCCESS_STATUS = 'success'\nTRAINING_SUCCESS_MESSAGE = 'Our recognition algorithm was successfully trained with your face data'\n\nTRAINING_TYPES_AI_RESPONSE = {\n    'face': '0',\n    'fingerprints': ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0'],\n    'voice': '0'\n}\n\n\nHYBRID_APP_TYPE_PREFIX = 'hybrid'\nPROBE_APP_TYPE_PREFIX = 'probe'\nGENERAL_SUBSCRIBE_PATTERN = '__keyspace*:{redis_key_pattern}'\n\n\ndef get_ai_training_response(training_type):\n    response = TRAINING_TYPES_AI_RESPONSE\n    response.update({TRAINING_GATE_AI_TYPES_MAP.get(training_type, ''): '1'})\n    return response\n","sub_path":"biomio/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"128003822","text":"import time\nimport os\nimport argparse\n\nimport cv2\nimport numpy as np\nimport tensorflow.compat.v1 as tf; tf.disable_v2_behavior()\nimport neuralgym as ng\n\nfrom inpaint_model import InpaintCAModel\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    '--flist', default='', type=str,\n    help='The filenames of image to be processed: input, mask, output.')\nparser.add_argument(\n    '--image_height', default=-1, type=int,\n    help='The height of images should be defined, otherwise batch mode is not'\n         ' supported.')\nparser.add_argument(\n    '--image_width', default=-1, type=int,\n    help='The width of images should be defined, otherwise batch mode is not'\n         ' supported.')\nparser.add_argument(\n    '--checkpoint_dir', default='', type=str,\n    help='The directory of tensorflow checkpoint.')\n\n\nif __name__ == \"__main__\":\n    ng.get_gpus(1)\n    # os.environ['CUDA_VISIBLE_DEVICES'] =''\n    args = parser.parse_args()\n\n    sess_config = tf.ConfigProto()\n    sess_config.gpu_options.allow_growth = True\n    sess = tf.Session(config=sess_config)\n\n    model = InpaintCAModel()\n    input_image_ph = tf.placeholder(\n        tf.float32, shape=(1, args.image_height, args.image_width*3, 3))\n    output = model.build_server_graph(input_image_ph)\n    output = (output + 1.) * 127.5\n    output = tf.reverse(output, [-1])\n    output = tf.saturate_cast(output, tf.uint8)\n    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n    assign_ops = []\n    for var in vars_list:\n        vname = var.name\n        from_name = vname\n        var_value = tf.contrib.framework.load_variable(\n            args.checkpoint_dir, from_name)\n        assign_ops.append(tf.assign(var, var_value))\n    sess.run(assign_ops)\n    print('Model loaded.')\n\n    with open(args.flist, 'r') as f:\n        lines = f.read().splitlines()\n    t = time.time()\n    for line in lines:\n        # for i in range(100):\n        image, mask, out = line.split()\n        base = os.path.basename(mask)\n\n        guidance = cv2.imread(image[:-4] + '_edge.jpg')\n        image = cv2.imread(image)\n        mask = cv2.imread(mask)\n        image = cv2.resize(image, (args.image_width, args.image_height))\n        guidance = cv2.resize(guidance, (args.image_width, args.image_height))\n        mask = cv2.resize(mask, (args.image_width, args.image_height))\n        # cv2.imwrite(out, image*(1-mask/255.) 
+ mask)\n        # # continue\n        # image = np.zeros((128, 256, 3))\n        # mask = np.zeros((128, 256, 3))\n\n        assert image.shape == mask.shape\n\n        h, w, _ = image.shape\n        grid = 4\n        image = image[:h//grid*grid, :w//grid*grid, :]\n        mask = mask[:h//grid*grid, :w//grid*grid, :]\n        guidance = guidance[:h//grid*grid, :w//grid*grid, :]\n        print('Shape of image: {}'.format(image.shape))\n\n        image = np.expand_dims(image, 0)\n        guidance = np.expand_dims(guidance, 0)\n        mask = np.expand_dims(mask, 0)\n        input_image = np.concatenate([image, guidance, mask], axis=2)\n\n        # load pretrained model\n        result = sess.run(output, feed_dict={input_image_ph: input_image})\n        print('Processed: {}'.format(out))\n        cv2.imwrite(out, result[0][:, :, ::-1])\n\n    print('Time total: {}'.format(time.time() - t))\n","sub_path":"gi/guided_batch_test.py","file_name":"guided_batch_test.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"55476008","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='MyUser',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('password', models.CharField(max_length=128, verbose_name='password')),\n                ('last_login', models.DateTimeField(verbose_name='last login', default=django.utils.timezone.now)),\n                ('name', models.CharField(max_length=255, unique=True)),\n                ('email', models.EmailField(max_length=255, verbose_name='email address', unique=True)),\n                ('owner_first_name', models.CharField(max_length=120)),\n                ('owner_last_name', models.CharField(max_length=120)),\n                ('is_member', models.BooleanField(verbose_name='Is Paid Member', default=False)),\n                ('is_active', models.BooleanField(default=True)),\n                ('is_admin', models.BooleanField(default=False)),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='UserProfile',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('description', models.TextField(blank=True, null=True)),\n                ('adress', models.CharField(max_length=255)),\n                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"accounts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"229047874","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pygame\nimport pygame.mixer\nfrom time import sleep\nfrom scipy import signal, misc\nfrom scipy.io import wavfile\n\ndef Play_Sound(filename):\n    pygame.mixer.init()\n    sound = pygame.mixer.Sound(filename)\n    tmp = sound.play()\n    while tmp.get_busy():\n        pygame.time.delay(1)\n\ndef Make_Two_Tone(A,fl,fh,fs,tu):\n    sample_num = int(fs*tu)\n    # the summed waveform's amplitude range grows up to 10000, so scale it by 1/2\n    tone = [ ( A*np.sin(2 * np.pi * fl * x / fs) +\n               A*np.sin(2 * np.pi * fh * x / fs) )/2 for x in range(sample_num) ]\n    return tone\n\ndef Make_Number_List(str):\n    num_list = []\n    length = len(str)\n    for idx in range(length):\n        if str[idx]=='*':\n            num_list.append(10)\n        elif str[idx]=='#':\n            num_list.append(11)\n        elif str[idx]==' ':\n            
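# a space becomes the sentinel 100; Make_Tone_List renders any value >= 12 as a rest (0 Hz tone)\n            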
num_list.append(100)\n        else:\n            num_list.append(int(str[idx]))\n    return num_list\n\ndef Make_Tone_List(num_list):\n    tone_list = []\n    length = len(num_list)\n\n    silence = np.zeros(1000) # insert short silences between tones so the tune sounds more natural\n    for idx in range(length):\n        if num_list[idx] >= 12:\n            tone = Make_Two_Tone(A, 0, 0, fs, tu)\n        else:\n            tone = Make_Two_Tone(A, freq_low[num_list[idx]], freq_high[num_list[idx]], fs, tu)\n        tone_list.append(tone)\n        tone_list.append(silence)\n    return tone_list\n\nnumber_list = [7, 4, 1, 4, 7, 7, 7, 4, 4, 4, 7, 7, 7]\n\n# 0~9, *, #\nfreq_low = [941, 697, 697, 697, 770, 770, 770, 852, 852, 852, 941, 941]\nfreq_high = [1336, 1209, 1336, 1477, 1209, 1336, 1477, 1209, 1336, 1477, 1209, 1477]\nA = 5000\nfs = 8000\ntu = 0.5\n\n\n'''\nlength = len(number_list)\nDTMF_list = []\nfor idx in range(length):\n    tone = Make_Two_Tone(A,freq_low[number_list[idx]],freq_high[number_list[idx]],fs,tu)\n    DTMF_list.append(tone)\n'''\n# school\nschool = \"99##993 99332 99##993 93231\"\nnumber_school_list = Make_Number_List(school)\nDTMF_list = Make_Tone_List(number_school_list)\n\n# same\nsame = \"139 139 ###9 666 333 2221 \"\nnumber_stone_list = Make_Number_List(same)\nDTMF_list = Make_Tone_List(number_stone_list)\n\nDTMF_list = np.concatenate(DTMF_list)\nDTMF_list = np.cast['int16'](DTMF_list)\nwavfile.write(\"DTMF.wav\",fs,DTMF_list)\nPlay_Sound(\"DTMF.wav\")","sub_path":"MultimediaProgramming/Third/DMTF-Music-Maker.py","file_name":"DMTF-Music-Maker.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"604660590","text":"import json\nimport os\nfrom ruamel.yaml import YAML\nfrom cffconvert.version import __version__ as expected_version\n\n\ndef test_version_number_cff():\n    # CITATION.cff content should have the same semver as setup.cfg / version.py\n    fixture = os.path.join(\"CITATION.cff\")\n    with open(fixture, \"r\", encoding=\"utf8\") as fid:\n        cff_contents = fid.read()\n    actual_version = YAML(typ='safe').load(cff_contents)[\"version\"]\n    assert expected_version == actual_version\n\n\ndef test_zenodo_has_no_version_number():\n    # .zenodo.json content should not have any semver information, Zenodo retrieves this automatically from the\n    # Zenodo-GitHub integration\n    fixture = os.path.join(\".zenodo.json\")\n    with open(fixture, \"r\", encoding=\"utf8\") as fid:\n        zenodojson_contents = fid.read()\n    assert \"version\" not in json.loads(zenodojson_contents).keys()\n\n\ndef test_zenodo_has_no_doi():\n    # .zenodo.json content should not have any doi information since you cannot tell Zenodo what the doi should be\n    fixture = os.path.join(\".zenodo.json\")\n    with open(fixture, \"r\", encoding=\"utf8\") as fid:\n        zenodojson_contents = fid.read()\n    assert \"doi\" not in json.loads(zenodojson_contents).keys()\n\n\ndef test_zenodo_has_no_date_published():\n    # .zenodo.json content should not have any date information, the Zenodo-GitHub integration assigns\n    # the date as today's date\n    fixture = os.path.join(\".zenodo.json\")\n    with open(fixture, \"r\", encoding=\"utf8\") as fid:\n        zenodojson_contents = fid.read()\n    assert \"publication_date\" not in json.loads(zenodojson_contents).keys()\n","sub_path":"test/test_consistent_versioning.py","file_name":"test_consistent_versioning.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"250495680","text":"class Solution(object):\n    def search(self, nums, target):\n        \"\"\"\n        :type nums: 
List[int]\n        :type target: int\n        :rtype: int\n        \"\"\"\n        left, right = 0, len(nums)-1\n        while left < right:\n            mid = (left + right) // 2\n            # the XOR of these three tests picks the half of the rotated array that can hold target\n            if (nums[0] > target) ^ (nums[0] > nums[mid]) ^ (target > nums[mid]):\n                left = mid+1\n            else: right = mid\n        return left if target in nums[left:left+1] else -1\n\n\n\nif __name__ == \"__main__\":\n    solution = Solution()\n    a = solution.search([5,1,3],1)\n    print(a)","sub_path":"033_SearchinRotatedSortedArray.py","file_name":"033_SearchinRotatedSortedArray.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"509423545","text":"#==================================================================\n#Copyright (C) 6/27/2014 Blender Sensei (Seth Fentress)\n#\n#This program is free software: you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n#\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n#\n#You should have received a copy of the GNU General Public License\n#along with this program. If not, see <http://www.gnu.org/licenses/>.\n#====================================================================\n#To report a bug or suggest a feature visit:\n#blendersensei.com/forums/addons\n#VISIT Blendersensei.com for more information.\n#SUBSCRIBE at Youtube.com/Blendersensei\n#====================================================================\n\n\nbl_info = { \n    \"name\": \"Draw Mesh\", \n    \"author\": \"Blender Sensei\", \n    \"location\": \"Add Mesh Menu (Shift-A/Mesh/DrawMesh)\", \n    \"description\": \"An object to use with BSurfaces.\", \n    \"wiki_url\": \"http://blendersensei.com\", \n    \"category\": \"Sensei Format\"} \n\nimport bpy\n\ndef add_to_menu(self, context):\n    self.layout.operator(\"object.draw_mesh\", icon = \"LINE_DATA\")\n\nclass drawMesh(bpy.types.Operator):\n    bl_idname = \"object.draw_mesh\"\n    bl_label = \"Draw Mesh\"\n    bl_description = \"Hold V to draw and Shift-V to create mesh. 
Make sure strokes are in order.\"\n \n def execute(self, context):\n #Add cube and name it DrawMesh\n bpy.ops.mesh.primitive_cube_add()\n ob = bpy.context.active_object\n ob.name = \"DrawMesh\"\n \n #Turn on xray and merge cube into single vert\n ob.show_x_ray = True\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.merge(type='CENTER')\n \n #Add solidify modifier\n bpy.ops.object.modifier_add(type='SOLIDIFY')\n sol = bpy.context.object.modifiers[\"Solidify\"]\n sol.thickness = 0.045\n sol.offset = 1\n \n #Add subsurf\n bpy.ops.object.modifier_add(type='SUBSURF')\n sub = bpy.context.object.modifiers[\"Subsurf\"]\n sub.show_on_cage = True\n sub.levels = 1\n sub.render_levels = 3\n \n #Add grease pencil layer and set to surface draw mode.\n bpy.ops.gpencil.data_add()\n bpy.context.active_object.grease_pencil.draw_mode = 'SURFACE'\n \n return{'FINISHED'}\n \n \n#REGISTRATION AND UNREGISTRATION FOR ADDON================== \ndef register():\n bpy.utils.register_module(__name__) \n bpy.types.INFO_MT_mesh_add.append(add_to_menu)\n \ndef unregister():\n bpy.utils.unregister_module(__name__) \n bpy.types.INFO_MT_mesh_add.remove(add_to_menu)\n \nif __name__ == \"__main__\":\n register()","sub_path":"Addon/senseiformat/BlenderVersionNumber/scripts/addons/Draw_Mesh.py","file_name":"Draw_Mesh.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"418066993","text":"import random\nimport time\n\n\ndef typing_game():\n words = ['장금이', '조선시대', '경복궁', \"중전마마\", \"투호\", \"윷놀이\", \"대장금\", \"기미상궁\"]\n\n random.shuffle(words)\n start_time = time.time()\n minus = 0\n time.sleep(1)\n print('='*50)\n print('다음 게임은 타자 연습이다.')\n time.sleep(1)\n print('제시된 단어를 똑같이 타이핑하거라. 테스트는 총 5번 진행된다.')\n time.sleep(1)\n print('오타 발생 시 점수가 깎이니 빠르고 정확하게 입력해야한다.')\n time.sleep(1)\n print('그럼 시작!')\n time.sleep(1)\n print('='*50)\n for word in range(5):\n while True:\n user_input = input(\"%s: \" % words[word])\n if (user_input == words[word]):\n break\n else:\n minus += 10\n break\n\n end_time = time.time()\n time.sleep(2)\n print('='*50)\n print(\"걸린 시간: %2f초\" % (end_time - start_time))\n print(\"감점: %d\" % minus)\n score = (100/(end_time - start_time))-minus\n print(\"획득 점수: %d\" % (score))\n time.sleep(2)\n if score > 10:\n print('='*50)\n print('잘했다. 재료를 건네주마')\n return True\n else:\n print('='*50)\n print('그 실력으로 내 김치를 가져갈 생각을 하다니')\n time.sleep(1)\n return False\n","sub_path":"typinggame.py","file_name":"typinggame.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"198245861","text":"'''\r\nCreated on 20 nov. 2019\r\n \r\n@author: jordane\r\n'''\r\n\r\nfrom multiprocessing import Queue, Process, Lock\r\nimport time\r\n\r\nfrom bloom import __utils__ as Utils\r\nfrom game.board import Reversi\r\nfrom intelligence.heuristics import eval\r\n\r\n__MaxAllowedTimeInSeconds__ = 7\r\n\r\n\r\nclass AlphaBeta:\r\n \r\n \r\n '''Class Implementing an AB-Pruning for reversi with 2 experimental options.\r\n Both of them are currently not used, because we are\r\n losing a bit of performance regarding the result we will obtain. 
The parallelization\r\n    will also weaken the pruning, because the alpha and beta values are not shared \r\n    across processes.\r\n    Concerning BloomCheckerFirst, the goal was to instantiate every board we saw\r\n    with a sufficiently high heuristic value while walking the AB tree.\r\n    Unfortunately, with the current implementation, we instantiate these boards even\r\n    when they may lie on a wrong path.\r\n    '''\r\n    def __init__(self):\r\n        self.startTime = 0\r\n        \r\n        self.synch_alpha = AlphaBeta.__alpha__()\r\n        self.synch_alpha_locker = Lock()\r\n        self.synch_beta = AlphaBeta.__beta__()\r\n        self.synch_beta_locker = Lock()\r\n        \r\n    def __reset__(self):\r\n        self.startTime = 0\r\n        \r\n        \r\n        self.synch_alpha = AlphaBeta.__alpha__()\r\n        self.synch_alpha_locker = Lock()\r\n        self.synch_beta = AlphaBeta.__beta__()\r\n        self.synch_beta_locker = Lock()\r\n        \r\n        \r\n    @classmethod\r\n    def __synch_update_alpha__(self, newVal):\r\n        self.synch_alpha_locker.acquire()\r\n        if(newVal < self.synch_alpha):\r\n            self.synch_alpha = newVal\r\n        self.synch_alpha_locker.release()\r\n        return self.synch_alpha\r\n    \r\n    @classmethod\r\n    def __synch_update_beta__(self, newVal):\r\n        self.synch_beta_locker.acquire()\r\n        if(newVal > self.synch_beta):\r\n            self.synch_beta = newVal\r\n        self.synch_beta_locker.release()\r\n        return self.synch_beta\r\n    \r\n    def __get_synch_beta__(self):\r\n        return self.synch_beta\r\n    \r\n    def __get_synch_alpha__(self):\r\n        return self.synch_alpha\r\n\r\n    @staticmethod\r\n    def __alpha__():\r\n        return -float('Inf')\r\n    \r\n    @staticmethod\r\n    def __beta__():\r\n        return float('Inf')\r\n    \r\n    @classmethod\r\n    def __minValueForInstanciation__(self):\r\n        \"\"\"Deprecated.\r\n        The heuristic score required to instantiate a board in the bloom filter\"\"\"\r\n        return 99.95\r\n\r\n    \r\n#    def AlphaBetaWrapper(player, InitDepth = 0, MaxDepth = 8, Parallelization = False):\r\n#        player._maxDepth = MaxDepth\r\n#        return player.MaxAlphaBeta(InitDepth, AlphaBeta.__alpha__(), AlphaBeta.__beta__(), Parallelization)\r\n    \r\n    @staticmethod\r\n    def __CopyCurrentBoard__(player):\r\n        \"\"\"Function used to copy the current Reversi board and work over a \r\n        copy. 
Mainly used for multithreading\"\"\"\r\n res = Reversi.Board(player._board._boardsize) \r\n \r\n for x in range(0,res._boardsize,1):\r\n for y in range(0,res._boardsize,1):\r\n res._board[y][x] = player._board._board[y][x]\r\n \r\n return res\r\n \r\n \r\n def __alpha_beta_main_wrapper__(self,\r\n player, \r\n depth = 3, #nb pair svp\r\n BloomCheckerFirst = False, \r\n Parallelization = False\r\n ):\r\n \"\"\"function that will organize the AB-pruning depending of the options enabled\"\"\"\r\n moves = player._board.legal_moves()\r\n\r\n startTime = time.time()\r\n return_move = None\r\n bestscore = AlphaBeta.__alpha__()\r\n q = Queue()\r\n process_list = []\r\n\r\n\r\n for m in moves:\r\n move = m \r\n \r\n if(BloomCheckerFirst):\r\n player._board.push(m)\r\n hashValue = Utils.HashingOperation.BoardToHashCode(player._board)\r\n player._board.pop() \r\n res_contain = player._bloomTable.__contains__(key=hashValue)\r\n if(res_contain):\r\n bestscore = AlphaBeta.__minValueForInstanciation__()\r\n print(\"Find a table with the corresponding move, returning it\", bestscore) \r\n return_move = m\r\n return (AlphaBeta.__minValueForInstanciation__(), m)\r\n #remove the element from the bloom filter\r\n #auto return move ?\r\n \r\n \r\n if(Parallelization): \r\n# if __name__ == '__main__':\r\n# freeze_support() \r\n proc = Process(target=self.alphaBetaParallelizationWrapper, args=(player, depth, AlphaBeta.__alpha__(), AlphaBeta.__beta__(), m, q, BloomCheckerFirst))\r\n proc.start()\r\n process_list.append(proc)\r\n else:\r\n\r\n (score) = self.alphaBetaNoParallelizationWrapper(player, depth, bestscore, AlphaBeta.__beta__(), m, BloomCheckerFirst)\r\n \r\n if score > bestscore:\r\n bestscore = score\r\n return_move = move\r\n if bestscore >= AlphaBeta.__minValueForInstanciation__() and BloomCheckerFirst: #instanciate\r\n player._board.push(m)\r\n# print(\"Instanciate a table with the score\", score) \r\n player._bloomTable.add(key=Utils.HashingOperation.BoardToHashCode(player._board))\r\n player._board.pop() \r\n if (time.time() - startTime > __MaxAllowedTimeInSeconds__):\r\n break\r\n\r\n \r\n if(Parallelization):\r\n# tout=5\r\n# tout = .5000/len(process_list)\r\n for proc in process_list:\r\n proc.join()#timeout=tout)\r\n while q.qsize() > 0:\r\n (score,move) = q.get(block=True)\r\n \r\n if score > bestscore:\r\n bestscore = score\r\n return_move = move\r\n if bestscore >= AlphaBeta.__beta__():\r\n return (bestscore, return_move)\r\n \r\n print(\"---------------------\")\r\n return (bestscore,return_move)\r\n\r\n\r\n\r\n @classmethod\r\n def alphaBetaNoParallelizationWrapper(self, player, depth, alpha, beta, move, BloomCheckerFirst):\r\n \"\"\"wrapper used only for the sequential AB-pruning.\"\"\"\r\n player._board.push(move)\r\n self.startTime = time.time()\r\n score = self.min_score_alpha_beta(player, player._board, depth, alpha, beta, BloomCheckerFirst)\r\n if score > alpha: \r\n alpha = score\r\n player._board.pop()\r\n return (alpha)\r\n \r\n \r\n @classmethod\r\n def alphaBetaParallelizationWrapper(self, player, depth, alpha, beta, move, queue, BloomCheckerFirst):\r\n \"\"\"wrapper used only for the multiprocessing AB-pruning, to organize the parameters, set up the Queue\r\n or create a copy of the board\"\"\"\r\n copiedBoard = AlphaBeta.__CopyCurrentBoard__(player)\r\n self.startTime = time.time()\r\n player._board.push(move)\r\n score = self.min_score_alpha_beta(player, copiedBoard, depth, alpha, beta, BloomCheckerFirst)\r\n player._board.pop()\r\n if score > alpha: \r\n alpha = score\r\n 
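# hand the local best back to the parent process; alpha/beta stay process-local, so cross-process pruning is lost\r\n            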
queue.put( (alpha, move) )\r\n copiedBoard = None\r\n return alpha\r\n \r\n \r\n \r\n \r\n \r\n \r\n # Also the max and min value function:\r\n @classmethod\r\n def max_score_alpha_beta(self, player, board, depth, alpha, beta, BloomCheckerFirst):\r\n moves = board.legal_moves()\r\n \r\n \r\n if board.is_game_over():\r\n (nbB, nbW) = board.get_nb_pieces()\r\n if player._mycolor is Reversi.Board._BLACK:\r\n if nbB > nbW: \r\n if(BloomCheckerFirst): #win board\r\n hashValue = Utils.HashingOperation.BoardToHashCode(board)\r\n res_contain = player._bloomTable.__contains__(key=hashValue)\r\n if(not res_contain): \r\n player._bloomTable.add(key=Utils.HashingOperation.BoardToHashCode(board))\r\n return AlphaBeta.__beta__()\r\n \r\n else: #lose board\r\n return AlphaBeta.__alpha__()\r\n else: #win board\r\n if nbW > nbB: \r\n if(BloomCheckerFirst):\r\n hashValue = Utils.HashingOperation.BoardToHashCode(board)\r\n res_contain = player._bloomTable.__contains__(key=hashValue)\r\n if(not res_contain):\r\n player._bloomTable.add(key=Utils.HashingOperation.BoardToHashCode(board))\r\n return AlphaBeta.__beta__()\r\n \r\n else: #lose board\r\n return AlphaBeta.__alpha__()\r\n \r\n if depth == 0 or board.is_game_over() or time.time() - self.startTime > __MaxAllowedTimeInSeconds__: # leaves of alpha-beta pruning \r\n score = eval.getTotal(player,player._mycolor)\r\n return score\r\n \r\n if(BloomCheckerFirst):\r\n hashValue = Utils.HashingOperation.BoardToHashCode(board)\r\n res_contain = player._bloomTable.__contains__(key=hashValue)\r\n if(not res_contain and score > AlphaBeta.__minValueForInstanciation__()):\r\n player._bloomTable.add(key=Utils.HashingOperation.BoardToHashCode(board))\r\n return score\r\n \r\n \r\n# maxVal = self.synch_alpha \r\n maxVal = alpha\r\n \r\n \r\n for move in moves: \r\n board.push(move)\r\n score = self.min_score_alpha_beta(player, board, depth-1, alpha, beta, BloomCheckerFirst)\r\n board.pop()\r\n \r\n if score > maxVal:\r\n maxVal = score\r\n if maxVal >= beta:\r\n return maxVal\r\n \r\n if maxVal > alpha:\r\n# alpha = self.__synch_update_alpha__(maxVal)\r\n alpha = maxVal\r\n \r\n \r\n if (time.time() - self.startTime > __MaxAllowedTimeInSeconds__):\r\n break\r\n \r\n return maxVal\r\n\r\n\r\n @classmethod\r\n def min_score_alpha_beta(self, player, board, depth, alpha, beta, BloomCheckerFirst):\r\n moves = board.legal_moves()\r\n \r\n \r\n if depth == 0 or board.is_game_over() or time.time() - self.startTime > __MaxAllowedTimeInSeconds__:\r\n return eval.getTotal(player,player._mycolor)\r\n \r\n# minVal = self.synch_beta\r\n minVal = beta\r\n \r\n for move in moves:\r\n \r\n board.push(move)\r\n score = self.max_score_alpha_beta(player, board, depth-1, alpha, beta, BloomCheckerFirst)\r\n board.pop()\r\n \r\n \r\n \r\n if score < minVal:\r\n minVal = score\r\n if minVal <= alpha:\r\n return minVal\r\n else:\r\n if(BloomCheckerFirst):\r\n hashValue = Utils.HashingOperation.BoardToHashCode(board)\r\n res_contain = player._bloomTable.__contains__(key=hashValue)\r\n if(not res_contain and beta >= AlphaBeta.__minValueForInstanciation__()):\r\n# print(\"Instanciate a table with the score\", score) \r\n player._bloomTable.add(key=Utils.HashingOperation.BoardToHashCode(board))\r\n \r\n \r\n if minVal > beta:\r\n beta = minVal\r\n# beta = self.__synch_update_beta__(minVal)\r\n \r\n if (time.time() - self.startTime > __MaxAllowedTimeInSeconds__):\r\n break\r\n \r\n \r\n return minVal\r\n \r\n 
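\r\n# Usage sketch (assumed calling convention, based on the signatures above):\r\n#   ab = AlphaBeta()\r\n#   best_score, best_move = ab.__alpha_beta_main_wrapper__(player, depth=4)\r\n# where player carries _board (a Reversi.Board) and, when BloomCheckerFirst is used, _bloomTable.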
\r\n\r\n","sub_path":"intelligence/movemanager/AlphaBeta.py","file_name":"AlphaBeta.py","file_ext":"py","file_size_in_byte":12267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"72424846","text":"import json\nfrom collections import Counter, OrderedDict\nimport sys\n\nteams = json.load(open(\"reddit.current.json\"))\nallplayers = json.load(open(\"players.current.json\"))\nstartercount = Counter()\nbenchcount = Counter()\ncaptains = Counter()\npositions = {\n    \"Goalkeeper\": Counter(),\n    \"Defender\": Counter(),\n    \"Midfielder\": Counter(),\n    \"Forward\": Counter(),\n}\n\ndef get(playerid):\n    return allplayers[str(playerid)]\n\ndef fullname(player):\n    return \"{} {}\".format(player[\"first_name\"].encode(\"utf8\"), player[\"second_name\"].encode(\"utf8\"))\n\ndef tableheader(position):\n    sys.stdout.write(\"{} | Points | Owned | Started | Captained\\n\".format(position))\n    sys.stdout.write(\"-- | -- | -- | -- | --\\n\")\n\nfor teamid, players in teams.iteritems():\n    for player in players:\n        position = get(player[\"id\"])[\"type_name\"]\n        positions[position][player[\"id\"]] += 1\n\n        if player[\"sub\"]:\n            benchcount[player[\"id\"]] += 1\n        else:\n            startercount[player[\"id\"]] += 1\n            if player[\"captain\"]:\n                captains[player[\"id\"]] += 1\n\ndisplay_counts = OrderedDict([\n    (\"Goalkeeper\", 20),\n    (\"Defender\", 40),\n    (\"Midfielder\", 40),\n    (\"Forward\", 30),\n])\n\nfor position, display_count in display_counts.iteritems():\n    tableheader(position)\n    for player_id, count in positions[position].most_common(display_count):\n        player = get(player_id)\n        name = fullname(player)\n        points = player[\"event_points\"]\n        captained = captains[player_id]\n        started = startercount[player_id]\n        sys.stdout.write(\"{} | {} | {} | {} | {}\\n\".format(name, points, count, started, captained))\n    sys.stdout.write(\"\\n\")\n","sub_path":"reddit_top_players.py","file_name":"reddit_top_players.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"354886410","text":"import tensorflow as tf\nimport json\nfrom google.protobuf import text_format\nfrom tensorflow.python.platform import gfile\nfrom baseline.tf.tfy import *\nfrom baseline.w2v import RandomInitVecModel\nimport tensorflow.contrib.seq2seq as tfcontrib_seq2seq\nfrom baseline.model import EncoderDecoder\n\n\ndef _temporal_cross_entropy_loss(logits, labels, label_lengths, mx_seq_length):\n    \"\"\"Do cross-entropy loss accounting for sequence lengths\n    \n    :param logits: a `Tensor` with shape `[batch, timesteps, vocab]`\n    :param labels: an integer `Tensor` with shape `[batch, timesteps]`\n    :param label_lengths: The actual length of the target text. Assume right-padded\n    :param mx_seq_length: The maximum length of the sequence\n    :return: \n    \"\"\"\n\n    # The labels actual length is 100, and starts with <GO>\n    labels = tf.transpose(labels, perm=[1, 0])\n    # TxB loss mask\n    # Logits == mxlen=10*batchsz=100\n    # Labels == mxlen=9,batchsz=100\n    labels = labels[0:mx_seq_length, :]\n    timesteps = tf.to_int32(tf.shape(labels)[0])\n    # The labels no longer include <GO> so go is not useful. 
This means that if the length was 100 before, the length\n # of labels is now 99 (and that is the max allowed)\n\n #logits = logits[0:mx_seq_length, :, :]\n with tf.name_scope(\"Loss\"):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n\n # BxT loss mask\n loss_mask = tf.to_float(tf.sequence_mask(tf.to_int32(label_lengths), timesteps))\n # TxB losses * TxB loss_mask\n losses = losses * tf.transpose(loss_mask, [1, 0])\n\n losses = tf.reduce_sum(losses)\n losses /= tf.cast(tf.reduce_sum(label_lengths), tf.float32)\n return losses\n\n\nclass Seq2SeqModel(EncoderDecoder):\n\n def create_loss(self):\n # We do not want to count in our assessment, we do want to count \n return _temporal_cross_entropy_loss(self.preds[:-1, :, :], self.tgt[:, 1:], self.tgt_len - 1, self.mx_tgt_len - 1)\n\n def __init__(self):\n super(Seq2SeqModel, self).__init__()\n pass\n\n @staticmethod\n def create(src_vocab_embed, dst_vocab_embed, **kwargs):\n\n model = Seq2SeqModel()\n hsz = int(kwargs['hsz'])\n attn = bool(kwargs.get('attn', False))\n nlayers = int(kwargs.get('layers', 1))\n rnntype = kwargs.get('rnntype', 'lstm')\n mxlen = kwargs.get('mxlen', 100)\n predict = kwargs.get('predict', False)\n model.sess = kwargs.get('sess', tf.Session())\n\n # These are going to be (B,T)\n model.src = tf.placeholder(tf.int32, [None, mxlen], name=\"src\")\n model.tgt = tf.placeholder(tf.int32, [None, mxlen], name=\"tgt\")\n model.pkeep = tf.placeholder(tf.float32, name=\"pkeep\")\n\n model.src_len = tf.placeholder(tf.int32, [None], name=\"src_len\")\n model.tgt_len = tf.placeholder(tf.int32, [None], name=\"tgt_len\")\n model.mx_tgt_len = tf.placeholder(tf.int32, name=\"mx_tgt_len\")\n\n model.vocab1 = src_vocab_embed.vocab\n model.vocab2 = dst_vocab_embed.vocab\n\n model.mxlen = mxlen\n model.hsz = hsz\n model.nlayers = nlayers\n model.rnntype = rnntype\n model.attn = attn\n\n GO = model.vocab2['']\n EOS = model.vocab2['']\n vsz = dst_vocab_embed.vsz + 1\n\n assert src_vocab_embed.dsz == dst_vocab_embed.dsz\n model.dsz = src_vocab_embed.dsz\n\n with tf.name_scope(\"LUT\"):\n Wi = tf.Variable(tf.constant(src_vocab_embed.weights, dtype=tf.float32), name=\"Wi\")\n Wo = tf.Variable(tf.constant(dst_vocab_embed.weights, dtype=tf.float32), name=\"Wo\")\n\n embed_in = tf.nn.embedding_lookup(Wi, model.src)\n \n with tf.name_scope(\"Recurrence\"):\n rnn_enc_tensor, final_encoder_state = model.encode(embed_in, model.src)\n batch_sz = tf.shape(rnn_enc_tensor)[0]\n\n with tf.variable_scope(\"dec\"):\n proj = dense_layer(vsz)\n rnn_dec_cell = model._attn_cell(rnn_enc_tensor) #[:,:-1,:])\n\n if model.attn is True:\n initial_state = rnn_dec_cell.zero_state(dtype=tf.float32, batch_size=batch_sz)\n else:\n initial_state = final_encoder_state\n\n if predict is True:\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(Wo, tf.fill([batch_sz], GO), EOS)\n else:\n helper = tf.contrib.seq2seq.TrainingHelper(inputs=tf.nn.embedding_lookup(Wo, model.tgt), sequence_length=model.tgt_len)\n decoder = tf.contrib.seq2seq.BasicDecoder(cell=rnn_dec_cell, helper=helper, initial_state=initial_state, output_layer=proj)\n final_outputs, final_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, output_time_major=True, maximum_iterations=model.mxlen)\n model.preds = final_outputs.rnn_output\n best = final_outputs.sample_id\n\n with tf.name_scope(\"Output\"):\n model.best = tf.identity(best, name='best')\n model.probs = tf.map_fn(lambda x: tf.nn.softmax(x, name='probs'), model.preds)\n return 
model\n\n def _attn_cell(self, rnn_enc_tensor):\n cell = new_multi_rnn_cell(self.hsz, self.rnntype, self.nlayers)\n if self.attn:\n attn_mech = tfcontrib_seq2seq.BahdanauAttention(self.hsz, rnn_enc_tensor, self.src_len)\n #attn_mech = tfcontrib_seq2seq.LuongAttention(self.hsz, rnn_enc_tensor, self.src_len)\n cell = tf.contrib.seq2seq.AttentionWrapper(cell, attn_mech, self.hsz, name='dyn_attn_cell')\n return cell\n\n def encode(self, embed_in, src):\n with tf.name_scope('encode'):\n # List to tensor, reform as (T, B, W)\n embed_in_seq = tensor2seq(embed_in)\n rnn_enc_cell = new_multi_rnn_cell(self.hsz, self.rnntype, self.nlayers)\n #TODO: Switch to tf.nn.rnn.dynamic_rnn()\n rnn_enc_seq, final_encoder_state = tf.contrib.rnn.static_rnn(rnn_enc_cell, embed_in_seq, scope='rnn_enc', dtype=tf.float32)\n # This comes out as a sequence T of (B, D)\n return seq2tensor(rnn_enc_seq), final_encoder_state\n\n def save_md(self, model_base):\n\n path_and_file = model_base.split('/')\n outdir = '/'.join(path_and_file[:-1])\n base = path_and_file[-1]\n tf.train.write_graph(self.sess.graph_def, outdir, base + '.graph', as_text=False)\n\n state = {\"attn\": self.attn, \"hsz\": self.hsz, \"dsz\": self.dsz, \"rnntype\": self.rnntype, \"nlayers\": self.nlayers, \"mxlen\": self.mxlen }\n with open(model_base + '.state', 'w') as f:\n json.dump(state, f)\n\n with open(model_base + '-1.vocab', 'w') as f:\n json.dump(self.vocab1, f) \n\n with open(model_base + '-2.vocab', 'w') as f:\n json.dump(self.vocab2, f) \n\n def save(self, model_base):\n self.save_md(model_base)\n self.saver.save(self.sess, model_base + '.model')\n\n def restore_md(self, model_base):\n\n with open(model_base + '-1.vocab', 'r') as f:\n self.vocab1 = json.load(f)\n\n with open(model_base + '-2.vocab', 'r') as f:\n self.vocab2 = json.load(f)\n\n with open(model_base + '.state', 'r') as f:\n state = json.load(f)\n self.attn = state['attn']\n self.hsz = state['hsz']\n self.dsz = state['dsz']\n self.rnntype = state['rnntype']\n self.nlayers = state['nlayers']\n self.mxlen = state['mxlen']\n\n def restore_graph(self, base):\n with open(base + '.graph', 'rb') as gf:\n gd = tf.GraphDef()\n gd.ParseFromString(gf.read())\n self.sess.graph.as_default()\n tf.import_graph_def(gd, name='')\n\n def step(self, src, src_len, dst, dst_len):\n \"\"\"\n Generate probability distribution over output V for next token\n \"\"\"\n feed_dict = self.make_feed_dict(src, src_len, dst, dst_len)\n return self.sess.run(self.probs, feed_dict=feed_dict)\n\n def make_feed_dict(self, src, src_len, dst, dst_len, do_dropout=False):\n mx_tgt_len = np.max(dst_len)\n feed_dict = {self.src: src, self.src_len: src_len, self.tgt: dst, self.tgt_len: dst_len, self.mx_tgt_len: mx_tgt_len, self.pkeep: 1.0}\n return feed_dict\n\n def get_src_vocab(self):\n return self.vocab1\n\n def get_dst_vocab(self):\n return self.vocab2\n\n\ndef create_model(src_vocab_embed, dst_vocab_embed, **kwargs):\n enc_dec = Seq2SeqModel.create(src_vocab_embed, dst_vocab_embed, **kwargs)\n return enc_dec\n","sub_path":"python/baseline/tf/seq2seq/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"579122801","text":"'''\nCreated on Jun 26, 2018\n\n@author: ftd\n'''\nfrom tkinter import *\nfrom src.main.pydev.com.ftd.generalutilities.metadata.gui.impl.frame.Frame_bottom import Frame_bottom\nfrom src.main.pydev.com.ftd.generalutilities.metadata.gui.impl.base.FormatableFrame import 
FormatableFrame\nfrom src.main.pydev.com.ftd.generalutilities.metadata.service.base.File_constant import File_constant\nfrom tkinter.messagebox import showwarning, showerror\nfrom src.main.pydev.com.ftd.generalutilities.metadata.service.fileproc.Java_processor import Java_processor\nfrom src.main.pydev.com.ftd.generalutilities.metadata.service.base.Java_constant import Java_constant\n\nclass Frame_serviceimpl_option(FormatableFrame):\n    '''\n    classdocs\n    '''\n\n\n    def __init__(self, parent=None, dtos=None, trans=None, **configs):\n        '''\n        Constructor\n        '''\n        self.__result = None\n        self.__error = None\n        self.__funclists = {} #functions list backup\n        FormatableFrame.__init__(self, parent.get_mainframe(), dtos, trans, **configs)\n    \n    \n    #overwrite create_widges\n    def create_widges(self):\n        javaconstant = Java_constant()\n        #frame\n        self.__frame1 = FormatableFrame(self)\n        self.__frame1.pack(side=TOP)\n        #Title\n        self.__label01 = Label(self.__frame1, text=\"ServiceImpl generator options\", width= 45)\n        self.__label01.pack(side=TOP, fill=X, ipady=10)\n        \n        #---- java validation flag\n        self.__error = None\n        self.__result = True\n        \n        #---- panel 01 ----------\n        canv1 = Canvas(self, height=30, width=550)\n        #label\n        self.__label01 = Label(canv1, text='ServiceImpl name :')\n        self.__label01.place(height=20, width=130, relx=0.03, rely=0.2)\n        #input\n        self.__feet = StringVar()\n        self.__dicinput = Entry(canv1, textvariable=self.__feet, borderwidth=3, bg='black', foreground='yellow', highlightcolor='red', insertbackground='red')\n        self.__dicinput.place(height=20, width=250, relx=0.3, rely=0.2)\n        \n        canv1.pack()\n        \n        #---- panel 02 ----------\n        serviceInterDTO = self.get_dtos().get_serviceInterDTO()\n        if not serviceInterDTO:\n            self.__pack_errorpanel()\n            return\n        \n        canv2 = Canvas(self, height=150, width=550)\n        #label01\n        self.__label01 = Label(canv2, text='Select the functions :')\n        self.__label01.place(height=20, width=150, relx=0.01, rely=0.05)\n        #left listbox and scrollbar\n        self.__listboxleft = Listbox(canv2, width=30)\n        self.__scrollleft = Scrollbar(canv2)\n        self.__listboxleft.config(yscrollcommand = self.__scrollleft.set)\n        self.__listboxleft.place(height=120, width=220, relx=0.02, rely=0.18)\n        self.__scrollleft.place(height=120, width=20, relx=0.42, rely=0.18)\n        self.__scrollleft.config(command = self.__listboxleft.yview)\n        \n        #middle buttons\n        self.__button01 = Button(canv2, text='>', relief=RAISED, cursor='hand2')\n        self.__button01.bind('<Button-1>', self.__to_right_click_event) #bind button click event\n        self.__button01.place(height=25, width=25, relx=0.465, rely=0.2)\n        \n        self.__button02 = Button(canv2, text='>>', relief=RAISED, cursor='hand2')\n        self.__button02.bind('<Button-1>', self.__all_to_right_click_event) #bind button click event\n        self.__button02.place(height=25, width=25, relx=0.465, rely=0.4)\n        \n        self.__button03 = Button(canv2, text='<', relief=RAISED, cursor='hand2')\n        self.__button03.bind('<Button-1>', self.__to_left_click_event) #bind button click event\n        self.__button03.place(height=25, width=25, relx=0.465, rely=0.6)\n        \n        self.__button04 = Button(canv2, text='<<', relief=RAISED, cursor='hand2')\n        self.__button04.bind('<Button-1>', self.__all_to_left_click_event) #bind button click event\n        self.__button04.place(height=25, width=25, relx=0.465, rely=0.8)\n        \n        #right listbox and scrollbar\n        self.__listboxright = Listbox(canv2, width=30)\n        self.__scrollright = Scrollbar(canv2)\n        self.__listboxright.config(yscrollcommand = self.__scrollright.set)\n        self.__listboxright.place(height=120, width=220, relx=0.52, rely=0.18)\r\n        
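#the right-hand pane mirrors the left one: functions chosen for generation accumulate here\r\n        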
self.__scrollright.place(height=120, width=20, relx=0.92, rely=0.18)\r\n        self.__scrollright.config(command = self.__listboxright.yview)\r\n        \r\n        canv2.pack()\r\n        \r\n        #---- panel 03 ----------\r\n        canv3 = Canvas(self, height=100, width=550)\r\n        #label\r\n        label1 = Label(canv3, text='Options:')\r\n        label1.place(height=20, width=60, relx= 0, rely=0)\r\n        #radio box\r\n        self.__vari1 = IntVar()\r\n        self.__rad1 = Radiobutton(canv3, text='Re-write the previous file', variable=self.__vari1, value=1)\r\n        self.__rad1.place(height=20, width=170, relx= 0.1, rely=0.2)\r\n        self.__rad1.select()\r\n        self.__rad2 = Radiobutton(canv3, text='Attach new functions to the file', variable=self.__vari1, value=2)\r\n        self.__rad2.place(height=20, width=210, relx= 0.1, rely=0.45)\r\n        self.__rad2.deselect()\r\n        self.__rad3 = Radiobutton(canv3, text='Save the previous file as backup', variable=self.__vari1, value=3)\r\n        self.__rad3.place(height=20, width=220, relx= 0.1, rely=0.7)\r\n        self.__rad3.deselect()\r\n        canv3.pack()\r\n\r\n        # set the serviceImpl name\r\n        fileconstant = File_constant()\r\n        serviceImpl_name = self.get_dtos().get_businessentityname() + fileconstant.SERVICEIMPL_SUFFIX + fileconstant.JAVA_SUFFIX\r\n        self.__feet.set(serviceImpl_name)\r\n\r\n        #set the function list to the left box\r\n        for javaMtd in serviceInterDTO.get_class_methods():\r\n            self.__funclists[javaMtd.get_method_name()] = javaMtd\r\n            \r\n            # CRUD functions are mandatory\r\n            if javaMtd.get_method_name() == javaconstant.JAVA_FUNCTION_CREATE or javaMtd.get_method_name() == javaconstant.JAVA_FUNCTION_UPDATE or javaMtd.get_method_name() == javaconstant.JAVA_FUNCTION_DELETE or javaMtd.get_method_name() == javaconstant.JAVA_FUNCTION_FETCH:\r\n                self.__listboxright.insert(END, javaMtd.get_method_name())\r\n                continue\r\n            \r\n            #add items into list box\r\n            self.__listboxleft.insert(END, javaMtd.get_method_name())\r\n    \r\n    \r\n    #overwrite add_bottom\r\n    def add_bottom(self, parent):\r\n        #bottom frame\r\n        exFuncs = {'Next':{'process':self.get_nextframe(), 'before':self.before_next},\r\n                   'Prev':{'process':self.get_prevframe(), 'before':self.before_prev}}\r\n        self.__buttom = Frame_bottom(parent, ['Next','Prev'], exFuncs)\r\n        self.__buttom.pack(fill=X, ipady=10,side=BOTTOM)\r\n    \r\n    \r\n    def before_next(self):\r\n        '''\r\n        overwrite the function in super class\r\n        validate the options and generate the ServiceImpl file\r\n        '''\r\n        if not self.__result:\r\n            showwarning('Warning', 'There are existing errors, so the ServiceImpl cannot be generated.')\r\n            return\r\n        \r\n        # write serviceImpl\r\n        result, message, filefullpath, javaDTO = Java_processor.create_serviceImpl(self.__feet.get(), self.get_trans(), self.get_dtos(), self.__listboxright.get(0, END), self.__vari1.get())\r\n        if not result:\r\n            showerror('Error', message)\r\n            return False\r\n        \r\n        # store the serviceImpl full path\r\n        self.get_dtos().set_serviceImplInfo(self.__feet.get(), filefullpath, javaDTO)\r\n        return True\r\n    \r\n    \r\n    def __pack_errorpanel(self):\r\n        '''\r\n        pack the error panel\r\n        '''\r\n        canv2 = Canvas(self, height=50, width=550)\r\n        #label01\r\n        self.__label01 = Label(canv2, text=self.__error, fg='red')\r\n        self.__label01.place(height=40, width=500, relx=0.01, rely=0.05) \r\n        canv2.pack()\r\n    \r\n    \r\n    def __to_right_click_event(self, event):\r\n        '''\r\n        move the selection to the right list box\r\n        '''\r\n        if not self.__listboxleft or self.__listboxleft.size() == 0:\r\n            return\r\n        \r\n        select_item = None\r\n        if len(self.__listboxleft.curselection()) > 0:\r\n            selection = self.__listboxleft.selection_get()\r\n            #the dict.items() will convert to tuple\r\n            for tup in self.__funclists.items():\r\n                if tup[0] == selection:\r\n                    select_item = tup\r\n                    break\r\n        \r\n        if select_item:\r\n            #add into right\r\n            self.__listboxright.insert(END, select_item[0])\r\n            select_idx = self.__listboxleft.curselection()\r\n            #remove from left\r\n            self.__listboxleft.delete(select_idx[0])\r\n    \r\n    \r\n    def __all_to_right_click_event(self, event):\r\n        '''\r\n        move all items to the right list box\r\n        '''\r\n        select_items = []\r\n        if len(self.__listboxleft.get(0, END)) > 0:\r\n            for select_item in self.__listboxleft.get(0, END):\r\n                select_items.append(select_item)\r\n            self.__listboxleft.delete(0, END)\r\n        \r\n        for select_item in select_items:\r\n            #add into right\r\n            self.__listboxright.insert(END, select_item)\r\n    \r\n    \r\n    def __to_left_click_event(self, event):\r\n        '''\r\n        move the selection to the left list box\r\n        '''\r\n        javaconstant = Java_constant()\r\n        if not self.__listboxright or self.__listboxright.size() == 0:\r\n            return\r\n        \r\n        select_item = None\r\n        if len(self.__listboxright.curselection()) > 0:\r\n            selection = self.__listboxright.selection_get()\r\n            #the dict.items() will convert to tuple\r\n            idx = 0\r\n            for tup in self.__funclists.items():\r\n                if tup[0] == selection:\r\n                    if selection == javaconstant.JAVA_FUNCTION_CREATE or selection == javaconstant.JAVA_FUNCTION_UPDATE or selection == javaconstant.JAVA_FUNCTION_DELETE or selection == javaconstant.JAVA_FUNCTION_FETCH:\r\n                        return\r\n                    select_item = tup\r\n                    break\r\n                idx = idx + 1\r\n        \r\n        if select_item:\r\n            #add into left\r\n            self.__listboxleft.insert(idx, select_item[0])\r\n            select_idx = self.__listboxright.curselection()\r\n            #remove from right\r\n            self.__listboxright.delete(select_idx[0])\r\n    \r\n    \r\n    def __all_to_left_click_event(self, event):\r\n        '''\r\n        move all items to the left list box\r\n        '''\r\n        javaconstant = Java_constant()\r\n        select_items = []\r\n        if len(self.__listboxright.get(0, END)) > 0:\r\n            crud_nbr = 0\r\n            for select_item in self.__listboxright.get(0, END):\r\n                if select_item == javaconstant.JAVA_FUNCTION_CREATE or select_item == javaconstant.JAVA_FUNCTION_UPDATE or select_item == javaconstant.JAVA_FUNCTION_DELETE or select_item == javaconstant.JAVA_FUNCTION_FETCH:\r\n                    crud_nbr = crud_nbr + 1\r\n                    continue\r\n                select_items.append(select_item)\r\n            self.__listboxright.delete(crud_nbr, END)\r\n        \r\n        for select_item in select_items:\r\n            #add into left\r\n            self.__listboxleft.insert(END, select_item)","sub_path":"src/main/pydev/com/ftd/generalutilities/metadata/gui/impl/frame/Frame_serviceimpl_option.py","file_name":"Frame_serviceimpl_option.py","file_ext":"py","file_size_in_byte":11309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"330282297","text":"import MapReduce\nimport sys\n\n\"\"\"\nMy solution for asymmetric friendships\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\ndef mapper(record):\n    mr.emit_intermediate(record[0], record[1])\n    mr.emit_intermediate(record[1], record[0])\n\ndef reducer(key, list_of_values):\n    for x in list_of_values:\n        if list_of_values.count(x) == 1:\n            mr.emit((key, x))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n    inputdata = open(sys.argv[1])\n    mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/asymmetric_friendships.py","file_name":"asymmetric_friendships.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"581319210","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan  7 00:00:52 2019\n\n@author: diaa\n\"\"\"\nimport numpy as np\nfrom util.func import * \n\ndef linear_activation_backward(dA, cache, activation,lambd=None):\n    \"\"\"\n    Function:\n        Implement the backward propagation for the 
LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n lambd -- regularization hyperparameter, scalar\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n \n linear_cache, activation_cache = cache\n\n if activation == \"sigmoid\":\n dZ = sigmoid_backward(dA, activation_cache)\n elif activation == \"relu\":\n dZ = relu_backward(dA, activation_cache)\n \n dA_prev, dW, db = linear_backward(dZ, linear_cache,lambd)\n\n return dA_prev, dW, db\n\n\n\ndef L_model_backward(AL, Y, caches,lambd=None,keep_prob=None,Ds=None):\n \"\"\"\n Function:\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n lambd -- regularization hyperparameter, scalar\n keep_prob -- probability of keeping a neuron active during drop-out, scalar.\n Ds -- list of dropout neurons\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... 
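\n                 Note: the stored indices run from 1 at the first hidden layer up to L at the output layer.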
\n \"\"\"\n \n grads = {}\n L = len(caches)\n m = AL.shape[1]\n\n dAL = np.divide(1 - Y, 1 - AL) - np.divide(Y, AL)\n dAL = np.nan_to_num(dAL)\n\n current_cache = caches[-1]\n\n grads[\"dA\"+str(L)], grads[\"dW\"+str(L)], grads[\"db\"+str(L)] = linear_activation_backward(dAL, current_cache, \"sigmoid\", lambd)\n \n for l in reversed(range(L-1)):\n current_cache = caches[l]\n dA = grads[\"dA\" + str(l + 2)]\n\n if keep_prob:\n D = Ds['D'+str(l+1)]\n\n dA *= D\n dA /= keep_prob\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(dA, current_cache, activation=\"relu\",lambd=lambd)\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n\n return grads, dA_prev_temp\n\n\n","sub_path":"util/backward.py","file_name":"backward.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"126119770","text":"import tensorflow as tf\nfrom tensorflow.python.framework.graph_util import convert_variables_to_constants\nimport numpy as np\n\n# Global variables\nn_channel = 1\nn_maps_c1 = 10\nwindow_size = 20\nshape_sensor = (5, 5)\nshape_input = (shape_sensor[0], window_size * shape_sensor[1])\nk_stride_c1 = (1, 1)\nk_shape_c1 = (3, 3)\nshape_c1 = (n_maps_c1,\n (shape_input[0] - k_shape_c1[0] + 1) / k_stride_c1[0],\n (shape_input[1] - k_shape_c1[1] + 1) / k_stride_c1[1])\nn_maps_c2 = 15\nk_stride_c2 = (1, 1)\nk_shape_c2 = (2, 2)\nshape_c2 = (n_maps_c2,\n (shape_c1[1] - k_shape_c2[0] + 1) / k_stride_c2[0],\n (shape_c1[2] - k_shape_c2[1] + 1) / k_stride_c2[1])\nl3_nodes = 1500\nk_shape_l3 = (np.prod(shape_c2), l3_nodes)\nn_class = 5\nparams_dir = 'parameters/classification/model.ckpt-1501'\noutput_file = 'graph_classification'\n\n# Instantiate graph\ngraph = tf.Graph()\n\n# Instantiate session\nsession = tf.InteractiveSession(graph=graph)\n\n# Reconstruct model\nwith graph.as_default():\n\n # Model input\n tf_x = tf.placeholder(tf.float32, shape=[1, shape_input[0], shape_input[1], n_channel], name='input')\n\n # Variables\n w1 = tf.Variable(tf.truncated_normal([k_shape_c1[0], k_shape_c1[1], n_channel, n_maps_c1], stddev=0.1), name='w1')\n b1 = tf.Variable(tf.zeros([n_maps_c1]), name='b1')\n w2 = tf.Variable(tf.truncated_normal([k_shape_c2[0], k_shape_c2[1], n_maps_c1, n_maps_c2], stddev=0.1), name='w2')\n b2 = tf.Variable(tf.zeros([n_maps_c2]), name='b2')\n w3 = tf.Variable(tf.truncated_normal([k_shape_l3[0], k_shape_l3[1]], stddev=0.1), name='w3')\n b3 = tf.Variable(tf.zeros([l3_nodes]), name='b3')\n w4 = tf.Variable(tf.truncated_normal([l3_nodes, n_class], stddev=0.1), name='w4')\n b4 = tf.Variable(tf.zeros([n_class]), name='b4')\n \n # Compute\n # First convolutional layer\n c1 = tf.nn.conv2d(tf_x, w1, strides=[1, 1, 1, 1], padding='VALID', name='c1')\n h1 = tf.nn.relu(c1 + b1, name='h1')\n # h1_pooled = tf.nn.max_pool3d(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME', name='h1_pooled')\n \n # Second convolutional layer\n c2 = tf.nn.conv2d(h1, w2, strides=[1, 1, 1, 1], padding='VALID', name='c2')\n h2 = tf.nn.relu(c2 + b2, name='h2')\n # h2_pooled = tf.nn.max_pool3d(h2, ksize=[1, 5, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME', name='h2_pooled')\n\n # Reshape for fully connected layer\n h2_reshaped = tf.reshape(h2, shape=[1, np.prod(shape_c2)], name='h2_reshaped')\n \n # First fully connected layer\n a3 = tf.add(tf.matmul(h2_reshaped, w3), b3, name='a3')\n h3 = tf.nn.relu(a3, name='h3')\n \n # Output\n output = 
tf.add(tf.matmul(h3, w4), b4, name='output')\n \n # Restore saved model params\n var_dict = {'w1': w1,\n 'b1': b1,\n 'w2': w2,\n 'b2': b2,\n 'w3': w3,\n 'b3': b3,\n 'w4': w4,\n 'b4': b4\n }\n saver = tf.train.Saver(var_dict)\n init = tf.initialize_all_variables()\n session.run(init)\n saver.restore(session, params_dir)\n\n# Serialize graph for c++\nfrozen_graph = convert_variables_to_constants(session, session.graph_def, ['output'])\ntf.train.write_graph(frozen_graph, '.', output_file + '.pb', as_text=False)\ntf.train.write_graph(frozen_graph, '.', output_file + '.txt', as_text=True)\n\n","sub_path":"model/model_def_classification.py","file_name":"model_def_classification.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"231932221","text":"\"\"\"Server configuration settings.\"\"\"\nfrom .serialization.encoder import ExtendedJSONEncoder\n\n# Server\nASYNC_MODE = 'eventlet'\n\n# Data Storage\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\nREDIS_DB = 0\n\nUUID_LENGTH = 8\nKEY_SEPARATOR = '#'\nMETA_SEPARATOR = '!'\n\nJSON_ENCODER = ExtendedJSONEncoder\n","sub_path":"server/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"202238720","text":"# @Author: Jenkins Alec \n# @Date: 2017-02-17T14:20:47-08:00\n# @Project: LTSPM analysis\n# @Last modified by: alec\n# @Last modified time: 2017-02-17T14:23:22-08:00\n\n\n\nimport numpy as np\n\nohfPath = '/Users/alec/UCSB/oommf/data_and_runs/irmn/'\nfilename = 'irmn_magn-hdemag.ohf'\nohfDemagH = np.genfromtxt(ohfPath+filename, skip_header=32, skip_footer=2)\n\nprint(ohfDemagH[0])\n","sub_path":"LTSPM_analysis/calc_demag_energy_oommf.py","file_name":"calc_demag_energy_oommf.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"414282973","text":"n, m = map(int, input().split())\ncheck_list = [False]*n\nres = []\n\ndef backTracking(depth):\n if depth == m:\n print(' '.join([str(x) for x in res]))\n return\n for i in range(n):\n if check_list[i]:\n continue\n check_list[i] = True\n res.append(i+1)\n backTracking(depth+1)\n res.pop()\n for j in range(i+1, n):\n check_list[j] = False\n\nif __name__ == '__main__':\n backTracking(0)","sub_path":"#02. 알고리즘 이론/13. 
BackTracking/B#15650.py","file_name":"B#15650.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"140476595","text":"from sys import exit\ninp, msk = input (), input()\n\nif len (msk) > len (inp):\n    print (-1)\n    exit()\npos = -1\nfor i in range (len(inp) - len(msk) + 1):\n    for j in range (len(msk)):\n        if (inp[i+j] != msk[j]) and (msk[j] != '@'):\n            break\n    else:\n        pos = i\n        break\nprint (pos)\n","sub_path":"s5/patternfind.py","file_name":"patternfind.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"442582585","text":"# -*- coding: utf-8 -*-\nimport random\n\nUS_MALE_NAMES = ['Jacob', 'Ethan', 'Michael', 'Alexander', 'William', 'Joshua','Daniel',\n                 'Jayden', 'Noah', 'Anthony']\nUS_FEMALE_NAMES = ['Isabella', 'Emma', 'Olivia', 'Sophia', 'Ava', 'Emily', 'Madison',\n                   'Abigail','Chloe', 'Mia']\n\nclass FirstNames(object):\n    \"\"\" can iterate over names for the given gender.\n    \"\"\"\n    def __init__(self, gender=None, male_names=US_MALE_NAMES,\n                 female_names=US_FEMALE_NAMES, *args, **kwargs):\n        self.gender = gender\n        if gender in ['male', 'm']:\n            self.first_names = male_names\n        elif gender in ['female', 'f']:\n            self.first_names = female_names\n        else:\n            self.first_names = male_names + female_names\n        random.shuffle(self.first_names)\n        self.index = 0\n        self.length = len(self.first_names)\n\n    def __iter__(self):\n        return self\n\n    def next(self):\n        self.index += 1\n        return self.first_names[self.index % self.length]\n\n\nGERMAN_LAST_NAMES = [u'Müller', u'Schmid', u'Schneider', u'Fischer', u'Weber', u'Meyer',\n                     u'Wagner', u'Becker', u'Schutz', u'Hoffmann', u'Schäfer',\n                     u'Koch', u'Bauer', u'Richter']\n\nclass LastNames(object):\n    \"\"\" Keeps returning last names\n    \"\"\"\n    def __init__(self, last_names=GERMAN_LAST_NAMES, *args, **kwargs):\n        self.last_names = last_names\n        self.index = 0\n        self.length = len(self.last_names)\n\n    def __iter__(self):\n        return self\n\n    def next(self):\n        self.index += 1\n        return self.last_names[self.index % self.length]\n","sub_path":"scaffolding/library/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"379328143","text":"# -*- coding: utf-8 -*-\nfrom tkinter import ttk\nfrom thonny.misc_utils import running_on_linux\nfrom thonny.globals import get_workbench\n\n_images = set() # for keeping references to tkinter images to avoid garbage collecting them\n\n\n\ndef tweak_notebooks():\n    style = ttk.Style()\n    theme = style.theme_use()\n    \n    if theme in [\"xpnative\", \"vista\"]:\n        get_workbench().get_image('gray_line.gif', \"gray_line\")\n        \n        style.element_create(\"gray_line\", \"image\", \"gray_line\",\n                           (\"!selected\", \"gray_line\"), \n                           height=1, width=10, border=1)\n        \n        style.layout('Tab', [\n            ('Notebook.tab', {'sticky': 'nswe', 'children': [\n                ('Notebook.padding', {'sticky': 'nswe', 'side': 'top', 'children': [\n                    ('Notebook.focus', {'sticky': 'nswe', 'side': 'top', 'children': [\n                        ('Notebook.label', {'sticky': '', 'side': 'left'}),\n                    ]})\n                ]}),\n                ('gray_line', {'sticky': 'we', 'side': 'bottom'}),\n            ]}),\n        ])\n        \n        style.configure(\"Tab\", padding=(4,1,0,0))\n    if theme == \"clam\":\n        style.configure(\"ButtonNotebook.Tab\", padding=(6,4,2,3))\n    else:\n        style.configure(\"ButtonNotebook.Tab\", padding=(4,1,1,3))\n    \n    if theme == \"aqua\":\n        
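# presumably a workaround: without this, aqua's selected tab label colour is hard to read\n        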
style.map(\"TNotebook.Tab\", foreground=[('selected', 'white'), ('!selected', 'black')])\n\n\ndef tweak_treeviews():\n    style = ttk.Style()\n    # get rid of Treeview borders\n    style.layout(\"Treeview\", [\n        ('Treeview.treearea', {'sticky': 'nswe'})\n    ])\n    \n    # necessary for Python 2.7 TODO: doesn't help for aqua\n    style.configure(\"Treeview\", background=\"white\")\n    \n    #style.configure(\"Treeview\", font='helvetica 14 bold')\n    style.configure(\"Treeview\", font=get_workbench().get_font(\"TreeviewFont\"))\n\n    #print(style.map(\"Treeview\"))\n    #print(style.layout(\"Treeview\"))\n    #style.configure(\"Treeview.treearea\", font=TREE_FONT)\n    # NB! Some Python or Tk versions (Eg. Py 3.2.3 + Tk 8.5.11 on Raspbian)\n    # can't handle multi word color names in style.map \n    light_blue = \"#ADD8E6\" \n    light_grey = \"#D3D3D3\"\n    if running_on_linux():\n        style.map(\"Treeview\",\n              background=[('selected', 'focus', light_blue),\n                          ('selected', '!focus', light_grey),\n                          ],\n              foreground=[('selected', 'black'),\n                          ],\n              )\n    else:\n        style.map(\"Treeview\",\n              background=[('selected', 'focus', 'SystemHighlight'),\n                          ('selected', '!focus', light_grey),\n                          ],\n              foreground=[('selected', 'SystemHighlightText')],\n              )\n\ndef tweak_menubuttons():\n    style = ttk.Style()\n    #print(style.layout(\"TMenubutton\"))\n    style.layout(\"TMenubutton\", [\n       ('Menubutton.dropdown', {'side': 'right', 'sticky': 'ns'}),\n       ('Menubutton.button', {'children': [\n           #('Menubutton.padding', {'children': [\n               ('Menubutton.label', {'sticky': ''})\n           #], 'expand': '1', 'sticky': 'we'})\n       ], 'expand': '1', 'sticky': 'nswe'})\n    ])\n    \n    style.configure(\"TMenubutton\", padding=14)\n\ndef tweak_paned_windows():\n    style = ttk.Style()\n    style.configure(\"Sash\", sashthickness=10)\n\n\ndef load_plugin():\n    tweak_notebooks()\n    tweak_treeviews()\n    tweak_paned_windows()\n    \n    \n","sub_path":"thonny/plugins/styler.py","file_name":"styler.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"159462672","text":"def possible(gaps, N, K, max_gap):\n    # return: is it possible to insert at most K values to \n    # make the gap <= max_gap ??\n    cnt = sum([(gap-1)//max_gap for gap in gaps])\n    return cnt <= K\n\ndef get_difficulty(sessions, N, K):\n    gaps = [sessions[i]-sessions[i-1] for i in range(1, len(sessions))]\n    left, right = 1, 10**9\n    while left + 1 < right:\n        mid = left + (right-left)//2\n        if possible(gaps, N, K, mid):\n            right = mid\n        else:\n            left = mid\n    if possible(gaps, N, K, left):\n        return left\n    else:\n        return right\n\nT = int(raw_input())\n\nfor test_id in range(1, T+1):\n    N, K = map(int, raw_input().split())\n    sessions = map(int, raw_input().split())\n    diff = get_difficulty(sessions, N, K)\n    print(\"Case #{}: {}\".format(test_id, diff))\n\n\n\n\"\"\"\nProblem\nTambourine has prepared a fitness program so that she can become more fit! The program is made of N sessions. During the i-th session, Tambourine will exercise for Mi minutes. The number of minutes she exercises in each session is strictly increasing.\n\nThe difficulty of her fitness program is equal to the maximum difference in the number of minutes between any two consecutive training sessions.\n\nTo make her program less difficult, Tambourine has decided to add up to K additional training sessions to her fitness program. She can add these sessions anywhere in her fitness program, and exercise any positive integer number of minutes in each of them. After the additional training sessions are added, the number of minutes she exercises in each session must still be strictly increasing. What is the minimum difficulty possible?\n\nInput\nThe first line of the input gives the number of test cases, T. T test cases follow. Each test case begins with a line containing the two integers N and K. The second line contains N integers, the i-th of these is Mi, the number of minutes she will exercise in the i-th session.\n\nOutput\nFor each test case, output one line containing Case #x: y, where x is the test case number (starting from 1) and y is the minimum difficulty possible after up to K additional training sessions are added.\n\nLimits\nTime limit: 20 seconds per test set.\nMemory limit: 1GB.\n1 ≤ T ≤ 100.\nFor at most 10 test cases, 2 ≤ N ≤ 10^5.\nFor all other test cases, 2 ≤ N ≤ 300.\n1 ≤ Mi ≤ 10^9.\nMi < M(i+1) for all i.\n\nTest set 1\nK = 1.\n\nTest set 2\n1 ≤ K ≤ 10^5.\n\nSamples\n\nInput 1\n \t\nOutput 1\n \n1\n3 1\n100 200 230\n \nCase #1: 50\n \n\nInput 2\n \t\nOutput 2\n \n3\n5 2\n10 13 15 16 17\n5 6\n9 10 20 26 30\n8 3\n1 2 3 4 5 6 7 10\n \nCase #1: 2\nCase #2: 3\nCase #3: 1\n \nSample #1\nIn Case #1: Tambourine can add up to one session. The added sessions are marked in bold: 100 150 200 230. The difficulty is now 50.\n\nSample #2\nIn Case #2: Tambourine can add up to six sessions. The added sessions are marked in bold: 9 10 12 14 16 18 20 23 26 29 30. The difficulty is now 3.\n\nIn Case #3: Tambourine can add up to three sessions. The added sessions are marked in bold: 1 2 3 4 5 6 7 8 9 10. The difficulty is now 1. Note that Tambourine only added two sessions.\n\nNote #1: Only Sample #1 is a valid input for Test set 1. Consequently, Sample #1 will be used as a sample test set for your submissions.\nNote #2: Unlike previous editions, in Kick Start 2020, all test sets are visible verdict test sets, meaning you receive instant feedback upon submission.\n\"\"\"\n","sub_path":"OA/Google Kick Start_problem 3_training sessiosn.py","file_name":"Google Kick Start_problem 3_training sessiosn.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"402098533","text":"# Compare Solutions of the Explore-Exploit Dilemma\n# Bayesian Method (Thompson Sampling) vs.\n# Epsilon Greedy vs. Optimistic Initial Values vs. 
UCB1\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom epsilon_greedy import Bandit\nfrom optimistic_initial_values import run_experiment as run_experiment_oiv\nfrom ucb1 import run_experiment as run_experiment_ucb\n\n\nclass BayesianBandit(object):\n\tdef __init__(self, m):\n\t\tself.m = m # true mean\n\t\t# parameters for mu - prior is N(0, 1)\n\t\tself.m0 = 0\n\t\tself.lambda0 = 1\n\t\tself.sum_x = 0 # for convenience\n\t\tself.tau = 1\n\n\tdef pull(self):\n\t\treturn np.random.randn() + self.m\n\n\tdef sample(self):\n\t\treturn np.random.randn() / np.sqrt(self.lambda0) + self.m0\n\n\tdef update(self, x):\n\t\t# assume tau is 1\n\t\t# lambda = lambda0 + tau * N\n\t\t# m = (m0 * lambda0 + tau * sum(xn)) / lambda\n\t\t# = (m0 * lambda0 + tau * sum(xn)) / (lambda0 + tau * N)\n\t\tself.lambda0 += self.tau\n\t\tself.sum_x += x\n\t\tself.m0 = self.tau * self.sum_x / self.lambda0\n\n\ndef run_experiment_decaying_epsilon(means, N):\n\tbandits = [Bandit(mean) for mean in means]\n\n\tdata = np.empty(N)\n\n\tfor i in range(N):\n\t\t# Epsilon-Greedy\n\t\tp = np.random.random()\n\t\tif p < 1.0 / (i + 1):\n\t\t\tj = np.random.choice(len(bandits))\n\t\telse:\n\t\t\tj = np.argmax([b.mean for b in bandits])\n\t\tx = bandits[j].pull()\n\t\tbandits[j].update(x)\n\n\t\t# for the plot\n\t\tdata[i] = x\n\n\tcumulative_average = np.cumsum(data) / (np.arange(N) + 1)\n\n\t# plot moving average ctr\n\tplt.plot(cumulative_average)\n\tfor mean in means:\n\t\tplt.plot(np.ones(N) * mean)\n\tplt.xscale('log')\n\tplt.title('Decaying Epsilon-Greedy')\n\tplt.show()\n\n\tfor i, b in enumerate(bandits):\n\t\tprint('Bandit %d mean: %f' % (i, b.mean))\n\n\treturn cumulative_average\n\n\ndef run_experiment(means, N):\n\tbandits = [BayesianBandit(mean) for mean in means]\n\n\tdata = np.empty(N)\n\n\tfor i in range(N):\n\t\t# Bayesian Method (Thompson Sampling)\n\t\tj = np.argmax([b.sample() for b in bandits])\n\t\tx = bandits[j].pull()\n\t\tbandits[j].update(x)\n\n\t\t# for the plot\n\t\tdata[i] = x\n\n\tcumulative_average = np.cumsum(data) / (np.arange(N) + 1)\n\n\t# plot moving average ctr\n\tplt.plot(cumulative_average)\n\tfor mean in means:\n\t\tplt.plot(np.ones(N) * mean)\n\tplt.xscale('log')\n\tplt.title('Bayesian Method (Thompson Sampling)')\n\tplt.show()\n\n\treturn cumulative_average\n\n\nif __name__ == '__main__':\n\teps = run_experiment_decaying_epsilon([1.0, 2.0, 3.0], 100000)\n\toiv = run_experiment_oiv([1.0, 2.0, 3.0], 100000)\n\tucb = run_experiment_ucb([1.0, 2.0, 3.0], 100000)\n\tbayes = run_experiment([1.0, 2.0, 3.0], 100000)\n\n\t# log scale plot\n\tplt.plot(eps, label='decaying-epsilon')\n\tplt.plot(oiv, label='optimistic')\n\tplt.plot(ucb, label='ucb1')\n\tplt.plot(bayes, label='bayesian')\n\tplt.legend()\n\tplt.xscale('log')\n\tplt.title('Decaying Epsilon vs. OIV vs. UCB1 vs. Bayesian')\n\tplt.show()\n\n\t# linear plot\n\tplt.plot(eps, label='decaying-epsilon')\n\tplt.plot(oiv, label='optimistic')\n\tplt.plot(ucb, label='ucb1')\n\tplt.plot(bayes, label='bayesian')\n\tplt.legend()\n\tplt.title('Decaying Epsilon vs. OIV vs. UCB1 vs. 
Bayesian')\n\tplt.show()\n\n","sub_path":"rl/compare_explore_exploit_methods.py","file_name":"compare_explore_exploit_methods.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"528406069","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport configparser\nimport argparse\n\nclass basefeed():\n\n    def __init__(self, conf=None):\n        if conf is None:\n            parser = argparse.ArgumentParser()\n            parser.add_argument(\"--conf\", help = \"Configuration file for websocket details\",\n                                default = os.environ['conf'])\n            args = parser.parse_args()\n            self.conf = args.conf\n        else:\n            self.conf = conf\n\n        feed_conf = self._parse_config_file()\n        if bool(feed_conf):\n            self.average_elements = int(feed_conf['averageElementCount'])\n            self.threshold_percent = float(feed_conf['thresholdPercentage'])\n            self.allowed_deviation = float(feed_conf['allowedPercentageDeviation'])\n        else:\n            print (\"Error while retrieving feed parameters\")\n            sys.exit(1)\n\n    def _parse_config_file(self):\n        config = configparser.ConfigParser()\n        config.optionxform = str\n        config.read(self.conf)\n        try:\n            a = dict(config.items('feed'))\n            return a\n        except Exception as e:\n            print (\"Feed section not defined in conf file\")\n\n\ndef main():\n    self = basefeed()\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"market_simulator/lib/base/basefeed.py","file_name":"basefeed.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"605002914","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 14 18:13:58 2014\n\n@author: worker\n\"\"\"\n\nimport matplotlib.pyplot as plt\n# from mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n#import myutils.image as image\nimport matplotlib.cm as cm\nfrom functools import partial\n\n#from pylab import *\n\ndef hist(x,bins):\n    plt.hist(x, bins=bins)\n    show()\n\ndef show():\n    plt.ion()\n    plt.show()\n    plt.draw()\n\ndef __format_coord(x,y,im,numcols,numrows):\n    col = int(x+0.5)\n    row = int(y+0.5)\n    if col>=0 and col=0 and row= ?) \r\n        ORDER BY \r\n            id\r\n        \"\"\",\r\n        (minimum_tag_count,))\r\n\r\n    rows = cursor.fetchall()\r\n\r\n    image_records = []\r\n\r\n    for row in rows:\r\n        md5 = row['md5']\r\n        extension = row['file_ext']\r\n        image_path = os.path.join(\r\n            image_folder_path, md5[0:2], f'{md5}.{extension}')\r\n        tag_string = row['tag_string']\r\n\r\n        image_records.append((image_path, tag_string))\r\n\r\n    connection.close()\r\n\r\n    return image_records\r\n\r\n\r\ndef load_image_records_raw(sqlite_path, minimum_tag_count, image_folder_path=None):\r\n    if not os.path.exists(sqlite_path):\r\n        raise Exception(f'SQLite database does not exist: {sqlite_path}')\r\n\r\n    connection = sqlite3.connect(sqlite_path)\r\n    connection.row_factory = sqlite3.Row\r\n    cursor = connection.cursor()\r\n\r\n    if image_folder_path is None:\r\n        image_folder_path = os.path.join(os.path.dirname(sqlite_path), 'images')\r\n\r\n    # Make Image path lookup\r\n    image_path_list = glob.glob(os.path.join(image_folder_path, \"*/*\"))\r\n    image_dict = dict()\r\n    for img_path in image_path_list:\r\n        img_filename = os.path.basename(img_path)\r\n        img_id, img_ext = os.path.splitext(img_filename)\r\n        image_dict[img_id] = img_path\r\n\r\n    cursor.execute(\r\n        \"\"\"\r\n        SELECT \r\n            id,\r\n            md5, \r\n            file_ext, \r\n            tag_string \r\n        FROM \r\n            posts \r\n        WHERE \r\n            (file_ext = 'png' OR file_ext = 'jpg' OR file_ext = 'jpeg') \r\n            AND (tag_count_general >= ?) \r\n        ORDER BY \r\n            id\r\n        \"\"\",\r\n        (minimum_tag_count,))\r\n\r\n    rows = cursor.fetchall()\r\n\r\n    image_records = []\r\n\r\n    for row in rows:\r\n        id = str(row['id'])\r\n        md5 = row['md5']\r\n        extension = row['file_ext']\r\n        image_path = image_dict[id]\r\n        tag_string = row['tag_string']\r\n\r\n        image_records.append((image_path, tag_string))\r\n\r\n    connection.close()\r\n\r\n    return image_records\r\n\r\n\r\ndef read_metadata(file_path):\r\n    lines = {}\r\n    with open(file_path, 'r', encoding='utf-8') as file:\r\n        # one JSON object per line\r\n        for raw_line in file:\r\n            line = json.loads(raw_line)\r\n            lines[line['id']] = line\r\n    return lines\r\n\r\n\r\ndef read_metadata_dict(file_path, top_n=None, filter_attributes=None, filter_tag=None):\r\n    data = dict()\r\n    with open(file_path, 'r', encoding='utf-8') as f:\r\n        for line in f:\r\n            l = json.loads(line)\r\n            if filter_attributes:\r\n                l = {k: l.get(k, None) for k in filter_attributes}\r\n            l['tags'] = [t['name'] for t in l['tags']]\r\n\r\n            if filter_tag:\r\n                if len([l for l in l['tags'] if l in filter_tag]):\r\n                    data[l['id']] = l\r\n            else:\r\n                data[l['id']] = l\r\n    return data\r\n\r\n\r\ndef query_db(sqlite_path, query):\r\n    output_connection = sqlite3.connect(sqlite_path)\r\n\r\n    df = pd.read_sql_query(query, output_connection)\r\n\r\n    output_connection.close()\r\n\r\n    return df\r\n","sub_path":"deepdanbooru/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"464198808","text":"import os\nimport redis\nimport config\nfrom flask import Flask, request, jsonify\nfrom tasks import HippoTask\nfrom queues import HippoQueue\nfrom data_sources import *\n\napp = Flask(__name__,static_url_path='')\napp.config['TEMPLATES_AUTO_RELOAD'] = True\napp.redis = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB, password=config.REDIS_PW)\n\n\n@app.route('/',methods=['GET'])\ndef index():\n    return app.send_static_file('index.html')\n\n\n@app.route('/tasks/',methods=['GET','POST'])\ndef tasks():\n    tasks = HippoTask.all_tasks(redis_client=app.redis)\n    if request.method == 'POST':\n        data = request.get_json()\n\n        existing = [t for t in tasks if (\n                    t.definition['cmd'] == data['cmd'] and\n                    t.definition['mesos_state'] in ['TASK_STAGING','TASK_RUNNING', 'WAITING_ON_OFFERS'] and\n                    t.definition['container']['docker']['image'] == data['container']['docker']['image'])\n                   ]\n\n        if len(existing):\n            print('skipping duplicate task', data)\n            return jsonify({\"mesos_id\":existing[0].mesos_id})\n\n        # create a new task\n        t = HippoTask(definition=data,redis_client=app.redis)\n        validation_error = t.validate()\n        if validation_error:\n            return jsonify({\"error\":validation_error}), 400\n        t.queue()\n        return jsonify({\"mesos_id\":t.mesos_id})\n\n    return jsonify([t.definition for t in tasks])\n\n\n@app.route('/tasks/<task_id>/',methods=['GET','DELETE'])\ndef single_task(task_id):\n    t = HippoTask(mesos_id=task_id,redis_client=app.redis)\n    if not t.definition:\n        return jsonify({\"error\":task_id + \" not found\"}), 404\n\n    if request.method == 'DELETE':\n        t.delete()\n        return jsonify({\"deleted\": task_id})\n    else:\n        t = HippoTask(mesos_id=task_id,redis_client=app.redis)\n        return jsonify(t.definition)\n\n\n@app.route('/tasks/<task_id>/kill/',methods=['GET','POST'])\ndef kill_task(task_id):\n    t = HippoTask(mesos_id=task_id,redis_client=app.redis)\n    if not t.definition:\n        return jsonify({\"error\":task_id + \" not found\"}), 404\n    t.kill()\n    return jsonify({\"killed\": task_id})\n\n\n@app.route('/queues/', methods=['GET','POST','PUT'])\ndef queues():\n    if request.method in ['POST','PUT']:\n        data = request.get_json()\n        q = HippoQueue(definition=data,redis_client=app.redis)\n        validation_error = q.validate()\n        if validation_error:\n            q.delete()\n            return jsonify({\"error\":validation_error}), 400\n        return jsonify({\"id\":q.id})\n    else:\n        all_queues = HippoQueue.all_queues(app.redis)\n\n        return jsonify([q.definition for q in all_queues])\n\n\n@app.route('/queues/<queue_id>/',methods=['GET','POST','PUT','DELETE'])\ndef single_queue(queue_id):\n    q = HippoQueue(id=queue_id,redis_client=app.redis)\n    if not q.definition:\n        return jsonify({\"error\":queue_id + \" not found\"}), 404\n\n    if request.method == 'DELETE':\n        q.delete()\n        return jsonify({\"deleted\": queue_id})\n    elif request.method in ['POST','PUT']:\n        data = request.get_json()\n        q.definition = data\n        if 'id' not in q.definition:\n            q.definition['id'] = q.id\n        validation_error = q.validate()\n        if validation_error:\n            return jsonify({\"error\":validation_error}), 400\n        q.save()\n\n    return jsonify(q.definition)\n\n\n@app.route('/queues/<queue_id>/<toggle>/',methods=['GET'])\ndef single_queue_enable_toggle(queue_id, toggle):\n    q = HippoQueue(id=queue_id,redis_client=app.redis)\n    if not q.definition:\n        return jsonify({\"error\":queue_id + \" not found\"}), 404\n\n    if toggle == 'enable':\n        q.enable()\n        return jsonify({\"enabled\": queue_id})\n    else:\n        q.disable()\n        return jsonify({\"disabled\": queue_id})\n\n\n@app.route('/queuetypes/',methods=['GET'])\ndef queue_types():\n    qtlist = []\n    processors = HippoDataSource.__subclasses__()\n    for p in processors:\n        qtlist.append({\n            'namespace':p.namespace,\n            'label':p.label,\n            'inputs':p.inputs\n        })\n    return jsonify(qtlist)\n\nif __name__ == '__main__':\n    app.run(debug=True, threaded=True)","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"19302213","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/15\n# @Author : Cary\n# @Function : Use PCA to reduce the dimensionality of semiconductor manufacturing data\nimport numpy as np\n\n\ndef load_data_set(file_name, delim='\\t'):\n    \"\"\"Load the data set\"\"\"\n    fr = open(file_name)\n    string_arr = [line.strip().split(delim) for line in fr.readlines()]\n    dat_arr = [list(map(float, line)) for line in string_arr]\n    return np.mat(dat_arr)\n\n\ndef replace_nan_with_mean():\n    \"\"\"Replace NaN values with the column means\"\"\"\n    data_mat = load_data_set('secom.data', ' ')\n    num_feat = np.shape(data_mat)[1]\n\n    # compute the mean of all non-NaN values in each column\n    for i in range(num_feat):\n        mean_val = np.mean(data_mat[np.nonzero(~np.isnan(data_mat[:, i].A))[0], i])\n        data_mat[np.nonzero(np.isnan(data_mat[:, i].A))[0], i] = mean_val\n\n    return data_mat\n\n\nif __name__ == '__main__':\n    data_mat = replace_nan_with_mean()\n    mean_vals = np.mean(data_mat, axis=0)\n    mean_removed = data_mat-mean_vals\n    cov_mat = np.cov(mean_removed, rowvar=0)\n    eig_vals, eig_vects = np.linalg.eig(np.mat(cov_mat))\n    print(eig_vals)","sub_path":"Machine Learning in Achtion/ch13/pca_semiconductor.py","file_name":"pca_semiconductor.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"568323204","text":"from openerp.osv import fields, osv\nfrom openerp.addons.crm import crm\n\nclass tag_cust_tabs(osv.osv):\n\n    _inherit = \"res.partner\"\n    \n\n\n    _columns = {\n\n        'user_id_n': fields.many2one('res.users', 'Salesperson', select=True, track_visibility='always'),\n        'industry': 
fields.many2one('industry', 'industry', select=True),\n\n\n 'parent_id': fields.many2one('res.partner', 'Related Company', select=True, track_visibility='always'),\n 'contact_name': fields.char('Contact Name', size=64, select=True, track_visibility='always'),\n 'street_n': fields.char('Street', select=True, track_visibility='always'),\n 'street2_n': fields.char('Street2', select=True, track_visibility='always'),\n 'zip_n': fields.char('Zip', size=24, change_default=True, select=True, track_visibility='always'),\n 'city_n': fields.char('City', select=True, track_visibility='always'),\n 'state_id_n': fields.many2one(\"res.country.state\", 'State', ondelete='restrict', select=True, track_visibility='always'),\n 'country_id_n': fields.many2one('res.country', 'Country', ondelete='restrict', select=True, track_visibility='always'),\n 'email_form': fields.char('Email', select=True, track_visibility='always'),\n 'phone_num': fields.char('Phone', select=True, track_visibility='always'),\n 'fax_num': fields.char('Fax', select=True, track_visibility='always'),\n 'mobile_num': fields.char('Mobile', select=True, track_visibility='always'),\n 'vehicle_name_n': fields.char('Vehicle', size=64, select=True, track_visibility='always'),\n 'date_deadline': fields.date('Expected Closing', help=\"Estimate of the date on which the opportunity will be won.\", select=True, track_visibility='always'),\n 'date_action': fields.date('Next Action Date', select=True, track_visibility='always'),\n 'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True, track_visibility='always'),\n 'categ_ids': fields.many2many('crm.case.categ', 'res_partner_category_rel', 'partner_id', 'category_id', 'Tags', \\\n domain=\"['|', ('section_id', '=', section_id), ('section_id', '=', False), ('object_id.model', '=', 'crm.lead')]\", select=True, track_visibility='always'),\n 'title_action': fields.char('Next Action', select=True, track_visibility='always'),\n 'company_currency': fields.related('company_id', 'currency_id', type='many2one', string='Currency', readonly=True, relation=\"res.currency\"),\n 'planned_revenue': fields.float('Expected Revenue', select=True, track_visibility='always'),\n 'probability': fields.float('Success Rate (%)', group_operator=\"avg\"),\n 'TagCust1': fields.char('Data Upload ID', size=15),\n\t 'TagCust2': fields.char('Business Type', size=25), \n 'TagCust3': fields.text('Accountant Name', size=50),\n 'TagCust4': fields.text('Accountant Address', size=100), \n 'TagCust5': fields.text('Accountant contact details', size=100), \n 'TagCust6': fields.text('Company Name & or Trust Name', size=100), \n 'TagCust7': fields.char('Company ACN', size=25), \n 'TagCust8': fields.char('Company ABN', size=25),\n 'TagCust9': fields.text('Name of Company Directors', size=100), \n 'TagCust10': fields.text('Directors Date of Birth', size=50), \n 'TagCust11': fields.char('Directors drivers licence details', size=100), \n 'TagCust12': fields.char('Website', size=50),\n 'TagCust13': fields.char('Other 13', size=25),\n\t 'TagCust14': fields.date('Settlement'), \n 'TagCust15': fields.float('Loan Term'), \n 'TagCust16': fields.char('Make',size=25), \n 'TagCust17': fields.char('Model', size=25), \n 'TagCust18': fields.char('Year', size=5),\n 'TagCust19': fields.char('Color', size=25), \n 'TagCust20': fields.char('Reg No', size=25),\n 'TagCust21': fields.char('Reg Expiry', size=25),\n 'TagCust22': fields.char('Tyres %', size=25), \n 'TagCust23': fields.char('Kms', size=25), \n 'TagCust24': 
fields.selection([('unl','Unleaded'), ('ga','Gas'), ('die','Diesel'),('ele','Electric')],'Fuel Type'),\n        'TagCust25': fields.selection([('man','Manual'), ('aut','Auto')], 'Trans'),\n        'TagCust26': fields.selection([('1','4x4'), ('2','2x4'),('3','AWD')], 'X Drive'),\n        'TagCust27': fields.char('Seating', size=25), \n        'TagCust28': fields.text('Other', size=200), \n        'TagCust29': fields.char('Other 29', size=25), \n        'TagCust30': fields.char('D.O.B', size=25),\n        'TagCust31': fields.char('Other 31', size=25),\n\t    'TagCust32': fields.char('Other 32', size=25), \n\t    'TagCust33': fields.char('Other 33', size=25),\n\t    'TagCust34': fields.char('Make', size=30), \n        'TagCust35': fields.char('Model', size=30),\n        'TagCust36': fields.char('Year', size=25),\n        'TagCust37': fields.char('Color', size=25), \n        'TagCust38': fields.char('Reg No', size=25), \n        'TagCust39': fields.char('Reg Expiry', size=25), \n        'TagCust40': fields.char('Tyres %', size=25), \n        'TagCust41': fields.char('Kms', size=25),\n        'TagCust42': fields.char('Fuel Type', size=25), \n        'TagCust43': fields.char('Trans', size=25), \n        'TagCust44': fields.char('X Drive', size=25), \n        'TagCust45': fields.char('Seating', size=25),\n        'TagCust46': fields.text('Other', size=100),\n\t    'TagCust47': fields.char('Other 47', size=25), \n\t    'TagCust48': fields.char('Other 48', size=25),\n\t    'TagCust49': fields.char('Other 49', size=25), \n\t    'TagCust50': fields.char('Other 50', size=25),\n\n\t    'TagCustL1Label': fields.char('Assets', size=1),\n\t    'TagCustAL1': fields.float('Cash at Bank'),\n        'TagCustAL2': fields.float('Residential Property'), \n        'TagCustAL3': fields.float('Property 1'), \n        'TagCustAL4': fields.float('Property 2'), \n        'TagCustAL5': fields.float('Property 3'),\n        'TagCustAL6': fields.float('Other Investments'),\n\t    'TagCustAL7': fields.float('Fixtures & Fittings'), \n\t    'TagCustAL8': fields.float('Furniture'),\n\t    'TagCustAL9': fields.float('Superannuation'), \n\t    'TagCustAL10': fields.float('Shares'),\n\t    'TagCustAL11': fields.float('Plant & Equip'),\n        'TagCustAL12': fields.float('Motor Vehicle'), \n        'TagCustAL13': fields.float('Bike/Van/Boat'), \n        'TagCustAL14': fields.float('Valuables'), \n        'TagCustAL15': fields.float('Business Val'),\n        'TagCustAL16': fields.float('Total Assets'),\n\t    'TagCustL1Label2': fields.char('Liabilities', size=1),\n\t    'TagCustAL21': fields.float('Overdraught'),\n        'TagCustAL22': fields.float('Residential Property'), \n        'TagCustAL23': fields.float('Property 1'), \n        'TagCustAL24': fields.float('Property 2'), \n        'TagCustAL25': fields.float('Property 3'),\n        'TagCustAL26': fields.float('Other Investments'),\n\t    'TagCustAL27': fields.float('Fixtures & Fittings'), \n\t    'TagCustAL28': fields.float('Furniture'),\n\t    'TagCustAL29': fields.float('Superannuation'), \n\t    'TagCustAL30': fields.float('Shares'),\n\t    'TagCustAL31': fields.float('Plant & Equip'),\n        'TagCustAL32': fields.float('Motor Vehicle'), \n        'TagCustAL33': fields.float('Bike/Van/Boat'), \n        'TagCustAL34': fields.float('Valuables'), \n        'TagCustAL35': fields.float('Business Val'),\n        'TagCustAL36': fields.float('Total Liabilities'),\n\t    'TagDevNotesFT1':fields.text('DFT1 Notes'),\n\t    'TagDevNotesFT1H':fields.char('Help'),\n\t    'TagDevNotesFT2':fields.text('FT2 Added for comments re development of this tab area'),\n\t    'TagDevNotesFT3':fields.text('FT3 Added for comments re development of this tab area'),\n\t    'TagDevNotesFT4':fields.text('FT4 Added for comments re development of this tab area'),\n\t    'TagDevNotesFT5':fields.text('FT5 Added for comments re development of this 
tab area'),\n\t 'TagDevNotesLT1':fields.text('LT1 Added for comments re development of this tab area'),\n\t 'TagDevNotesLT2':fields.text('LT2 Added for comments re development of this tab area'),\n\t 'TagDevNotesLT3':fields.text('LT3 Added for comments re development of this tab area'),\n\t 'TagDevNotesLT4':fields.text('LT4 Added for comments re development of this tab area'),\n\t 'TagDevNotesLT5':fields.text('LT5 Added for comments re development of this tab area'),\n\t 'TagCustList01':fields.one2many('crm.lead.tag_cust_list_1', 'tag_list1_id', 'TagListA01'),\n\t 'TagCustList02': fields.one2many('crm.lead.tag_cust_list_2', 'tag_list2_id', 'TagListA02'),\n\t 'TagCustList03': fields.one2many('crm.lead.tag_cust_list_3', 'tag_list3_id', 'TagListA03'),\n\t 'TagCustList04': fields.one2many('crm.lead.tag_cust_list_4', 'tag_list4_id', 'TagListA04'),\n\t 'TagCustList05': fields.one2many('crm.lead.tag_cust_list_5', 'tag_list5_id', 'TagListA05')\n }\n\n _defaults = {\n 'user_id': lambda s, cr, uid, c: uid,\n # 'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),\n # 'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, context=c),\n 'priority': lambda *a: crm.AVAILABLE_PRIORITIES[2][0],\n \n }\n\n \nclass tag_cust_list_1(osv.osv):\n\n _name = \"crm.lead.tag_cust_list_1\"\n \n _columns = {\n 'tag_list1_id': fields.many2one('crm.lead','Member Block Reference', required=True, readonly=True),\n\t\t'tag_list2_id': fields.many2one('crm.lead','Member Block Reference', required=True, readonly=True),\n\t\t'tag_list3_id': fields.many2one('crm.lead','Member Block Reference', required=True, readonly=True),\n\t\t'tag_list4_id': fields.many2one('crm.lead','Member Block Reference', required=True, readonly=True),\n\t\t'tag_list5_id': fields.many2one('crm.lead','Member Block Reference', required=True, readonly=True),\n 'TagList101': fields.char('Name'),\n 'TagList102': fields.char('Address'),\n\t\t'TagList103': fields.char('Phone'),\n\t\t'TagList104': fields.char('D.O.B'),\n\t\t'TagList105': fields.char('Lic No', size=20),\n\t\t'TagList106': fields.text('Note', size=150),\n\t\t'TagList201': fields.char('Service Date'),\n 'TagList202': fields.char('Kms'),\n\t\t'TagList203': fields.char('L203'),\n\t\t'TagList204': fields.char('L204'),\n\t\t'TagList205': fields.char('L205'),\n\t\t'TagList206': fields.float('L206'),\n\t\t'TagList301': fields.char('Name'),\n 'TagList302': fields.char('Address'),\n\t\t'TagList303': fields.char('Contact'),\n\t\t'TagList304': fields.char('Phone'),\n\t\t'TagList305': fields.char('Details'),\n\t\t'TagList306': fields.float('Price'),\n\t\t'TagList401': fields.char('Name'),\n 'TagList402': fields.char('Address'),\n\t\t'TagList403': fields.char('Contact'),\n\t\t'TagList404': fields.char('Phone'),\n\t\t'TagList405': fields.text('Notes'),\n\t\t'TagList406': fields.float('Price'),\n\t\t'TagList501': fields.char('Ref No'),\n 'TagList502': fields.char('Term'),\n\t\t'TagList503': fields.char('Rate %'),\n\t\t'TagList504': fields.char('Broker'),\n\t\t'TagList505': fields.char('Trident Fee'),\n\t\t'TagList506': fields.char('Lender Fee')\n\n\n }\n\n\nclass tag_cust_list_2(osv.osv):\n\n _name = \"crm.lead.tag_cust_list_2\"\n \n _columns = {\n 'tag_list2_id': fields.many2one('crm.lead','Tag Back Reference Field', required=True, readonly=True),\n 'TagList201': fields.char('Service Date'),\n 'TagList202': fields.char('Kms'),\n 'TagList203': fields.char('L203'),\n 'TagList204': fields.char('L204'),\n 'TagList205': fields.char('L205'),\n 'TagList206': fields.float('L206')\n 
}\n\nclass tag_cust_list_3(osv.osv):\n\n _name = \"crm.lead.tag_cust_list_3\"\n \n _columns = {\n 'tag_list3_id': fields.many2one('crm.lead','Tag Back Reference Field', required=True, readonly=True),\n 'TagList301': fields.char('Name'),\n 'TagList302': fields.char('Address'),\n 'TagList303': fields.char('Contact'),\n 'TagList304': fields.char('Phone'),\n 'TagList305': fields.char('Details'),\n 'TagList305a': fields.char('Spotters'),\n 'TagList306': fields.float('Price')\n }\n \n\nclass tag_cust_list_4(osv.osv):\n\n _name = \"crm.lead.tag_cust_list_4\"\n \n _columns = {\n 'tag_list4_id': fields.many2one('crm.lead','Tag Back Reference Field', required=True, readonly=True),\n 'TagList401': fields.char('Name'),\n 'TagList402': fields.char('Address'),\n 'TagList403': fields.char('Contact'),\n 'TagList404': fields.char('Phone'),\n 'TagList405': fields.text('Notes'),\n 'TagList406': fields.char('Email'),\n 'TagList407': fields.float('Price')\n }\n \nclass tag_cust_list_5(osv.osv):\n\n _name = \"crm.lead.tag_cust_list_5\"\n \n _columns = {\n 'tag_list5_id': fields.many2one('crm.lead','Tag Back Reference Field', required=True, readonly=True),\n 'TagList501': fields.char('Ref No'),\n 'TagList502': fields.char('Term'),\n 'TagList503': fields.char('Rate %'),\n 'TagList504': fields.char('Broker'),\n 'TagList505': fields.char('Trident Fee'),\n 'TagList506': fields.char('Lender Fee'),\n 'TagList507': fields.char('Lender Name')\n }\n\nclass res_partner(osv.osv):\n\n _inherit = \"res.partner\"\n\n _columns = {\n\n 'vehicle_name':fields.char('Vehicle',size=64),\n\n }\nclass industry(osv.osv):\n _name = \"industry\"\n \n\n _columns = {\n\n 'name':fields.char('industry',size=64),\n\n }\ntag_cust_tabs()\n","sub_path":"customertrident/tag_cust_tabs.py","file_name":"tag_cust_tabs.py","file_ext":"py","file_size_in_byte":13359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"576185449","text":"# coding=utf-8\nimport torch\nimport numpy as np\nimport pandas as pd # a data-processing tool built on top of NumPy\nimport torch.nn as nn # nn.Module\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable # import of the Variable wrapper\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\n\nclass Batch_Net(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(Batch_Net, self).__init__()\n self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1),\n nn.BatchNorm1d(n_hidden_1),\n nn.ReLU(True))\n self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2),\n nn.BatchNorm1d(n_hidden_2),\n nn.ReLU(True))\n self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n out = self.layer3(x)\n return out\n\n\nclass Simple_Net(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(Simple_Net, self).__init__()\n self.layer1 = nn.Linear(in_dim, n_hidden_1)\n self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)\n self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n out = self.layer3(x)\n return out\n\n\nclass Activation_Net(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(Activation_Net, self).__init__()\n self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1),\n nn.ReLU(True))\n self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2),\n nn.ReLU(True))\n self.layer3 = 
nn.Sequential(nn.Linear(n_hidden_2, out_dim))\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n out = self.layer3(x)\n return out\n\n\nbatch_size = 64\nlearning_rate = 1e-2\nnum_epoches = 20\n\n# compose the ToTensor and normalization preprocessing steps; ToTensor scales values to 0~1, and Normalize takes the mean and std (for 3 channels use [0.5,0.5,0.5],[0.5,0.5,0.5])\ndata_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])\ntrain_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=False)\ntest_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) # shuffle controls whether the data is shuffled\ntest_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\nmodel = Batch_Net(28*28, 300, 100, 10) # change the network here\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\nfor epoch in range(20):\n model.train()\n train_loss = 0\n train_acc = 0\n losses = []\n acces = []\n train_loadercnt = 0\n for im, label in train_loader: # training data: 938 batches of 64 images, each 28*28\n im = im.view(-1, 28*28)\n im = Variable(im)\n label = Variable(label)\n out = model(im)\n loss = criterion(out, label)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item() # accumulate the training loss\n _, pred = out.max(1) # take the max of the 10 outputs to get the predicted digit\n num_correct = (pred == label).sum().item() # count correct predictions\n acc = num_correct/im.shape[0] # compute the accuracy\n train_acc += acc\n train_loadercnt = train_loadercnt + 1\n losses.append(train_loss/len(train_loader))\n acces.append(train_acc/len(train_loader))\n\n eval_loss = 0\n eval_acc = 0\n model.eval()\n eval_losses = []\n eval_acces = []\n for im, label in test_loader:\n im = im.view(-1, 28 * 28)\n im = Variable(im)\n label = Variable(label)\n out = model(im)\n loss = criterion(out, label)\n\n eval_loss += loss.item()\n _, pred = out.max(1)\n num_correct = (pred == label).sum().item()\n acc = num_correct / im.shape[0]\n eval_acc += acc\n eval_losses.append(eval_loss / len(test_loader))\n eval_acces.append(eval_acc / len(test_loader))\n print('epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}, Eval Loss: {:.6f}, Eval Acc: {:.6f}'.format(epoch,\n train_loss / len(train_loader), train_acc / len(train_loader),\n eval_loss / len(test_loader), eval_acc / len(test_loader)))\n","sub_path":"深度学习/深度学习入门之pytorch/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"212399123","text":"#!/usr/bin/env python\n\nimport frozen, os, platform, re, uuid\n\nif platform.system() == \"Windows\":\n\timport _winreg\n\n__firstInstall__ = False\n\ndef is_first_install():\n\treturn __firstInstall__\n\ndef initialize():\n\t\"\"\"\n\tPerform certain initialization functions relating to the directory\n\tMainly, checking if it exists. 
If it doesn't, it is assumed that\n\tthis is being run for the first time.\n\t\"\"\"\n\tglobal __firstInstall__ # update the module-level flag read by is_first_install()\n\t\n\tpath = get_default_path()\n\t__firstInstall__ = False\n\t\n\tif not os.path.exists(path):\n\t\t__firstInstall__ = True\n\t\tos.makedirs(path)\n\t\n\t# Make sure each of these exists\n\tdirs = [\"bin\", \"config\", \"html\", \"log\", \"patch\", \"temp\", \"unity3d\",\n\t\t\tos.path.join(\"unity3d\", \"nslay\"),\n\t\t\tos.path.join(\"unity3d\", \"nplay\")\n\t]\n\t\n\tfor dir in dirs:\n\t\tdirPath = os.path.join(path,dir)\n\t\tif not os.path.exists(dirPath):\n\t\t\tos.makedirs(dirPath)\n\t\n\t# Do first install stuff\n\tif is_first_install() and frozen.we_are_frozen():\n\t\tpass # Todo\n\t\n\t# Generate the JS file\n\t#generate_js_path_file()\n\ndef copy_to_bin():\n\t\"\"\" Make assumptions based on the operating system about which files\n\twould need to be copied to the bin directory, and do so.\n\tNote that these files may change depending on how the patcher\n\tis packed into an executable (and of course, operating system). \"\"\"\n\t\n\t# Do nothing unless frozen\n\tif not frozen.we_are_frozen():\n\t\treturn\n\t\n\tbinpath = os.path.join(get_default_path(), \"bin\")\n\tmodpath = frozen.module_path()\n\t\n\tcrtdir = \"Microsoft.VC90.CRT\"\n\t\n\tif platform.system() == \"Windows\":\n\t\t# Make sure CRT distributables path exists\n\t\tos.makedirs(os.path.join(binpath, crtdir))\n\t\t\n\t\t# We are running an .exe from the bin path already\n\t\tif frozen.we_are_frozen() and modpath == binpath:\n\t\t\treturn\n\t\t\n\t\t# Files to copy relative to modpath\n\t\tfiles = [\n\t\t\t\"NSlayPatcher.exe\",\n\t\t\t\"patcher.zip\",\n\t\t\t\"library.zip\",\n\t\t\t\"%s/msvcm90.dll\" % crtdir,\n\t\t\t\"%s/msvcp90.dll\" % crtdir,\n\t\t\t\"%s/msvcr90.dll\" % crtdir\n\t\t]\n\t\t\n\t\t# Todo\n\t\t#for file in files:\n\t\t#\tpath = os.path.join(binpath, file)\n\ndef generate_js_path_file():\n\t\"\"\" This should no longer be needed \"\"\"\n\t\n\tlatest = get_latest_nplay_client()\n\tif latest is None:\n\t\treturn\n\t\n\tif platform.system() == \"Windows\":\n\t\tlatest = latest.replace(\"\\\\\", \"\\\\\\\\\")\n\t\n\ttext = 'window.latestClientPath = \"%s\";' % latest\n\t\n\tprint(\"Generating with: %s\" % text)\n\t\n\ttry:\n\t\tf = open(os.path.join(get_default_path(), \"html\", \"js\", \"generated_path.js\"), \"w+\")\n\t\tf.write(text)\n\t\tf.close()\n\texcept IOError as e:\n\t\tprint(str(e))\n\t\treturn\n\ndef get_latest_patch(version=\"1.8.3.3\"):\n\t\"\"\" Get the greatest version of a patch to be applied to a\n\tspecific NPlay client. 
\"\"\"\n\tpath = os.path.join(get_default_path(), \"patch\")\n\tgreatest = None\n\t\n\t# Ex: patch_1.8.3.3_0.4.diff\n\tvr = r\"((\\d+\\.)*\\d+)\"\n\tp = re.compile(r\"^patch_\" + version + r\"_\" + vr + r\"\\.diff\")\n\t\n\tdirs = os.listdir(path)\n\tfor dir in dirs:\n\t\tm = p.match(dir)\n\t\tif m is None: continue\n\t\t\n\t\tpatchVersion = m.group(1)\n\t\t\n\t\tif greatest is None: greatest = patchVersion\n\t\telse: greatest = get_greater_version(greatest, patchVersion)\n\t\n\tif greatest is None: return None\n\t\n\treturn {\n\t\t\"path\": os.path.join(path, \"patch_\" + version + \"_\" + greatest + \".diff\"),\n\t\t\"nplay_version\": version,\n\t\t\"version\": greatest,\n\t}\n\ndef get_patch_path(version, nplay_version=\"1.8.3.3\"):\n\t\"\"\" Get the expected path to a patch of a specific version\n\tfor a specific NPlay client version \"\"\"\n\treturn os.path.join(get_default_path(), \"patch\",\n\t\t\"patch_%s_%s.diff\" % (nplay_version, version))\n\t\t\ndef get_latest_client(name=\"NPlay\"):\n\t\"\"\" Get info on the latest client (either NPlay or NSlay). If\n\tunsuccessful, will return None. If successful, will return a\n\tdictionary with keys \"path\" and \"version\" \"\"\"\n\tgreatest = None\n\tpath = os.path.join(get_default_path(), \"unity3d\", name.lower())\n\t\n\tp = re.compile(r\"^\" + name + r\"_((\\d+\\.)*\\d+)\\.unity3d$\")\n\tdirs = os.listdir(path)\n\tfor dir in dirs:\n\t\tm = p.match(dir)\n\t\tif m is None: continue\n\t\t\n\t\tversionStr = m.group(1)\n\t\t\n\t\t# Set the greatest version\n\t\tif greatest is None: greatest = versionStr\n\t\telse: greatest = get_greater_version(greatest, versionStr)\n\t\n\tif greatest is None: return None\n\t\n\treturn {\n\t\t\"path\": get_client_path(name=name, version=greatest),\n\t\t\"version\": greatest,\n\t}\n\t\ndef get_latest_client_version(name=\"NPlay\"):\n\tinfo = get_latest_client(name=name)\n\tif info is None: return None\n\treturn info[\"version\"]\n\ndef get_latest_client_path(name=\"NPlay\"):\n\tinfo = get_latest_client(name=name)\n\tif info is None: return None\n\treturn info[\"path\"]\n\ndef get_latest_nslay_client():\n\treturn get_latest_client(\"NSlay\")\n\ndef get_latest_nplay_client():\n\treturn get_latest_client(\"NPlay\")\n\t\n\t'''\n\tgreatestVersion = None\n\tclientPath = os.path.join(get_default_path(), \"unity3d\", \"nplay\")\n\t\n\tp = re.compile(r\"^NPlay_((\\d+\\.)*\\d+)\\.unity3d$\")\n\tdirs = os.listdir(clientPath)\n\tfor dir in dirs:\n\t\tm = p.match(dir)\n\t\tif m is None: continue\n\t\t\n\t\tversionStr = m.group(1)\n\t\t\n\t\t# Set the greatest version\n\t\tif greatestVersion is None:\n\t\t\tgreatestVersion = versionStr\n\t\telse:\n\t\t\tgreatestVersion = get_greater_version(greatestVersion, versionStr)\n\t\n\treturn greatestVersion\n\t'''\n\t\ndef get_client_path(name=\"NPlay\", version=\"1.8.3.3\"):\n\t\"\"\" Given a version, get the expected client path for that version \"\"\"\n\treturn os.path.join(get_default_path(), \"unity3d\", name.lower(), \"%s_%s.unity3d\" % (name, version))\n\n'''\ndef get_latest_nplay_client():\n\t\n\tgreatestVersion = get_latest_nplay_client_version()\n\t\n\tif greatestVersion is None:\n\t\treturn None\n\t\n\t# Return absolute path to the latest version\n\treturn get_nplay_client_path(version=greatestVersion)\n'''\n\ndef get_live_client_path():\n\t\"\"\" Get the latest NSlay client path,\n\tshould always be at $PATCHER/unity3d/BeGone.unity3d \"\"\"\n\treturn os.path.join(get_default_path(), \"unity3d\", \"BeGone.unity3d\")\n\ndef has_live_client():\n\treturn 
os.path.isfile(get_live_client_path())\n\t\ndef get_greater_version(v1, v2):\n\t\"\"\" Helper function for getting the greatest version Id \"\"\"\n\tv1s = v1.split(\".\")\n\tv2s = v2.split(\".\")\n\t\n\tm = min(len(v1s), len(v2s))\n\tfor i in xrange(0, m):\n\t\tn1 = int(v1s[i])\n\t\tn2 = int(v2s[i])\n\t\tif n1 > n2:\n\t\t\treturn v1\n\t\telif n2 > n1:\n\t\t\treturn v2\n\t\n\t# If we're still here, we're doing a comparison like\n\t# \"1.2.3\" vs. \"1.2.3.4\"\n\t# Just return the longer string\n\tif len(v1) > len(v2):\n\t\treturn v1\n\telse:\n\t\treturn v2\n\t\ndef cleanup_temp():\n\t\"\"\" Cleanup the temp directory. Meant to be called on start/exit. \"\"\"\n\tpath = os.path.join(get_default_path(), \"temp\")\n\t\n\t# Get list of files in temp\n\ttfiles = []\n\tfor (dirpath, dirnames, filenames) in os.walk(path):\n\t\ttfiles.extend(filenames)\n\t\tbreak\n\t\n\t# Remove each file in temp\n\tfor tfile in tfiles:\n\t\tos.remove(os.path.join(path, tfile))\n\t\ndef get_default_path():\n\t\"\"\" Get the default path of this directory \"\"\"\n\tif platform.system() == \"Windows\":\n\t\t# Get the AppData directory\n\t\tpath = os.path.join(os.getenv(\"APPDATA\"), \"NSlay\", \"Patcher\")\n\telse: # Assuming OSX/Unix-based\n\t\tpath = os.path.expanduser(os.path.join(\"~\", \".nslay\", \"patcher\"))\n\treturn path\n\ndef get_html_path():\n\treturn os.path.join(get_default_path(), \"html\", \"BeGone.html\")\n\t\ndef get_temp_file():\n\t\"\"\" Get the path to a possible random file in the temp directory \"\"\"\n\tfilename = str(uuid.uuid4()).replace(\"-\", \"\")\n\treturn os.path.join(get_default_path(), \"temp\", filename)\n\ndef get_default_browser():\n\t\"\"\" Attempt to detect the default browser \"\"\"\n\tif platform.system() == \"Windows\":\n\t\tbrowser = \"Internet Explorer\"\n\t\ttry:\n\t\t\twith _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, \"Software\\Classes\\http\\shell\\open\\command\") as key:\n\t\t\t\t# Note: \"(Default)\" = None\n\t\t\t\tkeyinfo = _winreg.QueryValueEx(key, None)\n\t\t\t\t\n\t\t\t\tif \"firefox.exe\" in keyinfo[0]:\n\t\t\t\t\tbrowser = \"firefox\"\n\t\t\t\telif \"chrome.exe\" in keyinfo[0]:\n\t\t\t\t\tbrowser = \"chrome\"\n\t\t\t\telif \"safari.exe\" in keyinfo[0]:\n\t\t\t\t\tbrowser = \"safari\"\n\t\t\t\telif \"opera.exe\" in keyinfo[0]:\n\t\t\t\t\tbrowser = \"opera\"\n\t\t\t\t\n\t\t\t\t_winreg.CloseKey(key)\n\t\texcept WindowsError as e:\n\t\t\tprint(str(e))\n\t\t\tpass\n\t\t\n\t\treturn browser\n","sub_path":"src/nslay/patcher/appdir.py","file_name":"appdir.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"435200964","text":"from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation, pyplot as plt\nimport matplotlib.cm as cm\nimport argparse\nimport os\nimport numpy as np\n\n\ndatafile = \"../data/recon32.bin\"\n\n\n\nclass AnimatedGif:\n def __init__(self, size=(800, 600)):\n self.fig = plt.figure()\n self.fig.set_size_inches(size[0] / 100, size[1] / 100)\n ax = self.fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)\n ax.set_xticks([])\n ax.set_yticks([])\n self.images = []\n\n def add(self, image, label=''):\n\n I = np.dstack([image, image, image])\n x = 15\n y = 15\n # I[x, y, :] = [1, 0, 0]\n\n plt_im = plt.imshow(image,cmap=cm.Greys_r, interpolation='nearest' , animated=True)\n plt.show()\n\n plt_txt = plt.text(10, 310, label, color='red')\n self.images.append([plt_im, plt_txt])\n\n def save(self, filename, fps):\n ani = 
animation.ArtistAnimation(self.fig, self.images)\n ani.save(filename, writer='imagemagick', fps=fps)\n\n\n\n\ndata = np.fromfile(datafile, dtype=np.float32)\nsize = int(round(np.cbrt(data.size))) # round: the floating-point cube root may not be exact\nassert size**3 == data.size\ndata = data.reshape([size] * 3)\n\n# data[15, 15, 15] = 0\n\nvideo = AnimatedGif()\n# for frame_id in range(size):\n # video.add(data[frame_id])\nvideo.add(data[15])\nvideo.save(\"test.gif\", 10)\n","sub_path":"src/ctscan.py","file_name":"ctscan.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"315750122","text":"import numpy as np\nimport os\nfrom PIL import Image\nfrom numba import njit\n\nROOT_PATH = \"./Data/train/masks\"\nSAVE_PATH = \"./Locations/for_get_AP_yx/train-gt\"\ncnt=0\n\n\nfor file in os.listdir(ROOT_PATH):\n im = np.array(Image.open(os.path.join(ROOT_PATH, file)))\n\n target = np.max(im)\n for label in range(1, target + 1):\n x, y = np.where(im == label)\n if x.tolist() and y.tolist():\n xmin = np.min(x)\n xmax = np.max(x)\n ymin = np.min(y)\n ymax = np.max(y)\n \n save = SAVE_PATH +'/' +file.split('.')[0]+'.txt'\n with open(save, 'a') as f:\n f.write(f'47,{ymin},{xmin},{ymax},{xmax}\\n')\n\n else:\n print(f'continue from {file}')\n\n","sub_path":"get_gt.py","file_name":"get_gt.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"574083074","text":"def rozdilSloupcu(radky, indexS1, indexS2):\n rozdily = []\n for radek in radky:\n hodnoty = radek.split(sep=\";\")\n rozdil = float(hodnoty[indexS1]) - float(hodnoty[indexS2])\n rozdily.append(rozdil)\n return(rozdily) \n\nsoubor = open (\"cisla.txt\", \"r\")\nradky = soubor.readlines()\nrozdilyCisel = rozdilSloupcu(radky,0,2)\nprint(rozdilyCisel)\n \n","sub_path":"prog/cv5/ukol4.py","file_name":"ukol4.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"299134393","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# IMPORTS THE DATASET\ndata = keras.datasets.fashion_mnist\n\n# SPLIT TESTING AND TRAINING DATA\n(train_images, train_labels), (test_images, test_labels) = data.load_data()\n\n# ESTABLISH LABELS\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress',\n 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# CONVERT EACH PIXEL VALUE TO A NUMBER BETWEEN 0 AND 1\ntrain_images = train_images/255.0\ntest_images = test_images/255.0\nprint(test_images[0])\n\n\n# CREATE THE NEURAL NETWORK MODEL\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation=\"relu\"),\n keras.layers.Dense(128, activation=\"relu\"),\n keras.layers.Dense(10, activation=\"softmax\")\n])\n\nmodel.compile(optimizer=\"adam\",\n loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n# TRAIN THE MODEL\nmodel.fit(train_images, train_labels, epochs=5)\n\n# PREDICTIONS\nprediction = model.predict(test_images)\n\nfor i in range(10):\n plt.grid(False)\n plt.imshow(test_images[i], cmap=plt.cm.binary)\n plt.xlabel(\"Actual: \" + class_names[test_labels[i]])\n plt.title(\"Prediction: \" + class_names[np.argmax(prediction[i])])\n 
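# display each test image together with its predicted and actual labels\n 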
plt.show()\n","sub_path":"tut_01.py","file_name":"tut_01.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"85852889","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as ss\nimport bead_util as bu\nimport copy\nfrom scipy.optimize import curve_fit\n\npath0 = \"/data/20180927/bead1/spinning/amp_ramp_20181014_locked_good\"\npath1 = \"/data/20180927/bead1/spinning/amp_ramp_20181014_unlocked_good\"\ndfreq = 1210.68\nbw = 0.25\nNs = 250000\nFs = 5000.\nk = 1e-13*(370*2.*np.pi)**2\naxis = 0\nfreqs = np.fft.rfftfreq(Ns, 1./Fs)\nf_bool = np.abs(freqs-dfreq) lvl_start:\r\n await client.send_message(channel, '{} has leveled up to level {}'.format(user.mention, lvl_end))\r\n users[user.id]['level'] = lvl_end\r\n experience = 0\r\n\r\nkeep_alive()\r\nclient.run(token)\r\n","sub_path":"python/bot/xp_bot.py","file_name":"xp_bot.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"251382856","text":"# -*- coding: utf-8 -*-\nimport urllib\nimport time\nfrom selenium import webdriver\n\nclass Crawler:\n def __init__(self):\n self.url ='http://www.senbaba.cn/jsp/common/validateimage.jsp'\n self.img_xpath = '//ul/li/div/a/img'\n self.download_xpath = '//ul/li/div/div/span/a[@class=\"downloadicon\"]'\n self.img_url_dic = {}\n\n def launch(self):\n driver = webdriver.Chrome(r\"c:/python27/chromedriver.exe\")\n driver.maximize_window()\n driver.get(self.url)\n img_xpath = self.img_xpath\n download_xpath = self.download_xpath\n img_url_dic = self.img_url_dic\n\n\nif __name__ == '__main__':\n crawler = Crawler()\n crawler.launch()","sub_path":"下单平台改版后代码/selenium/code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"539183274","text":"from os import close, remove\nfrom shutil import move\nfrom tempfile import mkstemp\nimport re as regex\nimport traceback\nfrom util import CCTError, SkipSignal, TerminateError\nimport http_util as http\n\n\"\"\"\nThis file contains the Role Updating related object.\n\"\"\"\n\nclass RoleConfiguration:\n \"\"\" Holds \"\"\"\n def __init__(self, creators, maintainers, rightholders, settings, credentials):\n self.creators = creators\n self.maintainers = maintainers\n self.rightholders = rightholders\n self.settings = settings\n self.credentials = credentials\n\nclass RoleUpdater:\n def __init__(self, role_configuration):\n self.config = role_configuration\n \n def run_update_roles(self, xmlfile):\n self.update_roles(xmlfile, self.prepare_role_updates())\n\n def update_roles(self, file_path, replace_map):\n \"\"\"\n Reads through the input file and replaces content according to the replace map\n\n The replace_map is a list of tuples: (pattern, substitute text)\n \"\"\"\n fh, abs_path = mkstemp()\n with open(abs_path, 'w') as new_file:\n with open(file_path) as old_file:\n for line in old_file:\n for pattern, subst in replace_map:\n line = regex.sub(pattern, subst, line)\n new_file.write(line)\n close(fh)\n remove(file_path) # Remove original file\n move(abs_path, file_path) # Move new file\n\n def prepare_role_updates(self):\n \"\"\"\n Updates the roles on a module. 
This reads in from the settings file for\n creator, maintainer, and rightholder configuration.\n \"\"\"\n\n if len(self.config.creators) == 1:\n creator_string = '' \\\n 'firstname2 lastname2\\n' \\\n '' \\\n 'firstname2 lastname2\\n' \\\n '' \\\n 'firstname2 lastname2\\n' \\\n ' TodoList:\n encrypted_string = ''\n t = TodoList()\n if not (DATA_FILE in os.listdir('.')):\n return t\n\n with open('dump.dat', 'rb') as f:\n encrypted_string = f.read()\n fernet = Fernet(get_key())\n json_string = fernet.decrypt(encrypted_string).decode()\n data = json.loads(json_string)\n for i in data['todo']:\n dt = datetime.datetime.strptime(i['time'], \"%Y-%m-%d %H:%M:%S.%f\")\n t.add_todo(i['task'], i['desc'], dt)\n return t\n","sub_path":"modules/saveTodos.py","file_name":"saveTodos.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"352535488","text":"from __future__ import print_function\nfrom os.path import join, dirname, abspath\nimport xlrd\nimport re\nimport csv\nimport psycopg2\nfrom psycopg2.extensions import adapt\n\nx = \"14\"\npyc = (\"city_metrics_vmt_auto%s\")%x\n#Opening the database #\ntry:\n conn = psycopg2.connect(\"dbname='postgres' user='postgres' host='localhost' password='Lhb1234'\")\nexcept:\n print (\"I am unable to connect to the database\")\n\ncur = conn.cursor()\n# Make a SQL table\ncur.execute('CREATE TABLE %s (id serial PRIMARY KEY, year varchar(4), county varchar(50), city varchar(50), type varchar(50), vmt numeric(16,4))'%pyc)\nconn.commit()\ncur.close()\nconn.close()\n\n\n\n\n\nfname = join(dirname(dirname(abspath(__file__))), 'automation_vmt', 'vmt_year_%s.xlsx')%x\n\n\n# Open the workbook\ntry:\n xl_workbook = xlrd.open_workbook(fname)\nexcept:\n\n fname = join(dirname(dirname(abspath(__file__))), 'automation_vmt', 'vmt_year_%s.xls')%x\n xl_workbook = xlrd.open_workbook(fname)\n\n# List sheet names, and pull a sheet by name\n#\nsheet_names = xl_workbook.sheet_names()\nprint('Sheet Names', sheet_names)\n\nxl_sheet = xl_workbook.sheet_by_name(sheet_names[1])\n\n# Pull the first row by index\n# (rows/columns are also zero-indexed)\n#\nrow = xl_sheet.row(0) # 1st row\n\n# Print 1st row values and types\n#\nfrom xlrd.sheet import ctype_text\n\nprint('(Column #) type:value')\nfor idx, cell_obj in enumerate(row):\n cell_type_str = ctype_text.get(cell_obj.ctype, 'unknown type')\n print('(%s) %s %s' % (idx, cell_type_str, cell_obj.value))\n","sub_path":"sql_files/additional_files/automation_vmt/vmt_step2.py","file_name":"vmt_step2.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"143972994","text":"import caffe\nimport numpy as np\nimport random\n##############################################################################################################\n##############################################################################################################\n##############################################################################################################\nclass roi_pooling(caffe.Layer):\n\tdef setup(self,bottom,top):\n\t\tself.pooled_h = 6\n\t\tself.pooled_w = 6\n\t\tself.spatial_scale = 1.0/16.0\n\n\tdef reshape(self,bottom,top):\n\t\tself.ch_,self.h_,self.w_ = bottom[0].data.shape\n\t\tself.num_rois = bottom[1].data.shape[0]\n\t\t# [h_,w_] -> [pooled_h,pooled_w]\n\t\t# caffe's Blob.reshape takes the dimensions as separate arguments\n\t\ttop[0].reshape(self.num_rois,self.ch_,self.pooled_h,self.pooled_w)\n\t\tself.top_data = 
np.zeros((self.num_rois,self.ch_,self.pooled_h,self.pooled_w))\n\t\tself.max_idx = -np.ones((self.num_rois,self.ch_,self.pooled_h,self.pooled_w))\n\n\n\tdef forward(self,bottom,top):\n\t\tself.features = bottom[0].data[...]\n\t\tself.rois\t = bottom[1].data[...]\n\t\tself.rois \t = np.round(self.rois*self.spatial_scale)\t#map to feature map\n\t\tfor i in range(self.num_rois):\n\t\t\tx1,y1,x2,y2 = self.rois[i]\n\t\t\tfor ch in range(self.ch_):\n\t\t\t\tfor ph in range(self.pooled_h):\n\t\t\t\t\tfor pw in range(self.pooled_w):\n\t\t\t\t\t\tbin_size_h = float(y2-y1+1)/self.pooled_h\n\t\t\t\t\t\tbin_size_w = float(x2-x1+1)/self.pooled_w\n\t\t\t\t\t\thstart = np.floor(bin_size_h*ph)\n\t\t\t\t\t\twstart = np.floor(bin_size_w*pw)\n\t\t\t\t\t\thend = np.ceil((ph+1)*bin_size_h)\n\t\t\t\t\t\twend = np.ceil((pw+1)*bin_size_w)\n\t\t\t\t\t\t# clamp to the feature map and cast to int so the slicing below is valid\n\t\t\t\t\t\thstart = int(min(max(hstart + y1, 0), self.h_))\n\t\t\t\t\t\twstart = int(min(max(wstart + x1, 0), self.w_))\n\t\t\t\t\t\thend = int(min(max(hend + y1, 0), self.h_))\n\t\t\t\t\t\twend = int(min(max(wend + x1, 0), self.w_))\n\t\t\t\t\t\tif hstart >= hend or wstart >= wend:\n\t\t\t\t\t\t\tself.top_data[i][ch][ph][pw] = 0\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\trf = self.features[ch][wstart:wend,hstart:hend] # receptive field\n\t\t\t\t\t\targmax_idx = np.argmax(rf)\n\t\t\t\t\t\tx_ = argmax_idx // rf.shape[1]\t# unravel the flat index over the receptive field\n\t\t\t\t\t\ty_ = argmax_idx % rf.shape[1]\n\t\t\t\t\t\tself.top_data[i][ch][ph][pw] = rf[x_,y_]\n\t\t\t\t\t\t# store the max position as a flat index into the full feature map for backward\n\t\t\t\t\t\tself.max_idx[i][ch][ph][pw] = (wstart + x_)*self.w_ + (hstart + y_)\n\t\ttop[0].reshape(self.num_rois,self.ch_,self.pooled_h,self.pooled_w)\n\t\ttop[0].data[...] = self.top_data\n\n\tdef backward(self, top, propagate_down, bottom):\n\t\tif not propagate_down[0]:\n\t\t\treturn\n\t\ttop_diff = top[0].diff[...]\n\t\tbottom_diff = np.zeros((self.ch_,self.h_,self.w_))\n\t\tfor i in range(self.num_rois):\n\t\t\tfor ch in range(self.ch_):\n\t\t\t\tfor ph in range(self.pooled_h):\n\t\t\t\t\tfor pw in range(self.pooled_w):\n\t\t\t\t\t\tif self.max_idx[i][ch][ph][pw] == -1:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\targmax_idx = int(self.max_idx[i][ch][ph][pw])\n\t\t\t\t\t\tx_ = argmax_idx // self.w_\t# recover the feature-map position of the max\n\t\t\t\t\t\ty_ = argmax_idx % self.w_\n\t\t\t\t\t\tbottom_diff[ch][x_][y_] += top_diff[i][ch][ph][pw]\t# route the gradient to the max location\n\t\tbottom[0].diff[...] = bottom_diff\n","sub_path":"python/roi_pooling.py","file_name":"roi_pooling.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"223352660","text":"from distutils.errors import DistutilsSetupError\nfrom pkg_resources import get_distribution\nfrom subprocess import check_output\n\n\ncommand = 'git describe --tags --long --dirty'\nfmt = '{tag}.{commitcount}+{gitsha}'\n\n\ndef validate_version_format(dist, attr, value):\n try:\n version = check_output(command.split()).decode('utf-8').strip()\n except:\n version = get_distribution(dist.get_name()).version\n else:\n version = format_version(version=version, fmt=value)\n dist.metadata.version = version\n\n\ndef set_package_info(dist, attr, value):\n \"\"\"\n Write the package version and package name\n into a file.\n \"\"\"\n #1. Check that the version is set\n if not dist.metadata.version:\n if not hasattr(dist, 'version_format'):\n raise DistutilsSetupError('`version_format` must be set.')\n validate_version_format(dist, 'version_format', dist.version_format)\n #2. 
Write the version and name into the file\n with open(value, 'w') as f:\n f.write('__version__ = \"{0}\"\\n'.format(dist.metadata.version))\n f.write('__name__ = \"{0}\"'.format(dist.get_name()))\n\ndef format_version(version, fmt=fmt):\n parts = version.split('-')\n assert len(parts) in (3, 4)\n dirty = len(parts) == 4\n tag, count, sha = parts[:3]\n if count == '0':# and not dirty:\n return tag\n return fmt.format(tag=tag, commitcount=count, gitsha=sha.lstrip('g'))\n\n\nif __name__ == \"__main__\":\n # determine version from git\n git_version = check_output(command.split()).decode('utf-8').strip()\n git_version = format_version(version=git_version)\n\n # monkey-patch `setuptools.setup` to inject the git version\n import setuptools\n original_setup = setuptools.setup\n\n def setup(version=None, *args, **kw):\n return original_setup(version=git_version, *args, **kw)\n\n setuptools.setup = setup\n\n # import the packages's setup module\n import setup\n","sub_path":"setuptools_git_version.py","file_name":"setuptools_git_version.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"597628398","text":"\ndef hamming_distance(p, q):\n distance = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n distance += 1\n return distance\n \ndef approx_pattern_matching(pattern, text, max_mismatches):\n indexes = []\n len_pat = len(pattern)\n for i in range(len(text) - len_pat + 1):\n if hamming_distance(pattern, text[i:i + len_pat]) <= max_mismatches:\n indexes.append(i)\n return indexes\n \np = \"AAAAA\"\nt = \"AACAAGCTGATAAACATTTAAAGAG\"\nm = 2\nx = approx_pattern_matching(p, t, m)\n\nf = open('output.txt', 'w')\nfor i in x:\n f.write(str(i) + \" \")\nf.close()","sub_path":"Challenges/ApproximatePatternMatching/ApproximatePatternMatching.py","file_name":"ApproximatePatternMatching.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"167236756","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\nclass TeacherCifar:\r\n def __init__(self, para_npy_path=None):\r\n self._para = np.load(para_npy_path)\r\n self._para = self._para.item()\r\n print('Teacher parameter loaded...')\r\n\r\n def _conv(self, _input, W, b, strides=1, padding='SAME'):\r\n output = tf.nn.conv2d(_input, W, strides=[1, strides, strides, 1], padding=padding)\r\n output = tf.nn.bias_add(output, b)\r\n return output\r\n\r\n def _lrelu(self, _input):\r\n return tf.maximum(_input, 0.1 * _input)\r\n \r\n def build(self, input):\r\n weights = {\r\n 'conv1a': tf.Variable(self._para['conv1a_w:0'], trainable=False, name='conv1a_w'),\r\n 'conv1b': tf.Variable(self._para['conv1b_w:0'], trainable=False, name='conv1b_w'),\r\n 'conv1c': tf.Variable(self._para['conv1c_w:0'], trainable=False, name='conv1c_w'),\r\n 'conv2a': tf.Variable(self._para['conv2a_w:0'], trainable=False, name='conv2a_w'),\r\n 'conv2b': tf.Variable(self._para['conv2b_w:0'], trainable=False, name='conv2b_w'),\r\n 'conv2c': tf.Variable(self._para['conv2c_w:0'], trainable=False, name='conv2c_w'),\r\n 'conv3a': tf.Variable(self._para['conv3a_w:0'], trainable=False, name='conv3a_w'),\r\n 'conv3b': tf.Variable(self._para['conv3b_w:0'], trainable=False, name='conv3b_w'),\r\n 'conv3c': tf.Variable(self._para['conv3c_w:0'], trainable=False, name='conv3c_w'),\r\n 'fc': tf.Variable(self._para['fc_w:0'], trainable=False, name='fc_w')\r\n }\r\n\r\n biases = {\r\n 'conv1a': 
tf.Variable(self._para['conv1a_b:0'], trainable=False, name='conv1a_b'),\r\n 'conv1b': tf.Variable(self._para['conv1b_b:0'], trainable=False, name='conv1b_b'),\r\n 'conv1c': tf.Variable(self._para['conv1c_b:0'], trainable=False, name='conv1c_b'),\r\n 'conv2a': tf.Variable(self._para['conv2a_b:0'], trainable=False, name='conv2a_b'),\r\n 'conv2b': tf.Variable(self._para['conv2b_b:0'], trainable=False, name='conv2b_b'),\r\n 'conv2c': tf.Variable(self._para['conv2c_b:0'], trainable=False, name='conv2c_b'),\r\n 'conv3a': tf.Variable(self._para['conv3a_b:0'], trainable=False, name='conv3a_b'),\r\n 'conv3b': tf.Variable(self._para['conv3b_b:0'], trainable=False, name='conv3b_b'),\r\n 'conv3c': tf.Variable(self._para['conv3c_b:0'], trainable=False, name='conv3c_b'),\r\n 'fc': tf.Variable(self._para['fc_b:0'], trainable=False, name='fc_b')\r\n }\r\n\r\n conv1a = self._conv(input, weights['conv1a'], biases['conv1a'])\r\n conv1a_relu = self._lrelu(conv1a)\r\n conv1b = self._conv(conv1a_relu, weights['conv1b'], biases['conv1b'])\r\n conv1b_relu = self._lrelu(conv1b)\r\n conv1c = self._conv(conv1b_relu, weights['conv1c'], biases['conv1c'])\r\n conv1c_relu = self._lrelu(conv1c)\r\n\r\n self.maxpool1 = tf.nn.max_pool(conv1c_relu, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')\r\n\r\n conv2a = self._conv(self.maxpool1, weights['conv2a'], biases['conv2a'])\r\n conv2a_relu = self._lrelu(conv2a)\r\n conv2b = self._conv(conv2a_relu, weights['conv2b'], biases['conv2b'])\r\n conv2b_relu = self._lrelu(conv2b)\r\n conv2c = self._conv(conv2b_relu, weights['conv2c'], biases['conv2c'])\r\n conv2c_relu = self._lrelu(conv2c)\r\n\r\n maxpool2 = tf.nn.max_pool(conv2c_relu, (1, 2, 2, 1), (1, 2, 2, 1), padding='SAME')\r\n\r\n conv3a = self._conv(maxpool2, weights['conv3a'], biases['conv3a'], padding='SAME')\r\n conv3a_relu = self._lrelu(conv3a)\r\n conv3b = self._conv(conv3a_relu, weights['conv3b'], biases['conv3b'], padding='SAME')\r\n conv3b_relu = self._lrelu(conv3b)\r\n conv3c = self._conv(conv3b_relu, weights['conv3c'], biases['conv3c'], padding='SAME')\r\n conv3c_relu = self._lrelu(conv3c)\r\n\r\n h = tf.reduce_mean(conv3c_relu, reduction_indices=[1, 2])\r\n self.fc = tf.nn.bias_add(tf.matmul(h, weights['fc']), biases['fc'])\r\n\r\n def get_hint(self):\r\n return self.maxpool1\r\n\r\n def get_logits(self):\r\n return self.fc\r\n","sub_path":"teacher_model.py","file_name":"teacher_model.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"476392420","text":"#!/usr/bin/env python\n\"\"\"\nDefault data processing function for make_Hmumu_hist_mpi\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport ROOT\nimport numpy as np\nimport numpy.lib.recfunctions as rfn\nfrom root_numpy import tree2array\nimport subprocess\n\nfrom FastAnalysis import utils\nimport sys\nimport os\nimport yaml\n\ndef reading_files(file_name, start, end, config):\n tree_name = config['tree_name']\n try:\n renaming_dict = config['rename_branches']\n except KeyError:\n renaming_dict = None\n \n chain = ROOT.TChain(tree_name, tree_name)\n chain.Add(file_name)\n event_info = tree2array(chain,\n branches=None, # all branches\n selection=None, # no selection\n start=start,\n stop=end)\n \n # renaming\n event_info = rfn.rename_fields(event_info, renaming_dict)\n\n # apply event selections\n mass_name = 'Muons_Minv_MuMu_Fsr_nearOnly'\n\n sel_mask = (event_info[mass_name] > 110)\\\n & (event_info['Muons_PT_Lead'] > 27)\\\n & 
(event_info['Muons_PT_Sub'] > 15)\n\n event_info = event_info[sel_mask]\n \n return event_info\n\ndef adding_new_var(data, config):\n # add new variables: Z_PT_FSR_nearOnly and Z_PT_FSR_LOG_nearOnly\n # z_pt_info = utils.add_new_variables(data)\n new_var_info = utils.add_new_variables(config['add_new_var'], data)\n # data = utils.merge_arrays(z_pt_info, data)\n data = utils.merge_arrays(new_var_info, data)\n return data\n\ndef data_select(data, config, ZpT_weight_dict):\n weight_name = config['weight_name']\n njet_name = 'Jets_jetMultip'\n reweight_Zpt_branchname = \"Z_PT_FSR_LOG_nearOnly\"\n\n # event level weights\n if weight_name != \"None\":\n if '*' in weight_name:\n weight_names = weight_name.split('*')\n weights = 1\n for weight_name in weight_names:\n print(\"multiplying weights {}\".format(weight_name))\n weights = weights * data[weight_name]\n else:\n weights = data[weight_name]\n else:\n weights = None\n\n if weights is not None:\n if ZpT_weight_dict is not None:\n for jet_idx in range(3):\n if jet_idx== 2:\n mask = (data[njet_name] >= 2)\n else:\n mask = (data[njet_name] == jet_idx)\n\n weight_dict = ZpT_weight_dict[jet_idx]\n\n # this temporary matrix is very much needed, otherwise it does work\n tmp_weight = weights[mask]\n for low, high, value in weight_dict:\n mask_bin = (data[reweight_Zpt_branchname][mask] >= low) & (data[reweight_Zpt_branchname][mask] < high)\n tmp_weight[mask_bin] = tmp_weight[mask_bin] / value\n\n weights[mask] = tmp_weight\n\n return data, weights\n\ndef plot_hist(data, config, weights):\n njet_name = 'Jets_jetMultip'\n hist_opts = config['hists']\n n_categories = 16\n category_name = 'Event_XGB_Category'\n\n # now make histograms\n histograms = []\n h_postfix = \"Inc\"\n histograms += utils.make_hist(data, hist_opts, h_postfix, weights)\n\n # In jet multiplicity\n for ijet in range(3):\n if ijet == 2:\n mask = data[njet_name] >= 2\n else:\n mask = data[njet_name] == ijet\n\n h_postfix = '{}Jet'.format(ijet)\n histograms += utils.make_hist(data, hist_opts, h_postfix, weights, mask)\n\n # Categories\n for icat in range(n_categories):\n h_postfix = 'BDT_{}'.format(icat)\n mask = data[category_name] == icat+1\n histograms += utils.make_hist(data, hist_opts, h_postfix, weights, mask)\n \n return histograms\n\ndef data_process(file_name, start, end, config, ZpT_weight_dict, userprog):\n\n is_file = True\n try:\n sys.path.insert(0, os.path.dirname(__file__) + '/custom')\n exec('import ' + userprog)\n except ModuleNotFoundError as e:\n is_file = False\n\n # reading files starts #\n if is_file:\n try:\n exec('event_info = ' + userprog + '.reading_files(file_name, start, end, config)')\n except AttributeError as e:\n event_info = reading_files(file_name, start, end, config)\n else:\n event_info = reading_files(file_name, start, end, config)\n # reading files ends #\n\n # adding new variables starts #\n if is_file:\n try:\n exec('event_info = ' + userprog + '.adding_new_var(event_info, config)')\n except AttributeError as e:\n event_info = adding_new_var(event_info, config)\n else:\n event_info = adding_new_var(event_info, config)\n # adding new variables ends #\n\n # event-selection starts #\n if is_file:\n try:\n exec('event_info, weights = ' + userprog + '.data_select(event_info, config, ZpT_weight_dict)')\n except AttributeError as e:\n event_info, weights = data_select(event_info, config, ZpT_weight_dict)\n else:\n event_info, weights = data_select(event_info, config, ZpT_weight_dict)\n # event-selection ends #\n\n # plotting histograms starts #\n if 
is_file:\n try:\n exec('histograms = ' + userprog + '.plot_hist(event_info, config, weights)')\n except AttributeError as e:\n histograms = plot_hist(event_info, config, weights)\n else:\n histograms = plot_hist(event_info, config, weights)\n # plotting histograms ends #\n\n return histograms\n","sub_path":"scripts/make_Hmumu_hist_mpi_helper.py","file_name":"make_Hmumu_hist_mpi_helper.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"26623084","text":"import heapq\nfrom collections import defaultdict\n\n\ndef find_k_frequent_numbers(nums, k):\n d = defaultdict(int)\n\n for i in nums:\n d[i] = d[i] + 1\n\n heap = []\n\n for key, val in d.items():\n if len(heap) < k:\n heapq.heappush(heap, (val, key))\n else:\n if val > heap[0][0]:\n heapq.heappop(heap)\n heapq.heappush(heap, (val, key))\n\n result = []\n\n for i in heap:\n result.append(i[1])\n\n return result\n\n\n\n\n\nprint(\"Here are the K frequent numbers: \" +\n str(find_k_frequent_numbers([1, 3, 5, 12, 11, 12, 11], 2)))\n# Output: [12, 11]\n# Explanation: Both '11' and '12' appeared twice.\nprint(\"Here are the K frequent numbers: \" +\n str(find_k_frequent_numbers([5, 12, 11, 3, 11], 2)))\n# Output: [11, 5] or [11, 12] or [11, 3]\n# Explanation: Only '11' appeared twice, all other numbers appeared once.\n\n\n","sub_path":"docs/python algorithms/mine/leetcode50/top_k_frequent_numbers2.py","file_name":"top_k_frequent_numbers2.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"390958434","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\nclass UserProfileInfo(models.Model):\n\n user = models.OneToOneField(User,\n on_delete=models.CASCADE,\n primary_key=True,)\n\n # additional\n\n phone_number = models.IntegerField(unique=True)\n\n time_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Friendship(models.Model):\n # list of users that are friends with user_id_1\n friends_list = models.ManyToManyField(User)\n user_id_1 = models.ForeignKey(User,\n related_name='owner',\n null=True,\n on_delete=models.CASCADE)\n\n @classmethod\n def add_friend(cls, user_id_1, user_id_2):\n friend, created = cls.objects.get_or_create(\n user_id_1=user_id_1\n )\n friend.friends_list.add(user_id_2)\n\n @classmethod\n def remove_friend(cls, user_id_1, user_id_2):\n friend, created = cls.objects.get_or_create(\n user_id_1=user_id_1\n )\n friend.friends_list.remove(user_id_2)\n","sub_path":"TIE-23516 Basic Web Applications/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"326715236","text":"# coding=utf-8\n# Copyright 2021 Tencent. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n''' Applications based on RecBERT. '''\n\nimport random\nimport numpy as np\n\nfrom ..tools import tf\nfrom .base import LMModule\nfrom .bert import get_bert_config, get_word_piece_tokenizer, get_key_to_depths\nfrom ..modeling.rec_bert import RecBERT\nfrom .. import utils\n\n\nclass RecBERTLM(LMModule):\n ''' Language modeling on RecBERT. '''\n _INFER_ATTRIBUTES = {\n 'max_seq_length': (\n 'An integer that defines max sequence length of input tokens, '\n 'which typically equals `len(tokenize(segments)) + 1'),\n 'init_checkpoint': (\n 'A string that directs to the checkpoint file used for '\n 'initialization')}\n\n def __init__(self,\n config_file,\n vocab_file,\n max_seq_length=128,\n init_checkpoint=None,\n output_dir=None,\n gpu_ids=None,\n add_prob=0.1,\n del_prob=0.1,\n do_lower_case=True,\n truncate_method='LIFO'):\n super(LMModule, self).__init__(\n init_checkpoint, output_dir, gpu_ids)\n\n self.batch_size = 0\n self.max_seq_length = max_seq_length\n self.truncate_method = truncate_method\n self._do_lower_case = do_lower_case\n self._on_predict = False\n self._add_prob = add_prob\n self._del_prob = del_prob\n self.__init_args__ = locals()\n\n assert add_prob <= 0.5, (\n 'The value of `add_prob` should be larger than 0 '\n 'and smaller than 1/2.')\n self.bert_config = get_bert_config(config_file)\n self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)\n self._key_to_depths = get_key_to_depths(\n self.bert_config.num_hidden_layers)\n\n def predict(self, X=None, X_tokenized=None,\n batch_size=8):\n ''' Inference on the model.\n\n Args:\n X: list. A list object consisting untokenized inputs.\n X_tokenized: list. A list object consisting tokenized inputs.\n Either `X` or `X_tokenized` should be None.\n batch_size: int. The size of batch in each step.\n Returns:\n A dict object of model outputs.\n '''\n\n self._on_predict = True\n ret = super(LMModule, self).predict(\n X, X_tokenized, batch_size)\n self._on_predict = False\n\n return ret\n\n def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None,\n is_training=False, is_parallel=False):\n self._assert_legal(X, y, sample_weight, X_tokenized)\n\n assert y is None, ('%s is unsupervised. 
`y` should be None.'\n % self.__class__.__name__)\n\n n_inputs = None\n data = {}\n\n # convert X\n if X or X_tokenized:\n tokenized = False if X else X_tokenized\n X_target = X_tokenized if tokenized else X\n (input_tokens, input_ids, add_label_ids, del_label_ids) = \\\n self._convert_X(\n X_target, tokenized=tokenized, is_training=is_training)\n data['input_ids'] = np.array(input_ids, dtype=np.int32)\n\n if is_training:\n data['add_label_ids'] = np.array(\n add_label_ids, dtype=np.int32)\n data['del_label_ids'] = np.array(\n del_label_ids, dtype=np.int32)\n\n # backup for answer mapping\n data[utils.BACKUP_DATA + 'input_tokens'] = input_tokens\n data[utils.BACKUP_DATA + 'tokenized'] = [tokenized]\n data[utils.BACKUP_DATA + 'X_target'] = X_target\n\n n_inputs = len(input_ids)\n if n_inputs < self.batch_size:\n self.batch_size = max(n_inputs, len(self._gpu_ids))\n\n # convert sample_weight\n if is_training or y:\n sample_weight = self._convert_sample_weight(\n sample_weight, n_inputs)\n data['sample_weight'] = np.array(sample_weight, dtype=np.float32)\n\n return data\n\n def _convert_X(self, X_target, tokenized, is_training):\n\n # tokenize input texts and scan over corpus\n input_tokens = []\n tokenized_input_ids = []\n vocab_size = len(self.tokenizer.vocab)\n vocab_ind = list(range(vocab_size))\n vocab_p = [0] * vocab_size\n for ex_id, sample in enumerate(X_target):\n _input_tokens = self._convert_x(sample, tokenized)\n\n # skip noise training data\n if is_training:\n if len(_input_tokens) == 0 or \\\n len(_input_tokens) > self.max_seq_length:\n continue\n else:\n utils.truncate_segments(\n [_input_tokens], self.max_seq_length,\n truncate_method=self.truncate_method)\n\n # count char\n _input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)\n if is_training:\n for _input_id in _input_ids:\n vocab_p[_input_id] += 1\n\n input_tokens.append(_input_tokens)\n tokenized_input_ids.append(_input_ids)\n if is_training:\n vocab_p_sum = sum(vocab_p)\n vocab_p = [n / vocab_p_sum for n in vocab_p]\n\n input_ids = []\n add_label_ids = []\n del_label_ids = []\n for ex_id in range(len(tokenized_input_ids)):\n _input_ids = tokenized_input_ids[ex_id]\n\n nonpad_seq_length = len(_input_ids)\n for _ in range(self.max_seq_length - nonpad_seq_length):\n _input_ids.append(0)\n\n _add_label_ids = []\n _del_label_ids = []\n\n # add/del\n if is_training:\n if (ex_id + 1) % 10000 == 0:\n tf.logging.info(\n 'Sampling wrong tokens of input %d' % (ex_id + 1))\n\n _add_label_ids = [0] * self.max_seq_length\n _del_label_ids = [0] * self.max_seq_length\n\n max_add = np.sum(\n np.random.random(nonpad_seq_length) < self._add_prob)\n max_del = np.sum(\n np.random.random(nonpad_seq_length) < self._del_prob)\n\n sample_wrong_tokens(\n _input_ids, _add_label_ids, _del_label_ids,\n max_add=max_add, max_del=max_del,\n nonpad_seq_length=nonpad_seq_length,\n vocab_size=vocab_size,\n vocab_ind=vocab_ind,\n vocab_p=vocab_p)\n\n input_ids.append(_input_ids)\n add_label_ids.append(_add_label_ids)\n del_label_ids.append(_del_label_ids)\n\n return input_tokens, input_ids, add_label_ids, del_label_ids\n\n def _convert_x(self, x, tokenized):\n try:\n if not tokenized:\n # deal with general inputs\n if isinstance(x, str):\n return self.tokenizer.tokenize(x)\n\n # deal with tokenized inputs\n elif isinstance(x[0], str):\n return x\n except Exception:\n raise ValueError(\n 'Wrong input format: \\'%s\\'. 
' % (x))\n\n # deal with tokenized and multiple inputs\n raise ValueError(\n '%s only supports single sentence inputs.'\n % self.__class__.__name__)\n\n def _set_placeholders(self, target, on_export=False, **kwargs):\n self.placeholders = {\n 'input_ids': utils.get_placeholder(\n target, 'input_ids',\n [None, self.max_seq_length], tf.int32),\n 'add_label_ids': utils.get_placeholder(\n target, 'add_label_ids',\n [None, self.max_seq_length], tf.int32),\n 'del_label_ids': utils.get_placeholder(\n target, 'del_label_ids',\n [None, self.max_seq_length], tf.int32),\n }\n if not on_export:\n self.placeholders['sample_weight'] = \\\n utils.get_placeholder(\n target, 'sample_weight',\n [None], tf.float32)\n\n def _forward(self, is_training, split_placeholders, **kwargs):\n\n model = RecBERT(\n bert_config=self.bert_config,\n is_training=is_training,\n input_ids=split_placeholders['input_ids'],\n add_label_ids=split_placeholders['add_label_ids'],\n del_label_ids=split_placeholders['del_label_ids'],\n sample_weight=split_placeholders.get('sample_weight'),\n add_prob=self._add_prob,\n del_prob=self._del_prob,\n scope='bert',\n **kwargs)\n return model.get_forward_outputs()\n\n def _get_fit_ops(self, as_feature=False):\n ops = [self._tensors['add_preds'],\n self._tensors['del_preds'],\n self._tensors['add_loss'],\n self._tensors['del_loss']]\n if as_feature:\n ops.extend([self.placeholders['input_ids'],\n self.placeholders['add_label_ids'],\n self.placeholders['del_label_ids']])\n return ops\n\n def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):\n\n if as_feature:\n batch_inputs = output_arrays[-3]\n batch_add_labels = output_arrays[-2]\n batch_del_labels = output_arrays[-1]\n else:\n batch_inputs = feed_dict[self.placeholders['input_ids']]\n batch_add_labels = \\\n feed_dict[self.placeholders['add_label_ids']]\n batch_del_labels = \\\n feed_dict[self.placeholders['del_label_ids']]\n batch_mask = (batch_inputs != 0)\n\n # add accuracy\n batch_add_preds = output_arrays[0]\n add_accuracy = np.sum((batch_add_preds == batch_add_labels) * batch_mask) / (np.sum(batch_mask) + 1e-6)\n\n # del accuracy\n batch_del_preds = output_arrays[1]\n del_accuracy = np.sum((batch_del_preds == batch_del_labels) * batch_mask) / (np.sum(batch_mask) + 1e-6)\n\n # add loss\n batch_add_losses = output_arrays[2]\n add_loss = np.mean(batch_add_losses)\n\n # del loss\n batch_del_losses = output_arrays[3]\n del_loss = np.mean(batch_del_losses)\n\n info = ''\n if self._add_prob > 0:\n info += ', add_accuracy %.4f' % add_accuracy\n info += ', add_loss %.6f' % add_loss\n if self._del_prob > 0:\n info += ', del_accuracy %.4f' % del_accuracy\n info += ', del_loss %.6f' % del_loss\n\n return info\n\n def _get_predict_ops(self):\n return [self._tensors['add_preds'],\n self._tensors['del_preds']]\n\n def _get_predict_outputs(self, batch_outputs):\n n_inputs = len(list(self.data.values())[0])\n output_arrays = list(zip(*batch_outputs))\n\n input_ids = self.data['input_ids']\n mask = (input_ids > 0)\n\n # integrated preds\n preds = []\n add_preds = utils.transform(output_arrays[0], n_inputs)\n del_preds = utils.transform(output_arrays[1], n_inputs)\n tokens = self.data[utils.BACKUP_DATA + 'input_tokens']\n text = self.data[utils.BACKUP_DATA + 'X_target']\n tokenized = self.data[utils.BACKUP_DATA + 'tokenized'][0]\n for ex_id in range(n_inputs):\n _add_preds = add_preds[ex_id]\n _del_preds = del_preds[ex_id]\n _input_length = np.sum(mask[ex_id])\n _input_tokens = tokens[ex_id]\n _output_tokens = [token for token in 
_input_tokens]\n\n if tokenized:\n n = 0\n for i in range(_input_length):\n if self._del_prob > 0 and _del_preds[i] != 0:\n _token = '{del:%s}' % _output_tokens[i + n]\n _output_tokens[i + n] = _token\n if self._add_prob > 0 and _add_preds[i] != 0:\n _token = self.tokenizer.convert_ids_to_tokens(\n [_add_preds[i]])[0]\n _token = '{add:%s}' % _token\n _output_tokens.insert(i + 1 + n, _token)\n n += 1\n preds.append(_output_tokens)\n else:\n _text = text[ex_id]\n _mapping_start, _mapping_end = utils.align_tokens_with_text(\n _input_tokens, _text, self._do_lower_case)\n\n n = 0\n for i in range(_input_length):\n if self._del_prob > 0 and _del_preds[i] != 0:\n _start_ptr = _mapping_start[i] + n\n _end_ptr = _mapping_end[i] + n\n _del_token = _text[_start_ptr: _end_ptr]\n\n _token = '{del:%s}' % _del_token\n _text = _text[:_start_ptr] + _token + _text[_end_ptr:]\n n += len(_token) - len(_del_token)\n if self._add_prob > 0 and _add_preds[i] != 0:\n _token = self.tokenizer.convert_ids_to_tokens(\n [_add_preds[i]])[0]\n _token = '{add:%s}' % _token\n _ptr = _mapping_end[i] + n\n _text = _text[:_ptr] + _token + _text[_ptr:]\n n += len(_token)\n preds.append(_text)\n\n outputs = {}\n outputs['preds'] = preds\n\n return outputs\n\n\ndef sample_wrong_tokens(_input_ids, _add_label_ids, _del_label_ids,\n max_add, max_del, nonpad_seq_length,\n vocab_size, vocab_ind, vocab_p):\n\n # `add`, remove padding for prediction of adding tokens\n # e.g. 124 591 9521 -> 124 9521\n for _ in range(max_add):\n cand_indicies = [i for i in range(0, len(_input_ids) - 1)\n if _input_ids[i + 1] != 0 and\n _add_label_ids[i] == 0 and\n _add_label_ids[i + 1] == 0]\n if not cand_indicies:\n break\n\n index = random.choice(cand_indicies)\n _add_label_ids[index] = _input_ids.pop(index + 1)\n _add_label_ids.pop(index + 1)\n _del_label_ids.pop(index + 1)\n _input_ids.append(0)\n _add_label_ids.append(0)\n _del_label_ids.append(0)\n nonpad_seq_length -= 1\n\n # `del`, add wrong tokens for prediction of deleted tokens\n # e.g. 
124 591 -> 124 92 591\n for _ in range(max_del):\n if _input_ids[-1] != 0: # no more space\n break\n\n index = random.randint(0, nonpad_seq_length)\n rand = np.random.choice(vocab_ind, p=vocab_p) # sample from vocabulary\n\n # always delete the right one\n _input_ids.insert(index, rand)\n _add_label_ids.insert(index, 0)\n _del_label_ids.insert(index, 1)\n if rand == _input_ids[index + 1]:\n _del_label_ids[index + 1], _del_label_ids[index] = \\\n _del_label_ids[index], _del_label_ids[index + 1]\n\n _input_ids.pop()\n _add_label_ids.pop()\n _del_label_ids.pop()\n nonpad_seq_length += 1\n","sub_path":"uf/application/rec_bert.py","file_name":"rec_bert.py","file_ext":"py","file_size_in_byte":15757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"601196284","text":"from django.shortcuts import render\nfrom sm.libary.additionalviews import JSONView\nfrom api.models import *\nimport uuid\nimport logging\nfrom django.utils import timezone\nfrom datetime import timedelta\n\nlogger = logging.getLogger(__name__)\n\n\nclass getSessionKey(JSONView):\n\n def get(self, *args, **kwargs):\n awnser_packet = {\"status\":False,\"reason\":\"\"}\n cookie_list = []\n if \"developer_key\" in kwargs:\n developer_key = kwargs[\"developer_key\"]\n\n developer_enty = AuthentificationDeveloperKeys.objects.filter(auth_key=developer_key)\n\n if developer_enty:\n if developer_enty[0].active is True:\n session = AuthentificationSessionKeys.objects.create(user_agent=self.request.META['HTTP_USER_AGENT'],\n ip=self.get_client_ip(),\n active=True,\n expired_at=timezone.now()+timedelta(hours=2))\n\n cookie_list.append({\"name\":\"api_cookie\", \"value\": str(session.cookie_uuid)})\n\n awnser_packet[\"status\"] = True\n awnser_packet[\"reason\"] = str(session.auth_key)\n else:\n awnser_packet[\"status\"] = False\n awnser_packet[\"reason\"] = \"expired\"\n else:\n awnser_packet[\"status\"] = False\n awnser_packet[\"reason\"] = \"not_exist\"\n\n response = self.render_to_response(awnser_packet)\n\n for cookie in cookie_list:\n response.set_cookie(cookie[\"name\"], cookie[\"value\"])\n return response\n\n def get_client_ip(self):\n x_forwarded_for = self.request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = self.request.META.get('REMOTE_ADDR')\n return ip\n","sub_path":"api/views_authentification.py","file_name":"views_authentification.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"85505188","text":"#!/usr/bin/env python3\n\n# https://codeforces.com/problemset/problem/1085/B\n# min x st. 
(x div k)⋅(x mod k)=n\n\ndef f(l):\n n,k=l\n r = k-1\n while n%r>0:\n r -= 1\n d = n//r\n return d*k+r\n\nl = list(map(int,input().split()))\nprint(f(l))\n","sub_path":"codeforces/math数学/1100/1085Bdiv乘mod.py","file_name":"1085Bdiv乘mod.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"548004021","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 13 20:17:18 2016\n\n@author: owen\n\"\"\"\n\nclass Solution(object):\n def largestRectangleArea(self, heights):\n area = 0\n stack = [] # non-decreasing stack\n heights += [0]\n n = len(heights)\n for i in range(n):\n while stack and heights[i] < heights[stack[-1]]:\n h = heights[stack.pop()]\n if not stack: # regard -1 as the left boundary, so the width is i-1-(-1)\n width = i\n else:\n width = i - 1 - stack[-1]\n area = max(area, h * width)\n stack.append(i)\n return area \n \n def maximalRectangle(self, matrix):\n \"\"\"\n :type matrix: List[List[str]]\n :rtype: int\n \"\"\"\n if not matrix:\n return 0\n m = len(matrix)\n n = len(matrix[0])\n maxRect = 0\n hist = [0]*n\n for i in range(m): # calculate the max rectangle based on i-th row\n for j in range(n):\n # do not write hist[j]+=1, right-side is hist[j]+1 or 0, not hist[j]+1 or hist[j]+0\n hist[j] = hist[j] + 1 if matrix[i][j] == '1' else 0\n maxRect = max(maxRect, self.largestRectangleArea(hist))\n \n return maxRect\n \n\nif __name__ == \"__main__\":\n matrix = [\"01101\",\n \"11010\",\n \"01110\",\n \"11110\",\n \"11111\",\n \"00000\"]\n print(Solution().maximalRectangle(matrix))","sub_path":"85. Maximal Rectangle.py","file_name":"85. Maximal Rectangle.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"14752902","text":"# coding=utf-8\n# https://codeload.github.com/Leoborse/Octoprint-job-scheduler/zip/master\n\n# The plugin's identifier, has to be unique\nfrom setuptools import setup\nplugin_identifier = \"jobscheduler\"\nplugin_package = \"jobscheduler\"\nplugin_name = \"A Job Scheduler\"\nplugin_version = \"0.7.6\"\nplugin_description = \"\"\"A job scheduler for OctoPrint\"\"\"\nplugin_author = \"Leonardo Borselli\"\nplugin_author_email = \"leonardo.borselli@gmail.com\"\nplugin_url = \"https://github.com/Leoborse/Octoprint-job-scheduler\"\nplugin_license = \"AGPLv3\"\n\n# Any additional requirements besides OctoPrint should be listed here\nplugin_requires = []\n\n# --------------------------------------------------------------------------------------------------------------------\n# More advanced options that you usually shouldn't have to touch follow after this point\n# --------------------------------------------------------------------------------------------------------------------\n\n# Additional package data to install for this plugin. The subfolders \"templates\", \"static\" and \"translations\" will\n# already be installed automatically if they exist.\nplugin_additional_data = []\n\n# Any additional python packages you need to install with your plugin that are not contains in .*\nplugin_addtional_packages = []\n\n# Any python packages within .* you do NOT want to install with your plugin\nplugin_ignored_packages = []\n\n# Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points,\n# define dependency links or other things like that, this is the place to go. 
Will be merged recursively with the\n# default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using\n# octoprint.util.dict_merge.\n#\n# Example:\n#     plugin_requires = [\"someDependency==dev\"]\n#     additional_setup_parameters = {\"dependency_links\": [\"https://github.com/someUser/someRepo/archive/master.zip#egg=someDependency-dev\"]}\nadditional_setup_parameters = {}\n\n########################################################################################################################\ntry:\n    import octoprint_setuptools\nexcept:\n    print(\"Could not import OctoPrint's setuptools, are you sure you are running that under \"\n          \"the same python installation that OctoPrint is installed under?\")\n    import sys\n    sys.exit(-1)\n\nsetup_parameters = octoprint_setuptools.create_plugin_setup_parameters(\n    identifier=plugin_identifier,\n    package=plugin_package,\n    name=plugin_name,\n    version=plugin_version,\n    description=plugin_description,\n    author=plugin_author,\n    mail=plugin_author_email,\n    url=plugin_url,\n    license=plugin_license,\n    requires=plugin_requires,\n    additional_packages=plugin_addtional_packages,\n    ignored_packages=plugin_ignored_packages,\n    additional_data=plugin_additional_data\n)\n\nif len(additional_setup_parameters):\n    from octoprint.util import dict_merge\n    setup_parameters = dict_merge(\n        setup_parameters, additional_setup_parameters)\n\nsetup(**setup_parameters)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"633707126","text":"# Copyright (c) 2015 App Annie Inc. All rights reserved.\nfrom abc import ABCMeta\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom tests.qa.pages.common_page import CommonPage\n\n\nclass AdvanaCommonPage(CommonPage):\n\n    __metaclass__ = ABCMeta\n\n    page_elements = {\n        'icon_itc': '.aa-icon.itc',\n    }\n\n    def goto_and_wait_for_page_ready(self):\n        self.goto()\n        self.wait_for_page_ready()\n\n\ndef safe_get_element_attribute(element, attribute):\n    if not isinstance(element, WebElement):\n        raise TypeError('Type of {} is not derived from WebElement'.format(element))\n\n    attempts = 0\n    while attempts < 30:\n        try:\n            value = getattr(element, attribute)\n            return value\n        except StaleElementReferenceException:\n            pass\n        attempts += 1\n","sub_path":"tests/qa/pages/advana/ad_common_page.py","file_name":"ad_common_page.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"302566827","text":"'''\n    Create a class for text processing; the path to the file must be given at initialization\n    Create a method for writing\n    Create a method for reading files\n    Create a method that splits the text into words\n    Create a method that counts how many occurrences a given word has.\n    Use the created methods\n'''\n\nclass Prelucrare:\n\n    def __init__(self):\n        self.cale = \"//home//ionut//pyhton//C5//T53_fisier\"\n\n    def scriere(self):\n        text = open(self.cale, \"w\")\n        raspuns= input(\"What would you like to write to the file?\")\n        text.write(raspuns)\n        text.close()\n\n    def citire(self):\n        text = open(self.cale, \"r\")\n        x = text.read()\n        print(x)\n        text.close()\n\n    def splitincuv(self):\n        l = []\n        text = open(self.cale, \"r\")\n        x = text.read()\n        l = x.split(\" \")\n        text.close()\n        return l\n\n    def nr_aparitii(self, cuv):\n        text = 
open(self.cale, \"r\")\n        x = text.read()\n        l = x.split(\" \")\n        text.close()\n        nr_ap = l.count(str(cuv))\n        return nr_ap\n\nj = Prelucrare()\nj.scriere()\nprint(j.nr_aparitii('ana'))\n","sub_path":"Python/Ex_9.py","file_name":"Ex_9.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"186594404","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport time\nfrom datetime import datetime\nfrom _Functions_gesture import load_gestures, batch_generator\nfrom layers import Layers\n\nclass Modell(Layers):\n    \n    \n    def __init__(self, random_seed = 123,):\n        self.root_logdir = '../logs/'\n        self.modeldir = '../model/'\n        self.now = datetime.utcnow().strftime('%Y%m%d%H%M%s')\n        self.logdir = '{}/run-{}/'.format(self.root_logdir, self.now) \n        tf.set_random_seed(random_seed)\n        \n    def save(self, saver, sess, epoch, path='./model/'):\n        if not os.path.isdir(path):\n            os.makedirs(path)\n        print('Saving model to {}'.format(path))\n        saver.save(sess, os.path.join(path,'cnn-model.ckpt'), global_step=epoch) \n        \n    def load(self,sess, saver):\n        last = sorted(os.listdir(self.modeldir))[-1]\n        meta_last = [i for i in os.listdir(self.modeldir+last) if 'meta' in i][0]\n        epoch_last = int(meta_last.split('t-')[-1].split('.')[0])\n#        saver = tf.train.Saver()#tf.train.import_meta_graph(self.modeldir+last+'/{}'.format(meta_last))\n        print('Loading model from {}'.format(os.path.join(self.modeldir+last)))\n        saver.restore(sess, os.path.join(self.modeldir+last, 'cnn-model.ckpt-%d' % epoch_last)) \n        \n    def find_last(self):\n        last = sorted(os.listdir(self.modeldir))[-1]\n        meta_last = [i for i in os.listdir(self.modeldir+last) if 'meta' in i][0]\n        return os.path.join(self.modeldir+last, meta_last) \n    \n    def train(self, sess, training_set, validation_set=None, initialize=True, epochs=20, shuffle=True, dropout=0.5, random_seed=None):\n        \n        file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())\n        start = time.time() \n        X_data = np.array(training_set[0]) \n        y_data = np.array(training_set[1])\n        \n        if initialize:\n            sess.run(tf.global_variables_initializer())\n            saver = tf.train.Saver()\n        else: \n            last = sorted(os.listdir(self.modeldir))[-1]\n            meta_last = [i for i in os.listdir(self.modeldir+last) if 'meta' in i][0]\n            epoch_last = int(meta_last.split('t-')[-1].split('.')[0])\n            saver = tf.train.Saver()#tf.train.import_meta_graph(self.modeldir+last+'/{}'.format(meta_last))\n            print('Loading model from {}'.format(os.path.join(self.modeldir+last)))\n            saver.restore(sess, os.path.join(self.modeldir+last, 'cnn-model.ckpt-%d' % epoch_last)) \n\n        for epoch in range(1, epochs+1):\n            batch_gen = batch_generator(X_data, y_data, shuffle=shuffle, batch_size=64) \n            avg_loss = 0.0\n            \n            for i, (batch_x, batch_y) in enumerate(batch_gen):\n                feed = {'tf_x:0': batch_x, 'tf_y:0': batch_y, 'fc_keep_prob:0': dropout}\n                loss,_ = sess.run(['cross_entropy_loss:0', 'train_op'], feed_dict=feed) \n                avg_loss += loss\n                \n            summary = tf.Summary()\n            summary.value.add(tag=\"AVG_LOSS\", simple_value=avg_loss)\n            file_writer.add_summary(summary, epoch) \n            print('Epoch %02d Training LOSS: %7.3f' % (epoch, avg_loss), end=' ')\n\n            if validation_set is not None: \n                feed = {'tf_x:0': validation_set[0][:100],\n                        'tf_y:0': validation_set[1][:100],\n                        'fc_keep_prob:0': 1.0}\n                valid_acc = sess.run('accuracy:0', feed_dict=feed)\n                print('Validation ACCURACY: %7.3f ' % valid_acc, end=' ')\n                summary_valid_acc = tf.Summary()\n                
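# tf.Summary here is the TF1-style protobuf API: it lets a plain Python float\n                # (the validation accuracy computed above) be logged to TensorBoard\n                # without building any extra summary ops into the graph.\n                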
summary_valid_acc.value.add(tag=\"VALID_ACCURACY\", simple_value=valid_acc)\n file_writer.add_summary(summary_valid_acc, epoch) \n end_epoch_time = (time.time()-start)/60\n print('Time: {:.2f}min'.format(end_epoch_time))\n\n else: \n end = time.time() - start\n print('It took {:.2f}min'.format(end/60))\n \n file_writer.close()\n os.mkdir(self.modeldir+self.now)\n self.save(saver, sess, epochs, path=self.modeldir+self.now)\n\n\n def predict(self, sess, X_test, return_proba=False):\n feed = {'tf_x:0': X_test,\n 'fc_keep_prob:0': 1.0}\n if return_proba:\n return sess.run('probabilities:0', feed_dict=feed)\n else:\n return sess.run('labels:0', feed_dict=feed)\n \nif __name__=='__main__':\n\n#%% Data for training\n X_train, y_train = load_gestures(path='../../gestures/train')\n X_valid, y_valid = load_gestures(path='../../gestures/test') \n \n \n#%% CNN model\n \n model = Modell()\n learning_rate = 1e-4\n \n \n \n graph = model.build_cnn(learning_rate)\n \n with tf.Session(graph=graph) as sess: \n model.train(sess, \n training_set=(X_train,y_train),\n validation_set=(X_valid, y_valid), \n initialize=True,\n random_seed=123, \n epochs=20)\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n ","sub_path":"Modell.py","file_name":"Modell.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"254124065","text":"import re\nimport sys\nimport os\nimport sklearn.datasets\nimport nltk\nimport string\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport numpy as np\nimport string\nfrom nltk.stem.snowball import EnglishStemmer\nimport csv\nimport pandas as pd\nfrom sklearn import svm\n\nstemmer = EnglishStemmer()\ndef stem_tokens(tokens, stemmer):\n stemmed = []\n for item in tokens:\n stemmed.append(stemmer.stem(item))\n return stemmed\n\n\ndef tokenize(text):\n text = \"\".join([ch for ch in text if ch not in (string.punctuation + '“'+ '”' + string.digits)])\n tokens = nltk.word_tokenize(text)\n #print(text)\n stems = stem_tokens(tokens, stemmer)\n return stems\n\ndef data_load():\n data_path=r'C:\\\\Users\\Vishal Ghorpade\\Desktop\\20news-bydate.tar\\20news-bydate\\20news-bydate-train'\n list_classes=os.listdir(data_path)\n find_and_moveincompatible(data_path)\n files=sklearn.datasets.load_files(data_path,shuffle=True)\n print(list(files.target_names))\n data_clean(files.data)\n print(len(files.target))\n count_vect = CountVectorizer(decode_error='ignore', stop_words='english', tokenizer=tokenize,lowercase=False)\n X_train_counts = count_vect.fit_transform(files.data)\n print(X_train_counts.shape)\n tfidf_transformer = TfidfTransformer()\n X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\n X_train_tfidf.shape\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X_train_tfidf, files.target, test_size=0.2)\n clf=svm.SVC(kernel='linear', C=1, gamma=1,probability=True).fit(X_train, y_train)\n print(clf.score(X_train,y_train))\n print(clf.score(X_test,y_test))\n\n test_data_path=r'C:\\Users\\Vishal Ghorpade\\Desktop\\20news-bydate.tar\\20news-bydate\\20news-bydate-test'\n find_and_moveincompatible(test_data_path)\n test_data=sklearn.datasets.load_files(test_data_path)\n data_counts= count_vect.transform(test_data.data)\n X_eval= tfidf_transformer.transform(data_counts)\n\n l1=['doc_id']\n l2=test_data.target_names\n header=l1+l2\n\n doc_id=[]\n 
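# Assemble one row per test document: the bare file name as doc_id plus one\n    # probability column per class, matching the header list built above.\n    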
df=pd.DataFrame(columns=header)\n for i in range(len(test_data.filenames)):\n d_id=(test_data.filenames[i]).split('\\\\')\n doc_id.append(d_id[len(d_id)-1])\n df['doc_id']=doc_id\n print(df.head())\n print(X_eval.shape)\n predicted = clf.predict_proba(X_eval)\n print(predicted[0])\n\n df[test_data.target_names]=np.asanyarray(predicted)\n print(df.head())\n df.to_csv('output_svm.csv')\n\n\n\n\nimport shutil\ndef find_and_moveincompatible(data_path):\n count_vector = sklearn.feature_extraction.text.CountVectorizer()\n check_files=sklearn.datasets.load_files(data_path)\n num = []\n for i in range(len(check_files.filenames)):\n try:\n count_vector.fit_transform(check_files.data[i:i + 1])\n except UnicodeDecodeError:\n num.append(check_files.filenames[i])\n except ValueError:\n pass\n for i in num:\n os.remove(i)\n\n\ndef data_clean(file_data):\n print(len(file_data))\n for i, text in zip(range(len(file_data)), file_data):\n #print(text)\n file_data[i] = clean_stem(text)\n\ndef clean_stem(text):\n data = text.decode().split('\\n')\n text_data = []\n finished = False\n for part in data:\n if finished:\n text_data.append(part)\n continue\n if not (part.startswith('Path:') or part.startswith('Newsgroups:') or part.startswith('Xref:')) and not finished:\n text_data.append(part)\n if part.startswith('Lines:'):\n finished = True\n\n\n return text_data\n\n\nif __name__ == \"__main__\":\n data_load()\n","sub_path":"SVM/svm_based.py","file_name":"svm_based.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"93648034","text":"from dagster import ResourceDefinition, graph\nfrom dagster.utils import file_relative_path\nfrom dagster_dbt import dbt_cli_resource\nfrom hacker_news.ops.dbt import hn_dbt_run, hn_dbt_test\nfrom hacker_news.resources import RESOURCES_PROD, RESOURCES_STAGING\nfrom hacker_news.resources.dbt_asset_resource import SnowflakeQueryDbtAssetResource\nfrom hacker_news.resources.snowflake_io_manager import SHARED_SNOWFLAKE_CONF\n\nDBT_PROJECT_DIR = file_relative_path(__file__, \"../../hacker_news_dbt\")\nDBT_PROFILES_DIR = DBT_PROJECT_DIR + \"/config\"\ndbt_staging_resource = dbt_cli_resource.configured(\n {\"profiles-dir\": DBT_PROFILES_DIR, \"project-dir\": DBT_PROJECT_DIR, \"target\": \"staging\"}\n)\ndbt_prod_resource = dbt_cli_resource.configured(\n {\"profiles_dir\": DBT_PROFILES_DIR, \"project_dir\": DBT_PROJECT_DIR, \"target\": \"prod\"}\n)\n\n\n@graph\ndef dbt_metrics():\n hn_dbt_test(hn_dbt_run())\n\n\n# The prod job writes to production schemas and s3 buckets, and the staging job writes to alternate\n# schemas and s3 buckets.\n\n\ndbt_prod_job = dbt_metrics.to_job(\n resource_defs={\n **RESOURCES_PROD,\n **{\n \"dbt\": dbt_prod_resource,\n # this is an alternative pattern to the configured() api. 
If you know that you won't want to\n            # further configure this resource per pipeline run, this can be a bit more convenient than\n            # defining an @resource with a config schema.\n            \"dbt_assets\": ResourceDefinition.hardcoded_resource(\n                SnowflakeQueryDbtAssetResource(\n                    {**{\"database\": \"DEMO_DB\"}, **SHARED_SNOWFLAKE_CONF}, \"hackernews\"\n                )\n            ),\n            \"partition_bounds\": ResourceDefinition.none_resource(),\n        },\n    }\n)\n\ndbt_staging_job = dbt_metrics.to_job(\n    resource_defs={\n        **RESOURCES_STAGING,\n        **{\n            \"dbt\": dbt_staging_resource,\n            \"dbt_assets\": ResourceDefinition.hardcoded_resource(\n                SnowflakeQueryDbtAssetResource(\n                    {**{\"database\": \"DEMO_DB_STAGING\"}, **SHARED_SNOWFLAKE_CONF}, \"hackernews\"\n                )\n            ),\n            \"partition_bounds\": ResourceDefinition.none_resource(),\n        },\n    }\n)\n","sub_path":"examples/hacker_news/hacker_news/jobs/dbt_metrics.py","file_name":"dbt_metrics.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"598057932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 30 15:00:34 2016\n\n@author: kewenjing\nPC: introduction to python\n\"\"\"\n\nfrom pylab import *\n#ex1.1\ndef func1(t):\n    return t**3+t**2+t+1\n    \nN=10  \nx=linspace(-1,1,N)\ny=func1(x)\nxlabel('X')\nylabel('Y')\ntitle(r\"$f(x)=x^3+x^2+x+1$\")\npl=plot(x,y,'r--')\nsetp(pl,'linewidth',3)\nsetp(pl,'color','r')\n\n#ex1.2\ndef func2(m,n):\n    return m**2+n**2-2*m*n\n    \nx,y=mgrid[-1:1:0.1,-1:1:0.1]\ng=func2(x,y)\nfig,axes=subplots(1,2,figsize=(8,4)) #plots\naxes[0].pcolor(x,y,g) \naxes[0].set_title('pcolor') \naxes[1].contour(x,y,g) \naxes[1].set_title('contour')\n\n\n#figure\nfig2=figure(figsize=(4,3))\n#axes \naxes=fig2.add_subplot(1,1,1,projection='3d')\n    #plots  \npl2=axes.plot_surface(x,y,g,cmap=cm.coolwarm) \ncb=fig2.colorbar(pl2)","sub_path":"workshops/pc1/PC1_introPython.py","file_name":"PC1_introPython.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"462287148","text":"class Solution(object):\n    def transpose(self, A):\n        \"\"\"\n        :type A: List[List[int]]\n        :rtype: List[List[int]]\n        \"\"\"\n        rowLen, colLen = len(A), len(A[0])\n        transpose = [[0]*rowLen for i in range(colLen)]\n        for x in range(rowLen):\n            for y in range(colLen):\n                transpose[y][x] = A[x][y]\n        return transpose\n","sub_path":"solution/python/868.py","file_name":"868.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"252351487","text":"#!/usr/bin/python3\nimport sys\n\n\nrealType = {\n\"Boolean\" : \"Boolean\",\n\"char\" : \"Char\",\n\"data\" : \"Data\",\n\"float\" : \"Float\",\n\"Amt\" : \"Float\",\n\"Percentage\" : \"Float\",\n\"Price\" : \"Float\",\n\"PriceOffset\" : \"Float\",\n\"Qty\" : \"Float\",\n\"int\" : \"Int\",\n\"day-of-month\" : \"Int\",\n\"Length\" : \"Int\",\n\"NumInGroup\" : \"Int\",\n\"SeqNum\" : \"Int\",\n\"TagNum\" : \"Int\",\n\"String\" : \"String\",\n\"Country\" : \"String\",\n\"Currency\" : \"String\",\n\"Exchange\" : \"String\",\n\"LocalMktDate\" : \"Date\",\n\"month-year\" : \"MonthYear\",\n\"MultipleValueString\" : \"MulValString\",\n\"UTCDateOnly\" : \"Date\",\n\"UTCTimeOnly\" : \"Time\",\n\"UTCTimestamp\" : \"DateAndTime\"\n}\n\ntab = \" \"\n\n\n#HPP\ndef startHpp(hpp):\n    hpp.write(\"#pragma once\\n\\n\")\n    hpp.write(\"#include <string>\\n\")\n    hpp.write(\"#include \\\"Types.hpp\\\"\\n\")\n    hpp.write(\"\\n\")\n    
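# Everything the generator emits lives in namespace FIX; the free toHuman()\n    # helper declared next is defined once in the generated Tags.cpp.\n    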
hpp.write(\"namespace FIX\\n\")\n hpp.write(\"{\\n\")\n hpp.write(\"std::string toHuman(int tag, const char* val);\\n\\n\")\n\ndef createHpp(tagNum, tagName, tagType, vals, hpp):\n #Beggining of the class\n hpp.write(\"struct \" + tagName + \"\\n\")\n hpp.write(\"{\\n\")\n hpp.write(tab + \"constexpr static const int tag = \" + tagNum + \";\\n\")\n hpp.write(tab + \"constexpr static const char* name = \\\"\" + tagName + \"\\\";\\n\")\n hpp.write(\"\\n\")\n\n if tagType != \"MulValStr\":\n hpp.write(tab + \"static writeableTagVal<\" + tagType + \"> tagVal(\")\n if tagType == \"Boolean\":\n hpp.write(\"bool data\")\n if tagType == \"Char\":\n hpp.write(\"char data\")\n if tagType == \"Int\":\n hpp.write(\"int data\")\n if tagType == \"Float\":\n hpp.write(\"float data\")\n if tagType == \"String\":\n hpp.write(\"const char* data\")\n if tagType == \"Data\":\n hpp.write(\"Data data\")\n if tagType == \"Date\":\n hpp.write(\"Date data\")\n if tagType == \"MonthYear\":\n hpp.write(\"MonthYear data\")\n if tagType == \"Time\":\n hpp.write(\"Time data\")\n if tagType == \"DateAndTime\":\n hpp.write(\"DateAndTime data\")\n hpp.write(\");\\n\")\n \n #Possible values\n if len(vals) > 0:\n hpp.write(\"\\n\")\n for valName, val in vals:\n if tagType == \"Boolean\":\n hpp.write(tab + \"constexpr static const bool val\" + valName + \" = \" + (\"true\" if val == 'Y' else \"false\") + \";\\n\")\n if tagType == \"Char\":\n hpp.write(tab + \"constexpr static const char val\" + valName + \" = \" + \"'\" + val + \"';\\n\")\n if tagType == \"Int\":\n hpp.write(tab + \"constexpr static const int val\" + valName + \" = \" + val + \";\\n\")\n if tagType == \"Float\":\n hpp.write(tab + \"constexpr static const float val\" + valName + \" = \" + val + \";\\n\")\n if tagType == \"String\" or tagType == \"MulValString\":\n hpp.write(tab + \"constexpr static const char* val\" + valName + \" = \" + \"\\\"\" + val + \"\\\";\\n\")\n\n #Possible tag+val values\n if len(vals) > 0:\n hpp.write(\"\\n\")\n for valName, val in vals:\n hpp.write(tab+ \"constexpr static const char* tagVal\" + valName + \" = \\\"\" + tagNum + \"=\" + val + \"\\\";\\n\")\n\n hpp.write(\"\\n\")\n hpp.write(\"};\\n\")\n hpp.write(\"\\n\")\n\ndef endHpp(hpp):\n hpp.write(\"} //namespace FIX\\n\")\n\n \n#CPP\ndef startCpp(cpp):\n cpp.write(\"#include \\\"Tags.hpp\\\"\\n\")\n cpp.write(\"#include \\n\")\n cpp.write(\"#include \\n\")\n cpp.write(\"\\n\")\n cpp.write(\"namespace FIX\\n\")\n cpp.write(\"{\\n\")\n\ndef createCpp(tagNum, tagName, tagType, vals, cpp):\n if tagType != \"MulValStr\":\n cpp.write(tab + \"writeableTagVal<\" + tagType + \"> \" + tagName + \"::tagVal(\")\n if tagType == \"Boolean\":\n cpp.write(\"bool data\")\n if tagType == \"Char\":\n cpp.write(\"char data\")\n if tagType == \"Int\":\n cpp.write(\"int data\")\n if tagType == \"Float\":\n cpp.write(\"float data\")\n if tagType == \"String\":\n cpp.write(\"const char* data\")\n if tagType == \"Data\":\n cpp.write(\"Data data\")\n if tagType == \"Date\":\n cpp.write(\"Date data\")\n if tagType == \"MonthYear\":\n cpp.write(\"MonthYear data\")\n if tagType == \"Time\":\n cpp.write(\"Time data\")\n if tagType == \"DateAndTime\":\n cpp.write(\"DateAndTime data\")\n cpp.write(\")\\n\")\n cpp.write(tab + \"{\\n\")\n\n if tagType != \"MulValString\":\n cpp.write(tab+tab+ \"return writeableTagVal<\" + tagType + \">(\\\"\")\n cpp.write(tagNum+ \"\\\", \" + str(len(tagNum)) + \", data);\\n\")\n cpp.write(tab + \"}\\n\")\n cpp.write('\\n')\n\n \n\ndef endCpp(cpp, tagVals):\n 
cpp.write(\"std::string toHuman(int tag, const char* val)\\n\")\n cpp.write(\"{\\n\")\n cpp.write(tab+ \"std::string res;\\n\")\n cpp.write(tab+ \"bool foundValue = false;\\n\")\n cpp.write(tab+ \"switch(tag)\\n\")\n cpp.write(tab+ \"{\\n\")\n \n for (tagNum, tagName, tagType, vals) in tagVals:\n cpp.write(tab+ \"case FIX::\" + tagName + \"::tag: {\\n\")\n \n\n if len(vals) > 0:\n if tagType == \"Boolean\":\n cpp.write(tab+tab+ \"FIX::Boolean value = boolOfStr(val);\\n\")\n if tagType == \"Char\":\n cpp.write(tab+tab+ \"FIX::Char value = charOfStr(val);\\n\")\n if tagType == \"Int\":\n cpp.write(tab+tab+ \"FIX::Int value = intOfStr(val);\\n\")\n if tagType == \"Float\":\n cpp.write(tab+tab+ \"FIX::Float value = floatOfStr(val);\\n\")\n if tagType == \"String\":\n cpp.write(tab+tab+ \"FIX::String value = stringOfStr(val);\\n\")\n #if tagType == \"MulValString\":\n \n\n cpp.write(tab+tab+ \"res = \\\"\" + tagName + \" = \\\";\\n\")\n\n if tagType != \"MulValString\": #unhandled\n for (valueName, value) in vals:\n if tagType == \"String\":\n cpp.write(tab+tab+ \"if(!strcmp(value, FIX::\"+tagName+\"::val\"+valueName+\"))\\n\")\n else:\n cpp.write(tab+tab+ \"if(value == FIX::\"+tagName+\"::val\"+valueName+\")\\n\")\n cpp.write(tab+tab+ \"{\\n\")\n cpp.write(tab+tab+tab+ \"foundValue = true;\\n\")\n cpp.write(tab+tab+tab+ \"res += \\\"\" + valueName + \"\\\";\\n\")\n cpp.write(tab+tab+ \"}\\n\")\n cpp.write(tab+tab+ \"if(!foundValue)\\n\")\n cpp.write(tab+tab+tab+ \"res += val;\\n\")\n cpp.write(tab+\"}break;\\n\\n\")\n\n cpp.write(\"}\\n\")\n\n cpp.write(\"return res;\\n\")\n cpp.write(\"}\\n\")\n cpp.write(\"} //namespace FIX\\n\")\n\n\n\n#Main function\nif len(sys.argv) < 2:\n print(\"You have to provide dictionary file as an argument!\")\n exit()\n\ndicFilePath = sys.argv[1]\ndicFile = open(dicFilePath, \"r\")\ndictionary = dicFile.read().split()\n\ntagVals = []\n\ncurDicPos = 0\nwhile curDicPos < len(dictionary):\n tagNum = dictionary[curDicPos]\n curDicPos += 1\n\n tagName = dictionary[curDicPos]\n curDicPos += 1\n\n tagType = dictionary[curDicPos]\n curDicPos += 1\n tagType = realType[tagType]\n\n vals = []\n\n while curDicPos < len(dictionary):\n val = dictionary[curDicPos]\n curDicPos += 1\n \n if val[0] == '#':\n break\n\n valName = dictionary[curDicPos]\n curDicPos += 1\n vals += [(valName, val)]\n\n tagVals += [(tagNum, tagName, tagType, vals)]\n\nhpp = open(\"src/FIX/Tags.hpp\", \"w\")\ncpp = open(\"src/FIX/Tags.cpp\", \"w\")\n\nwelcomeMessage = \"\"\"\n// This file has been automatically generated via fixGen/dic2Code.py\n// On base of \"\"\" + dicFilePath + \" fix dictionary\\n\"; \n\nhpp.write(welcomeMessage)\ncpp.write(welcomeMessage)\n\nstartHpp(hpp)\nstartCpp(cpp)\n\nfor (tagNum, tagName, tagType, vals) in tagVals:\n createHpp(tagNum, tagName, tagType, vals, hpp)\n createCpp(tagNum, tagName, tagType, vals, cpp)\n\nendHpp(hpp)\nendCpp(cpp, tagVals)\n","sub_path":"codeGen/dic2Code.py","file_name":"dic2Code.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"176132651","text":"\nimport Functions\n\nimg = Functions.photo.imread(\"Lena.ppm\")\n#photo.imshow(\"download.png\",img);\n#photo.waitKey(0);\n#lower =(0, 0, 0) # lower bound for each channel\n#upper = (80, 80,80) # upper bound for each channel\n\n## create the mask and use it to change the colors\n#mask = photo.inRange(img, lower, upper)\n#img[mask != 0] = [255,0,255]\n\n\n\ncap = Functions.photo.VideoCapture('ball.mov')\n \n# 
Check if camera opened successfully\nif (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n \n# Read until video is completed\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n \n # Display the resulting frame\n Functions.photo.imshow('Frame',frame)\n \n # Press Q on keyboard to exit\n if Functions.photo.waitKey(25) & 0xFF == ord('q'):\n break\n \n # Break the loop\n else: \n break\n \n# When everything done, release the video capture object\ncap.release()\n \n# Closes all the frames\nFunctions.photo.destroyAllWindows()\n#Functions.Write_Circle(img)\n\n## display it\n#Functions.photo.imshow(\"My Fucking Photo\", img)\n#Functions.photo.waitKey(0)\n\n#Functions.Fourier_Transform()\n#Functions.Make_Edge()","sub_path":"ADVANCED_PROGRAMMING_II/!PYTHON/First_Class/Photo_Loader/Photo_Loader.py","file_name":"Photo_Loader.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"546692264","text":"\"\"\"\nA parser written in python for reading the output .xml file\n\n\"\"\"\n\nimport xml.etree.ElementTree as ET\n\n\ndef xml_parse(f=\"/Users/luptior/Desktop/Research_3/python_generator/data/a5_d100_r4.xml\"):\n tree = ET.parse(f)\n root = tree.getroot()\n\n agents = []\n variables = {}\n domains = {}\n relations = {}\n constraints = {}\n\n data = {x.tag: x for x in root}\n\n # processing agents\n for agent in data['agents']:\n agents.append(agent.attrib['name'])\n\n # processing variables\n for v in data['variables']:\n variables[v.attrib[\"name\"]] = {\"agent\": v.attrib[\"agent\"]}\n variables[v.attrib[\"name\"]][\"domain\"] = v.attrib[\"domain\"]\n\n # processing domain\n for d in data['domains']:\n domains[d.attrib[\"name\"]] = {'nbValues': d.attrib['nbValues']}\n domains[d.attrib[\"name\"]]['domain_range'] = [int(x) for x in d.text.split(\"..\")]\n\n # processing relations\n for r in data['relations']:\n relations[r.attrib['name']] = r.attrib\n relations[r.attrib['name']]['relations'] = \\\n {tuple([int(x) for x in x.strip().split(\":\")[-1].split()]): int(x.strip().split(\":\")[0]) \\\n for x in r.text.strip(\"|\").split(\"|\")}\n\n # processing constrains\n for c in data['constraints']:\n constraints[c.attrib['name']] = c.attrib\n constraints[c.attrib['name']]['scope'] = constraints[c.attrib['name']]['scope'].split()\n\n return agents, domains, variables, relations, constraints\n\n\nif __name__ == '__main__':\n f = \"/Users/luptior/Desktop/Research_3/simulation_data/random_5_d3/rep_0_random_5_d3.xml\"\n print(xml_parse(f))","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"291855325","text":"#Stock Initialization\n# main modules\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n#Required to manipulate df\nfrom pandas_datareader import data as pdr\n#updated yahoo finance fetcher\nimport yfinance as yf\nimport datetime\n\n#ML \nimport sklearn\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import train_test_split\n\ndef predict(stock):\n start = datetime.datetime.now()-datetime.timedelta(days=365)\n end = datetime.datetime.now()\n yf.Ticker(stock)\n\n #pull data using yfinance into a dataframe\n yf.Ticker(stock)\n df = pdr.get_data_yahoo(stock, start=start, 
end=end).reset_index()\n    #df.head(3)\n    df.set_index('Date', inplace=True)\n    # train/test split\n\n\n    X = df.iloc[:,:-1].to_numpy()\n    y = df.iloc[:,-1].to_numpy()\n\n    num_data = df.shape[0]\n    split_ratio = 0.8\n    split = int(split_ratio * num_data)\n\n    X_train = X[:split]\n    y_train = y[:split].reshape(-1,1)\n    X_test = X[split:]\n    y_test = y[split:].reshape(-1,1)\n    #Linear regression model\n    #Training\n    split_time = df.index[split]\n    reg_linear = LinearRegression()\n    reg_linear.fit(X_train, y_train)\n    # testing\n    trainings = reg_linear.predict(X_train).reshape(-1,1)\n    predictions = reg_linear.predict(X_test).reshape(-1,1)\n    if predictions[-1] > df['Adj Close'][-1]:\n        token = True\n    else:\n        token = False\n    return token\n\n","sub_path":"SYBTS/ml_predict.py","file_name":"ml_predict.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"277163492","text":"\"\"\"\nSuppose we own three (object) mobile phones (class). Each phone should store its model, the date we\nbought it and how much we paid for it, together with a phone book belonging to that phone (instance attribute)\n(the phone book can be created by defining a dictionary for each object). Then each phone should offer\noptions to search for a contact in its phone book (instance method), add a contact (instance method) and delete a contact.\n\nWith the code we write:\n\n1) get the information of all phones (classmethod)\n2) get the information of each phone separately (instance method)\n3) sort the phone prices from lowest to highest (class method)\n4) for each phone\n\n\ta) display the phone book (instance method)\n\n    b) check whether a contact exists in the phone book (instance method)\n\n\tc) add a contact to the phone book (if the same contact already exists, it should warn and return to the query screen) (instance method)\n(you can also write the if/else style query statements inside the instance methods)\n\n\td) delete a contact from the phone book (it should warn if the contact we want to delete does not exist) (instance method)\n\"\"\"\nclass MobilePhone:\n\n    model1 = []\n    year1 = []\n    amount1 = []\n\n    def __init__(self, model, year, amount, phone_book):\n        self.model = model\n        self.year = year\n        self.amount = amount\n        self.phone_book = phone_book\n        MobilePhone.model1.append(self.model)\n        MobilePhone.year1.append(self.year)\n        MobilePhone.amount1.append(self.amount)\n\n    @classmethod\n    def general_info(cls):\n        print(\"--General Information--\")\n        for i in range(len(MobilePhone.model1)):\n            print(\"Model : {}, Year : {}, Amount : {}\".format(MobilePhone.model1[i], MobilePhone.year1[i], MobilePhone.amount1[i]))\n\n\n    @classmethod\n    def sort_of_amounts(cls):\n        return MobilePhone.amount1.sort()\n\n\n    def phone_info(self):\n        return \"MODEL : {} , YEAR : {} , AMOUNT : {} , CONTACTS : {}\".format(self.model, self.year, self.amount, self.phone_book)\n\n\n    def DisplayPhoneBook(self):\n        return self.phone_book\n\n\n    def SearchPerson(self):\n        name = input(\"Please enter the name and surname for search \")\n\n        if name in self.phone_book.keys():\n            return \"{} --> phone number : {} \".format(name, self.phone_book.get(name))\n        else:\n            return \"Contact not found. 
\"\n\n\n def AddPerson(self):\n name = input(\"Please enter the name and surname for add a contact: \")\n number = input(\"Please enter the phone number: \")\n\n if name in self.phone_book.keys():\n return \"There is already this contact.\"\n else:\n self.phone_book[name] = number\n return self.phone_book\n\n\n def DeletePerson(self):\n name = input(\"Enter the name and surname for to remove a contact: \")\n\n if name in self.phone_book.keys():\n self.phone_book.pop(name)\n return self.phone_book\n else:\n return \"The contact not found.\"\n\nphone1 = MobilePhone(\"iphone xyz\",2019,1000,{\"hakan guner\" : 12312313123})\nprint(phone1.model)\nprint(phone1.SearchPerson())","sub_path":"phone_book.py","file_name":"phone_book.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"561177263","text":"import itertools\nimport math\nimport random\n\nimport os\nimport jsonlines\nimport numpy as np\nimport pandas as pd\nfrom joblib import Parallel, delayed\n\nfrom .alias import alias_sample, create_alias_table\nfrom .utils import partition_num\nfrom tqdm.auto import tqdm\n\nimport multiprocessing\n\nclass RandomWalker:\n def __init__(self, G, p=1, q=1, use_rejection_sampling=0, dump_path=None, dump_size=100000):\n \"\"\"\n :param G:\n :param p: Return parameter,controls the likelihood of immediately revisiting a node in the walk.\n :param q: In-out parameter,allows the search to differentiate between “inward” and “outward” nodes\n :param use_rejection_sampling: Whether to use the rejection sampling strategy in node2vec.\n \"\"\"\n self.G = G\n self.p = p\n self.q = q\n self.use_rejection_sampling = use_rejection_sampling\n self.dump_path = dump_path\n self.dump_size = dump_size\n\n self.nodes = list(G.nodes())\n self.neighbors = {node: list(G.neighbors(node)) for node in G.nodes()}\n\n def deepwalk_walk(self, walk_length, start_node):\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = self.neighbors[cur]\n if len(cur_nbrs) > 0:\n walk.append(random.choice(cur_nbrs))\n else:\n break\n return walk\n\n def node2vec_walk(self, walk_length, start_node):\n\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = self.neighbors[cur]\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(\n cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])\n else:\n prev = walk[-2]\n edge = (prev, cur)\n next_node = cur_nbrs[alias_sample(alias_edges[edge][0],\n alias_edges[edge][1])]\n walk.append(next_node)\n else:\n break\n\n return walk\n\n def node2vec_walk2(self, walk_length, start_node):\n \"\"\"\n Reference:\n KnightKing: A Fast Distributed Graph Random Walk Engine\n http://madsys.cs.tsinghua.edu.cn/publications/SOSP19-yang.pdf\n \"\"\"\n\n def rejection_sample(inv_p, inv_q, nbrs_num):\n upper_bound = max(1.0, max(inv_p, inv_q))\n lower_bound = min(1.0, min(inv_p, inv_q))\n shatter = 0\n second_upper_bound = max(1.0, inv_q)\n if (inv_p > second_upper_bound):\n shatter = second_upper_bound / nbrs_num\n upper_bound = second_upper_bound + shatter\n return upper_bound, lower_bound, shatter\n\n G = self.G\n alias_nodes = self.alias_nodes\n inv_p = 1.0 / self.p\n inv_q = 1.0 / self.q\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = self.neighbors[cur]\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(\n 
cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])\n else:\n upper_bound, lower_bound, shatter = rejection_sample(\n inv_p, inv_q, len(cur_nbrs))\n prev = walk[-2]\n prev_nbrs = set(self.neighbors[prev])\n while True:\n prob = random.random() * upper_bound\n if (prob + shatter >= upper_bound):\n next_node = prev\n break\n next_node = cur_nbrs[alias_sample(\n alias_nodes[cur][0], alias_nodes[cur][1])]\n if (prob < lower_bound):\n break\n if (prob < inv_p and next_node == prev):\n break\n _prob = 1.0 if next_node in prev_nbrs else inv_q\n if (prob < _prob):\n break\n walk.append(next_node)\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length, workers=1, verbose=0):\n\n def chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\n G = self.G\n\n # self.nodes = list(G.nodes())\n chunk_size = math.ceil(len(self.nodes) / workers)\n random.shuffle(self.nodes)\n self.node_partitions = {num: chunk for num, chunk in \\\n enumerate(chunks(self.nodes, chunk_size))}\n\n # return self.simulate_walks2(self, num_walks, walk_length, workers, verbose)\n # old\n # results = Parallel(n_jobs=workers, verbose=verbose, )(\n # delayed(self._simulate_walks)(num, walk_length) for num in\n # partition_num(num_walks, workers))\n\n results = Parallel(n_jobs=workers, verbose=verbose, )(\n delayed(self._simulate_walks)(num_walks, walk_length, partition) for \\\n partition in self.node_partitions.keys())\n\n walks = list(itertools.chain(*results))\n\n return walks\n\n def dump_walks(self, results):\n dump_path = os.path.join(self.dump_path, f'walks_dump.jsonl')\n with jsonlines.open(dump_path, 'a') as writer:\n writer.write_all(results)\n\n def dump_jsonlines(self, d, path):\n # new_d = {str(key): value for key, value in d.items()}\n # with jsonlines.open(path, 'a') as writer:\n # writer.write(object)\n pd.DataFrame.from_dict(d.items()).to_csv(path, mode='a', header=False, index=False)\n\n def load_dumped_alias(self):\n # load nodes\n df = pd.read_csv('data/cache/alias_nodes', converters={0: ast.literal_eval, 1: ast.literal_eval},\n header=None)\n self.alias_nodes = pd.Series(df[1].values, index=df[0]).to_dict()\n df = None\n\n if not self.use_rejection_sampling:\n # load alias\n df = pd.read_csv('data/cache/alias_edges', converters={0: ast.literal_eval, 1: ast.literal_eval},\n header=None)\n self.alias_edges = pd.Series(df[1].values, index=df[0]).to_dict()\n\n\n def simulate_walks2(self, num_walks, walk_length, workers=1, verbose=0, batch_size=100000):\n G = self.G\n\n self.nodes = list(G.nodes())\n\n jobs = [(num_walks, walk_length, partition_num) for partition_num in self.node_partitions.keys()]\n\n with multiprocessing.Pool(processes=workers) as p:\n results = p.starmap(self._simulate_walks, jobs)\n\n # results = list()\n # for batch in np.array_split(jobs, math.ceil(len(jobs)/batch_size)):\n # result = p.starmap(self._simulate_walks, batch)\n # if self.dump_path is None:\n # results.extend(result)\n # else:\n # self.dump_walks(result)\n #\n # if self.dump_path is None:\n # return None\n\n walks = list(itertools.chain(*results))\n\n return walks\n\n def _simulate_walks(self, num_walks, walk_length, partition):\n walks = []\n for _ in range(num_walks):\n random.shuffle(self.node_partitions[partition])\n for num, v in tqdm(enumerate(self.node_partitions[partition]), total=len(self.node_partitions[partition]), desc=f\"Simulating walks ({_}/{num_walks})\"):\n if self.p == 1 and self.q == 1:\n 
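# p == q == 1 makes every transition uniform over the current node's\n                    # neighbours, so node2vec degenerates to a plain DeepWalk random walk.\n                    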
walks.append(self.deepwalk_walk(\n walk_length=walk_length, start_node=v))\n elif self.use_rejection_sampling:\n walks.append(self.node2vec_walk2(\n walk_length=walk_length, start_node=v))\n else:\n walks.append(self.node2vec_walk(\n walk_length=walk_length, start_node=v))\n\n if num % self.dump_size == 0:\n if self.dump_path is not None:\n self.dump_walks(walks)\n walks = list()\n if self.dump_path is not None:\n self.dump_walks(walks)\n walks = list()\n\n return walks\n\n def get_alias_edge(self, t, v):\n \"\"\"\n compute unnormalized transition probability between nodes v and its neighbors give the previous visited node t.\n :param t:\n :param v:\n :return:\n \"\"\"\n G = self.G\n p = self.p\n q = self.q\n\n unnormalized_probs = []\n for x in self.neighbors[v]:\n weight = G[v][x].get('weight', 1.0) # w_vx\n if x == t: # d_tx == 0\n unnormalized_probs.append(weight/p)\n elif G.has_edge(x, t): # d_tx == 1\n unnormalized_probs.append(weight)\n else: # d_tx > 1\n unnormalized_probs.append(weight/q)\n norm_const = sum(unnormalized_probs)\n normalized_probs = [\n float(u_prob)/norm_const for u_prob in unnormalized_probs]\n\n return create_alias_table(normalized_probs)\n\n\n\n def preprocess_transition_probs(self):\n \"\"\"\n Preprocessing of transition probabilities for guiding the random walks.\n \"\"\"\n G = self.G\n alias_nodes = dict()\n for num, node in tqdm(enumerate(G.nodes()), total=len(G.nodes()), desc='Preprocess - nodes'):\n unnormalized_probs = [G[node][nbr].get('weight', 1.0)\n for nbr in self.neighbors[node]]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [\n float(u_prob)/norm_const for u_prob in unnormalized_probs]\n alias_nodes[node] = create_alias_table(normalized_probs)\n # if num % 1000 == 0:\n # self.dump_jsonlines(alias_nodes, './data/cache/alias_nodes')\n # alias_nodes = dict()\n # self.dump_jsonlines(alias_nodes, './data/cache/alias_nodes')\n # alias_nodes = dict()\n\n if not self.use_rejection_sampling:\n \n def do_alieas_edge(edges):\n items = list()\n for edge in tqdm(edges):\n items.append((edge, self.get_alias_edge(edge[0], edge[1])))\n return items\n\n results = Parallel(n_jobs=multiprocessing.cpu_count(), verbose=1, )(\n delayed(do_alieas_edge)(edges) for edges in np.array_split(G.edges(), multiprocessing.cpu_count()))\n\n results = list(itertools.chain(*results))\n alias_edges = {key: value for key, value in results}\n \n \n # alias_edges = dict()\n # for num, edge in tqdm(enumerate(G.edges()), total=len(G.edges()), desc='Preprocess - edges'):\n # alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n \n # if num % 1000 == 0:\n # self.dump_jsonlines(alias_edges, './data/cache/alias_edges')\n # alias_edges = dict()\n # self.dump_jsonlines(alias_edges, './data/cache/alias_edges')\n # self.alias_edges = dict()\n self.alias_edges = alias_edges\n\n self.alias_nodes = alias_nodes\n return\n\n\nclass BiasedWalker:\n def __init__(self, idx2node, temp_path):\n\n self.idx2node = idx2node\n self.idx = list(range(len(self.idx2node)))\n self.temp_path = temp_path\n pass\n\n def simulate_walks(self, num_walks, walk_length, stay_prob=0.3, workers=1, verbose=0):\n\n layers_adj = pd.read_pickle(self.temp_path+'layers_adj.pkl')\n layers_alias = pd.read_pickle(self.temp_path+'layers_alias.pkl')\n layers_accept = pd.read_pickle(self.temp_path+'layers_accept.pkl')\n gamma = pd.read_pickle(self.temp_path+'gamma.pkl')\n walks = []\n initialLayer = 0\n\n nodes = self.idx # list(self.g.nodes())\n\n results = Parallel(n_jobs=workers, verbose=verbose, )(\n 
delayed(self._simulate_walks)(nodes, num, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma) for num in\n partition_num(num_walks, workers))\n\n walks = list(itertools.chain(*results))\n return walks\n\n def _simulate_walks(self, nodes, num_walks, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma):\n walks = []\n for _ in range(num_walks):\n random.shuffle(nodes)\n for v in nodes:\n walks.append(self._exec_random_walk(layers_adj, layers_accept, layers_alias,\n v, walk_length, gamma, stay_prob))\n return walks\n\n def _exec_random_walk(self, graphs, layers_accept, layers_alias, v, walk_length, gamma, stay_prob=0.3):\n initialLayer = 0\n layer = initialLayer\n\n path = []\n path.append(self.idx2node[v])\n\n while len(path) < walk_length:\n r = random.random()\n if(r < stay_prob): # same layer\n v = chooseNeighbor(v, graphs, layers_alias,\n layers_accept, layer)\n path.append(self.idx2node[v])\n else: # different layer\n r = random.random()\n try:\n x = math.log(gamma[layer][v] + math.e)\n p_moveup = (x / (x + 1))\n except:\n print(layer, v)\n raise ValueError()\n\n if(r > p_moveup):\n if(layer > initialLayer):\n layer = layer - 1\n else:\n if((layer + 1) in graphs and v in graphs[layer + 1]):\n layer = layer + 1\n\n return path\n\n\ndef chooseNeighbor(v, graphs, layers_alias, layers_accept, layer):\n\n v_list = graphs[layer][v]\n\n idx = alias_sample(layers_accept[layer][v], layers_alias[layer][v])\n v = v_list[idx]\n\n return v\n","sub_path":"ge/walker.py","file_name":"walker.py","file_ext":"py","file_size_in_byte":14283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"436671633","text":"# encoding: utf-8\n\"\"\"\n @project:id_card_recognization\n @author: Jiang Hui\n @language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. 
on linux\n    @time: 6/19/19 10:48 AM\n    @desc: wrap the functionality implemented in id_recognition into a Flask server and expose it as an API\n\"\"\"\nimport cv2\nimport numpy as np\nimport pytesseract\nfrom flask import Flask, request\nimport json\nimport os\nimport logging\nimport traceback\n\nlogging.basicConfig(filename='log.txt', level=logging.DEBUG,\n                    format='%(asctime)s - %(levelname)s - %(message)s')\n\n# ******************* basic Flask server configuration ***************************\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'PNG', 'JPG', 'JPEG'}\n\napp = Flask(__name__)\n\n# app.config['SECRET_KEY'] = '123456'\npath = os.getcwd()\nif not os.path.exists(path):\n    os.makedirs(path)\napp.config['UPLOAD_FOLDER'] = path  # set the upload folder\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef img_preprocess(pic_path, coefs):\n    '''\n    Image preprocessing: resize -> denoise -> transform to grey\n    :param pic_path:\n    :return: image\n    '''\n    img = cv2.imread(pic_path, cv2.IMREAD_COLOR)\n    # sanity check: return if the image path is wrong\n    if img is None:\n        return\n    # if the photo is in portrait orientation, rotate it 90 degrees counterclockwise\n    if img.shape[0] > img.shape[1]:\n        img = np.rot90(img)\n    img_resize = cv2.resize(img, (428, 270), interpolation=cv2.INTER_CUBIC)  # resize the photo to 428*270\n    img_denoise = cv2.fastNlMeansDenoisingColored(img_resize, None, 10, 10, 7, 21)  # denoise\n    # convert to grayscale\n    # coefficients = [0, 1, 1]\n    m = np.array(coefs).reshape((1, 3))\n    img_gray = cv2.transform(img_denoise, m)\n    # inverse-binarize the image\n    img_binary_inv = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9, 3)\n    # custom denoising code that removes isolated pixels\n    img_binary_inv = denoise(img_binary_inv)\n    # dilation merges the characters into rectangular blobs, used in the next step to locate the ID-number region\n    ele = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 5))  # (10, 5) is the tested horizontal/vertical dilation size\n    img_dilation = cv2.dilate(img_binary_inv, ele, iterations=1)\n    return img_resize, img_dilation\n\n\ndef denoise(binary):\n    \"\"\"\n    If a pixel value is 255 but its four neighbours (up, down, left, right) are all 0, treat it as an\n    isolated pixel and set it to 0 as well\n    :param binary: binary image\n    :return: binary\n    \"\"\"\n    for i in range(1, len(binary) - 1):\n        for j in range(1, len(binary[0]) - 1):\n            if binary[i][j] == 255:\n                # if this condition holds, the current pixel is an isolated point\n                if binary[i - 1][j] == binary[i + 1][j] == binary[i][j - 1] == binary[i][j + 1] == 0:\n                    binary[i][j] = 0\n    return binary\n\n\ndef find_number_region(img):\n    '''\n    Find the rectangular region containing the ID number in the dilated image\n    :param img:\n    :return: box\n    '''\n    image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n    card_number_region = []  # holds the region that is finally returned\n    max_area = 0  # largest rectangle area, used to filter out the card-border region\n    regions = []  # all candidate ID-number regions\n    for i in range(len(contours)):\n        # minimum bounding rectangle of the contour: ((center x, center y), (width, height), rotation angle)\n        rect = cv2.minAreaRect(contours[i])\n        box = np.int0(cv2.boxPoints(rect))  # box holds the four corner coordinates; np.int0() drops the decimals\n        width, height = rect[1]\n        if 0 not in box and (9 < width / height < 18 or 9 < height / width < 18):\n            regions.append(box)\n\n    return regions\n\n\ndef get_number_img(origin_img, regions):\n    '''\n    Crop the ID-number region out of the original image using the boxes found in the previous step\n    :param origin_img:\n    :param region:\n    :return: image\n    '''\n    images = []\n    for region in regions:\n        # crop the region from the four corner coordinates\n        h = abs(region[0][1] - region[2][1])\n        w = abs(region[0][0] - region[2][0])\n        x_s = [i[0] for i in region]\n        y_s = [i[1] for i in region]\n        x1 = min(x_s)\n        y1 = min(y_s)\n        images.append(origin_img[y1:y1 + h, x1:x1 + w])\n    return images\n\n\n# ************* start: horizontal correction of the ID-number image region *******************\n\n\n# radians-to-degrees conversion\ndef degree_trans(theta):\n    res = theta / np.pi * 180\n    return res\n\n\n# rotate the image counterclockwise by degree degrees (original size)\ndef rotate_image(src, degree):\n    # rotate around the image center\n    h, w = src.shape[:2]\n    # compute the affine transform matrix for a 2D rotation\n    rotate_matrix = 
cv2.getRotationMatrix2D((w / 2.0, h / 2.0), degree, 1)\n    # affine transform with the background filled in white\n    rotate = cv2.warpAffine(src, rotate_matrix, (w, h), borderValue=(255, 255, 255))\n    return rotate\n\n\n# compute the skew angle via the Hough transform\ndef calc_degree(img):\n    mid_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    dst_image = cv2.Canny(mid_image, 50, 200, 3)\n    line_image = img.copy()\n    # detect straight lines with the Hough transform\n    # the 4th parameter is the accumulator threshold; the higher it is, the stricter the detection\n    lines = cv2.HoughLines(dst_image, 1, np.pi / 180, 15)\n    # guard against lines being None\n    if lines is None:\n        return 0\n    # the threshold is hard to tune across images: too high and no lines are detected,\n    # too low and there are too many lines and it gets very slow\n    count = 0\n    # draw each detected line in turn\n    for i in range(len(lines)):\n        for rho, theta in lines[i]:\n            a = np.cos(theta)\n            b = np.sin(theta)\n            x0 = a * rho\n            y0 = b * rho\n            x1 = int(round(x0 + 1000 * (-b)))\n            y1 = int(round(y0 + 1000 * a))\n            x2 = int(round(x0 - 1000 * (-b)))\n            y2 = int(round(y0 - 1000 * a))\n            # accumulate the angles; their average is used as the rotation angle below\n            count += theta\n            cv2.line(line_image, (x1, y1), (x2, y2), (0, 0, 255), 1, cv2.LINE_AA)\n\n    # average over all angles; this gives a better rotation result\n    average = count / len(lines)\n    angle = degree_trans(average) - 90\n    return angle\n\n\ndef horizontal_correct(imgs):\n    images = []\n    for img in imgs:\n        degree = calc_degree(img)\n        # in testing, horizontal correction is only applied when the skew angle is large (the code uses > 5 degrees)\n        if abs(degree) > 5:\n            img_rotate = rotate_image(img, degree)\n            images.append(img_rotate)\n        else:\n            images.append(img)\n    return images\n\n\n# ************* end: horizontal correction of the ID-number image region *******************\n\n\ndef tesseract_ocr(imgs):\n    for img in imgs:\n        id_number = pytesseract.image_to_string(img, lang='eng', config='--psm 7 sfz')\n        # print(id_number)\n\n        # manually fix errors that may appear in the recognition result\n\n        # strip all non-digit characters from the string (except x, X and §)\n        id_number = ''.join(list(filter(lambda ch: ch in '0123456789xX§', id_number)))\n        id_number = id_number.replace('x', 'X')\n\n        if len(id_number) < 10:\n            continue\n\n        if len(id_number) == 19 and '§' in id_number:\n            id_number = id_number.replace('§', '')\n        else:\n            id_number = id_number.replace('§', '5')\n\n        # in testing, tesseract sometimes reads 4 as 46, so replace it manually here\n        if len(id_number) == 19 and '46' in id_number:\n            id_number = id_number.replace('46', '4')\n\n        # in testing, tesseract sometimes reads x as xx, so drop one X manually here\n        if len(id_number) == 19 and 'XX' in id_number:\n            id_number = id_number.replace('XX', 'X')\n    return id_number\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef main(file_path):\n    # ********************* begin **************************\n    # step 1: preprocess the image\n    image_resize, image_preprocessed = img_preprocess(file_path, [0, 1, 1])\n\n    # step 2: find the id number region\n    number_regions = find_number_region(image_preprocessed)\n\n    # step 3: get the id number image\n    image_id_number = get_number_img(image_resize, number_regions)\n    # if the card-number region cannot be found\n    # if image_id_number is None:\n    #     res = {'code': -1, 'card_number': '-1', 'info': 'Can Not Find the Card Number Area'}\n    #     return json.dumps(res)\n\n    # step 4: horizontally correct the image, if necessary\n    image_correct = horizontal_correct(image_id_number)\n\n    # step 5: recognize the id number\n    number = tesseract_ocr(image_correct)\n\n    # delete the image from the server once recognition is done\n    # if os.path.exists(file_path):\n    #     os.remove(file_path)\n    # ********************* end **************************\n\n    if number is None:\n        res = {'code': -1, 'card_number': '-1', 'info': 'Can Not Find the Card Number Area!'}\n    elif len(number) != 18:\n        res = {'code': -1, 'card_number': number, 'info': 'The identified result is incorrect!'}\n    else:\n        res = {'code': 0, 'card_number': number, 'info': 'Success!'}\n\n    return res\n\n\n@app.route('/recognition', methods=['POST'])\ndef upload_file():\n    if request.method == 'POST':\n        file = request.files['file']\n        if file and allowed_file(file.filename):\n            file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\n            file.save(file_path)\n            # once the image is saved, start extracting the ID number\n            try:\n                result = main(file_path)\n            except:\n                logging.error(traceback.format_exc())\n                result = {'code': -1, 'card_number': '-1', 'info': 'Can Not Find the Card Number Area!'}\n\n            # delete the image from the server once recognition is done\n            if os.path.exists(file_path):\n                os.remove(file_path)\n\n            return json.dumps(result)\n\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=8880)\n","sub_path":"code/3_id_card_server.py","file_name":"3_id_card_server.py","file_ext":"py","file_size_in_byte":10190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"641112264","text":"# -*- coding: utf-8 -*-\n\"\"\"This module defines functions for calculating physical properties from normal\nmodes.\"\"\"\n\nimport time\n\nimport numpy as np\n\nfrom prody import LOGGER\nfrom prody.proteins import parsePDB\nfrom prody.atomic import AtomGroup, Selection\nfrom prody.ensemble import Ensemble, Conformation\nfrom prody.trajectory import TrajBase\nfrom prody.utilities import importLA\nfrom numpy import sqrt, arange, log, polyfit, array\n\nfrom .nma import NMA\nfrom .modeset import ModeSet\nfrom .mode import VectorBase, Mode, Vector\nfrom .gnm import GNMBase\n\n__all__ = ['calcCollectivity', 'calcCovariance', 'calcCrossCorr',\n           'calcFractVariance', 'calcSqFlucts', 'calcTempFactors',\n           'calcProjection', 'calcCrossProjection', \n           'calcPerturbResponse', 'parsePerturbResponseMatrix', \n           'calcPerturbResponseProfiles', 'writePerturbResponsePDB',\n           'calcSpecDimension', 'calcPairDeformationDist',]\n\nclass PRSMatrixParseError(Exception):\n    pass\n\ndef calcCollectivity(mode, masses=None):\n    \"\"\"Returns collectivity of the mode. This function implements collectivity\n    as defined in equation 5 of [BR95]_. If *masses* are provided, they will\n    be incorporated in the calculation. Otherwise, atoms are assumed to have\n    uniform masses.\n\n    .. [BR95] Bruschweiler R. Collective protein dynamics and nuclear\n       spin relaxation. 
*J Chem Phys* **1995** 102:3396-3403.\n\n    :arg mode: mode or vector\n    :type mode: :class:`.Mode` or :class:`.Vector`\n\n    :arg masses: atomic masses\n    :type masses: :class:`numpy.ndarray`\"\"\"\n\n    if not isinstance(mode, (Mode, ModeSet)):\n        raise TypeError('mode must be a Mode or ModeSet instance')\n    if isinstance(mode, Mode):\n        mode = [mode]\n    \n    colls = []\n\n    for m in mode:\n        is3d = m.is3d()\n        if is3d:\n            u2in = (m.getArrayNx3() ** 2).sum(1)\n        else:\n            u2in = (m.getArrayNx3() ** 2)\n        if masses is not None:\n            if len(masses) != m.numAtoms():\n                raise ValueError('length of masses must be equal to number of atoms')\n            u2in = u2in / masses\n        u2in = u2in * (1 / u2in.sum() ** 0.5)\n        coll = np.exp(-(u2in * np.log(u2in)).sum()) / m.numAtoms()\n        colls.append(coll)\n    \n    if len(mode) == 1:\n        return colls[0]\n    else:\n        return colls\n\ndef calcSpecDimension(mode):\n\n    \"\"\"\n    :arg mode: mode or vector\n    :type mode: :class:`.Mode` or :class:`.Vector`\n\n    \"\"\"\n    # if not isinstance(mode, Mode):\n    #     raise TypeError('mode must be a Mode instance')\n    \n    length = mode.shape[0]\n    numbers = arange(2,length+1)\n    ds,p=polyfit(log(sqrt(mode[0:int(length*0.25)])),log(numbers[0:int(length*0.25)]),1)\n    \n    return ds\n\ndef calcFracDimension(mode):\n    \"\"\"\n    :arg mode: mode or vector\n    :type mode: mode or vector \"\"\"\n\n\n\n\ndef calcFractVariance(mode):\n    \"\"\"Returns fraction of variance explained by the *mode*. Fraction of\n    variance is the ratio of the variance along a mode to the trace of the\n    covariance matrix of the model.\"\"\"\n\n    if isinstance(mode, Mode):\n        var = mode.getVariance()\n        trace = mode.getModel()._getTrace()\n    elif isinstance(mode, (ModeSet, NMA)):\n        var = mode.getVariances()\n        if isinstance(mode, ModeSet):\n            trace = mode.getModel()._getTrace()\n        else:\n            trace = mode._getTrace()\n    else:\n        raise TypeError('mode must be a Mode instance')\n    if trace is None:\n        raise ValueError('modes are not calculated')\n\n    return var / trace\n\n\ndef calcProjection(ensemble, modes, rmsd=True, norm=True):\n    \"\"\"Returns projection of conformational deviations onto given modes.\n    *ensemble* coordinates are used to calculate the deviations that are\n    projected onto *modes*. For K conformations and M modes, a (K,M)\n    matrix is returned.\n\n    :arg ensemble: an ensemble, trajectory or a conformation for which\n        deviation(s) will be projected, or a deformation vector\n    :type ensemble: :class:`.Ensemble`, :class:`.Conformation`,\n        :class:`.Vector`, :class:`.Trajectory`\n    :arg modes: up to three normal modes\n    :type modes: :class:`.Mode`, :class:`.ModeSet`, :class:`.NMA`\n\n    By default root-mean-square deviation (RMSD) along the normal mode is\n    calculated. 
To obtain the raw projection values instead, pass ``rmsd=False``.\n    :class:`.Vector` instances are accepted as *ensemble* argument to allow\n    for projecting a deformation vector onto normal modes.\"\"\"\n\n    if not isinstance(ensemble, (Ensemble, Conformation, Vector, TrajBase)):\n        raise TypeError('ensemble must be Ensemble, Conformation, Vector, '\n                        'or a TrajBase, not {0}'.format(type(ensemble)))\n    if not isinstance(modes, (NMA, ModeSet, VectorBase)):\n        raise TypeError('modes must be NMA, ModeSet, or Mode, not {0}'\n                        .format(type(modes)))\n    if not modes.is3d():\n        raise ValueError('modes must be 3-dimensional')\n    if isinstance(ensemble, Vector):\n        n_atoms = ensemble.numAtoms()\n    else:\n        n_atoms = ensemble.numSelected()\n    if n_atoms != modes.numAtoms():\n        raise ValueError('number of atoms is not the same')\n    if isinstance(ensemble, Vector):\n        if not ensemble.is3d():\n            raise ValueError('ensemble must be a 3d vector instance')\n        deviations = ensemble._getArray()\n    elif isinstance(ensemble, (Ensemble, Conformation)):\n        deviations = ensemble.getDeviations()\n    else:\n        nfi = ensemble.nextIndex()\n        ensemble.goto(0)\n        deviations = np.array([frame.getDeviations() for frame in ensemble])\n        ensemble.goto(nfi)\n    if deviations.ndim == 3:\n        deviations = deviations.reshape((deviations.shape[0],\n                                         deviations.shape[1] * 3))\n    elif deviations.ndim == 2:\n        deviations = deviations.reshape((1, deviations.shape[0] * 3))\n    else:\n        deviations = deviations.reshape((1, deviations.shape[0]))\n    la = importLA()\n    if norm:\n        N = la.norm(deviations)\n        if N != 0:\n            deviations = deviations / N\n    projection = np.dot(deviations, modes._getArray())\n    if rmsd:\n        projection = (1 / (n_atoms ** 0.5)) * projection\n    return projection\n\n\ndef calcCrossProjection(ensemble, mode1, mode2, scale=None, **kwargs):\n    \"\"\"Returns projection of conformational deviations onto modes from\n    different models.\n\n    :arg ensemble: ensemble for which deviations will be projected\n    :type ensemble: :class:`.Ensemble`\n    :arg mode1: normal mode to project conformations onto\n    :type mode1: :class:`.Mode`, :class:`.Vector`\n    :arg mode2: normal mode to project conformations onto\n    :type mode2: :class:`.Mode`, :class:`.Vector`\n    :arg scale: scale width of the projection onto mode1 (``x``) or mode2 (``y``);\n        an optimized scaling factor is calculated by default, \n        or a value can be supplied with the ``scalar`` keyword.\"\"\"\n\n    if not isinstance(ensemble, (Ensemble, Conformation, Vector, TrajBase)):\n        raise TypeError('ensemble must be Ensemble, Conformation, Vector, '\n                        'or a Trajectory, not {0}'.format(type(ensemble)))\n    if not isinstance(mode1, VectorBase):\n        raise TypeError('mode1 must be a Mode instance, not {0}'\n                        .format(type(mode1)))\n    if not mode1.is3d():\n        raise ValueError('mode1 must be 3-dimensional')\n    if not isinstance(mode2, VectorBase):\n        raise TypeError('mode2 must be a Mode instance, not {0}'\n                        .format(type(mode2)))\n    if not mode2.is3d():\n        raise ValueError('mode2 must be 3-dimensional')\n\n    if scale is not None:\n        assert isinstance(scale, str), 'scale must be a string'\n        scale = scale.lower()\n        assert scale in ('x', 'y'), 'scale must be x or y'\n\n    xcoords = calcProjection(ensemble, mode1, kwargs.get('rmsd', True), kwargs.get('norm', True))\n    ycoords = calcProjection(ensemble, mode2, kwargs.pop('rmsd', True), kwargs.pop('norm', True))\n    if scale:\n        scalar = kwargs.get('scalar', None)\n        if scalar:\n            assert isinstance(scalar, (float, int)), 'scalar must be a number'\n        else:\n            scalar = ((ycoords.max() - ycoords.min()) /\n                      (xcoords.max() - xcoords.min())\n                      ) * 
np.sign(np.dot(xcoords, ycoords))\n if scale == 'x':\n LOGGER.info('Projection onto {0} is scaled by {1:.2f}'\n .format(mode1, scalar))\n else:\n scalar = 1 / scalar\n LOGGER.info('Projection onto {0} is scaled by {1:.2f}'\n .format(mode2, scalar))\n\n if scale == 'x':\n xcoords = xcoords * scalar\n else:\n ycoords = ycoords * scalar\n\n return xcoords, ycoords\n\n\ndef calcSqFlucts(modes):\n \"\"\"Returns sum of square-fluctuations for given set of normal *modes*.\n Square fluctuations for a single mode is obtained by multiplying the\n square of the mode array with the variance (:meth:`.Mode.getVariance`)\n along the mode. For :class:`.PCA` and :class:`.EDA` models built using\n coordinate data in Å, unit of square-fluctuations is |A2|, for\n :class:`.ANM` and :class:`.GNM`, on the other hand, it is arbitrary or\n relative units.\"\"\"\n\n if not isinstance(modes, (VectorBase, NMA, ModeSet)):\n raise TypeError('modes must be a Mode, NMA, or ModeSet instance, '\n 'not {0}'.format(type(modes)))\n is3d = modes.is3d()\n if isinstance(modes, Vector):\n if is3d:\n return (modes._getArrayNx3()**2).sum(axis=1)\n else:\n return (modes._getArray() ** 2)\n else:\n sq_flucts = np.zeros(modes.numAtoms())\n if isinstance(modes, VectorBase):\n modes = [modes]\n for mode in modes:\n if is3d:\n sq_flucts += ((mode._getArrayNx3()**2).sum(axis=1) *\n mode.getVariance())\n else:\n sq_flucts += (mode._getArray() ** 2) * mode.getVariance()\n return sq_flucts\n\n\ndef calcCrossCorr(modes, n_cpu=1):\n \"\"\"Returns cross-correlations matrix. For a 3-d model, cross-correlations\n matrix is an NxN matrix, where N is the number of atoms. Each element of\n this matrix is the trace of the submatrix corresponding to a pair of atoms.\n Covariance matrix may be calculated using all modes or a subset of modes\n of an NMA instance. For large systems, calculation of cross-correlations\n matrix may be time consuming. 
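A typical call looks as follows (a sketch; ``anm`` is assumed to be a\n    previously calculated :class:`.ANM` instance)::\n\n        cross_correlations = calcCrossCorr(anm)  # (n_atoms, n_atoms) array in [-1, 1]\n\n    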
Optionally, multiple processors may be\n employed to perform calculations by passing ``n_cpu=2`` or more.\"\"\"\n\n if not isinstance(n_cpu, int):\n raise TypeError('n_cpu must be an integer')\n elif n_cpu < 1:\n raise ValueError('n_cpu must be equal to or greater than 1')\n\n if not isinstance(modes, (Mode, NMA, ModeSet)):\n raise TypeError('modes must be a Mode, NMA, or ModeSet instance, '\n 'not {0}'.format(type(modes)))\n\n if modes.is3d():\n model = modes\n if isinstance(modes, (Mode, ModeSet)):\n model = modes._model\n if isinstance(modes, (Mode)):\n indices = [modes.getIndex()]\n n_modes = 1\n else:\n indices = modes.getIndices()\n n_modes = len(modes)\n else:\n n_modes = len(modes)\n indices = np.arange(n_modes)\n array = model._getArray()\n n_atoms = model._n_atoms\n variances = model._vars\n if n_cpu == 1:\n s = (n_modes, n_atoms, 3)\n arvar = (array[:, indices]*variances[indices]).T.reshape(s)\n array = array[:, indices].T.reshape(s)\n covariance = np.tensordot(array.transpose(2, 0, 1),\n arvar.transpose(0, 2, 1),\n axes=([0, 1], [1, 0]))\n else:\n import multiprocessing\n n_cpu = min(multiprocessing.cpu_count(), n_cpu)\n queue = multiprocessing.Queue()\n size = n_modes / n_cpu\n for i in range(n_cpu):\n if n_cpu - i == 1:\n indices = modes.indices[i*size:]\n else:\n indices = modes.indices[i*size:(i+1)*size]\n process = multiprocessing.Process(\n target=_crossCorrelations,\n args=(queue, n_atoms, array, variances, indices))\n process.start()\n while queue.qsize() < n_cpu:\n time.sleep(0.05)\n covariance = queue.get()\n while queue.qsize() > 0:\n covariance += queue.get()\n else:\n covariance = calcCovariance(modes)\n diag = np.power(covariance.diagonal(), 0.5)\n return covariance / np.outer(diag, diag)\n\n\ndef _crossCorrelations(queue, n_atoms, array, variances, indices):\n \"\"\"Calculate covariance-matrix for a subset of modes.\"\"\"\n\n n_modes = len(indices)\n arvar = (array[:, indices] * variances[indices]).T.reshape((n_modes,\n n_atoms, 3))\n array = array[:, indices].T.reshape((n_modes, n_atoms, 3))\n covariance = np.tensordot(array.transpose(2, 0, 1),\n arvar.transpose(0, 2, 1),\n axes=([0, 1], [1, 0]))\n queue.put(covariance)\n\n\ndef calcTempFactors(modes, atoms):\n \"\"\"Returns temperature (β) factors calculated using *modes* from a\n :class:`.ANM` or :class:`.GNM` instance scaled according to the\n experimental β-factors from *atoms*.\"\"\"\n\n model = modes.getModel()\n if not isinstance(model, GNMBase):\n raise TypeError('modes must come from GNM or ANM')\n if model.numAtoms() != atoms.numAtoms():\n raise ValueError('modes and atoms must have same number of nodes')\n sqf = calcSqFlucts(modes)\n return sqf / ((sqf**2).sum()**0.5) * (atoms.getBetas()**2).sum()**0.5\n\n\ndef calcCovariance(modes):\n \"\"\"Returns covariance matrix calculated for given *modes*.\"\"\"\n\n if isinstance(modes, Mode):\n array = modes._getArray()\n return np.outer(array, array) * modes.getVariance()\n elif isinstance(modes, ModeSet):\n array = modes._getArray()\n return np.dot(array, np.dot(np.diag(modes.getVariances()), array.T))\n elif isinstance(modes, NMA):\n return modes.getCovariance()\n else:\n raise TypeError('modes must be a Mode, NMA, or ModeSet instance')\n\n\ndef calcPerturbResponse(model, atoms=None, **kwargs):\n\n \"\"\"Returns a matrix of profiles from scanning of the response of the\n structure to random perturbations at specific atom (or node) positions.\n The function implements the perturbation response scanning (PRS) method\n described in [CA09]_. 
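In its simplest form it is called as follows (a sketch; ``anm`` and\n    ``atoms`` are assumed to be matching :class:`.ANM` and :class:`.AtomGroup`\n    instances)::\n\n        prs_matrix = calcPerturbResponse(anm, atoms=atoms)\n        profile = atoms.getData('prs_profile')\n\n    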
Rows of the matrix are the average magnitude of the\n responses obtained by perturbing the atom/node position at that row index,\n i.e. ``prs_profile[i,j]`` will give the response of residue/node *j* to\n perturbations in residue/node *i*. PRS is performed using the covariance\n matrix from *model*, e.g. :class:`.ANM` instance. Each residue/node is\n perturbed *repeats* times with a random unit force vector. When *atoms*\n instance is given, PRS profile for residues will be added as an attribute\n which then can be retrieved as ``atoms.getData('prs_profile')``. *model*\n and *atoms* must have the same number of atoms. *atoms* must be an\n :class:`.AtomGroup` instance. write_output is a Bool variable to write\n normalized asymmetric PRS matrix to a file. \n\n .. [CA09] Atilgan C, Atilgan AR, Perturbation-Response Scanning\n Reveals Ligand Entry-Exit Mechanisms of Ferric Binding Protein.\n *PLoS Comput Biol* **2009** 5(10):e1000544.\n\n The PRS matrix can be calculated and saved as follows::\n\n prs_matrix = calcPerturbResponse(p38_anm, saveMatrix=True)\n \n The PRS matrix can also be save later as follows::\n \n writeArray('prs_matrix.txt', prs_matrix, format='%8.6f', delimiter='\\t')\n\n You can also control which operation is used for getting a single matrix\n from the repeated force application and whether to normalise the matrix\n at the end. If you do choose to normalise the matrix, you can still save\n the original matrix before normalisation as well.\n\n :arg operation: which operation to perform to get a single response matrix::\n the mean, variance, max or min of the set of repeats. Another operation \n is to select elements from the matrix showing biggest difference from \n the square sum of the covariance matrix. The Default is the mean.\n To obtain all response matrices, set operation=None without quotes.\n You can also ask for 'all' operations or provide a list containing\n any set of them.\n :type operation: str or list\n\n :arg noForce: whether to use the covariance matrix directly rather\n than applying forces. This appears to be equivalent when scanning for\n response magnitudes and will be much quicker. Default is True.\n :type noForce: bool\n\n :arg normMatrix: whether to normalise the single response matrix by\n dividing each row by its diagonal, Default is True\n :type normMatrix: bool\n\n :arg saveMatrix: whether to save the last matrix generated to a text file.\n Default is False\n :type saveMatrix: bool\n\n :arg saveOrig: whether to save the original matrix despite normalisation.\n This is the same as saveMatrix when not normalizing. Default is False\n :type saveOrig: bool\n\n :arg baseSaveName: The central part of the file name for saved\n matrices, which you can set. This is surrounded by underscores. \n The beginning says orig or norm and the end says which operation \n was used. Default is 'response_matrix'.\n :type baseSaveName: str\n\n :arg acceptDirection: select reference direction for forces to be accepted.\n Can be 'in' (towards center of atoms), 'out' (away from center),\n or 'all'. 
Default is 'all'; Using other directions requires atoms.\n :type acceptDirection: str\n \"\"\"\n noForce = kwargs.get('noForce',True)\n repeats = kwargs.get('repeats', 100)\n if not noForce:\n operation = kwargs.get('operation','mea')\n\n if operation is not None:\n if type(operation) is str:\n if operation == 'all' or operation == 'all operations':\n operationList = ['var','mea','max','min','dif']\n else:\n operationList = []\n operationList.append(operation.lower()[:3])\n elif type(operation) is list:\n operationList = operation\n for i in range(len(operationList)):\n operationList[i] = operationList[i].lower()[:3]\n\n operationList = np.array(operationList) \n found_valid_operation = False\n\n if 'var' in operationList:\n found_valid_operation = True\n\n if 'max' in operationList:\n found_valid_operation = True\n\n if 'mea' in operationList:\n found_valid_operation = True\n\n if 'min' in operationList:\n found_valid_operation = True\n\n if 'dif' in operationList:\n found_valid_operation = True\n\n if not found_valid_operation:\n raise ValueError('Operation should be mean, variance, max, min or ' \\\n 'or difference (from covariance matrix) in quotes ' \\\n 'or a list containing a set of these or None.')\n\n if not isinstance(model, (NMA, ModeSet, Mode)):\n raise TypeError('model must be an NMA, ModeSet, or Mode instance')\n elif not model.is3d() and not noForce:\n raise TypeError('model must be a 3-dimensional NMA instance' \\\n 'for using PRS with force')\n if isinstance(model, NMA) and len(model) == 0:\n raise ValueError('model must have normal modes calculated')\n\n if atoms is not None:\n if isinstance(atoms, Selection):\n atoms = atoms.copy()\n if not isinstance(atoms, AtomGroup):\n raise TypeError('atoms must be an AtomGroup instance')\n elif atoms.numAtoms() != model.numAtoms():\n raise ValueError('model and atoms must have the same number atoms')\n\n n_atoms = model.numAtoms()\n LOGGER.timeit('_prody_prs_all')\n LOGGER.info('Calculating covariance matrix')\n LOGGER.timeit('_prody_cov')\n\n assert isinstance(repeats, int), 'repeats must be an integer'\n cov = calcCovariance(model)\n if cov is None:\n raise ValueError('model did not return a covariance matrix')\n\n LOGGER.clear()\n LOGGER.report('Covariance matrix calculated in %.1fs.',\n '_prody_cov')\n\n LOGGER.progress('Calculating perturbation response', n_atoms, '_prody_prs_mat')\n matrix_dict = {}\n\n if noForce or 'dif' in operationList:\n if not model.is3d():\n n_by_n_cov_squared = cov**2\n\n else:\n cov_squared = cov**2\n n_by_3n_cov_squared = np.zeros((n_atoms, 3 * n_atoms))\n n_by_n_cov_squared = np.zeros((n_atoms, n_atoms))\n i3 = -3\n i3p3 = 0\n for i in range(n_atoms):\n i3 += 3\n i3p3 += 3\n n_by_3n_cov_squared[i,:] = (cov_squared[i3:i3p3,:]).sum(0)\n\n j3 = -3\n j3p3 = 0\n for j in range(n_atoms):\n j3 += 3\n j3p3 += 3 \n n_by_n_cov_squared[:,j] = (n_by_3n_cov_squared[:,j3:j3p3]).sum(1)\n\n if noForce:\n matrix_dict['noForce'] = n_by_n_cov_squared\n LOGGER.clear()\n LOGGER.report('Perturbation response matrix calculated in %.1fs.',\n '_prody_prs_mat')\n\n else:\n\n acceptDirection = kwargs.get('acceptDirection','all')\n if acceptDirection is not 'all':\n if atoms is None:\n acceptDirection = 'all'\n LOGGER.info('A specific direction for accepting forces was' \\\n ' provided without an atoms object. 
This' \\\n ' direction will be ignored and all forces will' \\\n ' be accepted.')\n else:\n coords = atoms.getCoords()\n atoms_center = array([np.mean(coords[:,0]), \\\n np.mean(coords[:,1]), \\\n np.mean(coords[:,2])])\n \n mag = kwargs.get('mag',1)\n response_matrix = np.zeros((repeats, n_atoms, n_atoms)) \n i3 = -3\n i3p3 = 0\n for i in range(n_atoms):\n i3 += 3\n i3p3 += 3\n forces = np.random.randn(repeats * 3).reshape((repeats, 3))\n forces /= ((forces**2).sum(1)**0.5).reshape((repeats, 1)) * mag\n for n in range(repeats):\n force = forces[n]\n\n if acceptDirection is 'in' or acceptDirection is 'out':\n res_coords = atoms.getCoords()[i]\n vec_to_center = atoms_center - res_coords\n vec_to_center /= (((atoms_center - res_coords)**2).sum()**0.5)\n force_overlap = np.dot(force,vec_to_center)\n\n if acceptDirection is 'in' and force_overlap < 0:\n force *= -1\n\n if acceptDirection is 'out' and force_overlap > 0:\n force *= -1\n\n response_matrix[n,i,:] = (\n np.dot(cov[:, i3:i3p3], force)\n ** 2).reshape((n_atoms, 3)).sum(1)\n LOGGER.update(i, '_prody_prs_mat')\n\n LOGGER.clear()\n LOGGER.report('Perturbation response scanning matrix calculated in %.1fs.',\n '_prody_prs_mat')\n\n LOGGER.progress('Performing matrix combination operations', n_atoms, \\\n '_prody_prs_ops')\n\n if 'var' in operationList:\n matrix_dict['var'] = np.var(response_matrix,axis=0) \n\n if 'max' in operationList:\n matrix_dict['max'] = np.amax(response_matrix,axis=0)\n\n if 'mea' in operationList:\n matrix_dict['mea'] = np.mean(response_matrix,axis=0) \n\n if 'min' in operationList:\n matrix_dict['min'] = np.amin(response_matrix,axis=0)\n\n if 'dif' in operationList:\n matrix_dict['dif'] = np.max(abs(response_matrix - n_by_n_cov_squared) \\\n , axis=0)\n\n LOGGER.report('Perturbation response matrix operations completed in %.1fs.',\n '_prody_prs_ops')\n\n if operation is None:\n LOGGER.info('Operation is None so all {0} repeats are output.' \\\n ' This is not compatible with saving, normalizing' \\\n ' or mapping to atoms at present.'.format(repeats))\n return response_matrix\n\n if atoms is not None: \n atoms.setData('prs_profile', matrix_dict[matrix_dict.keys()[0]])\n if len(matrix_dict.keys()) > 1:\n LOGGER.info('Only one matrix can be added as data to atoms so' \\\n ' the first one was chosen. 
The operation that generated' \\\n ' it was {0} (1st 3 letters).'.format(matrix_dict.keys()[0]))\n\n saveOrig = kwargs.get('saveOrig',False)\n saveMatrix = kwargs.get('saveMatrix',False)\n normMatrix = kwargs.get('normMatrix',True)\n suppressDiag = kwargs.get('suppressDiag',False)\n baseSaveName = kwargs.get('baseSaveName','response_matrix')\n\n if saveOrig == True or saveMatrix == True and normMatrix == False:\n # save the original PRS matrix for each operation\n for m in matrix_dict.keys():\n np.savetxt('orig_{0}_{1}.txt'.format(baseSaveName,m), \\\n matrix_dict[m], delimiter='\\t', fmt='%8.6f')\n \n if normMatrix == True:\n norm_PRS_mat = {}\n # calculate the normalized PRS matrix for each operation\n for m in matrix_dict.keys():\n self_dp = np.diag(matrix_dict[m]) # using self displacement (diagonal of\n # the original matrix) as a\n # normalization factor\n self_dp = self_dp.reshape(n_atoms, 1)\n norm_PRS_mat[m] = matrix_dict[m] / np.repeat(self_dp, n_atoms, axis=1)\n\n if suppressDiag == True:\n # suppress the diagonal (self displacement) to facilitate\n # visualizing the response profile\n norm_PRS_mat[m] = norm_PRS_mat[m] - np.diag(np.diag(norm_PRS_mat[m]))\n\n if saveMatrix == True:\n np.savetxt('norm_{0}_{1}.txt'.format(baseSaveName,m), \\\n norm_PRS_mat[m], delimiter='\\t', fmt='%8.6f')\n\n LOGGER.report('Perturbation response scanning completed in %.1fs.',\n '_prody_prs_all')\n\n matrix_list = []\n for m in matrix_dict.keys():\n if normMatrix == True:\n matrix_list.append(norm_PRS_mat[m])\n else:\n matrix_list.append(matrix_dict[m])\n matrix_array = array(matrix_list)\n\n returnFormat = kwargs.get('returnFormat','array')\n returnFormat = returnFormat.lower()\n\n if len(matrix_array) == 1:\n return matrix_array.reshape(n_atoms,n_atoms)\n \n if returnFormat is 'both':\n LOGGER.info('You have requested return in both formats.' \\\n ' Array comes first.')\n return matrix_array, matrix_dict\n elif 'dict' in returnFormat:\n LOGGER.info('Output has been returned as a dictionary of matrices.')\n return matrix_dict\n else:\n LOGGER.info('Output has been returned as an array of matrices,' \\\n ' which you can split into individual matrices.')\n return matrix_array\n\ndef parsePerturbResponseMatrix(prs_matrix_file='prs_matrix.txt',normMatrix=True):\n \"\"\"Parses a perturbation response matrix from a file into a numpy ndarray.\n\n :arg prs_matrix_file: name of the file containing a PRS matrix, default is\n 'prs_matrix.txt' as is used in the example under calcPerturbResponse.\n :type prs_matrix_file: str\n\n :arg normMatrix: whether to normalise the PRS matrix after parsing it.\n Default is True. 
If you have already normalised this before saving,\n or you do not want it normalised then set this to False.\n :type norm: bool\n\n \"\"\"\n fmat = open(prs_matrix_file,'r')\n matlines = fmat.readlines()\n fmat.close()\n\n prs_matrix = []\n for line in matlines:\n prs_matrix.append(line.split())\n\n for i in range(len(prs_matrix)):\n for j in range(len(prs_matrix)):\n prs_matrix[i][j] = float(prs_matrix[i][j])\n\n prs_matrix = np.array(prs_matrix)\n\n if normMatrix == True:\n # normalize the PRS matrix\n self_dp = np.diag(prs_matrix) # using self displacement (diagonal of\n # the original matrix) as a\n # normalization factor\n self_dp = self_dp.reshape(len(prs_matrix), 1)\n norm_PRS_mat = prs_matrix / np.repeat(self_dp, len(prs_matrix), axis=1)\n return norm_PRS_mat\n\n else:\n return prs_matrix\n\ndef calcPerturbResponseProfiles(prs_matrix):\n \"\"\" Calculate the effectiveness and sensitivity\n profiles, which are the averages over the rows\n and columns of the PRS matrix.\n\n :arg prs_matrix: a perturbation response matrix\n :type prs_matrix: ndarray \n \"\"\"\n\n effectiveness = []\n sensitivity = []\n for i in range(len(prs_matrix)):\n effectiveness.append(np.mean(prs_matrix[i]))\n sensitivity.append(np.mean(prs_matrix.T[i]))\n\n return np.array(effectiveness), np.array(sensitivity)\n\ndef writePerturbResponsePDB(prs_matrix,pdbIn,**kwargs):\n \"\"\" Write the average response to perturbation of\n a particular residue (a row of a perturbation response matrix)\n or the average effect of perturbation of a particular residue\n (a column of a normalized perturbation response matrix)\n into the b-factor field of a PDB file for visualisation in a\n molecular graphics program.\n If no chain is given this will be done for that residue in all chains.\n\n If no residue number is given then the effectiveness and sensitivity\n profiles will be written out instead. These two profiles are also returned\n as arrays for further analysis if they aren't already provided.\n\n :arg prs_matrix: a perturbation response matrix\n :type prs_matrix: ndarray\n\n :arg pdbIn: file name for the input PDB file where you would like the PRS\n data mapped\n :type pdbIn: str\n\n :arg pdbOut: a list of file names (enclosed in square\n brackets) for the output PDB file, default is to append\n the chain and residue info (name and number) onto the pdbIn stem.\n The input for pdbOut can also be used as a stem if you enter a \n single string enclosed in quotes.\n If no residue number is supplied, chain is ignored and the default \n is to append '_effectiveness' and '_sensitivity' onto the stem.\n :type pdbOut: list\n\n :arg chain: chain identifier for the residue of interest, default is all chains\n If you want to analyse residues in a subset of chains, concatentate them\n together e.g. 
'AC'\n :type chain: str\n\n :arg resnum: residue number for the residue of interest\n :type resnum: int\n\n :arg direction: the direction you want to use to read data out\n of the PRS matrix for plotting: the options are 'effect' or 'response'.\n Default is 'effect'.\n A row gives the effect on each residue of peturbing the specified \n residue.\n A column gives the response of the specified residue to perturbing \n each residue.\n If no residue number is provided then this option will be ignored\n :type direction: str\n\n :arg returnData: whether to return effectiveness and sensitivity for analysis\n default is False\n :type returnProfiles: bool\n \"\"\"\n\n if not type(prs_matrix) is np.ndarray:\n raise TypeError('Please provide a valid PRS matrix in numpy ndarray format.')\n \n try:\n fi = open(pdbIn,'r')\n lines = fi.readlines()\n fi.close()\n except:\n raise PRSMatrixParseError('Please provide a valid file name for the input PDB.')\n \n chain = kwargs.get('chain', None)\n structure = parsePDB(pdbIn).calpha\n hv = structure.getHierView()\n chains = []\n for i in range(len(list(hv))):\n chainAg = list(hv)[i]\n chains.append(chainAg.getChids()[0])\n\n chains = np.array(chains)\n if chain is None:\n chain = ''.join(chains)\n\n resnum = kwargs.get('resnum', None)\n pdbOut = kwargs.get('pdbOut', None)\n if pdbOut is None:\n out_stem = pdbIn.split('.')[0]\n elif type(pdbOut) is str:\n out_stem = pdbOut.split('.')[0]\n pdbOut = None\n\n if resnum is None:\n effectiveness = kwargs.get('effectiveness')\n sensitivity = kwargs.get('sensitivity')\n if effectiveness is None or sensitivity is None:\n effectiveness, sensitivity = calcPerturbResponseProfiles(prs_matrix)\n\n file_effs_name = '{0}_effectiveness.pdb'.format(out_stem)\n file_sens_name = '{0}_sensitivity.pdb'.format(out_stem)\n fileEffs = open(file_effs_name,'w')\n fileSens = open(file_sens_name,'w')\n\n for line in lines: \n if line.find('ATOM') != 0 and line.find('HETATM') != 0 and line.find('ANISOU') != 0:\n fileEffs.write(line) \n fileSens.write(line)\n elif line.find('ATOM') == 0:\n sel_line_res = structure.select('resid {0}'.format(line[22:26]))\n j = np.where(structure.getResnums() == int(line[22:26]))[0] \\\n [np.where(sel_line_res.getChids() == line[21])[0][0]]\n fileEffs.write(line[:60] + ' '*(6-len('{:3.2f}'.format(( \\\n effectiveness[j]*100/np.max(effectiveness))))) \\\n + '{:3.2f}'.format((effectiveness[j]) \\\n *100/np.max(effectiveness)) + line[66:])\n fileSens.write(line[:60] + ' '*(6-len('{:3.2f}'.format((\\\n sensitivity[j]*100/np.max(sensitivity))))) \\\n + '{:3.2f}'.format((sensitivity[j]) \\\n *100/np.max(sensitivity)) + line[66:])\n elif line.find('HETATM') == 0:\n fileEffs.write(line[:60] + ' '*2 + '0.00' + line[66:])\n fileSens.write(line[:60] + ' '*2 + '0.00' + line[66:])\n \n fileEffs.close()\n fileSens.close()\n LOGGER.info('The effectiveness and sensitivity profiles were written' \\\n ' to {0} and {1}.'.format(file_effs_name,file_sens_name))\n\n returnData = kwargs.get('returnData',False)\n if returnData:\n return effectiveness, sensitivity\n else:\n return\n \n timesNF = 0\n direction = kwargs.get('direction','effect')\n for n in range(len(chain)):\n if not chain[n] in chains:\n raise PRSMatrixParseError('Chain {0} was not found in {1}'.format(chain[n], pdbIn))\n\n chainNum = int(np.where(chains == chain[n])[0])\n chainAg = list(hv)[chainNum]\n if not resnum in chainAg.getResnums():\n LOGGER.info('A residue with number {0} was not found',\n ' in chain {1}. Continuing to next chain.' 
\\\n .format(resnum, chain[n]))\n timesNF += 1\n continue\n\n if pdbOut is None:\n pdbOut = []\n for n in range(len(chain)):\n chainNum = int(np.where(chains == chain[n])[0])\n i = np.where(structure.getResnums() == resnum)[0][chainNum-timesNF]\n pdbOut.append('{0}_{1}_{2}{3}_{4}.pdb'.format(out_stem, chain[n], \\\n structure.getResnames()[i], resnum, direction))\n\n for n in range(len(chain)):\n chainNum = int(np.where(chains == chain)[0])\n i = np.where(structure.getResnums() == resnum)[0][chainNum-timesNF]\n fo = open(pdbOut[n],'w')\n for line in lines:\n if line.find('ATOM') != 0 and line.find('HETATM') != 0 and line.find('ANISOU') != 0:\n fo.write(line)\n elif line.find('ATOM') == 0:\n sel_line_res = structure.select('resid {0}'.format(line[22:26]))\n j = np.where(structure.getResnums() == int(line[22:26]))[0] \\\n [np.where(sel_line_res.getChids() == line[21])[0][0]]\n\n if direction is 'effect':\n fo.write(line[:60] + ' '*(6-len('{:3.2f}'.format(( \\\n prs_matrix[i][j])*100/np.max(prs_matrix)))) \\\n + '{:3.2f}'.format((prs_matrix[i][j]) \\\n *100/np.max(prs_matrix)) + line[66:])\n else:\n fo.write(line[:60] + ' '*(6-len('{:3.2f}'.format(( \\\n prs_matrix[j][i])*100/np.max(prs_matrix)))) \\\n + '{:3.2f}'.format((prs_matrix[j][i]) \\\n *100/np.max(prs_matrix)) + line[66:])\n elif line.find('HETATM') == 0:\n fo.write(line[:60] + ' '*2 + '0.00' + line[66:])\n\n LOGGER.info('Perturbation responses for specific residues were written' \\\n ' to {0}.'.format(', '.join(pdbOut)))\n\n return\n\n\ndef calcPairDeformationDist(model, coords, ind1, ind2, kbt=1.):\n \n \"\"\"Returns distribution of the deformations in the distance contributed by each mode \n for selected pair of residues *ind1* *ind2* using *model* from a :class:`.ANM`.\n Method described in [EB08]_ equation (10) and figure (2). \n \n .. [EB08] Eyal E., Bahar I. Toward a Molecular Understanding of \n the Anisotropic Response of Proteins to External Forces:\n Insights from Elastic Network Models. *Biophys J* **2008** 94:3424-34355. 
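\n\n    A minimal usage sketch; ``anm`` is assumed to be an :class:`.ANM` instance\n    that already carries a stiffness matrix (see ``model.getStiffness()``\n    below), and the residue numbers 3 and 132 are placeholders::\n\n        mode_nrs, deformations = calcPairDeformationDist(anm, coords, 3, 132)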
\n    \n    :arg model: a 3-dimensional NMA instance from :class:`.ANM`\n        calculations.\n    :type model: :class:`.ANM`  \n    :arg coords: a coordinate set or an object with ``getCoords`` method.\n      Recommended: coords = parsePDB('pdbfile').select('protein and name CA').\n    :type coords: :class:`numpy.ndarray`.\n    :arg ind1: first residue number.\n    :type ind1: int \n    :arg ind2: second residue number.\n    :type ind2: int \n    \"\"\"\n\n    try:\n        resnum_list = coords.getResnums()\n        resnam_list = coords.getResnames()\n        coords = (coords._getCoords() if hasattr(coords, '_getCoords') else\n                coords.getCoords())\n    except AttributeError:\n        try:\n            checkCoords(coords)\n        except TypeError:\n            raise TypeError('coords must be a Numpy array or an object '\n                            'with `getCoords` method')\n    \n    if not isinstance(model, NMA):\n        raise TypeError('model must be an NMA instance')\n    elif not model.is3d():\n        raise TypeError('model must be a 3-dimensional NMA instance')\n    elif len(model) == 0:\n        raise ValueError('model must have normal modes calculated')\n    elif model.getStiffness() is None:\n        raise ValueError('model must have stiffness matrix calculated')\n    \n    linalg = importLA()\n    n_atoms = model.numAtoms()\n    n_modes = model.numModes()\n    LOGGER.timeit('_pairdef')\n\n    r_ij = np.zeros((n_atoms,n_atoms,3))\n    r_ij_norm = np.zeros((n_atoms,n_atoms,3))\n\n    for i in range(n_atoms):\n        for j in range(i+1,n_atoms):\n            r_ij[i][j] = coords[j,:] - coords[i,:]\n            r_ij[j][i] = r_ij[i][j]\n            r_ij_norm[i][j] = r_ij[i][j]/linalg.norm(r_ij[i][j])\n            r_ij_norm[j][i] = r_ij_norm[i][j]\n\n    eigvecs = model.getEigvecs()\n    eigvals = model.getEigvals()\n    \n    D_pair_k = []\n    mode_nr = []\n    ind1 = ind1 - resnum_list[0]\n    ind2 = ind2 - resnum_list[0]\n\n    for m in range(6,n_modes):\n        U_ij_k = [(eigvecs[ind1*3][m] - eigvecs[ind2*3][m]), (eigvecs[ind1*3+1][m] \\\n        - eigvecs[ind2*3+1][m]), (eigvecs[ind1*3+2][m] - eigvecs[ind2*3+2][m])] \n        D_ij_k = abs(np.sqrt(kbt/eigvals[m])*(np.vdot(r_ij_norm[ind1][ind2], U_ij_k))) \n        D_pair_k.append(D_ij_k)\n        mode_nr.append(m)\n\n    LOGGER.report('Deformation was calculated in %.2fs.', label='_pairdef')\n    \n    return mode_nr, D_pair_k\n\n","sub_path":"covCys/setup/ProDy-1.9.3-p3/prody/dynamics/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":40792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"45573825","text":"from BFS import *\nimport pandas as pd\nimport numpy as np\nfrom statsmodels.base.model import GenericLikelihoodModel\n\ndef test_all_legal_moves(car_list, answer):\n\tall_moves = all_legal_moves(car_list, Board(car_list))\n\tassert len(all_moves) == answer, 'test_all_legal_moves FAILED, observe '+str(len(all_moves))\n\ndef test_InitializeChildren(node, params, answer):\n\tInitializeChildren(node, params)\n\tassert len(node.children) == answer, 'test_InitializeChildren FAILED, observe '+str(len(node.children))\n\ttest_Node(node.children[1], params)\n\ndef test_move(car_list, car_tag, to_position, params, answer):\n\tprint('-------------- test move ----------------')\n\tnew_list, new_red = move(car_list, car_tag, to_position)\n\tnew_node = Node(new_list, params)\n\ttest_Node(new_node, params)\n\ttest_InitializeChildren(new_node, params, answer)\n\ttest_all_legal_moves(new_list, answer)\n\ndef test_red(node):\n\tassert node.red == node.board.red, 'test_red FAILED'\n\ndef test_Node(node, params):\n\tprint('--------------- test node ---------------')\n\tprint(node.board_to_str())\n\ttest_red(node)\n\tprint(node.value)\n\tfor car in node.car_list:\n\t\tprint('Car '+ 
car.tag \n\t\t\t+ ', edge_to:'+str([str(i.tag) for i in car.edge_to])\n\t\t\t+ ', levels:'+str(car.level))\n\ndef test_is_solved(board, red, answer):\n\tassert is_solved(board, red) == answer, \"test_is_solved FAILED\"\n\ndef my_negll(x,y,beta,sigma=1):\n\t'''\n\t\treturn negative log likelihood for current parameters\n\t\tBADS will find minimum based on this function value\n\t'''\n\tN = len(y)\n\tRSS = np.sum((y-(beta[0]*x[:,-1]+beta[1]))**2) # residual sum of squares\n\t# total Gaussian negative log likelihood over all N observations\n\treturn N*np.log(np.sqrt(2*np.pi*sigma**2))+(1/(2*sigma**2))*RSS\n\ndef find_line(xs, ys):\n    \"\"\"Calculates the slope and intercept\"\"\"\n    # number of points\n    n = len(xs)\n    # calculate means\n    x_bar = sum(xs)/n\n    y_bar = sum(ys)/n \n    # calculate slope\n    num = 0\n    denom = 0\n    for i in range(n):\n        num += (xs[i]-x_bar)*(ys[i]-y_bar)\n        denom += (xs[i]-x_bar)**2\n    slope = num/denom\n    # calculate intercept\n    intercept = y_bar - slope*x_bar\n    return slope, intercept\n\nclass MyOLS(GenericLikelihoodModel):\n\tdef __init__(self, endog, exog, **kwds): # endog=y, exog=x\n\t\tsuper(MyOLS, self).__init__(endog, exog, **kwds)\n\tdef nloglikeobs(self, params):\n\t\tsigma = params[-1]\n\t\tbeta = params[:-1]\n\t\t# print('params: '+str(params))\n\t\t# print('sigma: '+str(sigma))\n\t\t# print('beta: '+str(beta))\n\t\treturn my_negll(self.exog, self.endog, beta, sigma)\n\tdef fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):\n\t\t# we have one additional parameter and we need to add it for summary\n\t\tself.exog_names.append('sigma')\n\t\tif start_params is None:\n\t\t    # Reasonable starting values\n\t\t    start_params = np.append(np.zeros(self.exog.shape[1]), .5)\n\t\t    print('start_params '+str(start_params))\n\t\treturn super(MyOLS, self).fit(start_params=start_params,\n\t\t\t\t\t      maxiter=maxiter, maxfun=maxfun,\n\t\t\t\t\t      **kwds)\ndef test_MLE():\n\tN = 10\n\tx = 10 + 2*np.random.randn(N)\n\ty = 5 + x + np.random.randn(N)\n\tdf = pd.DataFrame({'y':y, 'x':x})\n\tdf['constant'] = 1\n\n\tprint('x: '+str(x))\n\tprint('y: '+str(y))\n\n\tsm_ols_manual = MyOLS(df.y,df[['constant','x']]).fit()\n\tprint(sm_ols_manual.summary())\n\tprint('find_line: '+str(find_line(x,y)))\n\n\n\n","sub_path":"scripts/unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"192159482","text":"#! 
/usr/bin/env python3\n#NoGuiLinux\n\nimport urllib.request,urllib.error\nimport json, argparse\nfrom xml.etree.ElementTree import Element as element,SubElement as subElement, tostring as ts\n\nclass container:\n master=None\n class web:\n types=['search','info']\n rpc='/rpc/?v=5&type={}&arg={}'\n url='https://aur.archlinux.org'\n rpcUrl=url+rpc\n master=None\n def connect(self,t='info',pkg=''):\n try:\n conn=urllib.request.urlopen(self.rpcUrl.format(t,pkg))\n return [conn,pkg]\n except urllib.error.HTTPError as err:\n exit(str(err))\n except urllib.error.URLError as err:\n exit(str(err))\n\n \n def jsonParseNames(self,J=b''):\n load=json.load(J[0])\n result=load['results']\n come=[]\n if result != []:\n pkgs={}\n for item in result:\n for pk in ['Name','Description']:\n try:\n if pk == 'Description':\n item[pk]=item[pk]\n pkgs[pk.lower()]=item[pk]\n except:\n pass\n come.append(pkgs)\n pkgs={}\n return come,J[1]\n else:\n result=load['resultcount']\n return [{\n 'name':self.master.cmdline.options.package,\n 'description':'ERROR_NO_RESULTS',\n }],J[1]\n\n\n \n\n def jsonParsePkgs(self,J=b''):\n load=json.load(J[0])\n result=load['results']\n if result != []:\n result=result[0]\n pkgs={}\n for pk in ['Name','Depends','OptDepends','Conflicts','Replaces','URLPath','Description']:\n try:\n if pk == 'URLPath':\n result[pk]=[self.url+result[pk]]\n if pk == 'Description':\n result[pk]=[result[pk]]\n pkgs[pk.lower()]=result[pk]\n except:\n pass\n \n return pkgs\n else:\n result=load['resultcount']\n return {\n 'name':self.master.cmdline.options.package,\n 'error':['RESULT_{}'.format(result)],\n }\n\n class HiLvl:\n master=None\n def depends(self,pk):\n result=self.master.web.connect(pkg=pk)\n pkgs=self.master.web.jsonParsePkgs(result)\n return pkgs\n \n def similarPkgs(self,pk):\n result=self.master.web.connect(pkg=pk,t='search')\n pkgs=self.master.web.jsonParseNames(result)\n return pkgs\n\n class display:\n master=None\n def displayXML(self,data):\n pass\n\n def displayPlain(self,data):\n for i in data:\n formString='|'.join(['{}' for i in range(len(i))]).format(*i)\n print(formString)\n\n def cleanStream(self,data):\n for char in ['<','>','<=','>=','=']:\n if char in data:\n data=data.split(char)[0]\n return data\n\n def displayXml(self,data):\n top=element(data[0][0],pkgname=self.cleanStream(data[0][1]))\n num=0\n diff=''\n if data[0][0] == 'info':\n for i in data:\n if i[2] != diff:\n num=0\n diff=i[2]\n else:\n num+=1 \n opt=subElement(top,i[2],num=str(num))\n opt.text=self.cleanStream(i[3])\n elif data[0][0] == 'search':\n count=0\n for i in data:\n opt=subElement(top,i[2],num=str(count))\n subopt=subElement(opt,'pname')\n subopt.text=i[3]\n subopt=subElement(opt,i[4])\n subopt.text=i[5]\n count+=1\n\n print(ts(top).decode())\n\n def displayProcess(self,data,mode):\n result=[]\n node='error'\n if type(data) == type(list()) or type(data) == type(tuple()):\n for pkg in data[0]: \n result.append([\n mode,\n data[1],\n 'result',\n pkg['name'],\n 'description',\n pkg['description']\n ])\n elif type(data) == type(dict()):\n for lisT in data.keys():\n if lisT != 'name':\n for pkg in data[lisT]:\n result.append([mode,data['name'],lisT,pkg])\n else:\n result.append(data)\n #at this point decide if to print xml data or plain ascii\n ##right now assume plain\n if self.master.cmdline.options.print_plain:\n self.displayPlain(result) \n elif self.master.cmdline.options.print_xml:\n self.displayXml(result)\n else:\n self.displayPlain(result)\n\n class cmdline:\n master=None\n cmdArgs={\n 'package':[\n 
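# each entry below maps an option name to [short flag, long flag,\n                # help text, value for argparse's 'required' or 'action'\n                # keyword]; args() below unpacks these into add_argument calls\n                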
'-p',\n '--package',\n 'package to look for',\n 'yes'\n ],\n 'search':[\n '-s',\n '--search',\n 'mode search',\n 'store_true'\n ],\n 'info':[\n '-i',\n '--info',\n 'mode info',\n 'store_true'\n ],\n 'xml':[\n '-x',\n '--print-xml',\n 'print results in xml',\n 'store_true'\n ],\n 'plain':[\n '-a','--print-plain',\n 'print results plain',\n 'store_true'\n ]\n }\n options=None\n def args(self):\n parser=argparse.ArgumentParser(description='',epilog='')\n for arg in self.cmdArgs.keys():\n if arg == 'package':\n parser.add_argument(\n self.cmdArgs[arg][0],\n self.cmdArgs[arg][1],\n help=self.cmdArgs[arg][2],\n required=self.cmdArgs[arg][3]\n )\n elif arg in ['search','info','plain','xml']:\n parser.add_argument(\n self.cmdArgs[arg][0],\n self.cmdArgs[arg][1],\n help=self.cmdArgs[arg][2],\n action=self.cmdArgs[arg][3]\n )\n self.options=parser.parse_args()\n\n\n class void:\n master=None\n\n def run(self,wa):\n wa.cmdline.args()\n if wa.cmdline.options.info:\n wa.display.displayProcess(wa.HiLvl.depends(wa.cmdline.options.package),'info')\n elif wa.cmdline.options.search:\n wa.display.displayProcess(wa.HiLvl.similarPkgs(wa.cmdline.options.package),'search')\n def assemble(self):\n wa=self.void()\n wa.master=self\n\n wa.cmdline=self.cmdline()\n wa.cmdline.master=wa\n\n wa.web=self.web()\n wa.web.master=wa\n \n wa.HiLvl=self.HiLvl()\n wa.HiLvl.master=wa\n\n wa.display=self.display()\n wa.display.master=wa\n \n self.run(wa)\n\nrun=container()\nrun.assemble()\n","sub_path":"pacmanQuery2Xml/depscraper/aur-package-info.py","file_name":"aur-package-info.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6901776","text":"from __future__ import print_function\nimport os\nimport sys\nimport time\nimport warnings\nimport time\nimport copy\nimport random\n\nimport pandas as pd\nimport numpy as np\nfrom fisher import pvalue\nimport itertools\nimport math\n\nfrom sklearn.cluster import KMeans\n\nimport networkx as nx\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n###################################################### RRHO ####################################################################\n\n\ndef define_SNR_threshold(snr_file, exprs, exprs_data, network, q,\n min_n_samples=5, random_sample_size=1000, verbose=True):\n if os.path.exists(snr_file) and os.path.getsize(snr_file) > 0:\n try:\n f = open(snr_file, \"r\")\n min_SNR = float(f.readlines()[0])\n f.close()\n if verbose:\n print(\"Use pre-computed SNR threshold.\", file=sys.stdout)\n return (min_SNR)\n except:\n pass\n\n avgSNR = []\n t0 = time.time()\n for edge in random.sample(network.edges(), 1000):\n bic = identify_opt_sample_set(\n [edge[0], edge[1]], exprs, exprs_data, direction=\"UP\",\n min_n_samples=min_n_samples)\n if bic > 0:\n avgSNR.append(bic[\"avgSNR\"])\n else:\n avgSNR.append(0)\n min_SNR = np.quantile(avgSNR, q=1-q)\n f = open(snr_file, \"w\")\n print(round(min_SNR, 2), file=f)\n f.close()\n if verbose:\n print(\"time:\\tSNR threshold determined in %s s.\" %\n round(time.time()-t0, 2), file=sys.stdout)\n return round(min_SNR, 2)\n\n\ndef relabel_exprs_and_network(exprs, network):\n 'Changes gene and sample names to ints'\n g_names2ints = dict(zip(exprs.index.values, range(0, exprs.shape[0])))\n ints2g_names = exprs.index.values\n s_names2ints = dict(zip(exprs.columns.values, range(0, exprs.shape[1])))\n ints2s_names = exprs.columns.values\n exprs.rename(index=g_names2ints, columns=s_names2ints, inplace=True)\n network = 
nx.relabel_nodes(network, g_names2ints)\n return exprs, network, ints2g_names, ints2s_names\n\n#### Precompute a matrix of thresholds for RRHO ####\n\n\ndef find_threshold(t_u, t_w, N, significance_thr):\n 'Find min. possible overlap still passing the significance threshold, given t_u,t_w and N.'\n prev_x = N\n for x in range(min(t_u, t_w), 0, -1):\n p_val = pvalue(x, t_u-x, t_w-x, N-t_u-t_w+x).right_tail\n #print(t_u,t_w,x, p_val)\n if p_val < significance_thr:\n prev_x = x\n else:\n break\n return prev_x\n\n\ndef precompute_RRHO_thresholds(exprs, fixed_step=5, significance_thr=0.05):\n t_0 = time.time()\n rrho_thresholds = {}\n for t_u, t_w in itertools.product(range(exprs.shape[1]/2, 0, -fixed_step),\n repeat=2):\n if not t_u in rrho_thresholds.keys():\n rrho_thresholds[t_u] = {}\n rrho_thresholds[t_u][t_w] = find_threshold(\n t_u, t_w, exprs.shape[1], significance_thr=significance_thr)\n print(\"Precomputed RRHO thresholds\", time.time()-t_0, \"s\", file=sys.stdout)\n return rrho_thresholds\n\n#### Perform RRHO ####\n\n\ndef hg_test(rlist1, rlist2, t_u, t_w, return_items=False):\n N = len(rlist1)\n overlap = set(rlist1[:t_u]).intersection(set(rlist2[:t_w]))\n overlap_size = len(overlap)\n p_val = pvalue(overlap_size, t_u-overlap_size, t_w -\n overlap_size, N+overlap_size-t_u-t_w).right_tail\n enrichment = float(overlap_size)/(float((t_u)*(t_w))/N)\n if return_items:\n return p_val, enrichment, overlap\n else:\n return p_val, enrichment, overlap_size\n\n\ndef calc_sums(vals):\n return np.array([len(vals), np.sum(vals), np.sum(np.square(vals))])\n\n\ndef preprocess_power_cumsums(cells_count, inds1, inds2, vals):\n np_sums = np.zeros([cells_count, cells_count, 3])\n\n np.add.at(np_sums, (inds1, inds2, 0), 1)\n np.add.at(np_sums, (inds1, inds2, 1), vals)\n np.add.at(np_sums, (inds1, inds2, 2), vals * vals)\n\n reverted = np_sums[::-1, ::-1, :]\n cumsum = reverted.cumsum(axis=0).cumsum(axis=1)\n return cumsum[::-1, ::-1, :]\n\n\ndef calc_mean_std_by_powers(powers):\n count, val_sum, sum_sq = powers\n\n mean = val_sum / count # what if count == 0?\n std = np.sqrt((sum_sq / count) - mean*mean)\n return mean, std\n\n\ndef calc_SNR(in_powers, all_powers):\n out_powers = all_powers - in_powers\n\n in_mean, in_std = calc_mean_std_by_powers(in_powers)\n out_mean, out_std = calc_mean_std_by_powers(out_powers)\n\n return abs(in_mean - out_mean) / (in_std + out_std)\n\n\ndef more_optimal(count_new, thresh_new, count_old, thresh_old):\n if count_new > count_old:\n return True\n elif count_new == count_old:\n return abs(thresh_new[0] - thresh_new[1]) < abs(thresh_old[0] - thresh_old[1])\n else:\n return False\n\n\ndef partialRRHO(gene1, gene2, subnet, rhho_thrs, min_SNR=0.5, min_n_samples=8, fixed_step=10, verbose=False):\n '''Searches for thresholds corresponding to optimal patient overlap. \n The overlap is optimal if\n \\n1). it passes significance threshold in hypergeometric test;\n \\n2). resulting set of samples with SNR higher than 'min_SNR' threshold;\n \\n3). 
its size is maximal.\n \\n\n \\nReturns a set of samples with gene expressions above selected thresholds.\n '''\n\n t_0 = time.time()\n e1 = subnet.node[gene1][\"exprs\"]\n e1np = np.array(e1.copy().sort_index())\n\n e2 = subnet.node[gene2][\"exprs\"]\n # e2np = np.array(e2)\n e2np = np.array(e2.copy().sort_index())\n\n rlist1 = e1.index.values\n # ind to place in sorted array\n inv_rlist1 = np.zeros(len(rlist1), dtype=int)\n inv_rlist1[rlist1] = np.arange(len(rlist1))\n\n rlist2 = e2.index.values\n inv_rlist2 = np.zeros(len(rlist2), dtype=int)\n inv_rlist2[rlist2] = np.arange(len(rlist2))\n\n assert len(rlist1) == len(rlist2)\n mid = len(rlist1) // 2\n cells_count = (mid + fixed_step - 1) // fixed_step\n\n inds1 = (mid - inv_rlist1 - 1) // fixed_step\n inds2 = (mid - inv_rlist2 - 1) // fixed_step\n good_inds = (inds1 >= 0) & (inds2 >= 0)\n\n cumsums1 = preprocess_power_cumsums(\n cells_count, inds1[good_inds], inds2[good_inds],\n vals=e1np[good_inds]\n )\n cumsums2 = preprocess_power_cumsums(\n cells_count, inds1[good_inds], inds2[good_inds],\n vals=e2np[good_inds]\n )\n\n arsums1 = calc_sums(e1np)\n arsums2 = calc_sums(e2np)\n\n optimal_thresholds = (-1, mid)\n optimal_patient_count = 0\n\n # found significant overlap or thresolds reached ends of both lists\n stop_condition = False\n diag_index = 0\n while not stop_condition:\n # process diag_index'th diag. Calc all points stats\n for du in range(0, diag_index+1):\n dw = diag_index - du\n tu = mid - fixed_step * du\n tw = mid - fixed_step * dw\n\n if (tu <= 0) or (tw <= 0):\n stop_condition = True\n break # why ?\n\n samples_count = cumsums1[du, dw, 0]\n if samples_count > rhho_thrs[tu][tw]:\n SNR = (calc_SNR(cumsums1[du, dw, :], arsums1) +\n calc_SNR(cumsums2[du, dw, :], arsums2)) / 2\n\n if SNR >= min_SNR:\n stop_condition = True\n\n if more_optimal(samples_count, (tu, tw), optimal_patient_count, optimal_thresholds):\n optimal_thresholds = (tu, tw)\n optimal_patient_count = samples_count\n\n if samples_count <= min_n_samples:\n stop_condition = True\n\n diag_index += 1\n\n # print(diag_index)\n if optimal_patient_count > 0:\n tu, tw = optimal_thresholds\n\n optimal_patient_set = set(rlist1[:tu]).intersection(set(rlist2[:tw]))\n else:\n optimal_patient_set = set()\n if verbose:\n if optimal_patient_count > 0:\n print(\"runtime:\", round(time.time()-t_0, 5), \"s\", \"Best ovelap for\", (gene1, gene2), \"occurs at thresholds\",\n optimal_thresholds, \"and contains\", len(optimal_patient_set), \"samples\", file=sys.stderr)\n else:\n print(\"runtime:\", round(time.time()-t_0, 5), \"s\",\n \"No significant overlap for\", (gene1, gene2), file=sys.stderr)\n return optimal_patient_set\n\n#### Assign sets of samples on edges ####\n\n\ndef expression_profiles2nodes(subnet, exprs, direction, verbose=True):\n t_0 = time.time()\n '''Associates sorted expression profiles with nodes. 
Removes nodes without expressions.\n \\If direction=\"UP\" sorts patinets in the descending order, othewise in ascending order - down-regulated first.'''\n if direction == \"UP\":\n ascending = False\n elif direction == \"DOWN\":\n ascending = True\n\n # assign expression profiles to every node\n node2exprs = {}\n # store only sorted sample names\n for gene in exprs.index.values:\n node2exprs[gene] = exprs.loc[gene, :].sort_values(ascending=ascending)\n\n # set node attributes\n nx.set_node_attributes(subnet, 'exprs', node2exprs)\n print(\"\\texpression_profiles2nodes()\\truntime:\",\n round(time.time()-t_0, 5), \"s\", file=sys.stdout)\n return subnet\n\n\ndef assign_patients2edges(network, rrho_thrs, min_SNR=0.5, min_n_samples=8, fixed_step=10, verbose=True):\n t0 = time.time()\n edge2pats = {}\n n_removed, n_retained = 0, 0\n i = 0\n runtimes = []\n for edge in network.edges():\n n1, n2 = edge\n t_0 = time.time()\n samples = partialRRHO(n1, n2, network, rrho_thrs, min_SNR=min_SNR, min_n_samples=min_n_samples,\n fixed_step=fixed_step, verbose=False)\n runtimes.append(time.time() - t_0)\n i += 1\n if i % 1000 == 0 and verbose:\n print(\"\\t\", i, \"edges processed. Average runtime per edge:\",\n np.mean(runtimes), file=sys.stdout)\n\n # remove edges with not enough samples\n if len(samples) < min_n_samples:\n network.remove_edge(n1, n2)\n n_removed += 1\n else:\n network[n1][n2][\"samples\"] = samples\n n_retained += 1\n\n if verbose:\n print(\"\\tassign_patients2edges() runtime:\\t\", round(time.time() - t0, 4), \"for subnetwork of\",\n len(network.nodes()), \"nodes and\", len(network.edges()), \"edges.\", file=sys.stdout)\n if verbose:\n print(\"Of total %s edges %s retained;\\n%s edges containing less than %s samples were removed\" %\n (n_retained+n_removed, n_retained, n_removed, min_n_samples), file=sys.stdout)\n n_nodes = len(network.nodes())\n network.remove_nodes_from(list(nx.isolates(network)))\n if verbose:\n print(\"%s isolated nodes (deg==0) were removed, %s nodes retained.\" % (\n n_nodes-len(network.nodes()), len(network.nodes())), file=sys.stdout)\n return network\n\n#################################### Gibbs Sampling #######################################################\n\n\ndef calc_lp(edge, self_module, module, edge2Patients,\n nOnesPerPatientInModules, moduleSizes,\n moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n alpha=1.0, beta_K=1.0):\n\n N = edge2Patients.shape[1]\n m_size = moduleSizes[module]\n edge_vector = edge2Patients[edge, ]\n\n if m_size == 0:\n return p0\n\n n_ones_per_pat = nOnesPerPatientInModules[module, ]\n\n if self_module == module: # remove the edge from the module, if it belongs to a module\n if m_size == 1:\n return p0\n m_size -= 1\n n_ones_per_pat = n_ones_per_pat-edge_vector\n\n # if a module is composed of a single edge\n if m_size == 1:\n # just count number of matches and mismatches and\n n_matches = np.inner(n_ones_per_pat, edge_vector)\n return n_matches*match_score+(N-n_matches)*mismatch_score + bK_1\n\n # if a module contains more than one edge\n beta_term = math.log(m_size+beta_K)\n\n # alpha_term\n # ones-matching\n oneRatios = (n_ones_per_pat+alpha/2)/(m_size+alpha)\n ones_matching_term = np.inner(np.log(oneRatios), edge_vector)\n # zero-matching\n zeroRatios = (m_size-n_ones_per_pat+alpha/2)/(m_size+alpha)\n zeros_matching_term = np.inner(np.log(zeroRatios), (1-edge_vector))\n\n return ones_matching_term+zeros_matching_term + beta_term\n\n\ndef set_initial_conditions(network, p0, match_score, mismatch_score, bK_1, N, 
alpha=1.0,\n beta_K=1.0, verbose=True):\n t_0 = time.time()\n\n # 1. the number of edges inside each component, initially 1 for each component\n moduleSizes = np.ones(len(network.edges()), dtype=np.int)\n\n # 2. a binary (int) matrix of size n by m that indicates the samples on the edges\n edge2Patients = []\n all_samples = range(N)\n for edge in network.edges():\n n1, n2 = edge\n samples_in_module = network[n1][n2][\"samples\"]\n x = np.zeros(len(all_samples), dtype=np.int)\n i = 0\n for p in all_samples:\n if p in samples_in_module:\n x[i] = 1\n i += 1\n edge2Patients.append(x)\n\n edge2Patients = np.asarray(edge2Patients)\n\n # 3. a binary matrix of size K by m that stores the total number of ones per patient in each module,\n # initially equal to 'edge2Patients'\n nOnesPerPatientInModules = copy.copy(edge2Patients)\n\n t_0 = time.time()\n i = 0\n for n1, n2, data in network.edges(data=True):\n #del data['masked']\n del data['samples']\n data['m'] = i\n data['e'] = i\n i += 1\n # 4.\n edge2Module = range(0, len(network.edges()))\n\n # 5. moduleOneFreqs\n moduleOneFreqs = []\n n = edge2Patients.shape[0]\n for e in range(0, n):\n moduleOneFreqs.append(\n float(sum(edge2Patients[e, ]))/edge2Patients.shape[1])\n\n # 6. setting initial LPs\n t_0 = time.time()\n t_1 = t_0\n for n1, n2, data in network.edges(data=True):\n m = data['m']\n e = data['e']\n data['log_p'] = []\n data['modules'] = []\n for n in [n1, n2]:\n for n3 in network[n].keys():\n m2 = network[n][n3]['m']\n if not m2 in data['modules']:\n lp = calc_lp(e, m, m2, edge2Patients,\n nOnesPerPatientInModules, moduleSizes,\n moduleOneFreqs, p0, match_score, mismatch_score,\n bK_1, alpha=alpha, beta_K=beta_K)\n data['log_p'].append(lp)\n data['modules'].append(m2)\n print(\"time:\\tInitial state created in\", round(\n time.time() - t_0, 1), \"s.\", file=sys.stdout)\n return moduleSizes, edge2Patients, nOnesPerPatientInModules, edge2Module, moduleOneFreqs, network\n\n\ndef adjust_lp(log_probs, n_exp_orders=7):\n # adjusting the log values before normalization to avoid under-flow\n max_p = max(log_probs)\n probs = []\n for lp in log_probs:\n # shift all probs to set max_prob less than log(max_np.float)\n adj_lp = lp - max_p\n # set to minimal values all which less then 'n_orders' lesser than p_max to zeroes\n if adj_lp >= - n_exp_orders:\n probs.append(np.exp(adj_lp))\n else:\n probs.append(0)\n probs = probs/sum(probs)\n return probs\n\n### functions for checking of convergence conditions ###\n\n\ndef calc_p_transitions(states, unique, counts):\n n_steps = len(states)-1\n transitions = dict(\n zip(tuple(itertools.product(unique, unique)), np.zeros(len(unique)**2)))\n for i in range(0, n_steps):\n transitions[(states[i], states[i+1])] += 1\n p = {k: v/(counts[unique.index(k[0])]) for k, v in transitions.iteritems()}\n return p\n\n\ndef collect_all_p(labels):\n P = {}\n # calculate edge transition probabilities\n for edge in range(0, labels.shape[1]):\n states = labels[:, edge]\n unique, counts = np.unique(states, return_counts=True)\n if len(unique) > 1:\n P[edge] = calc_p_transitions(states, list(unique), counts)\n return P\n\n\ndef calc_RMSD(P, P_prev):\n t0 = time.time()\n p_prev_edges = set(P_prev.keys())\n p_edges = set(P.keys())\n Pdiff = []\n for edge in p_edges.difference(p_prev_edges):\n P_prev[edge] = {k: 0 for k in P[edge].keys()}\n P_prev[edge] = {k: 1 for k in P_prev[edge].keys() if k[0] == k[1]}\n for edge in p_prev_edges.difference(p_edges):\n P[edge] = {k: 0 for k in P_prev[edge].keys()}\n P[edge] = {k: 1 for k in 
P[edge].keys() if k[0] == k[1]}\n for edge in p_edges.intersection(p_prev_edges):\n p_modules = set(P[edge].keys())\n p_prev_modules = set(P_prev[edge].keys())\n for m, m2 in p_modules.difference(p_prev_modules):\n Pdiff.append((P[edge][(m, m2)])**2)\n for m, m2 in p_prev_modules.difference(p_modules):\n Pdiff.append((P_prev[edge][(m, m2)])**2)\n for m, m2 in p_modules.intersection(p_prev_modules):\n Pdiff.append((P[edge][(m, m2)] - P_prev[edge][(m, m2)])**2)\n if not len(Pdiff) == 0:\n return np.sqrt(sum(Pdiff)/len(Pdiff))\n else:\n return 0\n\n\ndef check_convergence_conditions(n_skipping_edges, n_skipping_edges_range,\n P_diffs, P_diffs_range, step, tol=0.05, verbose=True):\n n_points = len(n_skipping_edges)\n # check skipping edges\n se_min, se_max = n_skipping_edges_range\n n_skipping_edges = np.array(n_skipping_edges, dtype=float)\n\n # scale\n n_skipping_edges = (n_skipping_edges-se_min)/(se_max - se_min)*n_points\n # fit line\n\n A = np.vstack([range(0, n_points), np.ones(n_points)]).T\n k, b = np.linalg.lstsq(A, n_skipping_edges, rcond=None)[0]\n\n # check P_diffs\n P_diffs_min, P_diffs_max = P_diffs_range\n P_diffs = np.array(P_diffs)\n\n # scale\n P_diffs = (P_diffs-P_diffs_min)/(P_diffs_max - P_diffs_min)*n_points\n k2, b2 = np.linalg.lstsq(A, P_diffs, rcond=None)[0]\n if abs(k) < tol and abs(k2) < tol:\n convergence = True\n else:\n convergence = False\n if verbose:\n print(\"\\tConverged:\", convergence, \"#skipping edges slope:\", round(k, 5),\n \"RMS(Pn-Pn+1) slope:\", round(k2, 5))\n return convergence\n\n# sample and update model when necessary\n\n\ndef sampling(network, edge2Module, edge2Patients, nOnesPerPatientInModules, moduleSizes,\n moduleOneFreqs, p0, match_score, mismatch_score, bK_1, alpha=0.1, beta_K=1.0,\n max_n_steps=100, n_steps_averaged=20, n_points_fit=10, tol=0.1,\n n_steps_for_convergence=5, verbose=True, edge_ordering=\"nosort\"):\n edge_order = range(0, edge2Patients.shape[0])\n if edge_ordering == \"shuffle\":\n # shuffle edges\n random.shuffle(edge_order)\n t_ = time.time()\n edge2Module_history = [copy.copy(edge2Module)]\n is_converged = False\n network_edges = network.edges(data=True)\n for step in range(1, max_n_steps):\n if verbose:\n print(\"step\", step, file=sys.stdout)\n not_changed_edges = 0\n t_0 = time.time()\n t_1 = t_0\n i = 1\n for edge_index in edge_order:\n n1, n2, data = network_edges[edge_index]\n # adjust LogP and sample a new module\n p = adjust_lp(data['log_p'], n_exp_orders=7)\n curr_module = data['m']\n edge_ndx = data['e']\n new_module = np.random.choice(data['modules'], p=p)\n\n # update network and matrices if necessary\n if new_module != curr_module:\n apply_changes(network, n1, n2, edge_ndx, curr_module, new_module,\n edge2Patients, edge2Module, nOnesPerPatientInModules, moduleSizes,\n moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n alpha=alpha, beta_K=beta_K)\n\n else:\n not_changed_edges += 1\n i += 1\n if i % 10000 == 1:\n if verbose:\n print(i, \"\\t\\tedges processed in\", round(\n time.time() - t_1, 1), \"s runtime...\", file=sys.stdout)\n not_changed_edges = 0\n t_1 = time.time()\n if verbose:\n print(\"\\tstep \", step, # 1.0*not_changed_edges/len(edge_order),\"- % edges not changed; runtime\",\n round(time.time() - t_0, 1), \"s\", file=sys.stdout)\n\n edge2Module_history.append(copy.copy(edge2Module))\n if step == n_steps_averaged:\n is_converged = False\n n_times_cc_fulfilled = 0\n labels = np.asarray(\n edge2Module_history[step-n_steps_averaged:step])\n P_prev = collect_all_p(labels)\n P_diffs = []\n 
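# Convergence bookkeeping: P_diffs stores the RMS difference between\n            # successive estimates of edge-to-module transition probabilities,\n            # and n_skipping_edges counts edges that still switch modules within\n            # the averaging window; both are tested by check_convergence_conditions.\n            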
n_skipping_edges = []\n            n_skipping_edges.append(len(P_prev.keys()))\n        if step > n_steps_averaged:\n            labels = np.asarray(\n                edge2Module_history[step-n_steps_averaged:step])\n            P = collect_all_p(labels)\n            P_diff = calc_RMSD(copy.copy(P), copy.copy(P_prev))\n            P_diffs.append(P_diff)\n            n_skipping_edges.append(len(P.keys()))\n            P_prev = P\n            if step >= n_steps_averaged + n_points_fit:\n                P_diffs_range = min(P_diffs), max(P_diffs)\n                n_skipping_edges_range = min(\n                    n_skipping_edges), max(n_skipping_edges)\n                # check convergence condition\n                is_converged = check_convergence_conditions(n_skipping_edges[-n_points_fit:],\n                                                            n_skipping_edges_range,\n                                                            P_diffs[-n_points_fit:],\n                                                            P_diffs_range,\n                                                            step,\n                                                            tol=tol,\n                                                            verbose=verbose)\n            if is_converged:\n                n_times_cc_fulfilled += 1\n            else:\n                n_times_cc_fulfilled = 0\n\n            if n_times_cc_fulfilled == n_steps_for_convergence:  # stop if convergence is True for the last n steps\n                # define how many of the last steps to consider\n                n_final_steps = n_points_fit+n_steps_for_convergence\n                if verbose:\n                    print(\"The model converged after\",\n                          step, \"steps.\", file=sys.stdout)\n                    print(\"Consensus of last\", n_final_steps, \"states will be taken\")\n                    print(\"Sampling runtime\", round(\n                        time.time() - t_, 1), \"s\", file=sys.stdout)\n                return edge2Module_history, n_final_steps, n_skipping_edges, P_diffs\n\n    n_final_steps = n_steps_for_convergence\n    if verbose:\n        print(\"The model did not converge after\",\n              step, \"steps.\", file=sys.stdout)\n        print(\"Consensus of last\", n_final_steps, \"states will be taken\")\n        print(\"Sampling runtime\", round(\n            time.time() - t_, 1), \"s\", file=sys.stdout)\n\n    return edge2Module_history, n_final_steps, n_skipping_edges, P_diffs\n\n\ndef apply_changes(network, n1, n2, edge_ndx, curr_module, new_module,\n                  edge2Patients, edge2Module, nOnesPerPatientInModules, moduleSizes,\n                  moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n                  alpha=1.0, beta_K=1.0, no_LP_update=False):\n    '''Moves the edge from the current module to the new one\n    and updates network, nOnesPerPatientInModules and moduleSizes accordingly.'''\n    # update the edge module membership\n\n    network[n1][n2]['m'] = new_module\n    edge2Module[edge_ndx] = new_module\n    # for this edge no probabilities change\n\n    # reduce curr_module size and nOnesPerPatientInModules\n    edge_vector = edge2Patients[edge_ndx, ]\n    nOnesPerPatientInModules[curr_module,\n                             ] = nOnesPerPatientInModules[curr_module, ] - edge_vector\n    moduleSizes[curr_module, ] -= 1\n\n    # increase new_module\n    nOnesPerPatientInModules[new_module,\n                             ] = nOnesPerPatientInModules[new_module, ] + edge_vector\n    moduleSizes[new_module, ] += 1\n\n    # update LPs for all edges contacting curr and new modules, except the moved edge itself\n    # for affected edges, calculate only probabilities regarding curr and new modules\n    if not no_LP_update:\n        for n in [n1, n2]:\n            for n3 in network[n].keys():\n                if not n3 == n1 and not n3 == n2:  # skip the moved edge itself\n                    data = network[n][n3]\n                    m = data['m']\n                    e = data['e']\n                    # update LP for new_module\n                    lp = calc_lp(e, m, new_module, edge2Patients, nOnesPerPatientInModules, moduleSizes,\n                                 moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n                                 alpha=alpha, beta_K=beta_K)\n                    # find the index of the target module or add it if it did not exist\n                    if new_module in data['modules']:\n                        m_ndx = data['modules'].index(new_module)\n                        data['log_p'][m_ndx] = lp\n\n                    else:  # if it is a novel connection to a new_module, append it to the end of the list\n                        data['modules'].append(new_module)\n                        data['log_p'].append(lp)\n\n                    # update LP for curr_module\n                    # check if an edge is still 
connected with curr_module\n                    still_connected = False\n                    # iterate all edges adjacent to affected\n                    for n1_, n2_, data_ in network.edges([n, n3], data=True):\n                        # if there is still one adjacent edge from curr_module or with index e==curr_module, update LP\n                        if data_['m'] == curr_module or data_['e'] == curr_module:\n                            still_connected = True\n                            lp = calc_lp(e, m, curr_module, edge2Patients, nOnesPerPatientInModules, moduleSizes,\n                                         moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n                                         alpha=alpha, beta_K=beta_K)\n                            m_ndx = data['modules'].index(curr_module)\n                            data['log_p'][m_ndx] = lp\n\n                            break\n\n                    # if not connected, remove curr_m from the list\n                    if not still_connected:\n                        m_ndx = data['modules'].index(curr_module)\n                        del data['modules'][m_ndx]\n                        del data['log_p'][m_ndx]\n\n\ndef get_consensus_modules(edge2module_history, network, edge2Patients, edge2Module,\n                          nOnesPerPatientInModules, moduleSizes, moduleOneFreqs, p0, match_score, mismatch_score,\n                          bK_1, alpha=1.0, beta_K=1.0):\n    consensus_edge2module = []\n    labels = np.asarray(edge2module_history)\n\n    # identify edges whose module membership oscillates\n    edges = list(network.edges())  # materialize the edge view for index access\n    for i in range(0, len(edges)):\n        unique, counts = np.unique(labels[:, i], return_counts=True)\n        if len(unique) > 1:\n            counts = np.array(counts)\n            new_ndx = unique[np.argmax(counts)]\n            if float(max(counts))/labels.shape[0] < 0.5:\n                print(\"Warning: less than 50% of time in the most frequent module\\n\\tedge:\", i,\n                      \"counts:\", counts, \"\\n\\tlabels:\", \",\".join(map(str, unique)), file=sys.stdout)\n            consensus_edge2module.append(new_ndx)\n        else:\n            consensus_edge2module.append(unique[0])\n\n    # construct consensus edge-to-module membership\n    i = 0\n    changed_edges = 0\n    for i in range(0, len(consensus_edge2module)):\n        curr_module = edge2Module[i]\n        new_module = consensus_edge2module[i]\n        if curr_module != new_module:\n            changed_edges += 1\n            n1, n2 = edges[i]\n            apply_changes(network, n1, n2, i, curr_module, new_module,\n                          edge2Patients, edge2Module, nOnesPerPatientInModules, moduleSizes,\n                          moduleOneFreqs, p0, match_score, mismatch_score, bK_1,\n                          alpha=alpha, beta_K=beta_K, no_LP_update=True)\n\n    print(changed_edges, \"edges changed their module membership after taking consensus.\")\n    return consensus_edge2module, network, edge2Patients, nOnesPerPatientInModules, moduleSizes, moduleOneFreqs\n\n################################## 3. 
Post-processing ##########################################\n\n\ndef get_genes(mid, edge2module, edges):\n    ndx = [i for i, j in enumerate(edge2module) if j == mid]\n    genes = []\n    for edge in [edges[i] for i in ndx]:\n        genes.append(edge[0])\n        genes.append(edge[1])\n    genes = list(set(genes))\n    return genes\n\n\ndef calc_bic_SNR(genes, samples, exprs, N, exprs_sums, exprs_sq_sums):\n    bic = exprs[genes, :][:, samples]\n    bic_sums = bic.sum(axis=1)\n    bic_sq_sums = np.square(bic).sum(axis=1)\n\n    bg_counts = N - len(samples)\n    bg_sums = exprs_sums[genes]-bic_sums\n    bg_sq_sums = exprs_sq_sums[genes]-bic_sq_sums\n\n    bic_mean, bic_std = calc_mean_std_by_powers(\n        (len(samples), bic_sums, bic_sq_sums))\n    bg_mean, bg_std = calc_mean_std_by_powers((bg_counts, bg_sums, bg_sq_sums))\n\n    return np.mean(abs(bic_mean - bg_mean) / (bic_std + bg_std))\n\n\ndef bicluster_avg_SNR(exprs, genes=[], samples=[]):\n    mat = exprs.loc[genes, :]\n    all_samples = set(mat.columns.values)\n    bic = mat.loc[:, samples]\n    out = mat.loc[:, all_samples.difference(samples)]\n    SNR = (bic.mean(axis=1) - out.mean(axis=1)) / \\\n        (bic.std(axis=1)+out.std(axis=1))\n    return np.mean(abs(SNR))\n\n\ndef bicluster_corrs(exprs, genes=[], samples=[]):\n    if len(samples) > 0:\n        mat = exprs.loc[genes, samples]\n    else:\n        mat = exprs.loc[genes, :]\n    corrs = []\n    for g1, g2 in itertools.combinations(genes, 2):\n        corr = np.corrcoef(mat.loc[g1, :].values, mat.loc[g2, :].values)[0][1]\n        corrs.append(corr)\n    return round(np.average(corrs), 2), round(np.min(corrs), 2), round(np.max(corrs), 2)\n\n\ndef identify_opt_sample_set(genes, exprs, exprs_data, direction=\"UP\", min_n_samples=8):\n    N, exprs_sums, exprs_sq_sums = exprs_data\n    e = exprs[genes, :]\n\n    labels = KMeans(n_clusters=2, random_state=0).fit(e.T).labels_\n    ndx0 = np.where(labels == 0)[0]\n    ndx1 = np.where(labels == 1)[0]\n    if min(len(ndx1), len(ndx0)) < min_n_samples:\n        return {\"avgSNR\": -1}\n    if np.mean(e[:, ndx1].mean()) > np.mean(e[:, ndx0].mean()):\n        if direction == \"UP\":\n            samples = ndx1\n        else:\n            samples = ndx0\n    else:\n        if direction == \"UP\":\n            samples = ndx0\n        else:\n            samples = ndx1\n    avgSNR = calc_bic_SNR(genes, samples, exprs, N, exprs_sums, exprs_sq_sums)\n\n    # allow bicluster to be a little bit bigger than N/2\n    if len(samples) < N*0.5*1.1 and len(samples) >= min_n_samples:\n        bic = {\"genes\": set(genes), \"n_genes\": len(genes),\n               \"samples\": set(samples), \"n_samples\": len(samples),\n               \"avgSNR\": avgSNR, \"direction\": direction}\n        return bic\n    else:\n        return {\"avgSNR\": -1}\n\n\ndef identify_opt_sample_set_for_discordant(genes, exprs, exprs_data, min_n_samples=8):\n    N, exprs_sums, exprs_sq_sums = exprs_data\n    e = exprs[genes, :]\n\n    labels = KMeans(n_clusters=2, random_state=0).fit(e.T).labels_\n    ndx0 = np.where(labels == 0)[0]\n    ndx1 = np.where(labels == 1)[0]\n    if min(len(ndx1), len(ndx0)) < min_n_samples:\n        return {\"avgSNR\": -1}\n\n    # the bicluster is the smaller group, direction = \"MIXED\"\n    if len(ndx0) <= len(ndx1):\n        samples = ndx0\n    else:\n        samples = ndx1\n\n    avgSNR = calc_bic_SNR(genes, samples, exprs, N, exprs_sums, exprs_sq_sums)\n\n    bic = {\"genes\": set(genes), \"n_genes\": len(genes),\n           \"samples\": set(samples), \"n_samples\": len(samples),\n           \"avgSNR\": avgSNR, \"direction\": \"MIXED\"}\n    return bic\n\n#### Merging ###\n\n\ndef calc_J(bic, bic2, all_samples):\n    s1 = bic[\"samples\"]\n    s2 = bic2[\"samples\"]\n    if bic[\"direction\"] != bic2[\"direction\"]:\n        s2 = all_samples.difference(s2)\n    sh_samples = s1.intersection(s2)\n    u_samples = s1.union(s2)\n    J = 
1.0*len(sh_samples)/len(u_samples)\n    return J\n\n\ndef calc_overlap_pval_J(bic, bic2, all_samples):\n    s1 = bic[\"samples\"]\n    s2 = bic2[\"samples\"]\n    if bic[\"direction\"] != bic2[\"direction\"]:\n        s2 = all_samples.difference(s2)\n    s1_s2 = len(s1.intersection(s2))\n    s1_only = len(s1.difference(s2))\n    s2_only = len(s2.difference(s1))\n    p_val = pvalue(s1_s2, s1_only, s2_only, len(\n        all_samples)-s1_s2-s1_only-s2_only).right_tail\n    J = 1.0*s1_s2/(s1_s2+s1_only+s2_only)\n    return p_val, J\n\n\ndef merge_biclusters(biclusters, exprs, exprs_data, min_n_samples=5,\n                     verbose=True, min_SNR=0.5, J_threshold=0.25, pval_threshold=0.05,\n                     merge_discordant=False):\n    t0 = time.time()\n\n    n_bics = len(biclusters)\n    bics = dict(zip(range(0, len(biclusters)), biclusters))\n\n    all_samples = set(range(0, exprs.shape[1]))\n    candidates = {}\n    for i in bics.keys():\n        bic = bics[i]\n        for j in bics.keys():\n            if j > i:\n                bic2 = bics[j]\n                p_val, J = calc_overlap_pval_J(bic, bic2, all_samples)\n\n                p_val = p_val*n_bics\n                if p_val < pval_threshold:  # and J>J_threshold:\n                    candidates[(i, j)] = p_val, J\n\n    nothing_to_merge = False\n    while not nothing_to_merge:\n        # take min p-value pair\n        opt_pval, opt_J = pval_threshold, 0.0\n        opt_pair = -1, -1\n\n        for pair in candidates.keys():\n            p_val, J = candidates[pair]\n            if p_val < opt_pval:\n                opt_pval, opt_J = p_val, J\n                opt_pair = pair\n            elif p_val == opt_pval:  # take max(J) in case of ties\n                if J > opt_J:\n                    opt_pval, opt_J = p_val, J\n                    opt_pair = pair\n\n        if opt_pair[0] == -1:\n            nothing_to_merge = True\n        else:\n            opt_i, opt_j = opt_pair\n            if verbose:\n                print(\"\\t\\ttry merging %s:%sx%s (%s,%s) + %s:%sx%s (%s,%s)\" % (bics[opt_i][\"id\"], bics[opt_i][\"n_genes\"],\n                                                                              bics[opt_i][\"n_samples\"],\n                                                                              round(bics[opt_i][\"avgSNR\"], 2),\n                                                                              bics[opt_i][\"direction\"],\n                                                                              bics[opt_j][\"id\"], bics[opt_j][\"n_genes\"],\n                                                                              bics[opt_j][\"n_samples\"],\n                                                                              round(bics[opt_j][\"avgSNR\"], 2),\n                                                                              bics[opt_j][\"direction\"]))\n\n            # try creating a new bicluster from bic and bic2\n            genes = list(bics[opt_i][\"genes\"] | bics[opt_j][\"genes\"])\n\n            if merge_discordant:\n                if bics[opt_i][\"direction\"] != bics[opt_j][\"direction\"]:\n                    new_bic = identify_opt_sample_set_for_discordant(\n                        genes, exprs, exprs_data, min_n_samples=min_n_samples)\n                else:\n                    new_bic = identify_opt_sample_set(genes, exprs, exprs_data,\n                                                      direction=bics[opt_i][\"direction\"],\n                                                      min_n_samples=min_n_samples)\n            else:\n                new_bic = identify_opt_sample_set(genes, exprs, exprs_data,\n                                                  direction=bics[opt_i][\"direction\"],\n                                                  min_n_samples=min_n_samples)\n                if new_bic[\"avgSNR\"] == -1 and bics[opt_i][\"direction\"] != bics[opt_j][\"direction\"]:\n                    new_bic = identify_opt_sample_set(genes, exprs, exprs_data,\n                                                      direction=bics[opt_j][\"direction\"],\n                                                      min_n_samples=min_n_samples)\n\n            avgSNR = new_bic[\"avgSNR\"]\n            if avgSNR >= min_SNR:\n                # place new_bic into the ith slot\n                new_bic[\"id\"] = bics[opt_i][\"id\"]\n                if verbose:\n                    substitution = (bics[opt_i][\"id\"], len(bics[opt_i][\"genes\"]), len(bics[opt_i][\"samples\"]),\n                                    round(bics[opt_i][\"avgSNR\"], 2), bics[opt_i][\"direction\"],\n                                    bics[opt_j][\"id\"], len(bics[opt_j][\"genes\"]),\n                                    len(bics[opt_j][\"samples\"]),\n                                    round(bics[opt_j][\"avgSNR\"], 2), bics[opt_j][\"direction\"],\n                                    round(new_bic[\"avgSNR\"], 2), len(new_bic[\"genes\"]),\n                                    len(new_bic[\"samples\"]))\n                    print(\n                        \"\\tMerge biclusters %s:%sx%s (%s,%s) and %s:%sx%s (%s,%s) --> %s SNR and %sx%s\" % substitution)\n                new_bic[\"n_genes\"] = len(new_bic[\"genes\"])\n                new_bic[\"n_samples\"] = len(new_bic[\"samples\"])\n\n                bics[opt_i] 
= new_bic\n                # delete candidate pairs involving the jth bicluster\n                for i, j in list(candidates.keys()):\n                    if i == opt_j or j == opt_j:\n                        del candidates[(i, j)]\n                # remove the jth bicluster and recompute overlaps with the merged one\n                del bics[opt_j]\n                n_bics = len(bics)\n                for j in bics.keys():\n                    if j != opt_i:\n                        J = calc_J(new_bic, bics[j], all_samples)\n                        p_val, J = calc_overlap_pval_J(\n                            new_bic, bics[j], all_samples)\n                        p_val = p_val*n_bics\n                        if p_val < pval_threshold:  # and J>J_threshold:\n                            if opt_i < j:\n                                candidates[(opt_i, j)] = p_val, J\n                            else:\n                                candidates[(j, opt_i)] = p_val, J\n            else:\n                # set J for this pair to 0\n                if verbose:\n                    print(\"\\t\\tSNR=\", round(avgSNR, 2), \"<\",\n                          round(min_SNR, 2), \"--> no merge\")\n                candidates[(opt_i, opt_j)] = pval_threshold, 0.0\n\n    if verbose:\n        print(\"time:\\tMerging finished in:\", round(time.time()-t0, 2))\n\n    return bics.values()\n","sub_path":"method_post.py","file_name":"method_post.py","file_ext":"py","file_size_in_byte":39034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"445833590","text":"import pyglet\r\nfrom pyglet.window import key\r\nfrom pyglet.window import mouse\r\nimport primitives\r\nfrom random import random\r\nimport math\r\nfrom math import acos, pi, sqrt\r\nfrom itertools import permutations\r\n\r\npyglet.resource.path = ['../res']\r\npyglet.resource.reindex()\r\n\r\nclass App(pyglet.window.Window):\r\n\tdef __init__(self):\r\n\t\tconfig = pyglet.gl.Config(sample_buffers=1, samples=4)\r\n\t\tpyglet.window.Window.__init__(self, config=config, resizable=True)\r\n\r\n\t\tself.w, self.h = 1300, 800\r\n\t\tself.set_size(self.w, self.h)\r\n\t\tself.cx, self.cy = 0, 0\r\n\t\tself.dcx, self.dcy = self.w/2, self.h/2\r\n\r\n\t\tself.set_caption(\"golden angle\")\r\n\r\n\t\tself.entities = []\r\n\r\n\t\tself.line1 = Line()\r\n\t\tself.line2 = Line()\r\n\t\tself.entities.append(self.line1)\r\n\t\tself.entities.append(self.line2)\r\n\r\n\t\tself.last = 1\r\n\t\tself.actual_a = 0\r\n\r\n\t\tphi = 360/((1+5**0.5)/2)\r\n\t\tself.da = 0\r\n\t\tself.keys = key.KeyStateHandler()\r\n\t\tself.push_handlers(self.keys)\r\n\t\t#self.generate()\r\n\r\n\t\t@self.event\r\n\t\tdef on_resize(width, height):\r\n\t\t\tself.dcx = width/2\r\n\t\t\tself.dcy = height/2\r\n\t\t\tself.w = width\r\n\t\t\tself.h = height\r\n\r\n\t\tself.stopped = False\r\n\r\n\tdef update(self, dt):\r\n\t\tlast = self.entities[2]\r\n\t\tself.stopped = (last.x**2 + last.y**2)**0.5 > 300\r\n\t\tif not self.stopped:\r\n\t\t\tself.clear()\r\n\t\t\ttimes = 3\r\n\t\t\tfor _ in range(times):\r\n\t\t\t\tfor entity in self.entities:\r\n\t\t\t\t\tentity.update(self, dt)\r\n\r\n\t\t\tfor entity in self.entities:\r\n\t\t\t\tentity.render(self.cx - self.dcx, self.cy - self.dcy)\r\n\r\n\t\t\tself.add(dt*times)\r\n\r\n\t\t\tif self.keys[key.SPACE]:\r\n\t\t\t\tself.line1.target = self.entities[-1]\r\n\t\t\t\tself.line2.target = self.entities[-2]\r\n\r\n\tdef add(self, dt):\r\n\t\tself.last += dt\r\n\t\tif self.last > 1:\r\n\t\t\tself.last -= 1\r\n\t\t\tc = Circle(0, 0, 4)\r\n\t\t\tself.actual_a += self.da\r\n\t\t\tself.entities.append(c)\r\n\r\n\t\t\tif self.line1.target:\r\n\t\t\t\tx1, y1 = self.line1.target.x, self.line1.target.y\r\n\t\t\t\tx2, y2 = self.line2.target.x, self.line2.target.y \r\n\t\t\t\tangle = acos((x1*x2 + y1*y2)/sqrt((x1*x1 + y1*y1)*(x2*x2 + y2*y2)))/pi*180\r\n\t\t\t\tprint(\"angle: {}\".format(angle))\r\n\r\n\r\n\r\n\r\nclass Line():\r\n\r\n\tdef __init__(self):\r\n\t\tself.target = None\r\n\r\n\tdef update(self, app, dt):\r\n\t\tpass\r\n\r\n\tdef 
render(self, cx, cy):\r\n\t\tif self.target:\r\n\t\t\tprimitives.line(a=(self.target.x - cx, self.target.y - cy), b=(-cx, -cy), color=(1., 1., 1., 1.))\r\n\r\nclass Circle():\r\n\r\n\tdef __init__(self, x, y, r):\r\n\t\tself.x, self.y, self.r = x, y, r\r\n\r\n\tdef update(self, app, dt):\r\n\t\tif self.x**2 + self.y**2 > 500**2 and False: #self.x < -self.r or self.y < -self.r or self.x > app.w + self.r or self.y > app.h + self.r:\r\n\t\t\tapp.entities.remove(self)\r\n\t\t\treturn\r\n\r\n\t\tvx = 0\r\n\t\tvy = 0\r\n\t\tfor drop in app.entities:\r\n\t\t\tif type(drop) == Circle:\r\n\t\t\t\tif drop == self:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tdx = self.x - drop.x\r\n\t\t\t\tdy = self.y - drop.y\r\n\t\t\t\tdis2 = dx**2 + dy**2\r\n\r\n\t\t\t\tvx += 2000/dis2*(dx/dis2**0.5)\r\n\t\t\t\tvy += 2000/dis2*(dy/dis2**0.5)\r\n\r\n\r\n\t\tself.x += vx*dt\r\n\t\tself.y += vy*dt\r\n\t\t#self.r += 0.03\r\n\r\n\tdef render(self, cx, cy):\r\n\t\tprimitives.circle(x=self.x - cx, y=self.y - cy, radius=self.r, color=(1., 1., 1., 1.))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tapp = App()\r\n\tapp.entities.append(Circle(10, 10, 4))\r\n\tapp.entities.append(Circle(10, -5, 4))\r\n\tpyglet.clock.schedule_interval(app.update, 1/120.0)\r\n\tpyglet.app.run()","sub_path":"gekom/zlatyrez/zlatyrez3.py","file_name":"zlatyrez3.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"85544777","text":"class Solution:\n    # @param {integer} num\n    # @return {string}\n    def intToRoman(self, num):\n        letter = ['I','V','X','L','C','D','M','','']\n        i = 0\n        result = ''\n        while num != 0:\n            digit = num % 10\n            if 0 <= digit < 4:\n                result = letter[i] * digit + result\n            elif digit == 4:\n                result = letter[i] + letter[i+1] + result\n            elif 5 <= digit < 9:\n                result = letter[i+1] + letter[i] * (digit - 5) + result\n            else:\n                result = letter[i] + letter[i+2] + result\n            num //= 10\n            i += 2\n        return result\n\n# call the method on an instance instead of passing the unbound function as 'self'\nprint(Solution().intToRoman(3999))\n","sub_path":"12. 
Integer to Roman/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"420295291","text":"from keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Convolution3D, MaxPooling3D\n\nfrom keras.optimizers import SGD, RMSprop\nfrom keras.utils import np_utils, generic_utils\n\nimport theano\nimport os\n# import matplotlib\n# import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import cross_validation\nfrom sklearn import preprocessing\n\ntry:\n    from importlib import reload  # Python 3: reload() is no longer a builtin\nexcept ImportError:\n    pass  # Python 2: the builtin reload() is used\n\nfrom keras import backend as K\ndef set_keras_backend(backend):\n\n    if K.backend() != backend:\n        os.environ['KERAS_BACKEND'] = backend\n        reload(K)\n        assert K.backend() == backend\n\nset_keras_backend(\"theano\")\nK.set_image_dim_ordering('th')\n#load data\ntrain_x = np.load('./data/total_train.npz')['a']\nipt=np.rollaxis(np.rollaxis(train_x,2,0),2,0)\n# frames= np.zeros((16,16,1500))\n\nimg_rows,img_cols,img_depth=120,160,10\n# for i in range (1500):\n#     frames[:,:,i] = cv2.resize(ipt[:,:,i],(img_rows,img_cols),interpolation=cv2.INTER_AREA)\n\n\nX_tr=[]\n\n\n\nfor i in range(150):\n    X_tr.append(ipt[:,:,i*10:(i+1)*10])\n\nX_tr_array = np.array(X_tr)\n\nnum_samples = len(X_tr_array)\n# print(len(X_tr))\n# print(X_tr[0].shape)\n\n#assign label\nlabel=np.ones((num_samples,),dtype = int)\nlabel[0:25]= 0\nlabel[25:50] = 1\nlabel[50:75] = 2\nlabel[75:100] = 3\nlabel[100:125]= 4\nlabel[125:] = 5\n\ntrain_data = [X_tr_array,label]\n\n(X_train, y_train) = (train_data[0],train_data[1])\n\ntrain_set = np.zeros((num_samples, 1, img_rows,img_cols,img_depth))\n\nfor h in range(num_samples):\n    train_set[h][0][:][:][:]=X_train[h,:,:,:]\n\n\npatch_size = 10    # img_depth or number of frames used for each video\n\n# print(train_set.shape, 'train samples')\n\n# CNN Training parameters\n\nbatch_size = 2\nnb_classes = 6\nnb_epoch = 700\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\n\n\n# number of convolutional filters to use at each layer\nnb_filters = [32, 32]\n\n# level of pooling to perform at each layer (POOL x POOL)\nnb_pool = [3, 3]\n\n# level of convolution to perform at each layer (CONV x CONV)\nnb_conv = [5,5]\n\n# Pre-processing\n\ntrain_set = train_set.astype('float32')\n\ntrain_set -= np.mean(train_set)\n\ntrain_set /= np.max(train_set)\n\n# Define model\n\nmodel = Sequential()\nmodel.add(Convolution3D(nb_filters[0],(nb_conv[0], nb_conv[0], nb_conv[0]), input_shape=(1, img_rows, img_cols, patch_size), activation='relu'))\n\nmodel.add(MaxPooling3D(pool_size=(nb_pool[0], nb_pool[0], nb_pool[0])))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(128, activation=\"relu\", kernel_initializer=\"normal\"))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(nb_classes,kernel_initializer='normal'))\n\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])\n\n\n# Split the data\n\nX_train_new, X_val_new, y_train_new,y_val_new = train_test_split(train_set, Y_train, test_size=0.3, random_state=42)\n\n\n# Train the model\n\nhist = model.fit(X_train_new, y_train_new, validation_data=(X_val_new,y_val_new), batch_size=batch_size,epochs = nb_epoch,shuffle=True)\n\n # Evaluate the model\n# score = 
model.evaluate(X_val_new, y_val_new, batch_size=batch_size)\n# score = model.evaluate(X_train_new, y_train_new, batch_size=batch_size)\n\n# print('Test score:', score[0])\n# print('Test accuracy:', score[1]) \n'''\nlisting = os.listdir('./dataset/boxing')[:1]\n\nfor vid in listing:\n vid = './dataset/boxing/'+vid\n frames = []\n cap = cv2.VideoCapture(vid)\n fps = cap.get(5)\n print (\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}\".format(fps))\n \n\n for k in range(15):\n ret, frame = cap.read()\n # frame=cv2.resize(frame,(img_rows,img_cols),interpolation=cv2.INTER_AREA)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # plt.imshow(gray)\n # plt.show()\n frames.append(gray)\n\n #plt.imshow(gray, cmap = plt.get_cmap('gray'))\n #plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis\n #plt.show()\n #cv2.imshow('frame',gray)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n\n input=np.array(frames)\n\n print (input.shape)\n ipt=np.rollaxis(np.rollaxis(input,2,0),2,0)\n print (ipt.shape)\n\n X_tr.append(ipt)\n\nprint(X_tr[0].shape)\n# plt.imshow(gray)\n# plt.show()\n# frames.append(gray)\n\n'''","sub_path":"5_CRNN/keras_cnn.py","file_name":"keras_cnn.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"450264436","text":"#coding: utf-8\r\nimport binascii\r\n\r\nfrom ByteArray import ByteArray\r\nfrom Identifiers import Identifiers\r\n\r\nclass Shop:\r\n def __init__(self, player, server):\r\n self.client = player\r\n self.server = player.server\r\n self.Cursor = player.Cursor\r\n\r\n def getShopLength(self):\r\n return 0 if self.client.shopItems == \"\" else len(self.client.shopItems.split(\",\"))\r\n\r\n def checkUnlockShopTitle(self):\r\n if self.server.shopTitleList.has_key(self.getShopLength()):\r\n title = self.server.shopTitleList[self.getShopLength()]\r\n self.client.checkAndRebuildTitleList(\"shop\")\r\n self.client.sendUnlockedTitle(int(title - (title % 1)), int(round((title % 1) * 10)))\r\n self.client.sendCompleteTitleList()\r\n self.client.sendTitleList()\r\n\r\n def checkAndRebuildBadges(self):\r\n rebuild = False\r\n for badge in self.server.shopBadges.items():\r\n if not self.client.shopBadges.has_key(badge[0]) and self.checkInShop(badge[0]):\r\n self.client.shopBadges[str(badge[1])] = 0\r\n rebuild = True\r\n\r\n if rebuild:\r\n badges = self.client.shopBadges\r\n self.client.shopBadges = {}\r\n for badge, count in badges.items():\r\n if not self.client.shopBadges.has_key(badge):\r\n self.client.shopBadges[badge] = count\r\n\r\n## def checkAndRebuildBadges(self):\r\n## rebuild = False\r\n## for badge in self.server.shopBadges.items():\r\n## if not badge[1] in self.client.shopBadges and self.checkInShop(badge[0]):\r\n## self.client.shopBadges.append(str(badge[1]))\r\n## rebuild = True\r\n##\r\n## if rebuild:\r\n## tempBadges = []\r\n## tempBadges.extend(self.client.shopBadges)\r\n## self.client.shopBadges = []\r\n## self.client.shopBadgesCounts = {}\r\n## for badge in tempBadges:\r\n## if not badge in self.client.shopBadges:\r\n## self.client.shopBadges.append(badge)\r\n##\r\n def checkUnlockShopBadge(self, itemID):\r\n if not self.client.isGuest:\r\n if self.server.shopBadges.has_key(itemID):\r\n unlockedBadge = self.server.shopBadges[itemID]\r\n self.sendUnlockedBadge(unlockedBadge)\r\n self.checkAndRebuildBadges()\r\n\r\n def checkInShop(self, checkItem):\r\n if not self.client.shopItems == \"\":\r\n 
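# entries are stored as item or item_customhex; compare on the numeric id before the underscore\r\n            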
for shopItem in self.client.shopItems.split(\",\"):\r\n                if checkItem == int(shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem):\r\n                    return True\r\n            else:\r\n                return False\r\n\r\n    def checkInShamanShop(self, checkItem):\r\n        if not self.client.shamanItems == \"\":\r\n            for shamanItems in self.client.shamanItems.split(\",\"):\r\n                if checkItem == int(shamanItems.split(\"_\")[0] if \"_\" in shamanItems else shamanItems):\r\n                    return True\r\n            else:\r\n                return False\r\n\r\n    def checkInPlayerShop(self, type, playerName, checkItem):\r\n        # the column name cannot be bound as a query parameter, so it is spliced in directly\r\n        self.Cursor.execute(\"select \" + type + \" from Users where Username = %s\", [playerName])\r\n        for rs in self.Cursor.fetchall():\r\n            items = rs[type]\r\n            if not items == \"\":\r\n                for shopItem in items.split(\",\"):\r\n                    if checkItem == int(shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem):\r\n                        return True\r\n                else:\r\n                    return False\r\n\r\n    def getItemCustomization(self, checkItem, isShamanShop):\r\n        items = self.client.shamanItems if isShamanShop else self.client.shopItems\r\n        if not items == \"\":\r\n            for shopItem in items.split(\",\"):\r\n                itemSplited = shopItem.split(\"_\")\r\n                custom = itemSplited[1] if len(itemSplited) >= 2 else \"\"\r\n                if int(itemSplited[0]) == checkItem:\r\n                    return \"\" if custom == \"\" else (\"_\" + custom)\r\n        else:\r\n            return \"\"\r\n\r\n    def getShamanItemCustom(self, code):\r\n        # split() returns a list of owned shaman items, so each entry must be tested individually\r\n        for item in self.client.shamanItems.split(\",\"):\r\n            if \"_\" in item:\r\n                itemSplited = item.split(\"_\")\r\n                custom = (itemSplited[1] if len(itemSplited) >= 2 else \"\").split(\"+\")\r\n                if int(itemSplited[0]) == code:\r\n                    packet = ByteArray().writeByte(len(custom))\r\n                    x = 0\r\n                    while x < len(custom):\r\n                        packet.writeInt(int(custom[x], 16))\r\n                        x += 1\r\n                    return packet.toByteArray()\r\n        return chr(0)\r\n\r\n    def getShopItemPrice(self, fullItem):\r\n        itemCat = (0 if fullItem / 10000 == 1 else fullItem / 10000) if fullItem > 9999 else fullItem / 100\r\n        item = fullItem % 1000 if fullItem > 9999 else fullItem % 100 if fullItem > 999 else fullItem % (100 * itemCat) if fullItem > 99 else fullItem\r\n        return self.getItemPromotion(itemCat, item, self.server.shopListCheck[str(itemCat) + \"|\" + str(item)][1])\r\n\r\n    def getShamanShopItemPrice(self, fullItem):\r\n        return self.server.shamanShopListCheck[str(fullItem)][1]\r\n\r\n    def getItemPromotion(self, itemCat, item, price):\r\n        for promotion in self.server.shopPromotions:\r\n            if promotion[0] == itemCat and promotion[1] == item:\r\n                return int(promotion[2] / 100.0 * price)\r\n        return price\r\n\r\n    def sendShopList(self, sendItems=True):\r\n        shopItems = [] if self.client.shopItems == \"\" else self.client.shopItems.split(\",\")\r\n\r\n        packet = ByteArray().writeInt(self.client.shopCheeses).writeInt(self.client.shopFraises).writeUTF(self.client.playerLook).writeInt(len(shopItems))\r\n        for item in shopItems:\r\n            if \"_\" in item:\r\n                itemSplited = item.split(\"_\")\r\n                realItem = itemSplited[0]\r\n                custom = itemSplited[1] if len(itemSplited) >= 2 else \"\"\r\n                realCustom = [] if custom == \"\" else custom.split(\"+\")\r\n\r\n                packet.writeByte(len(realCustom)+1).writeInt(int(realItem))\r\n\r\n                x = 0\r\n                while x < len(realCustom):\r\n                    packet.writeInt(int(realCustom[x], 16))\r\n                    x += 1\r\n            else:\r\n                packet.writeByte(0).writeInt(int(item))\r\n\r\n        shop = self.server.shopList if sendItems else []\r\n        packet.writeInt(len(shop))\r\n\r\n        for item in shop:\r\n            value = item.split(\",\")\r\n            
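# serialize one catalogue entry field-by-field in its stored order\r\n            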
packet.writeShort(int(value[0])).writeShort(int(value[1])).writeByte(int(value[2])).writeByte(int(value[3])).writeByte(int(value[4])).writeInt(int(value[5])).writeInt(int(value[6])).writeShort(0)\r\n\r\n        visuais = self.server.newVisuList\r\n        packet.writeByte(len(visuais))\r\n        # dict items are (key, value) pairs, so a third index cannot exist;\r\n        # assumption: the trailing byte encodes the number of visual parts\r\n        for visualID, visualData in visuais.items():\r\n            packet.writeShort(visualID)\r\n            packet.writeUTF(\"\".join(visualData))\r\n            packet.writeByte(len(visualData))\r\n\r\n        packet.writeShort(len(self.client.clothes))\r\n\r\n        for clothe in self.client.clothes:\r\n            clotheSplited = clothe.split(\"/\")\r\n            packet.writeUTF(clotheSplited[1] + \";\" + clotheSplited[2] + \";\" + clotheSplited[3])\r\n\r\n        shamanItems = [] if self.client.shamanItems == \"\" else self.client.shamanItems.split(\",\")\r\n        packet.writeShort(len(shamanItems))\r\n\r\n        for item in shamanItems:\r\n            if \"_\" in item:\r\n                itemSplited = item.split(\"_\")\r\n                realItem = itemSplited[0]\r\n                custom = itemSplited[1] if len(itemSplited) >= 2 else \"\"\r\n                realCustom = [] if custom == \"\" else custom.split(\"+\")\r\n\r\n                packet.writeShort(int(realItem))\r\n\r\n                packet.writeBoolean(item in self.client.shamanLook.split(\",\")).writeByte(len(realCustom)+1)\r\n\r\n                x = 0\r\n                while x < len(realCustom):\r\n                    packet.writeInt(int(realCustom[x], 16))\r\n                    x += 1\r\n            else:\r\n                packet.writeShort(int(item)).writeBoolean(item in self.client.shamanLook.split(\",\")).writeByte(0)\r\n\r\n        shamanShop = self.server.shamanShopList if sendItems else []\r\n        packet.writeShort(len(shamanShop))\r\n\r\n        for item in shamanShop:\r\n            value = item.split(\",\")\r\n            packet.writeInt(int(value[0])).writeByte(int(value[1])).writeByte(int(value[2])).writeByte(int(value[3])).writeInt(int(value[4])).writeShort(int(value[5]))\r\n\r\n        self.client.sendPacket(Identifiers.send.Shop_List, packet.toByteArray())\r\n\r\n    def sendShamanItems(self):\r\n        shamanItems = [] if self.client.shamanItems == \"\" else self.client.shamanItems.split(\",\")\r\n\r\n        packet = ByteArray().writeShort(len(shamanItems))\r\n        for item in shamanItems:\r\n            if \"_\" in item:\r\n                custom = item.split(\"_\")[1] if len(item.split(\"_\")) >= 2 else \"\"\r\n                realCustom = [] if custom == \"\" else custom.split(\"+\")\r\n                packet.writeShort(int(item.split(\"_\")[0])).writeBoolean(item in self.client.shamanLook.split(\",\")).writeByte(len(realCustom) + 1)\r\n                x = 0\r\n                while x < len(realCustom):\r\n                    packet.writeInt(int(realCustom[x], 16))\r\n                    x += 1\r\n            else:\r\n                packet.writeShort(int(item)).writeBoolean(item in self.client.shamanLook.split(\",\")).writeByte(0)\r\n        self.client.sendPacket(Identifiers.send.Shaman_Items, packet.toByteArray())\r\n\r\n    def sendLookChange(self):\r\n        try:\r\n            p = ByteArray()\r\n            look = self.client.playerLook.split(\";\")\r\n            p.writeByte(int(look[0]))\r\n\r\n            for item in look[1].split(\",\"):\r\n                if \"_\" in item:\r\n                    itemSplited = item.split(\"_\")\r\n                    realItem = itemSplited[0]\r\n                    custom = itemSplited[1] if len(itemSplited) >= 2 else \"\"\r\n                    realCustom = [] if custom == \"\" else custom.split(\"+\")\r\n                    p.writeInt(int(realItem)).writeByte(len(realCustom))\r\n                    x = 0\r\n                    while x < len(realCustom):\r\n                        p.writeInt(int(realCustom[x], 16))\r\n                        x += 1\r\n                else:\r\n                    p.writeInt(int(item)).writeByte(0)\r\n\r\n            p.writeInt(int(self.client.mouseColor, 16))\r\n            self.client.sendPacket(Identifiers.send.Look_Change, p.toByteArray())\r\n        except: pass\r\n\r\n    def sendShamanLook(self):\r\n        items = ByteArray()\r\n\r\n        count = 0\r\n        for item in self.client.shamanLook.split(\",\"):\r\n            realItem = 
int(item.split(\"_\")[0]) if \"_\" in item else int(item)\r\n if realItem != 0:\r\n items.writeShort(realItem)\r\n count += 1\r\n self.client.sendPacket(Identifiers.send.Shaman_Look, ByteArray().writeShort(count).writeBytes(items.toByteArray()).toByteArray())\r\n\r\n def sendItemBuy(self, fullItem):\r\n self.client.sendPacket(Identifiers.send.Item_Buy, ByteArray().writeInt(fullItem).writeByte(1).toByteArray())\r\n\r\n def sendUnlockedBadge(self, badge):\r\n self.client.room.sendAll(Identifiers.send.Unlocked_Badge, ByteArray().writeInt(self.client.playerCode).writeShort(badge).toByteArray())\r\n\r\n def sendselfResult(self, type, playerName):\r\n self.client.sendPacket(Identifiers.send.self_Result, ByteArray().writeByte(type).writeUTF(playerName).writeByte(0).writeShort(0).toByteArray())\r\n\r\n def equipClothe(self, packet):\r\n clotheID = packet.readByte()\r\n for clothe in self.client.clothes:\r\n values = clothe.split(\"/\")\r\n if values[0] == \"%02d\" %(clotheID):\r\n self.client.playerLook = values[1]\r\n self.client.mouseColor = values[2]\r\n self.client.shamanColor = values[3]\r\n break\r\n \r\n self.sendLookChange()\r\n self.sendShopList(False)\r\n\r\n def saveClothe(self, packet):\r\n clotheID = packet.readByte()\r\n for clothe in self.client.clothes:\r\n values = clothe.split(\"/\")\r\n if values[0] == \"%02d\" %(clotheID):\r\n values[1] = self.client.playerLook\r\n values[2] = self.client.mouseColor\r\n values[3] = self.client.shamanColor\r\n self.client.clothes[self.client.clothes.index(clothe)] = \"/\".join(values)\r\n break\r\n\r\n self.sendShopList(False)\r\n\r\n def sendShopInfo(self): \r\n self.client.sendPacket(Identifiers.send.Shop_Info, ByteArray().writeInt(self.client.shopCheeses).writeInt(self.client.shopFraises).toByteArray())\r\n\r\n def equipItem(self, packet):\r\n if self.client.isBlockAttack:\r\n fullItem = packet.readInt()\r\n itemStr = str(fullItem)\r\n itemCat = (0 if fullItem / 10000 == 1 else fullItem /10000) if len(itemStr) > 4 else fullItem / 100\r\n item = int(itemStr[2 if len(itemStr) > 3 else 1:]) if len(itemStr) >= 3 else fullItem\r\n itemStr = str(item)\r\n\r\n equip = str(item) + self.getItemCustomization(fullItem, False)\r\n\r\n lookList = self.client.playerLook.split(\";\")\r\n lookItems = lookList[1].split(\",\")\r\n lookCheckList = lookItems[:]\r\n idx = 0\r\n while idx < len(lookCheckList):\r\n lookCheckList[idx] = lookCheckList[idx].split(\"_\")[0] if \"_\" in lookCheckList[idx] else lookCheckList[idx]\r\n idx += 1\r\n\r\n if itemCat <= 10:\r\n if lookCheckList[itemCat] == itemStr:\r\n lookItems[itemCat] = \"0\"\r\n else:\r\n lookItems[itemCat] = str(equip)\r\n\r\n elif itemCat == 21:\r\n lookList[0] = \"1\"\r\n color = \"bd9067\" if item == 0 else \"593618\" if item == 1 else \"8c887f\" if item == 2 else \"dfd8ce\" if item == 3 else \"4e443a\" if item == 4 else \"e3c07e\" if item == 5 else \"272220\" if item == 6 else \"78583a\"\r\n self.client.mouseColor = \"78583a\" if self.client.mouseColor == color else color\r\n else:\r\n if lookList[0] == itemStr:\r\n lookList[0] = \"1\"\r\n else:\r\n lookList[0] = itemStr\r\n\r\n self.client.playerLook = lookList[0] + \";\" + \",\".join(map(str, lookItems))\r\n self.sendLookChange()\r\n self.client.isBlockAttack = False\r\n self.client.blockAttack()\r\n\t\t\r\n def buyItem(self, packet):\r\n fullItem, withFraises = packet.readInt(), packet.readBoolean()\r\n itemCat = ((fullItem - 10000) / 10000) if fullItem > 9999 else fullItem / 100\r\n item = fullItem % 1000 if fullItem > 9999 else fullItem % 100 if 
fullItem > 999 else fullItem % (100 * itemCat) if fullItem > 99 else fullItem\r\n self.client.shopItems += str(fullItem) if self.client.shopItems == \"\" else \",\" + str(fullItem)\r\n price = self.getItemPromotion(itemCat, item, self.server.shopListCheck[str(itemCat) + \"|\" + str(item)][1 if withFraises else 0])\r\n if withFraises:\r\n self.client.shopFraises -= price\r\n else:\r\n self.client.shopCheeses -= price\r\n\r\n self.sendItemBuy(fullItem)\r\n self.sendShopList(False)\r\n self.client.sendAnimZelda(0, fullItem)\r\n self.checkUnlockShopTitle()\r\n self.checkUnlockShopBadge(fullItem)\r\n\r\n def customItemBuy(self, packet):\r\n fullItem, withFraises = packet.readInt(), packet.readBoolean()\r\n\r\n items = self.client.shopItems.split(\",\")\r\n for shopItem in items:\r\n item = shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem\r\n if fullItem == int(item):\r\n items[items.index(shopItem)] = shopItem + \"_\"\r\n break\r\n\r\n self.client.shopItems = \",\".join(items)\r\n if withFraises:\r\n self.client.shopFraises -= 20\r\n else:\r\n self.client.shopCheeses -= 2000\r\n\r\n if len(self.client.custom) == 1:\r\n if not fullItem in self.client.custom:\r\n self.client.custom.append(fullItem)\r\n else:\r\n if not str(fullItem) in self.client.custom:\r\n self.client.custom.append(str(fullItem))\r\n \r\n self.sendShopList(False)\r\n\r\n def customItem(self, packet):\r\n fullItem, length = packet.readInt(), packet.readByte()\r\n custom = length\r\n customs = list()\r\n\r\n i = 0\r\n while i < length:\r\n customs.append(packet.readInt())\r\n i += 1\r\n\r\n items = self.client.shopItems.split(\",\")\r\n for shopItem in items:\r\n sItem = shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem\r\n if fullItem == int(sItem):\r\n newCustoms = map(lambda color: \"%06X\" %(0xffffff & color), customs)\r\n\r\n items[items.index(shopItem)] = sItem + \"_\" + \"+\".join(newCustoms)\r\n self.client.shopItems = \",\".join(items)\r\n\r\n itemCat = (0 if fullItem / 10000 == 1 else fullItem / 10000) if fullItem > 9999 else fullItem / 100\r\n item = fullItem % 1000 if fullItem > 9999 else fullItem % 100 if fullItem > 999 else fullItem % (100 * itemCat) if fullItem > 99 else fullItem\r\n equip = str(item) + self.getItemCustomization(fullItem, False)\r\n lookList = self.client.playerLook.split(\";\")\r\n lookItems = lookList[1].split(\",\")\r\n\r\n if \"_\" in lookItems[itemCat]:\r\n if lookItems[itemCat].split(\"_\")[0] == str(item):\r\n lookItems[itemCat] = equip\r\n \r\n elif lookItems[itemCat] == str(item):\r\n lookItems[itemCat] = equip\r\n self.client.playerLook = lookList[0] + \";\" + \",\".join(lookItems)\r\n self.sendShopList(False)\r\n self.sendLookChange()\r\n break\r\n\r\n def buyShamanItem(self, packet):\r\n fullItem, withFraises = packet.readShort(), packet.readBoolean()\r\n price = self.server.shamanShopListCheck[str(fullItem)][1 if withFraises else 0]\r\n self.client.shamanItems += str(fullItem) if self.client.shamanItems == \"\" else \",\" + str(fullItem)\r\n\r\n if withFraises:\r\n self.client.shopFraises -= price\r\n else:\r\n self.client.shopCheeses -= price\r\n\r\n self.sendShopList(False)\r\n self.client.sendAnimZelda(1, fullItem)\r\n\r\n def equipShamanItem(self, packet):\r\n fullItem = packet.readInt()\r\n item = str(fullItem) + self.getItemCustomization(fullItem, True)\r\n itemStr = str(fullItem)\r\n itemCat = int(itemStr[:len(itemStr)-2])\r\n index = itemCat if itemCat <= 4 else itemCat - 1 if itemCat <= 7 else 7 if itemCat == 10 else 8 if itemCat == 17 else 9\r\n index -= 
1\r\n        lookItems = self.client.shamanLook.split(\",\")\r\n\r\n        if \"_\" in lookItems[index]:\r\n            if lookItems[index].split(\"_\")[0] == itemStr:\r\n                lookItems[index] = \"0\"\r\n            else:\r\n                lookItems[index] = item\r\n\r\n        elif lookItems[index] == itemStr:\r\n            lookItems[index] = \"0\"\r\n        else:\r\n            lookItems[index] = item\r\n\r\n        self.client.shamanLook = \",\".join(lookItems)\r\n        self.sendShamanLook()\r\n\r\n    def customShamanItemBuy(self, packet):\r\n        fullItem, withFraises = packet.readShort(), packet.readBoolean()\r\n\r\n        items = self.client.shamanItems.split(\",\")\r\n        for shopItem in items:\r\n            item = shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem\r\n            if fullItem == int(item):\r\n                items[items.index(shopItem)] = shopItem + \"_\"\r\n                break\r\n\r\n        self.client.shamanItems = \",\".join(items)\r\n        if withFraises:\r\n            self.client.shopFraises -= 150\r\n        else:\r\n            self.client.shopCheeses -= 4000\r\n\r\n        self.sendShopList(False)\r\n\r\n    def customShamanItem(self, packet):\r\n        fullItem, length = packet.readShort(), packet.readByte()\r\n        customs = []\r\n        i = 0\r\n        while i < length:\r\n            customs.append(packet.readInt())\r\n            i += 1\r\n\r\n        items = self.client.shamanItems.split(\",\")\r\n        for shopItem in items:\r\n            sItem = shopItem.split(\"_\")[0] if \"_\" in shopItem else shopItem\r\n            if fullItem == int(sItem):\r\n                newCustoms = map(lambda color: \"%06X\" %(0xFFFFFF & color), customs)\r\n\r\n                items[items.index(shopItem)] = sItem + \"_\" + \"+\".join(newCustoms)\r\n                self.client.shamanItems = \",\".join(items)\r\n\r\n                item = str(fullItem) + self.getItemCustomization(fullItem, True)\r\n                itemStr = str(fullItem)\r\n                itemCat = int(itemStr[:len(itemStr)-2])  # the category is the leading digits, matching equipShamanItem\r\n                index = itemCat if itemCat <= 4 else itemCat - 1 if itemCat <= 7 else 7 if itemCat == 10 else 8 if itemCat == 17 else 9\r\n                index -= 1\r\n                lookItems = self.client.shamanLook.split(\",\")\r\n\r\n                if \"_\" in lookItems[index]:\r\n                    if lookItems[index].split(\"_\")[0] == itemStr:\r\n                        lookItems[index] = item\r\n\r\n                elif lookItems[index] == itemStr:\r\n                    lookItems[index] = item\r\n\r\n                self.client.shamanLook = \",\".join(lookItems)\r\n                self.sendShopList()\r\n                self.sendShamanLook()\r\n                break\r\n\r\n    def buyClothe(self, packet):\r\n        clotheID, withFraises = packet.readByte(), packet.readBoolean()\r\n        self.client.clothes.append(\"%02d/%s/%s/%s\" %(clotheID, \"1;0,0,0,0,0,0,0,0,0,0,0\", \"78583a\", \"fade55\" if self.client.shamanSaves >= 1000 else \"95d9d6\"))\r\n        if withFraises:\r\n            self.client.shopFraises -= 5 if clotheID == 0 else 50 if clotheID == 1 else 100\r\n        else:\r\n            # prices in cheese when the purchase is not paid with strawberries\r\n            self.client.shopCheeses -= 40 if clotheID == 0 else 1000 if clotheID == 1 else 2000 if clotheID == 2 else 4000\r\n\r\n        self.sendShopList(False)\r\n\r\n    def sendself(self, packet):\r\n        playerName, isShamanItem, fullItem, message = packet.readUTF(), packet.readBoolean(), packet.readShort(), packet.readUTF()\r\n        if not self.server.checkExistingUser(playerName):\r\n            self.sendselfResult(1, playerName)\r\n        else:\r\n            player = self.server.players.get(playerName)\r\n            if player != None:\r\n                if (player.Shop.checkInShamanShop(fullItem) if isShamanItem else player.Shop.checkInShop(fullItem)):\r\n                    self.sendselfResult(2, playerName)\r\n                else:\r\n                    self.server.lastselfID += 1\r\n                    player.sendPacket(Identifiers.send.Shop_self, ByteArray().writeInt(self.server.lastselfID).writeUTF(self.client.playerName).writeUTF(self.client.playerLook).writeBoolean(isShamanItem).writeShort(fullItem).writeUTF(message).writeBoolean(False).toByteArray())\r\n                    self.sendselfResult(0, playerName)\r\n                    
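# remember the pending gift so that selfResult() can credit the right item when the recipient answers\r\n                    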
self.server.shopselfs[self.server.lastselfID] = [self.client.playerName, isShamanItem, fullItem]\r\n                    self.client.shopFraises -= self.getShamanShopItemPrice(fullItem) if isShamanItem else self.getShopItemPrice(fullItem)\r\n                    self.sendShopList()\r\n            else:\r\n                selfs = \"\"\r\n                if (self.checkInPlayerShop(\"ShamanItems\" if isShamanItem else \"ShopItems\", playerName, fullItem)):\r\n                    self.sendselfResult(2, playerName)\r\n                else:\r\n                    self.Cursor.execute(\"select selfs from Users where Username = %s\", [playerName])\r\n                    rs = self.Cursor.fetchone()\r\n                    selfs = rs[0]\r\n\r\n                    selfs += (\"\" if selfs == \"\" else \"/\") + binascii.hexlify(\"|\".join(map(str, [self.client.playerName, self.client.playerLook, isShamanItem, fullItem, message])))\r\n                    self.Cursor.execute(\"update Users set selfs = %s where Username = %s\", [selfs, playerName])\r\n                    self.sendselfResult(0, playerName)\r\n\r\n    def selfResult(self, packet):\r\n        selfID, isOpen, message, isMessage = packet.readInt(), packet.readBoolean(), packet.readUTF(), packet.readBoolean()\r\n        if isOpen:\r\n            values = self.server.shopselfs[int(selfID)]\r\n            player = self.server.players.get(str(values[0]))\r\n            if player != None:\r\n                player.sendLangueMessage(\"$DonItemRecu\", self.client.playerName)\r\n\r\n            isShamanItem = bool(values[1])\r\n            fullItem = int(values[2])\r\n            if isShamanItem:\r\n                self.client.shamanItems += str(fullItem) if self.client.shamanItems == \"\" else \",%s\" %(fullItem)\r\n                self.sendShopList(False)\r\n                self.client.sendAnimZelda(1, fullItem)\r\n            else:\r\n                self.client.shopItems += str(fullItem) if self.client.shopItems == \"\" else \",%s\" %(fullItem)\r\n                self.client.sendAnimZelda(0, fullItem)\r\n                self.checkUnlockShopTitle()\r\n                self.checkUnlockShopBadge(fullItem)\r\n\r\n        elif not message == \"\":\r\n            values = self.server.shopselfs[int(selfID)]\r\n            player = self.server.players.get(str(values[0]))\r\n            if player != None:\r\n                player.sendPacket(Identifiers.send.Shop_self, ByteArray().writeInt(selfID).writeUTF(self.client.playerName).writeUTF(self.client.playerLook).writeBoolean(bool(values[1])).writeShort(int(values[2])).writeUTF(message).writeBoolean(True).toByteArray())\r\n            else:\r\n                messages = \"\"\r\n                self.Cursor.execute(\"select Messages from Users where Username = %s\", [str(values[0])])\r\n                rs = self.Cursor.fetchone()\r\n                messages = rs[0]\r\n\r\n                messages += (\"\" if messages == \"\" else \"/\") + binascii.hexlify(\"|\".join(map(str, [self.client.playerName, self.client.playerLook, values[1], values[2], message])))\r\n                self.Cursor.execute(\"update Users set Messages = %s where Username = %s\", [messages, str(values[0])])\r\n\r\n    def checkselfsAndMessages(self, lastReceivedselfs, lastReceivedMessages):\r\n        needUpdate = False\r\n        selfs = lastReceivedselfs.split(\"/\")\r\n        # iterate over the serialized pending gifts; the loop variable must not shadow 'self'\r\n        for giftData in selfs:\r\n            if not giftData == \"\":\r\n                values = binascii.unhexlify(giftData).split(\"|\", 4)\r\n                self.server.lastselfID += 1\r\n                self.client.sendPacket(Identifiers.send.Shop_self, ByteArray().writeInt(self.server.lastselfID).writeUTF(values[0]).writeUTF(values[1]).writeBoolean(bool(values[2])).writeShort(int(values[3])).writeUTF(values[4] if len(values) > 4 else \"\").writeBoolean(False).toByteArray())\r\n                self.server.shopselfs[self.server.lastselfID] = [values[0], bool(values[2]), int(values[3])]\r\n                needUpdate = True\r\n\r\n        messages = lastReceivedMessages.split(\"/\")\r\n        for message in messages:\r\n            if not message == \"\":\r\n                values = binascii.unhexlify(message).split(\"|\", 4)\r\n                self.client.sendPacket(Identifiers.send.Shop_self_Message, 
ByteArray().writeShort(0).writeShort(0).writeUTF(values[0]).writeBoolean(bool(values[1])).writeShort(int(values[2])).writeUTF(values[4]).writeUTF(values[3]).writeBoolean(True).toByteArray())\r\n                needUpdate = True\r\n\r\n        if needUpdate:\r\n            self.Cursor.execute(\"update Users set selfs = '', Messages = '' where Username = %s\", [self.client.playerName])\r\n","sub_path":"SlyShop.py","file_name":"SlyShop.py","file_ext":"py","file_size_in_byte":27716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"551439678","text":" # -*- coding: utf-8 -*-\n\n\nimport numpy as np\n\nclass Fonctions:\n\n    ''' Solves the N-Queens problem. The different algorithms inherit from this class '''\n\n\n    def test( permutation, n ):\n        ''' checks that the permutation is a solution\n        - permutation : permutation to check\n        - n : size of the chessboard'''\n        for i in range( n - 1 ):\n            for j in range( i + 1, n ):\n                if j != i and abs( j - i ) == abs( abs( permutation[j] )-abs( permutation[i] ) ):\n                    return False\n        return True\n\n\n\n    def test2( permutation, n, i ):\n        ''' tests the conflicts of a permutation on the i-th row\n        - permutation : permutation to check\n        - n : size of the chessboard\n        - i : row to check '''\n        for j in range( n ):\n            if j != i and ( permutation[j] == permutation[i] or abs( j - i ) == abs( abs( permutation[j] )-abs( permutation[i] ) ) ):\n                return False\n        return True\n\n\n    def test3( matriceDeCollision, i, n):\n        '''counts the number of viable placements on the i-th row\n        - matriceDeCollision : matrix marking the squares threatened or occupied by a queen\n        - i : row on which to count the viable placements\n        - n : size of the chessboard'''\n        compteur = 0\n        for k in range(n):\n            if ( matriceDeCollision[i][k] == 1 ) :\n                compteur = compteur + 1\n        return compteur\n\n    def test4( permutation, L, i):\n        ''' tests the conflicts of a permutation on the i-th row\n        - permutation : permutation to check\n        - L : list of the indices in filling order\n        - i : number of rows already filled'''\n        for k in range(i):\n            if int(L[k]) != int(L[i]) and ( permutation[ int(L[k]) ] == permutation[ int(L[i]) ] or abs( int(L[k]) - int(L[i]) ) == abs( abs( permutation[ int(L[k]) ] )-abs( permutation[ int(L[i]) ] ) ) ): # int() because the indices may be stored as floats\n                return False\n        return True\n\n    def miseAJourCollisions( i, j, matriceCollision, n ):\n        ''' updates the collision matrix given that a queen has been added at (i,j)\n        - i,j : coordinates of the new queen\n        - matriceCollision : collision matrix\n        - n : size of the chessboard'''\n        for k in range(n):\n            matriceCollision[k][j] = 1\n            matriceCollision[i][k] = 1\n            if ( j - i + k >= 0 ) and (j - i + k < n):\n                matriceCollision[k][j-i+k] = 1\n            if ( j + i - k >= 0 ) and (j + i - k < n):\n                matriceCollision[k][j+i-k] = 1\n\n\n    def nextPermutationSJT(permutation, n):\n        ''' for a given permutation, computes the next permutation according to the Steinhaus–Johnson–Trotter algorithm.\n        The direction must be encoded in the sign (positive number: direction to the left, negative number: direction to the right)\n        - permutation : current permutation\n        - n : size of the chessboard '''\n        # look for the largest element\n        for j in range(n-1, -1, -1):\n            for i in range(n):\n                if permutation[i] == j or permutation[i] == -j:\n                    p = i\n                    break\n            # check whether a shift to the left is possible\n            if permutation[p] > 0 and p > 0 and abs( 
permutation[p - 1] ) < abs( permutation[p] ):\n                ( permutation[p], permutation[p - 1] ) = ( permutation[p - 1], permutation[p] )\n                return True\n            # check whether a shift to the right is possible\n            if permutation[p] < 0 and p < n - 1 and abs( permutation[p + 1] ) < abs( permutation[p] ):\n                ( permutation[p], permutation[p + 1] ) = ( permutation[p + 1], permutation[p] )\n                return True\n            # if no shift is possible, change the direction\n            permutation[p] = -permutation[p]\n        return False\n\n    test = staticmethod(test)\n    test2 = staticmethod(test2)\n    test3 = staticmethod(test3)\n    test4 = staticmethod(test4)\n    miseAJourCollisions = staticmethod(miseAJourCollisions)\n    nextPermutationSJT = staticmethod(nextPermutationSJT)\n","sub_path":"Fonctions.py","file_name":"Fonctions.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"96513843","text":"\n\nfrom xai.brain.wordbase.nouns._henna import _HENNA\n\n# class header\nclass _HENNAS(_HENNA, ):\n\tdef __init__(self,): \n\t\t_HENNA.__init__(self)\n\t\tself.name = \"HENNAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"henna\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_hennas.py","file_name":"_hennas.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"332471464","text":"# Count Words in a String\r\ndef count_words():\r\n    try:\r\n        total = 0\r\n        FILE = input(\"Enter the name of your text file\\n>: \")\r\n        input_file = open(FILE, 'r')\r\n        for word in input_file.read().split():\r\n            total += 1\r\n        print(\"Total words in file is\", total)\r\n\r\n    except FileNotFoundError:\r\n        print(\"\\nNo such file exists\")\r\n        sentence = input(\"Enter a sentence instead\\n>: \")\r\n        # split() handles repeated and leading/trailing spaces, unlike counting ' ' characters\r\n        words = len(sentence.split())\r\n        print(\"Your sentence contains\", words, \"words\")\r\n\r\nif __name__ == \"__main__\":\r\n    count_words()\r\n","sub_path":"count_string.py","file_name":"count_string.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353357150","text":"#!/usr/bin/python\n\nfrom PyQt5.QtWidgets import QWidget, QCalendarWidget, QLabel, QApplication, QHBoxLayout, QMessageBox, QListWidget, \\\n    QVBoxLayout, QTableWidget, QAbstractItemView, QDesktopWidget, QTableWidgetItem, \\\n    QDialog, QDialogButtonBox, QComboBox, QHeaderView\nfrom PyQt5.QtCore import QDate, Qt, QObject, pyqtSignal, QMetaMethod\nfrom PyQt5.QtGui import QBrush, QColor\n\nimport sys\nimport requests\n\nfrom datetime import timedelta, datetime\n\nfrom convenience.server_apis import make_http_request, user_is_admin\nfrom convenience.datetime_management import datetime_to_UTC, datetime_UTC_to_local\nfrom entities.user import User\nfrom entities.user_category import UserCategory\nfrom entities.parking_lot import ParkingLot\nfrom entities.parking_slot import ParkingSlot\nfrom entities.vehicle_type import VehicleType\nfrom entities.booking import Booking\nfrom entities.hourly_rate import HourlyRate\nfrom entities.vehicle import Vehicle\n\n\nclass Table_signal(QObject):\n    message = pyqtSignal(int, str, str)  # parking_slot_number, start_time, end_time\n\n\nclass Bookings_table(QTableWidget):\n    def __init__(self, rows, columns, parent, signal):\n        
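# keep a reference to the shared signal used to report the selected slot and time range\n        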
super().__init__(rows, columns, parent)\n        self.signal = signal\n        self.init_UI()\n\n    def init_UI(self):\n        self.setEditTriggers(QAbstractItemView.NoEditTriggers)  # not editable\n        self.horizontalHeader().setDefaultSectionSize(65)  # column width\n        self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)\n\n        # free parking slots stay white; the other states use these colors\n        self.cells_colors = {\n            \"user_bookings\": \"#FFA200\",\n            \"other_bookings\": \"#A8ADAF\",\n            \"bookings_separation\": \"#D2D5D7\",\n            \"disability\": \"#138DF3\"\n        }\n\n        # table column names\n        self.quarters_of_hour = [str(timedelta(minutes=15)*i) for i in range(97)]  # index 96 = 1 day, 0:00:00 (24:00:00)\n        for i, q in enumerate(self.quarters_of_hour):\n            q_split = q.split(\":\")\n            item = str(\"0\" + q_split[0] if len(q_split[0]) == 1 else q_split[0]) + \":\" + str(q_split[1])  # e.g. 1:00:00 -> 01:00\n            self.quarters_of_hour[i] = item\n\n        self.setHorizontalHeaderLabels(self.quarters_of_hour)\n\n        # move scrollbar slider to actual time slot\n        now_time = datetime.now().time()\n        hour = int(now_time.strftime(\"%H\"))\n        minutes = int(now_time.strftime(\"%M\"))\n        quarter_of_hour_idx = hour * 4 + minutes//15\n        self.horizontalScrollBar().setValue(quarter_of_hour_idx)\n\n        self.show()\n\n\n    def mouseReleaseEvent(self, event):\n        super().mouseReleaseEvent(event)  # base behaviour on mouse release (e.g. clear selection)\n\n        if event.button() == Qt.LeftButton:  # Release event only if done with left button\n            indexSelection = [(item.row(), item.column()) for item in self.selectedIndexes()]\n\n            if len(indexSelection) != 0:  # at least one cell selected\n                # check on selection validity, only with multiple cells selected\n                error = False\n                for i in range(1, len(indexSelection)):\n                    # invalid if the selection spans different rows or non-contiguous cells;\n                    # 'or', not '|': bitwise '|' binds tighter than 'not in'\n                    if (indexSelection[i][0] != indexSelection[i-1][0]) or (indexSelection[i][1] not in [indexSelection[i-1][1] + 1, indexSelection[i-1][1] - 1]):\n                        error = True\n                if error:\n                    QMessageBox.critical(self, \"Error\", \"Not a valid selection!\")\n                    return\n\n                # if no errors occurred, proceed: take the first and last cell in the selection\n                parking_slot_number = indexSelection[0][0] + 1  # parking slot numbers start from one\n                booking_start_time = self.quarters_of_hour[indexSelection[0][1]]  # format xx:xx\n                booking_end_time = self.quarters_of_hour[indexSelection[-1][1] + 1]\n                self.signal.message.emit(parking_slot_number, booking_start_time, booking_end_time)\n\n            self.clearSelection()\n\n\n    def item_paint(self, row_index, col_index, color, deactivate=True):\n        self.setItem(row_index, col_index, QTableWidgetItem(\"\"))\n        cell = self.item(row_index, col_index)\n        cell.setBackground(QBrush(QColor(color)))\n\n        # colors handling\n        if color == self.cells_colors[\"other_bookings\"]:\n            cell.setToolTip(\"Other user booking.\")\n        elif color == self.cells_colors[\"user_bookings\"]:\n            cell.setToolTip(\"Your booking.\")\n        elif color == self.cells_colors[\"bookings_separation\"]:\n            cell.setToolTip(\"Time interval between one booking and another\")\n        elif color == self.cells_colors[\"disability\"]:\n            cell.setToolTip(\"Parking slot reserved for disability.\")\n\n        if deactivate == True:  # always True, except for reserved slots shown to a disabled user\n            cell.setFlags(Qt.NoItemFlags)  # cell not selectable, editable, etc.\n\n\nclass BookingDialog(QDialog):\n    def __init__(self, start, end, duration, parking_slot, user_vehicles, slot_vehicle_type_id, amount, parent=None):\n        super().__init__(parent=parent)\n\n        self.setWindowTitle(\"Booking\")\n\n        QBtn = QDialogButtonBox.Ok | 
QDialogButtonBox.Cancel\n self.buttonBox = QDialogButtonBox(QBtn)\n self.buttonBox.accepted.connect(self.accept) # QDialog slot\n self.buttonBox.rejected.connect(self.reject)\n\n self.layout = QVBoxLayout()\n self.layout.addWidget(QLabel(\"Booking info: \"), 0, Qt.AlignCenter)\n self.layout.addWidget(QLabel(\"Start: \" + start))\n self.layout.addWidget(QLabel(\"End: \" + end))\n self.layout.addWidget(QLabel(\"Duration: \" + str(duration)))\n self.layout.addWidget(QLabel(\"Parking slot: \" + str(parking_slot)))\n self.layout.addWidget(QLabel(\"Vehicle: \"))\n # filter on user vehicles\n self.user_vehicles_cmb = QComboBox()\n self.user_vehicles_cmb.addItems([f\"{vehicle.get_license_plate()} - {vehicle.get_brand()} - {vehicle.get_model()}\" for vehicle in user_vehicles if vehicle.get_id_vehicle_type() == slot_vehicle_type_id])\n\n if self.user_vehicles_cmb.count() == 0:\n self.buttonBox = QDialogButtonBox(QDialogButtonBox.Cancel)\n self.buttonBox.setCenterButtons(True)\n self.buttonBox.rejected.connect(self.reject)\n\n self.layout.addWidget(self.user_vehicles_cmb)\n self.layout.addWidget(QLabel(\"Amount: \" + amount + \"\"))\n self.layout.addSpacing(20)\n self.layout.addWidget(self.buttonBox)\n\n self.setLayout(self.layout)\n\n\n def selected_vehicle(self):\n return self.user_vehicles_cmb.currentText()\n\n\nclass Park(QWidget):\n def __init__(self, https_session, user):\n super().__init__()\n self.https_session = https_session\n self.user = user\n Park.initUI(self)\n\n def initUI(self):\n self.table_signal = Table_signal()\n self.table_signal.message.connect(self.make_booking)\n\n vbox_main = QVBoxLayout(self)\n\n table_hbox = QHBoxLayout()\n self.tableWidget = Bookings_table(0, 96, self, self.table_signal) # 0 is the parking lot default number of slots\n table_hbox.addWidget(self.tableWidget, 10)\n\n self.vehicle_types_table = QTableWidget()\n self.vehicle_types_table.setEditTriggers(QAbstractItemView.NoEditTriggers)\t\t\t# no editable\n self.vehicle_types_table.setColumnCount(1)\n\n self.vehicle_types_table.setHorizontalHeaderLabels([\"Reserved: \"])\n self.vehicle_types_table.horizontalHeader().setStretchLastSection(True);\n self.vehicle_types_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch);\n\n self.vehicle_types_table.verticalHeader().hide() # possible modification with another column which items are parking slot numbers\n self.vehicle_types_table.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n table_hbox.addWidget(self.vehicle_types_table, 1)\n\n # synchronize scrolling between slots table and vehicle types table\n self.tableWidget.verticalScrollBar().valueChanged.connect(self.vehicle_types_table.verticalScrollBar().setValue);\n self.vehicle_types_table.verticalScrollBar().valueChanged.connect(self.tableWidget.verticalScrollBar().setValue);\n\n self.pl_vbox = QVBoxLayout()\n self.pl_vbox.setSpacing(0)\n name_lbl = QLabel(\"Parking lots: \")\n name_lbl.setStyleSheet(\"font-size: 14px;\")\n self.pl_vbox.addWidget(name_lbl, 1, Qt.AlignBottom)\n self.parking_lots_list = QListWidget()\n self.parking_lots_list.setStyleSheet(\"font: 11pt Arial;\")\n self.parking_lots_list.setMinimumHeight(150)\n self.parking_lots_list.setMinimumWidth(250)\n self.parking_lots_list.currentItemChanged.connect(lambda : self.get_bookings(self.parking_lots[self.parking_lots_list.currentRow()], True))\n self.pl_vbox.addWidget(self.parking_lots_list, 2, Qt.AlignTop)\n\n bottom_hbox = QHBoxLayout()\n bottom_hbox.addStretch(1)\n bottom_hbox.addLayout(self.pl_vbox, 1)\n 
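        # alternating stretches keep the parking-lot list, calendar and window
        # margins evenly spaced; the calendar picks the day whose bookings are
        # shown (its selectionChanged signal is connected in showEvent)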
bottom_hbox.addStretch(1)\n\n self.cal = QCalendarWidget(self)\n self.cal.setGridVisible(True)\n #self.cal.setMinimumDate(QDate.currentDate())\t\t# doesn't show previous days\n\n bottom_hbox.addWidget(self.cal, 1)\n bottom_hbox.addStretch(1)\n\n text = QLabel(\"Park\")\n text.setStyleSheet(\"font-family: Ubuntu; font-size: 30px;\")\n vbox_main.addWidget(text, 2, Qt.AlignTop | Qt.AlignHCenter)\n\n vbox_main.addLayout(table_hbox, 10)\n vbox_main.addStretch(2)\n vbox_main.addLayout(bottom_hbox, 6)\n vbox_main.addStretch(2)\n self.setLayout(vbox_main)\n\n self.setWindowTitle(\"Park\")\n self.show()\n\n\n # get info on vehicle type of parking_slots\n def get_vehicle_types(self):\n self.vehicle_types = []\n response = make_http_request(self.https_session, \"get\", \"vehicle_types\")\n if response.json():\n self.vehicle_types = [VehicleType(**vehicle_type) for vehicle_type in response.json()]\n\n\n # get info on parking lots\n def get_parking_lots(self):\n self.parking_lots_list.clear()\n \n response = make_http_request(self.https_session, \"get\", \"parking_lots\")\n\n if response.json(): # if json is not null\n self.parking_lots = [ParkingLot(**parking_lots) for parking_lots in response.json()]\n else:\n self.parking_lots = []\n return\n\n if self.parking_lots:\n\n for parking_lot in self.parking_lots:\n\n self.parking_lots_list.addItem(f\"{parking_lot.get_name()} - {parking_lot.get_street().capitalize()}\")\n # get info on parking slots of parking lot\n response = make_http_request(self.https_session, \"get\", \"parking_lots/\" + str(parking_lot.get_id()) + \"/parking_slots\")\n if response.json():\n parking_lot.set_parking_slots([ParkingSlot(**parking_slot) for parking_slot in response.json()])\n\n self.parking_lots_list.setCurrentRow(0) # first item selected (default)\n self.parking_lot = self.parking_lots[0] # self.parking_lot = actually selected parking lot (default)\n\n\n def disability_check(self):\n disabled_user = self.user.get_disability()\n slots_row_indexes_disability = [slot.get_number() - 1 for slot in self.parking_lot.get_parking_slots() if slot.get_reserved_disability() == True]\n for row_index in slots_row_indexes_disability:\n for col_index in range(96):\n self.tableWidget.item_paint(row_index, col_index, self.tableWidget.cells_colors[\"disability\"], not disabled_user) # only disabled user can make bookings on reserved slots\n\n\n def get_bookings(self, parking_lot, selected_park_changed):\n # GET bookings?since=xxxx-xx-xx&until=xxxx-xx-xx&id_user=x&id_parking_lot=x\n self.parking_lot = parking_lot\n # clear previously selected info (also items background color). Removes all items not in the headers.\n self.tableWidget.clearContents()\n # check on slots reserved for disability and set color\n self.disability_check()\n\n # selected_park_changed=True when there is a change in selected parking lot. Info on parking slots needs to be updated only in this case. 
Otherwise in case of date change is equal to False.\n if selected_park_changed:\n self.tableWidget.setRowCount(self.parking_lot.get_num_parking_slots())\n # shows the associated vehicle type for each parking slot\n self.vehicle_types_table.clearContents()\n self.vehicle_types_table.setRowCount(self.tableWidget.rowCount()) # each parking slot has associated exactly one vehicle type\n\n for i in range(self.vehicle_types_table.rowCount()):\n try:\n vehicle_type_index = self.vehicle_types.index(VehicleType(id=self.parking_lot.get_parking_slots()[i].get_id_vehicle_type()))\n except ValueError:\n return\n else:\n vehicle_type_name = self.vehicle_types[vehicle_type_index].get_name()\n self.vehicle_types_table.setItem(i, 0, QTableWidgetItem(vehicle_type_name))\n\n day = self.cal.selectedDate()\n day_before = day.addDays(-1).toString(Qt.ISODate)\n day = day.toString(Qt.ISODate)\n\n # start: from local 00:00:00 to utc (e.g. local 2021-04-01 00:00:00 = UTC 2021-03-31 22:00)\n # end: from local 23:59:00 to utc\n # bookings_start_datetime could be different from bookings_end_datetime (max 1 day)\n bookings_start_datetime = datetime_to_UTC(day_before, \"23:45\", True) # is_query = True\n bookings_end_datetime = datetime_to_UTC(day, \"23:59\", True) # 23:59 for include bookings of time xx:45, 23:45 is enough\n\n response = make_http_request(self.https_session, \"get\", \"bookings\", params = {\"since\":bookings_start_datetime, \"until\":bookings_end_datetime, \"id_parking_lot\":self.parking_lot.get_id()})\n if response.json():\n bookings = [Booking(**booking) for booking in response.json()]\n\n for booking in bookings:\n # server bookings are in UTC\n booking_start = datetime_UTC_to_local(booking.get_datetime_start())\n booking_start = booking_start if booking_start.split()[0] == day else day + \" 00:00:00\" # if booking start day in localtime is different from booking end day in localtime (booking on multiple days) select all table cells.\n\n booking_end = datetime_UTC_to_local(booking.get_datetime_end())\n # Set end time to 24:00:00 for bookings which end day is temporally subsequent to the selected day.\n booking_end = booking_end if booking_end.split()[0] == day else day + \" 24:00:00\"\n\n # show booking in table\n booking_start_time_split = booking_start.split()[1].split(\":\") # list with booking time info\n booking_end_time_split = booking_end.split()[1].split(\":\")\n\n # find table row index\n try:\n slot_index = self.parking_lot.get_parking_slots().index(ParkingSlot(id = booking.get_id_parking_slot()))\n except ValueError:\n return\n #continue\n else:\n table_row_index = self.parking_lot.get_parking_slots()[slot_index].get_number() - 1\n\n # find table column indexes\n start_table_col_index = int(booking_start_time_split[0])*4 + int(booking_start_time_split[1])//15 # table column index = hour*4 + minutes/15\n end_table_col_index = int(booking_end_time_split[0])*4 + int(booking_end_time_split[1])//15\n\n # show booking in table, different color for booking owner user\n color = self.tableWidget.cells_colors[\"user_bookings\"] if booking.get_id_user() == self.user.get_id() else self.tableWidget.cells_colors[\"other_bookings\"]\n for i in range(start_table_col_index, end_table_col_index):\n self.tableWidget.item_paint(table_row_index, i, color)\n\n # deny bookings 15 minutes before and after existing bookings\n deactivate = not (booking.get_id_user() == self.user.get_id())\n if (start_table_col_index > 0):\n if self.tableWidget.item(table_row_index, start_table_col_index-1) == None:\n 
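                        # paint a 15-minute separation buffer only while the
                        # neighbouring quarter is still free, so consecutive
                        # bookings stay one slot apart; the cell stays clickable
                        # for the booking's own user (see `deactivate` above)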
self.tableWidget.item_paint(table_row_index, start_table_col_index-1, self.tableWidget.cells_colors[\"bookings_separation\"], deactivate)\n if (end_table_col_index < 96):\n if self.tableWidget.item(table_row_index, end_table_col_index) == None:\n self.tableWidget.item_paint(table_row_index, end_table_col_index, self.tableWidget.cells_colors[\"bookings_separation\"], deactivate)\n\n\n def get_booking_amount(self, utc_start_datetime_string, utc_end_datetime_string, slot_vehicle_type_id):\n # amount = hourly_rate * rate_percentage * time_interval\n # 1. hourly rate\n user_category_id = self.user.get_id_user_category()\n # obtain user_categories\n response = make_http_request(self.https_session, \"get\", \"user_categories\")\n if response.json():\n user_categories = [UserCategory(**user_category) for user_category in response.json()]\n\n try:\n category_index = user_categories.index(UserCategory(id = user_category_id))\n except ValueError:\n return\n else:\n hourly_rate_id = user_categories[category_index].get_id_hourly_rate()\n if hourly_rate_id == None:\n QMessageBox.alert(self, \"Alert\", \"There isn't yet an hourly_rate associated with this category.\")\n return\n else:\n return\n\n # obtain hourly rates and filter\n response = make_http_request(self.https_session, \"get\", \"hourly_rates\")\n if response.json():\n hourly_rates = [HourlyRate(**hourly_rate) for hourly_rate in response.json()]\n\n try:\n hourly_rate_index = hourly_rates.index(HourlyRate(id = hourly_rate_id))\n except ValueError:\n return\n else:\n hourly_rate_amount = float(hourly_rates[hourly_rate_index].get_amount())\n else:\n return\n\n # 2. rate_percentage\n try:\n vehicle_type_index = self.vehicle_types.index(VehicleType(id = slot_vehicle_type_id))\n except ValueError:\n return\n else:\n rate_percentage = float(self.vehicle_types[vehicle_type_index].get_rate_percentage())\n\n # 3. 
time_inverval = utc_end_datetime_string - utc_start_datetime_string\n time_interval_seconds = (datetime.strptime(f\"{utc_end_datetime_string}\", \"%Y-%m-%d %H:%M:%S\") - datetime.strptime(f\"{utc_start_datetime_string}\", \"%Y-%m-%d %H:%M:%S\")).total_seconds()\n\n booking_amount = \"{:.2f}\".format((hourly_rate_amount/3600) * rate_percentage * time_interval_seconds)\n return booking_amount\n\n\n def make_booking(self, parking_slot_number, start_time, end_time):\n if user_is_admin(self.user, self.https_session):\n QMessageBox.information(self, \"uPark tip\", \"Admin can't make bookings!\")\n return\n\n if not self.parking_lots:\n QMessageBox.information(self, \"uPark tip\", \"There aren't parking lots for your category!\")\n return\n\n day = str(self.cal.selectedDate().toString(Qt.ISODate))\n\n # booking start and end in UTC\n utc_start_datetime_string = datetime_to_UTC(day, start_time)\n utc_end_datetime_string = datetime_to_UTC(day, end_time)\n\n # booking duration, work with timezone and daylight saving time\n booking_duration = (datetime.strptime(f\"{utc_end_datetime_string}\", \"%Y-%m-%d %H:%M:%S\") - datetime.strptime(f\"{utc_start_datetime_string}\", \"%Y-%m-%d %H:%M:%S\"))\n\n response = make_http_request(self.https_session, \"get\", \"users/\" + str(self.user.get_id()) + \"/vehicles\")\n if response.json():\n user_vehicles = [Vehicle(**user_vehicle) for user_vehicle in response.json()]\n else:\n user_vehicles = []\n\n # obtain slot vehicle type name\n slot_vehicle_type = self.vehicle_types_table.item(self.tableWidget.currentRow(), 0).text()\n\n try:\n vehicle_type_index = self.vehicle_types.index(VehicleType(name = slot_vehicle_type))\n except ValueError:\n return\n else:\n slot_vehicle_type_id = self.vehicle_types[vehicle_type_index].get_id()\n\n # it's necessary convert back from UTC to local time zone to handle daylight saving time\n booking_amount = self.get_booking_amount(utc_start_datetime_string, utc_end_datetime_string, slot_vehicle_type_id)\n\n datetime_start = datetime_UTC_to_local(utc_start_datetime_string)\n datetime_end = datetime_UTC_to_local(utc_end_datetime_string)\n\n booking_dialog = BookingDialog(datetime_start, datetime_end, booking_duration, parking_slot_number, user_vehicles, slot_vehicle_type_id, booking_amount, self)\n # if clicked OK button\n if booking_dialog.exec_():\n vehicle = booking_dialog.selected_vehicle()\n if not vehicle:\n return\n try:\n vehicle_index = user_vehicles.index(Vehicle(license_plate = vehicle.split()[0]))\n slot_index = self.parking_lot.get_parking_slots().index(ParkingSlot(number = parking_slot_number))\n except ValueError:\n return\n else:\n vehicle_id = user_vehicles[vehicle_index].get_id()\n parking_slot_id = int(self.parking_lot.get_parking_slots()[slot_index].get_id())\n\n booking = {\n \"datetime_start\": utc_start_datetime_string,\n \"datetime_end\": utc_end_datetime_string,\n \"id_vehicle\": vehicle_id,\n \"id_parking_slot\": parking_slot_id\n }\n\n response = make_http_request(self.https_session, \"post\", \"users/\" + str(self.user.get_id()) + \"/bookings\", json = booking)\n if response:\n json_response = response.json()\n QMessageBox.information(self, \"Booking response\", json_response[\"message\"] + \"\\nPrice: \" + str(json_response[\"amount\"]))\n self.get_bookings(self.parking_lot, False)\n\n\n def showEvent(self, event):\n self.get_vehicle_types()\n self.get_parking_lots()\n if self.parking_lots_list:\n self.cal.selectionChanged.connect(lambda : 
self.get_bookings(self.parking_lots[self.parking_lots_list.currentRow()], False))\n self.get_bookings(self.parking_lots[self.parking_lots_list.currentRow()], False)\n else:\n try:\n self.cal.disconnect()\n except:\n pass\n","sub_path":"uPark_client/ui_widgets/common/park.py","file_name":"park.py","file_ext":"py","file_size_in_byte":23572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"395114284","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n FaceLibs is a library with some methods are used for\r\n Face detection and Face recognition\r\n (Created by: Nguyen Bao Trung)\r\n\"\"\"\r\n\r\nfrom threading import Thread\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\n# These below are Global variables,\r\n# Please don't change (except Path to directory of them change):\r\nFACE_CASCADEPATH = cv2.CascadeClassifier(\r\n \"cascade/haarcascade_frontalface_default.xml\")\r\nEYE_CASCADEPATH = cv2.CascadeClassifier(\r\n \"cascade/haarcascade_eye.xml\")\r\nNAME_LIST = \"Names.txt\"\r\n# Changing if it is neccessary\r\nFONT = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n\r\nclass WebcamVideoStream:\r\n \"\"\"\r\n Class is taking from imutils library of Adrian Rosebrock (PyImageSearch)\r\n \"\"\"\r\n def __init__(self, src=0):\r\n # initialize the video camera stream and read the first frame\r\n # from the stream\r\n self.stream = cv2.VideoCapture(src)\r\n (self.grabbed, self.frame) = self.stream.read()\r\n\r\n # initialize the variable used to indicate if the thread should\r\n # be stopped\r\n self.stopped = False\r\n\r\n def start(self):\r\n # start the thread to read frames from the video stream\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n return self\r\n\r\n def update(self):\r\n # keep looping infinitely until the thread is stopped\r\n while True:\r\n # if the thread indicator variable is set, stop the thread\r\n if self.stopped:\r\n return\r\n\r\n # otherwise, read the next frame from the stream\r\n (self.grabbed, self.frame) = self.stream.read()\r\n\r\n def read(self):\r\n # return the frame most recently read\r\n return self.frame\r\n\r\n def stop(self):\r\n # indicate that the thread should be stopped\r\n self.stopped = True\r\n self.stream.release()\r\n\r\n\r\nclass FaceDetector:\r\n \"\"\"\r\n FaceDetector collects some methods are used for\r\n Face detection and Face recognition:\r\n 1,faceDetect\r\n 2,eyeDetect\r\n \"\"\"\r\n def __init__(self):\r\n \"\"\" Getting paths of HaarCascade, use for face, eye detection \"\"\"\r\n self.faceCascadePath = FACE_CASCADEPATH\r\n self.eyeCascadePath = EYE_CASCADEPATH\r\n\r\n def faceDetect(self, image, scaleFactor=1.3,\r\n minNeighbors=5, minSize=(30, 30)):\r\n \"\"\"\r\n Method is used to detect face\r\n 1,image: An image Matrix with type=unit8\r\n 2,scaleFactor: How much image size is reduced at each image scale\r\n 3,minNeighbors: How many neighbors each candidate\r\n rectangle should have. 
To retain it\r\n 4,minSize: Objects smaller than that are ignored\r\n \"\"\"\r\n self.faceRect = self.faceCascadePath.detectMultiScale(\r\n image,\r\n scaleFactor=scaleFactor,\r\n minNeighbors=minNeighbors,\r\n minSize=minSize,\r\n flags=cv2.CASCADE_SCALE_IMAGE)\r\n return self.faceRect\r\n\r\n def eyeDetect(self, roiImage, scaleFactor=1.05,\r\n minNeighbors=4, minSize=(3, 3)):\r\n \"\"\"\r\n Method is used to detect eyes\r\n 1,image: An image Matrix with type=unit8\r\n 2,scaleFactor: How much image size is reduced at each image scale\r\n 3,minNeighbors: How many neighbors each candidate\r\n rectangle should have. To retain it\r\n 4,minSize: Objects smaller than that are ignored\r\n \"\"\"\r\n eyeRect = self.eyeCascadePath.detectMultiScale(\r\n roiImage,\r\n scaleFactor=scaleFactor,\r\n minNeighbors=minNeighbors,\r\n minSize=minSize,\r\n flags=cv2.CASCADE_SCALE_IMAGE)\r\n return eyeRect\r\n\r\n\r\ndef assignColorPerson(name, specPerson):\r\n \"\"\" Method is used in 'face recog' to assign color for special person \"\"\"\r\n if (specPerson is not None) and (name in specPerson):\r\n color_person = (0, 0, 255)\r\n else:\r\n color_person = (10, 255, 10)\r\n return color_person\r\n\r\n\r\ndef showNameList():\r\n \"\"\" Method is used to return list of names in NAME_LIST \"\"\"\r\n n = []\r\n with open(NAME_LIST, \"r\") as names:\r\n for line in names:\r\n if line != \"\":\r\n n.append(line.split(',')[1].strip())\r\n return n\r\n\r\n\r\ndef addName():\r\n \"\"\" Method is used in 'face capture' to add your name to NAME_LIST \"\"\"\r\n name = input('Enter your name: ').lower().strip()\r\n with open(NAME_LIST, \"r+\") as info:\r\n id = sum([1 for line in info])+1\r\n print(f\"{id},{name}\", file=info)\r\n print(f\"Your name stored at position '{id}' in Name.txt\")\r\n return id\r\n\r\n\r\ndef findName(id, confidence):\r\n \"\"\"\r\n Method is used to find name with confidence for face\r\n 1,id: ID get from image\r\n 2,confidence: A distance between two histogram,\r\n is taken from \"predic method\" of\r\n cv2.face.LBPHFaceRecognizer_create()\r\n \"\"\"\r\n with open(NAME_LIST, \"r\") as info:\r\n names = []\r\n for line in info:\r\n if line == \"\":\r\n break\r\n names.append(line.split(\",\")[1].rstrip())\r\n name = f\"{names[id-1]}\"\r\n conf = f\"{round(100-confidence)}%\"\r\n return name, conf\r\n\r\n\r\ndef createFolderHoldImage(path_data, folder_image):\r\n \"\"\"\r\n Method is used in 'face capture' to create folder,\r\n that holds image of each User\r\n \"\"\"\r\n os.mkdir(f'{path_data}/{folder_image}')\r\n return None\r\n\r\n\r\ndef getFaceWithID(path):\r\n \"\"\"\r\n Method is used in 'face trainning' to get samples of Faces and IDs\r\n path: link to folder that contains images\r\n e.g.: \"C:/my_project/dataset\"\r\n \"\"\"\r\n faces, ids = [], []\r\n for user_path, many_users, images in os.walk(path):\r\n for user in many_users:\r\n image_path = os.path.join(user_path, user)\r\n for images in os.listdir(image_path):\r\n gray = cv2.imread(os.path.join(image_path, images),\r\n cv2.IMREAD_GRAYSCALE)\r\n h, w = gray.shape[:2]\r\n x, y = 0, 0\r\n # with image(110x110) => image(84x105)\r\n img = gray[y:y+h-(h//22), (w//2)-(10*w//26):(w//2)+(10*w//26)]\r\n img_numpy = np.array(img, dtype='uint8')\r\n id = int(os.path.split(images)[-1].split(\".\")[1])\r\n faces.append(img_numpy)\r\n ids.append(id)\r\n return faces, np.array(ids)\r\n\r\n\r\ndef pressQtoQuitCV2():\r\n \"\"\" Method is used to quit program \"\"\"\r\n return (cv2.waitKey(1) & 0xFF == ord('q'))\r\n\r\n\r\ndef resize(image, 
select=None, size=None, inter=cv2.INTER_AREA):\r\n \"\"\"\r\n Method is used to resize image\r\n 1,select: \"w\"(width) or \"h\"(heigth)\r\n 2,size: in pixel\r\n 3,interpolation:\r\n cv2.INTER_AREA (more accurate)\r\n cv2.INTER_CUBIC\r\n cv2.INTER_NEAREST\r\n \"\"\"\r\n img_height, img_width = image.shape[:2]\r\n if select == \"w\":\r\n aspect_radio = size / float(img_width)\r\n dim = (size, int(img_height * aspect_radio))\r\n if select == \"h\":\r\n aspect_radio = size / float(img_height)\r\n dim = (int(img_width * aspect_radio), size)\r\n if select is None:\r\n return image\r\n # image is resized with these information above\r\n resized = cv2.resize(image, dim, interpolation=inter)\r\n return resized\r\n\r\n\r\ndef faceMask(image):\r\n \"\"\"\r\n Method is used for masking to find ROI\r\n image: An image Matrix with type=unit8\r\n \"\"\"\r\n # take two coordinates from original image\r\n h, w = image.shape[:2]\r\n cX, cY = w // 2, h // 2\r\n mask = np.zeros((w, h), dtype=\"uint8\")\r\n cv2.rectangle(mask,\r\n (cX-(10*w//26), 0),\r\n (cX+(10*w//26), h),\r\n 255, -1)\r\n # draw two triangle at the bottom of two sides of image\r\n pts1 = np.array([[cX-(10*w//26), 10*h//14],\r\n [cX-w//5, h],\r\n [cX-(10*w//26), h]],\r\n np.int32)\r\n pts1 = pts1.reshape((-1, 1, 2))\r\n cv2.fillPoly(mask, [pts1], (0))\r\n pts2 = np.array([[cX+(10*w//26), 10*h//14],\r\n [cX+w//5, h],\r\n [cX+(10*w//26), h]],\r\n np.int32)\r\n pts2 = pts2.reshape((-1, 1, 2))\r\n cv2.fillPoly(mask, [pts2], (0))\r\n applyMask = cv2.bitwise_and(image, image, mask=mask)\r\n return applyMask\r\n","sub_path":"faceLibs.py","file_name":"faceLibs.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"393243909","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 26 11:01:01 2018\n\n@author: vl-tshzzz\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass yolov1_loss(nn.Module):\n def __init__(self, B, l_coord, l_noobj,device='cuda' ,cls_num=20):\n super(yolov1_loss, self).__init__()\n self.B = B\n self.l_coord = l_coord\n self.l_noobj = l_noobj\n self.class_num = cls_num\n self.device = device\n\n def compute_iou(self, box1, box2):\n '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].\n Args:\n box1: (tensor) bounding boxes, sized [N,4].\n box2: (tensor) bounding boxes, sized [M,4].\n Return:\n (tensor) iou, sized [N,M].\n '''\n\n lt = torch.max(\n box1[:, :2], # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:, :2], # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n rb = torch.min(\n box1[:, 2:], # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:, 2:], # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n wh = rb - lt # [N,M,2]\n wh[wh < 0] = 0 # clip at 0\n inter = wh[:, 0] * wh[:, 1] # [N,M]\n\n area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]) # [N,]\n area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1]) # [M,]\n\n iou = inter / (area1 + area2 - inter)\n return iou\n\n def forward(self,pred,target):\n #print(target)\n pred_cls, pred_response, pred_bboxes = pred\n label_cls, label_response, label_bboxes = target\n\n\n pred_cls = pred_cls.to(self.device)\n pred_response = pred_response.to(self.device)\n pred_bboxes = pred_bboxes.to(self.device)\n\n label_cls = label_cls.to(self.device)\n label_response = label_response.to(self.device)\n label_bboxes = label_bboxes.to(self.device)\n\n\n\n batch_size = pred_response.size(0)\n\n 
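        # boolean masks built from the target confidence channel: grid cells
        # whose first response entry is 0 hold no object (no_obj_*), cells at 1
        # hold one (obj_*); each mask is expanded to match the tensor it will
        # index (responses, B*4 box coordinates, or the class vector)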
no_obj_mask = (label_response[:, :, :, 0] < 1).unsqueeze(-1).expand_as(label_response)\n\n obj_response_mask = (label_response[:, :, :, 0] > 0).unsqueeze(-1).expand_as(label_response)\n\n obj_box_mask = (label_response[:, :, :, 0] > 0).unsqueeze(-1).expand_as(label_bboxes)\n\n obj_cls_mask = (label_response[:, :, :, 0] > 0).unsqueeze(-1).expand_as(label_cls)\n\n no_obj_contain_pred = pred_response[no_obj_mask].view(-1)\n no_obj_contain_target = label_response[no_obj_mask].view(-1)\n\n\n obj_contain_pred = pred_response[obj_response_mask].view(-1, self.B)\n\n obj_contain_target = label_response[obj_response_mask].view(-1, self.B)\n\n # class pred response\n obj_class_pred = pred_cls[obj_cls_mask].view(-1, self.class_num)\n obj_class_target = label_cls[obj_cls_mask].view(-1, self.class_num)\n\n # box pred response\n obj_loc_pred = pred_bboxes[obj_box_mask].view(-1, self.B * 4)\n obj_loc_target = label_bboxes[obj_box_mask].view(-1, self.B * 4)\n\n iou = torch.zeros(obj_loc_pred.size(0), self.B)\n iou = Variable(iou)\n\n for j in range(self.B):\n pred_bb = torch.zeros(obj_loc_pred.size(0), 4)\n pred_bb = Variable(pred_bb)\n\n target_bb = torch.zeros(obj_loc_pred.size(0), 4)\n target_bb = Variable(target_bb)\n\n target_bb[:, 0] = obj_loc_target[:, j * 4] - 0.5 * pow(obj_loc_target[:, j * 4 + 2], 2)\n target_bb[:, 1] = obj_loc_target[:, j * 4 + 1] - 0.5 * pow(obj_loc_target[:, j * 4 + 3], 2)\n target_bb[:, 2] = obj_loc_target[:, j * 4] + 0.5 * pow(obj_loc_target[:, j * 4 + 2], 2)\n target_bb[:, 3] = obj_loc_target[:, j * 4 + 1] + 0.5 * pow(obj_loc_target[:, j * 4 + 3], 2)\n\n pred_bb[:, 0] = obj_loc_pred[:, j * 4] - 0.5 * pow(obj_loc_pred[:, j * 4 + 2], 2)\n pred_bb[:, 1] = obj_loc_pred[:, j * 4 + 1] - 0.5 * pow(obj_loc_pred[:, j * 4 + 3], 2)\n pred_bb[:, 2] = obj_loc_pred[:, j * 4] + 0.5 * pow(obj_loc_pred[:, j * 4 + 2], 2)\n pred_bb[:, 3] = obj_loc_pred[:, j * 4 + 1] + 0.5 * pow(obj_loc_pred[:, j * 4 + 3], 2)\n\n iou[:, j] = self.compute_iou(target_bb, pred_bb)\n\n max_iou, max_index = iou.max(1)\n min_iou, _ = iou.min(1)\n max_index = max_index.data.cpu()\n\n coo_response_mask = torch.ByteTensor(obj_loc_pred.size(0), self.B * 4).to(self.device)\n\n coo_response_mask.zero_()\n for i in range(obj_loc_pred.size(0)):\n coo_response_mask[i, max_index[i] * 4:max_index[i] * 4 + 4] = 1\n\n obj_axis_pred = obj_loc_pred[coo_response_mask].view(-1, 4)\n obj_axis_target = obj_loc_target[coo_response_mask].view(-1, 4)\n\n iou_response_mask = coo_response_mask[:, [i * 4 for i in range(self.B)]]\n\n obj_response_pred = obj_contain_pred[iou_response_mask].view(-1)\n obj_response_target = obj_contain_target[iou_response_mask].view(-1)\n\n obj_local_loss = F.mse_loss(obj_axis_pred[:, 0:2], obj_axis_target[:, 0:2], size_average=False) + \\\n F.mse_loss(obj_axis_pred[:, 2:4], obj_axis_target[:, 2:4], size_average=False)\n obj_class_loss = F.mse_loss(obj_class_pred, obj_class_target, size_average=False)\n\n\n max_iou = (max_iou.data).to(self.device)\n conf_id = ((1 - max_iou) * self.l_noobj + max_iou).to(self.device)\n\n conf_id = Variable(conf_id, requires_grad=True)\n\n obj_contain_loss = F.mse_loss(obj_response_pred, max_iou, size_average=False)\n\n no_obj_contain_loss = F.mse_loss(no_obj_contain_pred, no_obj_contain_target, size_average=False)\n\n iou_loss = F.mse_loss(max_iou, obj_response_target, size_average=False)\n\n loss_all = (self.l_coord * obj_local_loss + obj_class_loss + obj_contain_loss + self.l_noobj * no_obj_contain_loss + iou_loss) / batch_size\n\n loss_info = {\n 'local_loss': self.l_coord * 
obj_local_loss.data,\n 'class_loss': obj_class_loss.data,\n 'contain_loss': obj_contain_loss.data,\n 'no_contain_loss': self.l_noobj * no_obj_contain_loss,\n 'iou_loss': iou_loss,\n 'mean_iou': torch.mean(max_iou)\n }\n\n return loss_all, loss_info\n\n\nif __name__ == '__main__':\n test_loss = yolov1_loss(2, 5, 0.5,'cuda')\n\n batch_size = 7\n\n label_cls = torch.zeros(batch_size, 7, 7, 20)\n label_bbox = torch.zeros(batch_size, 7, 7, 4 * 2)\n label_response = torch.zeros(batch_size, 7, 7, 2)\n\n label_response[0, 5, 3, :] = 1\n label_bbox[0, 5, 3, 0] = 0.1\n label_bbox[0, 5, 3, 1] = 0.1\n label_bbox[0, 5, 3, 2] = 0.2\n label_bbox[0, 5, 3, 3] = 0.3\n label_bbox[0, 5, 3, 4] = 0.1\n label_bbox[0, 5, 3, 5] = 0.1\n label_bbox[0, 5, 3, 6] = 0.2\n label_bbox[0, 5, 3, 7] = 0.3\n\n pred_cls = torch.zeros(batch_size, 7, 7, 20)\n pred_bbox = torch.zeros(batch_size, 7, 7, 4 * 2)\n pred_response = torch.zeros(batch_size, 7, 7, 2)\n\n pred_response[0, 5, 3, :] = 1\n pred_bbox[0, 5, 3, 0] = 0.1\n pred_bbox[0, 5, 3, 1] = 0.1\n pred_bbox[0, 5, 3, 2] = 0.2\n pred_bbox[0, 5, 3, 3] = 0.3\n pred_bbox[0, 5, 3, 4] = 0.1\n pred_bbox[0, 5, 3, 5] = 0.1\n pred_bbox[0, 5, 3, 6] = 0.2\n pred_bbox[0, 5, 3, 7] = 0.3\n\n\n\n pred = (pred_cls, pred_response, pred_bbox)\n target = (label_cls, label_response, label_bbox)\n test_loss(pred,target)\n\n\n\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":7349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"597719060","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 24 10:22:11 2016\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n## Peak 1\r\n\r\nx, y = DataTransfer(Image2Data('20160316/peak1_+0.2_37'));\r\npeak1_h1,peak1_loc1,xdata,ydata = DataAnalysis(x,y,37,0.2);\r\npeak1_area1, peak1_mu1 = GaussianFit(xdata,ydata,peak1_h1,peak1_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak1_-0.2_41'));\r\npeak1_h2,peak1_loc2,xdata,ydata = DataAnalysis(x,y,40,-0.2);\r\npeak1_area2, peak1_mu2 = GaussianFit(xdata,ydata,peak1_h2,peak1_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak1_+1_36'));\r\npeak1_h3,peak1_loc3,xdata,ydata = DataAnalysis(x,y,33,1);\r\npeak1_area3, peak1_mu3 = GaussianFit(xdata,ydata,peak1_h3,peak1_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak1_-1_42'));\r\npeak1_h4,peak1_loc4,xdata,ydata = DataAnalysis(x,y,41.5,-1);\r\npeak1_area4, peak1_mu4 = GaussianFit(xdata,ydata,peak1_h4,peak1_loc4);\r\n#%%\r\n\r\n## Peak 2\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak2_+0.2_44'));\r\npeak2_h1,peak2_loc1,xdata,ydata = DataAnalysis(x,y,43.5,0.2);\r\npeak2_area1, peak2_mu1 = GaussianFit(xdata,ydata,peak2_h1,peak2_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak2_-0.2_46'));\r\npeak2_h2,peak2_loc2,xdata,ydata = DataAnalysis(x,y,46,-0.2);\r\npeak2_area2, peak2_mu2 = GaussianFit(xdata,ydata,peak2_h2,peak2_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak2_+1_42'));\r\npeak2_h3,peak2_loc3,xdata,ydata = DataAnalysis(x,y,40,1);\r\npeak2_area3, peak2_mu3 = GaussianFit(xdata,ydata,peak2_h3,peak2_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak2_-1_47'));\r\npeak2_h4,peak2_loc4,xdata,ydata = DataAnalysis(x,y,49,-1);\r\npeak2_area4, peak2_mu4 = GaussianFit(xdata,ydata,peak2_h4,peak2_loc4);\r\n#%%\r\n\r\n## Peak 3\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak3_+0.2_64.4'));\r\npeak3_h1,peak3_loc1,xdata,ydata = DataAnalysis(x,y,64.4,0.2);\r\npeak3_area1, peak3_mu1 = 
GaussianFit(xdata,ydata,peak3_h1,peak3_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak3_-0.2_66.8'));\r\npeak3_h2,peak3_loc2,xdata,ydata = DataAnalysis(x,y,66.5,-0.2);\r\npeak3_area2, peak3_mu2 = GaussianFit(xdata,ydata,peak3_h2,peak3_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak3_+1_63'));\r\npeak3_h3,peak3_loc3,xdata,ydata = DataAnalysis(x,y,61,1);\r\npeak3_area3, peak3_mu3 = GaussianFit(xdata,ydata,peak3_h3,peak3_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak3_-1_68'));\r\npeak3_h4,peak3_loc4,xdata,ydata = DataAnalysis(x,y,69,-1);\r\npeak3_area4, peak3_mu4 = GaussianFit(xdata,ydata,peak3_h4,peak3_loc4);\r\n#%%\r\n\r\n## Peak 4\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak4_+0.2_77.6'));\r\npeak4_h1,peak4_loc1,xdata,ydata = DataAnalysis(x,y,77.1,0.2);\r\npeak4_area1, peak4_mu1 = GaussianFit(xdata,ydata,peak4_h1,peak4_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak4_-0.2_80'));\r\npeak4_h2,peak4_loc2,xdata,ydata = DataAnalysis(x,y,79.5,-0.2);\r\npeak4_area2, peak4_mu2 = GaussianFit(xdata,ydata,peak4_h2,peak4_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak4_+1_77'));\r\npeak4_h3,peak4_loc3,xdata,ydata = DataAnalysis(x,y,74,1);\r\npeak4_area3, peak4_mu3 = GaussianFit(xdata,ydata,peak4_h3,peak4_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak4_-1_81'));\r\npeak4_h4,peak4_loc4,xdata,ydata = DataAnalysis(x,y,82,-1);\r\npeak4_area4, peak4_mu4 = GaussianFit(xdata,ydata,peak4_h4,peak4_loc4);\r\n#%%\r\n\r\n## Peak 5\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak5_+0.2_82'));\r\npeak5_h1,peak5_loc1,xdata,ydata = DataAnalysis(x,y,82,0.2);\r\npeak5_area1, peak5_mu1 = GaussianFit(xdata,ydata,peak5_h1,peak5_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak5_-0.2_84.8'));\r\npeak5_h2,peak5_loc2,xdata,ydata = DataAnalysis(x,y,83.2,-0.2);\r\npeak5_area2, peak5_mu2 = GaussianFit(xdata,ydata,peak5_h2,peak5_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak5_+1_80'));\r\npeak5_h3,peak5_loc3,xdata,ydata = DataAnalysis(x,y,79,1);\r\npeak5_area3, peak5_mu3 = GaussianFit(xdata,ydata,peak5_h3,peak5_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak5_-1_86'));\r\npeak5_h4,peak5_loc4,xdata,ydata = DataAnalysis(x,y,85.3,-1);\r\npeak5_area4, peak5_mu4 = GaussianFit(xdata,ydata,peak5_h4,peak5_loc4);\r\n#%%\r\n\r\n## Peak 7\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak7_+0.2_110'));\r\npeak7_h1,peak7_loc1,xdata,ydata = DataAnalysis(x,y,110,0.2);\r\npeak7_area1, peak7_mu1 = GaussianFit(xdata,ydata,peak7_h1,peak7_loc1);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak7_-0.2_113'));\r\npeak7_h2,peak7_loc2,xdata,ydata = DataAnalysis(x,y,114,-0.2);\r\npeak7_area2, peak7_mu2 = GaussianFit(xdata,ydata,peak7_h2,peak7_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak7_+1_108'));\r\npeak7_h3,peak7_loc3,xdata,ydata = DataAnalysis(x,y,106.5,1);\r\npeak7_area3, peak7_mu3 = GaussianFit(xdata,ydata,peak7_h3,peak7_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak7_-1_114'));\r\npeak7_h4,peak7_loc4,xdata,ydata = DataAnalysis(x,y,117,-1);\r\npeak7_area4, peak7_mu4 = GaussianFit(xdata,ydata,peak7_h4,peak7_loc4);\r\n#%%\r\n\r\n## Peak 8\r\n\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak8_+0.2_115'));\r\npeak8_h1,peak8_loc1,xdata,ydata = DataAnalysis(x,y,114.5,0.2);\r\npeak8_area1, peak8_mu1 = GaussianFit(xdata,ydata,peak8_h1,peak8_loc1);\r\n#%%\r\nx, y = 
DataTransfer(Image2Data('20160316/peak8_-0.2_118'));\r\npeak8_h2,peak8_loc2,xdata,ydata = DataAnalysis(x,y,118,-0.2);\r\npeak8_area2, peak8_mu2 = GaussianFit(xdata,ydata,peak8_h2,peak8_loc2);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak8_+1_113'));\r\npeak8_h3,peak8_loc3,xdata,ydata = DataAnalysis(x,y,112.5,1);\r\npeak8_area3, peak8_mu3 = GaussianFit(xdata,ydata,peak8_h3,peak8_loc3);\r\n#%%\r\nx, y = DataTransfer(Image2Data('20160316/peak8_-1_120'));\r\npeak8_h4,peak8_loc4,xdata,ydata = DataAnalysis(x,y,120,-1);\r\npeak8_area4, peak8_mu4 = GaussianFit(xdata,ydata,peak8_h4,peak8_loc4);\r\n#%%\r\n\r\n## Put all the data together\r\n\r\narea_data = [[],[],[],[]];\r\nmu_data = [[],[],[],[]];\r\nh_data = [[],[],[],[]];\r\nloc_data = [[],[],[],[]];\r\n\r\nfor i in arange(1,5):\r\n for j in [1,2,3,4,5,7,8]:\r\n if j==6:\r\n break\r\n else:\r\n area_data[i-1].append(eval('peak%d_area%d' % (j,i)));\r\n mu_data[i-1].append(eval('peak%d_mu%d' % (j,i)));\r\n h_data[i-1].append(eval('peak%d_h%d' % (j,i)));\r\n loc_data[i-1].append(eval('peak%d_loc%d' % (j,i)));\r\n\r\n#%%\r\n\r\n## Format change\r\narea_data = array(area_data);\r\nmu_data = array(mu_data);\r\nh_data = array(h_data);\r\nloc_data = array(loc_data);\r\n#%%\r\n\r\n## Area data normalization\r\narea_data_normalize = [area_data[i]/area_data[i][0] for i in arange(0,4)];\r\narea_data_normalize = array(area_data_normalize);\r\n#%%\r\n\r\n## Try plot\r\nfigure('trial1');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data[0]/m),'ro');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data[1]/m),'bo');\r\nshow();\r\nfigure('trial1 normalized');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data_normalize[0]/m),'ro');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data_normalize[1]/m),'bo');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data_normalize[2]/m),'go');\r\nplot(sin(0.5*pi*reallocation/180)**2,log(area_data_normalize[3]/m),'ko');\r\n\r\nshow();\r\n#%%\r\n\r\n## error analysis and obtain error bar\r\n\r\nmean_mu = mean(mu_data,0);\r\nerror_mu = sqrt(sum((mu_data-mean_mu)**2,0))/2;\r\n\r\nmean_normalized_area = mean(area_data_normalize,0);\r\nerror_normalized_area = sqrt(sum((area_data_normalize-mean_normalized_area)**2,0))/2;\r\n#%%\r\n\r\n## Plot the data with error bar\r\nwavelength = 1.506;\r\nlp1 = (1+cos(pi*mean_mu/180)**2)/(sin(0.5*pi*mean_mu/180)**2*cos(0.5*pi*mean_mu/180));\r\nlp2 = (1+cos(pi*mean_mu/180)**2)/(sin(0.5*pi*mean_mu/180)*sin(pi*mean_mu/180));\r\nx_axis = sin(0.5*pi*mean_mu/180)**2/(wavelength**2);\r\ny_axis = mean(log(area_data_normalize/(m)),0)/lp1;\r\ny_error = sqrt(sum((log(area_data_normalize/m)-mean(log(area_data_normalize/m),0))**2,0))/2;\r\nfit = polyfit(x_axis,y_axis,1);\r\n\r\n\r\n\r\nfigure('error bar plot');\r\nerrorbar(x_axis,y_axis,y_error,fmt='g*',ecolor='r',label='Data');\r\nplot(linspace(0,0.35,100),fit[0]*linspace(0,0.35,100)+fit[1],label='Fitting');\r\nxlabel(r'$Sin^2(\\theta)/\\lambda^2$');\r\nylabel(r'log(normalized area/m*LP)');\r\ntitle(r'log(normalized area/m*LP) vs $Sin^2(\\theta)/\\lambda^2$')\r\nlegend();\r\nshow();\r\ngrid(True);\r\n#%%\r\n\r\n## Calculate the slope and error\r\ny_data = log(area_data_normalize/m)/lp1**1.5;\r\nx_data = sin(0.5*pi*mu_data/180)**2/wavelength**2;\r\nslope = [];\r\nfor i in arange(0,4):\r\n slope.append(polyfit(x_data[i],y_data[i],1)[0]);\r\n\r\nslope = array(slope);\r\nprint('Mean slope =', mean(slope));\r\nprint('Slope error =', sqrt(sum((slope-mean(slope))**2,0))/2);\r\n#%%\r\n\r\n## Error 
Analysis\r\nmean(4*x_data,0);\r\nsqrt(sum((4*x_data-mean(4*x_data,0))**2,0))/2;\r\nlp = (1+cos(pi*mu_data/180)**2)/(sin(0.5*pi*mu_data/180)**2*cos(0.5*pi*mu_data/180));\r\nmean(lp,0);\r\nsqrt(sum((lp-mean(lp,0))**2,0))/2;\r\nmean(area_data_normalize,0);\r\nsqrt(sum((area_data_normalize-mean(area_data_normalize,0))**2,0))/2;\r\n#%%\r\n\r\n## Plot the parameter v.s. h^2+k^2+l^2\r\nparameter = mean(4*x_data,0);\r\nhkl = array([3, 4, 8, 11, 12, 19, 20]);\r\nparameter_error = sqrt(sum((4*x_data-mean(4*x_data,0))**2,0))/2;\r\nfigure('hkl plot');\r\nerrorbar(hkl,parameter,parameter_error,fmt='g*',ecolor='r',label='Data');\r\nfit2 = polyfit(hkl,parameter,1);\r\nplot(linspace(0,21,1000),fit2[0]*linspace(0,21,1000)+fit2[1],label='Fitting');\r\nxlabel(r'$h^2+k^2+l^2$');\r\nylabel(r'$4Sin^2(\\theta)/\\lambda^2$');\r\ntitle('Bragg relation');\r\nlegend();\r\ngrid(True);\r\nshow();\r\nprint('Lattice constant a = ',sqrt(1/fit2[0]));\r\n#%%\r\n\r\n## Second part of the experiment.\r\n## Error analysis\r\n\r\nmean(al_angle);\r\nsqrt(sum((al_angle-mean(al_angle))**2))/2;\r\nmean(fe_angle,0);\r\nsqrt(sum((fe_angle-mean(fe_angle,0))**2,0))/2;\r\n\r\nal_sine = sin(0.5*pi*al_angle/180)/wavelength;\r\nfe_sine = sin(0.5*pi*fe_angle/180)/wavelength;\r\n\r\nmean(al_sine);\r\nsqrt(sum((al_sine-mean(al_sine))**2))/2;\r\n\r\nmean(fe_sine,0);\r\nsqrt(sum((fe_sine-mean(fe_sine,0))**2,0))/2;\r\n\r\nal_lp = (1+cos(pi*al_angle/180)**2)/(sin(0.5*pi*al_angle/180)**2*cos(0.5*pi*al_angle/180));\r\nfe_lp = (1+cos(pi*fe_angle/180)**2)/(sin(0.5*pi*fe_angle/180)**2*cos(0.5*pi*fe_angle/180));\r\n\r\nmean(al_lp);\r\nsqrt(sum((al_lp-mean(al_lp))**2))/2;\r\n\r\nmean(fe_lp,0);\r\nsqrt(sum((fe_lp-mean(fe_lp,0))**2,0))/2;\r\n\r\nal_dwf = exp(-1.84*al_sine**2);\r\nfe_dwf = exp(-0.736*fe_sine**2);\r\n\r\n\r\nmean(al_dwf);\r\nsqrt(sum((al_dwf-mean(al_dwf))**2))/2;\r\n\r\nmean(fe_dwf,0);\r\nsqrt(sum((fe_dwf-mean(fe_dwf,0))**2,0))/2;\r\n\r\n#%%\r\n\r\n## Area and Scattering factor\r\n\r\nmean(fe_area,0);\r\nsqrt(sum((fe_area-mean(fe_area,0))**2,0))/2;\r\n\r\nfe_asf = fe_area*mean(al_lp)*8*mean(al_dwf)/(fe_lp*m*fe_dwf);\r\n\r\nmean(fe_asf,0);\r\nsqrt(sum((fe_asf-mean(fe_asf,0))**2,0))/2;\r\n#%%\r\n\r\n## Plot the parameter v.s. 
h^2+k^2+l^2\r\nparameter = mean(4*fe_sine**2,0);\r\nhkl = array([2,4,6]);\r\nparameter_error = sqrt(sum((4*fe_sine**2-mean(4*fe_sine**2,0))**2,0))/2;\r\nfigure('hkl plot');\r\nerrorbar(hkl,parameter,parameter_error,fmt='g*',ecolor='r',label='Data');\r\nfit2 = polyfit(hkl,parameter,1);\r\nplot(linspace(0,8,1000),fit2[0]*linspace(0,8,1000)+fit2[1],label='Fitting');\r\nxlabel(r'$h^2+k^2+l^2$');\r\nylabel(r'$4Sin^2(\\theta)/\\lambda^2$');\r\ntitle('Bragg relation');\r\nlegend();\r\ngrid(True);\r\nshow();\r\nprint('Lattice constant a = ',sqrt(1/fit2[0]));\r\n#%%\r\n","sub_path":"Command.py","file_name":"Command.py","file_ext":"py","file_size_in_byte":11148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"578765899","text":"from numpy import *\nimport distmesh as dm\nrandom.seed(190) # Always the same results\n\ndef uniform_plate(size=.05):\n fd = lambda p: dm.drectangle(p, -1, 1, -1, 1)\n fh = dm.huniform\n p, t = dm.distmesh2d(fd, fh, size, (-1, -1, 1, 1),\n [(-1, -1),(-1, 1),(1, -1),(1, 1)])\n return p, t\n\ndef plate_with_hole(size=.05):\n fd = lambda p: dm.ddiff(dm.drectangle(p,-1,1,-1,1),\n dm.dcircle(p,0,0,0.5))\n fh = lambda p: 0.05+0.3*dm.dcircle(p,0,0,0.5)\n p, t = dm.distmesh2d(fd, fh, size, (-1,-1,1,1),\n [(-1,-1),(-1,1),(1,-1),(1,1)])\n return p, t\n\ndef toaba(filename, p, t):\n fh = open(filename, 'w')\n fh.write('*Node\\n')\n m = len(str(len(p)))\n for (i, xy) in enumerate(p, start=1):\n x, y = [float(_) for _ in xy]\n fh.write('{0:{m}d}, {1: .18f}, {2: .18f}\\n'.format(i, x, y, m=m))\n m = max([len(str(_)) for ec in t for _ in ec])\n fh.write('*Element, type=CPE3\\n')\n for (i, ec) in enumerate(t, start=1):\n a, b, c = [int(_)+1 for _ in ec]\n fh.write('{0:d}, {1:{m}d}, {2:{m}d}, {3:{m}d}\\n'.format(i,a,b,c,m=m))\n\np, t = plate_with_hole(size=.025)\ntoaba('plate_with_hole.inp', p, t)\n","sub_path":"pyfem2/mesh/genmesh.py","file_name":"genmesh.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"407416496","text":"# -*- coding: utf-8 -*-\nfrom Products.Archetypes import atapi\nfrom Products.Archetypes.interfaces.layer import ILayerContainer\nfrom Products.ATContentTypes.content.link import ATLink\nfrom Products.ATContentTypes.interfaces import IATLink\nfrom Products.ATContentTypes.tests import atctftestcase\nfrom Products.ATContentTypes.tests import atcttestcase\nfrom Products.CMFCore.permissions import ModifyPortalContent\nfrom Products.CMFCore.permissions import View\nfrom zope.interface.verify import verifyObject\n\nimport unittest\n\n\nURL = 'http://www.example.org/'\n\n\ndef editATCT(obj):\n obj.setTitle('Test Title')\n obj.setDescription('Test description')\n obj.setRemoteUrl(URL)\n\ntests = []\n\n\nclass TestSiteATLink(atcttestcase.ATCTTypeTestCase):\n\n klass = ATLink\n portal_type = 'Link'\n title = 'Link'\n meta_type = 'ATLink'\n\n def test_implementsATLink(self):\n iface = IATLink\n self.assertTrue(iface.providedBy(self._ATCT))\n self.assertTrue(verifyObject(iface, self._ATCT))\n\n def testLink(self):\n obj = self._ATCT\n\n url = 'http://www.example.org/'\n obj.setRemoteUrl(url)\n self.assertEqual(obj.getRemoteUrl(), url)\n\n url = 'false:url'\n obj.setRemoteUrl(url)\n self.assertEqual(obj.getRemoteUrl(), url)\n\n def testLinkSanitizesOutput(self):\n obj = self._ATCT\n\n url = 'javascript:alert(\"test\")'\n obj.setRemoteUrl(url)\n self.assertEqual(obj.getRemoteUrl(),\n 'javascript:alert%28%22test%22%29')\n # Keep 
question marks and ampersands intact, please.\n url = 'http://something.sane/f.php?p1=value&p2=value'\n obj.setRemoteUrl(url)\n self.assertEqual(obj.getRemoteUrl(),\n 'http://something.sane/f.php?p1=value&p2=value')\n # already quoted values should also remain intact\n url = 'http://something.sane/except with spaces in it'\n expected = 'http://something.sane/except%20with%20spaces%20in%20it'\n obj.setRemoteUrl(url)\n self.assertEqual(obj.getRemoteUrl(), expected)\n obj.setRemoteUrl(obj.getRemoteUrl())\n self.assertEqual(obj.getRemoteUrl(), expected)\n\n def test_edit(self):\n new = self._ATCT\n editATCT(new)\n\n def test_get_size(self):\n atct = self._ATCT\n editATCT(atct)\n self.assertEqual(atct.get_size(), len(URL))\n\ntests.append(TestSiteATLink)\n\n\nclass TestATLinkFields(atcttestcase.ATCTFieldTestCase):\n\n def afterSetUp(self):\n atcttestcase.ATCTFieldTestCase.afterSetUp(self)\n self._dummy = self.createDummy(klass=ATLink)\n\n def test_remoteUrlField(self):\n dummy = self._dummy\n field = dummy.getField('remoteUrl')\n\n self.assertTrue(ILayerContainer.providedBy(field))\n self.assertTrue(field.required == 1, 'Value is %s' % field.required)\n self.assertTrue(field.default == 'http://',\n 'Value is %s' % str(field.default))\n self.assertTrue(field.searchable == 1, 'Value is %s' %\n field.searchable)\n self.assertTrue(field.vocabulary == (),\n 'Value is %s' % str(field.vocabulary))\n self.assertTrue(field.enforceVocabulary == 0,\n 'Value is %s' % field.enforceVocabulary)\n self.assertTrue(field.multiValued == 0,\n 'Value is %s' % field.multiValued)\n self.assertTrue(field.isMetadata == 0, 'Value is %s' %\n field.isMetadata)\n self.assertTrue(field.accessor == 'getRemoteUrl',\n 'Value is %s' % field.accessor)\n self.assertTrue(field.mutator == 'setRemoteUrl',\n 'Value is %s' % field.mutator)\n self.assertTrue(field.read_permission == View,\n 'Value is %s' % field.read_permission)\n self.assertTrue(field.write_permission == ModifyPortalContent,\n 'Value is %s' % field.write_permission)\n self.assertTrue(field.generateMode == 'veVc',\n 'Value is %s' % field.generateMode)\n self.assertTrue(field.force == '', 'Value is %s' % field.force)\n self.assertTrue(field.type == 'string', 'Value is %s' % field.type)\n self.assertTrue(isinstance(field.storage, atapi.AttributeStorage),\n 'Value is %s' % type(field.storage))\n self.assertTrue(\n field.getLayerImpl('storage') == atapi.AttributeStorage(),\n 'Value is %s' % field.getLayerImpl('storage'))\n self.assertTrue(ILayerContainer.providedBy(field))\n self.assertTrue(field.validators == (),\n 'Value is %s' % str(field.validators))\n self.assertTrue(isinstance(field.widget, atapi.StringWidget),\n 'Value is %s' % id(field.widget))\n vocab = field.Vocabulary(dummy)\n self.assertTrue(isinstance(vocab, atapi.DisplayList),\n 'Value is %s' % type(vocab))\n self.assertTrue(tuple(vocab) == (), 'Value is %s' % str(tuple(vocab)))\n self.assertTrue(field.primary == 1, 'Value is %s' % field.primary)\n\ntests.append(TestATLinkFields)\n\n\nclass TestATLinkFunctional(atctftestcase.ATCTIntegrationTestCase):\n\n portal_type = 'Link'\n views = ('link_view', )\n\ntests.append(TestATLinkFunctional)\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n for test in tests:\n suite.addTest(unittest.makeSuite(test))\n return suite\n","sub_path":"Products/ATContentTypes/tests/test_atlink.py","file_name":"test_atlink.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} 
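The ATLink mutator exercised by testLinkSanitizesOutput above is not part of this record, so the sanitisation rule has to be inferred from the assertions. A minimal behaviour-compatible sketch follows; the `safe` character set is an assumption tuned to reproduce exactly those expected outputs, not the real ATContentTypes implementation:

from urllib.parse import quote

def sanitize_url(url):
    # '%' is kept safe so an already-quoted URL passes through unchanged
    # (the idempotency check in the test); ':', '/', '?', '&', '=' survive
    # so query strings do; '(', ')' and '"' fall through and get
    # percent-quoted, and spaces become %20.
    return quote(url, safe='%/:?&=')

assert sanitize_url('javascript:alert("test")') == 'javascript:alert%28%22test%22%29'
assert sanitize_url('http://something.sane/f.php?p1=value&p2=value') == \
    'http://something.sane/f.php?p1=value&p2=value'
assert sanitize_url('http://something.sane/except with spaces in it') == \
    'http://something.sane/except%20with%20spaces%20in%20it'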
+{"seq_id":"95296343","text":"\"\"\"\nload NERDm records into the RMM's MongoDB database\n\"\"\"\nimport json, os, sys\nfrom collections import Mapping\n\nfrom .loader import (Loader, RecordIngestError, JSONEncodingError,\n UpdateWarning, LoadLog)\nfrom .loader import ValidationError, SchemaError, RefResolutionError\nfrom nistoar.nerdm import utils\nfrom nistoar.nerdm.convert.rmm import NERDmForRMM\n\nDEF_BASE_SCHEMA = \"https://data.nist.gov/od/dm/nerdm-schema/v0.5#\"\nDEF_SCHEMA = DEF_BASE_SCHEMA + \"/definitions/Resource\"\n\nLATEST_COLLECTION_NAME=\"record\"\nVERSIONS_COLLECTION_NAME=\"versions\"\nRELEASES_COLLECTION_NAME=\"releasesets\"\n\nclass _NERDmRenditionLoader(Loader):\n \"\"\"\n a base class for loading a rendition of a NERDm record into one of the data collections holding \n NERDm metadata (record, versions, releaseSets)\n \"\"\"\n def __init__(self, collection_name, dburl, schemadir, log=None, defschema=DEF_SCHEMA):\n \"\"\"\n create the loader. \n\n :param str collection_name\n :param str dburl: the URL of MongoDB database in the form,\n 'mongodb://HOST:PORT/DBNAME' \n :param str schemadir: the path to a directory containing the JSON \n schemas needed to validate the input JSON data.\n :param logging.Logger log: a logging instance that messages can be \n sent to. If not provided, warnings might be \n issued via the warnings module. \n :param str defschema: the URI for the schema to validated new records \n against by default. \n \"\"\"\n super(_NERDmRenditionLoader, self).__init__(dburl, collection_name, schemadir, log)\n self._schema = defschema\n\n def _get_upd_key(self, nerdm):\n return { \"@id\": nerdm['@id'] }\n\n def _get_onupdate(self, nerdm):\n newver = nerdm.get('version', \"1.0.0\")\n # replace previous record if the version of new rec is newer or same as previous\n return lambda data, key: utils.cmp_versions(newver, data.get('version', '1.0.0')) >= 0\n\n def load(self, rec, validate=True, results=None, id=None):\n \"\"\"\n load a NERDm resource record into the database\n :param rec dict: the NERDm JSON record to load\n :param validate bool: False if validation should be skipped before\n loading; otherwise, loading will fail if the input\n data is not valid.\n \"\"\"\n if not results:\n results = self._mkloadlog()\n\n try:\n key = self._get_upd_key(rec)\n except KeyError as ex:\n if id is None:\n id = str({'@id': '?'})\n return results.add(id, RecordIngestError(\"Data is missing input key value, @id\"))\n if id is None:\n id = key\n\n errs = None\n if validate:\n schemauri = rec.get(\"_schema\")\n if not schemauri:\n schemauri = self._schema\n\n errs = self.validate(rec, schemauri)\n if errs:\n return results.add(id, errs)\n\n try:\n if self.load_data(rec, key, self._get_onupdate(rec)):\n results.add(key, None)\n except Exception as ex:\n results.add(key, [ex])\n return results\n\n def _mkloadlog(self):\n return LoadLog(\"NERDm resources\")\n\n \n\nclass NERDmLoader(_NERDmRenditionLoader):\n \"\"\"\n a class for validating and loading NERDm records into the Mongo database.\n \"\"\"\n\n class LatestLoader(_NERDmRenditionLoader):\n def __init__(self, dburl, schemadir, log=None):\n super(NERDmLoader.LatestLoader, self).__init__(LATEST_COLLECTION_NAME, dburl, schemadir, log)\n\n class ReleaseSetLoader(_NERDmRenditionLoader):\n def __init__(self, dburl, schemadir, log=None):\n super(NERDmLoader.ReleaseSetLoader, self).__init__(RELEASES_COLLECTION_NAME, dburl, schemadir, log)\n\n\n def __init__(self, dburl, schemadir, onupdate='quiet', log=None, defschema=DEF_SCHEMA):\n 
\"\"\"\n create the loader. \n\n :param dburl str: the URL of MongoDB database in the form,\n 'mongodb://HOST:PORT/DBNAME' \n :param schemadir str: the path to a directory containing the JSON \n schemas needed to validate the input JSON data.\n :param onupdate: a string or function that controls reactions to \n the need to update an existing record; see \n documentation for load_data().\n :param log logging.Logger: a logging instance that messages can be \n sent to. If not provided, warnings might be \n issued via the warnings module. \n :param defschema str: the URI for the schema to validated new records \n against by default. \n \"\"\"\n super(NERDmLoader, self).__init__(VERSIONS_COLLECTION_NAME, dburl, schemadir, log, defschema)\n self.onupdate = onupdate\n\n self.lateloadr = self.LatestLoader(dburl, schemadir, log)\n self.relloadr = self.ReleaseSetLoader(dburl, schemadir, log)\n self.tormm = NERDmForRMM(log, schemadir)\n\n def connect(self):\n \"\"\"\n establish a connection to the database.\n \"\"\"\n super(NERDmLoader, self).connect()\n self.lateloadr._client = self._client\n self.lateloadr._db = self._db\n self.relloadr._client = self._client\n self.relloadr._db = self._db\n \n def disconnect(self):\n \"\"\"\n close the connection to the database.\n \"\"\"\n try:\n super(NERDmLoader, self).disconnect()\n finally:\n self.lateloadr._client = None\n self.relloadr._db = None\n\n def _get_upd_key(self, nerdm):\n return { \"@id\": nerdm['@id'], \"version\": nerdm.get('version', '1.0.0') }\n\n def _get_onupdate(self, nerdm):\n return self.onupdate\n\n def load(self, rec, validate=True, results=None, id=None):\n \"\"\"\n load a NERDm resource record into the database\n :param dict rec: the NERDm JSON record to load\n :param bool validate: False if validation should be skipped before\n loading; otherwise, loading will fail if the input\n data is not valid.\n :param LoadLog results: the results object to add loading result error messages to. Provide\n this when chaining loaders together\n :param str|dict id: an identifier for the record being loaded that messages should be associated\n with. 
\n \"\"\"\n if not results:\n results = self._mkloadlog()\n errs = []\n\n # the input is a versioned Resource record; convert it into its three parts for the three\n # collections (record, versions, releaseSets)\n try:\n parts = self.tormm.convert(rec, validate=False)\n except (ValueError, ValidationError) as ex:\n return results.add(id or json.dumps({'@id': rec.get('@id','?')}), ex)\n\n for prop in \"record version releaseSet\".split():\n if prop not in parts or not isinstance(parts[prop], Mapping):\n errs.append(\n ValidationError(\"Failed to extract %s record from input NERDm Resource\" % prop)\n )\n if errs:\n if id is None:\n id = json.dumps({'@id': rec.get('@id','?')})\n return results.add(id, errs)\n \n # now load the versioned record first; if that's successful, we'll load the others\n\n # determine the versions udpate key\n try:\n key = self._get_upd_key(parts['version'])\n id = json.dumps(key)\n except KeyError as ex:\n if id is None:\n id = json.dumps({'@id': '?'})\n return results.add(id, RecordIngestError(\"Data is missing input key value, @id\"))\n\n if id is None:\n id = key\n\n # validate the versions record (if requested)\n errs = None\n if validate:\n schemauri = parts['version'].get(\"_schema\")\n if not schemauri:\n schemauri = self._schema\n\n errs = self.validate(parts['version'], schemauri)\n if errs:\n return results.add(id, errs)\n\n # load the version record\n try:\n if self.load_data(parts['version'], key, self._get_onupdate(parts['version'])):\n results.add(id, None)\n except Exception as ex:\n errs = [ex]\n return results.add(id, errs)\n \n\n # now (conditionally) load the other parts. (These will not get loaded if the version is not\n # new enough)\n self.lateloadr.load(parts['record'], validate, results, key)\n self.relloadr.load(parts['releaseSet'], validate, results, key)\n return results\n \n\n def load_from_file(self, filepath, validate=True, results=None):\n \"\"\"\n load a NERDm resource record from a file (containing one resource)\n \"\"\"\n with open(filepath) as fd:\n try:\n data = json.load(fd)\n except ValueError as ex:\n ex = JSONEncodingError(ex)\n if not results:\n raise ex\n results.add(filepath, ex)\n return results\n\n return self.load(data, validate=validate, results=results, id=filepath)\n\n def load_from_dir(self, dirpath, validate=True, results=None):\n \"\"\"\n load all the records found in a directory. 
This will attempt to load\n all files in the given directory with the extension, '.json'\n \"\"\"\n for root, dirs, files in os.walk(dirpath):\n # don't look in .directorys\n for i in range(len(dirs)-1, -1, -1):\n if dirs[i].startswith('.'):\n del dirs[i]\n\n for f in files:\n if f.startswith('.') or not f.endswith('.json'):\n continue\n f = os.path.join(root, f) \n results = self.load_from_file(f, validate, results)\n \n if not results:\n results = self._mkloadlog()\n \n return results\n\n","sub_path":"python/nistoar/rmm/mongo/nerdm.py","file_name":"nerdm.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"568303053","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport scipy.stats as stat\n\n\ndef archImage(domarchs):\n \"\"\"Create an image for a set of domain architectures.\"\"\"\n ndoms = max([max(ds) for ds in domarchs if ds != []]) + 1\n if ndoms < 10:\n cmap = cm.tab10\n indmap = dict(zip(range(ndoms), range(ndoms)))\n else:\n cmap = cm.jet\n indmap = dict(zip(range(ndoms),\n np.linspace(0, 255, ndoms).astype(np.int32)))\n\n dimg = np.ones([len(domarchs), max([len(ds) for ds in domarchs]), 4])\n for di, ds in enumerate(domarchs):\n for dj, d in enumerate(ds):\n dimg[di, dj, :] = cmap(indmap[d])\n\n plt.figure(figsize=(10, 8))\n plt.imshow(dimg)\n plt.xlabel('Domain', fontsize=18)\n plt.ylabel('Sequence', fontsize=18)\n\n\ndef joinImage(n0, jMap):\n \"\"\"Create an image for a join map.\"\"\"\n allJ = [jm for jms in jMap for jm in jms]\n xs = [jm[1] for jm in allJ]\n ys = [jm[0] for jm in allJ]\n plt.figure(figsize=(6, 6))\n plt.scatter(xs, ys, s=60, alpha=0.5)\n plt.plot(range(n0), range(n0), 'k:')\n plt.gca().invert_yaxis()\n plt.gca().xaxis.tick_top()\n plt.axis([-2, n0+2, n0+2, -2])\n\n\ndef domainTaxaImage(muts):\n obsTaxa = list(set([el for dom in muts for el in dom]))\n nObsTaxa = len(obsTaxa)\n nDoms = len(muts)\n\n taxaCounts = np.zeros([nDoms, nObsTaxa])\n for d in range(nDoms):\n for t in muts[d]:\n taxaCounts[d, obsTaxa.index(t)] += 1\n\n plt.figure(figsize=(6, 6))\n plt.imshow(np.log(np.transpose(taxaCounts)))\n plt.yticks([])\n plt.xticks([])\n plt.xlabel('Domains', fontsize=18)\n plt.ylabel('Taxa', fontsize=18)\n \n \ndef hyperMutImage(muts):\n plt.figure(figsize=(14, 6))\n plt.imshow(np.log(np.transpose(muts[1])))\n \ndef hyperMutEntropyPlot(muts):\n plt.figure(figsize=(8, 4))\n plt.plot(stat.entropy(np.transpose(muts[1])))","sub_path":"post/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"610133271","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community\nEdition) available.\nCopyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nfrom tastypie import fields\nfrom tastypie.authorization import ReadOnlyAuthorization\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\n\nfrom gcloud.core.api_adapter import is_user_functor\n\nfrom gcloud.webservice3.resources import (\n GCloudModelResource,\n AppSerializer,\n)\nfrom gcloud.taskflow3.resources import TaskFlowInstanceResource\nfrom gcloud.contrib.function.models import FunctionTask\n\n\nclass FunctionTaskResource(GCloudModelResource):\n task = fields.ForeignKey(\n TaskFlowInstanceResource,\n 'task',\n full=True\n )\n creator_name = fields.CharField(\n attribute='creator_name',\n readonly=True,\n null=True\n )\n editor_name = fields.CharField(\n attribute='editor_name',\n readonly=True,\n null=True\n )\n status_name = fields.CharField(\n attribute='status_name',\n readonly=True,\n null=True\n )\n\n class Meta:\n queryset = FunctionTask.objects.filter(task__is_deleted=False)\n resource_name = 'function_task'\n excludes = []\n q_fields = ['task__pipeline_instance__name']\n authorization = ReadOnlyAuthorization()\n always_return_data = True\n serializer = AppSerializer()\n filtering = {\n 'task': ALL_WITH_RELATIONS,\n 'creator': ALL,\n 'editor': ALL,\n 'status': ALL,\n 'create_time': ['gte', 'lte'],\n 'claim_time': ['gte', 'lte']\n }\n limit = 0\n\n def get_object_list(self, request):\n if is_user_functor(request) or request.user.is_superuser:\n return super(FunctionTaskResource, self).get_object_list(request)\n else:\n return super(FunctionTaskResource, self).get_object_list(request).none()\n","sub_path":"gcloud/contrib/function/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"194897246","text":"import pytest\n\nfrom rake import Rake\n\nINVALID_ABBREVIATIONS = [\n \"this is not an abbreviation\",\n \"bad abbrevation lenth (TOOMANY)\",\n \"not uppercase abbreviation (bad)\",\n \"contains space (go od)\",\n \"too short (TST)\",\n]\n\nVALID_ABBREVIATIONS = [\"this is a good abbreviation (TIAGA)\", \"this-is good (TIG)\"]\n\n\n@pytest.fixture\ndef rake():\n \"\"\"Create Rake object and pass to calling functions\"\"\"\n yield Rake()\n\n\ndef test_init(rake):\n \"\"\"Tests behaviour of Rake object\"\"\"\n assert rake is not None\n assert len(rake.stop_list) > 0\n assert len(rake.frequent_words) > 0\n assert len(rake.get_phrases(\"This is a test sentence with some key words\")) > 0\n assert (\n len(rake.get_phrases(\"This is a test sentence with some key words\", length=1))\n == 1\n )\n\n\ndef test_invalid_abbreviations(rake):\n \"\"\"Tests for invalid abbreviations\"\"\"\n for text in INVALID_ABBREVIATIONS:\n abbreviation = rake.get_abbreviations(text)\n assert len(abbreviation) == 0\n\n\ndef test_valid_abbreviations(rake):\n \"\"\"Tests for valid abbreviations\"\"\"\n for text in VALID_ABBREVIATIONS:\n abbreviation = rake.get_abbreviations(text)\n assert len(abbreviation) == 1\n","sub_path":"tests/test_rake.py","file_name":"test_rake.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"337733126","text":"import os \n \ndef rename(dir): \n listOfFiles = os.listdir(dir) \n print(listOfFiles) \n for i in range(len(listOfFiles)): \n os.rename( \n os.path.join(dir, listOfFiles[i]), \n os.path.join(dir, \"pixel\"+str(i).zfill(4)+\".txt\")\n 
) \n \nif __name__ == '__main__': \n rename('/home/phil/Repos/cnn-gesture-recognition/db/pixels/')","sub_path":"Code/tensorflow/python/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"36859946","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom mainapp.models import Image, Comment, User\nfrom django.core.files.storage import FileSystemStorage\n\n\ndef loggedin(request):\n username = request.session.get('name')\n if username:\n user = User.objects.get(name=username)\n return user.id\n return False\n\n\nclass IndexView(View):\n def get(self, request):\n images = Image.objects.all()\n return render(request, 'mainapp/index.html', context={\n 'images': images,\n 'user_id': request.session.get('name')\n })\n\n\nclass AddImage(View):\n def get(self, request):\n userid = loggedin(request)\n if not userid:\n return redirect('/')\n return render(request, 'mainapp/add_image.html', context={\n 'user_id': userid\n })\n\n def post(self, request):\n if not loggedin(request):\n return redirect('/')\n if request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save('mainapp/static/mainapp/media/' + myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n image = Image()\n image.title = request.POST.get('title')\n image.path = myfile.name\n image.user_id = request.POST.get('user')\n image.save()\n\n return render(request, 'mainapp/add_image.html', context={'path': uploaded_file_url})\n\n\nclass ShowImage(View):\n def get(self, request, id):\n image = Image.objects.get(pk=id)\n comments = Comment.objects.filter(image_id=id)\n return render(request, 'mainapp/show_image.html', context={\n 'image': image,\n 'comments': comments,\n 'user_id': loggedin(request)\n })\n\n def post(self, request, id):\n comment = Comment()\n comment.content = request.POST.get('content')\n comment.user_id = int(request.POST.get('user'))\n comment.image_id = int(id)\n comment.save()\n return self.get(request, id)\n\n\nclass UserImages(View):\n def get(self, request, id=False):\n user_id_from_sess = loggedin(request)\n if not user_id_from_sess:\n return redirect('/')\n if id:\n img = Image.objects.get(pk=id)\n img.delete()\n images = Image.objects.filter(user_id=user_id_from_sess)\n return render(request, 'mainapp/user_images.html', context={\n 'images': images,\n 'user_id': user_id_from_sess\n })\n\n\nclass LoginView(View):\n def get(self, request):\n return render(request, 'mainapp/login.html')\n\n def post(self, request):\n login = request.POST.get(\"name\")\n password = request.POST.get(\"password\")\n users = User.objects.all()\n user_names = []\n for user in users:\n user_names.append(user.name)\n if login in user_names:\n if password == User.objects.get(name=login).password:\n request.session[\"name\"] = login\n return redirect('/')\n else:\n return render(request, 'mainapp/login.html', context={'name': \"podano błędne hasło\"})\n else:\n return render(request, 'mainapp/login.html', context={'name': \"podano błędny login\"})\n\n\nclass LogoutView(View):\n def get(self, request):\n if request.session.get('name') is not None:\n del request.session['name']\n return redirect('/')\n","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"622837137","text":"import numpy as 
np\nimport matplotlib.pyplot as plt\nimport tkinter as tk\nimport os\nimport math as m\nfrom fun import *\n\nclass Block(object):\n def __init__(self,master):\n self.e = tk.Entry(master,width = 45)\n self.e1 = tk.Entry(master,width = 20)\n self.e2 = tk.Entry(master,width = 10)\n self.e3 = tk.Entry(master,width = 10)\n self.b = tk.Button(master,text = \"Create\",command = self.draw_graph,width = 10)\n self.b1 = tk.Button(master,text = \"Calculate_B\",width = 20,command = self.bisection)\n self.b2 = tk.Button(master,text =\"Calculate_S\",width = 20,command = self.secant_root)\n self.b3 = tk.Button(master,text =\"Calculate_N\",width = 10,command = self.tangent_root)\n self.b4 = tk.Button(master,text =\"Calculate_C\",width = 10,command = self.combined_root)\n self.b5 = tk.Button(master,text =\"Calculate_I\",width = 10,command = self.iteration_root)\n self.l = tk.Label(master,bg = \"black\", width = 20,fg = \"white\")\n self.e.insert(0,\"x**4 - x **3 -2 * x**2 + 3*x -3\")\n self.e.pack(side = \"top\")\n self.e1.insert(0,\"0.001\")\n self.e1.pack()\n self.e2.insert(0,\"-5\")\n self.e2.pack()\n self.e3.insert(0,\"5\")\n self.e3.pack()\n self.b.pack(side = \"right\")\n self.b1.pack(side = \"right\")\n self.b2.pack(side = \"right\")\n self.b3.pack(side = \"right\")\n self.b4.pack(side = \"right\")\n self.b5.pack(side = \"right\")\n self.l.pack(side = \"left\")\n def draw_graph(self):\n s,mistake,left_bound,right_bound = get_parameters(self)\n E = []\n D = np.arange(left_bound,right_bound,mistake)\n for x in D:\n tmp_val = function_value(s,x)\n E.append(tmp_val)\n plt.plot(D,E)\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n plt.grid(True)\n plt.axis([left_bound,right_bound,-20,20])\n plt.show()\n def bisection(self):\n os.system(\"cls\")\n iteration = 0\n middle = 0\n equation,mistake,left_bound,right_bound = get_parameters(self)\n while(abs(function_value(equation,middle)) > mistake and iteration < 1000):\n print(iteration,mistake,left_bound,right_bound)\n middle = (left_bound + right_bound)/2\n f = function_value(equation,middle)\n f_left = function_value(equation,left_bound)\n f_right = function_value(equation,right_bound)\n if (f > 0):\n if (f_left < 0):right_bound = middle\n else:left_bound = middle\n elif (f < 0):\n if (f_left > 0):right_bound = middle\n else:left_bound = middle\n iteration+=1\n print(middle)\n self.l['text'] = str(middle - middle % mistake)\n return middle\n def secant_root(self):\n print(\"Secant method\")\n equation,mistake,left_bound,right_bound = get_parameters(self)\n try:\n t,result = secant(equation,left_bound,right_bound,mistake)\n self.l['text'] = str(result-result%mistake)\n except ArithmeticError:\n self.l['text'] = \"Not found\"\n def tangent_root(self):\n print(\"Tangent method\")\n equation,mistake,left_bound,right_bound = get_parameters(self)\n if(function_value(equation,left_bound) * second_deritative_value(equation,left_bound) > 0):\n t=tangent(left_bound,mistake,equation)\n elif(function_value(equation,right_bound) * second_deritative_value(equation,right_bound) > 0):\n t=tangent(right_bound,mistake,equation)\n else:\n self.l['text'] = \"Not found\"\n return 0\n self.l['text'] = str(t - t%mistake)\n def combined_root(self):\n print(\"Combined method\")\n equation,mistake,left_bound,right_bound = get_parameters(self)\n iteration = 0\n z=0\n x=right_bound\n y=left_bound\n if(function_value(equation,left_bound) * second_deritative_value(equation,left_bound) > 0):\n z=left_bound\n elif(function_value(equation,right_bound) * second_deritative_value(equation,right_bound) > 0 ):\n z=right_bound\n else:\n self.l['text'] = \"Not found\"\n return 0\n while(iteration < 1000):\n try:\n x,y = secant(equation,x,y,mistake,1)\n except ArithmeticError:\n y=0\n z = tangent(z,mistake,equation,1)\n print(\"Secant method:\"+str(y - y%mistake)+\" y=\" + str(function_value(equation,y)))\n print(\"Tangent method:\"+str(z - z%mistake)+\" y=\" + str(function_value(equation,z))+\"\\n\")\n if (abs(function_value(equation,y)) < mistake):\n self.l['text'] = \"Secant method:\"+str(y - y%mistake)\n return 0\n elif(abs(function_value(equation,z)) < mistake):\n self.l['text'] = \"Tangent method:\"+str(z - z%mistake)\n return 0\n def iteration_root(self):\n equation,mistake,left_bound,right_bound = get_parameters(self)\n try:\n result = simple_iterations(equation,(left_bound+right_bound)/2,mistake)\n self.l[\"text\"] = str(result-result%mistake)\n except ArithmeticError:\n self.l[\"text\"]=\"Not found\"\nroot = tk.Tk()\nb = Block(root)\nroot.mainloop()","sub_path":"BP/BP.py","file_name":"BP.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"612134604","text":"#!/usr/bin/python3\n\nimport time, torch, numpy, collections\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport os, random, sys\nfrom trainers.edit_and_gen import EditAndGen\n\nsys.path.append(\"../\")\nimport utils\n\ndef generate_images(generator, directory_name, is_gpu=True, w_labels=False, epoch='na'):\n num_samples = 8\n\n z = torch.randn((num_samples, 128))\n\n labels = numpy.random.choice(10,num_samples)\n onehot_labels = numpy.eye(10)[labels]\n onehot_labels = torch.from_numpy(onehot_labels)\n onehot_labels = onehot_labels.view(-1,1,1,10)\n\n if is_gpu:\n z, onehot_labels = z.cuda(), onehot_labels.cuda()\n\n if w_labels:\n sample = generator(z, onehot_labels)\n else:\n sample = generator(z)\n \n for j in range(num_samples):\n for i, image in enumerate(sample):\n image = nn.functional.tanh(image)\n image = image[j].view(3,32,32).mul(0.5).add(0.5)\n filename = \"def\"\n if w_labels:\n filename = \"Ep\" + epoch + \"_\" + \"sample%d\"%j + \"_edited%d\"%i + \"_label: \" + str(labels[i])\n else:\n filename = \"Ep\" + epoch + \"_\" + \"sample%d\"%j + \"_edited%d\"%i \n \n utils.save_image(image.cpu().detach().numpy(), filename, directory_name)\n \nclass base_chain_gan(object):\n def __init__(self, data_loader, generator, discriminator, hyper_param_dict, directory_name, editor_object_list=None, num_editors=0):\n # parameters\n self.epoch = hyper_param_dict['epoch']\n self.batch_size = hyper_param_dict['batch_size']\n self.pretrain_iter = hyper_param_dict['pretrain_iter']\n self.data_loader = data_loader\n self.gpu_mode = True\n\n self.num_edit_generators = num_editors # number of generators we want\n self.gp = hyper_param_dict['gp']\n self.ncritic = hyper_param_dict['ncritic']\n self.lr = hyper_param_dict['lr']\n self.betas = hyper_param_dict['betas']\n self.directory_name = directory_name\n\n # Variables holding statistics of training\n self.train_hist = {}\n self.train_hist['D_loss'] = []\n self.train_hist['G_loss'] = []\n self.train_hist['E_loss'] = []\n self.train_hist['per_epoch_time'] = []\n self.train_hist['total_time'] = []\n self.data_sampler = self.get_next_sample()\n\n # Generators\n self.G = EditAndGen(generator, editor_object_list, self.num_edit_generators)\n self.G.weight_init(mean=0.0, std=0.02)\n
self.G_optimizers = []\n for optimizer in range(self.num_edit_generators + 1):\n self.G_optimizers.append(optim.Adam(self.G.param_groups[optimizer], lr=self.lr, betas=self.betas))\n if self.gpu_mode:\n self.G.cuda()\n \n # Discriminators\n self.D = discriminator\n self.D.weight_init(mean=0.0, std=0.02)\n self.D_optimizer = optim.Adam(self.D.parameters(), lr=self.lr, betas=self.betas)\n if self.gpu_mode:\n self.D.cuda()\n \n print('---------- Networks architecture -------------')\n utils.print_network(self.G)\n utils.print_network(self.D)\n print('-----------------------------------------------')\n \n def get_next_sample(self):\n while True:\n gen = iter(self.data_loader)\n for images, labels in gen:\n yield images, labels\n \n def train(self):\n # Create the directory to save the generated images\n if not os.path.exists(self.directory_name):\n os.makedirs(self.directory_name)\n \n def process_variables(var_list):\n new_vars = []\n for var in var_list:\n if self.gpu_mode:\n var = var.cuda()\n new_vars.append(var)\n return new_vars\n\n def backDifferentiate(loss, optimizer):\n return backDifferentiateMany(loss, [optimizer])\n \n def backDifferentiateMany(loss, optimizers):\n for optim in optimizers:\n optim.zero_grad()\n lossval = loss.item()\n loss.backward(retain_graph=True)\n for optim in optimizers:\n optim.step()\n return lossval\n \n def train_discriminator(update_index):\n self.D.train()\n self.G.eval()\n \n for _ in range(self.ncritic):\n z = torch.randn((self.batch_size, 128))\n images, labels = next(self.data_sampler)\n images = images.view(-1,3,32,32)\n images = images.cuda()\n z = z.cuda()\n real_loss = -torch.mean(self.D(images, update_index))\n\n # In training the discrim, we don't want to store the generator graphs but still be on gpu\n generated = self.G.generate_upto(z, update_index+1) # generates upto but not including the editor number\n generated = nn.functional.tanh(generated)\n generated = generated.detach()\n generated = generated.cuda()\n \n fake_loss = torch.mean(self.D(generated, update_index))\n grad_penalty = utils.grad_penalty(images, generated, self.gp, self.D, update_index)\n loss = real_loss + fake_loss + grad_penalty\n backDifferentiate(loss, self.D_optimizer)\n\n self.train_hist['D_loss'].append(loss.item())\n return images \n\n def train_generators(update_index):\n self.G.train()\n self.D.eval()\n \n z = torch.randn((self.batch_size, 128))\n z = process_variables([z])[0]\n \n gen_image = self.G.generate_upto(z, update_index+1)\n gen_image = nn.functional.tanh(gen_image)\n loss = -self.D(gen_image, update_index).mean()\n genLoss = backDifferentiate(loss, self.G_optimizers[update_index])\n\n def pre_train(pretraining_epoch=self.pretrain_iter):\n for epoch in range(pretraining_epoch): \n num_iters = len(self.data_loader)\n for i in tqdm(range(num_iters), total=num_iters): \n update_index = random.randint(0, self.num_edit_generators) \n train_discriminator(update_index)\n \n # In each epoch, run thru all the samples in data_loader\n print(\"Start pre-train\")\n pre_train()\n print(\"End pre-train\")\n\n for epoch in range(self.epoch):\n num_iters = len(self.data_loader)\n for i in tqdm(range(num_iters), total=num_iters): \n update_index = random.randint(0, self.num_edit_generators) \n sample_images = train_discriminator(update_index)\n train_generators(update_index)\n\n print(\"Epoch: [%2d] [%4d/%4d] D_loss: %.8f\" %\n ((epoch + 1), (i + 1), len(self.data_loader), self.train_hist['D_loss'][-1]))\n \n generate_images(self.G, self.directory_name, w_labels=False, 
epoch=str(epoch))\n #utils.plot_editor_scores(self.G, self.D, self.gpu_mode, self.num_edit_generators, \n # directory_name + \"/d_scores\", epoch) \n #utils.compute_wass_distance(sample_images, self.D, self.G, directory_name + \"/wass_distance\", epoch)\n \n if (epoch % 25)==0 and epoch != 0:\n G_optimizers_dict = [g_optim.state_dict() for g_optim in self.G_optimizers]\n save_dict = {'epoch' : epoch,\n 'Discriminator' : self.D.state_dict(),\n 'Generators' : self.G.state_dict(),\n 'D_optimizers' : G_optimizers_dict }\n file_name = self.directory_name + '/model_trained_' + str(epoch) + '.tar'\n torch.save(save_dict, file_name)\n","sub_path":"trainers/base_chain_gan.py","file_name":"base_chain_gan.py","file_ext":"py","file_size_in_byte":7976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"403511214","text":"import os\nimport gc\nimport types\nfrom enum import Enum\nfrom imp import reload\n\n\nMODULE_EXCLUDE_NAME = \"_EXCLUDE\"\n\n\ndef get_packages(root: str) -> list:\n ret = []\n root_lst = root.split(os.path.sep)\n for path, dirs, files in os.walk(root):\n flag = False\n for d in dirs:\n if d.startswith(\"__\") or d.endswith(\"__\"):\n continue\n flag = True\n if flag:\n continue\n path_lst = path.split(os.path.sep)\n if path_lst[-1].startswith(\"__\") or path_lst[-1].endswith(\"__\"):\n continue\n for f in files:\n if f.startswith(\"__\") or f.endswith(\"__\"):\n continue\n if f.endswith(\".py\"):\n ret.append(\".\".join(path_lst[len(root_lst):] + [f[:-3]]))\n return ret\n\ndef module_path(root: str, f: str=None):\n cls = type(\"ModulePath\", (), {\"_\".join(p.split(\".\")).upper(): p for p in get_packages(root)})\n if f is None:\n return cls\n root_lst = root.split(os.path.sep)\n f_lst = f.split(os.path.sep)\n path = getattr(cls, \"_\".join(f_lst[len(root_lst):])[:-3].upper(), None)\n if path is None:\n raise Exception(\"get module path error\")\n return path\n\n\ndef raise_err(func):\n def wraps(*args, **kwargs):\n ret = func(*args, **kwargs)\n if ret is None:\n raise Exception(\"error in {0} func call, args: {1} kwargs: {2}\".format(func.__name__, args, kwargs))\n return ret\n return wraps\n\nclass ReloaderMixin(object):\n\n def _on_reload(self):\n raise Exception(\"sub class must implement _on_reload method\")\n\n\nclass ObjType(Enum):\n\n FUNC = 0\n CLS = 1\n\n @classmethod\n def has_obj_type(cls, obj_type):\n return obj_type in [cls.FUNC, cls.CLS]\n\n @classmethod\n def get_obj_type(cls, name):\n return cls.CLS if name[0].isupper() else cls.FUNC\n\n\nclass ModuleInfo(object):\n\n def __init__(self, module_path: str):\n self.__module_path = module_path\n self.__module = None\n self.__funcs = dict()\n self.__clses = dict()\n\n @property\n def module_path(self):\n return self.__module_path\n\n @property\n def module(self):\n return self.__module\n\n @module.setter\n def module(self, val):\n self.__module = val\n\n def set(self, obj_type: ObjType, identifier, obj):\n if not ObjType.has_obj_type(obj_type):\n raise ValueError\n d, f = (self.__clses, self.__replace_cls) if obj_type == ObjType.CLS else (self.__funcs, self.__replace_func)\n if identifier not in d.keys():\n d[identifier] = obj\n else:\n f(d[identifier], obj)\n\n @raise_err\n def get(self, obj_type: ObjType, identifier):\n if not ObjType.has_obj_type(obj_type):\n raise ValueError\n d = self.__clses if obj_type == ObjType.CLS else self.__funcs\n return d.get(identifier)\n\n\n def reload(self):\n old_module_attrs = {nm: v for nm, v in self.__module.__dict__.items()\n if not 
(isinstance(v, types.FunctionType) or isinstance(v, type))}\n reload(self.__module)\n self.__recover_module(self.__module, old_module_attrs)\n\n \n def __same_instance_of(self, o1, o2, *tps):\n for tp in tps:\n if isinstance(o1, tp) and isinstance(o2, tp):\n return True\n return False\n \n def __replace_func(self, old, new):\n old.__code__ = new.__code__\n old.__defaults__ = new.__defaults__\n old.__doc__ = new.__doc__\n old.__dict__ = new.__dict__\n if not old.__closure__ or not new.__closure__:\n return\n for old_cell, new_cell in zip(old.__closure__, new.__closure__):\n if not self.__same_instance_of(old_cell.cell_contents, new_cell.cell_contents, types.FunctionType):\n continue\n self.__replace_func(old_cell.cell_contents, new_cell.cell_contents)\n \n def __replace_cls(self, old, new):\n for name, new_attr in new.__dict__.items():\n if name in new._EXCLUDE:\n continue\n old_attr = old.__dict__.get(name)\n if self.__same_instance_of(old_attr, new_attr, types.FunctionType):\n self.__replace_func(old_attr, new_attr)\n elif self.__same_instance_of(old_attr, new_attr, staticmethod, classmethod):\n self.__replace_func(old_attr.__func__, new_attr.__func__)\n elif self.__same_instance_of(old_attr, new_attr, property):\n try:\n self.__replace_func(old_attr.fdel, new_attr.fdel)\n except:\n pass\n try:\n self.__replace_func(old_attr.fget, new_attr.fget)\n except:\n pass\n try:\n self.__replace_func(old_attr.fset, new_attr.fset)\n except:\n pass\n elif self.__same_instance_of(old_attr, new_attr, type):\n self.__replace_cls(old_attr, new_attr)\n else:\n try:\n setattr(old, name, new_attr)\n except:\n continue\n for instance in gc.get_referrers(old):\n if isinstance(instance, old):\n instance._on_reload()\n\n\n\n def __recover_module(self, m, old_attrs):\n exclude = getattr(m, MODULE_EXCLUDE_NAME, set())\n for nm, v in old_attrs.items():\n if nm in exclude:\n try:\n setattr(m, nm, v)\n except:\n continue\n if hasattr(m, MODULE_EXCLUDE_NAME):\n delattr(m, MODULE_EXCLUDE_NAME)\n\n\nclass Reloader(object):\n\n MODULES = dict() # module_path : ModuleInfo\n __SET_SWITCH = False\n\n @classmethod\n def load_modules(cls, *module_paths: str, exclude_reload_paths=list(), rld: bool=False) -> None:\n cls.__SET_SWITCH = True\n for p in (module_paths or cls.MODULES.keys()):\n if not rld:\n cls.MODULES[p] = ModuleInfo(p)\n cls.MODULES[p].module = __import__(p, fromlist=[p.split(\".\")[-1]])\n elif p in cls.MODULES.keys() and p not in exclude_reload_paths:\n cls.MODULES[p].reload()\n cls.__SET_SWITCH = False\n\n @classmethod\n def reload(cls, module_path: str, exclude: set=set()):\n def deco(obj):\n m = cls.MODULES.get(module_path)\n if m is None:\n raise Exception(\"no module path error\")\n obj_type = ObjType.get_obj_type(obj.__name__)\n if obj_type == ObjType.CLS:\n setattr(obj, MODULE_EXCLUDE_NAME, set(exclude))\n if cls.__SET_SWITCH:\n m.set(obj_type, obj.__name__, obj)\n return m.get(obj_type, obj.__name__)\n return deco\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sample/libs/reloader/Reloader.py","file_name":"Reloader.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"431806706","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 12 13:52:42 2016\n\n@author: GlennMurphy\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm \nimport matplotlib.pyplot as scatter\n\n#pulling in the data\nloansData= 
pd.read_csv('https://github.com/Thinkful-Ed/curric-data-001-data-sets/raw/master/loans/loansData.csv')\n\n#making interest rate into a float \n\ndef AddOne(x):\n \n return 1\n \ndef funownership(x):\n if x == 'MORTGAGE': \n return 2\n if x=='RENT':\n return 3\n if x=='OWN':\n return 4\n if x=='OTHER':\n return 1\n return 1 \n \n\n#creating a y intercept\n\nloansData['Intercept']=map(AddOne,loansData['Interest.Rate'])\nloansData['Ownership_Col']=map(funownership,loansData['Home.Ownership'])\n#creating a row for annual income base off monthly \nloansData['annual_inc']=map(lambda x: float(x*12), loansData['Monthly.Income'])\n#making a datafram \nloansData['Interest_Rate']=map(lambda x:float(x[0:len(x)-1]),loansData['Interest.Rate'])\ndf=pd.DataFrame(loansData)\n\n #interest rate = annual income * coefficient + house ownership * coefficient + intercept\n\nInterestRate=loansData['Interest_Rate']\nAnnualInc=loansData['annual_inc']\nAnnualInc[np.isnan(AnnualInc)] = 0\nOwnership=loansData['Ownership_Col']\n\n\n#Dependant Variable\ny=np.matrix(InterestRate).transpose()\n\n#Independent variables \nx1=np.matrix(AnnualInc).transpose()\nx2=np.matrix(Ownership).transpose()\n#x3=np.matrix(AnnualInc*Ownership).transpose()\n\n#put the columns together\nx=np.column_stack([x1,x2])\n\n#creating model\nX=sm.add_constant(x)\nmodel = sm.OLS(y,X)\nf = model.fit()\n\nf.summary()\n\n\nscatter\n#scatter(df['Ownership'],df['Interest_Rate'])\n\n#xlabel('Home Ownership')\n#ylabel('interest rate')\n#title('Ownership vs annual income')\n#show()","sub_path":"Stat_Analysis/MulipleRegression.py","file_name":"MulipleRegression.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"503534582","text":"from daos.department_dao import DepartmentDAO\nfrom daos.employee_dao import EmployeeDAO\nfrom daos.employee_information_dao import EmployeeInformationDAO\nfrom daos.supervisors_dao import SupervisorDAO\nfrom daos.training_dao import TrainingDAO\nfrom daos.tuition_dao import TuitionDAO\nfrom exceptions.resource_not_found import ResourceNotFound\n\n\nclass TrainingService:\n employee_dao = EmployeeDAO()\n employee_information_dao = EmployeeInformationDAO()\n department_dao = DepartmentDAO()\n supervisor_dao = SupervisorDAO()\n tuition_dao = TuitionDAO()\n training_dao = TrainingDAO()\n\n @classmethod\n def create_training(cls, training, department_name):\n cls.employee_dao.get_record(training.employee_user)\n cls.tuition_dao.get_record(training.tuition_type)\n supervisor = cls.supervisor_dao.get_record(training.employee_user)\n department = cls.department_dao.get_record(department_name)\n training.direct_supervisor = supervisor.supervisor_user\n training.department_head = department.department_head\n return cls.training_dao.create_record(training)\n\n @classmethod\n def get_all_trainings(cls):\n return cls.training_dao.get_all_records()\n\n @classmethod\n def get_all_employee_trainings(cls, employee_user):\n cls.employee_dao.get_record(employee_user)\n return cls.training_dao.get_records_by_employee(employee_user)\n\n @classmethod\n def get_training(cls, case_id):\n return cls.training_dao.get_record(case_id)\n\n @classmethod\n def update_training(cls, change):\n try:\n cls.training_dao.get_record(change.case_id)\n return cls.training_dao.update_record(change)\n except ResourceNotFound as r:\n return r.message, 404\n\n @classmethod\n def update_approvals(cls, case_id, level):\n try:\n cls.training_dao.get_record(case_id)\n return 
cls.training_dao.update_approval(case_id, level)\n except ResourceNotFound as r:\n return r.message, 404\n\n @classmethod\n def update_reimbursement_amount(cls, case_id, password, value, add=\"add\"):\n try:\n cls.training_dao.get_record(case_id)\n employee = cls.employee_information_dao.get_record(password)\n if add == \"add\":\n new_funds = value + employee.reimbursement_funds_remaining\n elif employee.reimbursement_funds_remaining < value:\n value = employee.reimbursement_funds_remaining\n new_funds = 0\n else:\n new_funds = employee.reimbursement_funds_remaining - value\n cls.employee_information_dao.update_reimbursement(password, new_funds)\n return cls.training_dao.update_reimbursement_amount(case_id, value)\n except ResourceNotFound as r:\n return r.message, 404\n\n @classmethod\n def update_query(cls, change):\n return cls.training_dao.update_query(change)\n\n @classmethod\n def delete_training(cls, case_id):\n try:\n cls.training_dao.get_record(case_id)\n return cls.training_dao.delete_record(case_id)\n except ResourceNotFound as r:\n return r.message, 404\n","sub_path":"services/training_service.py","file_name":"training_service.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"650908991","text":"import os\r\nimport re\r\nimport socket\r\nimport threading\r\n\r\ndomain = 'http://www.example.com' #the domain name\r\nhome ='/home' #the root directory for file server\r\nhost = '192.168.25.47'\r\nport = 80\r\n\r\ndef full_path(path):\r\n path = path.split('/')\r\n return '&nbsp;&nbsp;home/' + ''.join(['<a href=\"/%s\">%s</a>/'%('/'.join(path[:i+1]), directory) for i, directory in enumerate(path[:-1])]) + '%s'%path[-1]\r\n\r\ndef handle(conn):\r\n path = re.search('GET /(.*?) ', conn.recv(1024).decode()).group(1)\r\n abs_path = os.path.join(home, path)\r\n if os.path.isfile(abs_path):\r\n with open(abs_path, 'rb') as file:\r\n data = file.read()\r\n conn.sendall(('HTTP/1.1 200 OK\\r\\nAccept-Ranges: bytes\\r\\nContent-Length: %d\\r\\nContent-Type: application/x-download\\r\\nContent-Disposition: attachment;filename=%s\\r\\nConnection: keep-alive\\n\\n'%(len(data), path.split('/')[-1])).encode() + data)\r\n elif os.path.isdir(abs_path):\r\n conn.sendall(html.format(full_path(path), ''.join(['<li><a href=\"%s\">%s</a></li>'%(path + '/' + item, item) for item in os.listdir(abs_path)])).encode())\r\n conn.close()\r\n\r\nhtml = 'HTTP/1.1 200 OK\\r\\nAccept-Ranges: bytes\\r\\nContent-Type: text/html; charset=utf-8\\n\\n<html><head><title>My Directory</title></head><body><h2>Directory listing</h2><p>{}</p><hr><ul>{}</ul><hr><a href=\"%s\">Home</a></body></html>
    '%domain\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind((host, port))\r\ns.listen(8)\r\nwhile True:\r\n conn, _ = s.accept()\r\n threading.Thread(target=handle, args=(conn,)).start()\r\n","sub_path":"file_server.py","file_name":"file_server.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"489818608","text":"import operator\nimport os\nimport tarfile\nfrom datetime import datetime\nfrom tasks import read_content\nimport pickle\n\n\ndef data_processing(source_file_name='2.tar'):\n \"\"\"\n :param source_file_name: string file name from test_inputs directory\n :return:\n \"\"\"\n t1 = datetime.now()\n # read_mode = 'r:bz2'\n read_mode = 'r'\n result = process_archive(source_file_name, read_mode)\n t2 = datetime.now()\n delta = (t2 - t1)\n minutes_delta_time = delta.seconds/60.0\n print('Algorithm takes minutes:' + str(minutes_delta_time))\n top_result = dict(sorted(result.items(),\n key=operator.itemgetter(1),\n reverse=True)[:10])\n t3 = datetime.now()\n delta = (t3 - t2)\n minutes_delta_time = delta.seconds/60.0\n print('Sort takes minutes:' + str(minutes_delta_time))\n\n for el in top_result:\n print(el, \" = \", top_result[el])\n\n\ndef process_archive(source_file_name, read_mode):\n \"\"\"\n :param source_file_name: string file name from test_inputs directory\n :param read_mode: 'r' for tar or 'r:bz' for tar.bz\n :return: dictionary with words as keys and numbers as values\n \"\"\"\n result_list = []\n result = {}\n this_path = os.path.dirname(os.path.abspath(__file__))\n\n file_path = os.path.join(this_path, 'test_inputs', source_file_name)\n temp_path, extension = os.path.splitext(file_path)\n file_name = os.path.basename(temp_path)\n print('Source file name - ' + file_name)\n\n with tarfile.open(file_path, read_mode) as tar:\n for file in tar:\n some_name, extension = os.path.splitext(file.path)\n extension = extension.lower()\n if extension == '.txt':\n try:\n extr_file = tar.extractfile(file)\n content = extr_file.readlines()\n # read_content is a celery task\n tmp_result = read_content.delay(pickle.dumps(content))\n result_list.append(tmp_result)\n except Exception as e:\n print(e)\n pass\n\n counter = len(result_list)\n i = 0\n while counter > 0:\n if result_list[i].ready():\n for key, val in result_list[i].get().items():\n if key in result:\n result[key] += val\n else:\n result[key] = val\n del result_list[i]\n counter -= 1\n if i >= counter - 1:\n i = 0\n else:\n i += 1\n return result\n\ndata_processing()\n","sub_path":"celery_app.py","file_name":"celery_app.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"405997886","text":"from argparse import ArgumentParser, Namespace\nfrom enum import Enum\nfrom pathlib import Path\nfrom subprocess import check_call\nfrom typing import Sequence, Tuple\n\n\nclass _PythonVersion(Enum):\n python_2: str = \"2\"\n python_3: str = \"3\"\n\n\ndef _args() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--python-versions\",\n choices=tuple([version.value for version in _PythonVersion]),\n default=tuple([version.value for version in _PythonVersion]),\n help=\"Lints all modules targeting a specific python version, will lint both versions by default\",\n nargs=\"+\",\n required=False,\n type=str,\n )\n return parser.parse_args()\n\n\ndef _main() -> None:\n python_versions_to_be_linted = tuple(\n 
_PythonVersion(raw_version) for raw_version in _args().python_versions\n )\n for python_version in python_versions_to_be_linted:\n print(f\"Attempting to lint modules for python version {python_version.value}\")\n _install_pylint_for_python_version(version=python_version)\n _lint_modules_for_python_version(\n version=python_version,\n modules=_python_modules_for_python_version(python_version),\n )\n _uninstall_pylint_for_python_version(version=python_version)\n\n print(\"Linting finished\")\n\n\ndef _python_modules_for_python_version(version: _PythonVersion) -> Tuple[Path, ...]:\n if version == _PythonVersion.python_3:\n return _python_3_modules()\n if version == _PythonVersion.python_2:\n return _python_2_modules()\n raise ValueError(f\"Unsupported python version {version.value}\")\n\n\ndef _python_2_modules():\n\tfiles = []\n\tfor path in (_root_path() / \"Assets\" / \"Python\").rglob(\"*.py\"):\n\t\tfiles.append(path)\n\tfor path in (_root_path() / \"PrivateMaps\").rglob(\"*.py\"):\n\t\tfiles.append(path)\n\treturn files\n\n\ndef _python_3_modules() -> Tuple[Path, ...]:\n return tuple(path for path in (Path(__file__).parents[1]).resolve().rglob(\"*.py\"))\n\n\ndef _root_path() -> Path:\n return (Path(__file__).parents[2]).resolve()\n\n\ndef _assert_valid_python_version(version: _PythonVersion) -> None:\n if version not in tuple(_PythonVersion):\n raise ValueError(f\"Unsupported python version: {version.value}\")\n\n\ndef _install_pylint_for_python_version(version: _PythonVersion) -> None:\n print(f\"Installing pylint for python {version.value}\")\n _assert_valid_python_version(version)\n if version == _PythonVersion.python_2:\n pylint_version = \"pylint==1.9.5\"\n elif version == _PythonVersion.python_3:\n pylint_version = \"pylint\"\n else:\n raise ValueError(f\"Unsupported python version: {version.value}\")\n check_call(\n args=(\n f\"python{version.value}\",\n \"-m\",\n \"pip\",\n \"install\",\n pylint_version,\n )\n )\n\n\ndef _uninstall_pylint_for_python_version(version: _PythonVersion) -> None:\n print(f\"Uninstalling pylint for python {version.value}\")\n _assert_valid_python_version(version)\n check_call(\n args=(\n f\"python{version.value}\",\n \"-m\",\n \"pip\",\n \"uninstall\",\n \"--yes\",\n \"pylint\",\n )\n )\n\n\ndef _lint_modules_for_python_version(\n version: _PythonVersion, modules: Sequence[Path]\n) -> None:\n print(f\"Linting python {version.value} modules\")\n _assert_valid_python_version(version)\n config_file = (\n _root_path() / \"Tools\" / \"Lint\" / f\".pylintrc_for_python_{version.value}\"\n )\n\n check_call(\n args=(\n f\"python{version.value}\",\n \"-m\",\n \"pylint\",\n \"--jobs=0\",\n f\"--rcfile={config_file}\",\n *[path.resolve() for path in modules],\n )\n )\n\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"Tools/Lint/pylint.py","file_name":"pylint.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"74451669","text":"import sqlite3\nimport models\nimport leerplanparser\nimport os\nimport config\n\ndef create_db():\n conn = sqlite3.connect(config.db_filename)\n \n c = conn.cursor()\n \n # parsed data\n\n c.execute(\"CREATE TABLE Competenties(nummer text primary key, omschrijving text)\")\n c.execute(\"CREATE TABLE Deelcompetenties(nummer text primary key, omschrijving text, competentie text)\")\n c.execute(\"CREATE TABLE Leerplandoelen(nummer text primary key, omschrijving text, competentie text, deelcompetentie text)\")\n \n leerplanonderdelen = 
leerplanparser.parse_leerplandoelentekst()\n\n for lpo in leerplanonderdelen:\n print(lpo.getInsertCommand())\n c.execute(lpo.getInsertCommand())\n\n # user data\n\n c.execute(\"CREATE TABLE Users(id int primary key, achternaam text, voornaam text)\")\n c.execute(\"CREATE TABLE Antwoorden(id int primary key, user_id int, leerplandoel text, inhoud text, tijdstip datetime)\")\n\n # user test data\n\n c.execute(\"INSERT INTO Users VALUES(1, 'Janssens', 'Jos')\")\n c.execute(\"INSERT INTO Users VALUES(2, 'Peeters', 'Willy')\")\n c.execute(\"INSERT INTO Antwoorden VALUES(1, 1, '7.3.7', 'We hebben een les gegeven met de IT-Tapa.', '2019-02-06')\")\n c.execute(\"INSERT INTO Antwoorden VALUES(2, 1, '5.1.1', 'We weten nu dat een relationele databank een verzameling is van tabellen met relaties tussen.', '2019-03-13')\")\n c.execute(\"INSERT INTO Antwoorden VALUES(3, 2, '5.1.1', 'Een relationele databank is een verzameling van tabellen met relaties tussen.', '2019-03-13')\")\n\n conn.commit()\n c.close()\n \n\nif __name__ == '__main__':\n if os.path.isfile(config.db_filename):\n import ui\n q = f\"{config.db_filename} already exists. Do you want to remove it?\"\n if ui.yes_or_no(question=q, default=False):\n os.remove(config.db_filename)\n else:\n print(\"Quitting program: not generating db again.\")\n exit()\n create_db()\n ","sub_path":"parser/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"564796056","text":"#!/usr/bin/python\n\nimport xml.etree.ElementTree as ET\nimport sys\n\ndef main():\n\tprint('tag-mapping xml file: ', sys.argv[1])\n\tprint('render xml file: ', sys.argv[2])\n\t\n\tmapXmlTree = ET.parse(sys.argv[1])\n\tmapXmlRoot = mapXmlTree.getroot()\n\t\n\trenderXmlTree = ET.parse(sys.argv[2])\n\trenderXmlRoot = renderXmlTree.getroot()\n\t\n\tfor item in mapXmlRoot:\n\t\tprint(item.tag)\n\t\tfor tagItem in item:\n\t\t\tprint(tagItem.attrib['key'],tagItem.attrib['value'])\n\nmain()\n","sub_path":"tools/comparator.py","file_name":"comparator.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"424621099","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('user_mgmt/',views.user_mgmt, name='user_mgmt'),\n path('add_user/',views.add_user, name='add_user'),\n path('user_unblock/',views.user_unblock, name='user_unblock'),\n path('delete_user/',views.delete_user, name='delete_user'),\n path('view_tutorials/',views.view_tutorials, name='view_tutorials'),\n path('add_tutorials/',views.add_tutorials, name='add_tutorials'),\n path('add_topic/',views.add_topic, name='add_topic'),\n path('add_mcq/',views.add_mcq, name='add_mcq'),\n path('mcqquestions/',views.mcqquestions, name='mcqquestions'),\n path('all_topics/',views.all_topics, name='all_topics'),\n path('view_tutorial/',views.view_tutorial, name='view_tutorial'),\n path('edit_tutorials/',views.edit_tutorials, name='edit_tutorials'),\n path('view_challenges_admin/',views.view_challenges_admin, name='view_challenges_admin'),\n path('add_challenges/',views.add_challenges, name='add_challenges'),\n path('add_challenge_topic/',views.add_challenge_topic, name='add_challenge_topic'),\n path('show_challenge/',views.show_challenge, name='show_challenge'),\n path('edit_challenge/',views.edit_challenge, name='edit_challenge'),\n path('admin_dashboard/', views.admin_dashboard, name = 'admin_dashboard'),\n path('delete_topic/', views.delete_topic, name = 'delete_topic'),\n path('delete_challenge/', views.delete_challenge, name = 'delete_challenge'),\n path('delete_tutorial/', views.delete_tutorial, name = 'delete_tutorial'),\n]","sub_path":"admin_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"353305922","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom sales.models.client_sales_model import ClientSales\nfrom django.contrib.auth.decorators import login_required\n\n\n\n@login_required(login_url='/login/')\ndef client_sales_detail_view(request, id):\n \"\"\"\n This view renders Warehouse Detail page with a details of selected Warehouse\n \"\"\"\n sale_obj = get_object_or_404(ClientSales, client_sales_id=id)\n context = {\n \"clientSale\": sale_obj,\n \"title\": \"Client Sales Details\"\n\n }\n return render(request, \"client_sales_details.html\", context)\n\n","sub_path":"sales/views/client_sales_details.py","file_name":"client_sales_details.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"76997547","text":"class Node:\n def __init__(self, character):\n self.character=character\n self.left_node=None\n self.middle_node=None\n self.right_node=None\n self.value=0\n\n\nclass TernarySearchTree:\n def __init__(self):\n self.root_node=None\n\n def put(self, key, value):\n self.root_node=self.put_item(self.root_node, key, value, 0)\n\n def put_item(self, node, key, value, index):\n c=key[index]\n if node==None:\n node=Node(c)\n if cnode.character:\n node.right_node=self.put_item(node.right_node, key, value, index)\n elif indexnode.character:\n return self.get_item(node.right_node, key, index)\n elif index dict:\r\n '''\r\n 根据传入的corpsercet 生成access token,\r\n 将 corpseret 和 accress_token 生成键值对, 存入 token 词典里面. 
如果 token[corpsecret]不存在, 就生成一个.\r\n :param corpsecret: 特定工具的token\r\n :return: token [corpsecret: access_token]\r\n '''\r\n format_date = Common_Tools()\r\n if corpsecret not in cls.token.keys():\r\n cls.token = cls.generate_token_time(corpsecret)\r\n else:\r\n # print('corpseret is existing value is: \\n ' + cls.token[corpsecret])\r\n cls.currentTime = format_date.format_date()\r\n for key in cls.token.keys():\r\n if corpsecret + \"_\" in key:\r\n cls.corpsecret_time = key\r\n break\r\n if int(cls.currentTime) - int(cls.token[cls.corpsecret_time]) > 20000:\r\n # 当前时间和存储的 corpsecret_time 相比较, 大于 2小时就 生成新token, 新 token 时间\r\n print(\"need generate new token\")\r\n cls.token = cls.generate_token_time(corpsecret)\r\n\r\n return cls.token\r\n\r\n @classmethod\r\n def get_access_token(cls, corpsecret) -> str:\r\n '''\r\n 根据传入的corpsercet 生成access token\r\n :param corpsecret: 特定工具的token\r\n :return: JSON 格式的response\r\n '''\r\n r = requests.get(cls.token_URL, params={\"corpid\": cls.corpid, \"corpsecret\": corpsecret})\r\n return r.json()\r\n\r\n @classmethod\r\n def generate_token_time(cls, corpsecret):\r\n format_date = Common_Tools()\r\n response = cls.get_access_token(corpsecret)\r\n cls.token[corpsecret] = response[\"access_token\"]\r\n cls.currentTime = format_date.format_date()\r\n cls.corpsecret_time = corpsecret + '_' + cls.currentTime\r\n print(cls.corpsecret_time)\r\n cls.token[cls.corpsecret_time] = cls.currentTime\r\n return cls.token\r\n","sub_path":"apiTest/test_weichat/apis/weichat_accesstoken.py","file_name":"weichat_accesstoken.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"611515205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\ninstall_requirements = [\n \"nevermined-sdk-py==0.10.0\",\n \"web3==5.9.0\",\n \"minio==6.0.0\",\n]\n\n# Required to run setup.py:\nsetup_requirements = []\n\ntest_requirements = []\n\ndev_requirements = []\n\ndocs_requirements = []\n\nsetup(\n author=\"nevermined-io\",\n author_email=\"root@nevermined.io\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.6\",\n ],\n description=\"🐳 Nevermined/Python pod publishing\",\n extras_require={\n \"test\": test_requirements,\n \"dev\": dev_requirements + test_requirements + docs_requirements,\n \"docs\": docs_requirements,\n },\n install_requires=install_requirements,\n license=\"Apache Software License 2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"nevermined-pod-publishing\",\n name=\"nevermined-pod-publishing\",\n packages=find_packages(),\n setup_requires=setup_requirements,\n tests_require=test_requirements,\n url=\"https://github.com/nevermined-io/pod-publishing-py\",\n version=\"0.1.2\",\n zip_safe=False,\n entry_points={\n \"console_scripts\": [\"pod-publishing=nevermined_pod_publishing.pod_publishing:main\"]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"44101353","text":"from durable.lang import *\nimport json\nimport 
jinja2\nfrom pprint import pprint\n\n# these rules would be from the rules database\n# external-action is for the future when the consequent can trigger an orchestration via a rest/graphql API\nrules_json = '''[\n{\n \"antecedent\": \"(m.id == 2) | (m.name == 'ok')\",\n \"consequent\": {\n \"disposition\": \"true positive\",\n \"auto-report\": {\"template_id\": \"t1\" },\n \"external-action\": null\n }\n},\n{\n \"antecedent\": \"(m.id == 3) | (m.name == 'poke')\",\n \"consequent\": {\n \"disposition\": \"true positive\",\n \"auto-report\": {\"template_id\": \"t2\" },\n \"external-action\": null\n }\n\n}\n]\n'''\n\n# these templates would be from the reporting templates database\ntemplates={ \"t1\": \"findings: {{alert.name}}\", \"t2\": \"malware: {{alert.id}}\" }\n\ndef process_consequent_actions(c, action_str, num):\n print(\"-----------\\naction #\" + str(num) + \"\\n-----------\\n\")\n actions = json.loads(action_str)\n alert = c.m\n\n for k,v in actions.items():\n if k==\"disposition\":\n print(\"setting disposition to \" + v)\n elif k==\"auto-report\":\n template_id = v[\"template_id\"]\n assert template_id in templates\n t = jinja2.Template(templates[template_id])\n print(\"sending report: \" + t.render(alert=alert))\n else:\n print(\"unsupported action: \" + k)\n\ndef gen_rule_str(rule, r):\n# rule_str=f'''\n#@when_all({rule[\"antecedent\"]})\n#def action_{str(r)}(c):\n# print ('action_{str(r)}')\n#'''\n rule_str='''\n@when_all({0})\ndef action_{1}(c):\n process_consequent_actions(c, {2}, {1})\n'''.format(rule[\"antecedent\"], str(r), json.dumps(json.dumps(rule[\"consequent\"])))\n \n return rule_str\n\ndef create_ruleset(ruleset_name, rules):\n r = 0\n with ruleset(ruleset_name):\n for rule in rules:\n #print(gen_rule_str(rule, r))\n exec(gen_rule_str(rule, r))\n r += 1\n \nrules = json.loads(rules_json)\n\ncreate_ruleset(\"expense\", rules)\npost('expense', { 'id': 1, 'name': 'ok'}) \npost('expense', { 'id': 1, 'name': 'poke'}) \n\n#t = jinja2.Template('hello {{alert.name}}')\n#print(t.render(alert={\"name\": \"world\"}))\n\n","sub_path":"rules/test_rule.py","file_name":"test_rule.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"470052920","text":"\"\"\"add usermixin\n\nRevision ID: 3f2a6353213e\nRevises: a658fcd6537f\nCreate Date: 2018-04-18 17:32:27.688753\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3f2a6353213e'\ndown_revision = 'a658fcd6537f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)\n op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)\n op.drop_constraint('users_email_key', 'users', type_='unique')\n op.drop_constraint('users_username_key', 'users', type_='unique')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_unique_constraint('users_username_key', 'users', ['username'])\n op.create_unique_constraint('users_email_key', 'users', ['email'])\n op.drop_index(op.f('ix_users_username'), table_name='users')\n op.drop_index(op.f('ix_users_email'), table_name='users')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/3f2a6353213e_add_usermixin.py","file_name":"3f2a6353213e_add_usermixin.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"574371740","text":"import threading\r\nimport time\r\nimport serial\r\nimport socket\r\nimport sys\r\nimport base64\r\nfrom NeuralNet_Correlation_Model import DanceClassifierNN\r\nfrom Crypto import Random\r\nfrom Crypto.Cipher import AES\r\nfrom RingBuffer import RingBuffer\r\n\r\nclass Communication:\r\n\r\n def __init__(self, host, port, dataList):\r\n self.host = host\r\n self.port = int(port)\r\n self.bs = 16\r\n self.key = bytes(\"1234123412341234\".encode(\"utf8\"))\r\n self.dataList = dataList\r\n\r\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.client.connect((self.host, self.port))\r\n print(\"[+] You are currently connected to \", self.host + \":\" + str(self.port))\r\n\r\n self.actions = {'WIPERS':'wipers', 'NUMBER7':'number7', 'CHICKEN':'chicken',\r\n 'SIDESTEP':'sidestep', 'TURNCLAP':'turnclap', 'COWBOY':'cowboy',\r\n 'SWING':'swing', 'MERMAID':'mermaid', 'SALUTE':'salute',\r\n 'NUMBER6':'numbersix', 'LOGOUT':'logout'}\r\n\r\n def encrypt(self, message):\r\n text = self._pad(message)\r\n iv = Random.new().read(AES.block_size)\r\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\r\n return base64.b64encode(iv + cipher.encrypt(text))\r\n\r\n def updateData(self, arrayData):\r\n self.dataList = arrayData\r\n\r\n def _pad(self, s):\r\n return bytes(s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs), 'utf-8')\r\n\r\n def format(self, message):\r\n return '#{0}|{1:.2f}|{2:.2f}|{3:.2f}|{4:.2f}|'.format(self.actions[message], self.dataList[0], self.dataList[1], self.dataList[2], self.dataList[3])\r\n\r\n def sendMessage(self, message):\r\n message = self.format(message)\r\n message = self.encrypt(message)\r\n self.client.send(message) \r\n\r\nclass Pi:\r\n def __init__(self, host, port):\r\n self.WINDOWSIZE = 100\r\n self.dataList = [4.65, 2.00, 1.98, 10.00]\r\n self.buffer = RingBuffer(self.WINDOWSIZE)\r\n self.client = Communication(host, port, self.dataList)\r\n self.movesSent = 0\r\n\r\n # Machine Learning\r\n self.model = DanceClassifierNN()\r\n self.executionTime = 0.0\r\n self.FlushTime = 2.5\r\n \r\n # Data Collection\r\n self.SENSOR_COUNT = 23\r\n self.csPosition = self.SENSOR_COUNT * 2\r\n self.connection_established = False\r\n\r\n # Setup serial port\r\n self.ser = serial.Serial(\r\n port='/dev/ttyS0',\r\n baudrate = 115200,\r\n parity=serial.PARITY_NONE,\r\n stopbits=serial.STOPBITS_ONE,\r\n bytesize=serial.EIGHTBITS,\r\n #timeout=0.1\r\n )\r\n self.ser.flushInput()\r\n self.ser.flushOutput()\r\n\r\n self.ser.reset_input_buffer()\r\n self.ser.reset_output_buffer()\r\n \r\n self.byteArray = []\r\n\r\n def checkByteArray(self, bArray):\r\n newArray = []\r\n checksum = 0\r\n\r\n chkPos = self.SENSOR_COUNT\r\n\r\n # Forms an array of 0 to 22 since sensorcount is 23(46)\r\n for counter in range (0, chkPos):\r\n byte1 = bArray[counter * 2]\r\n checksum = checksum ^ (int.from_bytes(byte1, byteorder=\"big\", signed=True))\r\n byte2 = bArray[counter * 2 + 1]\r\n checksum = checksum ^ 
(int.from_bytes(byte2, byteorder=\"big\", signed=True))\r\n combinedValue = int.from_bytes(byte2 + byte1, byteorder=\"big\", signed=True)\r\n newArray.append(combinedValue)\r\n\r\n for counter in range (19,22):\r\n newArray[counter] /= 100\r\n\r\n newArray[22] /= 1000\r\n\r\n self.dataList = newArray[19:23]\r\n self.client.updateData(self.dataList)\r\n \r\n arrayChecksum = (int.from_bytes(bArray[self.csPosition] , byteorder=\"big\", signed=True))\r\n # checksum is at 46th position of bArray\r\n # if checksum matches, then data is clean and ready to be stored into circular buffer\r\n if (checksum == arrayChecksum):\r\n self.ser.write(b'C')\r\n \r\n if checksum == 0:\r\n return\r\n \r\n self.buffer.append(newArray[0:19])\r\n return\r\n else:\r\n self.ser.write(b'N')\r\n \r\n print(\"Checksum error\")\r\n print(bArray)\r\n print(newArray)\r\n print(checksum)\r\n print(arrayChecksum)\r\n \r\n return\r\n \r\n def establish_connection(self):\r\n # Wait for SYNC packet\r\n while(self.ser.read() != b'S'):\r\n pass\r\n self.ser.write(b'A')\r\n # Wait for SYNC-ACK packet\r\n while(self.ser.read() != b'H'):\r\n pass\r\n\r\n self.connection_established = True\r\n print(\"Connection Established\")\r\n \r\n def read_data(self):\r\n # will be receiving 47 array bytes, 0 to 46\r\n while (len(self.byteArray) < 47):\r\n startTime = time.time()\r\n while(self.ser.in_waiting == 0):\r\n currentTime = time.time()\r\n if (currentTime - startTime >= 2.0):\r\n self.byteArray = []\r\n self.connection_established = False\r\n print('termination')\r\n return\r\n\r\n rcv = self.ser.read()\r\n self.byteArray.append(rcv)\r\n\r\n self.checkByteArray(self.byteArray) \r\n self.byteArray = []\r\n \r\n def communicate(self):\r\n if not self.connection_established:\r\n self.establish_connection()\r\n else:\r\n self.read_data()\r\n\r\n def processData(self, feedingBuffer):\r\n newPrediction = self.model.detectMove(feedingBuffer)\r\n\r\n if(newPrediction is None):\r\n print('ML returning None')\r\n return 'IDLE_A'\r\n\r\n return newPrediction\r\n\r\n def main(self):\r\n try:\r\n self.executionTime = time.time()\r\n while True:\r\n # Collect data\r\n self.communicate()\r\n \r\n if(self.buffer.getSize() == self.WINDOWSIZE):\r\n print('Buffer Filled')\r\n currentTime = time.time()\r\n if((currentTime - self.executionTime) >= self.FlushTime):\r\n action = 'IDLE_A'\r\n \r\n tempBuffer = self.buffer.get()\r\n feedingBuffer = []\r\n\r\n for i in range(0, self.WINDOWSIZE):\r\n feedingBuffer += tempBuffer[i]\r\n \r\n action = self.processData(feedingBuffer)\r\n \r\n if self.movesSent >= 40:\r\n if action == 'LOGOUT':\r\n self.client.sendMessage(action)\r\n print('Program End: LOGOUT & 41 MOVES SENT')\r\n sys.exit(1)\r\n\r\n if action == 'UNSURE':\r\n self.FlushTime = 1.0\r\n elif action != 'IDLE_A' and action != 'LOGOUT':\r\n self.client.sendMessage(action)\r\n print(action, 'sent')\r\n self.movesSent += 1\r\n self.FlushTime = 2.5\r\n\r\n self.executionTime = currentTime\r\n \r\n self.buffer.reset()\r\n \r\n except KeyboardInterrupt:\r\n print('Program End')\r\n sys.exit(1) \r\n\r\nif __name__ == '__main__':\r\n host = sys.argv[1]\r\n port = sys.argv[2]\r\n pi = Pi(host, port)\r\n pi.main()","sub_path":"Raspberry Pi/PiV3/RaspiMod.py","file_name":"RaspiMod.py","file_ext":"py","file_size_in_byte":7662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"506315808","text":"\"\"\"\n Stochastic Integrators\n\n\"\"\"\n\nimport numpy as np\nfrom typing import Tuple\nimport scipy.constants as 
const\n\nfrom ensembler import system\nfrom ensembler.integrator._basicIntegrators import _integratorCls\n\n\nclass monteCarloIntegrator(_integratorCls):\n \"\"\"\n ..autoclass: monteCarloIntegrator\n This class implements the classic monte carlo integrator.\n It chooses its moves purely randomly.\n \"\"\"\n resolution:float = 0.01 #increase the amount of different possible values = between 0 and 10 there are 10/0.01 different positions.\n fixedStepSize: (float or list)\n\n def __init__(self, maxStepSize:float=None, minStepSize:float=None, spaceRange:tuple=None, fixedStepSize=None):\n self.fixedStepSize = None if(isinstance(fixedStepSize, type(None))) else np.array(fixedStepSize)\n self.maxStepSize = maxStepSize\n self.minStepSize = minStepSize\n self.spaceRange = spaceRange\n pass\n \n def step(self, system)-> Tuple[float, None, float]:\n \"\"\"\n ..autofunction: step\n This function is performing an integration step in MonteCarlo fashion.\n :param system: This is a system, that should be integrated.\n :type system: ensembler.system.system\n :return: (new Position, None, position Shift)\n :rtype: (float, None, float)\n \"\"\"\n # integrate\n # while no value in spaceRange was found, terminates in first run if no spaceRange\n while(True):\n current_state = system.currentState\n\n self.oldpos = current_state.position\n self.randomShift(system.nDim)\n self.newPos = np.add(self.oldpos,self.posShift)\n\n #only get positions in certain range or accept if no range\n if(self._critInSpaceRange(self.newPos)):\n break\n else:\n self.newPos = self.oldpos #reject step outside of range\n\n return self.newPos, np.nan, self.posShift\n \n def randomShift(self, nDim:int)->float:\n \"\"\"\n ..autofunction: randomShift\n This function calculates the shift for the current position.\n\n :return: position shift\n :rtype: float\n \"\"\"\n #which sign will the shift have?\n sign = np.array([-1 if(x <50) else 1 for x in np.random.randint(low=0, high=100, size=nDim)])\n\n #Check if there is a space restriction? - converges faster\n ##TODO: Implement functional\n if(not isinstance(self.fixedStepSize, type(None))):\n shift = self.fixedStepSize\n elif(self.spaceRange!=None):\n shift = np.multiply(np.abs(np.random.randint(low=self.spaceRange[0]/self.resolution, high=self.spaceRange[1]/self.resolution, size=nDim)), self.resolution)\n else:\n shift = np.abs(np.random.rand(nDim))\n #print(sign, shift)\n #Is the step shift in the allowed area? 
#Todo: fix min and max for multiDimensional\n if(self.maxStepSize != None and shift > self.maxStepSize):#is there a maximal step size?\n self.posShift = np.multiply(sign, self.maxStepSize)\n elif(self.minStepSize != None and shift < self.minStepSize):\n self.posShift = np.multiply(sign, self.minStepSize)\n else:\n self.posShift = np.multiply(sign, shift)\n\n if(nDim == 1): #TODO Make Efficient?\n self.posShift = self.posShift[0]\n\n return self.posShift\n\n\nclass metropolisMonteCarloIntegrator(monteCarloIntegrator):\n \"\"\"\n ..autoclass: metropolisMonteCarloIntegrator\n This class implements a Metropolis Monte Carlo integrator.\n In contrast to the Monte Carlo integrator, which is completely random, this integrator has limitations to the randomness.\n This limitation is expressed in the Metropolis Criterion.\n\n There is a standard Metropolis Criterion implemented, but it can also be exchanged with a different one.\n\n Default Metropolis Criterion:\n $ decision = (E_{t} < E_{t-1}) || (rand <= e^{(-1/(R*T/1000))*(E_t-E_{t-1})})$\n with:\n - $R$ as universal gas constant\n\n The original Metropolis Criterion (Nicholas Metropolis et al.; J. Chem. Phys.; 1953 ;doi: https://doi.org/10.1063/1.1699114):\n\n $ p_A(E_{t}, E_{t-1}, T) = min(1, e^{-1/(k_b*T) * (E_{t} - E_{t-1})})$\n $ decision: True if( 0.5 < p_A(E_{t}, E_{t-1}, T)) else False\n with:\n - $k_b$ as Boltzmann Constant\n \"\"\"\n #\n #Parameters:\n metropolisCriterion=None #use a different Criterion\n randomnessIncreaseFactor:float = 1 #tune randomness of your results\n maxIterationTillAccept:float = 100 #how often shall the integrator iterate till it accepts a step forcefully\n\n #METROPOLIS CRITERION\n ##random part of Metropolis Criterion:\n _defaultRandomness = lambda self, ene_new, currentState: ((1/self.randomnessIncreaseFactor)*np.random.rand() <= np.exp(-1.0 / (const.gas_constant / 1000.0 * currentState.temperature) * (ene_new - currentState.totPotEnergy))) #pseudocount for equal energies\n ##default Metropolis Criterion\n _defaultMetropolisCriterion = lambda self, ene_new, currentState: (ene_new < currentState.totEnergy or self._defaultRandomness(ene_new, currentState))\n ## original criterion not useful causes overflows:\n #_defaultMetropolisCriterion = lambda self, ene_new, currentState: True if(0.5 > min(1, np.e**(-1/(const.k * currentState.temperature)*(ene_new-currentState.totPotEnergy)))) else False\n\n def __init__(self, minStepSize:float=None, maxStepSize:float=None, spaceRange:tuple=None, metropolisCriterion=None, randomnessIncreaseFactor=1, maxIterationTillAccept:int=100, fixedStepSize=None):\n self.fixedStepSize = None if(isinstance(fixedStepSize, type(None))) else np.array(fixedStepSize)\n self.maxStepSize = maxStepSize\n self.minStepSize = minStepSize\n self.spaceRange = spaceRange\n self.randomnessIncreaseFactor = randomnessIncreaseFactor\n self.maxIterationTillAccept = maxIterationTillAccept\n if(metropolisCriterion == None):\n self.metropolisCriterion = self._defaultMetropolisCriterion\n else:\n self.metropolisCriterion = metropolisCriterion\n\n def step(self, system):\n \"\"\"\n ..autofunction: step\n This function is performing an integration step in MetropolisMonteCarlo fashion.\n :param system: This is a system, that should be integrated.\n :type system: ensembler.system.system\n :return: (new Position, None, position Shift)\n :rtype: (float, None, float)\n \"\"\"\n\n iterstep = 0\n current_state = system.currentState\n self.oldpos = current_state.position\n nDim = system.nDim\n # integrate position\n 
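# keep proposing random shifts until the Metropolis criterion accepts one (acceptance is forced once iterstep reaches maxIterationTillAccept)\n 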
while(True): #while no value in spaceRange was found, terminates in first run if no spaceRange\n self.randomShift(nDim)\n #eval new Energy\n system._currentPosition = np.add(self.oldpos, self.posShift)\n ene = system.totPot()\n\n #MetropolisCriterion\n if ((self._critInSpaceRange(system._currentPosition) and self.metropolisCriterion(ene, current_state)) or iterstep==self.maxIterationTillAccept):\n break\n else: #not accepted\n iterstep += 1\n continue\n return system._currentPosition , None, self.posShift\n","sub_path":"ensembler/integrator/stochastic.py","file_name":"stochastic.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"375459674","text":"#\r\n# @lc app=leetcode.cn id=1291 lang=python3\r\n#\r\n# [1291] Sequential Digits\r\n#\r\n# https://leetcode-cn.com/problems/sequential-digits/description/\r\n#\r\n# algorithms\r\n# Medium (44.02%)\r\n# Likes: 4\r\n# Dislikes: 0\r\n# Total Accepted: 1.5K\r\n# Total Submissions: 3.4K\r\n# Testcase Example: '100\\n300'\r\n#\r\n# We define a \"sequential digits\" integer as one in which each digit is exactly one more than the previous digit.\r\n#\r\n# Return a sorted list, in increasing order, of all sequential-digits integers in the range [low, high].\r\n#\r\n#\r\n#\r\n# Example 1:\r\n#\r\n# Input: low = 100, high = 300\r\n# Output: [123,234]\r\n#\r\n#\r\n# Example 2:\r\n#\r\n# Input: low = 1000, high = 13000\r\n# Output: [1234,2345,3456,4567,5678,6789,12345]\r\n#\r\n#\r\n#\r\n#\r\n# Constraints:\r\n#\r\n#\r\n# 10 <= low <= high <= 10^9\r\n#\r\n#\r\n#\r\n\r\n\r\n# @lc code=start\r\nclass Solution:\r\n def sequentialDigits(self, low: int, high: int) -> List[int]:\r\n # enumerate all qualifying sequential-digits numbers in the interval\r\n res = []\r\n llen = len(str(low))\r\n hlen = len(str(high))\r\n for l in range(llen, hlen + 1):\r\n for s in range(1, 11 - l):\r\n cur = s\r\n for delta in range(1, l):\r\n cur = 10 * cur + s + delta\r\n if cur < low:\r\n continue\r\n elif cur <= high:\r\n res.append(cur)\r\n else:\r\n break\r\n return res\r\n\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/1291.顺次数.py","file_name":"1291.顺次数.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"292957621","text":"def solve(string):\n _, *l = map(int, string.split())\n l.sort()\n ans = 0\n for i, l0 in enumerate(l[:-2]):\n for j, l1 in enumerate(l[i + 1:-1]):\n if l0 == l1:\n continue\n for l2 in l[i + j + 2:]:\n if l1 == l2:\n continue\n if l2 < l0 + l1:\n ans += 1\n else:\n break\n return str(ans)\n\n\nif __name__ == '__main__':\n import sys\n print(solve(sys.stdin.read().strip()))\n","sub_path":"Python_codes/p02583/s072262638.py","file_name":"s072262638.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"199339391","text":"import datetime\nimport functools\nimport glob\nimport itertools as IT\nimport math\nimport os\nimport pickle\nimport shutil\nfrom os.path import basename, dirname\n\nimport cv2\nimport matplotlib.image as mpimg\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport scipy.misc\nfrom keras.engine import Model\nfrom keras.models import load_model\nfrom keras.preprocessing.image import random_channel_shift\nfrom sklearn.utils import shuffle\nfrom tqdm import tqdm\n\nIMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 160, 320, 3\nINPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)\n\nDEBUG = False\nDATA_DIR = \"data\"\nOUTPUT_DIR = \"output\"\nARCHIVE_DIR = \"archive\"\nDATA_GENERATED_DIR = os.path.join(DATA_DIR, 
\"generated\")\nLOG_FILE = os.path.join(DATA_DIR, \"driving_log.csv\")\nMODEL_FILE = os.path.join(OUTPUT_DIR, \"model-{epoch:02d}-{val_loss:.2f}.h5\")\nMODEL_HISTORY_FILE = os.path.join(OUTPUT_DIR, \"model_history_history.p\")\nARGS_FILE = os.path.join(OUTPUT_DIR, \"args.txt\")\n\n\ndef save_args(args):\n with open(ARGS_FILE, \"w\") as f:\n txt = \"\"\n for key, value in args.items():\n txt += '{:>20}: {:<20}\\n'.format(key, value)\n print(txt, file=f)\n\n\ndef post_init():\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n\n if not os.path.exists(DATA_GENERATED_DIR):\n os.makedirs(DATA_GENERATED_DIR, exist_ok=True)\n\n\ndef load_image(filepath, rel_path=None):\n if rel_path is not None:\n return mpimg.imread(os.path.join(rel_path, filepath))\n\n return mpimg.imread(filepath)\n\n\ndef save_image(filepath, image, angle=None, debug=DEBUG):\n if debug:\n scipy.misc.toimage(update_image_with_guides(image, angle)).save(filepath)\n else:\n scipy.misc.toimage(image).save(filepath)\n\n\ndef load_images(filepaths, rel_path=None):\n images = []\n for file in filepaths:\n image = load_image(file, rel_path)\n images.append(image)\n\n return np.array(images)\n\n\ndef load_driving_log(logfile=LOG_FILE, x_cols=None, y_cols=None, size=-1, random=False):\n if x_cols is None:\n x_cols = ['center', 'left', 'right']\n\n if y_cols is None:\n y_cols = ['steering']\n\n data_frame = pd.read_csv(logfile)\n X, y = data_frame[x_cols].values.squeeze(), data_frame[y_cols].values.squeeze()\n\n if random:\n X, y = shuffle(X, y)\n\n if size > 0:\n X, y = X[:size], y[:size]\n\n return X, y\n\n\ndef save_driving_log(driving_log, X, y, cols=None):\n if cols is None:\n cols = ['center', 'steering']\n\n data = np.hstack((X[:, None], y[:, None]))\n df = pd.DataFrame(columns=cols, data=data)\n df.to_csv(driving_log, sep=',', index=False)\n\n\ndef update_image_with_guides(image, angle, pred_angle=None):\n updated_image = image.copy()\n h, w = updated_image.shape[0:2]\n cv2.putText(updated_image, str(round(angle, 2)), org=(2, 33), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=.5, color=(0, 255, 0), thickness=2)\n cv2.line(updated_image, (int(w / 2), int(h)), (int(w / 2 + angle * w / 4), int(h / 2)), (0, 255, 0),\n thickness=2)\n if pred_angle is not None:\n cv2.putText(updated_image, str(round(pred_angle, 2)), org=(2, 55), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=.5, color=(0, 0, 255), thickness=2)\n cv2.line(updated_image, (int(w / 2), int(h)), (int(w / 2 + pred_angle * w / 4), int(h / 2)), (0, 0, 255),\n thickness=2)\n\n return updated_image\n\n\ndef generate_dirname(original_dir, fileprefix=None):\n if fileprefix is None:\n fileprefix = datetime.datetime.now().strftime('%y_%m_%d_%H_%M_%S_%f')\n\n dirname = basename(original_dir)\n return \"%s_%s\" % (dirname, fileprefix)\n\n\ndef archive_state(archive_name=None):\n if archive_name is None:\n archive_name = generate_dirname(ARCHIVE_DIR)\n\n target_archive_dir = os.path.join(ARCHIVE_DIR, archive_name)\n target_data_dir = os.path.join(target_archive_dir, DATA_DIR)\n\n if not os.path.exists(target_archive_dir):\n os.makedirs(target_archive_dir, exist_ok=True)\n\n if not os.path.exists(target_data_dir):\n os.makedirs(target_data_dir, exist_ok=True)\n\n if os.path.exists(OUTPUT_DIR):\n shutil.move(OUTPUT_DIR, target_archive_dir)\n print(\"{} -> {}/\".format(OUTPUT_DIR, target_archive_dir), flush=True)\n\n if os.path.exists(DATA_GENERATED_DIR):\n shutil.move(DATA_GENERATED_DIR, target_data_dir)\n print(\"{} -> {}/\".format(DATA_GENERATED_DIR, 
target_archive_dir), flush=True)\n\n\ndef brightness_shift(image):\n image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n bright_increase = int(30 * np.random.uniform(-0.3, 1))\n image_hsv[:, :, 2] = image[:, :, 2] + bright_increase\n\n image = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2RGB)\n return image\n\n\ndef channel_shift(image, intensity=30, channel_axis=2):\n if np.random.rand() > 0.5:\n image = np.invert(image)\n return random_channel_shift(image, intensity, channel_axis)\n\n\ndef flip(image, angle):\n if math.isclose(0.0, angle, rel_tol=1e-5):\n return np.fliplr(image), angle\n\n return np.fliplr(image), -angle\n\n\ndef random_transform(image, angle):\n image, angle = flip(image, angle)\n image = brightness_shift(image)\n image = channel_shift(image)\n return image, angle\n\n\ndef flatten_driving_log(image_paths, angles, correction, rel_in_path=DATA_DIR):\n res_images_paths = []\n res_angles = []\n\n for idx in tqdm(range(len(image_paths))):\n image_path = image_paths[idx]\n angle = angles[idx]\n\n (center, left, right) = image_path\n\n center, left, right = os.path.join(rel_in_path, center.strip()), \\\n os.path.join(rel_in_path, left.strip()), \\\n os.path.join(rel_in_path, right.strip())\n\n center_angle = angle\n left_angle = angle + correction\n right_angle = angle - correction\n\n res_images_paths.append(center)\n res_images_paths.append(left)\n res_images_paths.append(right)\n\n # if center camera image -> no correction\n res_angles.append(center_angle)\n # if left camera image -> correct steering to the right\n res_angles.append(left_angle)\n # if right camera image -> correct steering to the left\n res_angles.append(right_angle)\n\n return np.array(res_images_paths), np.array(res_angles)\n\n\ndef augment_driving_log(image_paths, angles, out_path=DATA_GENERATED_DIR):\n res_images_paths = []\n res_angles = []\n\n for idx in tqdm(range(len(image_paths))):\n image_path = image_paths[idx]\n angle = angles[idx]\n\n transformed_image, transformed_angle = \\\n random_transform(load_image(image_path), angle)\n transformed_path = os.path.join(out_path, basename(image_path))\n save_image(transformed_path, transformed_image, transformed_angle)\n res_images_paths.append(transformed_path)\n res_angles.append(transformed_angle)\n\n return np.array(res_images_paths), np.array(res_angles)\n\n\ndef calc_optimal_distribution(angles, bins_num):\n _, bins = np.histogram(angles, bins_num)\n mean = np.mean(angles)\n variance = np.var(angles)\n sigma = np.sqrt(variance)\n x = np.linspace(min(angles), max(angles), bins_num)\n dx = bins[1] - bins[0]\n scale = len(angles) * dx\n y = mlab.normpdf(x, mean, sigma) * scale\n return bins, y\n\n\ndef reduce_size_of(range_x, range_y, over_limit_num):\n data_size = len(range_x)\n target_size = data_size - over_limit_num\n selected_indices = np.random.choice(data_size, size=target_size, replace=False)\n\n return range_x[selected_indices], range_y[selected_indices]\n\n\ndef adjust_with_best_fit_line(X_input, y_input, angle_groups):\n bins, y = calc_optimal_distribution(y_input, bins_num=angle_groups)\n X_adj = y_adj = None\n for idx in tqdm(range(len(bins))):\n if idx + 1 == len(bins):\n break\n\n range_from = bins[idx]\n range_to = bins[idx + 1]\n optimal_sample_num = int(y[idx])\n\n if optimal_sample_num == 0:\n continue\n\n y_indexes = np.sort(np.where((y_input >= range_from) & (y_input < range_to))[0])\n\n range_x = X_input[y_indexes]\n range_y = y_input[y_indexes]\n data_size = len(range_y)\n assert data_size == len(y_indexes) == len(range_x) == 
len(range_y)\n\n # print(\"dx: \", (range_to - range_from))\n # print(\"{} ... {}, {} -> {}\".format(range_from, range_to, data_size, optimal_sample_num))\n\n if data_size == 0:\n continue\n\n missing_samples = optimal_sample_num - data_size\n # print(\"{} opt. - {} act. = {} est.\".format(optimal_sample_num, data_size, missing_samples))\n\n new_range_x = new_range_y = None\n if missing_samples < 0:\n over_limit_num = abs(missing_samples)\n # print(\"over {}\".format(over_limit_num))\n new_range_x, new_range_y = reduce_size_of(range_x, range_y, over_limit_num)\n # items reduced\n assert len(new_range_x) == len(new_range_y) == optimal_sample_num\n else:\n # print(\"ok\")\n new_range_x, new_range_y = range_x, range_y\n\n # make one default value for 1 bin range - average\n optimal_value = np.mean(new_range_y)\n # print(\"Optimal value: \", optimal_value)\n # simply extend array with optimal value\n new_range_y = np.repeat(optimal_value, len(new_range_y))\n\n assert new_range_x.shape == new_range_y.shape\n\n if X_adj is None or y_adj is None:\n X_adj, y_adj = new_range_x, new_range_y\n else:\n X_adj = np.concatenate((X_adj, new_range_x), axis=0)\n y_adj = np.concatenate((y_adj, new_range_y), axis=0)\n\n return X_adj, y_adj\n\n\ndef load_input_data(correction, angle_groups,\n init_size=-1, init_shuffle=False, logfile=LOG_FILE):\n print(\"Load data\", flush=True)\n X, y = load_driving_log(logfile=logfile, size=init_size, random=init_shuffle)\n print(\"{} loaded\".format(len(X)))\n\n print(\"Flatten data\", flush=True)\n X_flat, y_flat = flatten_driving_log(X, y, correction=correction)\n print(\"{} after flattening\".format(len(X_flat)))\n\n print(\"Augment data\", flush=True)\n X_input, y_input = augment_driving_log(X_flat, y_flat)\n print(\"{} after augmenting\".format(len(X_input)))\n\n print(\"Adjust data\", flush=True)\n X_adj, y_adj = adjust_with_best_fit_line(X_input, y_input, angle_groups)\n print(\"{} after adjustment\".format(len(X_adj)))\n\n return X_adj, y_adj\n\n\ndef model_history_available(model_history_file_path=MODEL_HISTORY_FILE):\n return os.path.exists(model_history_file_path)\n\n\ndef load_model_history(model_history_file_path=MODEL_HISTORY_FILE):\n with open(model_history_file_path, 'rb') as f:\n return pickle.load(f)\n\n\ndef save_model_history(history, model_history_file_path=MODEL_HISTORY_FILE):\n if history is None:\n return\n\n with open(model_history_file_path, 'wb') as f:\n pickle.dump(history, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef plot_images(images, angles=None, cols=None, squeeze=True, title=None, savepath=None):\n if cols is None:\n cols = len(images)\n\n rows = len(images) // cols\n\n if squeeze:\n images = images.squeeze()\n\n fig, axs = plt.subplots(rows, cols, figsize=(cols * 5, rows * 5))\n fig.subplots_adjust(top=0.8)\n\n if cols == rows == 1:\n axs = [axs]\n else:\n axs = axs.ravel()\n\n for row in range(rows):\n for col in range(cols):\n index = col + row * cols\n image = images[index]\n axs[index].axis('off')\n if len(image.shape) == 2:\n axs[index].imshow(image, cmap=\"gray\")\n else:\n axs[index].imshow(image)\n\n if angles is not None:\n axs[index].set_title(angles[index])\n if title is not None:\n plt.suptitle(title, fontsize=12)\n\n if savepath is not None:\n plt.savefig(savepath, bbox_inches='tight')\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_distribution(angles, bins_num, range=(-1, 1), title=None, savepath=None):\n plt.figure(figsize=(12, 6))\n\n plt.xlim(range)\n\n if title is not None:\n plt.title('Distribution for 
{}'.format(title))\n plt.hist(angles, bins=bins_num)\n if savepath is not None:\n plt.savefig(savepath, bbox_inches='tight')\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_optimal_distribution(angles, bins_num, range=(-1, 1)):\n plt.figure(1, figsize=(12, 6))\n\n plt.hist(angles, bins_num)\n\n plt.xlim(range)\n x = np.linspace(min(angles), max(angles), bins_num)\n\n _, y = calc_optimal_distribution(angles, bins_num)\n\n plt.scatter(x, y, marker='+', s=1, c='r', lw=6, zorder=10)\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_model_history(filepath, title, lbls, show=True, ylim=(0, 0.05), xlim=(0, 30)):\n if model_history_available(model_history_file_path=filepath):\n history = load_model_history(model_history_file_path=filepath)\n if show:\n plt.figure(figsize=(8, 6))\n\n min_results = []\n for lbl in lbls:\n if show:\n plt.plot(history['epoch'], history[lbl], label=lbl)\n min_value_idx = np.argmin(history[lbl])\n epoch_number = history['epoch'][min_value_idx]\n min_value = history[lbl][min_value_idx]\n min_results.append((lbl, epoch_number, min_value))\n\n if show:\n plt.title(title)\n plt.ylabel('Data')\n plt.xlabel('Epochs')\n plt.legend(loc='lower right')\n plt.ylim(ylim)\n plt.xlim(xlim)\n plt.tight_layout()\n plt.show()\n return min_results\n else:\n print(\"{} not found\".format(filepath))\n return None\n\n\ndef plot_model_history_list(histories, lbls=('loss', 'val_loss', 'mean_squared_error'), plot_func=plot_model_history,\n show=True, ylim=(0, 0.05), xlim=(0, 30)):\n aggr_results = []\n for path, name in histories:\n results = plot_func(path, name, lbls, show, ylim, xlim)\n aggr_results.append((name, results))\n return aggr_results\n\n\ndef plot_images_with_ids(x, y, image_ids, rel_path=None, grid_cols=4):\n rnd_images = load_images(x[image_ids], rel_path)\n rnd_angles = y[image_ids]\n\n rnd_mod_images = []\n rnd_mod_classes = []\n for image, clazz in zip(rnd_images, rnd_angles):\n rnd_mod_images.append(update_image_with_guides(image, clazz))\n rnd_mod_classes.append(clazz)\n\n rnd_mod_images = np.array(rnd_mod_images)\n rnd_mod_classes = np.array(rnd_mod_classes)\n\n plot_images(rnd_mod_images, rnd_mod_classes, cols=grid_cols)\n\n print(rnd_mod_images.shape)\n print(rnd_mod_classes.shape)\n\n\ndef plot_random_images(x, y, rel_path=None, size=8, grid_cols=4):\n selected_ids = np.random.choice(len(x), size=size, replace=False)\n plot_images_with_ids(x, y, selected_ids, rel_path, grid_cols)\n\n\ndef save_model_architecture(model_path, output_path='model.png'):\n from keras.utils.visualize_util import plot\n model = load_model(model_path)\n plot(model, to_file=output_path, show_shapes=True)\n\n\ndef get_top_smallest_k_history_metrics(histories, top_k_smallest=5):\n results = plot_model_history_list(histories, show=False)\n\n loss_items = np.array([[item[1][0][1], item[1][0][2]] for item in results])\n val_loss_items = np.array([[item[1][1][1], item[1][1][2]] for item in results])\n mse_items = np.array([[item[1][2][1], item[1][2][2]] for item in results])\n\n # data[np.argsort(data[:, 0])] where the 0 is the column index on which to sort\n loss_items_top_k_sorted_indexes = np.argsort(loss_items[:, 1])[:top_k_smallest]\n val_loss_items_top_k_sorted_indexes = np.argsort(val_loss_items[:, 1])[:top_k_smallest]\n mse_items_top_k_sorted_indexes = np.argsort(mse_items[:, 1])[:top_k_smallest]\n\n top_k_loss_items = [(results[idx][0], loss_items[idx].tolist()) for idx in\n loss_items_top_k_sorted_indexes]\n top_k_val_loss = [(results[idx][0], val_loss_items[idx].tolist()) for idx in\n 
val_loss_items_top_k_sorted_indexes]\n top_k_mse = [(results[idx][0], mse_items[idx].tolist()) for idx in mse_items_top_k_sorted_indexes]\n\n return top_k_loss_items, top_k_val_loss, top_k_mse\n\n\ndef concat_n_images(images, rows, cols, cellHeight, cellWidth, target_depth=1):\n display = np.empty((cellHeight * rows, cellWidth * cols, target_depth), dtype=np.float32)\n\n for i, j in IT.product(range(rows), range(cols)):\n # arr = cellArray[i*ncols+j].image # you may need this\n arr = images[i * cols + j] # my simplified cellArray uses this\n x, y = i * cellHeight, j * cellWidth\n display[x:x + cellHeight, y:y + cellWidth, :] = arr\n return display\n\n\ndef conv_layer_output(model, layer, image):\n depth = layer.output_shape[-1]\n image = np.expand_dims(image, axis=0)\n\n test_model = Model(input=model.input, output=layer.output)\n\n conv_features = test_model.predict(image)\n # print(\"Convolutional features shape: \", conv_features.shape)\n\n feature_list = []\n for i in range(depth):\n conv_feature = conv_features[0, :, :, i]\n conv_feature = np.expand_dims(conv_feature, axis=len(conv_feature.shape))\n feature_list.append(conv_feature)\n\n return np.array(feature_list)\n\n\ndef combine_images_vert(a, b):\n ha, wa = a.shape[:2]\n hb, wb = b.shape[:2]\n total_height = ha + hb\n max_width = np.max([wa, wb])\n new_img = np.zeros(shape=(total_height, max_width, 1), dtype=np.float32)\n\n new_img[:ha, :wa] = a\n new_img[ha:ha + hb, :wb] = b\n\n return new_img\n\n\ndef combine_images_horiz(a, b):\n ha, wa = a.shape[:2]\n hb, wb = b.shape[:2]\n max_height = np.max([ha, hb])\n total_width = wa + wb\n new_img = np.zeros(shape=(max_height, total_width, 1), dtype=np.float32)\n\n new_img[:ha, :wa] = a\n new_img[:hb, wa:wa + wb] = b\n\n return new_img\n\n\ndef put_text(image, text, color=(255, 255, 255), make_border=True):\n image = image.copy()\n if np.max(image) <= 1:\n image = ((image + 0.5) * 255).round().astype(np.uint8)\n\n image = cv2.putText(img=image, text=text, org=(2, 10),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=.3, color=color, thickness=1)\n\n if make_border:\n image = cv2.copyMakeBorder(image.squeeze(), 1, 1, 1, 1, cv2.BORDER_CONSTANT,\n value=(255, 255, 255))\n image = np.expand_dims(image, axis=len(image.shape))\n return image\n\n\ndef save_image_feature_map(model, conv_layers, test_image, grid_setup, filepath):\n outdir = dirname(filepath)\n\n if not os.path.exists(outdir):\n os.makedirs(outdir, exist_ok=True)\n\n feature_map_images = []\n for layer in conv_layers:\n layer_height, layer_width, layer_depth = layer.output_shape[1:]\n\n cell_height, cell_width = layer_height, layer_width\n rows, cols = grid_setup[layer_depth]\n\n normalized_test_image = test_image.copy()\n layer_output_image = conv_layer_output(model, layer, normalized_test_image)\n\n concat_images = concat_n_images(layer_output_image, rows, cols, cell_height, cell_width, 1)\n title = layer.name\n concat_images = put_text(concat_images, title, make_border=True)\n\n feature_map_images.append(concat_images)\n\n merged_image = functools.reduce(combine_images_vert, feature_map_images)\n\n plt.figure(figsize=(16, 10))\n plt.axis('off')\n plt.imshow(merged_image.squeeze(), cmap=\"gray\")\n plt.tight_layout()\n plt.savefig(filepath, bbox_inches='tight')\n\n\ndef save_all_images_feature_map(image_dir, model_path, rel_path=None, target_dir=DATA_GENERATED_DIR):\n grid_setup = {\n 3: (1, 3),\n 24: (4, 6),\n 36: (6, 6),\n 48: (6, 8),\n 64: (8, 8)\n }\n\n X = np.array(sorted([path for path in glob.iglob(os.path.join(image_dir, 
\"*.jpg\"), recursive=True)]))\n\n model = load_model(model_path)\n model.summary()\n\n # we are interesting in first 5 layers, since next layers are more abstract and small to view anything\n # ____________________________________________________________________________________________________\n # Layer (type) Output Shape Param # Connected to\n # ====================================================================================================\n # -> lambda_1 (Lambda) (None, 160, 320, 3) 0 lambda_input_1[0][0]\n # ____________________________________________________________________________________________________\n # -> cropping2d_1 (Cropping2D) (None, 66, 320, 3) 0 lambda_1[0][0]\n # ____________________________________________________________________________________________________\n # -> convolution2d_1 (Convolution2D) (None, 31, 158, 24) 1824 cropping2d_1[0][0]\n # ____________________________________________________________________________________________________\n # -> convolution2d_2 (Convolution2D) (None, 14, 77, 36) 21636 convolution2d_1[0][0]\n # ____________________________________________________________________________________________________\n # -> convolution2d_3 (Convolution2D) (None, 5, 37, 48) 43248 convolution2d_2[0][0]\n # ____________________________________________________________________________________________________\n # convolution2d_4 (Convolution2D) (None, 3, 35, 64) 27712 convolution2d_3[0][0]\n # ____________________________________________________________________________________________________\n # convolution2d_5 (Convolution2D) (None, 1, 33, 64) 36928 convolution2d_4[0][0]\n # ____________________________________________________________________________________________________\n # dropout_1 (Dropout) (None, 1, 33, 64) 0 convolution2d_5[0][0]\n # ____________________________________________________________________________________________________\n # flatten_1 (Flatten) (None, 2112) 0 dropout_1[0][0]\n # ____________________________________________________________________________________________________\n # dense_1 (Dense) (None, 100) 211300 flatten_1[0][0]\n # ____________________________________________________________________________________________________\n # dense_2 (Dense) (None, 50) 5050 dense_1[0][0]\n # ____________________________________________________________________________________________________\n # dense_3 (Dense) (None, 10) 510 dense_2[0][0]\n # ____________________________________________________________________________________________________\n # dense_4 (Dense) (None, 1) 11 dense_3[0][0]\n # ====================================================================================================\n selected_layers = model.layers[0:-8]\n\n for original_file in tqdm(X):\n test_image = load_image(original_file, rel_path=rel_path)\n test_image_file_path = os.path.join(target_dir, basename(original_file))\n save_image_feature_map(model, selected_layers, test_image, grid_setup, test_image_file_path)\n\n\nif __name__ == \"__main__\":\n archive_state()\n post_init()\n\n IMAGE_DIR = \"/Users/volkodav/self_driving_car_proj/proj/CarND-Behavioral-Cloning-P3/video\"\n MODEL_PATH = \"/Users/volkodav/self_driving_car_proj/proj/work9/relu_nogeneration_rnd_zoom_brightness_channel_shift_cor010/output/model-09-0.01.h5\"\n save_all_images_feature_map(IMAGE_DIR, MODEL_PATH, target_dir=\"output_featuremap\")\n # work9/relu_nogeneration_rnd_zoom_brightness_channel_shift_cor010/output/model-09-0.01.h5\n\n 
save_model_architecture(MODEL_PATH)\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":23978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"619115911","text":"from rest_framework import serializers\nfrom rest_framework.fields import empty\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n created_by = serializers.StringRelatedField(read_only=True)\n \n created_at = serializers.DateTimeField(read_only=True)\n\n updated_at = serializers.DateTimeField(read_only=True)\n \n def create(self, validated_data):\n request = self.context.get('request')\n\n if request:\n validated_data['created_by'] = request.user\n\n return super().create(validated_data)\n\n def __init__(self, instance=None, data=empty, **kwargs):\n request = kwargs.get('request')\n\n print('request is ', request)\n\n remove_fields = kwargs.pop('remove_fields', [])\n\n super().__init__(instance, data, **kwargs)\n\n if not isinstance(remove_fields, list):\n remove_fields = [remove_fields]\n\n for field_name in remove_fields:\n self.fields.pop(field_name)\n\n class Meta:\n exclude = ('deleted', 'deleted_at')\n","sub_path":"fwbasemodel/rest/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"148025807","text":"from tkinter import *\nimport time\nimport socket\n\nIP_ADRESS =\"localhost\"\nPORT = 5005\nBUFFER_SIZE = 1024\n\nclass Afficheur:\n instance = None\n def __init__(self,ip,port):\n if Afficheur.instance is not None:\n del(self)\n return\n else:\n Afficheur.instance= self\n self.ip = ip\n self.port = port\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.socket.connect((ip,port))\n self.root = Tk()\n self.time1 = ''\n self.clock = Label(self.root, font=('Arial', 20), bg='grey')\n self.clock.pack(fill=BOTH, expand=1)\n self.input = Entry(self.root,text=ip)\n self.input.pack()\n self.but = Button(self.root,text=\"Connexion\", command = self.setConn)\n self.but.pack()\n \n def tick(self):\n self.time2 = \"\"\n self.time2 = self.get_heure()\n if len(self.time2) > 8:\n self.time2 = self.time2[:8]\n if self.time2 != self.time1:\n self.time1= self.time2\n self.clock.config(text=self.time2)\n self.root.after(500,self.tick)\n\n def get_heure(self):\n data = self.socket.recv(BUFFER_SIZE)\n if len(data) == 0:\n data = \"error\"\n return data\n\n def setConn(self):\n self.ip = self.input.get()\n #self.socket.close()\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.socket.connect((self.ip,self.port))\n \n\na = Afficheur(IP_ADRESS,PORT)\na.tick()\n \n","sub_path":"afficherServeur.py","file_name":"afficherServeur.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"171363612","text":"import pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport pickle\nimport time\n\nsave = True\nload = True\nnetwork_file = 'saves/network_conv.pt'\ngraph_file = 'saves/graph_conv.pkl'\nbatch_size = 1000\nepochs = 10\ncycles = 1\nseq_len = 200\n\ndevice = torch.device('cuda')\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 500)\ndf = pd.read_feather('merged.feather')\npred_hours = 1 - 1\n# -8302 - seq_len - 
pred_hours\n# -9302 - seq_len - pred_hours\ntrain = df.iloc[-50000:-372, 1:]\ntest = df.iloc[-372 - seq_len - pred_hours:, 1:]\n\n\none = torch.ones(1).to(device)\nzero = torch.zeros(1).to(device)\ndef profit(pred, pip, total=True):\n ned = pred.clone()\n ned[:, 0], ned[:, 1], ned[:, 2] = torch.where(pred[:, 0] >= 0.5, one, zero), torch.where(pred[:, 1] >= 0.5, -one, zero), torch.where(pred[:, 2] >= 0.5, zero, zero)\n ap = ned*torch.from_numpy(pip).reshape(-1, 1).cuda()\n ap = np.sum(ap.cpu().numpy(), axis=1).reshape(-1)*10000\n if total:\n return np.sum(ap)\n else:\n return ap\n\nclass Forex(Dataset):\n def __init__(self, df, seq_len=100, pred_hours = 1):\n super().__init__()\n self.pred_hours = pred_hours\n self.seq_len = seq_len\n self.df = df.values\n for i in np.argwhere(self.df == 0):\n a, b = i\n self.df[a, b] = self.df[a - 1, b]\n\n def __len__(self):\n return len(self.df) - self.seq_len - 2 - self.pred_hours\n\n def __getitem__(self, item):\n std = torch.Tensor([ 1.1960e-01, 1.0929e-01, 1.1639e-01, 1.1580e-01, 2.1910e+03,\n 8.8094e-04, 8.3699e-04, 7.7023e-04, 8.6849e-04, 2.1474e+03,\n 1.0025e-01, 9.1172e-02, 9.3719e-02, 9.9312e-02, 3.0291e+03,\n 9.8328e-04, 9.1960e-04, 9.6045e-04, 9.8505e-04, 2.0675e+03,\n 7.8079e-04, 7.2146e-04, 7.0829e-04, 7.5794e-04, 2.2338e+03,\n 1.3214e-01, 1.2168e-01, 1.3328e-01, 1.3046e-01, 4.9807e+03,\n 1.4860e-01, 1.3303e-01, 1.4739e-01, 1.4396e-01, 2.7807e+03,\n 1.0015e-03, 8.4915e-04, 9.3257e-04, 9.7762e-04, 2.0885e+03,\n 8.0313e-04, 7.6616e-04, 7.1742e-04, 7.7969e-04, 1.5349e+03,\n 1.4785e-03, 1.3635e-03, 1.8143e-03, 1.4648e-03, 2.1705e+03,\n 1.1488e-03, 1.0118e-03, 1.0972e-03, 1.1378e-03, 2.0229e+03,\n 1.1823e-01, 1.0630e-01, 1.1848e-01, 1.1411e-01, 3.2741e+03,\n 1.1122e-03, 1.1408e-03, 1.0701e-03, 1.0888e-03, 3.8207e+03])\n raw = self.df[item:item + self.seq_len]\n a_diff = self.df[item + self.seq_len + self.pred_hours] - self.df[item + self.seq_len - 1]\n long = np.where(a_diff >= 0.0003, 1, 0)\n short = np.where(a_diff <= -0.0003, 1, 0)\n nothing = np.where(np.logical_and(0.0003 > a_diff, a_diff > -0.0003), 1, 0)\n # pips = a_diff * self.df[item + self.seq_len - 1]\n norm = raw[1:] - raw[:-1]\n tposed = np.concatenate((np.zeros(norm.shape[1])[None], norm), axis=0) / std\n return tposed, np.argmax(np.concatenate((long[-2].reshape(1,1), short[-2].reshape(1,1), nothing[-2].reshape(1,1)), axis=1), axis=1), a_diff[-2]\n\n\nclass ForexLSTM(nn.Module):\n def __init__(self):\n super().__init__()\n # self.up = nn.Linear(65, 100)\n # self.dropout = nn.Dropout(p=0.2)\n self.conv = nn.Conv1d(200, 400, 5, 5)\n self.linear = nn.Linear(5200, 3)\n torch.nn.init.xavier_normal_(self.linear.weight)\n self.softmax = torch.nn.Softmax(dim=1)\n\n def init_hidden(self):\n init = torch.zeros(2, self.batch_size, 13).to(device).double()\n self.hidden = (init, init)\n\n def forward(self, input):\n self.batch_size = input.shape[0]\n out = self.linear(self.conv(input).view(self.batch_size, -1))\n out = self.softmax(out)\n return out\n\n\nnet = ForexLSTM().double().to(device)\nif os.path.exists(network_file) & load:\n net.load_state_dict(torch.load(network_file))\n # net.lstm.flatten_parameters()\n print('loaded net')\n\ntrain_dataset = Forex(train, seq_len=seq_len, pred_hours=pred_hours)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\ntest_dataset = Forex(test, seq_len=seq_len, pred_hours=pred_hours)\ntest_loader = DataLoader(dataset=test_dataset, batch_size=10000, shuffle=False)\n\ncriterion = torch.nn.CrossEntropyLoss()\n\ntloss = 
[]\neloss = []\ntacc = []\nacc = []\n\nfor cycle in range(cycles):\n print(f'{cycle+1}/{cycles}')\n optimizer = torch.optim.Adam(net.parameters(), lr=0.1)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)\n start = time.time()\n for epoch in range(epochs):\n peloss = []\n batch_profit = []\n for i, batch in enumerate(train_loader):\n x, y, a = batch\n x, y, a = x.to(device), y.to(device), a.numpy()\n pred = net(x)\n loss = criterion(pred.reshape(-1, 3).float(), y.reshape(-1).long())\n # loss = criterion(pred[:,0].reshape(-1, 1), y_long.reshape(-1, 1).double()) + criterion1(pred[:,1].reshape(-1, 1), y_short.reshape(-1, 1).double())\n optimizer.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(net.parameters(), 10)\n optimizer.step()\n with torch.no_grad():\n peloss.append(loss.item())\n batch_profit.append(profit(pred,a))\n with torch.no_grad():\n pred, a = pred[-370:], a[-370:]\n tacc.append(profit(pred, a))\n for set in test_loader:\n x_t, y_t, c = set\n x_t, y_t, c = x_t.to(device), y_t.to(device), c.numpy()\n pred_t = net(x_t)\n loss_t = criterion(pred_t.reshape(-1, 3), y_t.reshape(-1).long())\n tloss.append(loss_t.item())\n acc.append(profit(pred_t, c))\n dict = {'epoch': [f'{epoch+1}/{epochs}'],\n 'Test profit': [f'{round(profit(pred_t, c), 2)} pips'],\n 'Train profit': [f'{round(profit(pred, a), 2)} pips'],\n 'Test loss': [round(loss_t.item(), 4)],\n 'Train loss': [round(np.mean(peloss), 4)],\n 'Learning rate': [param['lr'] for param in optimizer.param_groups],\n 'Batch profit': [np.where(np.array(batch_profit) > 0, 1, 0).mean()],\n 'ETA': [f'{round(time.time()-start, 2)}/{round((time.time()-start)/(epoch+1)*epochs, 2)}']}\n q = pd.DataFrame.from_dict(dict)\n print('\\n', q.to_string(index=False, justify='center'))\n scheduler.step()\n eloss.append(np.mean(peloss))\n\nif os.path.exists(graph_file):\n with open(graph_file, 'rb') as f:\n o_eloss, o_tloss, o_tacc, o_acc = pickle.load(f)\n o_eloss = o_eloss + eloss\n o_tloss = o_tloss + tloss\n o_tacc = o_tacc + tacc\n o_acc = o_acc + acc\nelse:\n o_eloss, o_tloss, o_tacc, o_acc = eloss, tloss, tacc, acc\n\nplt.show()\nplt.plot(o_eloss, color='blue')\nplt.plot(o_tloss, color='orange')\nplt.title('loss per epoch')\nplt.show()\nplt.plot(o_acc, color='orange')\nplt.plot(o_tacc, color='blue')\nplt.title('profit per epoch')\nplt.show()\nplt.plot(o_acc, color='orange')\nplt.show()\n\nap = profit(pred_t, c, False)\nplt.plot([np.sum(ap[:i]) for i in np.arange(len(ap))+1])\nplt.title('simulated profit')\nplt.show()\n\n# print('pips:', 10000 * np.sum(c.reshape(-1) * np.where(pre > 0.5, 1, 0)))\n\nif save:\n torch.save(net.state_dict(), network_file)\n with open(graph_file, 'wb') as f:\n graph = (o_eloss, o_tloss, o_tacc, o_acc)\n pickle.dump(graph, f)\n print('saved')\n","sub_path":"network_conv.py","file_name":"network_conv.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"185206393","text":"this_file = __file__\n\n\n# The constructor and selectors of the mobile\n\ndef mobile(left, right):\n \"\"\"Construct a mobile from a left arm and a right arm.\"\"\"\n assert is_arm(left), \"left must be a arm\"\n assert is_arm(right), \"right must be a arm\"\n return ['mobile', left, right]\n\n\ndef is_mobile(m):\n \"\"\"Return whether m is a mobile.\"\"\"\n return type(m) == list and len(m) == 3 and m[0] == 'mobile'\n\n\ndef left(m):\n \"\"\"Select the left arm of a mobile.\"\"\"\n assert is_mobile(m), \"must call left 
on a mobile\"\n return m[1]\n\n\ndef right(m):\n \"\"\"Select the right arm of a mobile.\"\"\"\n assert is_mobile(m), \"must call right on a mobile\"\n return m[2]\n\n\n# The constructor and selectors of the arm\n\ndef arm(length, mobile_or_planet):\n \"\"\"Construct a arm: a length of rod with a mobile or planet at the end.\"\"\"\n assert is_mobile(mobile_or_planet) or is_planet(mobile_or_planet)\n return ['arm', length, mobile_or_planet]\n\n\ndef is_arm(s):\n \"\"\"Return whether s is a arm.\"\"\"\n return type(s) == list and len(s) == 3 and s[0] == 'arm'\n\n\ndef length(s):\n \"\"\"Select the length of a arm.\"\"\"\n assert is_arm(s), \"must call length on a arm\"\n return s[1]\n\n\ndef end(s):\n \"\"\"Select the mobile or planet hanging at the end of a arm.\"\"\"\n assert is_arm(s), \"must call end on a arm\"\n return s[2]\n\n\n# The constructor and selectors of the planet\n\ndef planet(size):\n \"\"\"Construct a planet of some size.\"\"\"\n assert size > 0\n return ['planet', size]\n\n\ndef size(w):\n \"\"\"Select the size of a planet.\"\"\"\n assert is_planet(w), 'must call size on a planet'\n return w[1]\n\n\ndef is_planet(w):\n \"\"\"Whether w is a planet.\"\"\"\n return type(w) == list and len(w) == 2 and w[0] == 'planet'\n\n\n# examples and usage\n\ndef examples():\n t = mobile(arm(1, planet(2)),\n arm(2, planet(1)))\n u = mobile(arm(5, planet(1)),\n arm(1, mobile(arm(2, planet(3)),\n arm(3, planet(2)))))\n v = mobile(arm(4, t), arm(2, u))\n return (t, u, v)\n\n\ndef total_weight(m):\n \"\"\"Return the total weight of m, a planet or mobile.\n\n >>> t, u, v = examples()\n >>> total_weight(t)\n 3\n >>> total_weight(u)\n 6\n >>> total_weight(v)\n 9\n \"\"\"\n if is_planet(m):\n return size(m)\n else:\n assert is_mobile(m), \"must get total weight of a mobile or a planet\"\n return total_weight(end(left(m))) + total_weight(end(right(m)))\n\n\ndef balanced(m):\n \"\"\"Return whether m is balanced.\n\n >>> t, u, v = examples()\n >>> balanced(t)\n True\n >>> balanced(v)\n True\n >>> w = mobile(arm(3, t), arm(2, u))\n >>> balanced(w)\n False\n >>> balanced(mobile(arm(1, v), arm(1, w)))\n False\n >>> balanced(mobile(arm(1, w), arm(1, v)))\n False\n \"\"\"\n if is_planet(m):\n return True\n elif m[1][1] * total_weight(m[1][2]) == m[2][1] * total_weight(m[2][2]):\n bl = balanced(m[1][2]) if is_mobile(m[1][2]) else 0\n br = balanced(m[2][2]) if is_mobile(m[2][2]) else 0\n if is_mobile(m[1][2]) and is_mobile(m[2][2]):\n return True if bl and br else False\n elif is_mobile(m[1][2]) or is_mobile(m[2][2]):\n return bl if is_mobile(m[1][2]) else br\n else:\n return True\n else:\n return False\n\n\ndef totals_tree(m):\n \"\"\"Return a tree representing the mobile with its total weight at the root.\n\n >>> t, u, v = examples()\n >>> print_tree(totals_tree(t))\n 3\n 2\n 1\n >>> print_tree(totals_tree(u))\n 6\n 1\n 5\n 3\n 2\n >>> print_tree(totals_tree(v))\n 9\n 3\n 2\n 1\n 6\n 1\n 5\n 3\n 2\n \"\"\"\n if is_planet(m):\n return tree(total_weight(m))\n else:\n lis = [totals_tree(m[1][2]), totals_tree(m[2][2])]\n return tree(total_weight(m), lis)\n\n\ndef preorder(t):\n \"\"\"Return a list of the entries in this tree in the order that they\n would be visited by a preorder traversal (see problem description).\n\n >>> numbers = tree(1, [tree(2), tree(8, [tree(2), tree(5)]), tree(9, [tree(3)])])\n >>> preorder(numbers)\n [1, 2, 8, 2, 5, 9, 3]\n >>> preorder(tree(2, [tree(4, [tree(6)])]))\n [2, 4, 6]\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n lis = []\n\n def so(tr):\n lis.append(label(tr))\n for i in 
branches(tr):\n so(i)\n\n so(t)\n return lis\n\n\ndef has_path(t, word):\n \"\"\"Return whether there is a path in a tree where the entries along the path\n spell out a particular word.\n\n >>> greetings = tree('h', [tree('i'),\n ... tree('e', [tree('l', [tree('l', [tree('o')])]),\n ... tree('y')])])\n >>> print_tree(greetings)\n h\n i\n e\n l\n l\n o\n y\n >>> has_path(greetings, 'h')\n True\n >>> has_path(greetings, 'i')\n False\n >>> has_path(greetings, 'hi')\n True\n >>> has_path(greetings, 'hello')\n True\n >>> has_path(greetings, 'hey')\n True\n >>> has_path(greetings, 'bye')\n False\n \"\"\"\n assert len(word) > 0, 'no path for empty word.'\n \"*** YOUR CODE HERE ***\"\n def preorderr(t):\n liss = []\n\n def so(tr):\n liss.append(label(tr))\n for i in branches(tr):\n so(i)\n\n so(t)\n return liss\n if word[0] != label(t):\n return False\n else:\n lis = preorderr(t)\n for i in range(0, len(word)):\n temp = 0\n for j in range(temp, len(lis)):\n if word[i] == lis[j] and i == len(word) - 1:\n return True\n elif word[i] == lis[j]:\n temp = j\n return False\n\n\n\ndef insert_items(lst, entry, elem):\n \"\"\"\n >>> test_lst = [1, 5, 8, 5, 2, 3]\n >>> new_lst = insert_items(test_lst, 5, 7)\n >>> new_lst\n [1, 5, 7, 8, 5, 7, 2, 3]\n >>> large_lst = [1, 4, 8]\n >>> large_lst2 = insert_items(large_lst, 4, 4)\n >>> large_lst2\n [1, 4, 4, 8]\n >>> large_lst3 = insert_items(large_lst2, 4, 6)\n >>> large_lst3\n [1, 4, 6, 4, 6, 8]\n >>> large_lst3 is large_lst\n True\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n temp = -2\n for i in range(0, lst.count(entry)):\n lst.insert(lst.index(entry, temp + 2) + 1, elem)\n temp = lst.index(entry, temp + 2)\n\n return lst\n\n\n## Tree Methods ##\n\n# Treat all the following code as being behind an abstraction layer, you shouldn't need to look at it!\n\ndef tree(label, branches=[]):\n \"\"\"Construct a tree with the given label value and a list of branches.\"\"\"\n if change_abstraction.changed:\n for branch in branches:\n assert is_tree(branch), 'branches must be trees'\n return {'label': label, 'branches': list(branches)}\n else:\n for branch in branches:\n assert is_tree(branch), 'branches must be trees'\n return [label] + list(branches)\n\n\ndef label(tree):\n \"\"\"Return the label value of a tree.\"\"\"\n if change_abstraction.changed:\n return tree['label']\n else:\n return tree[0]\n\n\ndef branches(tree):\n \"\"\"Return the list of branches of the given tree.\"\"\"\n if change_abstraction.changed:\n return tree['branches']\n else:\n return tree[1:]\n\n\ndef is_tree(tree):\n \"\"\"Returns True if the given tree is a tree, and False otherwise.\"\"\"\n if change_abstraction.changed:\n if type(tree) != dict or len(tree) != 2:\n return False\n for branch in branches(tree):\n if not is_tree(branch):\n return False\n return True\n else:\n if type(tree) != list or len(tree) < 1:\n return False\n for branch in branches(tree):\n if not is_tree(branch):\n return False\n return True\n\n\ndef is_leaf(tree):\n \"\"\"Returns True if the given tree's list of branches is empty, and False\n otherwise.\n \"\"\"\n return not branches(tree)\n\n\ndef change_abstraction(change):\n change_abstraction.changed = change\n\n\nchange_abstraction.changed = False\n\n\ndef print_tree(t, indent=0):\n \"\"\"Print a representation of this tree in which each node is\n indented by two spaces times its depth from the root.\n\n >>> print_tree(tree(1))\n 1\n >>> print_tree(tree(1, [tree(2)]))\n 1\n 2\n >>> numbers = tree(1, [tree(2), tree(3, [tree(4), tree(5)]), tree(6, [tree(7)])])\n >>> 
print_tree(numbers)\n 1\n 2\n 3\n 4\n 5\n 6\n 7\n \"\"\"\n print(' ' * indent + str(label(t)))\n for b in branches(t):\n print_tree(b, indent + 1)\n\n\ndef copy_tree(t):\n \"\"\"Returns a copy of t. Only for testing purposes.\n\n >>> t = tree(5)\n >>> copy = copy_tree(t)\n >>> t = tree(6)\n >>> print_tree(copy)\n 5\n \"\"\"\n return tree(label(t), [copy_tree(b) for b in branches(t)])\n","sub_path":"Lab/lab04/code/lab04.py","file_name":"lab04.py","file_ext":"py","file_size_in_byte":8818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"192061684","text":"import pathlib\nimport random\nimport tensorflow as tf\nfrom lib.models import TopModel\n\n\nimg_size = 224\ndata_augmentation = tf.keras.Sequential([\n tf.keras.layers.experimental.preprocessing.RandomFlip(\"horizontal_and_vertical\"),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ])\n\n\ndef preprocess_image(image):\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [img_size, img_size])\n return image\n\n\ndef load_and_preprocess_from_path_label(path, label):\n return load_and_preprocess_image(path), label\n\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\ndata_root = pathlib.Path('data/train')\nall_image_paths = list(data_root.glob('*/*'))\nall_image_paths = [str(path) for path in all_image_paths]\nrandom.shuffle(all_image_paths)\nimage_count = len(all_image_paths)\n\ntrain_image_paths = all_image_paths[:20000]\ntest_image_paths = all_image_paths[20000:]\nlabel_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())\nlabel_to_index = dict((name, index) for index, name in enumerate(label_names))\nall_image_labels = [label_to_index[pathlib.Path(path).parent.name]\n for path in all_image_paths]\ntrain_image_labels = all_image_labels[:20000]\ntest_image_labels = all_image_labels[20000:]\n\n\ntrain_ds = tf.data.Dataset.from_tensor_slices((train_image_paths, train_image_labels))\ntrain_image_label_ds = train_ds.map(load_and_preprocess_from_path_label)\nprint(train_image_label_ds)\n\ntest_ds = tf.data.Dataset.from_tensor_slices((test_image_paths, test_image_labels))\ntest_image_label_ds = test_ds.map(load_and_preprocess_from_path_label)\nprint(test_image_label_ds)\n\nBATCH_SIZE = 32\n\ntrain_ds = train_image_label_ds.shuffle(buffer_size=20000)\ntrain_ds = train_ds.repeat()\ntrain_ds = train_ds.batch(BATCH_SIZE)\ntrain_ds = train_ds.prefetch(buffer_size=128)\ntrain_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y))\nprint(train_ds)\n\ntest_ds = test_image_label_ds.shuffle(buffer_size=image_count-20000)\n# test_ds = test_ds.repeat()\ntest_ds = test_ds.batch(BATCH_SIZE)\n# test_ds = test_ds.prefetch(buffer_size=128)\ntest_ds = test_ds.map(lambda x, y: (data_augmentation(x, training=True), y))\nprint(test_ds)\n\nmodel = TopModel('resnet101', len(label_names), train_ds, test_ds, img_size)\nmodel.train()\nmodel.submit()\n","sub_path":"face_emotion_recognition/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"600731484","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#import matplotlib.colors as colors\n\n#######################LL############################################\ndata_LL = np.loadtxt(\"LL_p_T_c\")\neLL=[]\nvLL=[]\nPLL=[]\neLL=data_LL[:,0]\nvLL=data_LL[:,1]\nPLL=data_LL[:,3]\n\ncol_LL = 
int(np.sqrt(len(eLL)))\n\n\np_LL = np.zeros((col_LL,col_LL)) \ne_LL = np.zeros((col_LL,col_LL))\nv_LL = np.zeros((col_LL,col_LL))\n\n\ne_LL = np.reshape(eLL,(col_LL,col_LL))\nv_LL = np.reshape(vLL,(col_LL,col_LL))\np_LL = np.reshape(PLL,(col_LL,col_LL))\n#p_LL = np.reshape(PLL,(col_LL,col_LL))\n###############################LH####################################\ndata_LH = np.loadtxt(\"LH_p_T_c\")\neLH=[]\nvLH=[]\nPLH=[]\neLH=data_LH[:,0]\nvLH=data_LH[:,1]\nPLH=data_LH[:,3]\n#\ncol_LH = int(np.sqrt(len(eLH)))\n#\n#\nP_LH = np.zeros((col_LH,col_LH)) \ne_LH = np.zeros((col_LH,col_LH))\nv_LH = np.zeros((col_LH,col_LH))\n#\ne_LH = np.reshape(eLH,(col_LH,col_LH))\nv_LH = np.reshape(vLH,(col_LH,col_LH))\n#P_LH = np.reshape(PLH,(col_LH,col_LH))\np_LH = np.reshape(PLH,(col_LH,col_LH))\n###############################HT#####################################\ndata_HT = np.loadtxt(\"HT_p_T_c\")\neHT=[]\nvHT=[]\nPHT=[]\neHT=data_HT[:,0]\nvHT=data_HT[:,1]\nPHT=data_HT[:,3]\n\ncol_HT = int(np.sqrt(len(eHT)))\n\n\nP_HT = np.zeros((col_HT,col_HT)) \ne_HT = np.zeros((col_HT,col_HT))\nv_HT = np.zeros((col_HT,col_HT))\n\n\ne_HT = np.reshape(eHT,(col_HT,col_HT))\nv_HT = np.reshape(vHT,(col_HT,col_HT))\np_HT = np.reshape(PHT,(col_HT,col_HT))\n########################R###########################################\ndata_R = np.loadtxt(\"R_p_T_c\")\neR=[]\nvR=[]\nPR=[]\neR=data_R[:,0]\nvR=data_R[:,1]\nPR=data_R[:,3]\n\ncol_R = int(np.sqrt(len(eR)))\n\n\nP_R = np.zeros((col_R,col_R)) \ne_R = np.zeros((col_R,col_R))\nv_R = np.zeros((col_R,col_R))\n\n\ne_R = np.reshape(eR,(col_R,col_R))\nv_R = np.reshape(vR,(col_R,col_R))\np_R = np.reshape(PR,(col_R,col_R))\n##########################two-phase#########################################\n#data_TP = np.loadtxt(\"TP2_error-P-T-x.txt\")\ndata_TP = np.loadtxt(\"TP2_T-x-vV-p-u-v.txt\")\neTP=[]\nvTP=[]\nPTP=[]\neTP=data_TP[:,4]\nvTP=data_TP[:,5]\nPTP=data_TP[:,0]\n\n\ncol_TP = int(np.sqrt(len(eTP)))\np_TP = np.zeros((col_TP,col_TP)) \ne_TP = np.zeros((col_TP,col_TP))\nv_TP = np.zeros((col_TP,col_TP))\ne_TP = np.reshape(eTP,(col_TP,col_TP))\nv_TP = np.reshape(vTP,(col_TP,col_TP))\np_TP = np.reshape(PTP,(col_TP,col_TP))\n\n#%%\n#norm=colors.PowerNorm(10)\nplt.figure(figsize=(8,4))\n#\nplt.pcolormesh(v_R, e_R/1e3, p_R, cmap='jet',shading='gouraud')\nplt.clim(200,800)\nplt.pcolormesh(v_LL, e_LL/1e3, p_LL, cmap='jet',shading='gouraud')\nplt.clim(200,800)\nplt.pcolormesh(v_LH, e_LH/1e3, p_LH, cmap='jet',shading='gouraud')\nplt.clim(200,800)\nplt.pcolormesh(v_TP, e_TP/1e3, p_TP, cmap='jet',shading='gouraud')\nplt.clim(200,800)\nplt.pcolormesh(v_HT, e_HT/1e3, p_HT, cmap='jet',shading='gouraud')\nplt.clim(200,800)\n\n\n####\nax = plt.gca() \n#ax.set_xlim([0.8e-3, 1.7e-1])\n#ax.set_ylim([-4.5e2, 1e2])\n#ax.set_xlim([1.9e-3, 2.2e-3])\n#ax.set_ylim([-2.2e2, -1.8e2])\nax.tick_params(labelsize=5)\n#plt.clim(1e-5,1e-1)\n#plt.clim(1e-10,1e-1)\n#plt.title('Pressure Relative Errors',fontsize=17)\nplt.ylabel('Internal Energy, e $[kJkg^{-1}]$',fontsize=15,position=(0,0.5),rotation = \"vertical\")\nplt.xlabel('Specific Volume, v $[m^{3}kg^{-1}]$',fontsize=15,rotation = \"horizontal\")\nplt.xticks(size = 12)\nplt.yticks(size = 12)\nax.set_xscale('log')\nplt.grid(True)\nplt.colorbar(fraction=0.046,pad=0.01)\nplt.text(3.3e-1,550, ' K', 
fontsize=11,fontweight='bold')\n\nplt.tight_layout()\n##,format='%.0e'\nplt.savefig(\"temperature.pdf\")\nplt.show()","sub_path":"CO2FoamSchemeV1/CO2/program/Tmap.py","file_name":"Tmap.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"190679379","text":"import SegmentationFunctions.SegmentationFunction as base\n\nimport numpy as np\nimport cv2\n\nfrom math import floor\n\nclass BarcodeSegmentation(base.SegmentationFunction):\n \"\"\"Class implementing barcode segmentation using python\"\"\"\n # https://hackernoon.com/barcode-image-segmentation-a36cdce69f03\n\n def __init__(self, input_image):\n return super().__init__(input_image)\n\n def get_result(self, parameters_vector):\n\n # functions def\n\n morphologyEx = cv2.morphologyEx\n\n # params def\n\n #lower_threshold = 10\n #upper_threshold = 255\n\n #blackhat_kernel_size = 3\n #morphology_kernel_size = 5\n\n #objects_to_remove_height = 21\n #objects_to_remove_width = 35\n\n threshhold = floor(parameters_vector[0])\n output_value = 255\n\n blackhat_kernel_size = floor(parameters_vector[1])\n morphology_kernel_size = floor(parameters_vector[2])\n\n objects_to_remove_height = floor(parameters_vector[3])\n objects_to_remove_width = floor(parameters_vector[4])\n \n # image loading\n im = self._input_image\n\n # blackhat\n kernel = np.ones((1, blackhat_kernel_size), np.uint8)\n im = morphologyEx(im, cv2.MORPH_BLACKHAT, kernel, anchor=(1, 0))\n\n # thresholding\n thresh, im = cv2.threshold(im, threshhold, output_value, cv2.THRESH_BINARY)\n\n # morphological operations\n kernel = np.ones((1, morphology_kernel_size), np.uint8)\n im = morphologyEx(im, cv2.MORPH_DILATE, kernel, anchor=(2, 0), iterations=2) # dilation\n im = morphologyEx(im, cv2.MORPH_CLOSE, kernel, anchor=(2, 0), iterations=2) # closing\n\n kernel = np.ones((objects_to_remove_height, objects_to_remove_width), np.uint8)\n im = morphologyEx(im, cv2.MORPH_OPEN, kernel, iterations=1)\n \n return im\n\n # function definitions\n\n\n \n\n\n\n","sub_path":"PSO/SegmentationFunctions/BarcodeSegmentation.py","file_name":"BarcodeSegmentation.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"641277131","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/01/23 11:10\n# @Author : c0l0121\n# @File : web_traffic_all_data.py\n# @Desc :\n\nimport scipy as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n\ndef error(f, x, y):\n \"\"\"Residual function\n\n :param f: the fitted function\n :param x: set of independent variables\n :param y: set of dependent variables\n :return: the residual (sum of squared errors)\n \"\"\"\n return sp.sum((f(x) - y) ** 2)\n\n\n# read in the data\ndata = np.genfromtxt('../dataset/mlsd/ch01/web_traffic.tsv', delimiter='\\t')\nprint(\"data[:10] = \", data[:5])\nprint(\"data.shape = \", data.shape)\n\n# split the data into x and y vectors\nx = data[:, 0]\ny = data[:, 1]\n\n# check how many y values are NaN\nprint(\"np.sum(np.isnan(y)) = \", np.sum(np.isnan(y)))\n\nx = x[~np.isnan(y)]\ny = y[~np.isnan(y)]\n\nprint(\"len(x) = \", len(x))\n\n# there is an inflection point at week 3.5, so split the data into two segments and fit each separately\ninflection = math.ceil(3.5 * 7 * 24)\nxa = x[:inflection]\nya = y[:inflection]\nxb = x[inflection:]\nyb = y[inflection:]\n\nfa1 = sp.poly1d(sp.polyfit(xa, ya, 1))\nfb1 = sp.poly1d(sp.polyfit(xb, yb, 1))\nfa_error = error(fa1, xa, ya)\nfb_error = error(fb1, xb, yb)\nprint(\"Error inflection=%f\" % (fa_error + fb_error))\n\n# plot the data points as a scatter plot\nplt.scatter(x, y, s=2, alpha=0.8)\n\n# plot the piecewise fitted functions\nfax = sp.linspace(xa[0], xa[-1], 800)\nfbx = 
sp.linspace(xb[0], xb[-1], 200)\nplt.plot(fax, fa1(fax), color=\"m\", label=\"fa=%i\" % fa1.order)\nplt.plot(fbx, fb1(fbx), color=\"y\", label=\"fb=%i\" % fb1.order)\n\nplt.title(\"Web traffic over last month\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Hits/Hour\")\nplt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])\nplt.autoscale(tight=True)\nplt.grid()\nplt.legend()\nplt.show()\n","sub_path":"scipy_lib/web_traffic_split_two_parts_d1.py","file_name":"web_traffic_split_two_parts_d1.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"592391912","text":"\nimport torch\nimport os\nimport time\nimport numpy as np\nfrom torch_geometric.data import Data, DataLoader\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch_geometric.nn import GCNConv, NNConv, SplineConv\nfrom torch_geometric.data import InMemoryDataset\nimport matplotlib.pyplot as plt\nimport pickle\n\n\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef u(x):\n res = 1\n dim = len(x)\n for d in range(dim):\n res = res * np.sin(np.pi * x[d])\n return res\n\ndim = 3\nh = 16\nn = h**dim\ndepth = np.int(np.log2(h))\nprint(dim, h, depth)\n\n### generate y on the full grid ###\nxs = np.linspace(0.0, 1.0, h)\nxs_list = [xs] * dim\nxs_grid = np.vstack([xx.ravel() for xx in np.meshgrid(*xs_list)]).T\n\ny = np.zeros(n)\nfor i in range(n):\n y[i] = u(xs_grid[i])\n\n# per-level data collected for the pickle dumps below\nhierarchy_n = []\nhierarchy_l = []\nhierarchy_x = []\nhierarchy_y = []\nhierarchy_edge_index = []\nhierarchy_edge_attr = []\nfor l in range(1, depth+1):\n h_l = 2**l\n n_l = h_l ** dim\n\n\n xs = np.linspace(0.0, 1.0, h_l)\n xs_list = [xs] * dim\n xs_grid = np.vstack([xx.ravel() for xx in np.meshgrid(*xs_list)]).T\n print(l, h_l, xs_grid.shape)\n\n y = np.zeros(n_l)\n for i in range(n_l):\n y[i] = u(xs_grid[i])\n\n\n edge_index = []\n edge_attr = []\n for i in range(n_l):\n for j in range(i):\n if (np.linalg.norm(xs_grid[i] - xs_grid[j])) < 1.01 /(h_l-1):\n edge_index.append((i,j))\n edge_index.append((j,i))\n\n edge_index = np.array(edge_index, dtype=np.int).transpose()\n edge_attr = np.array(edge_attr)\n\n hierarchy_n.append(n_l)\n hierarchy_l.append(l)\n hierarchy_x.append(torch.tensor(xs_grid, dtype=torch.float))\n hierarchy_y.append(torch.tensor(y, dtype=torch.float))\n\n hierarchy_edge_index.append(torch.tensor(edge_index, dtype=torch.long))\n hierarchy_edge_attr.append(torch.tensor(edge_attr, dtype=torch.float))\n print(edge_index.shape, edge_attr.shape)\n\n#os.mkdir(\"/Users/lizongyi/Downloads/GNN-PDE/fenics/data_multi_res/\")\npath = \"fenics/data_multi_res/\" + str(dim)+str(h)\n\nif not os.path.exists(path):\n os.mkdir(path)\n print(\"Directory \", path, \" Created \")\nelse:\n print(\"Directory \", path, \" already exists\")\n\npickle.dump(hierarchy_n, open(path + \"/hierarchy_n\", \"wb\"))\npickle.dump(hierarchy_l, open(path + \"/hierarchy_l\", \"wb\"))\npickle.dump(hierarchy_x, open(path + \"/hierarchy_x\", \"wb\"))\npickle.dump(hierarchy_y, open(path + \"/hierarchy_y\", \"wb\"))\npickle.dump(hierarchy_edge_index, open(path + \"/hierarchy_edge_index\", \"wb\"))\npickle.dump(hierarchy_edge_attr, open(path + \"/hierarchy_edge_attr\", \"wb\"))\n\n\ndim = 3\nh = 16\ndepth = np.int(np.log2(h))\nprint(dim, h, depth)\n\n#################################################\n#\n# load data\n#\n#################################################\n\npath = \"fenics/data_multi_res/\" + str(dim)+str(h)\n\nhierarchy_n = pickle.load(open(path + \"/hierarchy_n\", \"rb\"))\nhierarchy_l = pickle.load(open(path + \"/hierarchy_l\", 
\"rb\"))\nhierarchy_x = pickle.load(open(path + \"/hierarchy_x\", \"rb\"))\nhierarchy_y = pickle.load(open(path + \"/hierarchy_y\", \"rb\"))\nhierarchy_edge_index = pickle.load(open(path + \"/hierarchy_edge_index\", \"rb\"))\nhierarchy_edge_attr = pickle.load(open(path + \"/hierarchy_edge_attr\", \"rb\"))\n\nprint(hierarchy_n)\nprint(hierarchy_l)\nprint(hierarchy_x[-1].shape)\nprint(hierarchy_y[-1].shape)\nprint(hierarchy_edge_index[-1].shape)\nprint(hierarchy_edge_attr[-1].shape)\n\ndepth = len(hierarchy_n)\ndataset = []\nfor l in range(depth):\n data = Data(x=hierarchy_x[l], y=hierarchy_y[l], edge_index=hierarchy_edge_index[l], edge_attr=hierarchy_edge_attr[l]).to(device)\n\n dataset.append(data)\n#################################################\n#\n# network\n#\n#################################################\n\ndef Upsample(x, dim, channels, scale):\n x = x.transpose(0, 1)\n for i in range(dim):\n x = F.upsample(x.view(1, channels, -1), scale_factor=scale, mode='nearest').view(32, -1)\n channels = channels*2\n x = x.transpose(0, 1)\n return x\n\nclass Net_graph(torch.nn.Module):\n def __init__(self):\n super(Net_graph, self).__init__()\n self.conv11 = GCNConv(dim, 32)\n self.conv12 = GCNConv(32, 32)\n self.conv13 = GCNConv(32, 32)\n\n self.conv21 = GCNConv(dim, 32)\n self.conv22 = GCNConv(32, 32)\n self.conv23 = GCNConv(32, 32)\n\n self.conv31 = GCNConv(dim, 32)\n self.conv32 = GCNConv(32, 32)\n self.conv33 = GCNConv(32, 32)\n\n self.conv41 = GCNConv(dim, 32)\n self.conv42 = GCNConv(32, 32)\n self.conv43 = GCNConv(32, 32)\n\n self.fc = nn.Linear(32,1)\n\n def forward(self, dataset):\n x1, edge_index1 = dataset[-1].x, dataset[-1].edge_index\n\n x1 = self.conv11(x1, edge_index1)\n x1 = F.relu(x1)\n x1 = self.conv12(x1, edge_index1)\n x1 = F.relu(x1)\n x1 = self.conv13(x1, edge_index1)\n\n x2, edge_index2 = dataset[-2].x, dataset[-2].edge_index\n\n x2 = self.conv21(x2, edge_index2)\n x2 = F.relu(x2)\n x2 = self.conv22(x2, edge_index2)\n x2 = F.relu(x2)\n x2 = self.conv23(x2, edge_index2)\n\n x3, edge_index3 = dataset[-3].x, dataset[-3].edge_index\n\n x3 = self.conv31(x3, edge_index3)\n x3 = F.relu(x3)\n x3 = self.conv32(x3, edge_index3)\n x3 = F.relu(x3)\n x3 = self.conv33(x3, edge_index3)\n\n # x4, edge_index4 = dataset[-4].x, dataset[-4].edge_index\n #\n # x4 = self.conv41(x4, edge_index4)\n # x4 = F.relu(x4)\n # x4 = self.conv42(x4, edge_index4)\n # x4 = F.relu(x4)\n # x4 = self.conv43(x4, edge_index4)\n\n\n x2 = Upsample(x2, channels=32, dim=dim, scale=2)\n x3 = Upsample(x3, channels=32, dim=dim, scale=4)\n # x4 = Upsample(x4, channels=32, dim=dim, scale=8)\n\n\n #x1 = x1 + x2.view(-1, 32) + x3.view(-1, 32) + x4.view(-1, 32)\n #x1 = x1 + x2.view(-1, 32) + x3.view(-1, 32)\n x = F.relu(x1)\n x = self.fc(x)\n return x\n\n\nclass Net_MP(nn.Module):\n def __init__(self):\n super(Net_MP, self).__init__()\n nn1 = nn.Sequential(nn.Linear(dim, 16), nn.ReLU(), nn.Linear(16, dim * 32))\n nn1_short = nn.Linear(dim, dim * 32)\n self.conv1 = NNConv(dim, 32, nn1_short, aggr='add')\n\n nn2 = nn.Sequential(nn.Linear(dim, 16), nn.ReLU(), nn.Linear(16, 1024))\n nn2_short = nn.Linear(dim, 1024)\n self.conv2 = NNConv(32, 32, nn2_short, aggr='add')\n\n self.fc1 = torch.nn.Linear(32, 32)\n self.fc2 = torch.nn.Linear(32, 1)\n\n def forward(self, data):\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\n x = F.relu(self.conv1(x, edge_index, edge_attr))\n x = F.relu(self.conv2(x, edge_index, edge_attr))\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return 
x\n\n#################################################\n#\n# train\n#\n#################################################\n\nn = hierarchy_n[-1]\ny = hierarchy_y[-1].to(device)\n\nnum_train_node = n//2\n\nshuffle_idx = np.random.permutation(n)\ntrain_mask = shuffle_idx[:num_train_node]\ntest_mask = shuffle_idx[num_train_node:]\n\ntrivial_error = F.mse_loss(torch.zeros((num_train_node,1)).to(device), y.view(-1,1)[test_mask])\nmax_error = torch.max(y**2)\nprint('trivial error:', trivial_error)\nprint('max error:', max_error)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#model = Net_MP().to(device)\nmodel = Net_graph().to(device)\n#model = Net_full().to(device)\n#model = Net_skip().to(device)\n\ntest_loss = []\ntrain_loss = []\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\n#dataset = dataset.to(device)\nmodel.train()\nfor epoch in range(500):\n tic = time.time()\n\n if epoch == 500:\n for param_group in optimizer.param_groups:\n param_group['lr'] = 0.005\n\n if epoch == 1000:\n for param_group in optimizer.param_groups:\n param_group['lr'] = 0.002\n\n if epoch == 2000:\n for param_group in optimizer.param_groups:\n param_group['lr'] = 0.001\n\n optimizer.zero_grad()\n out = model(dataset)\n loss = F.mse_loss(out[train_mask], y.view(-1,1)[train_mask])\n loss2 = torch.sqrt(loss)/torch.sqrt(trivial_error)\n train_loss.append(loss2)\n\n loss.backward()\n optimizer.step()\n\n tac = time.time()\n with torch.no_grad():\n test_error = F.mse_loss(out[test_mask], y.view(-1,1)[test_mask])\n test_error2 = torch.sqrt(test_error)/torch.sqrt(trivial_error)\n test_loss.append(test_error2)\n print(\"{0} train: {1:.4f}, {2:.4f}, test: {3:.4f}, {4:.4f}, time: {5:2f}\".format(epoch, (loss/trivial_error).item(), (loss2).item(), (test_error/trivial_error).item(), (test_error2).item(), tac-tic))\n\n\ntorch.save(model, path + \"/model\")\n\n#################################################\n#\n# plot\n#\n#################################################\n\ntrain_loss = np.log10(np.array(train_loss))\ntest_loss = np.log10(np.array(test_loss))\n\nplt.plot(train_loss)\nplt.plot(test_loss)\nplt.show()\n\nnp.savetxt(path + \"/train_loss_net.txt\", train_loss)\nnp.savetxt(path + \"/test_loss_net.txt\", test_loss)\n","sub_path":"examples_and_experiments/graphNN_Li/multi_res_unet.py","file_name":"multi_res_unet.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"114978893","text":"# coding: utf-8\r\n##############################################################################\r\n# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.\r\n#\r\n# Subject to your compliance with these terms, you may use Microchip software\r\n# and any derivatives exclusively with Microchip products. It is your\r\n# responsibility to comply with third party license terms applicable to your\r\n# use of third party software (including open source software) that may\r\n# accompany Microchip software.\r\n#\r\n# THIS SOFTWARE IS SUPPLIED BY MICROCHIP \"AS IS\". 
NO WARRANTIES, WHETHER\r\n# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED\r\n# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A\r\n# PARTICULAR PURPOSE.\r\n#\r\n# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,\r\n# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND\r\n# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS\r\n# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE\r\n# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN\r\n# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,\r\n# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.\r\n##############################################################################\r\n\r\ncomponentsIDTable = [\"HarmonyCore\", \"gfx_hal\", \"aria_gfx_library\", \"gfx_disp_atmxt-xpro_320x480\", \"gfx_driver_ili9488\"]\r\nautoConnectTable = [[\"gfx_hal\", \"gfx_display\", \"gfx_disp_atmxt-xpro_320x480\", \"gfx_display\"],\r\n\t\t\t\t\t[\"aria_gfx_library\", \"gfx_hal\", \"gfx_hal\", \"gfx_hal\"],\r\n\t\t\t\t\t[\"gfx_hal\", \"gfx_display_driver\", \"gfx_driver_ili9488\", \"gfx_driver_ili9488\"]]\r\ndeactivateIDTable = [\"FreeRTOS\"]\r\n\r\nexecfile(Module.getPath() + \"../common/pin_config.py\")\r\nexecfile(Module.getPath() + \"../common/bsp_utils.py\")\r\n\r\n#Add BSP support\r\nexecfile(Module.getPath() + \"Support_BSP_SAM_E70_Xplained_Ultra.py\")\r\nexecfile(Module.getPath() + \"Support_BSP_SAM_C21_Xplained_Pro.py\")\r\nexecfile(Module.getPath() + \"Support_BSP_SAM_E54_Xplained_Pro.py\")\r\n\r\ndef enableILI9488SPIPins(bspID, enable):\r\n\tili9488SPI4PinConfigs = getBSPSupportNode(bspID, \"SPI 4-line\").getPinConfig()\r\n\tif (ili9488SPI4PinConfigs != None):\r\n\t\tresetPins(ili9488SPI4PinConfigs)\r\n\t\tif (enable == True):\r\n\t\t\tconfigurePins(ili9488SPI4PinConfigs)\r\n\r\ndef enableILI9488ParallelPins(bspID, enable):\r\n\tili9488ParallelPinConfigs = getBSPSupportNode(bspID, \"Parallel\").getPinConfig()\r\n\tif (ili9488ParallelPinConfigs != None):\r\n\t\tresetPins(ili9488ParallelPinConfigs)\r\n\t\tif (enable == True):\r\n\t\t\tconfigurePins(ili9488ParallelPinConfigs)\r\n\r\ndef enableSPIInterface(bspID, enable):\r\n\tcomponentIDTable = getBSPSupportNode(bspID, \"SPI 4-line\").getComponentActivateList()\r\n\tautoConnectTable = getBSPSupportNode(bspID, \"SPI 4-line\").getComponentAutoConnectList()\r\n\tif (enable == True):\r\n\t\tif (componentIDTable != None):\r\n\t\t\tres = Database.activateComponents(componentIDTable)\r\n\t\tif (autoConnectTable != None):\r\n\t\t\tres = Database.connectDependencies(autoConnectTable)\r\n\telif (enable == False):\r\n\t\tif (componentIDTable != None):\r\n\t\t\tres = Database.deactivateComponents(componentIDTable)\r\n\tenableILI9488SPIPins(bspID, enable)\r\n\tif (getBSPSupportNode(bspID, \"SPI 4-line\").getEventCallbackFxn() != None):\r\n\t\tgetBSPSupportNode(bspID, \"SPI 4-line\").getEventCallbackFxn()(\"configure\")\r\n\t\r\ndef enableParallelInterface(bspID, enable):\r\n\tcomponentIDTable = getBSPSupportNode(bspID, \"Parallel\").getComponentActivateList()\r\n\tautoConnectTable = getBSPSupportNode(bspID, \"Parallel\").getComponentAutoConnectList()\r\n\tif (enable == True):\r\n\t\tif (componentIDTable != None):\r\n\t\t\tres = Database.activateComponents(componentIDTable)\r\n\t\tif (autoConnectTable != None):\r\n\t\t\tres = Database.connectDependencies(autoConnectTable)\r\n\telif (enable == False):\r\n\t\tif 
(componentIDTable != None):\r\n\t\t\tres = Database.deactivateComponents(componentIDTable)\r\n\tenableILI9488ParallelPins(bspID, enable)\r\n\tif (getBSPSupportNode(bspID, \"Parallel\").getEventCallbackFxn() != None):\r\n\t\tgetBSPSupportNode(bspID, \"Parallel\").getEventCallbackFxn()(\"configure\")\r\n\r\ndef configureDisplayInterface(bspID, interface):\r\n\tprint(\"Configuring for \" + str(interface) + \" Interface.\")\r\n\tif (bspID == None):\r\n\t\tprint(\"No BSP used, will not configure\")\r\n\telse:\r\n\t\tif (str(interface) == \"SPI 4-line\"):\r\n\t\t\tenableParallelInterface(bspID, False)\r\n\t\t\tenableSPIInterface(bspID, True)\r\n\t\telif (str(interface) == \"Parallel\"):\r\n\t\t\tenableSPIInterface(bspID, False)\r\n\t\t\tenableParallelInterface(bspID, True)\r\n\r\ndef onDisplayInterfaceSelected(interfaceSelected, event):\r\n\tbspID = getSupportedBSP()\r\n\tnewDisplayInterface= interfaceSelected.getComponent().getSymbolByID(\"DisplayInterface\").getValue()\r\n\tcurrDisplayInterface = interfaceSelected.getComponent().getSymbolByID(\"currDisplayInterface\").getValue()\r\n\tinterfaceSelected.getComponent().getSymbolByID(\"currDisplayInterface\").setValue(event[\"value\"], 1)\r\n\tconfigureDisplayInterface(bspID, str(newDisplayInterface))\r\n\r\ndef instantiateComponent(bspComponent):\r\n\tglobal componentsIDTable\r\n\tglobal autoConnectTable\r\n\tglobal supportedBSPsIDList\r\n\t\r\n\t#Check if a supported BSP is loaded\r\n\tbspID = getSupportedBSP()\r\n\r\n\tres = Database.activateComponents(componentsIDTable)\r\n\tres = Database.connectDependencies(autoConnectTable)\r\n\tres = Database.deactivateComponents(deactivateIDTable);\r\n\t\r\n\t#DisplayInterface = bspComponent.createComboSymbol(\"DisplayInterface\", None, [\"SPI 4-line\", \"Parallel\"])\r\n\tDisplayInterface = bspComponent.createComboSymbol(\"DisplayInterface\", None, [\"SPI 4-line\"])\r\n\tDisplayInterface.setLabel(\"Display Interface\")\r\n\tDisplayInterface.setDescription(\"Configures the display interface to the maXTouch Xplained Pro display.\")\r\n\tDisplayInterface.setDefaultValue(\"SPI 4-line\")\r\n\tDisplayInterface.setDependencies(onDisplayInterfaceSelected, [\"DisplayInterface\"])\r\n\tDisplayInterface.setVisible(True)\r\n\t\r\n\t# Shadow display interface symbol\r\n\tcurrDisplayInterface = bspComponent.createComboSymbol(\"currDisplayInterface\", None, [\"SPI 4-line\", \"Parallel\"])\r\n\tcurrDisplayInterface.setDefaultValue(\"SPI 4-line\")\r\n\tcurrDisplayInterface.setVisible(False)\r\n\t\r\n\tif (bspID != None):\r\n\t\tconfigureDisplayInterface(bspID, str(currDisplayInterface.getValue()))\r\n\telse:\r\n\t\tprint(\"No BSP used, only software components are configured. Please add board-specific components.\")\r\n\r\n","sub_path":"templates/aria_gfx_xplained_pro/aria_gfx_xplained_pro.py","file_name":"aria_gfx_xplained_pro.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"484370402","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.welcome, name='welcome'),\n path('quests', views.quest_list, name='quest_list'),\n path('quests//', views.quest, name='quest'),\n path('player/new_player/', views.new_player, name='new_player'),\n path('player//', views.detail_player, name='detail_player'),\n path('player//edit_player/', views.edit_player, name='edit_player'),\n path('player', views.player_list, name='player_list'),\n]","sub_path":"story/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"221207027","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\nimport logging\n\nfrom mcfw.rpc import returns, arguments\nfrom plugins.rogerthat_api.to import UserDetailsTO\nfrom plugins.tff_backend.bizz.global_stats import ApiCallException\nfrom plugins.tff_backend.bizz.iyo.see import get_see_documents, get_see_document\nfrom plugins.tff_backend.bizz.iyo.utils import get_iyo_organization_id, get_iyo_username\nfrom plugins.tff_backend.to.iyo.see import IYOSeeDocumentView, IYOSeeDocument\n\n\n@returns([IYOSeeDocumentView])\n@arguments(params=dict, user_detail=UserDetailsTO)\ndef api_iyo_see_list(params, user_detail):\n try:\n iyo_organization_id = get_iyo_organization_id()\n iyo_username = get_iyo_username(user_detail)\n return get_see_documents(iyo_organization_id, iyo_username)\n except:\n logging.error('iyo.see.list exception occurred', exc_info=True)\n raise ApiCallException(u'Could not load ThreeFold documents. Please try again later.')\n\n\n@returns(IYOSeeDocument)\n@arguments(params=dict, user_detail=UserDetailsTO)\ndef api_iyo_see_detail(params, user_detail):\n try:\n iyo_organization_id = get_iyo_organization_id()\n iyo_username = get_iyo_username(user_detail)\n return get_see_document(iyo_organization_id, iyo_username, params['uniqueid'], u'all')\n except:\n logging.error('iyo.see.detail exception occurred', exc_info=True)\n raise ApiCallException(u'Could not load ThreeFold document details. Please try again later.')\n","sub_path":"plugins/tff_backend/api/rogerthat/its_you_online.py","file_name":"its_you_online.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"259478251","text":"# description: You are given coins of different denominations and a total amount of money\n# amount. Write a function to compute the fewest number of coins that you need to make up\n# that amount. 
If that amount of money cannot be made up by any combination of the coins,\n# return -1.\n\nfrom typing import List\n\nclass Solution:\n\tdef coinChange(self, coins: List[int], amount: int) -> int:\n\t\tnumDenoms = len(coins)\n\t\t# corner cases\n\t\tif (amount == 0):\n\t\t\treturn 0\n\t\tif (numDenoms == 0 or amount < 0):\n\t\t\treturn -1\n\n\t\t# otherwise, sort the list of coins\n\t\tsortedCoins = sorted(coins, reverse=True)\n\t\t# remove duplicate denomination values\n\t\tredCoins = []\n\t\tfor denom in sortedCoins:\n\t\t\tif denom not in redCoins:\n\t\t\t\tredCoins.append(denom)\n\n\t\tnumDenoms = len(redCoins)\n\t\t# print(str(coins))\n\t\t# print(\"amount is: \" + str(amount))\n\n\t\t# start a memo, where memo[i] = min # coins to produce value i\n\t\tmemo = [0] * (amount + 1)\n\n\t\t# add the denoms we have to the memo as 1\n\t\tfor currDenom in redCoins:\n\t\t\tif (currDenom <= amount):\n\t\t\t\tmemo[currDenom] = 1\n\n\t\t# iterate through all values 1 to amount\n\t\tfor value in range(1, amount + 1):\n\t\t\tfor currDenom in range(numDenoms):\n\t\t\t\tprevAmount = value - redCoins[currDenom]\n\t\t\t\t# skip this denom, check smaller denoms (if possible)\n\t\t\t\tif (prevAmount<0):\n\t\t\t\t\tcontinue\n\t\t\t\t# the amount is a denom, keep as 1 and go to next value\n\t\t\t\telif (prevAmount==0):\n\t\t\t\t\tbreak\n\t\t\t\t# if there's already a value, we only update if it's an improvement\n\t\t\t\telif (memo[prevAmount] > 0):\n\t\t\t\t\tif (memo[value] > 0):\n\t\t\t\t\t\tif (memo[prevAmount] + 1 < memo[value]):\n\t\t\t\t\t\t\tmemo[value] = memo[prevAmount] + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmemo[value] = memo[prevAmount] + 1\n\t\t\tif (memo[value] == 0):\n\t\t\t\tmemo[value] = -1\n\n\t\t# print(memo[3])\n\t\t# print(memo[amount])\n\t\treturn memo[amount]\n\n\nif __name__ == '__main__':\n\tsoln = Solution()\n\ttest1 = soln.coinChange([1,2,5],11) #should return 3\n\ttest2 = soln.coinChange([1,2,5],1) #should return 1\n\ttest3 = soln.coinChange([],5) #should return -1\n\ttest4 = soln.coinChange([2,3,5],1) #should return -1\n\ttest5 = soln.coinChange([2,5,10],3) #should return -1\n\ttest6 = soln.coinChange([5,10,25],90) #should return 5\n\ttest7 = soln.coinChange([186,419,83,408],6249) #should return 20\n\n\tassert test1 == 3\n\tassert test2 == 1\n\tassert test3 == -1\n\tassert test4 == -1\n\tassert test5 == -1\n\tassert test6 == 5\n\tassert test7 == 20","sub_path":"DP/CoinChange.py","file_name":"CoinChange.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"551755728","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\nimport time\nimport uuid\nfrom contextlib import contextmanager\n\nimport requests\nfrom retrying import retry\n\nfrom cloudify.workflows import local\nfrom cloudify_cli import constants as cli_constants\nfrom cloudify_rest_client import CloudifyClient\nimport fabric.api as fab\nfrom fabric.api import env\n\nfrom cosmo_tester.framework.testenv import TestCase\n\nHELLO_WORLD_EXAMPLE_NAME = 'cloudify-hello-world-example'\nEXAMPLE_URL = 'https://github.com/cloudify-cosmo/{0}/archive/{1}.tar.gz'\nIMPLEMENTATION_MSG = 'this should be implemented in inheriting classes'\n\n\nclass TestCliPackage(TestCase):\n\n @property\n def package_parameter_name(self):\n raise Exception(IMPLEMENTATION_MSG)\n\n @property\n def cli_package_url(self):\n return os.environ[self.package_parameter_name]\n\n @property\n def client_cfy_work_dir(self):\n raise Exception(IMPLEMENTATION_MSG)\n\n @property\n def propertsadawed(self):\n raise Exception(IMPLEMENTATION_MSG)\n\n @property\n def client_user(self):\n raise Exception(IMPLEMENTATION_MSG)\n\n @property\n def image_name(self):\n raise Exception(IMPLEMENTATION_MSG)\n\n def get_local_env_outputs(self):\n self.public_ip_address = \\\n self.local_env.outputs()['vm_public_ip_address']\n\n def is_install_plugins(self):\n return False\n\n def additional_setup(self):\n if self.package_parameter_name not in os.environ:\n raise ValueError(\n '{0} environment variable not set'\n .format(self.package_parameter_name))\n\n blueprint_filename = self.local_env_blueprint_file_name\n blueprint_path = os.path.join(os.path.dirname(__file__),\n 'resources',\n blueprint_filename)\n self.prefix = '{0}-cli-host'.format(self.test_id)\n self.bootstrap_prefix = 'cloudify-{0}'.format(self.test_id)\n\n self.inputs = self.get_local_env_inputs()\n self.bootstrap_inputs = self.get_bootstrap_inputs()\n self.deployment_inputs = self.get_deployment_inputs()\n\n self.branch = os.environ.get('BRANCH_NAME_CORE', 'master')\n self.logger.info('Using branch/tag: {0}'.format(self.branch))\n\n self.logger.info('initialize local env for running the '\n 'blueprint that starts a vm')\n self.local_env = local.init_env(\n blueprint_path,\n inputs=self.inputs,\n name=self._testMethodName,\n ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)\n\n self.logger.info('Starting vm to install CLI package on it later on')\n self.addCleanup(self.cleanup)\n self.local_env.execute('install',\n task_retries=40,\n task_retry_interval=30)\n\n self.get_local_env_outputs()\n self.logger.info('Outputs: {0}'.format(self.local_env.outputs()))\n\n env.update({\n 'timeout': 30,\n 'user': self.client_user,\n 'key_filename': self.inputs['key_pair_path'],\n 'host_string': self.public_ip_address,\n 'connection_attempts': 10,\n 'abort_on_prompts': True\n })\n\n def get_local_env_inputs(self):\n return {\n 'prefix': self.prefix,\n 'external_network': self.env.external_network_name,\n 'os_username': self.env.keystone_username,\n 'os_password': 
self.env.keystone_password,\n 'os_tenant_name': self.env.keystone_tenant_name,\n 'os_region': self.env.region,\n 'os_auth_url': self.env.keystone_url,\n 'image_name': self.image_name,\n 'flavor': self.env.medium_flavor_id,\n 'key_pair_path': '{0}/{1}-keypair.pem'.format(self.workdir,\n self.prefix)\n }\n\n def get_bootstrap_inputs(self):\n return {\n 'keystone_username': self.env.keystone_username,\n 'keystone_password': self.env.keystone_password,\n 'keystone_tenant_name': self.env.keystone_tenant_name,\n 'keystone_url': self.env.keystone_url,\n 'region': self.env.region,\n 'image_id': self.env.centos_7_image_id,\n 'flavor_id': self.env.medium_flavor_id,\n 'external_network_name': self.env.external_network_name,\n 'manager_server_name': self.env.management_server_name,\n 'management_network_name': self.env.management_network_name,\n 'manager_public_key_name': '{0}-manager-keypair'.format(\n self.prefix),\n 'agent_public_key_name': '{0}-agent-keypair'.format(\n self.prefix),\n }\n\n def local_env_blueprint_file_name(self):\n raise Exception(\"Each blueprint should specify it's local env \"\n \"blueprint\")\n\n def get_deployment_inputs(self):\n return {\n 'agent_user': 'ubuntu',\n 'image': self.env.ubuntu_trusty_image_id,\n 'flavor': self.env.medium_flavor_id\n }\n\n def setUp(self):\n super(TestCliPackage, self).setUp()\n self.additional_setup()\n\n def _execute_command(self, cmd, within_cfy_env=False, sudo=False,\n log_cmd=True, retries=0, warn_only=False):\n if within_cfy_env:\n cmd = 'source {0}/env/bin/activate && cfy {1}' \\\n .format(self.client_cfy_work_dir, cmd)\n\n if log_cmd:\n self.logger.info('Executing command: {0}'.format(cmd))\n else:\n self.logger.info('Executing command: ***')\n\n while True:\n if sudo:\n out = fab.sudo(cmd, warn_only=warn_only)\n else:\n out = fab.run(cmd, warn_only=warn_only)\n\n self.logger.info(\"\"\"Command execution result:\n Status code: {0}\n STDOUT:\n {1}\n STDERR:\n {2}\"\"\".format(out.return_code, out, out.stderr))\n if out.succeeded or (warn_only and retries == 0):\n return out\n else:\n if retries > 0:\n time.sleep(30)\n retries -= 1\n else:\n raise Exception('Command: {0} exited with code: '\n '{1}. 
Tried {2} times.'\n .format(cmd, out.return_code, retries + 1))\n\n def install_cli(self):\n self.logger.info('installing cli...')\n\n self._get_resource(self.cli_package_url, ops='-LO', sudo=True)\n self._get_resource('https://raw.githubusercontent.com/pypa/'\n 'pip/master/contrib/get-pip.py', ops='-L',\n pipe_command='sudo python2.7 -')\n self._execute_command('pip install virtualenv', sudo=True)\n\n last_ind = self.cli_package_url.rindex('/')\n package_name = self.cli_package_url[last_ind + 1:]\n self._execute_command('rpm -i {0}'.format(package_name), sudo=True)\n\n def _get_resource(self, resource_address, ops='', sudo=False,\n pipe_command=''):\n if pipe_command:\n pipe_command = \"| {0}\".format(pipe_command)\n\n return self._execute_command('curl {0} {1} {2}'\n .format(ops, resource_address,\n pipe_command),\n sudo=sudo)\n\n def change_to_tarzan_urls(self):\n pass\n\n def add_dns_nameservers_to_manager_blueprint(self, local_modify_script):\n remote_modify_script = os.path.join(self.client_cfy_work_dir,\n 'modify.py')\n self.logger.info(\n 'Uploading {0} to {1} on manager...'.format(local_modify_script,\n remote_modify_script))\n fab.put(local_modify_script, remote_modify_script, use_sudo=True)\n self.logger.info(\n 'Adding DNS name servers to remote manager blueprint...')\n fab.run('sudo python {0} {1}'.format(\n remote_modify_script, self.test_manager_blueprint_path))\n\n def get_manager_blueprint_file_name(self):\n return 'openstack-manager-blueprint.yaml'\n\n def prepare_manager_blueprint(self):\n self.manager_blueprints_repo_dir = '{0}/cloudify-manager-blueprints' \\\n '-commercial/' \\\n .format(self.client_cfy_work_dir)\n self.test_manager_blueprint_path = \\\n os.path.join(self.manager_blueprints_repo_dir,\n self.get_manager_blueprint_file_name())\n\n self.local_bootstrap_inputs_path = \\\n self.cfy._get_inputs_in_temp_file(self.bootstrap_inputs,\n self._testMethodName)\n self.remote_bootstrap_inputs_path = \\\n os.path.join(self.client_cfy_work_dir, 'bootstrap_inputs.json')\n fab.put(self.local_bootstrap_inputs_path,\n self.remote_bootstrap_inputs_path, use_sudo=True)\n\n self.change_to_tarzan_urls()\n\n def bootstrap_manager(self):\n self.logger.info('Bootstrapping Cloudify manager...')\n\n self._execute_command('init', within_cfy_env=True)\n\n install_plugins = ''\n if self.is_install_plugins():\n install_plugins = '--install-plugins'\n out = self._execute_command(\n 'bootstrap -p {0} -i {1} {2}'.format(\n self.test_manager_blueprint_path,\n self.remote_bootstrap_inputs_path,\n install_plugins),\n within_cfy_env=True)\n\n self.assertIn('bootstrapping complete', out,\n 'Bootstrap has failed')\n\n self.manager_ip = self._manager_ip()\n self.client = CloudifyClient(self.manager_ip)\n self.addCleanup(self.teardown_manager)\n\n def get_hello_world_url(self):\n return EXAMPLE_URL.format(HELLO_WORLD_EXAMPLE_NAME, self.branch)\n\n def get_app_blueprint_file(self):\n return 'blueprint.yaml'\n\n def publish_hello_world_blueprint(self):\n hello_world_url = self.get_hello_world_url()\n blueprint_id = 'blueprint-{0}'.format(uuid.uuid4())\n self.logger.info('Publishing hello-world example from: {0} [{1}]'\n .format(hello_world_url, blueprint_id))\n self._execute_command('blueprints publish-archive '\n '-l {0} -n {1} -b {2}'\n .format(hello_world_url,\n self.get_app_blueprint_file(),\n blueprint_id),\n within_cfy_env=True)\n return blueprint_id\n\n def prepare_deployment(self):\n self.local_deployment_inputs_path = \\\n self.cfy._get_inputs_in_temp_file(self.deployment_inputs,\n 
self._testMethodName)\n self.remote_deployment_inputs_path = \\\n os.path.join(self.client_cfy_work_dir,\n 'deployment_inputs.json')\n fab.put(self.local_deployment_inputs_path,\n self.remote_deployment_inputs_path, use_sudo=True)\n\n def create_deployment(self, blueprint_id):\n deployment_id = 'deployment-{0}'.format(uuid.uuid4())\n self.prepare_deployment()\n\n self.logger.info('Creating deployment: {0}'.format(deployment_id))\n self._execute_command(\n 'deployments create -b {0} -d {1} -i {2}'\n .format(blueprint_id, deployment_id,\n self.remote_deployment_inputs_path),\n within_cfy_env=True)\n\n return deployment_id\n\n def install_deployment(self, deployment_id):\n self.logger.info(\n 'Waiting for 15 seconds before installing deployment...')\n time.sleep(15)\n self.logger.info('Installing deployment...')\n self._execute_command('executions start -d {0} -w install'\n .format(deployment_id),\n within_cfy_env=True, retries=2)\n\n def uninstall_deployment(self):\n self.cfy._wait_for_stop_dep_env_execution_if_necessary(\n self.deployment_id)\n self.logger.info('Uninstalling deployment...')\n self._execute_command('executions start -d {0} -w uninstall'\n .format(self.deployment_id), within_cfy_env=True)\n\n def _test_cli_package(self):\n self.install_cli()\n self.prepare_manager_blueprint()\n self.add_dns_nameservers_to_manager_blueprint(\n os.path.join(os.path.dirname(__file__),\n 'resources/add_nameservers_to_subnet.py'))\n self.bootstrap_manager()\n blueprint_id = self.publish_hello_world_blueprint()\n self.deployment_id = self.create_deployment(blueprint_id)\n self.addCleanup(self.uninstall_deployment)\n self.install_deployment(self.deployment_id)\n self.assert_deployment_working(\n self._get_app_property('http_endpoint'))\n\n def _manager_ip(self):\n nova_client, _, _ = self.env.handler.openstack_clients()\n for server in nova_client.servers.list():\n if server.name == self.env.management_server_name:\n for network, network_ips in server.networks.items():\n if network == self.env.management_network_name:\n return network_ips[1]\n self.fail('Failed finding manager ip')\n\n def _get_app_property(self, property_name):\n outputs_resp = self.client.deployments.outputs.get(self.deployment_id)\n return outputs_resp['outputs'][property_name]\n\n @retry(stop_max_attempt_number=3, wait_fixed=3000)\n def assert_deployment_working(self, url):\n self.logger.info('Asserting deployment deployed successfully')\n server_page_response = requests.get(url)\n self.assertEqual(200, server_page_response.status_code,\n 'Failed to get home page of app')\n self.logger.info('Example deployed successfully')\n\n def cleanup(self):\n self.local_env.execute('uninstall',\n task_retries=40,\n task_retry_interval=30)\n\n def teardown_manager(self):\n self.logger.info('Tearing down Cloudify manager...')\n self._execute_command('teardown -f --ignore-deployments',\n within_cfy_env=True)\n\n @contextmanager\n def dns(self, dns_name_servers=('8.8.8.8', '8.8.4.4')):\n \"\"\"\n Enables setting custom dns servers on the local machine.\n This is useful mainly when the bootstrap doesn't contain a\n dns_nameservers.\n\n :param execute_command: the command executer (belong to some machine)\n :param logger: logger object on which to log.\n :param dns_name_servers: an iterable of dns addresses.\n defaults to ('8.8.8.8', '8.8.4.4').\n :return: None\n \"\"\"\n self._execute_command(\"chmod +w /etc/resolv.conf\", sudo=True)\n\n for server in dns_name_servers:\n self.logger.info('Adding {0} to dns list'.format(server))\n 
self._execute_command(\"echo 'nameserver {0}' >> /etc/resolv.conf\"\n .format(server), sudo=True)\n\n yield\n\n for server in dns_name_servers:\n self.logger.info('Removing {0} from dns list'.format(server))\n self._execute_command(\n \"sed -i '/nameserver {0}/c\\\\' /etc/resolv.conf\".format(server),\n sudo=True)\n","sub_path":"system_tests/test_cli_package.py","file_name":"test_cli_package.py","file_ext":"py","file_size_in_byte":16255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"457396392","text":"from OmniscienceMethods import *\nimport difflib\nfrom Tkinter import *\nimport omnisciencemodule\nimport webbrowser\n\nmasterPool=[]\nroot = Tk()\nroot.title(\"Omniscience\")\nroot.wm_iconbitmap('Repel.ico')\nroot.resizable(0, 0)\n\nradioFrame = Frame(root)\nradioFrame.pack(fill=X)\nscaleFrame = Frame(root)\nscaleFrame.pack(fill=X)\ngridFrame = Frame(root)\ngridFrame.pack(fill=X)\n\ngamemode = StringVar()\npickfirst = BooleanVar()\nranked = BooleanVar()\n\nRadiobutton(radioFrame,text=\"All Pick\",variable=gamemode,value=\"AP\",indicatoron=0).pack(side=LEFT)\nRadiobutton(radioFrame,text=\"Captains Mode\",variable=gamemode,value=\"CM\",indicatoron=0).pack(side=LEFT)\nRadiobutton(radioFrame,text=\"Random Draft\",variable=gamemode,value=\"RD\",indicatoron=0).pack(side=LEFT)\nRadiobutton(radioFrame,text=\"Captains Draft\",variable=gamemode,value=\"CD\",indicatoron=0).pack(side=LEFT)\n\nCheckbutton(radioFrame,text=\"First Pick\",variable=pickfirst).pack(side=LEFT)\nCheckbutton(radioFrame,text=\"Ranked\",variable=ranked).pack(side=LEFT)\n\ndef screenshot(*args):\n global masterPool, pickButton\n if gamemode.get()==\"CD\":\n masterPool=screenshot_CD()\n banButton.config(state=NORMAL)\n elif gamemode.get()==\"RD\":\n masterPool=screenshot_RD()\n pickButton.config(state=NORMAL)\n\nscreenshotButton=Button(radioFrame,text=\"Analyze Screenshot\",command=screenshot)\nscreenshotButton.pack(side=RIGHT)\n\ndef about(*args):\n about=Toplevel()\n about.title(\"Help / About\")\n about.resizable(0, 0)\n txt=Text(about, font=\"Times\", wrap=WORD)\n txt.pack()\n txt.insert(END,\n \"Data completeness: \"+str(sum([sum([data['unranked']['advantagedivisor'][a][b] for a in heroRange]) for b in heroRange])/len(heroRange)**2/100)+\"%\\n\\n\"+\\\n \"The Omniscience is a program that assists with hero selection in the game Dota 2. It uses statistics and a large database of games played previously to suggest heroes that work well with or against the heroes already selected.\\n\\n\"+\\\n \"The Omniscience is aware of the order in which the picks occur in the various modes. After pressing the Picks button or the Bans button, the entry box that you are expected to fill out next will be selected.\\n\\n\"+\\\n \"A screenshot can be analyzed to figure out the available hero pool in the Captain's Draft or Random Draft modes. Simply make sure the Omniscience window is not covering any of the hero boxes (suggested locations are the bottom of the screen or on a second monitor) and press the Analyze Screenshot button. Make sure your game is set to Borderless Window in video options to avoid having the game minimize while you do this. In Random Draft you must first press Grid View on the right and then check the Show All Heroes checkbox at the top. 
Only the default hero grid and 16:9 widescreen resolutions are supported.\\n\\n\"+\\\n \"Remember that while the Omniscience can take into account the relationships among the heroes in play, the pool available, and the winrate and popularity of the heroes, it does not account for team/lane composition or what heroes each person is good at playing. These factors are at least as important as the hero picks themselves, so you must still use your judgement when picking.\\n\\n\")\n version_frame = Frame(about)\n version_frame.pack(fill=X)\n Label(version_frame,text=\"Version 1.00\").pack(side=LEFT)\n Button(version_frame,text=\"Check web page for updates\",command=lambda:webbrowser.open(\"https://github.com/negative-energy/omniscience/releases/\")).pack(side=RIGHT)\n\nButton(radioFrame,text=\"About\",command=about).pack(side=RIGHT)\n\nLabel(scaleFrame,text=\"Situationality of hero selection:\").pack(side=LEFT)\nsituationalityScale = Scale(scaleFrame,orient=HORIZONTAL, from_=50)\nsituationalityScale.set(70)\nsituationalityScale.pack(side=LEFT)\nsuggestabilityScale = Scale(scaleFrame,orient=HORIZONTAL)\nsuggestabilityScale.set(50)\nsuggestabilityScale.pack(side=RIGHT)\nLabel(scaleFrame,text=\"Chance you will select the recommended hero:\").pack(side=RIGHT)\n\nLabel(gridFrame,text=\"Your Picks\").grid(row=0,column=0)\nLabel(gridFrame,text=\"Your Bans\").grid(row=0,column=1)\nLabel(gridFrame,text=\"Their Picks\").grid(row=0,column=3)\nLabel(gridFrame,text=\"Their Bans\").grid(row=0,column=4)\n\nfor column in [0,1,3,4]:\n for row in range(1,6):\n Entry(gridFrame).grid(row=row,column=column)\n\ndef validate_all():\n for row in range(1,6):\n for column in [0,1,3,4]:\n validate_entry(gridFrame.grid_slaves(row,column)[0])\ndef validate(event):\n validate_entry(event.widget)\ndef validate_entry(widget):\n if widget.get() == \"\": return\n hero_id=interpret_hero(widget.get())\n widget.delete(0,END)\n widget.insert(0,heroes[hero_id])\nfor column in [0,1,3,4]:\n for row in range(1,6):\n entry = gridFrame.grid_slaves(row,column)[0]\n entry.bind(\"\",validate)\n entry.bind(\"\",validate)\n\npickBox = Frame(gridFrame)\npickText = Text(pickBox)\npickScroll = Scrollbar(pickBox,command=pickText.yview)\npickText.config(yscrollcommand=pickScroll.set)\n\npickBox.grid(row=7,column=0,columnspan=5)\npickText.pack(side=LEFT)\npickScroll.pack(side=RIGHT, fill=Y)\n\ndef refresh_picks():\n refresh(\"<\")\ndef refresh_bans():\n refresh(\"{\")\ndef refresh(firstChar):\n omnisciencemodule.load_settings(1-situationalityScale.get()/100.,1-suggestabilityScale.get()/100.)\n order=orders[gamemode.get()]\n if gamemode.get()!=\"AP\":\n if not pickfirst.get(): order=order.translate(switchsides_trans)\n (order,nextBox)=getNextInOrder(order,gridFrame)\n order=order[order.index(firstChar):]\n while order.count(\"<\",1)+order.count(\">\",1) > modeSearchDepths[gamemode.get()] or order.endswith(\"{\") or order.endswith(\"}\"):\n order=order[:-1]\n nextBox.focus_set()\n\n validate_all()\n\n allies=[]\n enemies=[]\n pool=list(masterPool)\n \n for row in range(1,6):\n hero=gridFrame.grid_slaves(row,0)[0].get()\n if hero:\n hero = heroes.index(hero)\n allies.append(hero)\n if hero in pool: pool.remove(hero)\n\n hero=gridFrame.grid_slaves(row,1)[0].get()\n if hero:\n hero = heroes.index(hero)\n if hero in pool: pool.remove(hero)\n\n hero=gridFrame.grid_slaves(row,3)[0].get()\n if hero:\n hero = heroes.index(hero)\n enemies.append(hero)\n if hero in pool: pool.remove(hero)\n\n hero=gridFrame.grid_slaves(row,4)[0].get()\n if hero:\n hero = 
heroes.index(hero)\n if hero in pool: pool.remove(hero)\n heroQuality=map(to_fraction,omnisciencemodule.analyze([heroRange.index(i) for i in pool],\n [heroRange.index(i) for i in allies],\n [heroRange.index(i) for i in enemies],order))\n heroQuality=[0 if i not in pool else heroQuality[heroRange.index(i)] for i in range(len(heroes))]\n order=sorted(pool,key=lambda id: heroQuality[id],reverse=True)\n pickText.delete(\"0.0\",END)\n rankrange=max(.000001,float(heroQuality[order[0]]-heroQuality[order[-1]]))\n width=55\n for i in range(len(order)):\n normalized=int((heroQuality[order[i]]-heroQuality[order[-1]])*2*width/rankrange-width)\n pickText.insert(END,\"{:<3} {:<20} {}\\n\".format(\\\n i+1,\\\n heroes[order[i]],\\\n \"=\"*normalized+\" \"*min(width-normalized,width+normalized)+\"=\"*-normalized))\n\npickButton=Button(gridFrame,text=\"Picks\",command=refresh_picks)\npickButton.grid(row=6,column=0)\nbanButton=Button(gridFrame,text=\"Bans\", command=refresh_bans)\nbanButton.grid(row=6,column=1)\n\ndef gamemode_changed(*args):\n global masterPool\n masterPool=list(cmHeroRange if gamemode.get().startswith(\"C\") else heroRange)\n omnisciencemodule.load_data([[to_multiplicand(synergy(a,b,ranked.get())) for a in heroRange] for b in heroRange],\\\n [[to_multiplicand(advantage(a,b,ranked.get())) for a in heroRange] for b in heroRange],\\\n [data['ranked' if ranked.get() and not gamemode.get()==\"RD\" else 'unranked'][gamemode.get()]['popularity'][a] for a in heroRange])\n banButton.config(state=NORMAL if gamemode.get()==\"CM\" else DISABLED)\n if gamemode.get()==\"AP\" or gamemode.get()==\"CM\":\n screenshotButton.config(state=DISABLED)\n pickButton.config(state=NORMAL)\n else:\n screenshotButton.config(state=NORMAL)\n pickButton.config(state=DISABLED)\n \ngamemode.trace(\"w\",gamemode_changed)\nranked.trace(\"w\",gamemode_changed)\ngamemode.set(\"AP\")\n\nroot.mainloop()","sub_path":"Omniscience.py","file_name":"Omniscience.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"261238409","text":"from time import sleep\nfrom dotenv import load_dotenv\n\nimport os\nimport urllib3\n\n# import the monitor class\nfrom monitor import Monitor\n\n# import the home assistant connection class\nfrom homeassistant_connection import HomeAssistant\n\n# Surpress SSL self signed certificate warnings\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# Import our system variables and check their type()\ntry:\n load_dotenv('configuration.txt')\n\n HOMEASSISTANT_IP = str(os.getenv('HOMEASSISTANT_IP'))\n HOMEASSISTANT_PORT = int(os.getenv('HOMEASSISTANT_PORT'))\n HOMEASSISTANT_PROTOCOL = str(os.getenv('HOMEASSISTANT_PROTOCOL'))\n HOMEASSISTANT_API_TOKEN = str(os.getenv('HOMEASSISTANT_API_TOKEN'))\n HOMEASSISTANT_LIGHT_ID = str(os.getenv('HOMEASSISTANT_LIGHT_ID'))\n HOMEASSISTANT_LIGHT_BRIGHTNESS = int(os.getenv('HOMEASSISTANT_LIGHT_BRIGHTNESS'))\n\n MONLIGHT_SCREEN_NUMBER = int(os.getenv('MONLIGHT_SCREEN_NUMBER'))\n MONLIGHT_ANALYZE_ENTIRE_SCREEN = bool(os.getenv('MONLIGHT_ANALYZE_ENTIRE_SCREEN'))\n MONLIGHT_TIMER = float(os.getenv('MONLIGHT_TIMER'))\n\nexcept Exception as e:\n print('There is an error in the configuration file')\n print(e) # Print the error\n exit()\n\nif __name__ == \"__main__\":\n\n monitor = Monitor(MONLIGHT_SCREEN_NUMBER)\n homeassistant = HomeAssistant(\n HOMEASSISTANT_IP,\n HOMEASSISTANT_PORT,\n HOMEASSISTANT_PROTOCOL,\n HOMEASSISTANT_API_TOKEN,\n HOMEASSISTANT_LIGHT_ID,\n 
HOMEASSISTANT_LIGHT_BRIGHTNESS\n )\n\n oldrgb = (0, 0, 0)\n while(True):\n rgb = monitor.get_monitor_average_color()\n if rgb == oldrgb:\n print(\"Value hasn't changed\")\n sleep(MONLIGHT_TIMER)\n else:\n homeassistant.set_new_light_color(rgb)\n oldrgb = rgb\n sleep(MONLIGHT_TIMER)\n","sub_path":"run/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"388595951","text":"__author__ = 'Viktor Kerkez '\n__date__ = '21 September 2013'\n__copyright__ = 'Copyright (c) 2013 Viktor Kerkez'\n\nimport decimal\nimport unittest\nimport datetime\nfrom tea.utils import json\nfrom tea.utils import get_object\n\n\nclass Foo(object):\n def __init__(self, x, y=None):\n self.x = x\n self.y = y\n\n def foo(self):\n return self.x\n\n def __json__(self):\n return {\n 'x': self.x,\n 'y': self.y\n }\n\n\nclass TestGetObject(unittest.TestCase):\n def setUp(self):\n self.x = 42\n self.foo = Foo(self.x)\n\n def test_object_getter(self):\n self.assertEqual(get_object('foo', self), self.foo)\n self.assertEqual(get_object('foo.x', self), self.x)\n self.assertEqual(get_object('foo.foo', self)(), self.x)\n\n def test_module_getter(self):\n self.assertEqual(get_object('unittest'), unittest)\n value = get_object('subprocess')\n import subprocess\n self.assertEqual(value, subprocess)\n self.assertEqual(get_object('tea.tests.test_utils.unittest'),\n unittest)\n\n def test_module_object_getter(self):\n self.assertEqual(get_object('unittest.TestCase'), unittest.TestCase)\n self.assertEqual(get_object('tea.utils.get_object'), get_object)\n self.assertEqual(get_object('tea.tests.test_utils.Foo.foo'),\n Foo.foo)\n\n\ndef test_json_encode():\n d = {'a': 1, 'b': 2}\n\n data = json.loads(json.dumps({\n 'foo': Foo(3, 4),\n 'decimal': decimal.Decimal('3.0'),\n 'datetime': datetime.datetime(2017, 3, 16, 18, 30, 15),\n 'date': datetime.date(2017, 3, 16),\n 'set': {1, 2, 3},\n 'dict_keys': d.keys(),\n 'dict_values': d.values(),\n }))\n assert data['foo']['x'] == 3\n assert data['foo']['y'] == 4\n assert data['decimal'] == 3.0\n assert data['datetime'] == '20170316183015'\n assert data['date'] == '20170316000000'\n assert set(data['set']) == {1, 2, 3}\n assert set(data['dict_keys']) == {'a', 'b'}\n assert set(data['dict_values']) == {1, 2}\n","sub_path":"tea/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"346629387","text":"import pygame as pg\nfrom settings import *\nfrom abc import ABCMeta, abstractmethod\n\nclass Character(pg.sprite.Sprite, metaclass=ABCMeta):\n \"\"\" this is a general character abstract class that provides basis of drawing any character on screen\"\"\"\n def __init__(self, game, x, y, color):\n self.groups = game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILESIZE, TILESIZE))\n self.image.fill(color)\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n\n def move(self, dx=0, dy=0):\n self.x += dx\n self.y += dy\n\n def teleport(self, new_x, new_y):\n \"\"\"\n this teleports Character to a new position\n :param new_x: new x coordinate\n :param new_y: new y coordinate\n \"\"\"\n self.x = new_x\n self.y = new_y\n\n def update(self):\n self.rect.x = self.x * TILESIZE\n self.rect.y = self.y * TILESIZE\n\nclass Player(Character):\n \"\"\"Player's implementation of Character class, that 
handles displaying Player's character on screen\"\"\"\n def __init__(self, game, x, y):\n color = YELLOW\n super(Player, self).__init__(game, x, y, color)\n\nclass Monster(Character, metaclass=ABCMeta):\n \"\"\"Abstract class that provides implementation of Character class, that handles displaying a Monster on screen\"\"\"\n def __init__(self, game, x, y):\n color = RED\n super(Monster, self).__init__(game, x, y, color)\n\nclass Snake(Monster):\n \"\"\"Example implementation of Abstract class, that handles displaying a Snake character on screen\"\"\"\n pass\n\nclass Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILESIZE, TILESIZE))\n self.image.fill(GREEN)\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILESIZE\n self.rect.y = y * TILESIZE\n\n","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"331894133","text":"#!/usr/bin/env python3\nimport sys\nimport statistics\n\nkey = None\ncurrent_key = None\ntmp_list = []\n\n# input comes from STDIN\nfor line in sys.stdin:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # parse the input we got from mapper.py\n key, value = line.split('\\t', 1)\n\n # convert value, currently a string to float\n try:\n value = float(value)\n except ValueError:\n # value was not a number, so silently\n # ignore/discard this line\n continue\n\n if current_key == key:\n tmp_list.append(value)\n else:\n if current_key:\n # write result to STDOUT\n print('%s\\tMax:%s\\tMin:%s\\tMedian:%s\\tStandard Deviation:%s' % (current_key, max(tmp_list), min(tmp_list), statistics.median(tmp_list), statistics.stdev(tmp_list)))\n tmp_list.clear()\n tmp_list.append(value)\n current_key = key\n\n# do not forget to output the last key if needed!\nif current_key == key:\n print('%s\\tMax:%s\\tMin:%s\\tMedian:%s\\tStandard Deviation:%s' % (current_key, max(tmp_list), min(tmp_list), statistics.median(tmp_list), statistics.stdev(tmp_list)))\n","sub_path":"Assignment2/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"435404758","text":"# See section \"3.6 Normalizing Text\", page 107 of NLP with Python\n\n\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n# Stopwords must be removed before stemming\n\nstemmer = SnowballStemmer(\"english\", ignore_stopwords=True)\nlemmatizer = WordNetLemmatizer()\n\nsource = [\"having\", \"have\", \"needs\", \"need\", \"inflation\", \"inflate\", \"developments\", \"developing\", \"aggregation\",\n \"aggregated\", \"population\", \"poverty\", \"poor\", \"poorer\", \"men\", \"man\", \"gases\", \"gas\", \"sues\", \"utilized\",\n \"damaged\"]\n\nstems1 = [stemmer.stem(word) for word in source]\nstems2 = [lemmatizer.lemmatize(word) for word in source]\nstems3 = [stemmer.stem(word) for word in stems2]\n\nprint(stems1)\nprint(stems2)\nprint(stems3)\n","sub_path":"Preprocessing/stemming.py","file_name":"stemming.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"573646204","text":"import config\nimport smtplib\nfrom dbdriver import 
select\n\nconfig = config.get('smtp')\n\nserver = smtplib.SMTP(config['server'], int(config['port']))\nserver.ehlo()\n\nif config['use_tls'] == 'yes':\n server.starttls()\nserver.login(config['user'], config['password'])\n\ndef send(dest, message):\n user = select('users', {'name': dest})[0]\n\n email = user['email']\n\n message = \"Subject : {}\\n\\n\".format(message)\n\n try:\n result = server.sendmail(config['user'], email, message)\n return result, None\n except smtplib.SMTPException as e:\n print (e)\n return None, e\n finally:\n server.close()\n","sub_path":"api/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"423958738","text":"import requests\nfrom PIL import Image\nfrom tesserocr import PyTessBaseAPI, RIL\nimport pyocr\nimport pyocr.builders\nimport os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef getOcrMethod(methodName):\n if methodName == \"tesserocr\":\n return useOcrTesserocr\n elif methodName == \"pyocr\":\n return useOcrPyocr\n elif methodName == \"api\":\n return useOcrApi\n else:\n logger.error(\"invalid methodName: {}\".format(methodName))\n return None\n\n\ndef useOcrTesserocr(imgPath):\n image = Image.open(imgPath)\n results = []\n with PyTessBaseAPI() as api:\n api.SetImage(image)\n boxes = api.GetComponentImages(RIL.TEXTLINE, True)\n for i, (im, box, _, _) in enumerate(boxes):\n api.SetRectangle(box['x'], box['y'], box['w'], box['h'])\n ocrResult = api.GetUTF8Text().strip()\n if ocrResult != \"\":\n logger.debug((\"image path: {imgPath}, \"\n \"Box[{0}]: x={x}, y={y}, w={w}, h={h}, \"\n \"text: {1}\").format(i, ocrResult, imgPath=imgPath, **box))\n result = {\"boundingBox\": {\"tl_x\": box['x'],\n \"tl_y\": box['y'],\n \"br_x\": box['x'] + box['w'],\n \"br_y\": box['y'] + box['h']},\n \"text\": ocrResult}\n results.append(result)\n return results\n\n\ndef useOcrPyocr(imgPath):\n tools = pyocr.get_available_tools()\n if len(tools) == 0:\n logger.error(\"No available ocr library\")\n return []\n tool = tools[0]\n\n image = Image.open(imgPath)\n line_and_word_boxes = tool.image_to_string(\n image,\n lang=\"eng\",\n builder=pyocr.builders.LineBoxBuilder()\n )\n results = []\n for line in line_and_word_boxes:\n box, text = line.position, line.content.strip()\n if text != \"\":\n tl_x, tl_y = box[0]\n br_x, br_y = box[1]\n logger.debug((\"image path: {imgPath}, \"\n \"Box: x={x}, y={y}, w={w}, h={h}, \"\n \"text: {text}\").format(imgPath=imgPath, x=tl_x, y=tl_y, w=br_x-tl_x, h=br_y-tl_y, text=text))\n result = {\"boundingBox\": {\"tl_x\": tl_x,\n \"tl_y\": tl_y,\n \"br_x\": br_x,\n \"br_y\": br_y},\n \"text\": text}\n results.append(result)\n return results\n\n\ndef getOcrUri():\n # For microsoft cognitive-services\n ocrUri = os.getenv(\"OCR_URI\", None)\n if ocrUri is not None:\n return ocrUri\n else:\n # For internal version\n ocrIpport = os.getenv(\"OCR_IP_PORT\", None)\n return \"http://\" + ocrIpport + \"/vision/v2.0/recognizeTextDirect\" if ocrIpport else None\n\n\ndef useOcrApi(imgPath, timeout=10):\n ocrUri = getOcrUri()\n if ocrUri is None:\n logger.error(\"Can't get API address, please pass it by env OCR_URI or OCR_IP_PORT\")\n return []\n filename = os.path.basename(imgPath)\n _, ext = os.path.splitext(filename)\n with open(imgPath, \"rb\") as f:\n files = {\"file\": (filename, f, \"image/\" + ext.strip('.'))}\n try:\n res = requests.post(ocrUri, files=files, timeout=timeout)\n except requests.exceptions.Timeout:\n 
logger.error(\"API request timeout.\")\n return []\n\n ocrResponse = res.json()\n if \"status\" in ocrResponse:\n if ocrResponse[\"status\"] == \"Succeeded\":\n ocrResponse = ocrResponse[\"recognitionResult\"]\n else:\n logger.error(\"Remote API return failure\")\n return []\n\n results = []\n for line in ocrResponse[\"lines\"]:\n box, text = line[\"boundingBox\"], line[\"text\"]\n tl_x = min(box[0::2])\n tl_y = min(box[1::2])\n br_x = max(box[0::2])\n br_y = max(box[1::2])\n result = {\"boundingBox\": {\"tl_x\": tl_x,\n \"tl_y\": tl_y,\n \"br_x\": br_x,\n \"br_y\": br_y},\n \"text\": text}\n results.append(result)\n\n return results\n","sub_path":"examples/ocr-serving/copied_file/request_ocr/ocrLibrary/OcrHandle.py","file_name":"OcrHandle.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"171589608","text":"import requests\n\nfrom retrying import retry\n\nfrom . import config\n\n@retry(stop_max_attempt_number=3, wait_random_min=1000, wait_random_max=5000)\ndef send(title, message, url, image_url=None):\n for name, authentication in config.notification.items():\n handler = globals().get('handle_{name}'.format(name=name), lambda *args, **kwargs: None)\n handler(authentication, title, message, url, image_url)\n\ndef handle_pushover(authentication, title, message, url, image_url=None):\n from notification import Pushover\n\n parameters = {\n 'title': title,\n 'message': message,\n 'url': url,\n }\n\n if image_url:\n response = requests.get(image_url)\n if response.status_code == 200:\n parameters['attachment'] = response.content\n\n Pushover(**authentication).notify(**parameters)","sub_path":"productscanner/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"75317003","text":"import html\nimport random\nimport re\nimport textwrap\nfrom html import unescape\n\nfrom datetime import datetime\nfrom discord.ext import commands\nimport discord\nimport orjson\n\n\nclass apis(commands.Cog):\n \"\"\"For commands related to apis.\"\"\"\n\n def __init__(self, bot: commands.Bot) -> None:\n self.bot = bot\n self.DB = bot.DB\n self.loop = bot.loop\n\n @commands.command()\n async def wolfram(self, ctx, *, query):\n \"\"\"Gets the output of a query from wolfram alpha.\"\"\"\n query = query.replace(\" \", \"+\")\n url = (\n \"https://lin2jing4-cors-1.herokuapp.com/api.wolframalpha.com/v2/query\"\n \"?&output=json&podstate=step-by-step+solution&podstate=step-by-step&podstate\"\n \"=show+all+steps&scantimeout=30&podtimeout=30&formattimeout=30&parsetimeout\"\n \"=30&totaltimeout=30&reinterpret=true&podstate=undefined&appid=\"\n f\"KQRKKJ-8WHPY395HA&input={query}&lang=en\"\n )\n\n headers = {\"Origin\": \"https://wolfreealpha.gitlab.io\"}\n embed = discord.Embed(color=discord.Color.blurple())\n\n async with ctx.typing(), self.bot.client_session.get(\n url, headers=headers\n ) as response:\n data = (await response.json())[\"queryresult\"]\n\n if data[\"error\"]:\n embed.description = \"```Calculation errored out```\"\n return await ctx.send(embed=embed)\n\n msg = \"\"\n\n for pod in data[\"pods\"]:\n if pod[\"title\"] and pod[\"subpods\"][0][\"plaintext\"]:\n msg += f\"{pod['title']}\\n{pod['subpods'][0]['plaintext']}\\n\\n\"\n\n embed.title = \"Results\"\n embed.description = f\"```\\n{msg}```\"\n await ctx.send(embed=embed)\n\n @commands.command()\n async def currency(self, 
ctx, original, amount: float, new):\n        \"\"\"Converts between currencies.\n\n        original: str\n            The original currency.\n        amount: float\n        new: str\n            The new currency.\n        \"\"\"\n        new = new.upper()\n        original = original.upper()\n\n        url = f\"https://api.frankfurter.app/latest?amount={amount}&from={original}&to={new}\"\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n            embed.description = (\n                f\"```{amount} {original} is {data['rates'][new]} {new}```\"\n            )\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def country(self, ctx, *, name):\n        \"\"\"Show information about a given country.\n\n        name: str\n        \"\"\"\n        url = f\"https://restcountries.com/v2/name/{name}\"\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n            if not isinstance(data, list):\n                embed.description = \"```Country not found```\"\n                return await ctx.send(embed=embed)\n\n            data = data[0]\n\n            embed.set_author(name=data[\"name\"], icon_url=data[\"flags\"][-1])\n            embed.add_field(name=\"Capital\", value=data.get(\"capital\", \"No Capital\"))\n            embed.add_field(name=\"Demonym\", value=data[\"demonym\"])\n            embed.add_field(name=\"Continent\", value=data[\"continent\"])\n            embed.add_field(name=\"Population\", value=f\"{data['population']:,}\")\n            embed.add_field(\n                name=\"Total Area\",\n                value=f\"{data['area']:,.0f}km²\" if \"area\" in data else \"NaN\",\n            )\n            embed.add_field(name=\"TLD(s)\", value=\", \".join(data[\"topLevelDomain\"]))\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def fact(self, ctx):\n        \"\"\"Gets a random fact.\"\"\"\n        url = \"https://uselessfacts.jsph.pl/random.json?language=en\"\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n        await ctx.send(\n            embed=discord.Embed(\n                color=discord.Color.blurple(), description=f\"```{data['text']}```\"\n            )\n        )\n\n    @commands.command(aliases=[\"so\"])\n    async def stackoverflow(self, ctx, *, search):\n        \"\"\"Gets stackoverflow posts based on a search.\n\n        search: str\n        \"\"\"\n        url = (\n            \"https://api.stackexchange.com/2.3/search/advanced?pagesize=5&\"\n            f\"order=asc&sort=relevance&q={search}&site=stackoverflow\"\n        )\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        async with ctx.typing():\n            posts = (await self.bot.get_json(url))[\"items\"]\n\n        if not posts:\n            embed.description = \"```No posts found```\"\n            return await ctx.send(embed=embed)\n\n        for post in posts:\n            embed.add_field(\n                name=f\"`{unescape(post['title'])}`\",\n                value=f\"\"\"\n                Score: {post['score']}\n                Views: {post['view_count']}\n                Tags: {', '.join(post['tags'][:3])}\n                [Link]({post['link']})\"\"\",\n                inline=False,\n            )\n\n        await ctx.send(embed=embed)\n\n    @commands.command(aliases=[\"kanye\"])\n    async def justin(self, ctx):\n        \"\"\"Gets a random Kanye West quote.\"\"\"\n        url = \"https://api.kanye.rest\"\n\n        async with ctx.typing():\n            quote = await self.bot.get_json(url)\n            embed = discord.Embed(\n                color=discord.Color.blurple(), description=quote[\"quote\"]\n            )\n            embed.set_footer(text=\"― Kanye West\")\n            await ctx.send(embed=embed)\n\n    @commands.command()\n    async def quote(self, ctx):\n        \"\"\"Gets a random quote.\"\"\"\n        url = \"https://api.fisenko.net/quotes?l=en\"\n\n        async with ctx.typing():\n            quote = await self.bot.get_json(url)\n            embed = discord.Embed(\n                color=discord.Color.blurple(), description=quote[\"text\"]\n            )\n            embed.set_footer(text=f\"― {quote['author']}\")\n            await ctx.send(embed=embed)\n\n    @commands.command()\n    async def 
suntzu(self, ctx):\n        \"\"\"Gets fake Sun Tzu Art of War quotes.\"\"\"\n        url = \"http://api.fakeartofwar.gaborszathmari.me/v1/getquote\"\n\n        async with ctx.typing():\n            quote = await self.bot.get_json(url)\n            embed = discord.Embed(\n                color=discord.Color.blurple(), description=quote[\"quote\"]\n            )\n            embed.set_footer(text=\"― Sun Tzu, Art Of War\")\n            await ctx.send(embed=embed)\n\n    @commands.command()\n    async def rhyme(self, ctx, word):\n        \"\"\"Gets words that rhyme with [word].\n\n        word: str\n        \"\"\"\n        url = f\"https://api.datamuse.com/words?rel_rhy={word}&max=9\"\n\n        async with ctx.typing():\n            rhymes = await self.bot.get_json(url)\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        if not rhymes:\n            embed.description = \"```No results found```\"\n            return await ctx.send(embed=embed)\n\n        embed.set_footer(text=\"The numbers below are the scores\")\n\n        for rhyme in rhymes:\n            embed.add_field(name=rhyme[\"word\"], value=rhyme.get(\"score\", \"N/A\"))\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def spelling(self, ctx, word):\n        \"\"\"Gets possible spellings of [word].\n\n        word: str\n            The word to get possible spellings of.\n        \"\"\"\n        url = f\"https://api.datamuse.com/words?sp={word}&max=9\"\n\n        async with ctx.typing():\n            spellings = await self.bot.get_json(url)\n\n        embed = discord.Embed(\n            color=discord.Color.blurple(), title=\"Possible spellings\"\n        )\n\n        if not spellings:\n            embed.description = \"```No results found```\"\n            return await ctx.send(embed=embed)\n\n        embed.set_footer(text=\"The numbers below are the scores\")\n\n        for spelling in spellings:\n            embed.add_field(name=spelling[\"word\"], value=spelling[\"score\"])\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def meaning(self, ctx, *, words):\n        \"\"\"Gets words with similar meaning to [words].\n        Example .meaning ringing in the ears\n\n        words: str\n            The words to get possible meanings of.\n        \"\"\"\n        url = f\"https://api.datamuse.com/words?ml={words}&max=9\"\n\n        async with ctx.typing():\n            meanings = await self.bot.get_json(url)\n\n        embed = discord.Embed(\n            color=discord.Color.blurple(), title=\"Possible meanings\"\n        )\n\n        if not meanings:\n            embed.description = \"```No results found```\"\n            return await ctx.send(embed=embed)\n\n        embed.set_footer(text=\"The numbers below are the scores\")\n\n        for meaning in meanings:\n            embed.add_field(name=meaning[\"word\"], value=meaning[\"score\"])\n\n        await ctx.send(embed=embed)\n\n    @commands.group()\n    async def apis(self, ctx):\n        \"\"\"Command group for the public APIs API.\"\"\"\n        if not ctx.invoked_subcommand:\n            embed = discord.Embed(\n                color=discord.Color.blurple(),\n                description=f\"```Usage: {ctx.prefix}apis [categories/random/search]```\",\n            )\n            await ctx.send(embed=embed)\n\n    @apis.command()\n    async def categories(self, ctx):\n        \"\"\"Gets all the categories of the public APIs API.\"\"\"\n        url = \"https://api.publicapis.org/categories\"\n\n        async with ctx.typing():\n            categories = await self.bot.get_json(url)\n\n        await ctx.send(\n            embed=discord.Embed(\n                color=discord.Color.blurple(),\n                title=\"All categories of the public APIs API\",\n                description=\"```{}```\".format(\"\\n\".join(categories)),\n            )\n        )\n\n    @apis.command()\n    async def random(self, ctx, category=\"\"):\n        \"\"\"Gets a random API.\n\n        category: str\n        \"\"\"\n        url = f\"https://api.publicapis.org/random?category={category}\"\n\n        async with ctx.typing():\n            data = (await self.bot.get_json(url))[\"entries\"][0]\n\n            embed = discord.Embed(\n                color=discord.Color.blurple(),\n                title=data[\"API\"],\n                
description=f\"{data['Description']}\\n[Link]({data['Link']})\",\n            )\n            embed.add_field(\n                name=\"Auth\", value=\"None\" if not data[\"Auth\"] else data[\"Auth\"]\n            )\n            embed.add_field(name=\"HTTPS\", value=data[\"HTTPS\"])\n            embed.add_field(name=\"Cors\", value=data[\"Cors\"])\n            embed.add_field(name=\"Category\", value=data[\"Category\"])\n\n        await ctx.send(embed=embed)\n\n    @apis.command()\n    async def search(self, ctx, *, search):\n        \"\"\"Searches for a match via substring matching.\n\n        search: str\n        \"\"\"\n        url = f\"https://api.publicapis.org/entries?title={search}\"\n\n        async with ctx.typing():\n            entries = (await self.bot.get_json(url))[\"entries\"]\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        if not entries:\n            embed.description = f\"No APIs found for `{search}`\"\n            return await ctx.send(embed=embed)\n\n        for index, entry in enumerate(entries):\n            if index == 12:\n                break\n            embed.add_field(\n                name=entry[\"API\"],\n                value=f\"{entry['Description']}\\n[Link]({entry['Link']})\",\n            )\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def nationalize(self, ctx, first_name):\n        \"\"\"Estimate the nationality of a first name.\n\n        first_name: str\n        \"\"\"\n        url = f\"https://api.nationalize.io/?name={first_name}\"\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        if not data[\"country\"]:\n            embed.description = \"```No results found```\"\n            return await ctx.send(embed=embed)\n\n        embed.title = f\"Estimates of the nationality of {data['name']}\"\n\n        for country in data[\"country\"]:\n            embed.add_field(\n                name=country[\"country_id\"],\n                value=f\"{country['probability'] * 100:.2f}%\",\n            )\n        await ctx.send(embed=embed)\n\n    @commands.group()\n    async def game(self, ctx):\n        \"\"\"Gets a random game that is free.\"\"\"\n        if not ctx.invoked_subcommand:\n            url = \"https://www.freetogame.com/api/games?platform=pc\"\n\n            async with ctx.typing():\n                game = random.choice(await self.bot.get_json(url))\n\n            embed = discord.Embed(\n                color=discord.Color.blurple(),\n                title=game[\"title\"],\n                description=f\"{game['short_description']}\\n[Link]({game['game_url']})\",\n            )\n            embed.add_field(name=\"Genre\", value=game[\"genre\"])\n            embed.add_field(name=\"Publisher\", value=game[\"publisher\"])\n            embed.add_field(name=\"Developer\", value=game[\"developer\"])\n            embed.set_image(url=game[\"thumbnail\"])\n\n            await ctx.send(embed=embed)\n\n    @game.command()\n    async def category(self, ctx, category=None):\n        \"\"\"Gets a random game that is free by category.\n\n        category: str\n        \"\"\"\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        if not category:\n            embed.description = textwrap.dedent(\n                \"\"\"\n                ```mmorpg, shooter, strategy, moba, racing, sports\n                social, sandbox, open-world, survival, pvp, pve, pixel\n                voxel, zombie, turn-based, first-person, third-person\n                top-down, tank, space, sailing, side-scroller, superhero\n                permadeath, card, battle-royale, mmo, mmofps, mmotps, 3d\n                2d, anime, fantasy, sci-fi, fighting, action-rpg, action\n                military, martial-arts, flight, low-spec, tower-defense\n                horror, mmorts```\n                \"\"\"\n            )\n            return await ctx.send(embed=embed)\n\n        url = f\"https://www.freetogame.com/api/games?platform=pc&category={category}\"\n\n        async with ctx.typing():\n            game = random.choice(await self.bot.get_json(url))\n\n            embed.title = game[\"title\"]\n            embed.description = (\n                f\"{game['short_description']}\\n[Link]({game['game_url']})\"\n            )\n\n            embed.add_field(name=\"Genre\", value=game[\"genre\"])\n            
embed.add_field(name=\"Publisher\", value=game[\"publisher\"])\n embed.add_field(name=\"Developer\", value=game[\"developer\"])\n embed.set_image(url=game[\"thumbnail\"])\n\n await ctx.send(embed=embed)\n\n @commands.command()\n async def apod(self, ctx):\n \"\"\"Gets the NASA Astronomy Picture of the Day.\"\"\"\n url = \"https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY\"\n\n async with ctx.typing():\n apod = await self.bot.get_json(url)\n\n embed = discord.Embed(\n color=discord.Color.blurple(),\n title=apod[\"title\"],\n description=\"[Link](https://apod.nasa.gov/apod/astropix.html)\",\n )\n embed.set_image(url=apod[\"hdurl\"])\n await ctx.send(embed=embed)\n\n @commands.command(name=\"githubtrending\", aliases=[\"githubt\", \"tgithub\"])\n async def github_trending(self, ctx):\n \"\"\"Gets trending github repositories.\"\"\"\n url = \"https://api.trending-github.com/github/repositories\"\n\n async with ctx.typing():\n repositories = await self.bot.get_json(url)\n embed = discord.Embed(\n color=discord.Color.blurple(), title=\"5 Trending Github Repositories\"\n )\n\n for index, repo in enumerate(repositories, start=1):\n if index == 6:\n break\n embed.add_field(\n name=repo[\"name\"].title(),\n value=textwrap.dedent(\n f\"\"\"\n `Description:` {repo['description']}\n `Language:` {repo['language']}\n `Url:` {repo['url']}\n `Stars:` {repo['stars']:,}\n `Forks:` {repo['forks']:,}\n \"\"\"\n ),\n inline=False,\n )\n await ctx.send(embed=embed)\n\n @commands.command()\n async def gender(self, ctx, first_name):\n \"\"\"Estimates a gender from a first name.\n\n first_name: str\n \"\"\"\n url = f\"https://api.genderize.io/?name={first_name}\"\n\n async with ctx.typing():\n data = await self.bot.get_json(url)\n embed = discord.Embed(color=discord.Color.blurple())\n embed.description = textwrap.dedent(\n f\"\"\"\n ```First Name: {data['name']}\n Gender: {data['gender']}\n Probability: {data['probability'] * 100}%\n Count: {data['count']}```\n \"\"\"\n )\n await ctx.send(embed=embed)\n\n @commands.command()\n async def trends(self, ctx, *, country=\"new zealand\"):\n \"\"\"Gets the current google search trends in a country.\n\n country: str\n \"\"\"\n url = \"https://trends.google.com/trends/hottrends/visualize/internal/data\"\n country = country.lower()\n\n async with ctx.typing():\n data = await self.bot.get_json(url)\n embed = discord.Embed(color=discord.Color.blurple())\n\n if country not in data:\n embed.description = f\"```Country {country.title()} not found.```\"\n return await ctx.send(embed=embed)\n\n embed.title = f\"{country.title()} Search Trends\"\n embed.description = \"```{}```\".format(\"\\n\".join(data[country]))\n await ctx.send(embed=embed)\n\n @commands.command(name=\"fakeuser\")\n async def fake_user(self, ctx):\n \"\"\"Gets a fake user with some random data.\"\"\"\n url = \"https://randomuser.me/api/?results=1\"\n\n async with ctx.typing():\n data = await self.bot.get_json(url)\n data = data[\"results\"][0]\n embed = (\n discord.Embed(color=discord.Color.blurple())\n .set_author(\n name=\"{} {} {}\".format(\n data[\"name\"][\"title\"],\n data[\"name\"][\"first\"],\n data[\"name\"][\"last\"],\n ),\n icon_url=data[\"picture\"][\"large\"],\n )\n .add_field(name=\"Gender\", value=data[\"gender\"])\n .add_field(name=\"Username\", value=data[\"login\"][\"username\"])\n .add_field(name=\"Password\", value=data[\"login\"][\"password\"])\n .add_field(\n name=\"Location\",\n value=\"{}, {}, {}, {}\".format(\n data[\"location\"][\"street\"][\"name\"],\n 
data[\"location\"][\"city\"],\n                        data[\"location\"][\"state\"],\n                        data[\"location\"][\"country\"],\n                    ),\n                )\n                .add_field(name=\"Email\", value=data[\"email\"])\n                .add_field(name=\"Date of birth\", value=data[\"dob\"][\"date\"])\n                .add_field(name=\"Phone\", value=data[\"phone\"])\n            )\n\n        await ctx.send(embed=embed)\n\n    @commands.command(name=\"dadjoke\")\n    async def dad_joke(self, ctx):\n        \"\"\"Gets a random dad joke.\"\"\"\n        url = \"https://icanhazdadjoke.com/\"\n        headers = {\"Accept\": \"application/json\"}\n\n        async with ctx.typing(), self.bot.client_session.get(\n            url, headers=headers\n        ) as response:\n            data = await response.json()\n\n        await ctx.reply(data[\"joke\"])\n\n    @commands.command()\n    async def cocktail(self, ctx, *, name=None):\n        \"\"\"Searches for a cocktail and gets a random result by default.\n\n        name: str\n        \"\"\"\n        if not name:\n            url = \"https://www.thecocktaildb.com/api/json/v1/1/random.php\"\n        else:\n            url = f\"https://www.thecocktaildb.com/api/json/v1/1/search.php?s={name}\"\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n            if not data[\"drinks\"]:\n                embed.description = \"```No cocktails found.```\"\n                embed.color = discord.Color.red()\n                return await ctx.send(embed=embed)\n            drink = random.choice(data[\"drinks\"])\n\n        embed.set_image(url=drink[\"strDrinkThumb\"])\n        embed.set_author(name=drink[\"strDrink\"], icon_url=drink[\"strDrinkThumb\"])\n\n        ingredients = []\n\n        for i in range(1, 16):\n            if not drink[f\"strIngredient{i}\"]:\n                break\n            ingredients.append(\n                f\"{drink[f'strIngredient{i}']}: {drink[f'strMeasure{i}']}\"\n            )\n\n        embed.description = textwrap.dedent(\n            f\"\"\"\n            ```Category: {drink[\"strCategory\"]}\n            \\nGlass: {drink[\"strGlass\"]}\n            \\nAlcohol: {drink[\"strAlcoholic\"]}\n            \\nInstructions: {drink[\"strInstructions\"]}\n            \\n\\nIngredients:\n            \\n{f\"{chr(10)}\".join(ingredients)}\n            ```\"\"\"\n        )\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def trivia(self, ctx, difficulty=\"easy\"):\n        \"\"\"Does a simple trivia game.\n\n        difficulty: str\n            Choices are easy, medium or hard.\n        \"\"\"\n        url = f\"https://opentdb.com/api.php?amount=1&difficulty={difficulty}&type=multiple\"\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n        result = data[\"results\"][0]\n\n        embed = discord.Embed(\n            color=discord.Color.blurple(), title=html.unescape(result[\"question\"])\n        )\n        options = result[\"incorrect_answers\"] + [result[\"correct_answer\"]]\n        random.shuffle(options)\n\n        for i, option in enumerate(options, start=0):\n            embed.add_field(name=i + 1, value=option)\n\n        message = await ctx.send(embed=embed)\n        reactions = [\"1️⃣\", \"2️⃣\", \"3️⃣\", \"4️⃣\"]\n\n        for emoji in reactions:\n            await message.add_reaction(emoji)\n\n        def check(reaction: discord.Reaction, user: discord.User) -> bool:\n            return (\n                user.id == ctx.author.id\n                and reaction.message.channel == ctx.channel\n                and reaction.emoji in reactions\n            )\n\n        reaction, user = await ctx.bot.wait_for(\n            \"reaction_add\", timeout=60.0, check=check\n        )\n\n        if reactions.index(reaction.emoji) == options.index(result[\"correct_answer\"]):\n            return await message.add_reaction(\"✅\")\n\n        await message.add_reaction(\"❎\")\n        await ctx.send(\n            embed=discord.Embed(\n                color=discord.Color.blurple(),\n                description=f\"Correct answer was {result['correct_answer']}\",\n            )\n        )\n\n    @commands.command()\n    async def minecraft(self, ctx, ip):\n        \"\"\"Gets some information about a Minecraft server.\n\n        ip: str\n        \"\"\"\n        url = 
f\"https://api.mcsrvstat.us/2/{ip}\"\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        if not data:\n            embed.description = \"```Pinging timed out.```\"\n            return await ctx.send(embed=embed)\n\n        if data[\"debug\"][\"ping\"] is False:\n            embed.description = \"```Pinging failed.```\"\n            return await ctx.send(embed=embed)\n\n        embed.description = (\n            \"```Hostname: {}\\n\"\n            \"Ip: {}\\n\"\n            \"Port: {}\\n\\n\"\n            \"Online: {}\\n\\n\"\n            \"Players:\\n{}/{}\\n{}\\n\"\n            \"Version(s):\\n{}\\n\\n\"\n            \"Mods:\\n{}\\n\\n\"\n            \"Motd:\\n{}```\"\n        ).format(\n            data[\"hostname\"],\n            data[\"ip\"],\n            data[\"port\"],\n            data[\"online\"],\n            data[\"players\"][\"online\"],\n            data[\"players\"][\"max\"],\n            \", \".join(data[\"players\"][\"list\"]) if \"list\" in data[\"players\"] else \"\",\n            data[\"version\"],\n            len(data[\"mods\"][\"names\"]) if \"mods\" in data else None,\n            \"\\n\".join([s.strip() for s in data[\"motd\"][\"clean\"]]),\n        )\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def define(self, ctx, *, word):\n        \"\"\"Defines a word via the dictionaryapi.dev API.\n\n        word: str\n        \"\"\"\n        url = f\"https://api.dictionaryapi.dev/api/v2/entries/en_US/{word}\"\n\n        async with ctx.typing():\n            definition = await self.bot.get_json(url)\n\n        embed = discord.Embed(color=discord.Color.blurple())\n        if isinstance(definition, dict):\n            embed.description = \"```No definition found```\"\n            return await ctx.send(embed=embed)\n\n        definition = definition[0]\n\n        if \"phonetics\" in definition:\n            embed.title = definition[\"phonetics\"][0][\"text\"]\n            embed.description = (\n                f\"[pronunciation]({definition['phonetics'][0]['audio']})\"\n            )\n\n        for meaning in definition[\"meanings\"]:\n            embed.add_field(\n                name=meaning[\"partOfSpeech\"],\n                value=f\"```{meaning['definitions'][0]['definition']}```\",\n            )\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def latex(self, ctx, *, latex):\n        r\"\"\"Converts LaTeX into an image.\n\n        To use a custom preamble, wrap it with %%preamble%%\n\n        Example:\n\n        %%preamble%%\n        \\usepackage{tikz}\n        \\usepackage{pgfplots}\n        \\pgfplotsset{compat=newest}\n        %%preamble%%\n\n        latex: str\n        \"\"\"\n        url = \"https://quicklatex.com/latex3.f\"\n\n        preamble = (\n            \"\\\\usepackage{amsmath}\\n\"\n            \"\\\\usepackage{amsfonts}\\n\"\n            \"\\\\usepackage{amssymb}\\n\"\n            \"\\\\newcommand{\\\\N}{\\\\mathbb N}\\n\"\n            \"\\\\newcommand{\\\\Z}{\\\\mathbb Z}\\n\"\n            \"\\\\newcommand{\\\\Q}{\\\\mathbb Q}\\n\"\n            \"\\\\newcommand{\\\\R}{\\\\mathbb R}\\n\"\n            \"\\\\newcommand{\\\\C}{\\\\mathbb C}\\n\"\n            \"\\\\newcommand{\\\\V}[1]{\\\\begin{bmatrix}#1\\\\end{bmatrix}}\\n\"\n            \"\\\\newcommand{\\\\set}[1]{\\\\left\\\\{#1\\\\right\\\\}}\"\n        )\n\n        table = {37: \"%25\", 38: \"%26\"}\n        latex = re.sub(r\"```\\w+\\n|```\", \"\", latex).strip(\"\\n\").translate(table)\n\n        if \"%%preamble%%\" in latex:\n            _, pre, latex = re.split(\"%%preamble%%\", latex)\n            preamble += pre.translate(table)\n\n        data = (\n            f\"formula={latex}&fsize=50px&fcolor=FFFFFF&mode=0&out=1\"\n            f\"&remhost=quicklatex.com&preamble={preamble}\"\n        )\n\n        async with ctx.typing(), self.bot.client_session.post(\n            url, data=data\n        ) as response:\n            res = await response.text()\n\n        if \"Internal Server Error\" in res:\n            return await ctx.send(\n                embed=discord.Embed(\n                    color=discord.Color.blurple(),\n                    description=\"Internal Server Error.\",\n                )\n            )\n\n        image = res.split()[1]\n\n        if image == \"https://quicklatex.com/cache3/error.png\":\n            return await ctx.send(\n                embed=discord.Embed(\n                    
color=discord.Color.blurple(),\n                    description=f\"```latex\\n{res[49:]}```\",\n                )\n            )\n\n        await ctx.send(image)\n\n    @commands.command()\n    async def xkcd(self, ctx):\n        \"\"\"Gets a random xkcd comic.\"\"\"\n        await ctx.send(f\"https://xkcd.com/{random.randint(1, 2509)}\")\n\n    @commands.command(aliases=[\"urbandictionary\"])\n    async def urban(self, ctx, *, search):\n        \"\"\"Grabs the definition of something from Urban Dictionary.\n\n        search: str\n            The term to search for.\n        \"\"\"\n        cache_search = f\"urban-{search}\"\n        cache = orjson.loads(self.DB.main.get(b\"cache\"))\n\n        embed = discord.Embed(colour=discord.Color.blurple())\n\n        if cache_search in cache:\n            defin = cache[cache_search].pop()\n\n            if len(cache[cache_search]) == 0:\n                cache.pop(cache_search)\n        else:\n            url = f\"https://api.urbandictionary.com/v0/define?term={search}\"\n\n            async with ctx.typing():\n                urban = await self.bot.get_json(url)\n\n            if not urban:\n                embed.title = \"Timed out, try again later\"\n                return await ctx.send(embed=embed)\n\n            if not urban[\"list\"]:\n                embed.title = \"No results found\"\n                return await ctx.send(embed=embed)\n\n            urban[\"list\"].sort(key=lambda defin: defin[\"thumbs_up\"])\n\n            defin = urban[\"list\"].pop()\n            cache[cache_search] = urban[\"list\"]\n\n        embed.description = (\n            \"```diff\\nDefinition of {}:\\n\"\n            \"{}\\n\\n\"\n            \"Example:\\n{}\\n\\n\"\n            \"Votes:\\n{}\\n\\n```\"\n        ).format(\n            search,\n            re.sub(r\"\\[(.*?)\\]\", r\"\\1\", defin[\"definition\"]),\n            re.sub(r\"\\[(.*?)\\]\", r\"\\1\", defin[\"example\"]),\n            defin[\"thumbs_up\"],\n        )\n\n        self.DB.main.put(b\"cache\", orjson.dumps(cache))\n        self.loop.call_later(300, self.DB.delete_cache, cache_search, cache)\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def wikir(self, ctx):\n        \"\"\"Gets a random Wikipedia article.\"\"\"\n        url = \"https://en.wikipedia.org/api/rest_v1/page/random/summary\"\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n        embed = discord.Embed(\n            color=discord.Color.blurple(),\n            title=data[\"title\"],\n            description=data[\"extract\"],\n            url=data[\"content_urls\"][\"desktop\"][\"page\"],\n        )\n        embed.set_image(url=data[\"thumbnail\"][\"source\"])\n\n        await ctx.send(embed=embed)\n\n    @commands.command(aliases=[\"wiki\"])\n    async def wikipedia(self, ctx, *, search: str):\n        \"\"\"Return a list of results containing your search query from Wikipedia.\n\n        search: str\n            The term to search Wikipedia for.\n        \"\"\"\n        async with ctx.typing():\n            titles = await self.bot.get_json(\n                f\"https://en.wikipedia.org/w/api.php?action=opensearch&search={search}\",\n            )\n        embed = discord.Embed(color=discord.Color.blurple())\n\n        def check(message: discord.Message) -> bool:\n            return (\n                message.author.id == ctx.author.id\n                and message.channel == ctx.channel\n            )\n\n        if not titles:\n            embed.description = \"```Couldn't find any results```\"\n            return await ctx.send(embed=embed)\n\n        embed.title = f\"Wikipedia results for `{search}`\"\n        embed.description = \"\\n\".join(\n            f\"`{index}` [{title}]({url})\"\n            for index, (title, url) in enumerate(zip(titles[1], titles[3]), start=1)\n        )\n        embed.timestamp = datetime.utcnow()\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def covid(self, ctx, *, country=\"nz\"):\n        \"\"\"Shows current coronavirus cases, defaults to New Zealand.\n\n        country: str - The country to search for\n        \"\"\"\n        url = f\"https://disease.sh/v3/covid-19/countries/{country}\"\n\n        embed = discord.Embed(colour=discord.Color.red())\n\n        async with ctx.typing():\n            data = await self.bot.get_json(url)\n\n            if \"message\" 
in data:\n                embed.description = (\n                    \"```Not a valid country\\nExamples: NZ, New Zealand, all```\"\n                )\n                return await ctx.send(embed=embed)\n\n            embed.set_author(\n                name=f\"Coronavirus {data['country']}:\",\n                icon_url=data[\"countryInfo\"][\"flag\"],\n            )\n\n            embed.description = textwrap.dedent(\n                f\"\"\"\n                ```\n                Total Cases:   Total Deaths:\n                {data['cases']:<15,}{data['deaths']:,}\n\n                Active Cases:  Cases Today:\n                {data['active']:<15,}{data['todayCases']:,}\n\n                Deaths Today:  Recovered Total:\n                {data['todayDeaths']:<15,}{data['recovered']:,}\n                ```\n                \"\"\"\n            )\n            embed.timestamp = datetime.utcnow()\n\n        await ctx.send(embed=embed)\n\n    @commands.command(aliases=[\"gh\"])\n    async def github(self, ctx, username: str):\n        \"\"\"Fetches a member's GitHub information.\"\"\"\n        async with ctx.typing():\n            user_data = await self.bot.get_json(\n                f\"https://api.github.com/users/{username}\"\n            )\n\n            if user_data.get(\"message\") is not None:\n                return await ctx.send(\n                    embed=discord.Embed(\n                        title=f\"The profile for `{username}` was not found.\",\n                        colour=discord.Colour.dark_red(),\n                    )\n                )\n\n            org_data = await self.bot.get_json(user_data[\"organizations_url\"])\n\n            orgs = [\n                f\"[{org['login']}](https://github.com/{org['login']})\"\n                for org in org_data\n            ]\n            orgs_to_add = \" | \".join(orgs)\n\n            gists = user_data[\"public_gists\"]\n\n            if user_data[\"blog\"].startswith(\"http\"):\n                blog = user_data[\"blog\"]\n            elif user_data[\"blog\"]:\n                blog = f\"https://{user_data['blog']}\"\n            else:\n                blog = \"No website link available\"\n\n            embed = discord.Embed(\n                title=f\"`{user_data['login']}`'s GitHub profile info\",\n                description=user_data[\"bio\"] or \"\",\n                colour=0x7289DA,\n                url=user_data[\"html_url\"],\n                timestamp=datetime.strptime(\n                    user_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\"\n                ),\n            )\n            embed.set_thumbnail(url=user_data[\"avatar_url\"])\n            embed.set_footer(text=\"Account created at\")\n\n            if user_data[\"type\"] == \"User\":\n\n                embed.add_field(\n                    name=\"Followers\",\n                    value=f\"[{user_data['followers']}]({user_data['html_url']}?tab=followers)\",\n                )\n                embed.add_field(name=\"\\u200b\", value=\"\\u200b\")\n                embed.add_field(\n                    name=\"Following\",\n                    value=f\"[{user_data['following']}]({user_data['html_url']}?tab=following)\",\n                )\n\n            embed.add_field(\n                name=\"Public repos\",\n                value=f\"[{user_data['public_repos']}]({user_data['html_url']}?tab=repositories)\",\n            )\n            embed.add_field(name=\"\\u200b\", value=\"\\u200b\")\n\n            if user_data[\"type\"] == \"User\":\n                embed.add_field(\n                    name=\"Gists\", value=f\"[{gists}](https://gist.github.com/{username})\"\n                )\n\n            embed.add_field(\n                name=\"Organization(s)\",\n                value=orgs_to_add or \"No organizations\",\n            )\n            embed.add_field(name=\"\\u200b\", value=\"\\u200b\")\n            embed.add_field(name=\"Website\", value=blog)\n\n        await ctx.send(embed=embed)\n\n    @commands.command()\n    async def tenor(self, ctx, *, search):\n        \"\"\"Gets a random GIF from Tenor based on a search.\n\n        search: str\n            The GIF search term.\n        \"\"\"\n        cache_search = f\"tenor-{search}\"\n        cache = orjson.loads(self.DB.main.get(b\"cache\"))\n\n        if cache_search in cache:\n            url = random.choice(cache[cache_search])\n            cache[cache_search].remove(url)\n\n            if len(cache[cache_search]) == 0:\n                cache.pop(cache_search)\n\n            self.DB.main.put(b\"cache\", orjson.dumps(cache))\n\n            return await ctx.send(url)\n\n        url = f\"https://api.tenor.com/v1/search?q={search}&limit=50\"\n\n        async with ctx.typing():\n            tenor = await self.bot.get_json(url)\n\n            tenor = [image[\"media\"][0][\"gif\"][\"url\"] for image in tenor[\"results\"]]\n            image = 
random.choice(tenor)\n tenor.remove(image)\n cache[cache_search] = tenor\n\n self.DB.main.put(b\"cache\", orjson.dumps(cache))\n self.loop.call_later(300, self.DB.delete_cache, cache_search, cache)\n await ctx.send(image)\n\n\ndef setup(bot: commands.Bot) -> None:\n \"\"\"Starts apis cog.\"\"\"\n bot.add_cog(apis(bot))\n","sub_path":"cogs/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":36881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"278351467","text":"#\n# Test suite for Project Euler problems\n#\n#\n#\n#\n# Erlend Vollset\n#\n#\n#\n#\n\nfrom __future__ import print_function\nimport importlib, time, sys, glob, re\n\ndef main():\n gui = GUI()\n utils = Utils()\n specified_problems = []\n answers = utils.get_answers()\n\n gui.print_menu()\n ui = gui.get_user_input()\n if ui == \"2\":\n problems = utils.get_specified_problems()\n\n gui.print_table_start()\n\n for filename in glob.iglob(\"Problems/*.py\"):\n if ((ui == \"2\" and filename in problems) or ui == \"1\") and re.match(\"Problems/Problem_\", filename):\n problem_number = int(re.findall(r'\\d+', filename)[0])\n problem_module = importlib.import_module(\"Problems.Problem_{:02d}\".format(problem_number))\n\n start_time = time.time()\n answer = problem_module.compute()\n time_elapsed = time.time() - start_time\n\n gui.print_table_row(problem_number, answer, answers[problem_number], time_elapsed)\n\n gui.print_table_end()\n\nclass Utils:\n def get_answers(self):\n answers = {}\n ans_file = open(\"Data/Answers.txt\")\n for line in ans_file:\n ans = line.strip().split(\":\")\n answers[int(ans[0])] = ans[1]\n ans_file.close()\n return answers\n\n def get_specified_problems(self):\n return [\"Problems/Problem_{:02d}.py\".format(int(i)) for i in raw_input(\"Problems: \").split(\" \")]\n\nclass GUI:\n def print_menu(self):\n print(\"Test suite for Project Euler python solutions\")\n print(\"---------------------------------------------\")\n print(\"1) Test all problems\")\n print(\"2) Test specific problems (e.g. 
1 23 47)\")\n\n def get_user_input(self):\n ui = raw_input(\"Option: \")\n while not re.match(\"[1-2]\", ui):\n ui = raw_input(\"Option: \")\n return ui\n\n def print_table_start(self):\n print(\"\\n+\" + \"-\"*12 + \"+\" + \"-\"*27 + \"+\" + \"-\"*22 + \"+\")\n print(\"| {:10} | {:25} | {:20} |\".format(\"Problem\", \"Answer\", \"Time(s)\"))\n print(\"+\" + \"-\"*12 + \"+\" + \"-\"*27 + \"+\" + \"-\"*22 + \"+\")\n\n def print_table_row(self, pn, ans, actual_ans, time):\n if ans == actual_ans:\n print(\"| {:<10} | {:25} | {:<20} |\".format(pn, ans, time))\n else:\n print(\"| {:<10} | {:^25} | {:<20} |\".format(pn, \"***** FAILED *****\", \"N/A\"))\n\n def print_table_end(self):\n print(\"+\" + \"-\"*12 + \"+\" + \"-\"*27 + \"+\" + \"-\"*22 + \"+\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Euler_Test_Suite_Python.py","file_name":"Euler_Test_Suite_Python.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"73204104","text":"from typing import ContextManager\r\nfrom django.shortcuts import render,redirect\r\nfrom django.http import HttpResponse\r\nfrom django.contrib.auth import login,authenticate,logout\r\nfrom django.conf import settings\r\nfrom django.db.models import Q\r\nfrom django.contrib import messages\r\nimport requests\r\nimport random\r\n\r\nfrom account.forms import AccountUpdateForm, RegistrationForm, AccountAuthenticationForm,CaptchaTestForm\r\nfrom account.models import Account\r\ndef search_view(request, *args, **kwargs):\r\n print(\"yooooo\")\r\n context = {}\r\n if request.method == \"GET\":\r\n search_query = request.GET.get(\"q\")\r\n if len(search_query) > 0:\r\n search_results = Account.objects.filter(Q(username__icontains=search_query) | Q(email__icontains=search_query))\r\n user = request.user\r\n accounts = [] # [(account1, True), (account2, False), ...]\r\n for s in search_results:\r\n accounts.append((s, False)) # you have no friends yet\r\n context[\"accounts\"] = accounts\r\n return render(request, 'results.html', context)\r\n\r\n\r\ndef register_view(request,*args,**kwargs):\r\n context={}\r\n \r\n user=request.user\r\n if user.is_authenticated:\r\n return HttpResponse(f\"You are already authenticated as : {user.email}.\")\r\n \r\n \r\n if request.POST:\r\n form=RegistrationForm(request.POST)\r\n cap=request.POST.get(\"captha\")\r\n if form.is_valid():\r\n form.save()\r\n email=form.cleaned_data.get('email').lower()\r\n raw_password=form.cleaned_data.get('password1')\r\n account = authenticate(email=email, password=raw_password)\r\n login(request, account)\r\n destination = kwargs.get(\"next\")\r\n if destination:\r\n return redirect(destination)\r\n return redirect('personal:home')\r\n \r\n else:\r\n context[\"registration_form\"] = form\r\n \r\n \r\n else:\r\n form = RegistrationForm()\r\n context['registration_form'] = form\r\n \r\n return render(request,'register.html',context)\r\ndef some_view(request):\r\n if request.POST:\r\n form = CaptchaTestForm(request.POST)\r\n\r\n # Validate the form: the captcha field will automatically\r\n # check the input\r\n if form.is_valid():\r\n return redirect(\"account:register\")\r\n else:\r\n form = CaptchaTestForm()\r\n\r\n return render(request, 'index.html', {'form': form})\r\n\r\ndef logout_view(request):\r\n logout(request)\r\n return redirect(\"personal:home\")\r\ndef login_view(request,*args,**kwargs):\r\n context={}\r\n user=request.user\r\n if user.is_authenticated:\r\n return 
redirect(\"personal:home\")\r\n    destination = get_redirect_if_exists(request)\r\n    print(\"destination: \" + str(destination))\r\n    if request.POST:\r\n        form = AccountAuthenticationForm(request.POST)\r\n        if form.is_valid():\r\n            email = request.POST['email']\r\n            password = request.POST['password']\r\n            user = authenticate(email=email, password=password)\r\n            if user:\r\n                login(request, user)\r\n                if destination:\r\n                    return redirect(destination)\r\n                return redirect(\"personal:home\")\r\n    else:\r\n        form = AccountAuthenticationForm()\r\n    context['login_form'] = form\r\n    return render(request, \"login.html\", context)\r\n\r\ndef get_redirect_if_exists(request):\r\n\tredirect = None\r\n\tif request.GET:\r\n\t\tif request.GET.get(\"next\"):\r\n\t\t\tredirect = str(request.GET.get(\"next\"))\r\n\treturn redirect\r\ndef account_views(request,*args,**kwargs):\r\n    context={}\r\n    user_id=kwargs.get(\"user_id\")\r\n    try:\r\n        account=Account.objects.get(pk=user_id)\r\n    except Account.DoesNotExist:\r\n        return HttpResponse(\"User not found\")\r\n    if account:\r\n        context['id']=account.id\r\n        context['username']=account.username\r\n        context['email']=account.email\r\n        is_self=True\r\n        user=request.user\r\n        if user.is_authenticated and user!=account:\r\n            is_self=False\r\n        elif not user.is_authenticated:\r\n            is_self=False\r\n        context['is_self']=is_self\r\n        context['BASE']=settings.BASE_URL\r\n    return render(request, \"account.html\", context)\r\ndef edit_account_view(request,*args,**kwargs):\r\n    if not request.user.is_authenticated:\r\n        return redirect(\"Login\")\r\n    user_id = kwargs.get(\"user_id\")\r\n    try:\r\n        account=Account.objects.get(pk=user_id)\r\n    except Account.DoesNotExist:\r\n        return HttpResponse(\"Something went wrong\")\r\n    if account.pk != request.user.pk:\r\n        return HttpResponse(\"You cannot edit another user's profile\")\r\n    context={}\r\n    if request.POST:\r\n        form=AccountUpdateForm(request.POST,request.FILES,instance=request.user)\r\n        if form.is_valid():\r\n            form.save()\r\n            return redirect(\"account:view\",user_id=account.pk)\r\n        else:\r\n            form=AccountUpdateForm(request.POST, instance=request.user,\r\n                initial={\r\n                    \"id\":account.pk,\r\n                    \"email\":account.email,\r\n                    \"username\":account.username,\r\n                    \"hide_email\":account.hide_email,\r\n\r\n                }\r\n            )\r\n            context['form']=form\r\n    else:\r\n        form=AccountUpdateForm(\r\n            initial={\r\n                \"id\":account.pk,\r\n                \"email\":account.email,\r\n                \"username\":account.username,\r\n                \"hide_email\":account.hide_email,\r\n\r\n            }\r\n        )\r\n\r\n        context['form']=form\r\n    return render(request, \"edit_account.html\", context)\r\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"574235468","text":"import os\nimport sys\nimport glob\nimport copy\nimport yaml\nimport root_numpy\nimport math\nimport numpy as np\nfrom pprint import pprint\nimport ROOT\n\n\nclass DataCard:\n    def __init__(self,datacardName,path,yamlName,groups,hist_conv,era,use_syst=False,root_subdir=None):\n        self.datacardName = datacardName\n        self.path = path\n        self.groups = groups\n        self.hist_conv = hist_conv\n        self.era = str(era)\n        self.use_syst = use_syst\n        self.root_subdir = root_subdir\n        self.content = {k:{g:None for g in self.groups.keys()} for k in self.hist_conv.keys()}\n\n        self.yaml_dict = self.loadYaml(os.path.join(self.path,yamlName))\n        self.loopOverFiles()\n        self.saveDatacard()\n\n    def loopOverFiles(self):\n        for f in 
glob.glob(os.path.join(self.path,'results','*.root')):\n            if '__skeleton__' in f:\n                continue\n            sample = os.path.basename(f)\n            # Check if in the group list #\n            group = self.findGroup(sample)\n            if group is None:\n                raise RuntimeError(\"[WARNING] Could not find sample %s in group list\"%sample)\n\n            hist_dict = self.getHistograms(f)\n            self.addSampleToGroup(hist_dict,group)\n\n    def addSampleToGroup(self,hist_dict,group):\n        for histname,hist in hist_dict.items():\n            if self.content[histname][group] is None:\n                self.content[histname][group] = hist\n            else:\n                self.content[histname][group].Add(hist)\n\n    def findGroup(self,sample):\n        gr = None\n        for key,val in self.groups.items():\n            if not isinstance(val,list):\n                raise RuntimeError(\"Group %s is not a list\"%key)\n            if sample in val:\n                gr = key\n                break\n        return gr\n    \n    def getHistograms(self,rootfile):\n        f = ROOT.TFile(rootfile)\n        sample = os.path.basename(rootfile)\n        # Get config info #\n        lumi = self.yaml_dict[\"luminosity\"][self.era]\n        sample_type = self.yaml_dict[\"samples\"][sample]['type']\n        if sample_type == \"mc\" or sample_type == \"signal\":\n            xsec = self.yaml_dict[\"samples\"][sample]['cross-section']\n            sumweight = self.yaml_dict[\"samples\"][sample]['generated-events']\n            br = self.yaml_dict[\"samples\"][sample][\"branching-ratio\"] if \"branching-ratio\" in self.yaml_dict[\"samples\"][sample].keys() else 1\n        else:\n            xsec = None\n            sumweight = None\n            br = None\n\n        # Get list of hist names #\n        list_histnames = self.getHistList(f)\n\n        # Loop through hists #\n        hist_dict = {}\n        for datacardname, histname in self.hist_conv.items():\n            # Check #\n            if histname not in list_histnames:\n                print(\"Could not find hist %s in %s\"%(histname,rootfile))\n                continue\n            listsyst = [hn for hn in list_histnames if histname in hn and '__' in hn] if self.use_syst else []\n            # Nominal histogram #\n            h = self.getHistogram(f,histname,lumi,xsec,br,sumweight)\n            hist_dict[datacardname] = h\n            # Systematic histograms #\n            for syst in listsyst:\n                systName = syst.split('__')[-1]\n                systName = systName.replace('up','Up')\n                systName = systName.replace('down','Down')\n                h = self.getHistogram(f,syst,lumi,xsec,br,sumweight)\n                hist_dict[datacardname+'_'+systName] = h\n            # Save in dict #\n        f.Close()\n        return hist_dict\n\n    @staticmethod\n    def getHistogram(f,histnom,lumi=None,xsec=None,br=None,sumweight=None):\n        # Get hist #\n        h = copy.deepcopy(f.Get(histnom))\n        # Normalize hist to data #\n        if lumi is not None and xsec is not None and br is not None and sumweight is not None:\n            h.Scale(lumi*xsec*br/sumweight)\n        return h\n    \n    \n    @staticmethod\n    def getHistList(f):\n        l = []\n        for key in f.GetListOfKeys():\n            obj = key.ReadObj()\n            if \"TH1\" in obj.ClassName():\n                l.append(obj.GetName())\n        return l\n    \n\n    def loadYaml(self,yaml_path):\n        # Parse YAML #\n        with open(yaml_path,\"r\") as handle:\n            full_dict = yaml.load(handle,Loader=yaml.FullLoader)\n        # Get Lumi per era # \n        lumi_dict = full_dict[\"configuration\"][\"luminosity\"]\n\n        # Get data per sample #\n        sample_dict = {}\n        info_to_keep = ['cross-section','generated-events','group','type','era']\n        for sample,data in full_dict['files'].items():\n            sample_dict[sample] = {k:data[k] for k in data.keys() & info_to_keep}\n\n        return {'luminosity':lumi_dict,'samples':sample_dict}\n\n\n    def saveDatacard(self):\n        path_datacard = os.path.join(os.path.abspath(os.path.dirname(__file__)),self.datacardName)\n        if not os.path.exists(path_datacard):\n            os.makedirs(path_datacard)\n        for key, gdict in self.content.items():\n            # Create file #\n            filename = 
os.path.join(path_datacard,key+'_'+self.era+'.root')\n            f = ROOT.TFile(filename,'recreate')\n            if self.root_subdir is not None:\n                d = f.mkdir(self.root_subdir,self.root_subdir)\n                d.cd()\n            for group,hist in gdict.items():\n                if hist is None:\n                    continue\n                hist.SetTitle(group)\n                hist.SetName(group)\n                hist.Write(group)\n            f.Write()\n            f.Close()\n            print(\"Saved file %s\"%filename)\n\n\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2:\n        raise RuntimeError(\"Must provide the YAML file\")\n    with open(sys.argv[1],'r') as handle:\n        f = yaml.load(handle, Loader=yaml.FullLoader)\n    instance = DataCard(datacardName=sys.argv[1].replace('.yml',''),**f)\n","sub_path":"Datacards/produceDataCards.py","file_name":"produceDataCards.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"88195289","text":"from .DbConnect import DbConnect\n\nfrom DAO.BarcodeDAO import BarcodeDAO\nfrom DAO.CustomerDAO import CustomerDAO\nfrom DAO.FootwearDesignDAO import FootwearDesignDAO\nfrom DAO.FootwearSelectionDAO import FootwearSelectionDAO\nfrom DAO.InventoryDAO import InventoryDAO\nfrom DAO.VirtualCartDAO import VirtualCartDAO\n\nfrom Tables.TableStructs import *\n\nclass AndroidService:\n\n\t# this method is to be used in other public methods. Thus, for the purpose of \n\t# one connection per transaction, pass connector as a parameter\n\n\t# given a barcode, it should return the number of items that are not reserved\n\tdef numItemsAvailable(self, barcode):\n\t\tdbConnect = DbConnect(FootwearSelectionDAO.getDbDir())\n\t\tconnector = dbConnect.getConnection()\n\n\t\treturn self.__numItemsAvailable(connector, barcode)\n\n\n\tdef __numItemsAvailable(self, connector, barcode):\n\n\t\tbarcodeDAO = 
BarcodeDAO(connector)\n\t\tfor footwearSelectionPriKey in footwearSelectionPriKeys:\n\t\t\tentries = barcodeDAO.selectEntries(\"FootwearSelectionDetailsFk\", footwearSelectionPriKey)\n\n\t\t\tbarcodePriKey = entries[0]\n\n\t\t\tinventoryDAO = InventoryDAO(connector)\n\t\t\tinventories = inventoryDAO.getPriKeys(\"BarcodeDetailsFk\", barcodePriKey)\n\n\t\t\tfor entry in entries:\n\t\t\t\tretVal.append((entry[1], entry[3], entry[4], entry[5], entry[6], len(inventories) ) )\n\n\t\treturn retVal\n\n\n\t# given the customerId, barcode and quantity, check whether the requested number of\n\t# items (specified by barcode) are available in the inventory. If available, place \n\t# order and return (True, [[InventoryDetailsId, X_index, Y_index, X_encoder, Y_encoder], ...], virtualCartRowId)\n\t# Otherwise, return (False, numAvailableItems)\n\tdef reserveInventoryIfAvailable(self, customerId, barcode, quantity):\n\n\t\tdbConnect = DbConnect(BarcodeDAO.getDbDir())\n\t\tconnector = dbConnect.getConnection()\n\n\t\t# the customer should order the exact amount. For example if there exists 1 item in the\n\t\t# back room and the customer ordered 2 items, then the whole transaction is rejected.\n\t\t# if returning false, the available quantity\n\t\tnumAvailableItem = self.__numItemsAvailable(connector, barcode)\n\t\tif numAvailableItem < quantity:\n\t\t\treturn (False, numAvailableItem)\n\n\n\t\tbarcodeDAO = BarcodeDAO(connector)\n\t\tbarcodePriKey = barcodeDAO.getPriKeys(\"Barcode\", barcode)[0]\n\n\n\t\tinventoryDAO = InventoryDAO(connector)\n\t\tinventoryEntries = inventoryDAO.selectEntries(\"BarcodeDetailsFk\", barcodePriKey)\n\n\n\t\tvirtualCartDAO = VirtualCartDAO(connector)\n\t\tnewVirtualCartRowId = virtualCartDAO.createAnEntry( (customerId, barcodePriKey, quantity, 0) )\n\n\n\t\treservedInventoryLoc = list()\n\n\t\treserveCount = 0\n\t\tfor inventoryEntry in inventoryEntries:\n\n\t\t\tif reserveCount == quantity:\n\t\t\t\tbreak\n\n\t\t\tif inventoryEntry[6] != 0:\n\t\t\t\tcontinue\n\n\t\t\tinventoryDAO.update(inventoryEntry[0], \"CheckoutFlag\", 1)\n\n\t\t\t# [inventoryDetailsId, barcodeDetailsFk, X_index, Y_index, X_encoder, Y_encoder, checkoutFlag]\n\t\t\tentry = inventoryDAO.selectAnEntry(inventoryEntry[0])\n\n\t\t\treservedInventoryLoc.append(entry[0:1]+entry[2:6]) # omit barcodeDetailsFk and checkoutFlag\n\t\t\treserveCount += 1\n\n\t\treturn (True, reservedInventoryLoc, newVirtualCartRowId)\n","sub_path":"DataBaseAccess/Service/AndroidService.py","file_name":"AndroidService.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"522723824","text":"# https://medium.com/hackernoon/how-to-run-asynchronous-web-requests-in-parallel-with-python-3-5-without-aiohttp-264dc0f8546\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\n\n\ndef create_asynchronous(func, args):\n async def get_asynchronous():\n with ThreadPoolExecutor(max_workers=10) as executor:\n with requests.Session() as session:\n loop = asyncio.get_event_loop()\n tasks = [\n loop.run_in_executor(\n executor,\n func,\n *(arg, session)\n )\n for arg in args\n ]\n return await asyncio.gather(*tasks)\n return get_asynchronous\n\n\ndef map_asynchronous(func, args):\n asynchronous_func = create_asynchronous(func, args)\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(asynchronous_func())\n loop.run_until_complete(future)\n return 
future.result()\n","sub_path":"api/utils/asynchronous.py","file_name":"asynchronous.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"638212059","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport json\n\nfrom flask import Blueprint, jsonify, render_template, request\n\nimport application.models as Models\nfrom application.utils import Pagination\nfrom application.utils.response import normal_resp\n\nadmin_bp = Blueprint('admin', __name__, url_prefix='/admin')\n\n\n@admin_bp.route('/', methods=['GET'])\ndef index():\n return render_template('admin/index.html')\n\n\n@admin_bp.route('/login', methods=['GET'])\ndef login():\n return render_template('admin/login.html')\n\n\n@admin_bp.route('/posts', methods=['GET'])\ndef all_posts():\n page = int(request.args.get('page', 1))\n posts = Models.Post.objects.all()\n page = Pagination(posts, page)\n return render_template('admin/post-list.html', page=page)\n\n\n@admin_bp.route('/posts/oper', methods=['GET'])\ndef oper_post():\n id = request.args.get('pid')\n if id:\n post = Models.Post.objects.get_by_id(id)\n return render_template('admin/add-new.html', post=post)\n return render_template('admin/add-new.html')\n\n\n@admin_bp.route('/tags', methods=['GET', 'POST'])\ndef all_tags():\n if request.method == 'GET':\n return render_template('admin/tag-list.html')\n else:\n tags = [tag.to_json() for tag in Models.Tag.objects.all()]\n rtn = {\n 'page': 1,\n 'total': len(Models.Tag.objects.all()),\n 'rows': [{'id': tag.get('id'),\n 'cell': [tag.get('id'), tag.get('uuid'), tag.get('name'), tag.get('slug'), 0]} for tag in tags]\n }\n return jsonify(rtn)\n\n\n@admin_bp.route('/categories', methods=['GET', 'POST'])\ndef all_categories():\n if request.method == 'GET':\n return render_template('admin/category-list.html')\n else:\n cates = [cate.to_json() for cate in Models.Category.objects.all()]\n rtn = {\n 'page': 1,\n 'total': len(Models.Category.objects.all()),\n 'rows': [{'id': cate.get('id'),\n 'cell': [cate.get('id'), cate.get('uuid'), cate.get('name'), cate.get('slug'), 0]} for cate in cates]\n }\n return jsonify(rtn)\n\n\n@admin_bp.route('/categories', methods=['PUT'])\ndef add_category():\n data = request.get_json()\n cate = Models.Category.objects.filter(name=data.get('category')).first()\n if not cate:\n cate = Models.Category.objects.create(name=data.get('category'))\n cate = Models.Category.objects.filter(name=data.get('category')).first()\n return normal_resp({'cate': cate.to_json()})\n\n\n@admin_bp.route('/uploads', methods=['GET'])\ndef all_uploads():\n return render_template('admin/upload-list.html')\n\n\n@admin_bp.route('/comments', methods=['GET'])\ndef all_comments():\n return render_template('admin/comment-list.html')\n\n\n@admin_bp.route('/settings', methods=['GET'])\ndef settings():\n return render_template('admin/profile.html')\n\n\n@admin_bp.route('/other', methods=['GET'])\ndef other():\n pass\n","sub_path":"application/controllers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"338434470","text":"# -- encoding:utf-8 --\r\n\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import slim\r\n\r\nfrom nets.metric import Metrics\r\nfrom nets import base_model\r\n\r\n\r\nclass Network(base_model.Network):\r\n\r\n def __init__(self, with_word2vec=False, vocab_size=None, 
embedding_dimensions=None,\r\n embedding_table=None, train_embedding_table=False,\r\n num_class=2, network_name=\"TextRNN\", weight_decay=0.01,\r\n optimizer_type=\"adam\", optimizer_parameters_func=None, saver_parameters={'max_to_keep': 2},\r\n num_units=128, layers=3,\r\n attention_dimension_size=128, attention_layers=3, attention_headers=16, *args, **kwargs):\r\n \"\"\"\r\n :param with_word2vec: 是否使用Word2Vec训练好的转换参数作为Embedding Lookup的参赛值\r\n :param vocab_size: 词汇数目\r\n :param embedding_dimensions: Embedding Loopup转换的时候,单词转换的词向量大小\r\n :param embedding_table: 训练好的单词向量映射表\r\n :param train_embedding_table: 是否训练train_embedding_table的参数值\r\n :param num_class: 类别数目\r\n :param network_name: 网络名称\r\n :param weight_decay: L2正则项的系数\r\n :param optimizer_type: 优化器的类别\r\n :param optimizer_parameters_func: 构建优化器的参数的函数\r\n :param saver_parameters: 模型持久化器的参数\r\n :param num_units: RNN Cell中的神经元数目\r\n :param layers: RNN的层次\r\n :param attention_dimension_size: Self Attention计算过程中的维度大小\r\n :param attention_layers: Transformer的层次\r\n :param attention_headers: 头的数目\r\n \"\"\"\r\n self.attention_dimension_size = attention_dimension_size\r\n self.attention_layers = attention_layers\r\n self.attention_headers = attention_headers\r\n self.num_units = num_units # RNN Cell的神经元数目\r\n self.layers = layers # RNN的层次\r\n\r\n super(Network, self).__init__(with_word2vec=with_word2vec, vocab_size=vocab_size,\r\n embedding_dimensions=embedding_dimensions,\r\n embedding_table=embedding_table, train_embedding_table=train_embedding_table,\r\n num_class=num_class, network_name=network_name, weight_decay=weight_decay,\r\n optimizer_type=optimizer_type,\r\n optimizer_parameters_func=optimizer_parameters_func,\r\n saver_parameters=saver_parameters)\r\n\r\n def interface(self):\r\n \"\"\"\r\n 前向网络构建\r\n batch_size: N\r\n feature height: H, 将序列长度T认为是H\r\n feature width: W,将Embedding size大小认为是W\r\n feature channel : C,一个文本就相当于一个Feature Map,通道数为1\r\n sentence_length: T\r\n embedding size: E\r\n :return:\r\n \"\"\"\r\n with tf.variable_scope(self.network_name):\r\n with slim.arg_scope(self.arg_score()):\r\n with tf.variable_scope(\"placeholders\"):\r\n self.global_step = tf.train.get_or_create_global_step()\r\n # 输入的单词id,形状为:[N,T]\r\n self.inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_word_id')\r\n # 希望输出的类别id, 形状为:[N,]\r\n self.targets = tf.placeholder(dtype=tf.int32, shape=[None], name='target_class_id')\r\n # Dropout\r\n self.dropout_keep_prob = tf.placeholder_with_default(1.0, shape=[], name='dropout_keep_prob')\r\n\r\n # 1. Embedding Layer(N,T,E)\r\n embedding_inputs = self.embedding_lookup(self.inputs)\r\n\r\n # 2. 使用Transformer来提取高阶特征\r\n with tf.variable_scope(\"rnn\"):\r\n # a. 定义RNN的cell构建函数\r\n def cell(_units):\r\n _cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=_units)\r\n return tf.nn.rnn_cell.DropoutWrapper(cell=_cell, output_keep_prob=self.dropout_keep_prob)\r\n\r\n # b. 构建前向的cell和反向cell\r\n cell_fw = tf.nn.rnn_cell.MultiRNNCell(cells=[cell(self.num_units) for _ in range(self.layers)])\r\n cell_bw = tf.nn.rnn_cell.MultiRNNCell(cells=[cell(self.num_units) for _ in range(self.layers)])\r\n\r\n # c. 
                    # c. get the output vectors for the whole sequence\r\n                    # the final features follow the original left-to-right order of the sequence\r\n                    # (forward features [N,T,E], backward features [N,T,E]), (final forward state, final backward state)\r\n                    (output_fw, output_bw), (output_state_fw, output_state_bw) = tf.nn.bidirectional_dynamic_rnn(\r\n                        cell_fw,  # forward RNN cell\r\n                        cell_bw,  # backward RNN cell\r\n                        inputs=embedding_inputs,  # input values, [N,T,E]\r\n                        dtype=tf.float32,  # dtype used to initialize the RNN state\r\n                    )\r\n\r\n                with tf.variable_scope(\"transformer\"):\r\n                    with tf.variable_scope(\"Input\"):\r\n                        encoder_input = tf.layers.dense(tf.concat([output_fw, output_bw], axis=-1),\r\n                                                        units=self.attention_dimension_size,\r\n                                                        activation=tf.nn.relu)\r\n\r\n                    for layer in range(self.attention_layers):\r\n                        with tf.variable_scope(\"Encoder_{}\".format(layer)):\r\n                            # 1. compute each attention head\r\n                            attention_outputs = []\r\n                            for header in range(self.attention_headers):\r\n                                with tf.variable_scope(\"Header_{}\".format(header)):\r\n                                    attention_output = self._self_attention(\r\n                                        H=encoder_input,\r\n                                        attention_dimension_size=self.attention_dimension_size\r\n                                    )\r\n                                    attention_outputs.append(attention_output)\r\n\r\n                            # 2. concatenate the heads\r\n                            attention_output = tf.concat(attention_outputs, axis=-1)\r\n\r\n                            # 3. apply a linear projection\r\n                            attention_output = tf.layers.dense(attention_output,\r\n                                                               units=self.attention_dimension_size,\r\n                                                               activation=None)\r\n\r\n                            # 4. residual connection between this layer's output and its input\r\n                            attention_output = tf.nn.relu(attention_output + encoder_input)\r\n\r\n                            # 5. use this layer's output as the next layer's input\r\n                            encoder_input = attention_output\r\n\r\n                # 3. merge the higher-order features into the final feature representation\r\n                with tf.variable_scope(\"merge_feature\"):\r\n                    # 4. average the features over all time steps\r\n                    features = tf.reduce_mean(attention_output, axis=1)\r\n\r\n                # 4. FFN + softmax for the final decision output\r\n                with tf.variable_scope(\"project\"):\r\n                    score = slim.fully_connected(features, num_outputs=self.num_class, activation_fn=None)\r\n                    # rename; these are the confidences of the N texts over the num_class classes\r\n                    self.logits = tf.identity(score, 'logits')\r\n                    # per-class probabilities for each of the N texts\r\n                    self.probability = tf.nn.softmax(self.logits, name='probability')\r\n                    # final predicted class ids\r\n                    self.predictions = tf.argmax(self.logits, axis=-1, name='predictions')\r\n\r\n        # configure the saver to restore only the model variables\r\n        self.saver_parameters['var_list'] = tf.global_variables()\r\n\r\n    def _self_attention(self, H, attention_dimension_size):\r\n        \"\"\"\r\n        Compute self-attention.\r\n        :param H: [N,T,E], N sequences, each with T time steps and an E-dimensional vector per step\r\n        :return:\r\n        \"\"\"\r\n        # 0. get the shape information\r\n        hidden_size = H.shape[-1]\r\n        batch_size, sequence_length, _ = tf.unstack(tf.shape(H))\r\n        # 1. reshape the input\r\n        H = tf.reshape(H, shape=tf.stack([batch_size * sequence_length, hidden_size]))\r\n        # 2. compute Q, K and V\r\n        Q = tf.layers.dense(H, units=attention_dimension_size)\r\n        K = tf.layers.dense(H, units=attention_dimension_size)\r\n        V = tf.layers.dense(H, units=attention_dimension_size, activation=tf.nn.relu)\r\n        # 3. Reshape\r\n        Q = tf.reshape(Q, shape=tf.stack([batch_size, sequence_length, attention_dimension_size]))\r\n        K = tf.reshape(K, shape=tf.stack([batch_size, sequence_length, attention_dimension_size]))\r\n        V = tf.reshape(V, shape=tf.stack([batch_size, sequence_length, attention_dimension_size]))\r\n        # 4. compute the pairwise relevance scores ([N,T,E] x [N,T,E]^T --> [N,T,T])\r\n        scores = tf.matmul(Q, K, False, True) / np.sqrt(attention_dimension_size)\r\n        # 5. turn the scores into probabilities ([N,T,T])\r\n        weights = tf.nn.softmax(scores)\r\n        # 6. compute the final result\r\n
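        # NOTE: steps 4-6 together are standard scaled dot-product attention: softmax(Q K^T / sqrt(d)) V\r\n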
        attention = tf.matmul(weights, V)\r\n        return attention\r\n","sub_path":"text_classsification/nets/text_rnn_transformer.py","file_name":"text_rnn_transformer.py","file_ext":"py","file_size_in_byte":9739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"417777896","text":"bl_info = {\n    \"name\": \"keUnbevel\",\n    \"author\": \"Kjell Emanuelsson\",\n    \"category\": \"Modeling\",\n    \"version\": (1, 1, 0),\n    \"blender\": (2, 80, 0),\n}\n\nimport bpy\nimport bmesh\nfrom bpy.types import Operator\nfrom .ke_utils import get_loops\n# from mathutils import Vector, Matrix\nfrom mathutils.geometry import intersect_line_line\n\n\nclass MESH_OT_ke_unbevel(Operator):\n    bl_idname = \"mesh.ke_unbevel\"\n    bl_label = \"Unbevel\"\n    bl_description = \"Un-bevel by selecting non-looping edges across the bevel. (2 end-edges per selection are the vectors)\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    @classmethod\n    def poll(cls, context):\n        return (context.object is not None and\n                context.object.type == 'MESH' and\n                context.object.data.is_editmode)\n\n    def execute(self, context):\n        obj = bpy.context.active_object\n        od = obj.data\n        bm = bmesh.from_edit_mesh(od)\n        sel_mode = bpy.context.tool_settings.mesh_select_mode[:]\n\n        if sel_mode[1]:\n            bm.edges.ensure_lookup_table()\n            sel_edges = [e for e in bm.edges if e.select]\n            vert_pairs = []\n            for e in sel_edges:\n                vp = [v for v in e.verts]\n                vert_pairs.append(vp)\n\n            loops = get_loops(vert_pairs, legacy=True)\n\n            bpy.ops.mesh.select_mode(type='VERT')\n\n            for loop in loops:\n                bm.verts.ensure_lookup_table()\n                bpy.ops.mesh.select_all(action='DESELECT')\n\n                v1, v2, v3, v4 = loop[0].co, loop[1].co, loop[-1].co, loop[-2].co\n                xpoint = intersect_line_line(v1, v2, v3, v4)[0]\n                merge_point = loop[1]\n                merge_point.co = xpoint\n\n                merge_verts = loop[1:-1]\n                for v in merge_verts:\n                    bm.verts[v.index].select = True\n\n                bm.select_history.add(merge_point)\n                bm.verts[merge_point.index].select = True\n                bpy.ops.mesh.merge(type='LAST', uvs=True)\n\n            bm.normal_update()\n            bmesh.update_edit_mesh(od, True)\n            bpy.ops.mesh.select_mode(type='EDGE')\n\n        else:\n            self.report({\"INFO\"}, \"Selection Mode Error: Please use EDGE selection.\")\n\n        return {\"FINISHED\"}\n\n\n# -------------------------------------------------------------------------------------------------\n# Class Registration & Unregistration\n# -------------------------------------------------------------------------------------------------\nclasses = (MESH_OT_ke_unbevel,\n           )\n\ndef register():\n    for c in classes:\n        bpy.utils.register_class(c)\n\n\ndef unregister():\n    for c in reversed(classes):\n        bpy.utils.unregister_class(c)\n\n\nif __name__ == \"__main__\":\n    register()\n","sub_path":"scripts/addons/kekit/ke_unbevel.py","file_name":"ke_unbevel.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"499617729","text":"from django.shortcuts import render, redirect\nfrom .forms import ClientForm, ClientSearchForm\nfrom .models import Client\nfrom django.urls import reverse\n\n# Create your views here.\n\ndef client_ajout(request):\n\tdone = False\n\tif request.method == 'POST':\n\t\tclient_form = ClientForm(request.POST)\n\t\tif client_form.is_valid():\n\t\t\tcd = client_form.cleaned_data\n\t\t\tClient.objects.create(**cd)\n\t\t\tdone = True\n\t\t\tclient_form = ClientForm()\n\telse:\n\t\tclient_form = ClientForm()\n\tto_recherche = 
request.build_absolute_uri(reverse(\"client:client_recherche\"))\n\treturn render(request, 'client/ajout.html', {'form':client_form, 'done': done, \"recherche\": to_recherche})\n\ndef client_recherche(request):\n\tmsg = None\n\tif request.method == 'POST':\n\t\tclients = Client.objects.filter(nom=request.POST[\"nom\"])\n\t\tif len(clients) == 0:\n\t\t\tmsg = \"pas de resultat\"\n\telse:\n\t\tclients = Client.objects.all()\n\t\tif len(clients) == 0:\n\t\t\tmsg = \"aucun client n'est enregisté\"\n\tform = ClientSearchForm()\n\tliste = []\n\tfor client in clients:\n\t\tliste.append(\n\t\t\t(client, \n\t\t\t\trequest.build_absolute_uri(reverse(\"client:client_supp\", args=[client.id])),\n\t\t\t\trequest.build_absolute_uri(reverse(\"client:client_modifie\", args=[client.id]))),\n\t\t\t)\n\treturn render(request, 'client/recherche.html', \n\t\t{'form':form, \"msg\": msg, 'clients': clients, \n\t\t'len': len(clients), \"liste\": liste})\n\n\ndef client_supp(request, id_client=None):\n\tclient = Client.objects.filter(id=id_client).first()\n\tif request.method == \"POST\":\n\t\tclient.delete()\n\t\treturn redirect(\"client:client_recherche\")\n\treturn render(request, 'client/supprime.html', {\"client\": client})\n\ndef client_modifie(request, id_client=None):\n\tclient = Client.objects.get(id=id_client)\n\t# pre-fill the form with the client's current values (a plain Form cannot be bound to a model instance directly)\n\tclient_form = ClientForm(initial={'nom': client.nom, 'tele': client.tele, 'addresse': client.addresse})\n\tif request.method == \"POST\":\n\t\tclient_form = ClientForm(request.POST)\n\t\tif client_form.is_valid():\n\t\t\tcd = client_form.cleaned_data\n\t\t\tclient.nom = cd['nom']\n\t\t\tclient.tele = cd['tele']\n\t\t\tclient.addresse = cd['addresse']\n\t\t\tclient.save()\n\t\t\treturn redirect(\"client:client_recherche\")\n\n\treturn render(request, 'client/modife.html', {\"form\": client_form, \"client\": client})\n\n","sub_path":"stepbystep/step1/client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"164845496","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport os\nimport re\nimport setuptools\nfrom wikipedia import get_version\n\n\ndef local_file(file):\n    ''' open the desired file in utf-8 '''\n    return codecs.open(\n        os.path.join(os.path.dirname(__file__), file), 'r', 'utf-8'\n    )\n\ninstall_reqs = [\n    line.strip()\n    for line in local_file('requirements.txt').readlines()\n    if line.strip() != ''\n]\n\n\n\nsetuptools.setup(\n    name = \"wikipedia\",\n    version = get_version(),\n    author = \"Tyler Barrus\",\n    author_email = \"barrust@gmail.com\",\n    description = \"Wikipedia API for Python (forked from https://github.com/goldsmith/Wikipedia)\",\n    license = \"MIT\",\n    keywords = \"python wikipedia API\",\n    url = \"https://github.com/barrust/Wikipedia\",\n    install_requires = install_reqs,\n    packages = ['wikipedia'],\n    long_description = local_file('README.rst').read(),\n    classifiers = [\n        'Development Status :: 4 - Beta',\n        'Topic :: Software Development :: Libraries',\n        'License :: OSI Approved :: MIT License',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 3'\n    ],\n    test_suite=\"tests\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"229103899","text":"import numpy as np\nimport math\ndef imprimeVector(A,n):\n    for i in range(n):\n        varA=str(\"%8.7f\"%A[i])\n        print(\" |\" + varA+\"|\")\ndef imprimeMatriz(A):\n\tfor i in range(len(A)):\n\t\ttext = \" |\"\n\t\tfor j in 
range(len(A[i])):\n\t\t\tif(j==len(A)):\n\t\t\t\ttext = text +str(\"%8.3f\"%A[i][j])\n\t\t\telse:\n\t\t\t\t text = text +str(\"%8.3f\"%A[i][j])\n\t\tprint (text+\"| \")\n\tprint\ndef imprimeSistema(A,x,b,n):\n\ttext = \" |\"\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tvarA = str(\"%8.7f\"%A[i][j])\n\t\t\tif(A[i][j]>=0):\n\t\t\t\ttext = text +varA + \" \"\n\t\t\telse:\n\t\t\t\t text = text +varA + \" \"\n\t\tvarB = str(\"%8.7f\"%b[i])\n\t\tprint (text + \"|\" +\"|\"+\"\".join(x[i])+\"|\"+\" |\" + varB,end=\"\")\n\t\tif (b[i]<10):\n\t\t\tprint(\" |\")\n\t\telse:\n\t\t\tprint(\"|\")\n\t\ttext =\" |\"\ndef normainfinito(A):\n n=np.linalg.norm(A,np.inf)\n return n\ndef inversa(A):\n I=np.linalg.inv(A)\n return I\ndef residual(A,x,b):\n R=A.dot(x)-b\n return R\ndef condicional(A):\n c=np.linalg.cond(A, np.inf)\n return c\n\ndef solL(L,b): # Lx = b\n\tn = L.shape[0]\n\tx = np.zeros(L.shape[1])\n\tfor m in range(n):\n\t\tx[m] = b[m] - sum(L[m,i]*x[i] for i in range(m))\n\t\tx[m] /= L[m][m]\n\treturn x\n\ndef solU(U,b): # Ux = b\n\tn = U.shape[0]\n\tx = np.zeros(U.shape[1])\n\tfor m in range(n)[::-1]:\n\t\tx[m] = b[m] - sum(U[m,i]*x[i] for i in range(m,n))\n\t\tx[m] /= U[m][m]\n\treturn x\ndef solGaussJordan(A,b):\n\tA = A.copy()\n\tb = b.copy()\n\tn = A.shape[1]\n\tfor i in range(n):\n\t\tL = np.identity(n)\n\t\tL[:,i] = -A[:,i]/A[i,i]\n\t\tL[i,i] = 1/A[i,i]\n\t\tA = L.dot(A)\n\t\tb = L.dot(b)\n\treturn b\nprint(\"(a) Las variables son las demandas de cada producto de los primarios, intermedios y finales:\")\nprint(\"x1B1, x2B2, x3B3, x4Z1, x5Z2, x6E1, x7E2\")\nprint(\"(b) El sistema es:\")\n#print(\"\\nLa calidad de solucion del problema:\\n\")\n#print(str((np.linalg.norm(R,np.inf)/np.linalg.norm(b,np.inf))*(1/np.linalg.cond(A,np.inf)))+\" ≤ ||E||∞/||x||∞ ≤ \"+str((np.linalg.norm(R,np.inf)/np.linalg.norm(b,np.inf))*np.linalg.cond(A,np.inf)))\nn = 7\nA =np.array([[ 1, 0, 0, -3, -5, 0, 0],\n\t[ 0, 1, 0, -8, -4, 0, 0],\n\t[ 0, 0, 1, -2, -6, 0, 0],\n\t[ 0, 0, 0, 1, 0, -7, -8],\n [ 0, 0, 0, 0, 1, -6, -2],\n [ 0, 0, 0, 0, 0, 1, 0],\n [ 0, 0, 0, 0, 0, 0, 1]\n\t])\nx = [[\"B1\"], [\"B2\"], [\"B3\"], [\"Z1\"], [\"Z2\"], [\"E1\"], [\"E2\"]]\nb = np.array([ 5, 8, 10, 5, 2, 3, 4])\nimprimeSistema(A, x, b, n)\nprint(\"(c) Por el metodo de Gauss-Jordan, se tiene:\")\nx = solGaussJordan(A,b)\nprint(\"La solucion aproximada es:\")\nprint(\"X: \")\nimprimeVector(x,n)\nprint(\"(d) Se cumple:\")\nprint(str(normainfinito(residual(A,x,b)))+\"/(\"+str(normainfinito(b))+\"*\"+str(condicional(A))+\")\"+\" <= ||E||inf/||x||inf <= \"+str(condicional(A))+\"*\"+str(normainfinito(residual(A,x,b)))+\"/\"+str(normainfinito(b)))\nprint(\"==>\"+str(normainfinito(residual(A,x,b))/(normainfinito(b)*condicional(A)))+\"<=||E||inf/||x||inf<=\"+str(condicional(A)*normainfinito(residual(A,x,b))/normainfinito(b)))\nprint(\"Para este problema el metodo de Gauss-Jordan resulta ser bueno, dado que la condicion es un valor pequeño.\")","sub_path":"CM4F1_Análisis_Numérico_I/examenes/parcial/20-1/pregunta2.py","file_name":"pregunta2.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"568520232","text":"# Import Basic Function\nimport sys\nimport numpy as np\nimport math\n\n# Import DQN Parameters\nfrom Agent_controller import DQNController\nfrom RewardFun import RewardErr\n\n# Import Dynamics\nfrom dydt import RK4Order\n\n# Constant Parameters\nD2R = math.pi/180\nR2D = 180/math.pi\ne = 0\n\n# Simulation initialize\ndt = 0.1\nBlue_States 
= np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\nBlue_States[0] = 0  # Alpha dot\nBlue_States[1] = 0  # Alpha\nBlue_States[2] = 0  # Phi\nBlue_States[3] = 250  # Velocity\nBlue_States[4] = 0  # Psi\nBlue_States[5] = 37 * D2R  # Latitude\nBlue_States[6] = 126 * D2R  # Longitude\nBlue_States[7] = 10000  # Altitude\n\nEPISODES = 500\nEpoch = 10\ntheta_scores, psi_scores, episodes = [], [], []\nload_flag = False\nTheta_Agent = DQNController(2, 3, load_flag)\nPsi_Agent = DQNController(2, 3, load_flag)\n\nwhile True:\n    theta_done = False\n    psi_done = False\n    e = e + 1\n    theta_score = 0\n    psi_score = 0\n    theta_r = 0\n    psi_r = 0\n    theta_scored = 0\n    psi_scored = 0  # initialized here so the episode summary below cannot hit a NameError when RewardErr ends the episode before the timer does\n    timer = 0\n    mean = 0\n    sB = Blue_States  # States\n    uB = [sB[3], sB[1], sB[2]]  # Command(Velocity, Alpha, Phi)\n\n    # New Theta Angle\n    sB[1] = np.random.randint(-10, 10) * D2R\n    sB[4] = np.random.randint(-10, 10) * D2R\n    theta_cmd = sB[1] + np.random.randint(-10, 10) * D2R\n    psi_cmd = sB[4] + np.random.randint(-10, 10) * D2R\n    while not theta_done and not psi_done:\n        # Theta\n        theta_Err = theta_cmd - sB[1]\n        if theta_Err > math.pi:\n            theta_Err = theta_Err - 360 * D2R\n        elif theta_Err < -math.pi:\n            theta_Err = theta_Err + 360 * D2R\n        Theta_States = [theta_Err, uB[1]]\n        action_Theta_index = Theta_Agent.get_action(Theta_States)\n        # Psi\n        psi_Err = psi_cmd - sB[4]\n        if psi_Err > math.pi:\n            psi_Err = psi_Err - 360 * D2R\n        elif psi_Err < -math.pi:\n            psi_Err = psi_Err + 360 * D2R\n        Psi_States = [psi_Err, uB[2]]\n        action_Psi_index = Psi_Agent.get_action(Psi_States)\n\n        # Action 0 = Stay, 1 = +1deg Command, 2 = -1deg Command\n        # Theta\n        if action_Theta_index == 2:\n            dtheta_Cmd = -1\n        else:\n            dtheta_Cmd = action_Theta_index\n        # Psi\n        if action_Psi_index == 2:\n            dpsi_Cmd = -1\n        else:\n            dpsi_Cmd = action_Psi_index\n\n        uB[1] = uB[1] + dtheta_Cmd * D2R\n        uB[2] = uB[2] + dpsi_Cmd * D2R\n\n        # State Update\n        snB = RK4Order(sB, uB, dt)\n\n        # New DQN States\n        # Theta\n        theta_Err = theta_cmd - snB[1]\n        if theta_Err > math.pi:\n            theta_Err = theta_Err - 360 * D2R\n        elif theta_Err < -math.pi:\n            theta_Err = theta_Err + 360 * D2R\n        ThetaN_States = [theta_Err, uB[1]]\n        theta_r, theta_done = RewardErr(theta_Err)\n        # Psi\n        psi_Err = psi_cmd - snB[4]\n        if psi_Err > math.pi:\n            psi_Err = psi_Err - 360 * D2R\n        elif psi_Err < -math.pi:\n            psi_Err = psi_Err + 360 * D2R\n        PsiN_States = [psi_Err, uB[2]]\n        psi_r, psi_done = RewardErr(psi_Err)\n\n        # DQN State Append = [state action reward next_state flag]\n        Theta_Agent.append_sample(Theta_States, action_Theta_index, theta_r, ThetaN_States, theta_done)\n        Psi_Agent.append_sample(Psi_States, action_Psi_index, psi_r, PsiN_States, psi_done)\n\n        # Training Model\n        # Theta\n        if len(Theta_Agent.memory) >= Theta_Agent.train_start:\n            Theta_Agent.train_model()\n        # Psi\n        if len(Psi_Agent.memory) >= Psi_Agent.train_start:\n            Psi_Agent.train_model()\n\n        theta_score += theta_r\n        psi_score += psi_r\n\n        # new State\n        sB = snB\n\n        timer = timer + dt\n\n        if timer > Epoch:\n            theta_done = True\n            psi_done = True\n            # Theta\n            if abs(theta_cmd - sB[1]) <= 2 * D2R:\n                theta_scored = theta_score\n                theta_score = 1000\n            else:\n                theta_scored = theta_score\n                theta_score = -1000\n            # Psi\n            if abs(psi_cmd - sB[4]) <= 2 * D2R:\n                psi_scored = psi_score\n                psi_score = 1000\n            else:\n                psi_scored = psi_score\n                psi_score = -1000\n\n    if theta_done and psi_done:\n        # Target Model Update\n        Theta_Agent.update_target_model()\n        Psi_Agent.update_target_model()\n\n        theta_scores.append(theta_score)\n        psi_scores.append(psi_score)\n        episodes.append(e)\n\n        print(\"episode:\", e, \" Theta score:\", theta_score, \"(\", 
round(theta_scored), \")\",\n \" Psi score:\", psi_score, \"(\", round(psi_scored), \")\", \" time:\", round(timer),\n \" theta cmd:\", round(theta_cmd * R2D, 1), \" Theta:\", round(sB[1] * R2D, 1),\n \" psi cmd:\", round(psi_cmd * R2D, 1), \" Psi:\", round(sB[4] * R2D, 1))\n if np.mean(theta_scores[-min(10, len(theta_scores)):]) >= 1000 and np.mean(psi_scores[-min(10, len(psi_scores)):]) >= 1000 and e > 10:\n Theta_Agent.model.save(\"./save_model/DQNController_theta.h5\")\n Psi_Agent.model.save(\"./save_model/DQNController_psi.h5\")\n sys.exit()\n","sub_path":"Pitch_Theta_control/Theta_Controller.py","file_name":"Theta_Controller.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"548011553","text":"class QueueNode:\r\n def __init__(self,data,next):\r\n self.data = data\r\n self.next = next\r\n\r\nclass OurQueue:\r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def enqueue(self,value):\r\n newnode = QueueNode(value,None)\r\n if self.tail is None:\r\n self.head = newnode\r\n else:\r\n self.tail.next = newnode\r\n self.tail = newnode\r\n\r\n def dequeue(self):\r\n saveme = self.head\r\n self.head = self.head.next\r\n return saveme.data\r\n\r\n def is_empty(self):\r\n return self.head is None\r\n\r\n\r\ndef main():\r\n t = OurQueue()\r\n t.enqueue(2)\r\n t.enqueue(6)\r\n t.enqueue(8)\r\n t.enqueue(9)\r\n\r\n while not t.is_empty():\r\n print(t.dequeue())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"536500792","text":"import mysql.connector\nimport random,string\n\nact_string = string.ascii_letters + string.digits\ndef create_code(count,length):\n # count = 200\n #length = 20\n code_list = []\n for x in range(count):\n re_string = \"\"\n for y in range(length):\n re_string += random.choice(act_string)\n code_list.append(re_string)\n return code_list\nif __name__ == \"__main__\":\n code_list = create_code(200,20)\n conn = mysql.connector.connect(user='root', password='root', database='test')\n cursor = conn.cursor()\n for i in range(len(code_list)):\n print(code_list[i])\n cursor.execute('insert into code (activation_code) values (%s)', [code_list[i]])\n conn.commit()\n cursor.close()\n conn.close()\n\n","sub_path":"0002/mysql_connect.py","file_name":"mysql_connect.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"201411085","text":"#Very raw algorithm to get female proportion selling albums in France.\n# coding: utf-8\nfrom gender import getGenders\nfrom string import Template\nimport pickle\nimport wikipedia\nimport re\n\ndef get_cached_genders():\n cache_file = open('genders.cache', 'rb')\n cached_genders = pickle.load(cache_file)\n cache_file.close()\n return cached_genders\n\ndef update_cached_genders(genders):\n output = open('genders.cache', 'wb')\n pickle.dump(genders, output)\n output.close()\n\ndef first_name(artist):\n first_word = artist.split(\" \")[0].lower()\n #exceptions...\n if(first_word == \"the\" or first_word == \"la\" or first_word == \"le\" or first_word == \"les\"):\n return artist.split(\" \")[1].lower()\n else:\n return first_word\n\nNAME_REGEX = '([A-ZÀÁÂÄÅÃÆÇÉÈÊËÍÌÎÏÑÓÒÔÖØÕÚÙÛ]\\w+ [A-ZÀÁÂÄÅÃÆÇÉÈÊËÍÌÎÏÑÓÒÔÖØÕÚÙÛ]\\w+)'\n\ndef 
get_gender_from_cache_or_genderize(artist, cached_genders):\n    if(cached_genders.get(artist) is not None):\n        return cached_genders.get(artist)\n    else:\n        #not in cache -> use https://github.com/block8437/gender.py (https://genderize.io)\n        gender, _, _ = getGenders([artist])[0]\n        cached_genders[artist] = gender\n        update_cached_genders(cached_genders)\n        return gender\n\ndef get_artist_from_wikipedia(artist, cached_genders, single_artist_stat, band_stat):\n    try:\n        page = wikipedia.page(artist).html()\n        members_index=page.find(\"Nom de naissance\")\n        members=[]\n\n        if(members_index!=-1):\n            members = re.findall(NAME_REGEX, page[members_index:members_index + 300])\n        else:\n            #3. get the members of each band\n            members_index = page.find(\"Membres\")\n            if(members_index!=-1):\n                members = set(re.findall(NAME_REGEX, page[members_index:members_index + 1000]))\n\n            former_members_index = page.find(\"Anciens membres\")\n            if(former_members_index!=-1):\n                members = set(re.findall(NAME_REGEX, page[members_index:former_members_index]))\n\n        for member in members:\n            name = first_name(member)\n            gender = get_gender_from_cache_or_genderize(name,cached_genders)\n            if(len(members) == 1):\n                single_artist_stat[artist] = gender\n            else:\n                band_stat[artist + \" -> \" + member] = gender\n\n    except Exception as e:\n        single_artist_stat[artist] = \"None\"\n\ndef get_artist_or_band_members_gender(artist, cached_genders, single_artist_stat, band_stat):\n    gender = get_gender_from_cache_or_genderize(first_name(artist), cached_genders)\n    if(gender == \"female\" or gender == \"male\"):\n        single_artist_stat[artist] = gender\n    else:\n        get_artist_from_wikipedia(artist, cached_genders, single_artist_stat, band_stat)\n\ndef female_and_male_stats(stat):\n    s_all = {key: value for key, value in stat.items() if value == 'female' or value == 'male'}\n    sfa = {key: value for key, value in s_all.items() if value == 'female'}\n    female_stat = len(sfa) / len(s_all)\n    sma = {key: value for key, value in s_all.items() if value == 'male'}\n    male_stat = len(sma) / len(s_all)\n    return (female_stat, male_stat)\n\ndef main(source, title):\n    #1. 
Choose artist list, put for example in a file:\n #curl http://www.snepmusique.com/tops-annuel/top-albums-fusionnes/\\?ye\\=2015 | grep -o \".*\" | sed -E \"s|(.*)|\\1|\"\n #or curl https://fr.wikipedia.org/wiki/Liste_des_artistes_ayant_vendu_le_plus_de_disques | egrep -o '.*.*{name}'.format(name=a)\n\n single_raw_data_html = \"\"\n for k, v in sorted(single_artist_stat.items()):\n single_raw_data_html+='{name}{gender}'.format(name=k, gender=v)\n\n band_raw_data_html = \"\"\n for k, v in sorted(band_stat.items()):\n band_raw_data_html+='{name}{gender}'.format(name=k, gender=v)\n\n placeholders = {\"title\":generated_page_title,\n \"female_single\":str(female_single),\n \"male_single\":str(male_single),\n \"female_band\":str(female_band),\n \"male_band\":str(male_band),\n \"single_raw_data_html\":single_raw_data_html,\n \"band_raw_data_html\":band_raw_data_html,\n \"input_artists_html\":input_artists_html}\n\n template = open(\"template.html\")\n template_source = Template( template.read() )\n template.close()\n report_filename='report.html'\n report = open(report_filename,'w');\n report.write(template_source.substitute(placeholders))\n report.close()\n print(\"Wrote report to: \"+report_filename)\n\n\nif __name__ == '__main__':\n import argparse\n description = \"From an artist list (file with one artist name per line) generates a female/male 'report.html'\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('source',\n metavar='SOURCE',\n help='path to the artist list source')\n parser.add_argument('title',\n metavar='TITLE',\n help='report title')\n args = parser.parse_args()\n kwargs = dict(args._get_kwargs())\n main(**kwargs)\n","sub_path":"female-members-in-bands.py","file_name":"female-members-in-bands.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"303001261","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport time\nfrom typing import Iterable\n\nimport requests\n\nBUGBUG_HTTP_SERVER = os.environ.get(\n    \"BUGBUG_HTTP_SERVER\", \"https://bugbug.herokuapp.com/\"\n)\n\n\ndef classification_http_request(url, bug_ids):\n    response = requests.post(\n        url, headers={\"X-Api-Key\": \"autonag\"}, json={\"bugs\": bug_ids}\n    )\n\n    response.raise_for_status()\n\n    return response.json()\n\n\ndef get_bug_ids_classification(\n    model: str, bugs: Iterable, retry_count: int = 21, retry_sleep: int = 10\n):\n    \"\"\"Get the classification for a list of bug ids.\n\n    Args:\n        model: The model to use for the classification.\n        bugs: The list of bug ids to classify; if a dictionary is passed, the\n            keys will be used as bug ids.\n        retry_count: The number of times to retry the request.\n        retry_sleep: The number of seconds to sleep between retries.\n\n    Returns:\n        A dictionary with the bug ids as keys and the classification as values.\n    \"\"\"\n    # Copy the bug ids to avoid mutating the caller's collection\n    bug_ids = set(map(int, bugs))\n    if len(bug_ids) == 0:\n        return {}\n\n    url = f\"{BUGBUG_HTTP_SERVER}/{model}/predict/batch\"\n\n    json_response = {}\n\n    for _ in range(retry_count):\n        response = classification_http_request(url, list(bug_ids))\n\n        # Check which bug ids are ready\n        for bug_id, bug_data in response[\"bugs\"].items():\n            if not bug_data.get(\"ready\", True):\n                continue\n\n            # The bug is ready, add it to the json_response and pop it\n            # up from the current batch\n            # The http service returns strings for backward compatibility reasons\n            bug_ids.remove(int(bug_id))\n            json_response[bug_id] = bug_data\n\n        if len(bug_ids) == 0:\n            break\n        else:\n            time.sleep(retry_sleep)\n\n    else:\n        total_sleep = retry_count * retry_sleep\n        msg = f\"Couldn't get {len(bug_ids)} bug classifications in {total_sleep} seconds, aborting\"\n        raise Exception(msg)\n\n    return json_response\n","sub_path":"bugbot/bugbug_utils.py","file_name":"bugbug_utils.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"151615431","text":"import uuid\nimport logging\n\n# this import pattern lets fakecouch not depend on couchdbkit\ntry:\n    import couchdbkit\nexcept ImportError:\n    import json\n\n    class ResourceNotFound(Exception):\n        pass\n\n    # copy-pasted from couchdbkit.resource\n    def encode_params(params):\n        \"\"\" encode parameters in json if needed \"\"\"\n        _params = {}\n        if params:\n            for name, value in params.items():\n                if name in ('key', 'startkey', 'endkey'):\n                    value = json.dumps(value)\n                elif value is None:\n                    continue\n                elif not isinstance(value, basestring):\n                    value = json.dumps(value)\n                _params[name] = value\n        return _params\nelse:\n    from couchdbkit import ResourceNotFound\n    from couchdbkit.resource import encode_params\n\n\n# This import pattern supports Python 2 and 3\ntry:\n    from urllib.parse import urlencode\nexcept ImportError:\n    from urllib import urlencode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FakeCouchDb(object):\n    \"\"\"\n    An in-memory mock of CouchDB, instantiated with a simple mapping\n    of resource and params to results.\n\n    mc = FakeCouchDb(views={\n        'my/view': [\n            (\n                {'startkey': ['j'], 'endkey': ['j', {}], reduce=True},\n                [\n                    ... result objects ...\n                ]\n            ),\n        ]},\n        docs={\n            'my_doc_id': { ... 
doc object ...}\n }\n })\n\n doc = MyDoc()\n doc.save()\n\n mc.mock_docs[doc['_id']] == doc\n \"\"\"\n\n def __init__(self, views=None, docs=None):\n if views:\n self.view_mock = dict([\n (resource, dict([(self._param_key(params), result) for params, result in view_results]))\n for resource, view_results in views.items()])\n else:\n self.view_mock = {}\n\n self.mock_docs = docs or {}\n\n def reset(self):\n logger.info('Fakecouch reset')\n self.view_mock = {}\n self.mock_docs = {}\n\n def add_view(self, name, view_results):\n self.view_mock[name] = dict([(self._param_key(params), result) for params, result in view_results])\n logger.debug('View added: %s with keys: %s', name, self.view_mock[name].keys())\n\n def _param_key(self, params):\n params = encode_params(params)\n return urlencode(sorted(params.items(), key=lambda p: p[0]))\n\n def view(self, view_name, schema=None, wrapper=None, **params):\n view = self.view_mock.get(view_name, {})\n key = self._param_key(params)\n rows = view.get(key, [])\n logger.debug('view(view_name=%s, key=%s): results=%s', view_name, key, rows)\n if wrapper:\n rows = [wrapper(r) for r in rows]\n return MockResult(rows)\n\n def save_doc(self, doc, **params):\n if '_id' in doc:\n doc[\"_rev\"] = str(uuid.uuid1())\n self.mock_docs[doc[\"_id\"]] = doc\n logger.debug('save_doc(%s)', doc['_id'])\n else:\n id = uuid.uuid1()\n rev = str(uuid.uuid1())\n doc.update({ '_id': id, '_rev': rev})\n self.mock_docs[doc[\"_id\"]] = doc\n logger.debug('save_doc(%s): ID generated', doc['_id'])\n\n def get(self, docid, rev=None, wrapper=None):\n doc = self.mock_docs.get(docid, None)\n logger.debug('get(%s): %s', docid, 'Found' if doc else 'Not found')\n if not doc:\n raise ResourceNotFound\n elif wrapper:\n return wrapper(doc)\n else:\n return doc\n\n def open_doc(self, docid):\n logger.debug('open_doc(%s)', docid)\n doc = self.mock_docs.get(docid, None)\n return doc\n\n def delete_doc(self, docid):\n if docid not in self.mock_docs:\n raise ResourceNotFound\n else:\n del self.mock_docs[docid]\n\n\nclass MockResult(object):\n def __init__(self, rows):\n self.rows = rows\n\n def all(self):\n return self.rows\n\n @property\n def total_rows(self):\n return len(self.rows)\n\n def __iter__(self):\n return iter(self.rows)\n","sub_path":"fakecouch.py","file_name":"fakecouch.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"48018102","text":"#\n# Copyright 2018 the original author or authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC\nimport voltha.core.flow_decomposer as fd\nimport openolt_platform as platform\nfrom voltha.adapters.openolt.protos import openolt_pb2\nfrom voltha.registry import registry\n\nHSIA_FLOW_INDEX = 0 # FIXME\nDHCP_FLOW_INDEX = 1 # FIXME\nDHCP_DOWNLINK_FLOW_INDEX = 6 # FIXME\nEAPOL_FLOW_INDEX = 2 # FIXME\nEAPOL_DOWNLINK_FLOW_INDEX = 3 # FIXME\nEAPOL_DOWNLINK_SECONDARY_FLOW_INDEX = 4 # 
FIXME\nEAPOL_UPLINK_SECONDARY_FLOW_INDEX = 5 # FIXME\n\n\nEAP_ETH_TYPE = 0x888e\n\n# FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py\nDEFAULT_MGMT_VLAN = 4091\n\n\nclass OpenOltFlowMgr(object):\n\n def __init__(self, log, stub, device_id):\n self.log = log\n self.stub = stub\n self.device_id = device_id\n self.flow_proxy = registry('core').get_proxy(\n '/devices/{}/flows'.format(self.device_id))\n\n def add_flow(self, flow, is_down_stream):\n self.log.debug('add flow', flow=flow, is_down_stream=is_down_stream)\n classifier_info = dict()\n action_info = dict()\n\n in_port = fd.get_in_port(flow)\n assert in_port is not None\n\n for field in fd.get_ofb_fields(flow):\n if field.type == fd.ETH_TYPE:\n classifier_info['eth_type'] = field.eth_type\n self.log.debug('field-type-eth-type',\n eth_type=classifier_info['eth_type'])\n elif field.type == fd.IP_PROTO:\n classifier_info['ip_proto'] = field.ip_proto\n self.log.debug('field-type-ip-proto',\n ip_proto=classifier_info['ip_proto'])\n elif field.type == fd.IN_PORT:\n classifier_info['in_port'] = field.port\n self.log.debug('field-type-in-port',\n in_port=classifier_info['in_port'])\n elif field.type == fd.VLAN_VID:\n classifier_info['vlan_vid'] = field.vlan_vid & 0xfff\n self.log.debug('field-type-vlan-vid',\n vlan=classifier_info['vlan_vid'])\n elif field.type == fd.VLAN_PCP:\n classifier_info['vlan_pcp'] = field.vlan_pcp\n self.log.debug('field-type-vlan-pcp',\n pcp=classifier_info['vlan_pcp'])\n elif field.type == fd.UDP_DST:\n classifier_info['udp_dst'] = field.udp_dst\n self.log.debug('field-type-udp-dst',\n udp_dst=classifier_info['udp_dst'])\n elif field.type == fd.UDP_SRC:\n classifier_info['udp_src'] = field.udp_src\n self.log.debug('field-type-udp-src',\n udp_src=classifier_info['udp_src'])\n elif field.type == fd.IPV4_DST:\n classifier_info['ipv4_dst'] = field.ipv4_dst\n self.log.debug('field-type-ipv4-dst',\n ipv4_dst=classifier_info['ipv4_dst'])\n elif field.type == fd.IPV4_SRC:\n classifier_info['ipv4_src'] = field.ipv4_src\n self.log.debug('field-type-ipv4-src',\n ipv4_dst=classifier_info['ipv4_src'])\n elif field.type == fd.METADATA:\n classifier_info['metadata'] = field.table_metadata\n self.log.debug('field-type-metadata',\n metadata=classifier_info['metadata'])\n else:\n raise NotImplementedError('field.type={}'.format(\n field.type))\n\n for action in fd.get_actions(flow):\n if action.type == fd.OUTPUT:\n action_info['output'] = action.output.port\n self.log.debug('action-type-output',\n output=action_info['output'],\n in_port=classifier_info['in_port'])\n elif action.type == fd.POP_VLAN:\n action_info['pop_vlan'] = True\n self.log.debug('action-type-pop-vlan', in_port=in_port)\n elif action.type == fd.PUSH_VLAN:\n action_info['push_vlan'] = True\n action_info['tpid'] = action.push.ethertype\n self.log.debug('action-type-push-vlan',\n push_tpid=action_info['tpid'], in_port=in_port)\n if action.push.ethertype != 0x8100:\n self.log.error('unhandled-tpid',\n ethertype=action.push.ethertype)\n elif action.type == fd.SET_FIELD:\n # action_info['action_type'] = 'set_field'\n _field = action.set_field.field.ofb_field\n assert (action.set_field.field.oxm_class ==\n OFPXMC_OPENFLOW_BASIC)\n self.log.debug('action-type-set-field',\n field=_field, in_port=in_port)\n if _field.type == fd.VLAN_VID:\n self.log.debug('set-field-type-vlan-vid',\n vlan_vid=_field.vlan_vid & 0xfff)\n action_info['vlan_vid'] = (_field.vlan_vid & 0xfff)\n else:\n self.log.error('unsupported-action-set-field-type',\n field_type=_field.type)\n else:\n 
self.log.error('unsupported-action-type',\n action_type=action.type, in_port=in_port)\n\n # FIXME - Why ignore downstream flows?\n if is_down_stream is False:\n intf_id = platform.intf_id_from_uni_port_num(\n classifier_info['in_port'])\n onu_id = platform.onu_id_from_port_num(\n classifier_info['in_port'])\n self.divide_and_add_flow(intf_id, onu_id,\n flow.priority, classifier_info,\n action_info)\n # else:\n # self.log.info('ignore downstream flow', flow=flow,\n # classifier_info=classifier_info,\n # action_info=action_info)\n\n # FIXME - No need for divide_and_add_flow if\n # both upstream and downstream flows\n # are acted upon (not just upstream flows).\n def divide_and_add_flow(self, intf_id, onu_id, priority, classifier,\n action):\n if 'ip_proto' in classifier:\n if classifier['ip_proto'] == 17:\n self.log.debug('dhcp flow add')\n self.add_dhcp_trap(intf_id, onu_id, priority, classifier,\n action)\n elif classifier['ip_proto'] == 2:\n self.log.debug('igmp flow add ignored')\n else:\n self.log.debug(\"Invalid-Classifier-to-handle\",\n classifier=classifier,\n action=action)\n elif 'eth_type' in classifier:\n if classifier['eth_type'] == EAP_ETH_TYPE:\n self.log.debug('eapol flow add')\n self.add_eapol_flow(intf_id, onu_id, priority)\n\n elif 'push_vlan' in action:\n self.add_data_flow(intf_id, onu_id, priority, classifier, action)\n else:\n self.log.debug('Invalid-flow-type-to-handle',\n classifier=classifier,\n action=action)\n\n def add_data_flow(self, intf_id, onu_id, priority, uplink_classifier,\n uplink_action):\n\n downlink_classifier = dict(uplink_classifier)\n downlink_action = dict(uplink_action)\n\n uplink_classifier['pkt_tag_type'] = 'single_tag'\n\n downlink_classifier['pkt_tag_type'] = 'double_tag'\n downlink_classifier['vlan_vid'] = uplink_action['vlan_vid']\n downlink_classifier['metadata'] = uplink_classifier['vlan_vid']\n del downlink_action['push_vlan']\n downlink_action['pop_vlan'] = True\n\n # To-Do right now only one GEM port is supported, so below method\n # will take care of handling all the p bits.\n # We need to revisit when mulitple gem port per p bits is needed.\n self.add_hsia_flow(intf_id, onu_id, priority, uplink_classifier,\n uplink_action, downlink_classifier, downlink_action,\n HSIA_FLOW_INDEX)\n\n # Secondary EAP on the subscriber vlan\n (eap_active, eap_priority) = self.is_eap_enabled(intf_id, onu_id)\n if eap_active:\n self.add_eapol_flow(intf_id, onu_id, eap_priority,\n uplink_eapol_id=EAPOL_UPLINK_SECONDARY_FLOW_INDEX,\n downlink_eapol_id=EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX,\n vlan_id=uplink_classifier['vlan_vid'])\n\n def add_hsia_flow(self, intf_id, onu_id, priority, uplink_classifier,\n uplink_action, downlink_classifier, downlink_action,\n hsia_id):\n\n gemport_id = platform.mk_gemport_id(onu_id)\n flow_id = platform.mk_flow_id(intf_id, onu_id, hsia_id)\n\n self.log.debug('add upstream flow', onu_id=onu_id,\n classifier=uplink_classifier, action=uplink_action,\n gemport_id=gemport_id, flow_id=flow_id)\n\n flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=flow_id, flow_type=\"upstream\",\n access_intf_id=intf_id, gemport_id=gemport_id, priority=priority,\n classifier=self.mk_classifier(uplink_classifier),\n action=self.mk_action(uplink_action))\n\n self.stub.FlowAdd(flow)\n\n self.log.debug('add downstream flow', classifier=downlink_classifier,\n action=downlink_action, gemport_id=gemport_id,\n flow_id=flow_id)\n\n flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=flow_id, flow_type=\"downstream\",\n access_intf_id=intf_id, 
gemport_id=gemport_id,\n priority=priority,\n classifier=self.mk_classifier(downlink_classifier),\n action=self.mk_action(downlink_action))\n\n self.stub.FlowAdd(flow)\n\n def add_dhcp_trap(self, intf_id, onu_id, priority, classifier, action):\n\n self.log.debug('add dhcp upstream trap', classifier=classifier,\n action=action)\n\n action.clear()\n action['trap_to_host'] = True\n classifier['pkt_tag_type'] = 'single_tag'\n classifier.pop('vlan_vid', None)\n\n gemport_id = platform.mk_gemport_id(onu_id)\n flow_id = platform.mk_flow_id(intf_id, onu_id, DHCP_FLOW_INDEX)\n\n upstream_flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=flow_id, flow_type=\"upstream\",\n access_intf_id=intf_id, network_intf_id=0, gemport_id=gemport_id,\n priority=priority, classifier=self.mk_classifier(classifier),\n action=self.mk_action(action))\n\n self.stub.FlowAdd(upstream_flow)\n\n # FIXME - Fix OpenOLT handling of downstream flows instead\n # of duplicating the downstream flow from the upstream\n # flow.\n # FIXME - ONOS should send explicit upstream and downstream\n # exact dhcp trap flow.\n classifier['udp_src'] = 67\n classifier['udp_dst'] = 68\n classifier['pkt_tag_type'] = 'double_tag'\n action.pop('push_vlan', None)\n\n flow_id = platform.mk_flow_id(intf_id, onu_id,\n DHCP_DOWNLINK_FLOW_INDEX)\n\n downstream_flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=flow_id, flow_type=\"downstream\",\n access_intf_id=intf_id, network_intf_id=0, gemport_id=gemport_id,\n priority=priority, classifier=self.mk_classifier(classifier),\n action=self.mk_action(action))\n\n self.log.debug('add dhcp downstream trap', access_intf_id=intf_id,\n onu_id=onu_id, flow_id=flow_id)\n self.stub.FlowAdd(downstream_flow)\n\n def add_eapol_flow(self, intf_id, onu_id, priority,\n uplink_eapol_id=EAPOL_FLOW_INDEX,\n downlink_eapol_id=EAPOL_DOWNLINK_FLOW_INDEX,\n vlan_id=DEFAULT_MGMT_VLAN):\n\n # self.log.debug('add eapol flow pre-process',\n # classifier=uplink_classifier)\n # #action=uplink_action)\n\n downlink_classifier = {}\n downlink_classifier['eth_type'] = EAP_ETH_TYPE\n downlink_classifier['pkt_tag_type'] = 'single_tag'\n downlink_classifier['vlan_vid'] = vlan_id\n\n downlink_action = {}\n downlink_action['push_vlan'] = True\n downlink_action['vlan_vid'] = vlan_id\n\n uplink_classifier = {}\n uplink_classifier['eth_type'] = EAP_ETH_TYPE\n uplink_classifier['pkt_tag_type'] = 'single_tag'\n uplink_classifier['vlan_vid'] = vlan_id\n\n uplink_action = {}\n uplink_action['trap_to_host'] = True\n\n gemport_id = platform.mk_gemport_id(onu_id)\n\n # Add Upstream EAPOL Flow.\n\n uplink_flow_id = platform.mk_flow_id(intf_id, onu_id, uplink_eapol_id)\n\n upstream_flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=uplink_flow_id, flow_type=\"upstream\",\n access_intf_id=intf_id, gemport_id=gemport_id, priority=priority,\n classifier=self.mk_classifier(uplink_classifier),\n action=self.mk_action(uplink_action))\n\n self.stub.FlowAdd(upstream_flow)\n\n # Add Downstream EAPOL Flow.\n downlink_flow_id = platform.mk_flow_id(intf_id, onu_id,\n downlink_eapol_id)\n\n downstream_flow = openolt_pb2.Flow(\n onu_id=onu_id, flow_id=downlink_flow_id, flow_type=\"downstream\",\n access_intf_id=intf_id, gemport_id=gemport_id,\n classifier=self.mk_classifier(downlink_classifier),\n action=self.mk_action(downlink_action))\n\n self.stub.FlowAdd(downstream_flow)\n\n self.log.debug('eap flows', upstream_flow=upstream_flow,\n downstream_flow=downstream_flow)\n\n def mk_classifier(self, classifier_info):\n\n classifier = openolt_pb2.Classifier()\n\n if 
'eth_type' in classifier_info:\n            classifier.eth_type = classifier_info['eth_type']\n        if 'ip_proto' in classifier_info:\n            classifier.ip_proto = classifier_info['ip_proto']\n        if 'vlan_vid' in classifier_info:\n            classifier.o_vid = classifier_info['vlan_vid']\n        if 'metadata' in classifier_info:\n            classifier.i_vid = classifier_info['metadata']\n        if 'vlan_pcp' in classifier_info:\n            classifier.o_pbits = classifier_info['vlan_pcp']\n        if 'udp_src' in classifier_info:\n            classifier.src_port = classifier_info['udp_src']\n        if 'udp_dst' in classifier_info:\n            classifier.dst_port = classifier_info['udp_dst']\n        if 'ipv4_dst' in classifier_info:\n            classifier.dst_ip = classifier_info['ipv4_dst']\n        if 'ipv4_src' in classifier_info:\n            classifier.src_ip = classifier_info['ipv4_src']\n        if 'pkt_tag_type' in classifier_info:\n            if classifier_info['pkt_tag_type'] == 'single_tag':\n                classifier.pkt_tag_type = 'single_tag'\n            elif classifier_info['pkt_tag_type'] == 'double_tag':\n                classifier.pkt_tag_type = 'double_tag'\n            elif classifier_info['pkt_tag_type'] == 'untagged':\n                classifier.pkt_tag_type = 'untagged'\n            else:\n                classifier.pkt_tag_type = 'none'\n\n        return classifier\n\n    def mk_action(self, action_info):\n        action = openolt_pb2.Action()\n\n        if 'pop_vlan' in action_info:\n            action.o_vid = action_info['vlan_vid']\n            action.cmd.remove_outer_tag = True\n        elif 'push_vlan' in action_info:\n            action.o_vid = action_info['vlan_vid']\n            action.cmd.add_outer_tag = True\n        elif 'trap_to_host' in action_info:\n            action.cmd.trap_to_host = True\n        else:\n            self.log.info('Invalid-action-field', action_info=action_info)\n            return\n        return action\n\n    def is_eap_enabled(self, intf_id, onu_id):\n        flows = self.flow_proxy.get('/').items\n\n        for flow in flows:\n            eap_flow = False\n            eap_intf_id = None\n            eap_onu_id = None\n            for field in fd.get_ofb_fields(flow):\n                if field.type == fd.ETH_TYPE:\n                    if field.eth_type == EAP_ETH_TYPE:\n                        eap_flow = True\n                if field.type == fd.IN_PORT:\n                    eap_intf_id = platform.intf_id_from_uni_port_num(\n                        field.port)\n                    eap_onu_id = platform.onu_id_from_port_num(field.port)\n\n            if eap_flow:\n                self.log.debug('eap flow detected', onu_id=onu_id,\n                               intf_id=intf_id, eap_intf_id=eap_intf_id,\n                               eap_onu_id=eap_onu_id)\n            if eap_flow and intf_id == eap_intf_id and onu_id == eap_onu_id:\n                return (True, flow.priority)\n\n        return (False, 0)\n","sub_path":"voltha/adapters/openolt/openolt_flow_mgr.py","file_name":"openolt_flow_mgr.py","file_ext":"py","file_size_in_byte":17717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"264757282","text":"import torch\nfrom datasets import load_dataset, concatenate_datasets\nimport numpy as np\n\ndef paraphrase_loaders(config, tokenizer, max_len=256):\n\n    domains = config['domains']\n\n    # which label has the least number of samples in the train data as well as the (test)validation data across all domains\n    train_label_dist = 21829 # manually checked \n    test_label_dist = 7075 # manually checked \n\n\n    # we are not going to take examples whose two sentences together exceed 256 characters\n    paws = load_dataset(\"paws\", 'labeled_final')\n    qqp = load_dataset(\"glue\", 'qqp')\n\n\n    # # If you want to filter the data based on length | no filtering in the actual experiment\n    for _, (paws_set, qqp_set) in enumerate(zip(paws.keys(), qqp.keys())):\n\n        # # applying filter\n        # paws[paws_set] = paws[paws_set].filter(lambda example : (len(example['sentence1'])+len(example['sentence2']))<=max_len)\n        # qqp[qqp_set] = qqp[qqp_set].filter(lambda example : (len(example['question1'])+len(example['question2']))<=max_len)\n\n        
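# NOTE: the length filter above is deliberately left commented out to match the original experiment (no filtering was applied)\n        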
# both paws and qqp have different names for the 2 input sentences\n        # paws = (sentence1, sentence2) and qqp = (question1, question2); update the qqp columns to match paws\n\n        qqp[qqp_set] = qqp[qqp_set].rename_column('question1', 'sentence1') \n        qqp[qqp_set] = qqp[qqp_set].rename_column('question2', 'sentence2')\n\n\n    # # merge the validation and test of both datasets\n    paws['test'] = concatenate_datasets(dsets=[paws['test'], paws['validation']])\n    qqp['test'] = concatenate_datasets(dsets=[qqp['test'], qqp['validation']])\n\n    datasets = {\n        \"paws\":paws,\n        \"qqp\":qqp\n    }\n\n    labels = list(set(qqp['train']['label']))\n\n    domain_dsets = {}\n    for domain in domains:\n\n        domain_dsets[domain] = {\n            \"train\":[]\n        }\n        domain_dsets[domain].update({\"test\":[]})\n\n    # take an equal number of samples for each domain, for each label, for each set\n    for label in labels:\n        \n        for domain in domain_dsets:\n\n            train = datasets[domain]['train'].filter(lambda example:example['label']==label).shuffle().select(range(train_label_dist))\n            test = datasets[domain]['test'].filter(lambda example:example['label']==label).shuffle().select(range(test_label_dist))\n\n            domain_dsets[domain]['train'].append(train)\n            domain_dsets[domain]['test'].append(test)\n\n\n\n\n\n    # concatenate the datasets and shuffle them\n    # before this each entry is a list of datasets; afterwards it is a single dataset\n    for domain in domains:\n\n        # concatenate the per-label datasets\n        domain_dsets[domain]['train'] = concatenate_datasets(dsets=domain_dsets[domain]['train']).shuffle()\n        domain_dsets[domain]['test'] = concatenate_datasets(dsets=domain_dsets[domain]['test']).shuffle()\n\n\n        # # tokenize \n        domain_dsets[domain]['train'] = domain_dsets[domain]['train'].map(lambda x: tokenizer(x['sentence1'], x['sentence2'], padding='max_length', truncation=True, max_length=config['max_seq_length']), batched=True)\n        domain_dsets[domain]['test'] = domain_dsets[domain]['test'].map(lambda x: tokenizer(x['sentence1'], x['sentence2'], padding='max_length', truncation=True, max_length=config['max_seq_length']), batched=True)\n        \n\n\n        # change the dtype \n        domain_dsets[domain]['train'].set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])\n        domain_dsets[domain]['test'].set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])\n\n        # create dataloaders\n        domain_dsets[domain]['train'] = torch.utils.data.DataLoader(dataset = domain_dsets[domain]['train'], batch_size=config[\"batch_size\"], shuffle=True, num_workers=4)\n        domain_dsets[domain]['test'] = torch.utils.data.DataLoader(dataset = domain_dsets[domain]['test'], batch_size=config[\"batch_size\"], shuffle=True, num_workers=4)\n        domain_dsets[domain]['valid'] = domain_dsets[domain]['test'] # validation and test will be the same\n        \n        \n\n\n    # why the hell am I doing this? expose the per-domain dict under the name the caller expects\n    loaders = domain_dsets\n    \n\n    return loaders\n\n\n\n","sub_path":"adaptil/dataset/paraphrase.py","file_name":"paraphrase.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"560915480","text":"#!/usr/bin/python3\n\"\"\" Start the Flask web app, using the storage engine \"\"\"\nfrom models import storage\nfrom flask import render_template\nfrom models.state import State\nfrom models.city import City\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/cities_by_states', strict_slashes=False)\ndef index():\n    \"\"\" Index to display lists \"\"\"\n    states = storage.all(State)\n    cities = storage.all(City)\n    # states_list = {}\n    # for key, 
value in states.items():\n    #     states_list[value.name] = value\n    return render_template('8-cities_by_states.html', states=states,\n                           cities=cities)\n\n\n@app.teardown_appcontext\ndef close(exception):\n    ''' Close the connection with the db '''\n    storage.close()\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","sub_path":"web_flask/8-cities_by_states.py","file_name":"8-cities_by_states.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"150966180","text":"# -*- coding: utf-8 -*-\n\nimport xgboost as xgb\nimport numpy as np\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn import metrics\nimport time\nimport matplotlib.pyplot as plt\nfrom . import log_class\n# parameters to be tuned\ntunned_max_depth = [3,5,7,9]\ntunned_learning_rate =[0.01,0.015,0.025,0.05,0.1] # aka eta in xgboost\ntunned_min_child_weight = [1,3,5,7]\ntunned_gamma = [0.05,0.1,0.3,0.5,0.7,0.9,1]\ntunned_colsample_bytree = [0.6,0.7,0.8,1]\n# TODO: subsample, lambda, alpha, etc. still need to be added\n\n\n#define a decorator function to record the running time\n'''\nhttp://xgboost.readthedocs.io/en/latest/parameter.html\nhttp://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn\n'''\nclass xgboost_CV(object):\n    def __init__(self,x,y,tunning_params,metric,metric_proba = 0,metric_name='auc',scoring='roc_auc',n_jobs=2,save_model = 0,processed_data_version_dir = './',if_classification=False):\n        '''\n        tunning_params: dict; each key is a parameter name to tune, each value is its list of candidate values\n        metric_proba indicates if the metric needs the probability to calculate the score\n        metric is the evaluation function used during training; I use it manually to assess how training went.\n        metric_name is a parameter of xgboost's cv.\n        scoring is a parameter set specifically for GridSearchCV, because GridSearchCV only accepts a fixed set of strings.\n        scoring can only be one of sklearn's predefined scorers, and its value is higher-is-better.\n        When the metric is higher-is-better, the three agree; otherwise scoring is inconsistent with metric and metric_name.\n        '''\n        #default metrics should be metrics.log_loss\n        import os\n        if not os.path.exists(processed_data_version_dir):\n            os.mkdir(processed_data_version_dir)\n        self.x = x\n        self.y = y\n        self.tunning_params = tunning_params\n        # define the algorithm using default parameters\n        self.model = None\n        self.cv_folds = 5\n        self.early_stopping_rounds = 50\n        self.metric = metric\n        self.metric_proba = metric_proba\n        self.metric_name = metric_name\n        self.scoring = scoring\n        self.n_jobs = n_jobs\n        self.save_model = save_model\n        self.train_scores = []\n        self.cv_scores = []\n        self.logger = log_class.log_class('xgboost',top_level = processed_data_version_dir)\n        self.path = processed_data_version_dir\n        self.if_classification = if_classification\n\n    def plot_save(self,name='xgboostRegression'):\n        '''\n        :param name: model name\n        :return: None, but saves the model as a pkl in the relevant directory\n        '''\n        fig = plt.figure()\n        ax1 = fig.add_subplot(1,2,1)\n        ax1.plot(self.train_scores)\n        ax1.set_xlabel('train_scores')\n        ax1.set_ylabel(self.metric.__name__)\n        ax2 = fig.add_subplot(1,2,2)\n        ax2.plot(self.cv_scores)\n        ax2.set_xlabel('cv scores')\n\n        #save\n        import os\n        npath = self.path\n        if not os.path.exists(npath+'/curve'):\n            os.mkdir(npath+'/curve')\n        import time\n        cur_time = time.strftime(\"%Y-%m-%d-%H-%M\",time.localtime())\n        fig.savefig(npath+'/curve/'+name+'_'+cur_time+'_train_cv.png')\n\n        #save train score and cv score\n        str_train_score =\"train score sequence \"+\" \".join([str(item) for item in self.train_scores])\n        str_cv_score = \"cv score sequence \"+ \" \".join([str(item) for item in 
self.cv_scores])\n        self.logger.add(str_train_score)\n        self.logger.add(str_cv_score)\n\n        #determine if save model\n        if self.save_model==1:\n            #save model here\n            from sklearn.externals import joblib\n            if not os.path.exists(npath+'/modules'):\n                os.mkdir(npath+'/modules')\n            joblib.dump(self.model,npath+'/modules/'+name+\"_\"+cur_time+\".pkl\")\n\n    # get the best num rounds after changing a parameter\n    def modelfit(self):\n        xgb_param = self.model.get_xgb_params()\n        dtrain = xgb.DMatrix(self.x,label = self.y)\n        cvresult = xgb.cv(xgb_param,dtrain,num_boost_round=500,nfold=self.cv_folds,metrics=self.metric_name,early_stopping_rounds=self.early_stopping_rounds)\n        self.model.set_params(n_estimators=cvresult.shape[0])\n        self.model.fit(self.x,self.y)\n\n        #calculate train score\n        if self.metric_proba == 0:\n            pred = self.model.predict(self.x)\n        else:\n            pred = self.model.predict_proba(self.x)\n        self.train_scores.append(self.metric(self.y,pred))\n\n\n    def cv_score(self):\n        # 5-fold cross-validation; stratified for classification, plain KFold for regression\n        if self.if_classification:\n            kf = StratifiedKFold(n_splits = 5,shuffle=True,random_state=2018)\n        else:\n            kf = KFold(n_splits=5,shuffle=True,random_state=2018)\n        score = []\n        params = self.model.get_xgb_params()\n        for train_ind,test_ind in kf.split(self.x,self.y):\n            train_valid_x,train_valid_y = self.x[train_ind],self.y[train_ind]\n            test_valid_x,test_valid_y = self.x[test_ind],self.y[test_ind]\n            dtrain = xgb.DMatrix(train_valid_x,label = train_valid_y)\n            dtest = xgb.DMatrix(test_valid_x)\n            pred_model = xgb.train(params,dtrain,num_boost_round=int(params['n_estimators']))\n            if self.metric_proba == 0:\n                pred_test = pred_model.predict(dtest)\n            else:\n                pred_test = pred_model.predict_proba(dtest)\n            score.append(self.metric(test_valid_y,pred_test))\n        mean_cv_score = np.mean(score)\n        self.cv_scores.append(mean_cv_score)\n        print('final {} on cv:{}'.format(self.metric.__name__,mean_cv_score))\n\n\n    def cross_validation(self):\n        scoring = self.scoring\n        for param_item in self.tunning_params.keys():\n            print('tuning {} ...'.format(param_item))\n            params = {param_item:self.tunning_params[param_item]}\n            print(self.model.get_params())\n            gsearch = GridSearchCV(self.model,param_grid=params,scoring=scoring,n_jobs=1,iid=False,cv=3)\n            gsearch.fit(self.x,self.y)\n            # a way to use a variable as the keyword-argument name\n            to_set = {param_item:gsearch.best_params_[param_item]}\n            self.model.set_params(**to_set)\n            print(gsearch.best_params_)\n            self.modelfit()\n            print('best_num_round after tuning param: {}'.format(self.model.get_params()['n_estimators']))\n            self.cv_score()\n\n\n        ## save the final parameters\n        params = self.model.get_params()\n        self.logger.add(params,ifdict=1)\n        # retrain once with the new parameters\n        self.model.fit(self.x,self.y)\n\n        return self.model\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"kagglemethods/xgboost_base.py","file_name":"xgboost_base.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"183339842","text":"\"\"\"\n792. 
Kth Prime Number\nGiven a prime number n, output its rank among the primes (which prime it is).\n\nExample\nGiven n = 3, return 2.\n\nExplanation:\n[2,3,5], 3 is the 2nd prime.\nGiven n = 11, return 5.\n\nExplanation:\n[2,3,5,7,11], 11 is the 5th prime.\n\"\"\"\n\n\nclass Solution:\n    \"\"\"\n    @param n: the number\n    @return: the rank of the number\n    \"\"\"\n\n    # time:1007ms\n    def kthPrime(self, n):\n        # write your code here\n        res = []\n        for i in range(2, n + 1):\n            if self.zhishu(i):\n                res.append(i)\n        # the problem asks for the rank, so return the count of primes <= n rather than the list itself\n        return len(res)\n\n    # 6k±1 trial-division primality test\n    def zhishu(self, n):\n        import math\n        if n == 2 or n == 3:\n            return True\n        if n % 2 == 0 or n % 3 == 0:\n            return False\n        k = int(math.sqrt(n) + 1)\n        for i in range(5, k, 6):\n            if n % i == 0 or n % (i + 2) == 0:\n                return False\n        return True\n\n\ns = Solution()\nprint(s.kthPrime(11))\n","sub_path":"算法 - 数学/数学/792.Kth Prime Number.py","file_name":"792.Kth Prime Number.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"421475896","text":"'''\r\nCreated on 2012-6-4\r\n\r\n@author: Sky\r\n'''\r\nfrom SocketLib.Telnet import *\r\n\r\ng_telnetcolors=[[[0 for x in range(3)] for y in range(3)] for z in range(3)]\r\ng_telnetcolors[0][0][0] = black + dim\r\ng_telnetcolors[0][0][1] = blue + dim\r\ng_telnetcolors[0][0][2] = blue + bold\r\ng_telnetcolors[0][1][0] = green + dim\r\ng_telnetcolors[0][1][1] = cyan + dim\r\ng_telnetcolors[0][1][2] = blue + bold\r\ng_telnetcolors[0][2][0] = green + bold\r\ng_telnetcolors[0][2][1] = green + bold\r\ng_telnetcolors[0][2][2] = cyan + bold\r\n\r\ng_telnetcolors[1][0][0] = red + dim\r\ng_telnetcolors[1][0][1] = magenta + dim\r\ng_telnetcolors[1][0][2] = magenta + bold\r\ng_telnetcolors[1][1][0] = yellow + dim\r\ng_telnetcolors[1][1][1] = white + dim\r\ng_telnetcolors[1][1][2] = blue + bold\r\ng_telnetcolors[1][2][0] = green + bold\r\ng_telnetcolors[1][2][1] = green + bold\r\ng_telnetcolors[1][2][2] = cyan + bold\r\n\r\ng_telnetcolors[2][0][0] = red + bold\r\ng_telnetcolors[2][0][1] = red + bold\r\ng_telnetcolors[2][0][2] = magenta + bold\r\ng_telnetcolors[2][1][0] = yellow + dim\r\ng_telnetcolors[2][1][1] = red + bold\r\ng_telnetcolors[2][1][2] = magenta + bold\r\ng_telnetcolors[2][2][0] = yellow + bold\r\ng_telnetcolors[2][2][1] = yellow + bold\r\ng_telnetcolors[2][2][2] = white + bold\r\n\r\nclass BetterTelnet:\r\n    def __init__(self):\r\n        self.m_buffer = \"\"\r\n    \r\n    def GetBuffered(self):\r\n        return len(self.m_buffer)\r\n    \r\n    def Translate(self, p_conn, p_buffer):\r\n        for i in range(0, len(p_buffer)):\r\n            c = p_buffer[i]\r\n            buffersize = len(self.m_buffer)\r\n            if int(c) >= 32 and int(c) != 127 and buffersize < 1024:\r\n                self.m_buffer += chr(c)\r\n            elif int(c) == 8 and buffersize > 0:\r\n                self.m_buffer = self.m_buffer[0:buffersize - 1]\r\n            elif c == 10 or c == 13:\r\n                if buffersize > 0 and p_conn.Handler() != None:\r\n                    p_conn.Handler().Handle(self.m_buffer)\r\n                self.m_buffer = \"\"\r\n    \r\n    def SendString(self, p_conn, p_string):\r\n        string = self.TranslateColors(p_string)\r\n        p_conn.BufferData(string)\r\n    \r\n    def TranslateColors(self, p_str):\r\n        string = p_str.replace(\"<$black>\", black).replace(\"<$red>\", red).replace(\"<$green>\", green).replace(\"<$yellow>\", yellow).replace(\"<$blue>\", blue).replace(\"<$magenta>\", magenta).replace(\"<$cyan>\", cyan).replace(\"<$white>\", white).replace(\"<$bold>\", bold).replace(\"<$dim>\", dim).replace(\"<$reset>\", reset)\r\n        i = p_str.find(\"<\")\r\n        \r\n        while i != -1:\r\n            if p_str[i + 1] == '#':\r\n                j = p_str.find(\">\", i)\r\n                if j != -1 and j - i == 8:\r\n                    temp = p_str[i:j + 1]\r\n                    string = string.replace(temp, self.TranslateNumberColor(temp))\r\n
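            # keep scanning for any further <#RRGGBB> tags in the string\r\n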
\ns = Solution()\nprint(s.kthPrime(11))\n","sub_path":"算法 - 数学/数学/792.Kth Prime Number.py","file_name":"792.Kth Prime Number.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"421475896","text":"'''\r\nCreated on 2012-6-4\r\n\r\n@author: Sky\r\n'''\r\nfrom SocketLib.Telnet import *\r\n\r\ng_telnetcolors=[[[0 for x in range(3)] for y in range(3)] for z in range(3)]\r\ng_telnetcolors[0][0][0] = black + dim\r\ng_telnetcolors[0][0][1] = blue + dim\r\ng_telnetcolors[0][0][2] = blue + bold\r\ng_telnetcolors[0][1][0] = green + dim\r\ng_telnetcolors[0][1][1] = cyan + dim\r\ng_telnetcolors[0][1][2] = blue + bold\r\ng_telnetcolors[0][2][0] = green + bold\r\ng_telnetcolors[0][2][1] = green + bold\r\ng_telnetcolors[0][2][2] = cyan + bold\r\n\r\ng_telnetcolors[1][0][0] = red + dim\r\ng_telnetcolors[1][0][1] = magenta + dim\r\ng_telnetcolors[1][0][2] = magenta + bold\r\ng_telnetcolors[1][1][0] = yellow + dim\r\ng_telnetcolors[1][1][1] = white + dim\r\ng_telnetcolors[1][1][2] = blue + bold\r\ng_telnetcolors[1][2][0] = green + bold\r\ng_telnetcolors[1][2][1] = green + bold\r\ng_telnetcolors[1][2][2] = cyan + bold\r\n\r\ng_telnetcolors[2][0][0] = red + bold\r\ng_telnetcolors[2][0][1] = red + bold\r\ng_telnetcolors[2][0][2] = magenta + bold\r\ng_telnetcolors[2][1][0] = yellow + dim\r\ng_telnetcolors[2][1][1] = red + bold\r\ng_telnetcolors[2][1][2] = magenta + bold\r\ng_telnetcolors[2][2][0] = yellow + bold\r\ng_telnetcolors[2][2][1] = yellow + bold\r\ng_telnetcolors[2][2][2] = white + bold\r\n\r\nclass BetterTelnet:\r\n def __init__(self):\r\n self.m_buffer = \"\"\r\n \r\n def GetBuffered(self):\r\n return len(self.m_buffer)\r\n \r\n def Translate(self, p_conn, p_buffer):\r\n for i in range(0, len(p_buffer)):\r\n c = p_buffer[i]\r\n buffersize = len(self.m_buffer)\r\n if int(c) >= 32 and int(c) != 127 and buffersize < 1024:\r\n self.m_buffer += chr(c)\r\n elif int(c) == 8 and buffersize > 0:\r\n self.m_buffer = self.m_buffer[0:buffersize - 1]\r\n elif c == 10 or c == 13:\r\n if buffersize > 0 and p_conn.Handler() != None:\r\n p_conn.Handler().Handle(self.m_buffer)\r\n self.m_buffer = \"\"\r\n \r\n def SendString(self, p_conn, p_string):\r\n string = self.TranslateColors(p_string)\r\n p_conn.BufferData(string)\r\n \r\n def TranslateColors(self, p_str):\r\n string = p_str.replace(\"<$black>\", black).replace(\"<$red>\", red).replace(\"<$green>\", green).replace(\"<$yellow>\", yellow).replace(\"<$blue>\", blue).replace(\"<$magenta>\", magenta).replace(\"<$cyan>\", cyan).replace(\"<$white>\", white).replace(\"<$bold>\", bold).replace(\"<$dim>\", dim).replace(\"<$reset>\", reset)\r\n i = p_str.find(\"<\")\r\n \r\n while i != -1:\r\n if p_str[i + 1] == '#':\r\n j = p_str.find(\">\", i)\r\n if j != -1 and j - i == 8:\r\n temp = p_str[i:j + 1]\r\n string = string.replace(temp, self.TranslateNumberColor(temp))\r\n i = p_str.find(\"<\", i + 1)\r\n \r\n return string\r\n \r\n def ASCIIToHex(self, c):\r\n if c >= '0' and c <= '9':\r\n return ord(c) - ord('0')\r\n if c >= 'A' and c <= 'F':\r\n return ord(c) - ord('A') + 10\r\n # fixed: the upper bound must be 'f', not 'a', or lowercase hex digits never match\r\n if c >= 'a' and c <= 'f':\r\n return ord(c) - ord('a') + 10\r\n return 0\r\n \r\n def TranslateNumberColor(self, p_str):\r\n #chop off the six digits, ie the \"XXXXXX\" inside \"<#XXXXXX>\"\r\n col = p_str[2:8]\r\n \r\n r = self.ASCIIToHex(col[0]) * 16 + self.ASCIIToHex(col[1])\r\n g = self.ASCIIToHex(col[2]) * 16 + self.ASCIIToHex(col[3])\r\n b = self.ASCIIToHex(col[4]) * 16 + self.ASCIIToHex(col[5])\r\n \r\n #convert the numbers to the 0-2 range\r\n #ie: 0 - 85 = 0\r\n # 86 - 171 = 1\r\n # 172 - 255 = 2\r\n #This gives a good approximation of the true color by assigning equal\r\n #ranges to each value.\r\n r = r // 86\r\n g = g // 86\r\n b = b // 86\r\n \r\n return g_telnetcolors[r][g][b]\r\n","sub_path":"src/network/BetterTelnet.py","file_name":"BetterTelnet.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"197479489","text":"#!/usr/bin/env python3\n\n__description__ = \"Windows Shortcut file (LNK) parser\"\n__author__ = \"Matúš Jasnický\"\n__version__ = \"0.3.3\"\n\nimport json\nimport datetime\nimport argparse\nfrom subprocess import list2cmdline\n\nfrom LnkParse3.lnk_header import LnkHeader\nfrom LnkParse3.lnk_targets import LnkTargets\nfrom LnkParse3.lnk_info import LnkInfo\nfrom LnkParse3.info_factory import InfoFactory\nfrom LnkParse3.string_data import StringData\nfrom LnkParse3.extra_data import ExtraData\n\n\nclass LnkFile(object):\n def __init__(self, fhandle=None, indata=None, debug=False, cp=None):\n if fhandle:\n self.indata = fhandle.read()\n elif indata:\n self.indata = indata\n\n self.debug = debug\n self.cp = cp\n\n self.process()\n\n def has_relative_path(self):\n return bool(\"HasRelativePath\" in self.header.link_flags())\n\n def has_arguments(self):\n return bool(\"HasArguments\" in self.header.link_flags())\n\n def is_unicode(self):\n return bool(\"IsUnicode\" in self.header.link_flags())\n\n def has_name(self):\n return bool(\"HasName\" in self.header.link_flags())\n\n def has_working_dir(self):\n return bool(\"HasWorkingDir\" in self.header.link_flags())\n\n def has_icon_location(self):\n return bool(\"HasIconLocation\" in self.header.link_flags())\n\n def has_target_id_list(self):\n return bool(\"HasTargetIDList\" in self.header.link_flags())\n\n def has_link_info(self):\n return bool(\"HasLinkInfo\" in self.header.link_flags())\n\n def force_no_link_info(self):\n return bool(\"ForceNoLinkInfo\" in self.header.link_flags())\n\n def process(self):\n index = 0\n\n # Parse header\n self.header = LnkHeader(indata=self.indata)\n index += self.header.size()\n\n # XXX: json\n self._target_index = index + 2\n\n # Parse ID List\n self.targets = None\n if self.has_target_id_list():\n self.targets = LnkTargets(indata=self.indata[index:], cp=self.cp)\n index += self.targets.size()\n\n # Parse Link Info\n self.info = None\n if self.has_link_info() and not self.force_no_link_info():\n info = LnkInfo(indata=self.indata[index:], cp=self.cp)\n info_class = InfoFactory(info).info_class()\n if info_class:\n self.info = info_class(indata=self.indata[index:], cp=self.cp)\n index += self.info.size()\n\n # Parse String Data\n self.string_data = StringData(self, indata=self.indata[index:], cp=self.cp)\n index += self.string_data.size()\n\n # 
Parse Extra Data\n self.extras = ExtraData(indata=self.indata[index:], cp=self.cp)\n\n def print_lnk_file(self):\n print(\"Windows Shortcut Information:\")\n print(\n \"\\tLink Flags: %s - (%s)\"\n % (self.format_linkFlags(), self.header.r_link_flags())\n )\n print(\n \"\\tFile Flags: %s - (%s)\"\n % (self.format_fileFlags(), self.header.r_file_flags())\n )\n print(\"\")\n print(\"\\tCreation Timestamp: %s\" % (self.header.creation_time()))\n print(\"\\tModified Timestamp: %s\" % (self.header.write_time()))\n print(\"\\tAccessed Timestamp: %s\" % (self.header.access_time()))\n print(\"\")\n print(\n \"\\tFile Size: %s (r: %s)\"\n % (str(self.header.file_size()), str(len(self.indata)))\n )\n print(\"\\tIcon Index: %s \" % (str(self.header.icon_index())))\n print(\"\\tWindow Style: %s \" % (str(self.header.window_style())))\n print(\"\\tHotKey: %s \" % (str(self.header.hot_key())))\n print(\"\")\n\n for key, value in self.string_data.as_dict().items():\n print(\"\\t%s: %s\" % (key, value))\n\n print(\"\")\n print(\"\\tEXTRA BLOCKS:\")\n\n for extra in self.extras:\n print(f\"\\t\\t{extra.name()}\")\n for key, value in extra.as_dict().items():\n print(f\"\\t\\t\\t[{key}] {value}\")\n\n def format_linkFlags(self):\n return \" | \".join(self.header.link_flags())\n\n def format_fileFlags(self):\n return \" | \".join(self.header.file_flags())\n\n # FIXME: Simple concat of path and arguments\n @property\n def lnk_command(self):\n out = []\n\n if self.has_relative_path():\n relative_path = self.string_data.relative_path()\n out.append(list2cmdline([relative_path]))\n\n if self.has_arguments():\n out.append(self.string_data.command_line_arguments())\n\n return \" \".join(out)\n\n def print_short(self, pjson=False):\n out = self.lnk_command\n\n if pjson:\n print(json.dumps({\"command\": out}))\n else:\n print(out)\n\n def print_json(self, print_all=False):\n res = self.get_json(print_all)\n\n def _datetime_to_str(obj):\n if isinstance(obj, datetime.datetime):\n return obj.replace(microsecond=0).isoformat()\n return obj\n\n print(\n json.dumps(\n res,\n indent=4,\n separators=(\",\", \": \"),\n default=_datetime_to_str,\n sort_keys=True,\n )\n )\n\n def get_json(self, get_all=False):\n res = {\n \"header\": {\n \"guid\": self.header.link_cls_id(),\n \"r_link_flags\": self.header.r_link_flags(),\n \"r_file_flags\": self.header.r_file_flags(),\n \"creation_time\": self.header.creation_time(),\n \"accessed_time\": self.header.access_time(),\n \"modified_time\": self.header.write_time(),\n \"file_size\": self.header.file_size(),\n \"icon_index\": self.header.icon_index(),\n \"windowstyle\": self.header.window_style(),\n \"hotkey\": self.header.hot_key(),\n \"r_hotkey\": self.header.raw_hot_key(),\n \"link_flags\": self.header.link_flags(),\n \"file_flags\": self.header.file_flags(),\n \"header_size\": self.header.size(),\n \"reserved0\": self.header.reserved0(),\n \"reserved1\": self.header.reserved1(),\n \"reserved2\": self.header.reserved2(),\n },\n \"data\": self.string_data.as_dict(),\n \"extra\": {extra.name(): extra.as_dict() for extra in self.extras},\n }\n\n if self.targets:\n res[\"target\"] = {\n \"size\": self.targets.id_list_size(),\n \"items\": [x.as_item() for x in self.targets],\n \"index\": self._target_index,\n }\n\n res[\"link_info\"] = {}\n if self.info:\n res[\"link_info\"] = {\n \"link_info_size\": self.info.size(),\n \"link_info_header_size\": self.info.header_size(),\n \"link_info_flags\": self.info.flags(),\n \"volume_id_offset\": self.info.volume_id_offset(),\n 
\"local_base_path_offset\": self.info.local_base_path_offset(),\n \"common_network_relative_link_offset\": self.info.common_network_relative_link_offset(),\n \"common_path_suffix_offset\": self.info.common_path_suffix_offset(),\n }\n\n res[\"link_info\"][\"location_info\"] = {}\n if type(self.info).__name__ == \"Local\":\n res[\"link_info\"][\"local_base_path\"] = self.info.local_base_path()\n res[\"link_info\"][\"common_path_suffix\"] = self.info.common_path_suffix()\n res[\"link_info\"][\"location\"] = self.info.location()\n\n res[\"link_info\"][\"location_info\"] = {\n \"volume_id_size\": self.info.volume_id_size(),\n \"r_drive_type\": self.info.r_drive_type(),\n \"volume_label_offset\": self.info.volume_label_offset(),\n \"drive_serial_number\": self.info.drive_serial_number(),\n \"drive_type\": self.info.drive_type(),\n \"volume_label\": self.info.volume_label(),\n }\n\n if self.info.local_base_path_offset_unicode():\n res[\"link_info\"][\"location_info\"][\n \"local_base_path_offset_unicode\"\n ] = self.info.local_base_path_offset_unicode()\n if self.info.common_path_suffix_unicode():\n res[\"link_info\"][\"location_info\"][\n \"common_path_suffix_unicode\"\n ] = self.info.common_path_suffix_unicode()\n if self.info.volume_id():\n res[\"link_info\"][\"location_info\"][\n \"volume_id\"\n ] = self.info.volume_id()\n if self.info.common_network_relative_link():\n res[\"link_info\"][\"location_info\"][\n \"common_network_relative_link\"\n ] = self.info.common_network_relative_link()\n if self.info.volume_label_unicode_offset():\n res[\"link_info\"][\"location_info\"][\n \"volume_label_unicode_offset\"\n ] = self.info.volume_label_unicode_offset()\n if self.info.volume_label_unicode():\n res[\"link_info\"][\"location_info\"][\n \"volume_label_unicode\"\n ] = self.info.volume_label_unicode()\n if self.info.local_base_unicode():\n res[\"link_info\"][\"location_info\"][\n \"local_base_unicode\"\n ] = self.info.local_base_unicode()\n elif type(self.info).__name__ == \"Network\":\n res[\"link_info\"][\"location_info\"] = {\n \"common_network_relative_link_size\": self.info.common_network_relative_link_size(),\n \"common_network_relative_link_flags\": self.info.common_network_relative_link_flags(),\n \"net_name_offset\": self.info.net_name_offset(),\n \"device_name_offset\": self.info.device_name_offset(),\n \"r_network_provider_type\": self.info.r_network_provider_type(),\n }\n if self.info.network_provider_type():\n res[\"link_info\"][\"location_info\"][\n \"network_provider_type\"\n ] = self.info.network_provider_type()\n if self.info.net_name_offset_unicode():\n res[\"link_info\"][\"location_info\"][\n \"net_name_offset_unicode\"\n ] = self.info.net_name_offset_unicode()\n if self.info.net_name_unicode():\n res[\"link_info\"][\"location_info\"][\n \"net_name_unicode\"\n ] = self.info.net_name_unicode()\n if self.info.device_name_offset_unicode():\n res[\"link_info\"][\"location_info\"][\n \"device_name_offset_unicode\"\n ] = self.info.device_name_offset_unicode()\n if self.info.net_name():\n res[\"link_info\"][\"location_info\"][\"net_name\"] = self.info.net_name()\n if self.info.device_name():\n res[\"link_info\"][\"location_info\"][\n \"device_name\"\n ] = self.info.device_name()\n\n if not get_all:\n res[\"header\"].pop(\"header_size\", None)\n res[\"header\"].pop(\"reserved0\", None)\n res[\"header\"].pop(\"reserved1\", None)\n res[\"header\"].pop(\"reserved2\", None)\n res[\"link_info\"].pop(\"link_info_size\", None)\n res[\"link_info\"].pop(\"link_info_header_size\", None)\n 
res[\"link_info\"].pop(\"volume_id_offset\", None)\n res[\"link_info\"].pop(\"local_base_path_offset\", None)\n res[\"link_info\"].pop(\"common_network_relative_link_offset\", None)\n res[\"link_info\"].pop(\"common_path_suffix_offset\", None)\n if \"Local\" in res[\"link_info\"]:\n res[\"link_info\"][\"location_info\"].pop(\"volume_id_size\", None)\n res[\"link_info\"][\"location_info\"].pop(\"volume_label_offset\", None)\n if \"Network\" in res[\"link_info\"]:\n res[\"link_info\"][\"location_info\"].pop(\n \"common_network_relative_link_size\", None\n )\n res[\"link_info\"][\"location_info\"].pop(\"net_name_offset\", None)\n res[\"link_info\"][\"location_info\"].pop(\"device_name_offset\", None)\n\n if \"target\" in res:\n res[\"target\"].pop(\"index\", None)\n if \"items\" in res[\"target\"]:\n for item in res[\"target\"][\"items\"]:\n if item:\n item.pop(\"modification_time\", None)\n\n return res\n\n\ndef main():\n arg_parser = argparse.ArgumentParser(description=__description__)\n arg_parser.add_argument(\n \"-f\",\n \"--file\",\n dest=\"file\",\n required=True,\n help=\"absolute or relative path to the file\",\n )\n arg_parser.add_argument(\n \"-t\", \"--target\", action=\"store_true\", help=\"print target only\"\n )\n arg_parser.add_argument(\n \"-j\", \"--json\", action=\"store_true\", help=\"print output in JSON\"\n )\n arg_parser.add_argument(\n \"-c\",\n \"--codepage\",\n dest=\"cp\",\n default=\"cp1252\",\n help=\"codepage of ASCII strings\",\n )\n arg_parser.add_argument(\n \"-d\",\n \"--json_debug\",\n action=\"store_true\",\n help=\"print all extracted data in JSON (i.e. offsets and sizes)\",\n )\n arg_parser.add_argument(\n \"-D\", \"--debug\", action=\"store_true\", help=\"print debug info\"\n )\n args = arg_parser.parse_args()\n\n with open(args.file, \"rb\") as file:\n lnk = LnkFile(fhandle=file, debug=args.debug, cp=args.cp)\n if args.target:\n lnk.print_short(pjson=args.json)\n elif args.json:\n lnk.print_json(args.json_debug)\n else:\n lnk.print_lnk_file()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"LnkParse3/lnk_file.py","file_name":"lnk_file.py","file_ext":"py","file_size_in_byte":14041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"473461625","text":"\"\"\"\r\nCopyright 2020 by Aleksandar Eri\r\n\"\"\"\r\n\r\nimport os\r\nimport os.path\r\nimport time\r\nimport sys\r\nimport tkinter\r\nimport tkinter.ttk\r\nimport tkinter.font\r\nimport tkinter.messagebox\r\ntry:\r\n\timport queue\r\nexcept:\r\n\timport Queue as queue\r\n\t\r\ndef save_text_with_append(filename):\r\n\ttry:\r\n\t\tf = open(filename, \"at\", encoding=\"utf-8\")\r\n\texcept:\r\n\t\tf = open(filename, \"a\")\r\n\treturn f\r\n\r\ndef save_text(filename):\r\n\ttry:\r\n\t\tf = open(filename, \"wt\", encoding=\"utf-8\")\r\n\texcept:\r\n\t\tf = open(filename, \"w\")\r\n\treturn f\r\n\r\nclass StdoutRedirector(object):\r\n\tdef __init__(self, master):\r\n\t\tself.master = master\r\n\t\tself.manager = master.manager\r\n\r\n\tdef write(self, string):\r\n\t\tstringType = 0\r\n\t\tif string.startswith(\"WARNING\"):\r\n\t\t\tstringType = 1\r\n\t\tself.master.txtOutput.write(string, stringType)\r\n\t\tif self.manager.log is not None:\r\n\t\t\twith save_text_with_append(self.manager.log) as log:\r\n\t\t\t\tlog.write(string)\r\n\r\n\tdef flush(self):\r\n\t\tpass\r\n\r\nclass StderrRedirector(object):\r\n\tdef __init__(self, master):\r\n\t\tself.master = master\r\n\t\tself.manager = self.master.manager\r\n\t\trootDir = 
master.manager.rootDir\r\n\t\tself.errorLog = rootDir + \"/logs/error.log\"\r\n\t\tif not os.path.exists(rootDir + \"/logs\"):\r\n\t\t\tos.makedirs(rootDir + \"/logs\")\r\n\r\n\r\n\tdef write(self, string):\r\n\t\tself.master.txtOutput.write(string, -1)\r\n\t\twith open(self.errorLog, 'at') as log:\r\n\t\t\tlog.write(string)\r\n\t\tif self.manager.log is not None:\r\n\t\t\twith open(self.manager.log, 'at') as log:\r\n\t\t\t\tlog.write(string)\r\n\r\n\tdef flush(self):\r\n\t\tpass\r\n\r\nclass ThreadSafeConsole(tkinter.Text):\r\n\tdef __init__(self, master, **options):\r\n\t\ttkinter.Text.__init__(self, master, **options)\r\n\t\tself.manager = master.manager\r\n\t\tself.tag_config(\"err\", foreground=\"red\")\r\n\t\tself.tag_config(\"war\", foreground=\"yellow\")\r\n\t\tself.error_indexes = []\r\n\t\tself.warning_indexes = []\r\n\t\tself.queue = queue.Queue()\r\n\t\tself.update_me()\r\n\r\n\tdef write(self, line, error = 0):\r\n\t\ttimeStamp = time.strftime(\"%H:%M:%S\", time.localtime())\r\n\t\tif line != \"\\n\" and error == 0:\r\n\t\t\tprintLine = timeStamp + \" \" + line\r\n\t\telse:\r\n\t\t\tprintLine = line\r\n\t\tself.queue.put((printLine, error))\r\n\r\n\tdef clear(self):\r\n\t\tself.queue.put(None)\r\n\r\n\tdef update_me(self):\r\n\t\ttry:\r\n\t\t\twhile 1:\r\n\t\t\t\tline = self.queue.get_nowait()\r\n\t\t\t\tif line is None:\r\n\t\t\t\t\tself.delete(1.0, tkinter.END)\r\n\t\t\t\t\tself.error_indexes = []\r\n\t\t\t\t\tself.warning_indexes = []\r\n\t\t\t\telse:\r\n\t\t\t\t\terror = line[1]\r\n\t\t\t\t\tline = line[0]\r\n\t\t\t\t\tif error != 0:\r\n\t\t\t\t\t\tstartPos = str((float(self.index(\"end\")) - 1)/ 1)\r\n\t\t\t\t\t\tendPos = str((float(startPos) + 1) / 1)\r\n\t\t\t\t\t\tif error == -1:\r\n\t\t\t\t\t\t\tself.error_indexes.append((startPos,endPos))\r\n\t\t\t\t\t\telif error == 1:\r\n\t\t\t\t\t\t\tself.warning_indexes.append((startPos,endPos))\r\n\r\n\t\t\t\t\tself.insert(tkinter.END, str(line))\r\n\t\t\t\t\tstatus = line[line.find(\" \") + 1:].strip()\r\n\t\t\t\t\tif len(status) > 5:\r\n\t\t\t\t\t\tstatus_max_length = 50\r\n\t\t\t\t\t\tself.manager.write_status(status\r\n\t\t\t\t\t\t\t\t\t\t\t\t if len(status) < status_max_length\r\n\t\t\t\t\t\t\t\t\t\t\t\t else \"%s...%s\" % (status[:status_max_length//2],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstatus[-status_max_length//2:]))\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.see(tkinter.END)\r\n\t\t\t\t\tself.update_idletasks()\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\tself.highlight_errors()\r\n\t\tif len(self.error_indexes) > 0:\r\n\t\t\tself.manager.progress_colour(\"red\")\r\n\t\telif len(self.warning_indexes) > 0:\r\n\t\t\tself.manager.progress_colour(\"yellow\")\r\n\t\tself.after(100, self.update_me)\r\n\r\n\tdef highlight_errors(self):\r\n\t\tfor indexes in self.error_indexes:\r\n\t\t\tself.tag_add(\"err\", indexes[0], indexes[1])\r\n\t\tfor indexes in self.warning_indexes:\r\n\t\t\tself.tag_add(\"war\", indexes[0], indexes[1])\r\n\r\nclass TabLog(tkinter.Frame):\r\n\tdef __init__(self, manager):\r\n\t\ttkinter.Frame.__init__(self, manager.nTabs)\r\n\t\tself.manager = manager\r\n\t\tself.rootDir = manager.rootDir\r\n\t\tself.label = 'Log'\r\n\t\tmanager.nTabs.add(self, text = self.label)\r\n\t\tself.grid_columnconfigure(2, weight = 1)\r\n\t\tself.grid_columnconfigure(5, weight = 1)\r\n\t\tself.grid_rowconfigure(0, weight = 1)\r\n\r\n\t\t# Logs\r\n\t\tself.lbLogs = tkinter.Listbox(self, selectmode = tkinter.EXTENDED, font = tkinter.font.Font(size = 7),\r\n\t\t\t\t\t\t\t\t\t 
exportselection=0)\r\n self.lbLogs.grid(column = 0, row = 0, columnspan = 4, sticky = \"NWSE\", padx = 5, pady = 5)\r\n self.updateLogs()\r\n\r\n self.sLogs = tkinter.Scrollbar(self, orient = tkinter.VERTICAL)\r\n self.sLogs.config(command = self.lbLogs.yview)\r\n self.sLogs.grid(column = 4, row = 0, sticky = 'NWS')\r\n self.lbLogs.config(yscrollcommand=self.sLogs.set)\r\n\r\n\r\n # Console\r\n self.outputFont = tkinter.font.Font(family=\"Courier\", size=10)\r\n\r\n self.txtOutput = ThreadSafeConsole(self, wrap='word', font = self.outputFont, bg=\"black\", fg=\"white\")\r\n self.txtOutput.grid(column = 5, row = 0, sticky='NSWE', padx=5, pady=5)\r\n self.txtOutput.insert(tkinter.END, \"Device Manager\" + os.linesep + \"-----------\" + os.linesep + os.linesep)\r\n\r\n sys.stdout = StdoutRedirector(self)\r\n sys.stderr = StderrRedirector(self)\r\n\r\n # Logo\r\n tkinter.Label(self, image = self.manager.img).grid(column = 7, row = 0, sticky = 'NE')\r\n\r\n # Console scroll bar\r\n self.sConsole = tkinter.Scrollbar(self, orient = tkinter.VERTICAL)\r\n self.sConsole.config(command = self.txtOutput.yview)\r\n self.sConsole.grid(column = 6, row = 0, sticky = 'NSEW')\r\n\r\n self.txtOutput.config(yscrollcommand=self.sConsole.set)\r\n\r\n\r\n self.create_context_menus()\r\n\r\n def create_context_menus(self):\r\n self.menuConsole = tkinter.Menu(self.txtOutput, tearoff=0)\r\n self.menuConsole.add_separator()\r\n self.menuConsole.add_command(label=\"Clear console\",\tcommand=self.mtd_btn_clear_console)\r\n # right-click binding; the event name was stripped in extraction and is restored here\r\n self.txtOutput.bind(\"<Button-3>\", self.menuConsolePopup)\r\n\r\n self.menuLogs = tkinter.Menu(self.lbLogs, tearoff=0)\r\n self.menuLogs.add_separator()\r\n self.menuLogs.add_command(label=\"Refresh\",\tcommand=self.updateLogs)\r\n\r\n # Console Context Menu\r\n\r\n def mtd_btn_copy(self):\r\n self.manager.root.clipboard_clear()\r\n self.manager.root.clipboard_append(self.txtOutput.selection_get())\r\n\r\n def mtd_btn_copy_all(self):\r\n self.manager.root.clipboard_clear()\r\n self.manager.root.clipboard_append(self.txtOutput.get(1.0, tkinter.END))\r\n\r\n def mtd_btn_clear_console(self):\r\n self.txtOutput.clear()\r\n\r\n def menuConsolePopup(self, event):\r\n self.menuConsole.post(event.x_root, event.y_root)\r\n\r\n # Logs Context Menu\r\n def updateLogs(self, event = None):\r\n self.lbLogs.delete(0, tkinter.END)\r\n logsFolder = \"logs\"\r\n if os.path.exists(self.rootDir + \"/\" + logsFolder):\r\n for log in os.listdir(self.rootDir + \"/\" + logsFolder):\r\n if os.path.isfile(self.rootDir + \"/\" + logsFolder + \"/\" + log):\r\n self.lbLogs.insert(tkinter.END, log)\r\n","sub_path":"app_ui_log.py","file_name":"app_ui_log.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"24412816","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom models import Pages\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\n\n\ndef index(request, peticion_id):\n\n try:\n solicitud = Pages.objects.get(id=int(peticion_id))\n # we could also look this up by name instead of id, but this way is better\n except Pages.DoesNotExist:\n return HttpResponse('Page Not Found')\n respuesta = 'Hola, soy ' + solicitud.name + \": \" + str(solicitud.page)\n return HttpResponse(respuesta)\n
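\n# (Added sketch; patterns and names are hypothetical — this project's actual urls.py is not shown)\n# urlpatterns = [\n#     url(r'^page/(?P<peticion_id>[0-9]+)/$', index),\n#     url(r'^new/(?P<nombre>[a-z]+)/(?P<pagina>.+)$', paginanueva),\n# ]\n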
\n@csrf_exempt\ndef paginanueva(request, nombre, pagina):\n if request.method == \"GET\":\n p = Pages(name=nombre, page=pagina)\n p.save()\n elif request.method == \"PUT\":\n info = request.body\n p = Pages(name=nombre, page=info)\n p.save()\n return HttpResponse('Todo ha ido bien')\n\ndef dame_paginas(request):\n lista_paginas = Pages.objects.all()\n # NOTE: the HTML tags in these literals were stripped during extraction;\n # the list markup below is a minimal reconstruction of the lost tags\n respuesta = \"<ol>\"\n for pag in lista_paginas:\n respuesta += '<li>' + pag.name + '</li>'\n respuesta += \"</ol>\"\n return HttpResponse(respuesta)\n","sub_path":"myproject/cmsput/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"223942790","text":"import os\nfrom struct import unpack,pack\nimport ebx\n\n# fixed import: these are the getpath helpers actually used below\nfrom getpath import getOrCreatePathWithQuery, getRelativePathWithQuery\n\n\n#You can hardcode paths here. If they're not empty, the functions below won't do anything.\n\n# where did you put the results of dumper.py? \ndumpPath = \"\" #r\"E:\\Games\\Dragon_Age_Inquisition_Export\"\n\n# where do you want to put the extracted assets?\ntargetPath = \"\" #dumpPath+\"\\_assets\" \n\n# helper code from getpath.py to verify existence of directory\ndumpDirectory = getOrCreatePathWithQuery(dumpPath, \"enter path to results of dump.py\", False) # should exist already\ntargetDirectory = getOrCreatePathWithQuery(targetPath, \"destination path for created assets (will be created if necessary)\", True) \n\ninputFolder = getRelativePathWithQuery(dumpPath + \"bundles\\ebx\", \"\", \"Enter subfolder (relative to dumpDirectory\\bundles\\ebx) to restrict result processing.\\n If in doubt, leave empty.\") #r\"audio\\music\" #relative to ebxFolder\n\n\n#These paths are relative to the dumpDirectory. They don't need to be changed.\nebxFolder = r\"bundles\\ebx\"\nresFolder = r\"bundles\\res\"\nchunkFolder = r\"chunks\"\nchunkFolder2 = r\"bundles\\chunks\" #if the chunk is not found in the first folder, use this one\n\n#Run through the sound ebx files, find fields with chunk Guids and fieldName = ChunkId.\n#The script will overwrite existing files.\n\n#The filename of the ebx file importing an audio chunk becomes the name of the wav file.\n#There are three indices used in the following order.\n#1: Variation.ChunkIndex: Some ebx files import several completely independent audio chunk files. This index differentiates between them.\n#2: Variation.Index: An ebx may use the same audio chunk for several sound variations, this index keeps them apart.\n#3: Segment.Index: A variation may contain several segments, so there is another index.\n
\n##############################################################\n##############################################################\n\nebxFolder, resFolder, chunkFolder,chunkFolder2 = [os.path.join(dumpDirectory, path) for path in (ebxFolder, resFolder, chunkFolder, chunkFolder2)]\ninputFolder=os.path.join(ebxFolder,inputFolder)\n\nprint(\"Loading GUID table...\")\nebx.loadGuidTable(dumpDirectory)\n\nfor dir0, dirs, ff in os.walk(inputFolder):\n for fname in ff:\n dbx=ebx.Dbx(os.path.join(dir0,fname),ebxFolder)\n dbx.extractAssets(chunkFolder,chunkFolder2,resFolder,targetDirectory)\n","sub_path":"frostbite2/ebxtoasset.py","file_name":"ebxtoasset.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"355189724","text":"import argparse\n\nfrom src.distortion import DistortionWavAndArray\nfrom src.utils import get_data_file_path\n\n\ndef main():\n # parsed arguments\n otowari_level = 1024\n origin_file = \"origin.wav\"\n play_otowari(origin_file, otowari_level)\n\n\ndef play_otowari(file_name, distortion_level=20):\n origin_path = get_data_file_path(file_name)\n # wav_file=WavAndArray(origin_path)\n wav_file = DistortionWavAndArray(origin_path,\n distortion_level=distortion_level)\n wav_file.play_file()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"511610034","text":"import time\nfrom graph import Graph, phase_lines\nfrom tests import profileit\nfrom tests.test_graph import graph3x3, graph_cycle_6, graph_cycle_5, fully_connected_4, mountain_river_map\n\n\ndef test_subgraph():\n g = graph3x3()\n g2 = g.subgraph_from_nodes([1, 2, 3, 4])\n d = {1: {2: 1, 4: 1},\n 2: {3: 1},\n }\n assert g2.is_subgraph(g)\n for k, v in d.items():\n for k2, d2 in v.items():\n assert g.edge(k, k2) == g2.edge(k, k2)\n\n g3 = graph3x3()\n g3.add_edge(3, 100, 7)\n assert not g3.is_subgraph(g2)\n\n\ndef test_is_partite():\n g = graph_cycle_6()\n bol, partitions = g.is_partite(n=2)\n assert bol is True\n\n g = graph_cycle_5()\n bol, part = g.is_partite(n=2)\n assert bol is False\n bol, part = g.is_partite(n=5)\n assert bol is True\n assert len(part) == 5\n\n\ndef test_is_cyclic():\n g = graph_cycle_5()\n assert g.has_cycles()\n\n\ndef test_is_not_cyclic():\n g = graph3x3()\n assert not g.has_cycles()\n\n\ndef test_is_really_cyclic():\n g = Graph(from_list=[(1, 1, 1), (2, 2, 1)]) # two loops onto themselves.\n assert g.has_cycles()\n\n\ndef test_components():\n g = Graph(from_list=[\n (1, 2, 1), # component 1\n (2, 1, 1),\n (3, 3, 1), # component 2\n (4, 5, 1),\n (5, 6, 1), # component 3\n (5, 7, 1),\n (6, 8, 1),\n (7, 8, 1),\n (8, 9, 1),\n ])\n g.add_node(10) # component 4\n components = g.components()\n assert len(components) == 4\n assert {1, 2} in components\n assert {3} in components\n assert {4, 5, 6, 7, 8, 9} in components\n assert {10} in components\n\n\ndef test_network_size():\n g = graph3x3()\n ns1 = g.network_size(n1=1)\n assert len(ns1) == 9 # all nodes.\n\n ns1_2 = g.network_size(n1=1, degrees_of_separation=2)\n assert all(i not in ns1_2 for i in [6, 8, 9])\n\n ns5 = g.network_size(n1=5)\n assert 
len(ns5) == 4 # all nodes downstream from 5 (plus 5 itself)\n\n ns9 = g.network_size(n1=9)\n assert len(ns9) == 1 # just node 9, as there are no downstream peers.\n\n\ndef test_network_size_when_fully_connected():\n \"\"\" tests network size when the peer has already been seen during search.\"\"\"\n g = fully_connected_4()\n ns = g.network_size(n1=1)\n assert len(ns) == len(g.nodes())\n\n\ndef test_phase_lines_with_loop():\n g = graph3x3()\n g.add_edge(9, 1)\n try:\n _ = g.phase_lines()\n raise AssertionError(\"The graph is a cycle.\")\n except AttributeError:\n assert True\n\n\ndef test_phase_lines_with_inner_loop():\n g = graph3x3()\n g.add_edge(9, 2)\n p = g.phase_lines()\n expected = {1: 0,\n 2: 1, 4: 1,\n 3: 2, 5: 2, 7: 2,\n 6: 3, 8: 3,\n 9: 4}\n assert p == expected\n\n\ndef test_phase_lines_with_inner_loop2():\n g = graph3x3()\n g.add_edge(3, 2)\n p = g.phase_lines()\n expected = {1: 0,\n 2: 1, 4: 1,\n 3: 2, 5: 2, 7: 2,\n 6: 3, 8: 3,\n 9: 4}\n assert p == expected\n\n\ndef test_phase_lines_with_inner_loop3():\n g = graph3x3()\n g.add_edge(9, 10)\n g.add_edge(10, 5)\n p = g.phase_lines()\n expected = {1: 0,\n 2: 1, 4: 1,\n 3: 2, 5: 2, 7: 2,\n 6: 3, 8: 3,\n 9: 4,\n 10: 5}\n assert p == expected\n\n\ndef test_offset_phase_lines():\n \"\"\"\n This test recreates a bug\n\n 1\n \\\n 2\n \\\n 3 <---\n \\ / \\\n 4 A ^\n \\ \\ |\n 5 B |\n \\___\\ /\n 6____/\n \\\n 7\n\n \"\"\"\n g = Graph(from_list=[\n (1, 2, 1),\n (2, 3, 1),\n (3, 4, 1),\n (4, 5, 1),\n (5, 6, 1),\n (6, 7, 1),\n (6, 4, 1),\n ('a', 'b', 1),\n ('b', 6, 1)\n ])\n p = g.phase_lines()\n expected = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 2, 7: 3, 'a': 0, 'b': 1}\n assert p == expected, {(k, v) for k, v in p.items()} - {(k, v) for k, v in expected.items()}\n\n\ndef test_phaselines():\n \"\"\"\n 1 +---> 3 +--> 5 +---> 6 [7]\n ^ ^\n +------------+ |\n |\n 2 +---> 4 +----------> +\n \"\"\"\n g = Graph(from_list=[\n (1, 3, 1),\n (2, 4, 1),\n (2, 5, 1),\n (3, 5, 1),\n (4, 6, 1),\n (5, 6, 1),\n ])\n g.add_node(7)\n\n p = g.phase_lines()\n assert set(g.nodes()) == set(p.keys())\n expects = {1: 0, 2: 0, 7: 0,\n 3: 1, 4: 1,\n 5: 2,\n 6: 3}\n assert p == expects, (p, expects)\n\n\ndef test_phaselines_for_ordering():\n \"\"\"\n u1 u4 u2 u3\n \\ \\ \\_______\\\n csg cs3 append\n \\ \\ \\\n op1 \\ op3\n \\ \\ \\\n op2 \\ cs2\n \\ \\___________\\\n cs1 join\n \\ \\\n map1 map2\n \\___________\\\n save\n\n \"\"\"\n L = [\n ('u1', 'csg', 1),\n ('csg', 'op1', 1),\n ('op1', 'op2', 1),\n ('op2', 'cs1', 1),\n ('cs1', 'map1', 1),\n ('map1', 'save', 1),\n ('u4', 'cs3', 1),\n ('cs3', 'join', 1),\n ('join', 'map2', 1),\n ('map2', 'save', 1),\n ('u2', 'append', 1),\n ('u3', 'append', 1),\n ('append', 'op3', 1),\n ('op3', 'cs2', 1),\n ('cs2', 'join', 1)\n ]\n\n g = Graph(from_list=L)\n\n p = g.phase_lines()\n\n expected = {'u1': 0, 'u4': 0, 'u2': 0, 'u3': 0,\n 'csg': 1, 'cs3': 1, 'append': 1,\n 'op1': 2, 'op3': 2,\n 'op2': 3, 'cs2': 3,\n 'cs1': 4, 'join': 4,\n 'map1': 5, 'map2': 5,\n 'save': 6, }\n\n assert p == expected, {(k, v) for k, v in p.items()} - {(k, v) for k, v in expected.items()}\n\n\ndef test_phaselines_for_larger_graph():\n g = mountain_river_map()\n start = time.time()\n p = g.phase_lines()\n g.has_cycles()\n end = time.time()\n\n # primary objective: correctness.\n assert len(p) == 253, len(p)\n\n # secondary objective: timeliness\n assert end - start < 1 # second.\n\n # third objective: efficiency.\n max_calls = 13000\n\n profiled_phaseline_func = profileit(phase_lines)\n\n calls, text = profiled_phaseline_func(g)\n if calls > max_calls:\n raise 
Exception(f\"too many function calls: {text}\")\n\n\ndef test_sources():\n \"\"\"\n [1]+--->[3]+-->[5]+--->[6] [7] (isolated node)\n ^ ^\n +------------+ |\n |\n [2]+--->[4]+----------> +\n \"\"\"\n g = Graph(from_list=[\n (1, 3, 1),\n (2, 4, 1),\n (2, 5, 1),\n (3, 5, 1),\n (4, 6, 1),\n (5, 6, 1),\n ])\n g.add_node(7)\n s = g.sources(5)\n e = {1, 2, 3}\n assert s == e\n\n s2 = g.sources(1)\n e2 = set()\n assert s2 == e2, s2\n\n s3 = g.sources(6)\n e3 = {1, 2, 3, 4, 5}\n assert s3 == e3\n\n s4 = g.sources(7)\n e4 = set()\n assert s4 == e4\n","sub_path":"tests/test_topology.py","file_name":"test_topology.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"220399612","text":"import time\nimport dash\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\nfrom flask import Flask\nimport sys; sys.path.append('/opt/docker/pymodules')\nimport es_search\nfrom navbar import Navbar\nfrom dateselect import DateSelect\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\napp.title = 'Daily Graph'\nserver = app.server\n\nnavbar = Navbar()\nbody = dbc.Container(\n [\n dbc.Row(\n [\n dbc.Col(\n [\n html.H2(\"Daily Graph\"),\n html.P(\n \"\"\"\\\nThe lifestyle is recorded.\nRecordkeeping uses elasticsearch.\nmade with python and flask.\nIt is based on docker container.\"\"\"\n ),\n ],\n md=3,\n ),\n dbc.Col(\n [\n DateSelect(),\n html.Div(id='date-display-value')\n ]\n ),\n ]\n )\n ],\n className=\"mt-4\",\n)\n\napp.layout = html.Div([navbar, body])\n\n@app.callback(\n Output('date-display-value', 'children'),\n [Input('date-dropdown', 'value')])\ndef display_value(value):\n if value is None:\n df = es_search.result(time.strftime(\"%Y-%m\"))\n else:\n df = es_search.result(value)\n graph = dcc.Graph(\n figure={\n 'data': [go.Scatter(x=df[\"date\"],y=df[\"1\"],name=\"reading\",mode=\"lines+markers\"),\n go.Scatter(x=df[\"date\"],y=df[\"2\"],name=\"exercise\",mode=\"lines+markers\"),\n go.Scatter(x=df[\"date\"],y=df[\"3\"],name=\"coding\",mode=\"lines+markers\"),\n go.Scatter(x=df[\"date\"],y=df[\"4\"],name=\"english\",mode=\"lines+markers\")],\n 'layout':{'xaxis':{'tickformat':'%Y/%m/%d'}}\n }\n ),\n return graph\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"graph/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"74138313","text":"\n\nimport torch\nimport itertools\nfrom . 
import embeddings\n\n\nclass Vocabulary:\n\n def __init__(self ):\n self.create()\n \n def create(self):\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n self.embeddings = [] \n self.PAD_token = 0 # Used for padding short sentences\n self.SOS_token = 1 # Start-of-sentence token\n self.EOS_token = 2 # End-of-sentence token\n self.UNK_token = 3 # Unknown word token\n self.trimmed = False\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n def load_embeddings( self, pathname, type='emb' ):\n if type == 'emb': \n emb = embeddings.load_sentence_embeddings(pathname)\n self.word2index = emb['word2index'] \n self.index2word = emb['index2word'] \n self.word2count = emb['word2count'] \n self.embeddings = emb['embeddings'] \n self.n_words = emb['n_words'] \n self.PAD_token = emb['PAD_token'] \n self.SOS_token = emb['SOS_token'] \n self.EOS_token = emb['EOS_token'] \n self.UNK_token = emb['UNK_token']\n else: \n print('Unknown embedding load type ...')\n assert(False)\n\n\n # Remove words below a certain count threshold\n def trim(self, min_count):\n if self.trimmed:\n return\n self.trimmed = True\n keep_words = []\n\n for k, v in self.word2count.items():\n if v >= min_count:\n keep_words.append(k)\n\n print('keep_words {} / {} = {:.4f}'.format(\n len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)\n ))\n\n # Reinitialize dictionaries\n # NOTE: the SOS/EOS strings were lost in extraction; \"<s>\"/\"</s>\" restored as the likely originals\n self.word2index = {}\n self.word2count = {}\n self.index2word = {self.PAD_token: \"*\", self.SOS_token: \"<s>\", self.EOS_token: \"</s>\", self.UNK_token: \"UUUNKKK\"}\n self.n_words = 4 # Count default tokens (fixed: was num_words, an attribute the class never reads)\n \n for word in keep_words:\n self.addWord(word)\n \n \ndef trimRareWords(voc, pairs, min_cout=1):\n # Trim words used under the min_cout from the voc\n voc.trim(min_cout) \n # Filter out pairs with trimmed words\n keep_pairs = []\n for pair in pairs:\n input_sentence = pair[0]\n output_sentence = pair[1]\n keep_input = True\n keep_output = True\n # Check input sentence\n for word in input_sentence.split(' '):\n if word not in voc.word2index:\n keep_input = False\n break\n # Check output sentence\n for word in output_sentence.split(' '):\n if word not in voc.word2index:\n keep_output = False\n break\n # Only keep pairs that do not contain trimmed word(s) in their input or output sentence\n if keep_input and keep_output:\n keep_pairs.append(pair)\n\n print(\"Trimmed from {} pairs to {}, {:.4f} of total\".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))\n return keep_pairs\n\n\ndef indexesFromSentence(voc, sentence):\n return [voc.word2index.get( word, voc.UNK_token ) for word in sentence.split(' ')] + [ voc.EOS_token ]\n\ndef zeroPadding(l, fillvalue):\n return list(itertools.zip_longest(*l, fillvalue=fillvalue))\n\ndef binaryMatrix(l, value):\n m = []\n for i, seq in enumerate(l):\n m.append([])\n for token in seq:\n if token == value:\n m[i].append(0)\n else:\n m[i].append(1)\n return m\n\n# Returns padded input sequence tensor and lengths\ndef inputVar(l, voc):\n indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]\n lengths = torch.tensor([len(indexes) for indexes in indexes_batch])\n padList = zeroPadding(indexes_batch, voc.PAD_token)\n padVar = torch.LongTensor(padList)\n return padVar, lengths\n
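\n# (Added shape note) zeroPadding transposes the batch: three sentences of lengths 4/3/2 become\n# max_len rows of batch_size columns, i.e. the (seq_len, batch) layout PyTorch RNNs expect;\n# binaryMatrix then marks real tokens with 1 and PAD positions with 0 for use as a loss mask.\n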
\n\n# Returns padded target sequence tensor, padding mask, and max target length\ndef outputVar(l, voc):\n indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]\n max_target_len = max([len(indexes) for indexes in indexes_batch])\n padList = zeroPadding(indexes_batch, voc.PAD_token)\n mask = binaryMatrix(padList, voc.PAD_token)\n mask = torch.ByteTensor(mask)\n padVar = torch.LongTensor(padList)\n return padVar, mask, max_target_len\n\n","sub_path":"phrases/model/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"101632245","text":"import shlex\nimport subprocess\nimport argparse\nimport os\nimport difflib\nimport sys\n\n# NOTE: the HTML markup inside this script's string literals was stripped during text\n# extraction; the tags below are a minimal reconstruction of the lost markup.\nhtml_begin ='''\n<!DOCTYPE html>\n<html>\n<head>\n <title>teffer</title>\n</head>\n<body>\n'''\n
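\n# (Added note) The legend below mirrors the summary table difflib.HtmlDiff emits; its\n# diff_add / diff_chg / diff_sub CSS classes are the ones make_table() assigns to added,\n# changed and deleted lines, so the colors match the generated diff tables.\n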
html_legend = '''\n<table class=\"diff\" summary=\"Legends\">\n <tr> <th colspan=\"2\"> Legends </th> </tr>\n <tr> <td> <table border=\"\" summary=\"Colors\">\n <tr><th> Colors </th> </tr>\n <tr><td class=\"diff_add\">&nbsp;Added&nbsp;</td></tr>\n <tr><td class=\"diff_chg\">Changed</td> </tr>\n <tr><td class=\"diff_sub\">Deleted</td> </tr>\n </table></td>\n <td> <table border=\"\" summary=\"Links\">\n <tr><th colspan=\"2\"> Links </th> </tr>\n <tr><td>(f)irst change</td> </tr>\n <tr><td>(n)ext change</td> </tr>\n <tr><td>(t)op</td> </tr>\n </table></td> </tr>\n</table>\n'''\nhtml_end = '''\n</body>\n</html>\n'''\n\nexpected = \"expected.txt\"\nactual = \"actual.txt\"\n\nhelp_str = '''\nThe arguments to this script are all positional, other than the help command.\nIf the first argument is -h, this message will be printed.\nOtherwise, the first argument is always assumed to be the diff output directory.\nAll following arguments are the files to diff against one-another.\n'''\n\noutput = 'diff'\nif not os.path.exists(output):\n os.makedirs(output)\n\nfile_counter = 0\nindex_file = open(os.path.join(output, 'index.html'), \"w\")\nindex_file.write(html_begin)\nindex_file.write('<h1>teffer diff results</h1>')\nindex_file.write('<hr/>')\n\nif(len(sys.argv) <= 2):\n print(help_str)\n sys.exit()\n\nfiles = sys.argv[2:]\n\ndiff_map = {}\n\nfor i in files:\n for j in files:\n if i != j:\n i_lines = open(i, 'U').readlines()\n j_lines = open(j, 'U').readlines()\n diff = difflib.HtmlDiff().make_table(i_lines, j_lines, i, j)\n\n dfn = str(file_counter) + '.html'\n diff_file = open(os.path.join(output, dfn), \"w\")\n file_counter += 1\n \n diff_file.write(html_begin)\n diff_file.write('<h1>teffer diff results</h1>')\n diff_file.write('<h2>' + i + ' -- ' + j + '</h2>')\n diff_file.write('<hr/>')\n diff_file.write(diff)\n diff_file.write('<hr/>')\n diff_file.write(html_end)\n\n\n lines = max(len(i_lines), len(j_lines)) * 2\n\n difference = 0\n for line in difflib.unified_diff(i_lines, j_lines, fromfile=i, tofile=j):\n if line.startswith('-') or line.startswith('+'):\n difference += 1\n rating = difference / lines\n \n link = '<a href=\"' + dfn + '\">' + i + ' and ' + j + ' ( ' + str(round(rating, 2)) + ' difference)</a>'\n \n diff_map[rating] = link\n\nfor key in sorted(diff_map.keys()):\n index_file.write('<p>')\n index_file.write(diff_map[key])\n index_file.write('</p>')\n \nindex_file.write('<hr/>')\n
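\n# (Added note) diff_map is keyed on the similarity rating, so two file pairs with an\n# identical rating would overwrite one another; collecting (rating, link) tuples instead\n# of a dict would preserve every pair.\n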
index_file.write(html_end)\nindex_file.close()\n\n","sub_path":"pcomp.py","file_name":"pcomp.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"131274245","text":"from django.utils import translation\nfrom django.shortcuts import render, redirect\nfrom django.conf import settings\n\n\ndef index(request):\n if request.LANGUAGE_CODE:\n context = {'LANGUAGE_CODE': request.LANGUAGE_CODE}\n else:\n context = {'LANGUAGE_CODE': request.session[translation.LANGUAGE_SESSION_KEY]}\n return render(request, 'languages/index.html', context)\n\n\ndef set_language_from_url(request, user_language):\n if user_language == 'pt':\n user_language = 'pt-br'\n translation.activate(user_language)\n request.session[translation.LANGUAGE_SESSION_KEY] = user_language\n return redirect('core:home')\n","sub_path":"ocp/languages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"368495496","text":"import AST\nfrom AST import addToClass\n\n'''\nModule containing the semantic analyzer for the Natural language.\nIt builds the program's execution flow (with threading of the AST).\nUsing this flow, it performs semantic analysis on the program.\n'''\n\nvariable = dict()\nscopeForId = 0\nscopeForDepth = 0 \n\n\n@addToClass(AST.Node)\ndef thread(self, lastNode):\n for c in self.children:\n lastNode = c.thread(lastNode)\n lastNode.addNext(self)\n return self\n\n@addToClass(AST.AssignNode)\ndef thread(self, lastNode):\n expression = self.children[1]\n identifier = self.children[0]\n if identifier.tok not in variable.keys():\n variable[identifier.tok] = (scopeForId,scopeForDepth)\n lastNode = identifier.thread(lastNode)\n lastNode = expression.thread(lastNode)\n lastNode.addNext(self)\n return self\n\n@addToClass(AST.TokenNode)\ndef thread(self, lastNode):\n # Handling instantiation error (WARNING)\n if not (self.tok in variable.keys() or isinstance(self.tok,(int,float))):\n print(\"WARNING : variable '%s' must be instantiated before use\" % self.tok)\n # Handling scope error (WARNING) for 'for' loop\n if self.tok in variable.keys():\n if not ((variable[self.tok][0] == scopeForId or variable[self.tok][0] == 0) and variable[self.tok][1] <= scopeForDepth):\n print(\"WARNING : variable '%s' scope isn't reachable !\" % self.tok)\n lastNode.addNext(self)\n return self\n\n@addToClass(AST.ForNode)\ndef thread(self, lastNode):\n global scopeForId\n global scopeForDepth\n scopeForDepth +=1\n if scopeForDepth==1:\n scopeForId +=1\n beforeCond = lastNode\n exitIterator = self.children[0].thread(lastNode)\n exitCond = self.children[1].thread(exitIterator)\n exitCond.addNext(self)\n exitBody = self.children[2].thread(self)\n exitBody.addNext(beforeCond.next[-1])\n scopeForDepth-=1\n return self\n\n@addToClass(AST.IfNode)\ndef thread(self, lastNode):\n condition = self.children[0].thread(lastNode)\n condition.addNext(self)\n prog1 = self.children[1].thread(self)\n prog1.addNext(self)\n prog2 = self.children[2].thread(self)\n prog2.addNext(self)\n return self\n
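\n# (Added note) Threading makes each node point at its successor in evaluation order via\n# addNext: for an assignment like a = 1 + 2 the flow visits the identifier, both operand\n# tokens, the operator node, and finally the assignment node itself.\n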
print(\"WARNING : division by zero !\")\n lastNode.addNext(self)\n return self\n\ndef thread(tree):\n entry = AST.EntryNode()\n tree.thread(entry)\n return entry\n\n\n\nif __name__ == \"__main__\":\n from naturalParser import parse\n import sys, os\n prog = open(sys.argv[1]).read()\n ast = parse(prog)\n entry = thread(ast)\n\n graph = ast.makegraphicaltree()\n entry.threadTree(graph)\n \n name = os.path.splitext(sys.argv[1])[0]+'-ast-threaded.pdf'\n graph.write_pdf(name) \n \n print (\"wrote threaded ast to\", name) ","sub_path":"threader.py","file_name":"threader.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"136410839","text":"import sys\r\nfrom PyQt4.QtGui import QSystemTrayIcon,QWidget,QMenu,QIcon,QApplication\r\nfrom PyQt4.QtCore import QTimer,QThread\r\nfrom window import MainWindow\r\n\r\ntry:\r\n from envimkey import EnVimKey\r\nexcept ImportError:\r\n print(\"import error\")\r\n sys.exit()\r\n\r\nclass SystemTrayIcon(QSystemTrayIcon):\r\n\r\n def __init__(self,envimkey, icon, parent=None):\r\n QSystemTrayIcon.__init__(self, icon, parent)\r\n self.envimkey = envimkey\r\n self.parent = parent\r\n menu = QMenu(parent)\r\n exit_action = menu.addAction(\"Exit\")\r\n exit_action.triggered.connect(parent.close)\r\n envimkey.about_to_quit.connect(parent.close)\r\n # envimkey.about_to_quit.connect(self.close_test)\r\n self.setContextMenu(menu)\r\n self.message_icon = QSystemTrayIcon.MessageIcon(1)\r\n\r\n def show_balloon(self, title, message, show_time):\r\n self.showMessage(title, message, self.message_icon, show_time)\r\n\r\n def close_test(self):\r\n print(\"test\")\r\n self.parent().close()\r\n\r\ndef main():\r\n\r\n app = QApplication(sys.argv)\r\n\r\n envimkey=EnVimKey()\r\n systemtray_widget = QWidget()\r\n systemtray_icon = SystemTrayIcon(envimkey, QIcon(\"hidebox_close.png\"), systemtray_widget)\r\n envimkey.changed_mode[str, str, int].connect(systemtray_icon.show_balloon)\r\n\r\n main_window = MainWindow()\r\n # main_window_thread = QThread()\r\n # main_window_thread.finished.connect(main_window.deleteLater)\r\n # main_window.moveToThread(main_window_thread)\r\n envimkey.about_to_quit.connect(main_window.close)\r\n envimkey.about_to_search[object].connect(main_window.test)\r\n # main_window_thread.start()\r\n\r\n envimkey_thread = QThread()\r\n envimkey_thread.finished.connect(envimkey.deleteLater)\r\n envimkey.moveToThread(envimkey_thread)\r\n envimkey_thread.start()\r\n\r\n # QTのループが始まるときにon_startが呼ばれるようにする\r\n QTimer.singleShot(0, envimkey.keylock)\r\n # 終了する直前にon_stopを呼ぶようにする\r\n app.aboutToQuit.connect(envimkey.key_unlock)\r\n app.aboutToQuit.connect(main_window.deleteLater)\r\n\r\n # システムトレイに表示する\r\n systemtray_icon.show()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"systemtrayapp.py","file_name":"systemtrayapp.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"643513975","text":"\"\"\"Add Position\n\nRevision ID: 4ee2d02f091\nRevises: 4ad3f96f41b\nCreate Date: 2014-08-26 17:52:03.089830\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4ee2d02f091'\ndown_revision = '4ad3f96f41b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"systemtrayapp.py","file_name":"systemtrayapp.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"643513975","text":"\"\"\"Add Position\n\nRevision ID: 4ee2d02f091\nRevises: 4ad3f96f41b\nCreate Date: 2014-08-26 17:52:03.089830\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4ee2d02f091'\ndown_revision = '4ad3f96f41b'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('positions',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('x', sa.Float(), nullable=False),\n sa.Column('y', sa.Float(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['musics.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('positions')\n ### end Alembic commands ###\n","sub_path":"tento/migrations/versions/4ee2d02f091_add_position.py","file_name":"4ee2d02f091_add_position.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"10260726","text":"def oddTup(aTup):\r\n new_Tup=()\r\n # fixed off-by-one: range must stop at len(aTup), not len(aTup)+1, otherwise\r\n # even-length tuples raise IndexError when i reaches len(aTup)\r\n for i in range(0,len(aTup),2):\r\n new_Tup=new_Tup + (aTup[i],)\r\n return new_Tup\r\n\r\ndef main():\r\n x=(\"I\",\"am\",\"a\",\"student\",\"at\",\"Politechnika\",\"Warszawska\")\r\n print(oddTup(x))\r\n\r\nif __name__==\"__main__\":\r\n main()","sub_path":"mit_week3.py","file_name":"mit_week3.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"417051293","text":"import frappe\nfrom community.www.courses.utils import redirect_if_not_a_member, get_course, get_instructor, get_batch\n\ndef get_context(context):\n context.no_cache = 1\n context.course_slug = frappe.form_dict[\"course\"]\n context.course = get_course(context.course_slug)\n context.batch_code = frappe.form_dict[\"batch\"]\n redirect_if_not_a_member(context.course_slug, context.batch_code)\n\n context.instructor = get_instructor(context.course.owner)\n context.batch = get_batch(context.batch_code)\n context.mentors = get_mentors(context.batch.name)\n print(context.mentors)\n\ndef get_mentors(batch):\n mentors = []\n memberships = frappe.get_all(\"LMS Batch Membership\", {\"batch\": batch, \"member_type\": \"Mentor\"}, [\"member\"])\n for membership in memberships:\n member = frappe.db.get_value(\"Community Member\", membership.member, [\"name\",\"full_name\"], as_dict=True)\n member.batch_count = len(frappe.get_all(\"LMS Batch Membership\", {\"member\": member.name, \"member_type\": \"Mentor\"}))\n mentors.append(member)\n return mentors","sub_path":"community/www/courses/about/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"445249192","text":"import googlemaps\nimport pygmaps\n\nfrom googlemaps import convert\nfrom googlemaps.roads import _roads_extract\nfrom JavaConnection import jarWrapper\n\n_ROADS_BASE_URL = \"https://roads.googleapis.com\"\nkey = \"AIzaSyBRJ7VFUc_vxT0FyCJNDpX9rrKN0i6QnKk\"\nclient = googlemaps.Client(key)\n\npathList = []\n\nDate = \"2015-03-12\"\nTime = \"10\"\nID = \"14100064\"\n\nargs = [\"C:\\Dataconnectie-1-jar-with-dependencies.jar\", \"positions\", Date, Time, ID] #connections, positions, events, monitoring\nresult = jarWrapper(*args)\n\nif(result.__len__() == 0):\n print(\"No Result\")\nelse:\n for i in range(result.__len__()):\n Longitude = float(result[i][\"Longitude\"].encode('ascii', 'ignore'))\n Latitude = float(result[i][\"Lattitude\"].encode('ascii', 'ignore'))\n Locations = (Latitude, Longitude)\n pathList.append(Locations)\n print (pathList[i])\n\n print (pathList)\n\n def snap_to_roads(client, path, interpolate=False):\n\n params = {\"path\": 
convert.location_list(path)}\n\n if interpolate:\n params[\"interpolate\"] = \"true\"\n\n return client._get(\"/v1/snapToRoads\", params,\n base_url=_ROADS_BASE_URL,\n accepts_clientid=False,\n extract_body=_roads_extract)[\"snappedPoints\"]\n\n\n iets = snap_to_roads(client, pathList)\n print (iets)\n\n mymap = pygmaps.maps(51.4333, 5.4833, 10)\n\n for i in xrange(len(iets)):\n x = iets[i]['location']['latitude']\n y = iets[i]['location']['longitude']\n print(x)\n print(y)\n mymap.addpoint(x, y)\n\n mymap.draw('RoadsAPI.html')\n","sub_path":"Python/GoogleMapsRoadAPI.py","file_name":"GoogleMapsRoadAPI.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"255686778","text":"import numpy\nfrom LinkedListQueue import queue\nclass Graph:\n def __init__(self, vertices):\n self._vertices = vertices\n self._adjmatrix = numpy.zeros((vertices, vertices))\n self._size = 0\n def addEdge(self, u, v, w = 1):\n self._adjmatrix[u][v] = w\n def deleteEdge(self, u, v):\n self._adjmatrix[u][v] = 0\n def bfs(self, source):\n i = source\n q = queue()\n q.push(source)\n l = [0]*self._vertices\n l[i] = 1\n print(i, end= '->')\n while not q.is_empty():\n i = q.pop()\n for j in range(self._vertices):\n if self._adjmatrix[i][j] == 1 and l[j] == 0:\n print(j, end='->')\n q.push(j)\n l[j] = 1\ng = Graph(7)\ng.addEdge(0, 1)\ng.addEdge(0, 5)\ng.addEdge(0, 4)\ng.addEdge(0, 2)\ng.addEdge(2, 5)\ng.addEdge(2, 3)\ng.addEdge(6, 3)\ng.addEdge(1, 3)\ng.addEdge(6, 4)\ng.addEdge(1, 6)\nprint(g._adjmatrix)\ng.bfs(0)","sub_path":"Python/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"552916648","text":"# coding=utf-8\n\"\"\"Tools to analyze tweets.\"\"\"\nimport itertools\nimport multiprocessing\nimport unicodedata\nfrom typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple\n\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize.casual import TweetTokenizer\nfrom nltk.util import ngrams\n\nfrom tp.db import count, read\n\n\ndef count_ngrams_in_tweets(\n jobs: Optional[int],\n *,\n ngram_len: int,\n party: Optional[str],\n reporter: Optional[Callable] = None) -> Dict[Tuple[str, ...], int]:\n \"\"\"Find the top ngrams in the corpus of tweets.\n\n :param jobs: The number of processes to spawn. If ``None``, spawn one per\n CPU.\n :param ngram_len: The length of ngrams.\n :param party: The party whose tweets should be analyze. If ``None``, all\n tweets are analyzed.\n :param reporter: A function that reports progress to the user. Must accept\n one argument, where that argument is a multiprocessing ``Connection``\n object. 
Values from 0 to 1, inclusive, will be sent through the\n connection.\n :return: A dict mapping each ngram to the number of times it appears within\n the corpus of tweets currently in the database.\n \"\"\"\n totals: Dict[Tuple[str, ...], int] = {}\n with multiprocessing.Pool(jobs) as pool:\n # chunksize chosen empirically with an R7 1700 CPU\n for ngram_counts in pool.imap_unordered(\n func=call_cnit,\n iterable=gen_cnit_args(ngram_len, party, reporter),\n chunksize=2**5):\n for ngram, ncount in ngram_counts.items():\n totals.setdefault(ngram, 0)\n totals[ngram] += ncount\n return totals\n\n\ndef call_cnit(args):\n \"\"\"Call :meth:`tp.analyze.count_ngrams_in_tweet`.\"\"\"\n return count_ngrams_in_tweet(*args)\n\n\ndef gen_cnit_args(\n ngram_len: int,\n party: Optional[str],\n reporter: Optional[Callable] = None) -> Iterator[Tuple[str, int]]:\n \"\"\"Generate arguments for :meth:`tp.analyze.count_ngrams_in_tweet`.\n\n :param ngram_len: Passed through to\n :meth:`tp.analyze.count_ngrams_in_tweet`.\n :param party: If specified, only yield tweets from the given party, instead\n of all tweets.\n :param reporter: A function that reports progress to the user. Must accept\n one argument, where that argument is a multiprocessing ``Connection``\n object. Values from 0 to 1, inclusive, will be sent through the\n connection.\n :return: A generator that yields tuples of arguments.\n \"\"\"\n if reporter:\n num_tweets: int = count.tweets(party)\n tweets_yielded: int = 0\n\n conn_out: multiprocessing.connection.Connection\n conn_in: multiprocessing.connection.Connection\n conn_out, conn_in = multiprocessing.Pipe(duplex=False)\n proc: multiprocessing.Process = (\n multiprocessing.Process(target=reporter, args=(conn_out,))\n )\n proc.start()\n\n for tweet in read.tweets(party):\n yield (tweet, ngram_len)\n\n if reporter:\n tweets_yielded += 1\n if tweets_yielded % 2**8 == 0:\n conn_in.send(tweets_yielded / num_tweets)\n\n if reporter:\n conn_in.send(1)\n conn_in.close()\n proc.join()\n\n\ndef count_ngrams_in_tweet(\n tweet: str,\n ngram_len: int) -> Dict[Tuple[str, ...], int]:\n \"\"\"Find ngrams within a tweet.\n\n :param tweet: The tweet to analyze.\n :return: A dict mapping each ngram to the number of times it appears within\n the given tweet.\n \"\"\"\n stemmer = SnowballStemmer('english')\n tweet_tokenizer = TweetTokenizer()\n ngram_counts: Dict[Tuple[str, ...], int] = {}\n for sentence in sent_tokenize(tweet):\n words: Iterable[str] = tweet_tokenizer.tokenize(sentence)\n no_punct = itertools.filterfalse(punctuation, words)\n # The stemmer lower-cases words.\n normalized_words: Iterator[str] = (stemmer.stem(word) for word in no_punct)\n ngram: List[str]\n for ngram in ngrams(normalized_words, n=ngram_len):\n immutable_ngram: Tuple[str, ...] 
= tuple(ngram)\n ngram_counts.setdefault(immutable_ngram, 0)\n ngram_counts[immutable_ngram] += 1\n return ngram_counts\n\n\ndef punctuation(string: str) -> bool:\n \"\"\"Tell whether the given string consists entirely of punctuation.\"\"\"\n # If this function is too slow, consider switching to the third-party regex\n # module and using its ability to match strings based on unicode attributes.\n for char in string:\n if not unicodedata.category(char).startswith('P'):\n return False\n return True\n","sub_path":"python/tweet-phraseologist/tp/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"188092547","text":"# Intersection of Two Arrays II\r\n# https://leetcode-cn.com/problems/intersection-of-two-arrays-ii/\r\n\r\nclass Solution:\r\n def intersect(self, nums1, nums2):\r\n \"\"\"\r\n :type nums1: List[int]\r\n :type nums2: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n \r\n num = []\r\n for i in nums1:\r\n if i in nums2 and len(nums2)>0:\r\n num.append(i)\r\n nums2.remove(i)\r\n \r\n return num\r\n","sub_path":"LeetCode/0350. Intersection of Two Arrays II.py","file_name":"0350. Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"134292501","text":"\"\"\"\nTrain a machine learning model on Ecobici trips data for trips between 2014-01-01 AND 2016-07-31\n\nThe aim is to predict bicycle trip duration in seconds based on the following features:\n\n- Gender\n- Age\n- Weekday vs. Weekend\n- Hour of Day\n- Month\n- Start location based on hexagonal grid\n\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.metrics import r2_score, median_absolute_error, mean_squared_error, mean_absolute_error\nfrom sklearn.externals import joblib\nimport logging\nimport argparse\n\n\ndef regression_report(y_true, y_pred):\n report = \"\"\n report += \"R2 Score:\\t\\t{}\".format(r2_score(y_true, y_pred))\n report += \"\\n\\n\"\n report += \"median_absolute_error:\\t\\t{}\".format(\n median_absolute_error(y_true, y_pred))\n report += \"\\n\\n\"\n report += \"mean_squared_error:\\t\\t{}\".format(\n mean_squared_error(y_true, y_pred))\n report += \"\\n\\n\"\n report += \"mean_absolute_error:\\t\\t{}\".format(\n mean_absolute_error(y_true, y_pred))\n report += \"\\n\\n\"\n return report\n\nif __name__ == \"__main__\":\n\n logging.basicConfig(filename=\"output/fit_model.log\", level=logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-l\", \"--loss\", help=\"Loss function\")\n parser.add_argument(\n \"-p\", \"--penalty\", help=\"Penalty\")\n parser.add_argument(\n \"-e\", \"--eta\", help=\"Eta0\")\n parser.add_argument(\n \"-c\", \"--chunksize\", help=\"Chunksize for processing training data\")\n args = parser.parse_args()\n\n loss = str(args.loss)\n penalty = str(args.penalty)\n eta = float(args.eta)\n chunksize = int(args.chunksize)\n\n epsilon = 60\n num_iterations = 1\n random_state = 1\n\n clf = SGDRegressor(\n loss=loss, penalty=penalty, n_iter=num_iterations, epsilon=epsilon, random_state=random_state, eta0=eta)\n\n # Iterate over the training data\n i = 1\n for chunk in pd.read_hdf('output/train.h5', chunksize=chunksize):\n logging.info(\"Processing chunk {}\".format(i))\n chunk = chunk.drop('Unnamed: 0', axis=1)\n cols = chunk.columns.tolist()\n cols.remove('duration')\n y = 
np.ravel(chunk['duration'])\n chunk = chunk.drop('duration', axis=1)\n clf.partial_fit(chunk, y)\n i += 1\n\n logging.info(\"Starting test run\")\n i = 1\n # Make some predictions also in chunks\n for chunk in pd.read_hdf('output/test.h5', chunksize=chunksize):\n logging.info(\"Processing chunk {}\".format(i))\n chunk = chunk.drop('Unnamed: 0', axis=1)\n cols = chunk.columns.tolist()\n cols.remove('duration')\n y = np.ravel(chunk['duration'])\n chunk = chunk.drop('duration', axis=1)\n\n y_pred = clf.predict(chunk)\n df = pd.DataFrame(np.array([y, y_pred]).T,\n columns=['True', 'Predicted'])\n df.to_hdf('output/predicted.h5', 'predicted', append=True, format='t')\n i += 1\n\n logging.info(\"Save model to disk:\\n\")\n joblib.dump(clf, 'models/regressor/regressor.pkl') \n logging.info(\"Detailed Regression report:\\n\")\n df = pd.read_hdf('output/predicted.h5')\n logging.info(regression_report(df['True'], df['Predicted']))\n \n","sub_path":"fit_regression_model.py","file_name":"fit_regression_model.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"120189116","text":"import csv\n\ndef main():\n data = []\n courseCode = []\n courseCredit = []\n courseGrade = []\n with open(\"dataIO.txt\") as f:\n reader = csv.reader(f,delimiter=\",\")\n for row in reader:\n data.append(row)\n courseCode.append(row[2])\n courseCredit.append(row[3])\n courseGrade.append(row[4])\n length=len(data)\n counter = 0\n #student1\n courseCodeGroup=[]\n courseCreditGroup = []\n courseGradeGroup=[]\n # student2\n courseCodeGroup2 = []\n courseCreditGroup2 = []\n courseGradeGroup2 = []\n # student3\n courseCodeGroup3 = []\n courseCreditGroup3 = []\n courseGradeGroup3 = []\n for row in data:\n if(counter<3):\n courseCodeGroup.append(courseCode[counter])\n courseCreditGroup.append(courseCredit[counter])\n courseGradeGroup.append(courseGrade[counter])\n elif(counter<7):\n courseCodeGroup2.append(courseCode[counter])\n courseCreditGroup2.append(courseCredit[counter])\n courseGradeGroup2.append(courseGrade[counter])\n else:\n courseCodeGroup3.append(courseCode[counter])\n courseCreditGroup3.append(courseCredit[counter])\n courseGradeGroup3.append(courseGrade[counter])\n\n counter=counter+1\n\n printReport(\"BOKOW, R.\", \"2333021\", courseCodeGroup, courseCreditGroup, courseGradeGroup)\n printReport(\"FALLIN, D.\", \"2574063\", courseCodeGroup2, courseCreditGroup2, courseGradeGroup2)\n printReport(\"KINGSLEY, M.\", \"2663628\", courseCodeGroup3, courseCreditGroup3, courseGradeGroup3)\n\n\ndef getGradeNumber(grade,strCredit):\n credit=int(strCredit)\n if grade=='A':\n return 4*credit\n elif grade=='B':\n return 3*credit\n elif grade == 'C':\n return 2*credit\n elif grade == 'D':\n return 1*credit\n else:\n return 0\n\ndef printReport(name,id,courseArray,creditArray,gradeArray):\n report = open(name+\".txt\", \"w\")\n report.write(\"Student Name:\" + name+\"\\n\")\n report.write(\"Student ID Number: \"+id+\"\\n\")\n report.write(\"Course Code \\t Course Credits \\t Course Grade\\n\")\n counter=0\n creditArrayINT=[]\n gradeArrayINT=[]\n for x in courseArray:\n report.write(x+\" \\t\\t\\t \"+creditArray[counter]+\" \\t\\t\\t\\t\\t \"+gradeArray[counter]+\"\\n\")\n creditArrayINT.append(int(creditArray[counter]))\n gradeArrayINT.append(getGradeNumber(gradeArray[counter],creditArray[counter]))\n counter=counter+1\n #print(\"Total Semester course credits completed: \"+str(sum(creditArrayINT)))\n 
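The script above streams HDF5 chunks through SGDRegressor.partial_fit so the full dataset never has to fit in memory. Below is a self-contained sketch of that out-of-core pattern, with synthetic chunks standing in for the HDF5 files; note the record's n_iter argument belongs to an old scikit-learn API, and current versions use max_iter:

```python
# Out-of-core training sketch: each loop iteration plays the role of one
# pd.read_hdf chunk; partial_fit updates the model incrementally.
import numpy as np
from sklearn.linear_model import SGDRegressor

rng = np.random.default_rng(0)
true_w = np.array([1.0, -2.0, 0.5, 0.0, 3.0])
model = SGDRegressor(loss="squared_error", penalty="l2", eta0=0.01, random_state=1)

for _ in range(20):  # pretend each pass is one chunk read from disk
    X = rng.normal(size=(256, 5))
    y = X @ true_w + rng.normal(scale=0.1, size=256)
    model.partial_fit(X, y)  # constant memory, incremental update

print(model.coef_.round(1))  # approaches true_w
```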
gpa=str(round(sum(gradeArrayINT)/sum(creditArrayINT),1))\n report.write(\"Total Semester Course Credits Compled: \"+ str(sum(creditArrayINT))+\"\\n\")\n report.write(\"Semester GPA:\"+ gpa+\"\\n\")\n\nmain()\n","sub_path":"HW4/gpaprint.py","file_name":"gpaprint.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"145662587","text":"M,N,K = map(int,input().split(\" \"))\nboard = [[0 for i in range(N)] for j in range(M)]\nfor i in range(K) :\n x,y,x2,y2 = map(int,input().split(\" \"))\n for j in range(x,x2) :\n for k in range(y,y2) :\n board[k][j] = 1\n# print(M,N,K)\n# print(board)\n\n\n# M,N,K = 5,7,3\n# board = [[0, 0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 1, 1, 0], [1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0]]\n\n\n# for j in range(M) :\n# for i in range(N) :\n# if(board[j][i] == 0) :\n# q.append((i,j))\n\ndx = [0,0,-1,1]\ndy = [1,-1,0,0]\n\n# bfs\ndef bfs(q) :\n count = 0\n\n while(q) :\n ex,ey = q.pop(0)\n if (board[ey][ex] == 0):\n count +=1\n board[ey][ex] = 1\n\n for i in range(4) :\n nx = ex+dx[i]\n ny = ey+dy[i]\n if(nx>=0 and nx=0 and ny 100:\n raise ValueError('score must between 0 ~ 100!')\n self._score = value\n\ns = Student()\ns.set_score(60) # ok!\nprint (s.get_score())\n# 60 \n# s.set_score(9999) # score must between 0 ~ 100!\n\n\n\n# 案例2\n# 利用装饰器\n# @property 装饰器 把 score 方法变成了属性,同时创建了 @score.setter 装饰器\n# @score.setter 装饰器 负责把方法 变成属性赋值时调用\nclass Studen(object):\n\n @property # 装饰器作用: 读取时 使用这个函数\n def score(self):\n return self._score\n\n @score.setter # 装饰器作用: 写入时 用这个函数\n def score(self, value):\n if not isinstance(value, int):\n raise ValueError('score must be an integer!')\n if value < 0 or value > 100:\n raise ValueError('score must between 0 ~ 100!')\n self._score = value\n\n\ns = Studen()\ns.score = 66 # OK,实际转化为s.set_score(60)\nprint(s.score) # OK,实际转化为s.get_score()\n# 60\n# s.score = 9999\n\n# @property 装饰器 还可以定义只读属性 只使用 @property 而不使用 @score.setter\n\n# 下面的birth是可读写属性,而age就是一个只读属性,因为age可以根据birth和当前时间计算出来。\nclass Stude(object):\n\n @property\n def birth(self):\n return self._birth\n\n @birth.setter\n def birth(self, value):\n self._birth = value\n\n @property\n def age(self):\n return 2015 - self._birth\n\ns = Stude()\ns.birth = 1990\n# s.age = 35 # AttributeError: can't set attribute 这个属性不可写。只能读\n\nprint(\"birth:\",s.birth)\nprint(\"age:\",s.age)\n\n","sub_path":"oop/demo2/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"150203017","text":"#!/user/bin/env python\n\n# THIS IS THE ARDUINO SERVER - MINIMAL CHANGES: DIFFERENT SQLITE FILE, DIFFERENT PYGMENTS LEXER\n\n# Expose a json-rpc service that stores into a sqlite db and queries from it\n#\n# bjoern 6/27/09 foo boo\n# @see http://json-rpc.org/wiki/python-json-rpc\n\nfrom jsonrpc import handleCGI, ServiceMethod\nfrom pysqlite2 import dbapi2 as sqlite\nimport difflib\nfrom pygments import highlight\nfrom pygments.lexers import CppLexer\nfrom tokenlineformatter import TokenLineFormatter\nimport re\n\nclass HelpMeOutService(object):\n def connect(self):\n #todo: added fields here\n con = sqlite.connect(\"/project/helpmeout-server/db/helpmeout-arduino.sqlite\") #ARUDINO - use different db\n con.execute('create table if not exists compilererrors(id INTEGER PRIMARY KEY,timestamp,errmsg,diff,votes INTEGER DEFAULT 0)')\n con.execute('CREATE TABLE if not exists exceptions(id INTEGER 
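The bfs record above is garbled exactly where its bounds check should be: the `<` comparisons and some surrounding content appear to have been lost, most likely to HTML stripping. A hedged reconstruction of the standard in-bounds test for an N-wide, M-tall grid follows; this is an assumption based on the visible dx/dy setup, not recovered text:

```python
# Conventional grid-BFS bounds check; N (width) and M (height) follow the
# record's input order.
def in_bounds(nx, ny, N, M):
    return 0 <= nx < N and 0 <= ny < M

# Typical use inside the neighbor loop sketched in that record:
#   if in_bounds(nx, ny, N, M) and board[ny][nx] == 0:
#       q.append((nx, ny))
print(in_bounds(0, 0, 7, 5), in_bounds(7, 0, 7, 5))  # True False
```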
PRIMARY KEY,timestamp VARCHAR,errmsg VARCHAR,line VARCHAR,stacktrace VARCHAR,diff VARCHAR, votes INTEGER DEFAULT 0)')\n con.execute('CREATE TABLE if not exists querylog(id INTEGER PRIMARY KEY, timestamp VARCHAR, type INTEGER, status INTEGER, error VARCHAR, code VARCHAR, stacktrace VARCHAR)')\n con.execute('CREATE TABLE if not exists comments(id INTEGER PRIMARY KEY, timestamp VARCHAR, type INTEGER, fix_id INTEGER, comment VARCHAR)')\n con.execute('CREATE TABLE if not exists fixidlog(id INTEGER PRIMARY KEY, log_id INTEGER, fix_id INTEGER)')\n con.execute('CREATE TABLE if not exists idelog(id INTEGER PRIMARY KEY, timestamp VARCHAR, log VARCHAR)')\n return con\n \n def find_best_exception_match(self,code,querytrace,arr,num_results=3) :\n htmlDiff = difflib.HtmlDiff()\n ranked_results = []\n #clean up querytrace\n lines = querytrace.splitlines(1)\n good_lines = [l for l in lines if (l.strip().startswith(\"at\") and not l.strip().startswith(\"at bsh.\"))]\n cleaned_querytrace = ''.join(good_lines)\n \n for einfo in arr:\n fixid = einfo[0]\n trace = einfo[1]\n diff = einfo[2]\n all_lines = diff.splitlines(1)\n old_lines = [l[2:] for l in all_lines if l.startswith('-')]\n new_lines = [l[2:] for l in all_lines if l.startswith('+')]\n votes = einfo[3]\n comment = einfo[4]\n #clean fix trace\n lines = trace.splitlines(1)\n good_lines = [l for l in lines if (l.strip().startswith(\"at\") and not l.strip().startswith(\"at bsh.\"))]\n cleaned_trace = ''.join(good_lines)\n # calculate similarity between cleaned stacktraces\n s = difflib.SequenceMatcher(None,cleaned_querytrace,cleaned_trace)\n ratio = s.ratio()\n \n # generate a html diff report\n file1 = difflib.restore(all_lines,1)\n file2 = difflib.restore(all_lines,2)\n # use more context for exceptions\n table = htmlDiff.make_table(file1,file2,\"Before (Broken)\",\"After (Fixed)\",context=True,numlines=1)\n\n # hack: for now make sure everything we return is a list of strings\n ranked_results.append((ratio,votes,{'id':[str(fixid),''],'old':old_lines,'new':new_lines,'table':[table,''],'comment':[comment,'']}))\n \n #sort by decreasing similarity ranking \n ranked_results.sort(reverse=True)\n \n # take voting into account\n vote_ranked_results = [r[1:] for r in ranked_results[0:(2*num_results)]]\n vote_ranked_results.sort(reverse=True)\n\n #now return up to top num_results as [{'new':[new_lines1],'old':[old_lines1]),...]\n return [r[1] for r in vote_ranked_results[0:num_results]] # how many?\n \n # Find the best example fixes in our database for the error \n # assume error is a single line string, \n # assume diffs is an array of (id,multiline string,votes) tuples\n def find_best_source_match(self,error_line,diffs,num_results=3):\n htmlDiff = difflib.HtmlDiff()\n \n ranked_diffs = []\n for diff in diffs:\n fixid = diff[0]\n votes = diff[2]\n comment = diff[3]\n max_sim = 0.0 #maximum similarity metric found so far\n all_lines = diff[1].splitlines(1)\n \n # split the diff report into lines unique to broken, fixed source\n # [2:] omits the \"- \" and \"+ \" of the diff format.\n old_lines = [l[2:] for l in all_lines if l.startswith('-')]\n new_lines = [l[2:] for l in all_lines if l.startswith('+')]\n \n # within broken old lines, find line with max similarity to error_line argument\n # version 1: ofperate directly on source text\n #for old_line in old_lines:\n # s = difflib.SequenceMatcher(None,error_line,old_line)\n # ratio = s.ratio() # calculate edit-distance based metric, [0.0-1.0]\n # if ratio > max_sim:\n # max_sim = ratio # new maximum-similarity 
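The matching logic above leans on difflib.SequenceMatcher.ratio() applied to cleaned stack traces. A standalone sketch of that metric, with invented trace strings:

```python
# ratio() returns an edit-distance-based similarity in [0.0, 1.0]; cleaned
# traces that differ only in line numbers score close to 1.0.
import difflib

trace_a = "at Foo.run(Foo.java:10)\nat Bar.main(Bar.java:3)\n"
trace_b = "at Foo.run(Foo.java:12)\nat Bar.main(Bar.java:3)\n"

ratio = difflib.SequenceMatcher(None, trace_a, trace_b).ratio()
print(round(ratio, 2))  # a high ratio suggests the same failure site
```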
line\n \n #version2: tokenized distance metric\n old_string = ''.join([l for l in old_lines]) #zip lines back up\n #tokenize both \n token_error_line = highlight(error_line, CppLexer(), TokenLineFormatter(\"arduino\")) #ARDUNIO\n token_old_string = highlight(old_string, CppLexer(), TokenLineFormatter(\"arduino\")) #ARDUINO\n for line in token_old_string.splitlines(1):\n s = difflib.SequenceMatcher(None,token_error_line,line)\n ratio = s.ratio()\n if ratio > max_sim:\n max_sim = ratio\n \n # generate a html diff report\n file1 = difflib.restore(all_lines,1)\n file2 = difflib.restore(all_lines,2)\n table = htmlDiff.make_table(file1,file2,\"Before (Broken)\",\"After (Fixed)\",context=True,numlines=0)\n \n # hack: for now make sure everything we return is a list of strings\n ranked_diffs.append((max_sim,votes,{'id':[str(fixid),''],'old':old_lines,'new':new_lines,'table':[table,''],'comment':[comment,'']}))\n \n #sort by decreasing similarity ranking \n ranked_diffs.sort(reverse=True)\n \n #how should we take voting into account?\n #take top 2N results (if we're returning n) and sort by votes\n vote_ranked_diffs = [r[1:] for r in ranked_diffs[0:(2*num_results)]]\n vote_ranked_diffs.sort(reverse=True)\n \n #now return up to top num_results as [{'new':[new_lines1],'old':[old_lines1]),...]\n return [r[1] for r in vote_ranked_diffs[0:num_results]] # how many?\n \n @ServiceMethod\n def echo(self,msg):\n return msg\n \n # store an IDE log into the db\n @ServiceMethod\n def storeidelog(self,log):\n con = self.connect()\n cur = con.cursor()\n cur.execute(\"insert into idelog values (null,datetime('now','localtime'),?)\",(log,))\n con.commit()\n return \"Stored log in db\"\n \n # store a record into the db\n @ServiceMethod\n def store(self,error,diff):\n con = self.connect()\n cur = con.cursor()\n cur.execute(\"insert into compilererrors values (null,datetime('now','localtime'),?,?,0)\",(error,diff))\n con.commit()\n return \"Stored error in db\"\n \n @ServiceMethod\n def storeexception(self,error,line,stacktrace,file1,file2):\n con = self.connect()\n cur = con.cursor()\n diff_obj = difflib.ndiff(file1.splitlines(1),file2.splitlines(1))\n diff_str = ''.join(diff_obj)\n cur.execute(\"insert into exceptions values (null,datetime('now','localtime'),?,?,?,?,0)\",(error,line,stacktrace,diff_str))\n con.commit()\n return \"Stored error into table exceptions\"\n \n # transmit string and all of both files\n # easier to call from java so we can generate right diff format in python\n @ServiceMethod\n def store2(self,error,file1,file2):\n diff_obj = difflib.ndiff(file1.splitlines(1),file2.splitlines(1))\n diff_str = ''.join(diff_obj)\n return self.store(error,diff_str)\n \n # vote for a fix - either up or down\n @ServiceMethod\n def errorvote(self,id,vote):\n con = self.connect()\n cur = con.cursor()\n res = cur.execute(\"update compilererrors set votes = (select votes from compilererrors where id = :id)+(:vote) where id=:id\",locals())\n con.commit()\n return \"Updated vote\"\n\n # vote for a fix - either up or down\n @ServiceMethod\n def errorvoteexception(self,id,vote):\n con = self.connect()\n cur = con.cursor()\n res = cur.execute(\"update exceptions set votes = (select votes from exceptions where id = :id)+(:vote) where id=:id\",locals())\n con.commit()\n return \"Updated vote\"\n \n #query the database\n #error comes in pre-cleaned in java - this did not work: cleaned_error = re.sub(u'[\\u201c].*?[\\u201d]','%',error)\n @ServiceMethod\n def query(self,error,code):\n con = self.connect()\n cur = 
con.cursor()\n # note: assumption is that there is at most one comment for each error.\n # otherwise we should group_concat(comment) but our sqlite3 version doesn't support it yet\n res = con.execute(\"SELECT compilererrors.id,diff,votes,comment FROM compilererrors LEFT JOIN comments ON compilererrors.id=comments.fix_id WHERE (comments.type IS NULL OR comments.type=0) AND errmsg LIKE ?\",(error,))\n if res==None:\n cur.execute(\"insert into querylog values (null, datetime('now','localtime'), 0, 2, ?, ?, '')\",(error,code))\n con.commit()\n return 'ERROR'\n arr = [d for d in res] # d[0] has id, d[1] has diff, d[2] the votes, d[3] the comment or None\n if len(arr)==0:\n cur.execute(\"insert into querylog values (null, datetime('now','localtime'), 0, 1, ?, ?, '')\",(error,code))\n con.commit()\n return 'NO_RESULT'\n # we have at least one useful result:\n # go over returned results one by one and check distance\n best = self.find_best_source_match(code,arr)\n best_ids = [int(r['id'][0]) for r in best]\n cur.execute(\"insert into querylog values (null, datetime('now','localtime'), 0, 0, ?, ?, '')\",(error,code))\n res = con.execute(\"select last_insert_rowid() from querylog\")\n last_query_log_id = [r for r in res][0][0]\n for id in best_ids:\n cur.execute(\"insert into fixidlog values(null, ?, ?)\",(last_query_log_id,id))\n con.commit()\n return best\n \n # query the database for matching exceptions\n @ServiceMethod\n def queryexception(self,error,code,stacktrace):\n con = self.connect()\n cur = con.cursor()\n res = con.execute(\"SELECT exceptions.id,stacktrace,diff,votes,comment FROM exceptions LEFT JOIN comments on exceptions.id=comments.fix_id WHERE (comments.type IS NULL OR comments.type=1) AND errmsg LIKE ?\",(error,))\n if res==None:\n cur.execute(\"insert into querylog values(null, datetime('now','localtime'), 1, 2, ?, ?, ?)\",(error,code,stacktrace))\n con.commit()\n return 'ERROR'\n arr = [d for d in res] # d[0] has id, d[1] has stacktrace, d[2] the diff, d[3] the votes, d[4] the comment\n if len(arr)==0:\n cur.execute(\"insert into querylog values(null, datetime('now','localtime'), 1, 1, ?, ?, ?)\",(error,code,stacktrace))\n con.commit()\n return 'NO_RESULT'\n best = self.find_best_exception_match(code,stacktrace,arr)\n best_ids = [int(r['id'][0]) for r in best]\n cur.execute(\"insert into querylog values (null, datetime('now','localtime'), 1, 0, ?, ?, ?)\",(error,code,stacktrace))\n res = con.execute(\"select last_insert_rowid() from querylog\")\n last_query_log_id = [r for r in res][0][0]\n for id in best_ids:\n cur.execute(\"insert into fixidlog values (null, ?, ?)\",(last_query_log_id,id))\n con.commit()\n return best\n\n #dump the entire database out\n @ServiceMethod\n def dumpdb(self):\n con = self.connect()\n res = con.execute(\"select * from compilererrors\")\n if res==None:\n return 'ERROR'\n return [d for d in res]\n \n \nservice = HelpMeOutService()\n","sub_path":"helpmeout-server/trunk/server-arduino-01.py","file_name":"server-arduino-01.py","file_ext":"py","file_size_in_byte":12368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"434333033","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass NormalizedActions(gym.ActionWrapper):\n def _action(self, action):\n low = self.action_space.low\n high = self.action_space.high\n\n action = low + (action + 1.0) * 0.5 * (high - low)\n action = np.clip(action, low, high)\n\n return action\n\n def _reverse_action(self, action):\n low = 
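Both store2() and the ranking code above depend on the ndiff/restore round trip: a single ndiff string encodes the broken and fixed source together, and either side can be rebuilt from it. A minimal demonstration:

```python
# ndiff keeps both files in one diff; restore(..., 1) rebuilds the first
# input and restore(..., 2) the second, as the query paths above rely on.
import difflib

before = "x = 1\nprint x\n".splitlines(1)
after = "x = 1\nprint(x)\n".splitlines(1)

diff_lines = list(difflib.ndiff(before, after))
print(''.join(difflib.restore(diff_lines, 1)), end='')  # broken version
print(''.join(difflib.restore(diff_lines, 2)), end='')  # fixed version
```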
self.action_space.low\n high = self.action_space.high\n\n action = 2 * (action - low) / (high - low) - 1\n action = np.clip(action, low, high)\n\n return action\n\n\ndef plotter(env_name, num_episodes, rewards_list, ylim):\n x = np.arange(0, num_episodes)\n y = np.asarray(rewards_list)\n plt.plot(x, y)\n plt.ylim(top=ylim + 10)\n plt.xlabel(\"Number of Episodes\")\n plt.ylabel(\"Avg Rewards Last 100 Episodes\")\n plt.title(\"Rewards Over Time For %s\" %env_name)\n plt.savefig(\"progress.png\")\n plt.close()\n\ndef raw_score_plotter(scores):\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Episode Rewards')\n plt.xlabel('Number of Episodes')\n plt.title(\"Raw Scores Over Time\")\n plt.savefig(\"RawScore.png\")\n plt.close()\n","sub_path":"Tennis_Data/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"129758800","text":"from pyspark.ml.recommendation import ALS\nfrom pyspark.sql import Row\nfrom pyspark.ml.feature import StringIndexer\nimport pyspark.sql.functions as func\nfrom pyspark.mllib.evaluation import RankingMetrics\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nimport pandas as pd\n\n# create Spark Session \nspark = SparkSession.builder.getOrCreate()\nsc = spark._sc\n\n# read in data\ntrainSample = spark.read.parquet('train_sample3.parquet')\ntestSample = spark.read.parquet('test_sample3.parquet')\nvalSample = spark.read.parquet('val_sample3.parquet')\nvalSample.createOrReplaceTempView('valSample')\ntrainSample.createOrReplaceTempView('trainSample')\ntestSample.createOrReplaceTempView('testSample')\n\n# StringIndexer to create new columns and dataframes\nindexer_obj_1 = StringIndexer(inputCol=\"user_id\", outputCol=\"user_id_numer\").setHandleInvalid(\"keep\")\nindexer_model_1 = indexer_obj_1.fit(trainSample)\nindexer_df_1 = indexer_model_1.transform(trainSample)\n\nindexer_obj_2 = StringIndexer(inputCol=\"track_id\", outputCol=\"track_id_numer\").setHandleInvalid(\"keep\")\nindexer_model_2= indexer_obj_2.fit(indexer_df_1)\nindexer_df_2 = indexer_model_2.transform(indexer_df_1)\n\ntrain_df = indexer_df_2.drop('user_id')\ntrain_df= train_df.drop('track_id')\n\nval_df_1 = indexer_model_1.transform(valSample)\nval_df_2= indexer_model_2.transform(val_df_1)\n\nval_df = val_df_2.drop('user_id')\nval_df= val_df.drop('track_id')\n\n# Build the recommendation model using ALS on the training data\n# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics\nals = ALS(maxIter=5, regParam=0.01, userCol=\"user_id_numer\", itemCol=\"track_id_numer\", ratingCol= \"count\",\n coldStartStrategy=\"drop\", implicitPrefs = True)\nmodel = als.fit(train_df)\n\n# use model to transform validation dataset\nval_transformed = model.transform(val_df)\n\n# for each user, sort track ids by count\nval_true = val_df.orderBy('count')\n\n# flatten to group by user id and get list of true track ids\nval_true_flatten = val_true.groupby('user_id_numer').agg(func.collect_list('track_id_numer').alias(\"track_id_numer\"))\n\n# add to dictionary\nval_true_dict = val_true_flatten.collect()\nval_true_dict = [{r['user_id_numer']: r['track_id_numer']} for r in val_true_dict]\nval_true_dict = dict((key,d[key]) for d in val_true_dict for key in d)\n\n\n# get distinct users from transformed validation set\nusers = val_transformed.select(als.getUserCol()).distinct()\n\n# get predictions for validation users\nval_preds = 
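A quick numeric check of the two affine maps in NormalizedActions above: _action maps [-1, 1] onto [low, high], and _reverse_action inverts it (toy action space, invented values):

```python
# Round-trip check of the normalization arithmetic.
import numpy as np

low, high = np.array([0.0]), np.array([4.0])
a = np.array([0.5])                              # normalized action
env_a = low + (a + 1.0) * 0.5 * (high - low)     # -> [3.0]
back = 2 * (env_a - low) / (high - low) - 1      # -> [0.5] again
print(env_a, back)
```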
model.recommendForUserSubset(users, 10)\nval_preds_explode = val_preds.select(val_preds.user_id_numer,func.explode(val_preds.recommendations.track_id_numer))\nval_preds_flatten = val_preds_explode.groupby('user_id_numer').agg(func.collect_list('col').alias(\"col\"))\n\n# add validation predictions to dictionary\nval_preds_dict = val_preds_flatten.collect()\nval_preds_dict = [{r['user_id_numer']: r['col']} for r in val_preds_dict]\nval_preds_dict = dict((key,d[key]) for d in val_preds_dict for key in d)\n\n# create predictions and labels RDD and get MAP\nlabels_list = []\n\nfor user in val_preds_dict.keys():\n labels_list.append((val_preds_dict[user][0], [int(i) for i in val_true_dict[user]][0]))\n\nlabels = spark.sparkContext.parallelize(labels_list)\nmetrics = RankingMetrics(labels)\nprint(metrics.meanAveragePrecision)\n\n","sub_path":"archived_versions/baserecsys_3pct.py","file_name":"baserecsys_3pct.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"561334974","text":"# -*- coding: utf-8 -*-\nfrom django import forms\n\nfrom captcha.fields import CaptchaField\n\nfrom django_geoip.models import City\n\nfrom models import Club\n \n\nclass ClubForm(forms.ModelForm):\n def __init__(self, request, *args, **kwargs):\n super(ClubForm, self).__init__(*args, **kwargs)\n self.fields['user'].initial = request.user\n self.fields['user'].widget = forms.HiddenInput()\n if not request.user.is_authenticated():\n self.fields['captcha'] = CaptchaField()\n \n\n \n\n class Meta:\n model = Club\n exclude = ('long', 'lat', 'enable_comments', 'date', 'moderate')\n\t\n","sub_path":"clubs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"617008343","text":"from twilio.rest import Client\n\n# Your Account SID from twilio.com/console\naccount_sid = \"AC2e5b4db2459089cd85ddb005293a9a90\"\n# Your Auth Token from twilio.com/console\nauth_token = \"c1bd3cabd3000bbd5609fb884d08f838\"\n\nclient = Client(account_sid, auth_token)\n\ndef addMessages():\n for sms in client.messages.list():\n print(sms)\n","sub_path":"phoneItegration.py","file_name":"phoneItegration.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"486856276","text":"import logging\n\nfrom dvc.progress import Tqdm\n\nfrom .tree import Tree\n\nlogger = logging.getLogger(__name__)\n\n\ndef save(odb, obj, **kwargs):\n if isinstance(obj, Tree):\n for _, entry in Tqdm(obj):\n odb.add(entry.path_info, entry.fs, entry.hash_info, **kwargs)\n odb.add(obj.path_info, obj.fs, obj.hash_info, **kwargs)\n\n\ndef check(odb, obj):\n odb.check(obj.hash_info)\n\n if isinstance(obj, Tree):\n for _, entry in obj:\n odb.check(entry.hash_info)\n\n\ndef load(odb, hash_info):\n if hash_info.isdir:\n return Tree.load(odb, hash_info)\n return odb.get(hash_info)\n","sub_path":"dvc/objects/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"5779","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 14 03:01:14 2018\r\n\r\n@author: aap48\r\n\"\"\"\r\n\r\nimport sys\r\nimport pygame\r\n\r\ndef check_keydown(event, character):\r\n \"\"\" Check for keypresses. 
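For reference on the evaluation step above: RankingMetrics expects an RDD of (predicted ranking, relevant items) pairs, both as lists. A tiny standalone sketch with invented values; it requires a local Spark installation:

```python
# meanAveragePrecision over two toy users; user A has hits, user B has none.
from pyspark.sql import SparkSession
from pyspark.mllib.evaluation import RankingMetrics

spark = SparkSession.builder.master("local[1]").getOrCreate()
pairs = spark.sparkContext.parallelize([
    ([1, 2, 3], [2, 3]),  # predicted ranking vs. ground-truth items
    ([4, 5], [9]),
])
print(RankingMetrics(pairs).meanAveragePrecision)
spark.stop()
```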
\"\"\"\r\n if event.key == pygame.K_LEFT:\r\n character.moving_left = True\r\n \r\n elif event.key == pygame.K_RIGHT:\r\n character.moving_right = True\r\n\r\ndef check_keyup(event, character):\r\n \"\"\" Check for key releases. \"\"\"\r\n if event.key == pygame.K_LEFT:\r\n character.moving_left = False\r\n \r\n elif event.key == pygame.K_RIGHT:\r\n character.moving_right = False\r\n\r\ndef check_events(character):\r\n \"\"\" Check for keypresses or mouse inputs. \"\"\"\r\n for event in pygame.event.get():\r\n \r\n # Respond to closing game screen.\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n \r\n elif event.type == pygame.KEYDOWN:\r\n check_keydown(event, character)\r\n \r\n elif event.type == pygame.KEYUP:\r\n check_keyup(event, character)","sub_path":"test_game/my_functions.py","file_name":"my_functions.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"152128919","text":"# -*- coding: utf-8 -*-\n\ntry:\n irange = xrange\nexcept NameError:\n irange = range\n\ntry:\n from itertools import imap, izip\nexcept ImportError:\n imap, izip = map, zip\n\ntry:\n unicode = unicode\nexcept NameError:\n unicode = str\n\nstrlike = (bytes, unicode)\n","sub_path":"dask_image/_pycompat.py","file_name":"_pycompat.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"614190536","text":"import numpy as np\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\ntraining_set = train_datagen.flow_from_directory('Dataset/Train',\n target_size = (128, 128),\n batch_size = 32,\n class_mode = 'binary')\ntest_image = image.load_img('Use_Case/Challenging Images/color/100BN55HNPF_30_20180223T051516319Z.jpg', target_size = (128, 128))\ntest_image.show()\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis = 0)\nclassifier = load_model('my_model.h5')\n\nresult = classifier.predict(test_image)\n\n#plt.imshow('test_image')\ntraining_set.class_indices\nif result[0][0] == 1:\n prediction = 'gray image'\n print(prediction)\nelse:\n prediction = 'color image'\n print(prediction)\n","sub_path":"predict_new_image.py","file_name":"predict_new_image.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"18187710","text":"from __future__ import division; __metaclass__ = type\nimport sys\nimport numpy as np\nfrom math import sqrt\nimport argparse\nimport logging\n\nimport MDAnalysis\n#from MDAnalysis.coordinates.xdrfile.libxdrfile2 import read_xtc_natoms, xdrfile_open\n\nfrom IPython import embed\n\nfrom scipy.spatial import cKDTree\nimport itertools\n#from skimage import measure\n\nfrom rhoutils import rho, cartesian\nfrom mdtools import ParallelTool\n\nfrom constants import SEL_SPEC_HEAVIES, SEL_SPEC_HEAVIES_NOWALL\nfrom mdtools.fieldwriter import RhoField\nimport sys\nimport argparse\n\nfrom work_managers.environment import default_env\n\ndef pbc(pos, box_dim):\n\n for i, box_len in enumerate(box_dim):\n pos[pos[:,i] > box_len, i] -= box_len\n pos[pos[:,i] < 0, i] += box_len\n\n return pos\n\n\n# Get instantaneous 
density profile in x,y,z voxels\n#\n# Returns a 3d array of shape: (xvals.size-1, yvals.size-1, zvals.size-1)\n\ndef get_rhoxyz_simple(water_pos, xvals, yvals, zvals, this_idx=-1):\n \n bounds = [xvals, yvals, zvals]\n\n return this_idx, np.histogramdd(water_pos, bounds)[0]\n\n# Get instantaneous density profile in x,y,z voxels\n#\n# Returns a 3d array of shape: (xvals.size-1, yvals.size-1, zvals.size-1)\ndef get_rhoxyz(water_pos, tree_grid, nx, ny, nz, cutoff=7.0, sigma=2.4, this_idx=-1):\n cutoff_sq = cutoff**2\n sigma_sq = sigma**2\n\n tree_water = cKDTree(water_pos)\n # Len: (n_waters,)\n # Gives indices of grid points w/in cutoff of each water\n res = tree_water.query_ball_tree(tree_grid, r=cutoff)\n\n # Coarse-grained density at each grid point\n # Shape: (n_gridpts)\n this_rho = np.zeros(tree_grid.data.shape[0])\n\n for idx in range(water_pos.shape[0]):\n\n this_water_pos = water_pos[idx]\n\n # Indices of all grid points within cutoff of this water\n indices = res[idx]\n if len(indices) == 0:\n continue\n #assert len(indices)\n\n close_gridpts = tree_grid.data[indices]\n\n dist_vec = close_gridpts - this_water_pos\n\n this_rho[indices] += rho(dist_vec.astype(np.float32), sigma, sigma_sq, cutoff, cutoff_sq)\n\n return this_idx, this_rho.reshape((nx, ny, nz))\n\n\n\n\nextract_float_from_str = lambda instr: np.array(instr.split(), dtype=float)\n\nparser = argparse.ArgumentParser('Output cavity voxels for each frame', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-c', '--top', type=str, default='ofile.gro', help='input structure file')\nparser.add_argument('-f', '--traj', type=str, default='ofile.xtc', help='Input trajectory')\nparser.add_argument('-b', '--start', default=500, type=int, help='start time, in ps')\nparser.add_argument('--equil-vals', type=str, \n help='path to file with equilibrium values - will calc density')\nparser.add_argument('-dx', default=0.5, type=float, help='spacing in x (z), in **Angstroms**. ')\nparser.add_argument('-dy', default=0.5, type=float, help='spacing in y, in **angstroms**.')\nparser.add_argument('-dz', default=0.5, type=float, help='spacing in y, in **angstroms**.')\n#parser.add_argument('-dr', default=0.5, type=float, help='spacing in r, in **Angstroms**. ')\nparser.add_argument('--rmax', default=30, type=float, help='Maximum distance r (in A), to calc rho(z,r)')\nparser.add_argument('--V-min', default=\"28.5 10.0 10.0\", type=str,\n help=\"Coordinates of big V's minimum, inputted as a string of three values\")\nparser.add_argument('--V-max', default=\"48.5 60.0 60.0\", type=str,\n help=\"Coordinates of big V's minimum, inputted as a string of three values\")\nparser.add_argument('--print-out', action='store_true',\n help='If true, print out shifted data')\n\ndefault_env.add_wm_args(parser)\n\nargs = parser.parse_args()\n\ndefault_env.process_wm_args(args)\nwm = default_env.make_work_manager()\n\nwm.startup()\n\ndo_calc_rho = False\n\n\n## Hard (nooo!) coded dimensions of big probe V (large cubic box)\nxmin, ymin, zmin = extract_float_from_str(args.V_min)\nxmax, ymax, zmax = extract_float_from_str(args.V_max)\n\nprint('Vmin: ({:.2f} {:.2f} {:.2f}) Vmax: ({:.2f} {:.2f} {:.2f})'.format(xmin, ymin, zmin, xmax, ymax, zmax))\nsys.stdout.flush()\n\nbox_vol = (xmax-xmin)*(ymax-ymin)*(zmax-zmin)\n\nycom = ymin + (ymax - ymin)/2.0\nzcom = zmin + (zmax - zmin)/2.0\nbox_com = np.array([xmin, ycom, zcom])\n\n# Equil vals avail for V (i.e., N_V and COM at equil - well, unbiased)\n#. 
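get_rhoxyz above smears each water onto nearby grid points found with cKDTree.query_ball_tree. Below is a down-scaled sketch of that pattern; the plain Gaussian stands in for rhoutils.rho, whose exact form is not shown in this file, and all coordinates are invented:

```python
# For each water, query_ball_tree returns the indices of grid points within
# the cutoff; a smeared weight is accumulated at those points.
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(1)
waters = rng.uniform(0, 10, size=(50, 3))
axes = [np.arange(0.0, 10.0, 1.0)] * 3
grid = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1).reshape(-1, 3)

tree_grid = cKDTree(grid)
density = np.zeros(len(grid))
for w, idx in zip(waters, cKDTree(waters).query_ball_tree(tree_grid, r=2.5)):
    d2 = np.sum((grid[idx] - w) ** 2, axis=1)
    density[idx] += np.exp(-d2 / (2 * 2.4 ** 2))  # sigma = 2.4 A, as above

print(density.max())
```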
Since they're available - calculate rho(z,r)\nif args.equil_vals is not None:\n\n do_calc_rho = True\n\n equil_ds = np.load(args.equil_vals)\n # Average num waters in V at bphi=0\n avg_0 = equil_ds['n0'].item()\n # Average COM of waters in V at bphi=0\n com_0 = equil_ds['avg_com']\n # Average water density in box (at bphi=0)\n bulk_rho = avg_0 / box_vol\n\n dx = args.dx\n xvals = np.arange(xmin, xmax+dx, dx)\n dy = args.dy\n yvals = np.arange(ymin, ymax+dy, dy) \n dz = args.dz\n zvals = np.arange(zmin, zmax+dz, dz)\n\n nx = xvals.size-1\n ny = yvals.size-1\n nz = zvals.size-1\n\n print(\"Doing rho calculation (and shifting cav COM)...\")\n print(\"dx: {:0.2f} (from {:.2f} to {:.2f})\".format(dx, xmin, xmax))\n print(\"dy: {:0.2f} (from {:.2f} to {:.2f})\".format(dy, ymin, ymax))\n print(\"dz: {:0.2f} (from {:.2f} to {:.2f})\".format(dz, zmin, zmax))\n\n xx, yy, zz = np.meshgrid(xvals[:-1], yvals[:-1], zvals[:-1], indexing='ij')\n # Center point of each voxel of V\n gridpts = np.vstack((xx.ravel(), yy.ravel(), zz.ravel())).T + 0.5*np.array([dx,dy,dz])\n n_voxels = gridpts.shape[0]\n\n tree_grid = cKDTree(gridpts)\n\ncutoff = 7.0\nsigma = 2.4\n\nuniv = MDAnalysis.Universe(args.top, args.traj)\nstart_frame = int(args.start / univ.trajectory.dt)\nn_frames = univ.trajectory.n_frames\n\nn_waters = np.zeros(n_frames-start_frame)\nwater_com = np.zeros((n_frames-start_frame, 3))\n\nif do_calc_rho:\n # Shape: (n_frames, n_x, n_y, n_z)\n rho_xyz = np.zeros((n_frames-start_frame, nx, ny, nz), dtype=np.float32)\n\nif args.print_out:\n W = MDAnalysis.Writer(\"shift.xtc\", univ.atoms.n_atoms)\n\n\nfutures = []\n\nfor i, i_frame, in enumerate(np.arange(start_frame, n_frames)):\n if i_frame % 100 == 0:\n print(\"frame: {}\".format(i_frame))\n sys.stdout.flush()\n\n univ.trajectory[i_frame]\n\n ## Find waters in V at this step\n waters = univ.select_atoms(\"name OW\")\n water_pos = waters.positions\n\n selx = (water_pos[:,0] >= xmin) & (water_pos[:,0] < xmax)\n sely = (water_pos[:,1] >= ymin) & (water_pos[:,1] < ymax)\n selz = (water_pos[:,2] >= zmin) & (water_pos[:,2] < zmax)\n\n sel_mask = selx & sely & selz\n\n # Waters in V at this frame\n this_waters = waters[sel_mask]\n #this_waters.write(\"noshift.gro\")\n\n ## COM of waters in V\n this_water_com = this_waters.positions.mean(axis=0)\n water_com[i] = this_water_com\n\n ## N_V: number of waters in V\n this_n_waters = this_waters.n_atoms\n n_waters[i] = this_n_waters\n\n\n ## Center cavity COM in V's COM (in y,z)\n if do_calc_rho:\n \n n_cav = avg_0 - this_n_waters\n #First, find com of cavity\n # Found from a weighted difference of water COM at bphi=0 and at this ensemble\n cavity_com = (avg_0*com_0 - this_n_waters*this_water_com) / (avg_0 - this_n_waters)\n \n # Assume no cav if sys has lost fewer than 1 % of its waters\n if (n_cav / avg_0) < 0.01 or cavity_com.min() < 0:\n print(\"no cav; not centering...\")\n cavity_com = box_com\n\n # now shift all atoms so cav COM lies in center of cubic box - but only in y,z\n shift_vector = np.array([0,ycom,zcom]) - cavity_com\n shift_vector[0] = 0\n #print(\"frame: {} shift: {}\".format(i_frame, shift_vector))\n # Shift *all* water positions in this frame \n water_pos_shift = water_pos + shift_vector\n \n # Fix any waters that have been shifted outside of the box\n pbc(water_pos_shift, univ.dimensions[:3])\n waters.positions = water_pos_shift\n \n # Find waters that are in V after shifting cavity C.O.M.\n selx = (water_pos_shift[:,0] >= xmin-cutoff) & (water_pos_shift[:,0] < xmax+cutoff)\n sely = 
(water_pos_shift[:,1] >= ymin-cutoff) & (water_pos_shift[:,1] < ymax+cutoff)\n selz = (water_pos_shift[:,2] >= zmin-cutoff) & (water_pos_shift[:,2] < zmax+cutoff)\n\n sel_mask = selx & sely & selz\n\n # Waters that are in V, after shifting cav COM\n this_waters_shift = waters[sel_mask]\n \n # Finally - get instantaneous (un-normalized) density\n # (Get rho z is *count* of waters at each x,r and x+dx,r+dr)\n #this_rho_xyz = get_rhoxyz(this_waters_shift.positions, xvals, yvals, zvals)\n #this_rho_xyz = get_rhoxyz(this_waters_shift.positions, tree_grid, nx, ny, nz, cutoff=cutoff, sigma=sigma)\n\n #rho_xyz[i, ...] = this_rho_xyz\n #del this_rho_xyz\n fn_args = (this_waters_shift.positions, tree_grid, nx, ny, nz)\n fn_kwargs = {'cutoff': cutoff, 'sigma': sigma, 'this_idx': i}\n \n #futures.append(wm.submit(get_rhoxyz, fn_args, fn_kwargs))\n fn_args = (this_waters_shift.positions, xvals, yvals, zvals)\n fn_kwargs = {'this_idx': i}\n futures.append(wm.submit(get_rhoxyz_simple, fn_args, fn_kwargs))\n\n print(\"submitted job {}\".format(i))\n ## Optionally print out shifted frame\n if args.print_out:\n W.write(univ.atoms)\n if i_frame == n_frames - 1:\n univ.atoms.write(\"shift.gro\")\n\n# Output number of waters in V at each frame, as well as their COM's\n# Note: *non* shifted positions - just directly from trajectory\nif not do_calc_rho:\n print(\"saving cube data...\")\n np.savetxt(\"phiout_cube.dat\", n_waters, fmt='%3d', header='Vmin: ({:.2f} {:.2f} {:.2f}) A Vmax: ({:.2f} {:.2f} {:.2f}) A'.format(xmin, ymin, zmin, xmax, ymax, zmax))\n np.save(\"com_cube.dat\", water_com)\n\n\n## Collect results\nfor i, future in enumerate(wm.as_completed(futures)):\n idx, this_rho_xyz = future.get_result(discard=True)\n if i % 100 == 0:\n print(\"getting result {} of {}\".format(i, len(futures)))\n sys.stdout.flush()\n rho_xyz[idx, ...] = this_rho_xyz\n del this_rho_xyz\n\nif args.print_out:\n W.close()\n\nwm.shutdown()\n\n\n\nif do_calc_rho:\n print(\"...Done: Outputting results\")\n np.savez_compressed(\"rhoxyz.dat\", rho=rho_xyz, xbins=xvals, ybins=yvals, zbins=zvals, nframes=rho_xyz.shape[0])\n \n","sub_path":"scratch/cyl_sam/old/shift_cav_com_old.py","file_name":"shift_cav_com_old.py","file_ext":"py","file_size_in_byte":10420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"383173125","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# This program allows you to store information in the key/value format.\n# Use the console. 
Request format: path...> python storage.py --key KEY --val VALUE\n# To add a key and value, enter the key and value: path...> python storage.py --key KEY --val VALUE\n# To get the value by key, enter only the key: path...> python storage.py --key KEY\n# To clear the temporary storage, enter an empty query: path...> python storage.py\n\n# D.Plotnikov 2021\n\nimport os\nimport tempfile\nimport argparse\nimport json\n\nstorage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\n\n\ndef get_data():\n if os.path.exists(storage_path):\n with open(storage_path, 'r') as data_base:\n raw_data = json.load(data_base)\n return raw_data\n else:\n return {}\n\n\ndef put(key, value):\n data = get_data()\n if key in data:\n data[key] = data[key], value\n with open(storage_path, 'w') as data_base:\n json.dump(data, data_base)\n else:\n with open(storage_path, 'w') as data_base:\n data[key] = value\n json.dump(data, data_base)\n\n\ndef read(key):\n data = get_data()\n if key in data:\n value = data[key]\n if type(value) == list:\n print(*data[key], sep=\", \")\n else:\n print(data[key])\n else:\n print(None)\n\n\ndef clearn():\n os.remove(storage_path)\n\n\ndef storage():\n parser = argparse.ArgumentParser()\n parser.add_argument('--key', type=str, help='Key')\n parser.add_argument('--val', type=str, help='Value')\n args = parser.parse_args()\n\n if args.key and args.val:\n put(args.key, args.val)\n elif args.key:\n read(args.key)\n else:\n clearn()\n\n\nif __name__ == \"__main__\":\n storage()\n","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"370730154","text":"from flask import Flask, render_template, json, request, flash, redirect, url_for, session\n\nfrom base64 import b64encode, b64decode, encodestring, decodestring\nimport base64\n\nimport cloudant\nfrom cloudant.client import Cloudant\nfrom cloudant.error import CloudantException\nfrom cloudant.result import Result, ResultByKey\nfrom cloudant.query import Query\n\nserviceURL = \"$enter bluemix url$\"\n\nclient = Cloudant(\"$your username$\",\n \"$your password$\",\n url=serviceURL)\nclient.connect()\n\ndatabaseName = \"volunteer\"\n\nmyDatabase = client.create_database(databaseName)\nif myDatabase.exists():\n print(\"'{0}' successfully created.\\n\".format(databaseName))\n\napp = Flask(__name__)\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef main():\n return render_template('index.html')\n\n\n@app.route(\"/maps\")\ndef volunteerLocation():\n\n return render_template('maps.html', dicto=dicto)\n\n@app.route(\"/showSignUp\", methods=['GET', 'POST'])\ndef showSignUp():\n if request.method == 'POST':\n _name = request.form.get('inputName')\n _email = request.form.get('inputEmail')\n _phoneme = request.form.get('phone')\n _items = request.form.get('item')\n _weight = request.form.get('weight')\n file = request.files['file']\n file_name = file.filename\n uploaded_file_content = encodestring(file.read())\n _lat = request.form.get('lat')\n _lon = request.form.get('lon')\n\n name = _name\n email = _email\n items = _items\n\n\n jsonDocument = {\n \"nameField\": _name,\n \"emailField\": _email,\n \"phoneField\": _phoneme,\n \"itemsField\": _items,\n \"weightField\": _weight,\n \"lonField\": _lon,\n \"latField\": _lat,\n \"fileField\": uploaded_file_content\n }\n\n #image_encode = uploaded_file_content\n #image_64_decode = base64.decodestring(image_encode)\n #image_result = open('deer_decode.jpg','wb') # 
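The commented-out decode block above reverses what showSignUp stores with base64.encodestring. Note that encodestring/decodestring were removed in Python 3.9; the modern round trip looks like this (byte values invented):

```python
# encodebytes() produces the base64 text stored in the document;
# decodebytes() rebuilds the original file content.
import base64

raw = b"\x89PNG fake image bytes"
encoded = base64.encodebytes(raw)
restored = base64.decodebytes(encoded)
assert restored == raw
print(encoded)
```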
create a writable image and write the decoding result\n #image_result.write(image_64_decode)\n\n if name and email and items and _phoneme and _weight and _lat and _lon:\n newDocument = myDatabase.create_document(jsonDocument)\n\n if newDocument.exists():\n flash(\"Successfully Donated\")\n print(\"Document '{0}' successfully created.\".format(_name))\n return redirect(url_for('thanks'))\n\n else:\n return json.dumps({'html': 'Enter the required fields'})\n\n else:\n return render_template('signup copy.html')\n\n\n@app.route(\"/thanks\")\ndef thanks():\n return render_template('donated.html')\n\n@app.route('/showdownload', methods=['GET', 'POST'])\ndef showdownload():\n if request.method == 'POST':\n _name = request.form.get('database')\n return redirect(url_for('showDatabase'))\n\n else:\n return render_template('download.html')\n\n\n@app.route('/showDatabase')\ndef showDatabase():\n databaseName = \"volunteer\"\n\n end_point = '{0}/{1}'.format(serviceURL, databaseName + \"/_all_docs\")\n params = {'include_docs': 'true'}\n\n response = client.r_session.get(end_point, params=params)\n\n query = cloudant.query.Query(myDatabase, selector={'_id': {'$gt': 0}}, fields=['latField', 'lonField', 'weightField', 'itemsField'])\n\n dicto = {'lat': [], 'lon': [], 'weight':[], 'items': []}\n\n for doc in query(limit=100)['docs']:\n #print doc\n dicto['lat'].append(doc['latField'].encode(\"ascii\",\"replace\"))\n dicto['lon'].append(doc['lonField'].encode(\"ascii\",\"replace\"))\n dicto['weight'].append(doc['weightField'].encode(\"ascii\",\"replace\"))\n dicto['items'].append(doc['itemsField'].encode(\"ascii\",\"replace\"))\n\n\n #return json.dumps(\"{0}\\n\".format(doc))\n return render_template('thankyou.html', dicto = dicto)\n\n\n\nif __name__ == \"__main__\":\n app.secret_key = 'SuperFly'\n app.run()\n","sub_path":"myapp/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"327291308","text":"import numpy as np\nimport pandas as pd\nimport os\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestRegressor\n\nsns.set_style(\"darkgrid\")\n\n\ncwd = os.getcwd()\ncwd\n\nsteelData=pd.read_csv(\"input/steel_supply_demand.csv\")\nX_test = steelData.iloc[:,1:-1]\n\n\n\nsteelData2=steelData.iloc[:-1,1:]\n\n\nsns.pairplot(steelData2)\nsns.heatmap(steelData2)\n\nX_train = steelData2.iloc[:,:-1]\nY_train = steelData2.iloc[:,-1]\n\nrandom_forest = RandomForestRegressor(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\n\nrf_pred = random_forest.predict(X_test)\nsteelData['rf_pred'] = rf_pred\nsteelData['error_percent'] = (steelData.rf_pred-steelData.rebar_price)/steelData.rebar_price*100\n\nsteelData\n\nsteelData.iloc[:,-3:].set_index(steelData.iloc[:,0])\n\nsteelData.to_csv(\"HeiliSteelDataResult.csv\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"520824970","text":"#!/usr/bin/env python\n\n\"\"\"\nMakes the lamp templates from calcos-\nproduced corrtag files by creating a \n\"fake\" flt image, and then summing inside\nthe extraction region around the WCA to\ncreate the final summed spectra.\n\nThs is to be compared to the spectra\nproduced by calcos when the extraction\nregion is changed to extract the WCA \ninstead of the PSA.\n\"\"\"\n\nimport numpy as np\nimport calcos\nfrom astropy.io import fits\nimport glob\nfrom matplotlib 
import pyplot as plt\nimport os\nfrom functools import partial\nimport multiprocessing as mp\nfrom astropy.table import Table as tab\n\n#define directories \nrawdatadir = '/user/esnyder/fuvwavecal/data/'\noutfolder = '/user/esnyder/fuvwavecal/extracted_calcos/'\n\nout_dir = '/user/esnyder/fuvwavecal/manextr_calcos_x1don_bin/'\n# this folder signifies that the binning is updated to be\n# the same as how calcos bins to make the flt files\n\n\ninputcorrtags = glob.glob(os.path.join(outfolder,'*_corrtag_a.fits'))\n\nfor myfile in inputcorrtags:\n #outname = os.path.join(outfolder, myfile.split('/')[-1].split('_')[0] + '_lampflash_' + myfile.split('/')[-1].split('_')[2])\n with fits.open(myfile) as f:\n dataA = f[1].data\n hdrA = f[0].header\n hdrA1 = f[1].header\n rootname = hdrA['rootname']\n\n aper = hdrA['aperture']\n cenwave = hdrA['cenwave']\n segmentA = hdrA['segment']\n exptimeA = hdrA1['exptime']\n fppos = hdrA['FPPOS']\n lp = hdrA['LIFE_ADJ']\n #xtractabA = hdrA['xtractab'].split('$')[1]\n xtractabA = hdrA['xtractab']\n xarrA = dataA['xcorr']\n yarrA = dataA['ycorr']\n\n with fits.open(os.path.join('/grp/hst/cdbs/lref/', xtractabA)) as table:\n index= np.where( (table[1].data['APERTURE'] == aper) & \n (table[1].data['CENWAVE'] == cenwave) &\n (table[1].data['SEGMENT'] == segmentA) )\n xtract_dataA= table[1].data[index]\n lower_boundA= xtract_dataA['B_SPEC'] - (xtract_dataA['HEIGHT']/2)\n upper_boundA= xtract_dataA['B_SPEC'] + (xtract_dataA['HEIGHT']/2)\n\n new_imgA= np.zeros((1024, 16384))\n\n xbinA = np.asarray( np.floor((xarrA + 0.5)), dtype=np.int)\n ybinA = np.asarray( np.floor((yarrA + 0.5)), dtype=np.int)\n\n # Add a count for each x,y pair\n for x, y in zip(xbinA, ybinA):\n try:\n new_imgA[y, x] += 1\n except IndexError:\n continue\n\n spectrumA = np.sum(new_imgA[int(lower_boundA):int(upper_boundA), :], axis= 0)\n\n myfileb = os.path.join(outfolder,rootname + '_corrtag_b.fits')\n\n if os.path.isfile(myfileb):\n with fits.open(myfileb) as f:\n dataB = f[1].data\n hdrB = f[0].header\n hdrB1 = f[1].header \n exptimeB = hdrB1['exptime']\n segmentB = hdrB['segment']\n #xtractabB = hdrB['xtractab'].split('$')[1]\n xtractabB = hdrB['xtractab']\n xarrB = dataB['xcorr']\n yarrB = dataB['ycorr']\n with fits.open(os.path.join('/grp/hst/cdbs/lref/', xtractabB)) as table:\n index= np.where( (table[1].data['APERTURE'] == aper) & \n (table[1].data['CENWAVE'] == cenwave) &\n (table[1].data['SEGMENT'] == segmentB) )\n xtract_dataB= table[1].data[index]\n lower_boundB= xtract_dataB['B_SPEC'] - (xtract_dataB['HEIGHT']/2)\n upper_boundB= xtract_dataB['B_SPEC'] + (xtract_dataB['HEIGHT']/2)\n new_imgB= np.zeros((1024, 16384))\n\n xbinB = np.asarray(np.floor((xarrB + 0.5)), dtype=np.int)\n ybinB = np.asarray(np.floor((yarrB + 0.5)), dtype=np.int)\n\n # Add a count for each x,y pair\n for x, y in zip(xbinB, ybinB):\n try:\n new_imgB[y, x] += 1\n except IndexError:\n continue\n\n spectrumB = np.sum(new_imgB[int(lower_boundB):int(upper_boundB), :], axis= 0)\n\n else:\n spectrumB = np.zeros(len(spectrumA))\n exptimeB = 0.0\n segmentB = 'NUVB'\n\n #plt.plot(spectrum,'b-')\n #plt.show()\n\n col1 = fits.Column(name='XLOCATION', format='16384E', array=np.array([np.arange(len(spectrumA)),np.arange(len(spectrumB))]))\n col2 = fits.Column(name='COUNTS', format='16384E', array=np.array([spectrumA, spectrumB]))\n col4 = fits.Column(name='EXPTIME', format='4D',array=np.array([exptimeA, exptimeB]))\n col3 = fits.Column(name='SEGMENT', format='4A', array=np.array([segmentA, segmentB]))\n\n cols = fits.ColDefs([col1, 
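The per-event loop above fills a 1024 x 16384 count image one photon at a time. A compact sketch of the same binning with np.add.at, which also handles repeated (y, x) pairs correctly (coordinate values invented):

```python
# Bin events to the nearest pixel, exactly like the floor(x + 0.5) loop above.
import numpy as np

xarr = np.array([10.2, 10.6, 10.2])  # stand-ins for XCORR
yarr = np.array([5.1, 5.1, 5.4])     # stand-ins for YCORR
img = np.zeros((1024, 16384))

xbin = np.floor(xarr + 0.5).astype(int)
ybin = np.floor(yarr + 0.5).astype(int)
np.add.at(img, (ybin, xbin), 1)      # unbuffered, so duplicates accumulate

print(img[5, 10])  # 2.0: two events landed in the same pixel
```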
col2, col3, col4])\n\n fitsdata = fits.BinTableHDU.from_columns(cols)\n primhdr = fits.PrimaryHDU(header = hdrA)\n fitsfile = fits.HDUList([primhdr, fitsdata])\n\n fitsfile.writeto(os.path.join(out_dir, '{}_{}_fp{}_LP{}_lamp_x1don_bin.fits'.format(rootname, cenwave, fppos, lp)), clobber=True)\n print('Written: {}'.format('{}_{}_fp{}_LP{}_lamp_x1don_bin.fits'.format(rootname, cenwave, fppos, lp)))\n","sub_path":"makelampspec.py","file_name":"makelampspec.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"367686985","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass Linkedlist:\n # initialize the head to None\n def __init__(self):\n self.head = None\n\n#insertion\n\n # to insert the first node\n def insert(self,NewNode):\n if self.head == None:\n self.head = NewNode\n\n # to insert at the beg\n def push(self,Newnode):\n node = Newnode\n node.next = self.head\n self.head = node\n\n # insert anywhere in between the nodes\n def insert_after(self,pre_node,newNode):\n if pre_node == None:\n print(\"the node should be in linked list\")\n return\n node = newNode\n node.next = pre_node.next\n pre_node.next = node\n # add at the end of the list\n def append(self,newnode):\n node = newnode\n temp = self.head\n while (temp.next):\n temp = temp.next \n temp.next = node\n node.next = None\n\n# deletion\n\n # del the given node with help of the values\n def keys(self,key):\n temp = self.head\n\n #if key is first node\n if temp != None:\n if (temp.data==key):\n self.head = temp.next\n temp = None\n return\n\n # if key is present at any other location\n while (temp is not None):\n if temp.data == key:\n break\n prev = temp #saving the previous node\n temp = temp.next\n\n prev.next = temp.next\n\n temp =None\n\n # del the node bt position\n def position (self,position):\n temp = self.head\n if position == 0:\n self.head = temp.next\n temp.next = None\n\n for i in range (position -1):\n temp = temp.next\n if temp.next == None:\n return\n \n if temp == None:\n return\n\n if temp.next == None:\n return\n\n next = temp.next.next\n\n temp.next = None\n\n temp.next = next\n\n# searching of element based on key\n def search(self,key):\n temp = self.head\n count=1\n\n while temp is not None:\n if temp.data == key:\n print(\" {} is prsend at location {} \".format(key,count))\n return\n temp = temp.next\n count = count+1\n \n \n\n print(\"the given element is ot present in the list\")\n \n\n\n#printing of the Linked List\n def print_node(self):\n temp = self.head\n while (temp):\n print(temp.data)\n temp = temp.next\n\n\n\nnode1 = Node(4)\nnode2 = Node(5)\nnode3 = Node(7)\nnode4 = Node(10)\nnode8 = Node(8)\nnode5 = Node(11)\nnode6 = Node(12)\nlink = Linkedlist()\nlink.insert(node1)\nlink.push(node2)\nlink.push(node3)\nlink.insert_after(node2,node4)\nlink.insert_after(node2,node8)\nlink.append(node5)\nlink.append(node6)\nlink.position(3)\nlink.keys(7)\nlink.print_node()\nlink.search(12)\n\n","sub_path":"Linked list/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"267930919","text":"from pyunity import *\nclass Rotator(Behaviour):\n def Update(self, dt):\n self.transform.eulerAngles += Vector3(0, 90, 135) * dt\n\nscene = SceneManager.AddScene(\"Scene\")\nscene.mainCamera.transform.localPosition = Vector3(0, 0, -10)\ncube = GameObject(\"Cube\")\nrenderer = 
cube.AddComponent(MeshRenderer)\nrenderer.mesh = Mesh.cube(2)\nrenderer.mat = Material(Color(255, 0, 0))\ncube.AddComponent(Rotator)\nscene.Add(cube)\nLoader.SaveScene(scene)\n","sub_path":"gltests/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"379998736","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"IS 211 Week 2 Assignment\nDave Soto\"\"\"\n\n\"\"\"Importing the necessary libraries\"\"\"\nimport urllib.request\nimport datetime\nimport logging\nimport csv\nimport argparse\nimport sys\n\n\n \ndef downloadData(url):\n try:\n with urllib.request.urlopen(url) as response:\n html = response.read()\n except urllib.error.URLError as u:\n print(\"Reason: \" + u.reason)\n except urllib.error.HTTPError as h:\n print(\"Reason: \" + h.reason)\n else:\n return html\n \n\ndef processData(html):\n \n csvfile=html.decode('utf-8')\n file = csvfile.strip().split(\"\\n\")\n reader = csv.reader(file)\n header = next(reader)\n data = []\n for row in reader:\n data.append(row)\n \n di = {}\n count=0\n for i in data:\n key = i[0]\n v1 = i[1]\n v2 = i[2]\n \n \n try:\n bday=datetime.datetime.strptime(v2, '%d/%m/%Y')\n except ValueError:\n logging.error(\"Error processing line \" + str(count) + \" for ID #:\" + str(key)) \n di[key] = (v1,v2)\n else:\n di[key]=(v1,bday)\n count+=1\n \n return di\n\ndef main():\n\n val = input(\"Please enter an ID number\")\n csvData = downloadData(args.url)\n personData = processData(csvData)\n \n while int(val) > 0:\n try: \n displayPerson(val,personData)\n val = input(\"Please enter an ID number?\")\n except:\n print(\"An error has occurred\")\n sys.exit(1)\n \n\ndef displayPerson(id, personData): \n id=str(id)\n if id in personData.keys(): \n print(\"Person \" + id +\" is \" + personData[id][0] + \" with a birthday of \" + str(personData[id][1]))\n else: \n print(\"No User with that id\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"URL needed\")\n parser.add_argument(\"--url\", help=\"The website name\", type=str)\n args = parser.parse_args()\n\n LOG_FILENAME = \"/D/Desktop/error.log\"\n LOG_FORMAT = \"%(message)s\"\n logging.basicConfig(filename = LOG_FILENAME,\n level = logging.ERROR,\n format = LOG_FORMAT,\n filemode = 'w'\n )\n logger = logging.getLogger('assignment2')\n \n main()\n","sub_path":"assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"203379435","text":"import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#########################################################\n# get parent directory...\nimport sys\nimport csv\nfrom datetime import datetime\nfrom datetime import time\nfrom datetime import date\n\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import colors\nfrom openpyxl.styles import Font, Color\nfrom openpyxl.utils import get_column_letter\n\nfrom openpyxl.styles.borders import Border, Side\n\nimport smtplib\nfrom Profile import *\nfrom periodProcess import *\n\ncountryColumn = 2\nyearColumn = 3\nmatchColumn = 4\ndataColumn = 5\n\nmatchValue = 'Life expectancy at birth for both sexes (years)'\nnewSpreadSheet = 'otherStatsPopFert.xlsx'\n\ntoaddrs = 'EfraimMKrug@gmail.com'\ntitle = ''\n# accountArray = []\ngrid = []\n# accountArray.append(dict())\n# server = 0\nnow_dt = datetime.today()\ntd 
= timedelta(days=-10)\n\ndef openTrx():\n wb = load_workbook('./data/UNStats.xlsx')\n return wb\n\ndef openOther():\n wb = load_workbook('./data/UNPopFertility001.xlsx')\n return wb\n\ndef writeTrx(sheet, country, pop, year, line):\n sheet.cell(row=line, column=1).value = country\n sheet.cell(row=line, column=2).value = year\n sheet.cell(row=line, column=3).value = pop\n\ndef getValue(sheet, line):\n return sheet.cell(row=line, column=dataColumn).value\n\ndef getLatest(yearNumberList):\n year = 0\n number = 0\n for yn in yearNumberList:\n # print(yn)\n if yn[1] > year:\n year = yn[1]\n number = yn[0]\n\n return [number, year]\n\ndef getCountryLines(sheet, country, firstLine):\n lineList = []\n i = 0\n # print([country, firstLine])\n for ln in range(firstLine, sheet.max_row):\n lineCountry = sheet.cell(row=ln, column=countryColumn).value\n matchVal = sheet.cell(row=ln, column=matchColumn).value\n if lineCountry == country.decode(\"UTF-8\") and matchValue.decode(\"UTF-8\") == matchVal:\n year = sheet.cell(row=ln, column=yearColumn).value\n lineList.append([ln, year])\n\n return lineList\n\ndef getNextCountry(sheet, firstLine):\n country = sheet.cell(row=firstLine, column=countryColumn).value\n for ln in range(firstLine, sheet.max_row):\n if not sheet.cell(row=ln, column=countryColumn).value is country:\n return [sheet.cell(row=ln, column=countryColumn).value, ln]\n return ['DONE', sheet.max_row]\n\n########################################################################\n\ntrx = openTrx()\nother = openOther()\ncountryLine = ['START', 3]\npopDensity = []\ntrxLine = 4\n\nwhile countryLine[0] is not 'DONE':\n countryLine = getNextCountry(other[other.sheetnames[0]], countryLine[1])\n # if countryLine[0] is 'DONE':\n # break\n # print(countryLine[0])\n try:\n popDensity = getCountryLines(other[other.sheetnames[0]], countryLine[0], countryLine[1])\n except:\n continue\n # if not popDensity:\n # break\n dataArray = getLatest(popDensity)\n if dataArray[0] == 0:\n continue\n popData = getValue(other[other.sheetnames[0]], dataArray[0])\n writeTrx(trx[trx.sheetnames[0]], countryLine[0], popData, dataArray[1], trxLine)\n trxLine = trxLine + 1\n\n# getTrx(trx[trx.sheetnames[0]], pops[pops.sheetnames[0]])\ntrx.save(newSpreadSheet)\ntrx.close()\nother.close()\n","sub_path":"gatherPopFert.py","file_name":"gatherPopFert.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"85406762","text":"class IntCode:\n\n def __init__(self, obj_id, init_state):\n self.id = obj_id\n self.init_cond = init_state\n self.state = self.init_cond.copy()\n\n self.op_param_count = {\n 1: 3,\n 2: 3,\n 3: 1,\n 4: 1,\n 5: 2,\n 6: 2,\n 7: 3,\n 8: 3,\n 9: 1,\n 99: 0,\n }\n self.op_param_modal = {\n 1: 2,\n 2: 2,\n 3: 0,\n 4: 1,\n 5: 2,\n 6: 2,\n 7: 2,\n 8: 2,\n 9: 1,\n 99: 0,\n }\n\n self.pos = 0\n self.input_pos = 0\n self.relative_base = 0\n\n self.auto_inputs = None\n self.outputs = []\n\n self.cmd_count = 0\n self.halted = False\n\n def get_addr(self, addr):\n if addr >= len(self.state) - 1:\n return 0\n return self.state[addr]\n\n def set_addr(self, addr, val):\n while addr >= len(self.state) - 1:\n self.state.append(0)\n self.state[addr] = val\n\n def run_op(self):\n full_op = self.state[self.pos]\n op = full_op % 100\n params = self.state[self.pos+1:self.pos+self.op_param_count[op]+1]\n for i in range(self.op_param_count[op]):\n mode = (full_op // [100, 1000, 10000][i]) % 10\n if i < self.op_param_modal[op]:\n if mode == 0:\n 
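A worked example of the opcode decoding that run_op performs above: the two low digits select the operation, and the hundreds, thousands, and ten-thousands digits carry the per-parameter modes.

```python
# Decode opcode 1002: multiply, with modes [position, immediate, position].
full_op = 1002
op = full_op % 100
modes = [(full_op // d) % 10 for d in (100, 1000, 10000)]
print(op, modes)  # 2 [0, 1, 0]
```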
params[i] = self.get_addr(params[i])\n                elif mode == 2:\n                    params[i] = self.get_addr(self.relative_base + params[i])\n            elif mode == 2:\n                params[i] = self.relative_base + params[i]\n\n        self.cmd_count += 1\n\n        if op == 1:\n            self.set_addr(params[2], params[0] + params[1])\n        elif op == 2:\n            self.set_addr(params[2], params[0] * params[1])\n        elif op == 3:\n            self.set_addr(params[0], self.auto_inputs[self.input_pos])\n            self.input_pos += 1\n        elif op == 4:\n            self.outputs.append(params[0])\n        elif op == 5:\n            if params[0] != 0:\n                self.pos = params[1] - 3\n        elif op == 6:\n            if params[0] == 0:\n                self.pos = params[1] - 3\n        elif op == 7:\n            self.set_addr(params[2], 1 if params[0] < params[1] else 0)\n        elif op == 8:\n            self.set_addr(params[2], 1 if params[0] == params[1] else 0)\n        elif op == 9:\n            self.relative_base += params[0]\n        elif op == 99:\n            self.halted = True\n            return False\n\n        self.pos += self.op_param_count[op] + 1\n\n        return True\n\n    def reset_state(self):\n        self.state = self.init_cond.copy()\n\n        self.pos = 0\n        self.input_pos = 0\n        self.relative_base = 0\n\n        self.auto_inputs = None\n        self.outputs = []\n\n        self.cmd_count = 0\n        self.halted = False\n\n\nclass AdventOfCode:\n\n    def __init__(self, filename):\n        with open(filename) as f:\n            self.input = f.read()\n        self.init_cond = list(map(int, self.input.split(',')))\n\n    def part1(self):\n        machine = IntCode('machine', self.init_cond)\n        machine.auto_inputs = [1]\n        while not machine.halted:\n            machine.run_op()\n\n        return machine.outputs[-1]\n\n    def part2(self):\n        machine = IntCode('machine', self.init_cond)\n        machine.auto_inputs = [2]\n        while not machine.halted:\n            machine.run_op()\n\n        return machine.outputs[-1]\n","sub_path":"py/2019/09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"478809104","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nPREPROCESSING OF THE PAPERS THAT MAKE UP THE DATABASE\r\nThe files are in JSON format; of the many information fields they \r\ncarry, only 3 matter here, and those are the ones we extract.\r\n\r\n    1.- 'paper_id': unique identification number of the document\r\n    2.- 'title': title of the article (may be empty)\r\n    3.- 'abstract': summary of the article's content, on which the\r\n    algorithms of this study are applied\r\n    \r\nOnce filtered, a copy of each file is made with a split by sentences,\r\nso that in the final phase of the project the topic can be determined\r\nper sentence as well as for the text as a whole.\r\n\r\n\r\n@author: Jesús Medina Taboada\r\n\"\"\" \r\n\r\n\r\n    \r\nimport json\r\nimport time\r\nimport os\r\n\r\n# Library used to split text into sentences\r\nimport nltk\r\nnltk.download('punkt')\r\nfrom nltk import tokenize\r\n\r\n# Record execution start time\r\ninicio = time.time()\r\n\r\n# Current working directory\r\nWORK_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\r\n# Path to the papers database\r\npath_docs = WORK_DIR + \"\\pdf_json\"\r\n\r\n# Folder for the preprocessed documents (created if it does not exist)\r\nif not os.path.exists(WORK_DIR + \"\\\\final_database\\\\text\"):\r\n    os.makedirs(WORK_DIR + \"\\\\final_database\\\\text\")\r\npath_preprocess_text = WORK_DIR + \"\\\\final_database\\\\text\"\r\n\r\nif not os.path.exists(WORK_DIR + \"\\\\final_database\\\\phrases\"):\r\n    os.makedirs(WORK_DIR + \"\\\\final_database\\\\phrases\")\r\npath_preprocess_phrases = WORK_DIR + \"\\\\final_database\\\\phrases\"\r\n\r\n\r\n\r\n# List every file in the documents directory\r\nonlyfiles = os.listdir(path_docs)\r\n\r\n# Counters for progress reporting and timing\r\ncont = 1\r\nl = len(onlyfiles)\r\ninicio = time.time() # Record execution start time\r\ncont_2=1\r\n\r\n# Load each file in the list as JSON, keep only the fields of\r\n# interest and save the result to the new folder\r\nfor file in onlyfiles:\r\n    # if cont > 5000:\r\n    #     break\r\n    cont_2+=1\r\n    obj = json.load(open(path_docs+\"\\\\\"+file)) # Original document\r\n    \r\n\r\n    \r\n    # If the abstract is empty or begins with citations or is\r\n    preprocesed_document_text = {} # Preprocessed document, full text\r\n    preprocesed_document_phrases = {} # Preprocessed document, by sentences\r\n    \r\n\r\n    # Store paper_id and title in both\r\n    preprocesed_document_text[\"paper_id\"] = obj[\"paper_id\"]\r\n    preprocesed_document_phrases[\"paper_id\"] = obj[\"paper_id\"]\r\n\r\n    preprocesed_document_text[\"title\"] = obj[\"metadata\"][\"title\"]\r\n    preprocesed_document_phrases[\"title\"] = obj[\"metadata\"][\"title\"]\r\n    \r\n    # Store the abstract in text and phrases in different ways\r\n    \r\n    # The abstract may consist of several paragraphs, so join them all\r\n    # into one before applying the algorithms\r\n    whole_text = \"\"\r\n    preprocesed_document_text[\"abstract\"]= []\r\n    preprocesed_document_phrases[\"abstract\"]= []\r\n    \r\n    # Join every paragraph of the abstract\r\n    for subkey in obj[\"abstract\"]:\r\n        whole_text += subkey[\"text\"]\r\n    \r\n    # DOCUMENT FILTERING\r\n    # Some documents have no abstract or are not representative because\r\n    # they are citations or are incomplete, so they are discarded\r\n    \r\n    if len(whole_text) > 150 and whole_text[:8].upper() != \"CITATION\":\r\n        \r\n        # TEXT\r\n        # Add the complete text to the TEXT documents\r\n        preprocesed_document_text[\"abstract\"].append({\"text\":whole_text})\r\n        \r\n        # SENTENCES\r\n        # Split the complete abstract into sentences with the nltk library,\r\n        # using full stops as separators\r\n        phrase_list = tokenize.sent_tokenize(whole_text)\r\n        \r\n        \r\n        for element in phrase_list:\r\n            # Guard against stray spaces slipping in after full stops\r\n            if len(element) > 0:\r\n                preprocesed_document_phrases[\"abstract\"].append({\"text\":element})\r\n        \r\n        \r\n        # Save the preprocessed JSON doc to the new FULL TEXT folder \r\n        open(path_preprocess_text+\"\\\\\"+file, \"w\").write(\r\n            json.dumps(preprocesed_document_text, indent=4, separators=(',', ': '))\r\n        )\r\n        # Save the preprocessed JSON doc to the new BY-SENTENCE folder \r\n        open(path_preprocess_phrases+\"\\\\\"+file, \"w\").write(\r\n            json.dumps(preprocesed_document_phrases, indent=4, separators=(',', ': '))\r\n        )\r\n        \r\n        cont+=1\r\n        \r\n        \r\n    # Progress message\r\n    if(cont_2 % 500 == 0):\r\n        print(\"Processing file {}/{}\".format(cont_2,l))\r\n    \r\n\r\nprint(\"{}/{}\".format(cont,cont_2))\r\n\r\n# Stop timing\r\nfin = time.time()\r\ntotal = fin - inicio\r\ntotal_m = total / 60\r\ntotal_h = total_m / 60\r\n\r\nprint(str(total) + \" seconds\") \r\nprint(str(total_m) + \" minutes\")\r\nprint(str(total_h) + \" hours\")\r\n\r\n\r\n","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"33195971","text":"\"\"\"This module contains functions to interact with the CTA alerts API.\"\"\"\n\nimport datetime as dt\nimport pytz\nimport requests\n\nfrom dateutil.parser import parse\n\nfrom glom import glom, Coalesce, Literal\n\n\nCTA_ALERTS_URI = \"http://lapi.transitchicago.com/api/1.0/alerts.aspx?outputType=JSON\" # noqa\n\n\ndef _chicago_to_utc(timestamp):\n    \"\"\"Convert a Chicago timestamp to a UTC timestamp.\"\"\"\n\n    if timestamp is None:\n        return None\n\n    return (pytz.timezone('America/Chicago')\n            .localize(parse(timestamp))\n            .astimezone(pytz.UTC)\n            .isoformat())\n\n\ndef _query_api(uri=CTA_ALERTS_URI):\n    \"\"\"Query the alerts API and retrieve a list of alerts.\"\"\"\n    return requests.get(uri).json()\n\n\ndef _parse_api_response(resp_json):\n    \"\"\"Parse the json returned by the alerts API and extract relevant bits.\"\"\"\n\n    service_subspec = {\n        'ServiceType': 'ServiceType',\n        'ServiceTypeDescription': 'ServiceTypeDescription',\n        'ServiceName': 'ServiceName',\n        'ServiceId': 'ServiceId'}\n\n    spec = ('CTAAlerts.Alert', [\n\n        # for each entry in CTAAlerts.Alert pull out the following info\n        {'AlertId': ('AlertId', int),\n         'ShortDescription': 'ShortDescription',\n         'FullDescription': 'FullDescription.#cdata-section',\n         'Impact': 'Impact',\n         'SeverityScore': ('SeverityScore', int),\n         'LastSeen': Literal(pytz.UTC\n                             .localize(dt.datetime.utcnow())\n                             .isoformat()),\n         'EventStart': Coalesce(('EventStart', _chicago_to_utc),\n                                default=None),\n         'EventEnd': Coalesce(('EventEnd', _chicago_to_utc),\n                              default=None),\n\n         # pull out each entry in ImpactedService if there are multiple\n         'ImpactedService': (\n             'ImpactedService.Service',\n             # some chicanery to make sure this entry is always a\n             # list even though the API just returns a nested dict\n             # when there's only one service\n             lambda s: glom(s, [service_subspec])\n             if isinstance(s, list)\n             else [glom(s, service_subspec)])\n         }]\n    )\n\n    return glom(resp_json, spec)\n\n\ndef _filter_train_alerts(alerts):\n    \"\"\"Select the subset of alerts which are related to train lines.\"\"\"\n\n    def is_train_affected(alert):\n        \"\"\"Returns true if the alert affects a train line.\"\"\"\n        return 'R' in glom(alert, ('ImpactedService', ['ServiceType']))\n\n    return list(filter(is_train_affected, alerts))\n\n\ndef _filter_delays(alerts):\n    \"\"\"Select the subset of alerts which refer to delays.\"\"\"\n\n    def is_not_low_impact(alert):\n        \"\"\"Returns true if the alert impact isn't from the minor categories.\"\"\"\n\n        minors = ['Planned Reroute', 'Special Note', 'Bus Stop Relocation',\n                  'Added Service', 'Planned Work', 'Service Change',\n                  'Elevator Status', 'Bus Stop Note']\n\n        return alert.get('Impact') not in minors\n\n    return list(filter(is_not_low_impact, alerts))\n\n\ndef get_alerts():\n    \"\"\"Get the any current train delay alerts from the CTA API.\"\"\"\n\n    resp_json = _query_api()\n\n    alerts = _parse_api_response(resp_json)\n\n    return _filter_delays(_filter_train_alerts(alerts))\n","sub_path":"ctadelaybot/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"381536714","text":"#!/usr/bin/python3 \n\n\nfrom math import *\nimport euclid as euclid\n\n\ndef 
babyStepGiantStep(a,g,n,ordg):\n\tng=floor(sqrt(ordg))\n\ta_inv=euclid.inv(a,n)\n\tg_1=[]\n\tfor i in range(0,ng+1):\n\t\tg_1.append(pow(g,i)%n)\n\n\n\tfor i in range(0,ng+1):\n\t\tg_2=(a*euclid.inv(pow(g,i*ng),n))%n\n\t\tfor j in range(0,ng+1):\n\t\t\tif g_2==g_1[j]:\n\t\t\t\treturn [j,i] \n\n\n\n\na = 67\ng=5\nn=103\nordg=103\n[x0,x1 ] = babyStepGiantStep(a,g,n,ordg) \n\nx = x0+ floor(sqrt(ordg))*x1\nif 67 == (pow(g,x))%n :\n\tprint(\"Test OK\")\n\tprint(\"x is {}\".format(x))\n\n\n\n\n\n","sub_path":"dl-problem/babyStep.py","file_name":"babyStep.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"597512482","text":"#!/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\n\nwith open(sys.argv[1], \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n\nfor line in lines[:int(sys.argv[2])]:\n print(line.rstrip())\n","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"606001475","text":"\"\"\"\n*******************\nglimix_core package\n*******************\n\nFast inference for Generalised Linear Mixed Models.\n\n\"\"\"\n\nfrom __future__ import absolute_import as _\n\nfrom . import cov, ggp, glmm, gp, lik, link, lmm, mean, random, util\nfrom .testit import test\n\n__name__ = \"glimix-core\"\n__version__ = \"1.3.7\"\n__author__ = \"Danilo Horta\"\n__author_email__ = \"horta@ebi.ac.uk\"\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\", \"__author_email__\", \"test\", 'ggp',\n 'gp', 'lmm', 'glmm', 'cov', 'lik', 'mean', 'link', 'random', 'util'\n]\n","sub_path":"glimix_core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"271863386","text":"#!/usr/bin/python3\nfrom Bio import SeqIO\nfrom Bio import SearchIO\nfrom Bio.Blast import NCBIXML\nimport argparse\nimport sys\nimport re\nimport linecache\n\nparser = argparse.ArgumentParser(description='please specify input file.')\nparser.add_argument('--blastout', help='blastout xml file')\n\nargs = parser.parse_args()\nargs_dict=vars(args) # convert namespace(args) to dict style\ninput_file=args_dict['blastout'] #extract the file option value from dict\n\nif len(sys.argv)==1: # print help message if arguments are not valid\n parser.print_help()\n sys.exit(1)\n\ndef main():\n\n\ttry:\n\t\twith open(\"{}.parsed.tab\".format(input_file),'w') as writefilehandle: \t\t\t\n\t\t\twritefilehandle.write(\"query_id\\tquery_length\\tquery_start\\tquery_end\\tquery_frame\\tquery_coverage\\thit_acc\\thit_desc\\thit_id\\thit_length\\thit_start\\thit_end\\thit_frame\\thit_coverage\\tE_val\\tpercent_identity\\tgap_num\\n\")#write header to parsed output file\n\t\t\tblast_Qresults = SearchIO.parse(input_file, 'blast-xml') #open blast output file , format is 'blast-xml'\n\t\t\tfor QueryResults in blast_Qresults:\t\n\t\t\t\tquery_id=QueryResults.id\n\t\t\t\tquery_len=QueryResults.seq_len\n\t\t\t\tfor hit in QueryResults:\n\t\t\t\t\thit_acc=hit.accession\n\t\t\t\t\thit_desc=hit.description\n\t\t\t\t\thit_id=hit.id\n\t\t\t\t\thit_len=hit.seq_len\n\t\t\t\t\tfor hsp in 
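# --- editor's note --------------------------------------------------
# Annotates the babyStepGiantStep record above: it solves the discrete
# log g**x = a (mod n) in O(sqrt(ord g)) by tabulating "baby steps"
# g**j and probing "giant steps" a * g**(-i*m). A compact dict-based
# restatement using only the stdlib (pow(g, -m, n) needs Python 3.8+),
# checked against the record's own test values:
from math import isqrt

def bsgs(g, a, n):
    m = isqrt(n) + 1
    baby = {pow(g, j, n): j for j in range(m)}      # g**j -> j
    step = pow(g, -m, n)                            # g**(-m) mod n
    gamma = a % n
    for i in range(m):
        if gamma in baby:
            return i * m + baby[gamma]              # x = i*m + j
        gamma = gamma * step % n
    return None

assert bsgs(5, 67, 103) == 13 and pow(5, 13, 103) == 67
# --- end note --------------------------------------------------------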
hit:\n\t\t\t\t\t\thsp_eval=hsp.evalue\n\t\t\t\t\t\thsp_identicalnum=hsp.ident_num\n\t\t\t\t\t\thsp_gapnum=hsp.gap_num\n\t\t\t\t\t\thsp_hitstart=hsp.hit_start\n\t\t\t\t\t\thsp_hitend=hsp.hit_end\n\t\t\t\t\t\thsp_querystart=hsp.query_start\n\t\t\t\t\t\thsp_queryend=hsp.query_end\n\t\t\t\t\t\thsp_hitframe=hsp.hit_frame\n\t\t\t\t\t\thsp_queryframe=hsp.query_frame\n\t\t\t\t\t\thsp_hitseq=hsp.hit\n\t\t\t\t\t\thsp_alnlen=hsp.aln_span\n\t\t\t\t\t\tpercent_ident=100*hsp_identicalnum/hsp_alnlen\n\t\t\t\t\t\tquery_coverage=(int(hsp_queryend)-int(hsp_querystart))/int(query_len)\n\t\t\t\t\t\thit_coverage=(int(hsp_hitend)-int(hsp_hitstart))/int(hit_len)\n\t\t\t\t\t\twritefilehandle.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(query_id,query_len,hsp_querystart,hsp_queryend,hsp_queryframe,str(query_coverage),hit_acc,hit_desc,hit_id,hit_len,hsp_hitstart,hsp_hitend,hsp_hitframe,str(hit_coverage),hsp_eval,str(percent_ident),hsp_gapnum))#write current hsp to output file\n\texcept Exception as e:\n\t\tprint(\"Unexpected error:\", str(sys.exc_info()))\n\t\tprint(\"additional information:\", e)\n\t\tPrintException()\n\t\t\n##########################\n## function definitions ##\n##########################\ndef PrintException():\n exc_type, exc_obj, tb = sys.exc_info()\n f = tb.tb_frame\n lineno = tb.tb_lineno\n filename = f.f_code.co_filename\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n print('EXCEPTION IN ({}, LINE {} \"{}\"): {}'.format(filename, lineno, line.strip(), exc_obj))\n\t\t\n\nif __name__ == \"__main__\": main()\n","sub_path":"parse_xml_blastout.py","file_name":"parse_xml_blastout.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"376213930","text":"import firebase_admin\nfrom firebase_admin import credentials, firestore\n\ncred = credentials.Certificate(\"./firebase-key.private.json\")\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n# Firebase collections\nNeweetee = db.collection(\"neweetee\")\nTeeseo = db.collection(\"teeseo\")\n","sub_path":"src/utils/firestore.py","file_name":"firestore.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"172388669","text":"\"\"\"Next-gen variant detection and evaluation with GATK and SnpEff.\n\"\"\"\nimport os\nimport json\n\nfrom bcbio.variation.genotype import variant_filtration, gatk_evaluate_variants\nfrom bcbio.variation import freebayes, phasing\nfrom bcbio.pipeline.shared import configured_vrn_files\nfrom bcbio.structural import hydra\n\n# ## Genotyping\n\ndef finalize_genotyper(call_file, bam_file, ref_file, config):\n \"\"\"Perform SNP genotyping and analysis.\n \"\"\"\n vrn_files = configured_vrn_files(config, ref_file)\n variantcaller = config[\"algorithm\"].get(\"variantcaller\", \"gatk\")\n if variantcaller in [\"freebayes\", \"cortex\", \"samtools\", \"gatk-haplotype\", \"varscan\"]:\n call_file = freebayes.postcall_annotate(call_file, bam_file, ref_file, vrn_files, config)\n filter_snp = variant_filtration(call_file, ref_file, vrn_files, config)\n phase_snp = phasing.read_backed_phasing(filter_snp, bam_file, ref_file, config)\n _eval_genotyper(phase_snp, ref_file, vrn_files.dbsnp, config)\n return phase_snp\n\ndef _eval_genotyper(vrn_file, ref_file, dbsnp_file, config):\n \"\"\"Evaluate variant genotyping, producing a JSON metrics file 
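# --- editor's note --------------------------------------------------
# The _eval_genotyper function being defined here follows a
# compute-once idiom: derive a metrics path from the input file, skip
# the expensive call when that file already exists, and persist the
# results as JSON. A generic sketch of the same idiom; compute_stats
# is a hypothetical stand-in for the expensive step:
import json
import os

def cached_metrics(in_file, compute_stats):
    metrics_file = "%s.eval_metrics" % in_file
    if not os.path.exists(metrics_file):        # only compute once
        stats = compute_stats(in_file)
        with open(metrics_file, "w") as out_handle:
            json.dump(stats, out_handle)
    return metrics_file
# --- end note --------------------------------------------------------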
with values.\n \"\"\"\n metrics_file = \"%s.eval_metrics\" % vrn_file\n target = config[\"algorithm\"].get(\"hybrid_target\", None)\n if not os.path.exists(metrics_file):\n stats = gatk_evaluate_variants(vrn_file, ref_file, config, dbsnp_file, target)\n with open(metrics_file, \"w\") as out_handle:\n json.dump(stats, out_handle)\n return metrics_file\n\n# ## Structural variation\n\ndef detect_sv(data):\n \"\"\"Detect structural variation for input sample.\n \"\"\"\n sv_todo = data[\"config\"][\"algorithm\"].get(\"sv_detection\", None)\n if sv_todo is not None and data.get(\"fastq2\"):\n if sv_todo == \"hydra\":\n sv_calls = hydra.detect_sv(data[\"work_bam\"], data[\"genome_build\"],\n data[\"dirs\"], data[\"config\"])\n else:\n raise ValueError(\"Unexpected structural variation method:{}\".format(sv_todo))\n return [[data]]\n","sub_path":"bcbio/pipeline/variation.py","file_name":"variation.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"637247584","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom os import getcwd\r\ndriver=webdriver.Chrome(getcwd() + \"/chromedriver\")\r\ndriver.get(\"https://www.ncbi.nlm.nih.gov/labs/virus/vssi/#/virus?SeqType_s=Nucleotide\")\r\npaths=\"//*[@id='filter-toggle-VirusLineage_ss']\"\r\nselect=driver.find_element_by_xpath(paths).click()\r\nenter_virus=driver.find_element_by_css_selector(\"#typeahead-input-VirusLineage_ss\").send_keys(\"riboviria\")\r\ntime.sleep(1)\r\nselect_virus=driver.find_element_by_css_selector(\".flyout-panel .usa-sidenav-list.typeahead-results button\");\r\nselect_virus.click()\r\ntime.sleep(0.5)\r\nselect_complete=driver.find_element_by_xpath(\"//*[@id='Completeness_s-panel-toggle']\").click()\r\ntime.sleep(1)\r\nselect_complete=driver.find_element_by_xpath(\"//*[@id='Completeness_s-filter-panel']/uswds-ncbi-app-facet-item-checkbox[1]/div/label\").click()\r\nselect_target_date=driver.find_element_by_xpath(\"//*[@id='filter-toggle-CreateDate_dt']\").click()\r\nenter_start_date=driver.find_element_by_xpath(\"//*[@id='CreateDate_dt_From']\").send_keys(\"1/1/2016\")\r\ntime.sleep(0.3)\r\nenter_end_date=driver.find_element_by_id(\"CreateDate_dt_To\")\r\ntime.sleep(0.3)\r\nenter_end_date.send_keys(\"null\")\r\ntime.sleep(0.3)\r\nenter_end_date.clear()\r\nenter_end_date.send_keys(\"12/31/2018\")\r\nsubit_date=driver.find_element_by_xpath(\"//*[@id='submit-CreateDate_dt']\").click()\r\ntime.sleep(0.3)\r\ni=0\r\ntry:\r\n for b in range(1,6):\r\n for a in range(1,201):\r\n time.sleep(5)\r\n check_box=driver.find_element_by_css_selector(\"#DataTables_Table_0 > tbody > tr:nth-child(\"+str(a)+\") > td:nth-child(1)\").click()\r\n time.sleep(5)\r\n download=driver.find_element_by_xpath(\"//*[@id='cmscontent']/section/uswds-ncbi-app-root/uswds-ncbi-app-report/div/div[2]/uswds-ncbi-app-tool-button/div/uswds-ncbi-app-download-module/button\").click()\r\n time.sleep(0.3)\r\n download_next = driver.find_element_by_xpath(\"//*[@id='1']/div/button\").click()\r\n time.sleep(0.3)\r\n download_next_next = driver.find_element_by_xpath(\"//*[@id='2']/div/span[1]/button\").click()\r\n time.sleep(0.3)\r\n final_download = driver.find_element_by_xpath(\"//*[@id='3']/div/span[1]/button[2]\").click()\r\n time.sleep(5)\r\n checkbox = 
WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.CSS_SELECTOR,\"#DataTables_Table_0 > tbody > tr:nth-child(\"+str(a)+\") > td:nth-child(1)\" ))).click()\r\n\r\n time.sleep(2)\r\n\r\n i+=1\r\n if i==1000:\r\n break\r\n else:\r\n next_page=driver.find_element_by_xpath(\"//*[@id='cmscontent']/section/uswds-ncbi-app-root/uswds-ncbi-app-report/div/div[2]/uswds-ncbi-app-report-data/div/div[3]/div/div/uswds-ncbi-app-results-pagination/form/div/button[2]\").click()\r\n continue\r\nexcept:\r\n print(\"No more records\")\r\n","sub_path":"scraper/data_extraction (1).py","file_name":"data_extraction (1).py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"237187207","text":"import json\nimport re\n\nfrom behave import *\n\n@given(u'I store the json response value at {path} to \"{store_key}\"')\ndef step_impl(context, path, store_key):\n value = parse_json_path(context.json_response, path)\n if \"store\" not in context:\n context.store = {}\n context.store[store_key] = value\n\n@then(\"I will receive a JSON string\")\ndef step_impl(context):\n context.json_response = json.loads(context.response.body)\n\n@then(u'the json response value at {key_path} will be \"{value}\"')\ndef step_impl(context, key_path, value):\n print(context.json_response)\n assert value == parse_json_path(context.json_response, key_path)\n\n@then(u'the json response value at {key_path} will be a non-zero string')\ndef step_impl(context, key_path):\n assert parse_json_path(context.json_response, key_path) != \"\"\n\n@then(u'I will receive a {code} status code')\ndef step_impl(context, code):\n assert int(code) == context.response.status_code\n\ndef parse_json_path(data, key_path):\n value = data\n for part in re.findall(r\"\\[\\\"?(?P.+?)\\\"?\\]\", key_path):\n try:\n part = int(part)\n except ValueError:\n pass\n value = value[part]\n return \"\" if value == data else value","sub_path":"endpoints/features/steps/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"261802055","text":"#Cameron Stanley\n#2-7-2021\n# This program will convert binary to decimal.\n# the way it works is you enter one digit at a time in binary from\n# then you enter a number that is not 1 or 0 and it tells you the decimal form\n# the binary number that you entered.\nn = -1\n# n is negative one because in the loop n is getting +1 in each loop and you need to start with 0\n# -1 + 1= 0 then keep adding 1 each loop.\nc = 0\n# c is zero so the loop will keep going till you stop it.\ny = 0\n# y is zero so the loop will remember what y was from the previous loop. \nwhile c == 0:\n x = int(input(\"Enter number\"))\n n+=1\n if x == 1 or x ==0: #if the number you input is not 0 or 1 then it won't put that number in the equation and will end the loop. 
\n y = x*(2**n) + y # the equation is what ever x is times 2 to n power + the previous loops equation.\n continue\n else:#only prints final result\n print(y)\n break\n \n","sub_path":"private/temp/stanley_virtual/engr2310/number2.py","file_name":"number2.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"205563695","text":"import unittest\nimport HTMLTestRunner\nimport time\nfrom common.send_mail import Email\n\ndiscover = unittest.defaultTestLoader.discover(\"TestCase/\",pattern=\"testcase_*.py\")\n\nif __name__ == \"__main__\":\n now = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n filename = \"Test_Report/\"+now+\"_test_result.html\"\n fp = open(filename,\"wb\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title=u\"H5测试报告\",description=u\"测试用例执行情况\")\n runner.run(discover)\n fp.close()\n email = Email()\n email.send_email(filename)\n\n","sub_path":"all_run.py","file_name":"all_run.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"428731643","text":"from simglucose.patient.t1dpatient import Action\nfrom simglucose.analysis.risk import risk_index, risk_index2\nimport pandas as pd\nfrom datetime import timedelta\nimport logging\nfrom collections import namedtuple\nfrom simglucose.simulation.rendering import Viewer\nimport numpy as np\nfrom gym import spaces\n\ntry:\n from rllab.envs.base import Step\nexcept ImportError:\n _Step = namedtuple(\"Step\", [\"observation\", \"reward\", \"done\", \"info\"])\n\n\n def Step(observation, reward, done, **kwargs):\n \"\"\"\n Convenience method creating a namedtuple with the results of the\n environment.step method.\n Put extra diagnostic info in the kwargs\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n\nObservation = namedtuple('Observation', ['CGM', 'INSULIN', 'CHO'])\nlogger = logging.getLogger(__name__)\n\n\ndef risk_diff(BG_last_hour):\n if len(BG_last_hour) < 2:\n return 0\n else:\n _, _, risk_current = risk_index2([BG_last_hour[-1]], 1) # Horizon\n _, _, risk_prev = risk_index2([BG_last_hour[-2]], 1) # Horizon\n normalized = lambda x: x / 120\n # normalized = lambda x: x\n # return normalized(risk_prev - risk_current)\n return -normalized(risk_current)\n\n\nnormalize_cgm = lambda x: 2 * (x - 39) / (600 - 39) - 1\nnormalize_ins = lambda x: x / 30\nnormalize_cho = lambda x: x / 1000\n\n\nclass T1DSimEnv(object):\n\n def __init__(self, patient, sensor, pump, scenario):\n self.patient = patient\n self.sensor = sensor\n self.pump = pump\n self.scenario = scenario\n # self.observation_space = spaces.Box(20, 350, (1, 1))\n # self.action_space = spaces.Box(0, 400, (2, 1))\n self._reset()\n\n # def seed(self, rand_seed):\n # self.sensor.seed = rand_seed\n\n @property\n def time(self):\n return self.scenario.start_time + timedelta(minutes=self.patient.t)\n\n def mini_step(self, action):\n # current action\n action_offset = 15 # ASSUMPTION: max pump 30\n patient_action = self.scenario.get_action(self.time)\n basal = self.pump.basal(action.basal[0]) # making sure not out fo bounds\n # basal = self.pump.basal(max(min(action.basal, action_offset), -action_offset)) # making sure not out fo bounds\n # basal = self.pump.basal(action[0]) # CHANGED\n # bolus = self.pump.bolus(max(min(action.bolus, action_offset), -action_offset)) # making sure not out fo bounds\n # bolus = self.pump.bolus(action[1]) # CHANGED\n # insulin = basal + bolus + action_offset * 2 # 
CHANGED\n insulin = basal\n CHO = patient_action.meal\n patient_mdl_act = Action(insulin=insulin, CHO=CHO)\n\n # State update\n self.patient.step(patient_mdl_act)\n\n # next observation\n BG = self.patient.observation.Gsub\n CGM = self.sensor.measure(self.patient)\n\n return CHO, insulin, BG, CGM\n\n def step(self, action, reward_fun=risk_diff):\n '''\n action is a namedtuple with keys: basal, bolus\n '''\n CHO = 0.0\n insulin = 0.0\n BG = 0.0\n CGM = 0.0\n\n CHO_norm = 0.0\n insulin_norm = 0.0\n CGM_norm = 0.0\n\n for _ in range(int(self.sample_time)):\n # Compute moving average as the sample measurements\n tmp_CHO, tmp_insulin, tmp_BG, tmp_CGM = self.mini_step(action)\n if tmp_insulin < 0:\n print(tmp_insulin)\n print(self.patient.t)\n CHO += tmp_CHO / self.sample_time\n insulin += tmp_insulin / self.sample_time\n BG += tmp_BG / self.sample_time\n CGM += tmp_CGM / self.sample_time\n # CHO_norm += normalize_cho(tmp_CHO / self.sample_time)\n # CGM_norm += normalize_cgm(tmp_CGM / self.sample_time)\n # insulin_norm += normalize_ins(tmp_insulin / self.sample_time)\n\n # Compute risk index\n horizon = 1\n LBGI, HBGI, risk = risk_index([BG], horizon)\n\n # Record current action\n self.CHO_hist.append(CHO)\n # self.CHO_hist.append(CHO_norm)\n self.insulin_hist.append(insulin)\n # self.insulin_hist.append(insulin_norm)\n\n # Record next observation\n self.time_hist.append(self.time)\n self.BG_hist.append(BG)\n self.CGM_hist.append(CGM)\n # self.CGM_hist.append(CGM_norm)\n self.risk_hist.append(risk)\n self.LBGI_hist.append(LBGI)\n self.HBGI_hist.append(HBGI)\n\n # Compute reward, and decide whether game is over\n window_size = int(60 / self.sample_time) # Horizon\n BG_last_hour = self.CGM_hist[-window_size:]\n reward = reward_fun(BG_last_hour)\n # done = BG < 70 or BG > 350\n done = self.patient.t == (24 * 60) / self.sample_time - 1 * self.sample_time\n cgm_s = self.CGM_hist[-window_size:]\n ins_s = self.insulin_hist[-window_size:]\n cho_s = self.CHO_hist[-window_size:]\n if min(len(self.CGM_hist), len(self.insulin_hist)) < window_size: # Padding\n pad_size_cgm = max(window_size - len(self.CGM_hist), 0)\n pad_size_IN = max(window_size - len(self.insulin_hist), 0)\n pad_size_CHO = max(window_size - len(self.CHO_hist), 0)\n cgm_s = [self.CGM_hist[0]] * pad_size_cgm + self.CGM_hist # Blood Glucose Last Hour\n ins_s = [self.insulin_hist[0]] * pad_size_IN + self.insulin_hist # Insulin Last Hour\n cho_s = [self.CHO_hist[0]] * pad_size_CHO + self.CHO_hist # Insulin Last Hour\n\n obs = Observation(CGM=cgm_s, INSULIN=ins_s, CHO=cho_s)\n\n return Step(\n observation=obs,\n reward=reward,\n done=done,\n sample_time=float(self.sample_time),\n patient_name=self.patient.name,\n meal=CHO,\n patient_state=self.patient.state)\n\n def _reset(self):\n self.sample_time = self.sensor.sample_time\n self.viewer = None\n\n BG = self.patient.observation.Gsub\n horizon = 1 # TODO understand\n LBGI, HBGI, risk = risk_index([BG], horizon)\n CGM = self.sensor.measure(self.patient)\n self.time_hist = [self.scenario.start_time]\n self.BG_hist = [BG]\n self.CGM_hist = [CGM]\n self.risk_hist = [risk]\n self.LBGI_hist = [LBGI]\n self.HBGI_hist = [HBGI]\n self.CHO_hist = []\n self.insulin_hist = []\n self.CHO_hist = []\n self.insulin_hist = []\n\n def reset(self):\n self.patient.reset()\n self.sensor.reset()\n self.pump.reset()\n self.scenario.reset()\n self._reset()\n CGM = self.sensor.measure(self.patient)\n obs = Observation(CGM=[normalize_cgm(CGM)] * 20, INSULIN=[0] * 20, CHO=[0] * 20)\n return Step(\n observation=obs,\n 
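# --- editor's note --------------------------------------------------
# The step()/reset() methods around this point expose a fixed-length
# observation window: the last hour of samples, left-padded with the
# first recorded value while the history is still shorter than the
# window. The record does this inline per signal; a standalone sketch
# of the same left-pad-and-slice logic (names illustrative):
def last_window(history, window_size):
    if not history:
        raise ValueError("history must contain at least one sample")
    if len(history) < window_size:
        pad = [history[0]] * (window_size - len(history))
        return pad + list(history)
    return list(history[-window_size:])

assert last_window([3, 4], 4) == [3, 3, 3, 4]
# --- end note --------------------------------------------------------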
reward=0,\n done=False,\n sample_time=self.sample_time,\n patient_name=self.patient.name,\n meal=0,\n patient_state=self.patient.state)\n\n def render(self, close=True):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n\n if self.viewer is None:\n self.viewer = Viewer(self.scenario.start_time, self.patient.name, )\n\n self.viewer.render(self.show_history())\n\n def show_history(self):\n df = pd.DataFrame()\n df['Time'] = pd.Series(self.time_hist)\n df['BG'] = pd.Series(self.BG_hist)\n df['CGM'] = pd.Series(self.CGM_hist)\n df['CHO'] = pd.Series(self.CHO_hist)\n df['insulin'] = pd.Series(self.insulin_hist)\n df['LBGI'] = pd.Series(self.LBGI_hist)\n df['HBGI'] = pd.Series(self.HBGI_hist)\n df['Risk'] = pd.Series(self.risk_hist)\n df = df.set_index('Time')\n return df\n","sub_path":"backup/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"88104395","text":"n = int(input())\nd = {}\nfor i in range(n):\n l = list(map(str, input().split()))\n if l[0] not in d:\n d[l[0]] = list(set(l[2:]))\n else:\n d[l[0]] = list(set(list(list(set(l[2:])) + d[l[0]])))\nfor i in d:\n a = d[i]\n ans = []\n m = len(a)\n for j in range(m):\n c = 1\n for z in range(m):\n if j != z and a[z].endswith(a[j]):\n c = 0\n if c:\n ans.append(a[j])\n d[i] = ans\nprint(len(d))\nfor i in d:\n print(i, len(d[i]), *d[i])","sub_path":"codeforces/cf_rounds/1/451/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"436125019","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\nimport GUI.ROOT_AND_MAIN.USER_WINDOW.SUBJECT_FRAME.callbacks as callbacks\nfrom GUI.ROOT_AND_MAIN.USER_WINDOW.SUBJECT_FRAME.grid import grid\n\nclass Subject_frame:\n def __init__(self, parent):\n self.parent = parent\n self.frame = ttk.Frame(parent.frame)\n\n self.labels = {\n \"subscribed subjects\": {\n \"text\": \"Ramos Inscritos:\"\n },\n \"possible subjects\": {\n \"text\": \"Ramos:\"\n }\n }\n self.buttons = {\n \"subscribe\": {\n \"text\": \"Inscribir\",\n \"command\": self.subscribe_button_callback\n },\n \"unsubscribe\": {\n \"text\": \"Desinscribir\",\n \"command\": self.unsubscribe_button_callback\n }\n }\n self.lists = {\n \"subscribed subjects\": {\n \"items\": [],\n \"listvariable\": tk.StringVar(self.frame),\n \"height\": 7,\n \"scrollbar\": ttk.Scrollbar(self.frame, orient=tk.VERTICAL)\n },\n \"possible subjects\": {\n \"items\": callbacks.get_possible_subjects_list(),\n \"listvariable\": tk.StringVar(self.frame),\n \"height\": 7,\n \"scrollbar\": ttk.Scrollbar(self.frame, orient=tk.VERTICAL)\n }\n }\n\n def set_widgets(self):\n for label in self.labels:\n self.labels[label]['widget'] = ttk.Label(\n self.frame, # parent\n text=self.labels[label]['text'] # text\n )\n\n for button in self.buttons:\n self.buttons[button]['widget'] = ttk.Button(\n self.frame,\n text=self.buttons[button]['text'],\n command=self.buttons[button]['command']\n )\n \n for current_list in self.lists:\n self.lists[current_list]['listvariable'].set(\n self.lists[current_list]['items']\n )\n self.lists[current_list]['widget'] = tk.Listbox(\n self.frame,\n height=self.lists[current_list]['height'],\n listvariable=self.lists[current_list]['listvariable']\n )\n # scrollbar config\n self.lists[current_list]['widget']['yscrollcommand'] = \\\n 
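# --- editor's note --------------------------------------------------
# The two assignments being made at this step are the standard two-way
# Tk wiring: the Listbox reports its view position to the Scrollbar
# via yscrollcommand, and the Scrollbar drives the Listbox via yview.
# The same hookup in a minimal, self-contained sketch:
import tkinter as tk
from tkinter import ttk

def make_scrolled_listbox(parent, items):
    var = tk.StringVar(value=list(items))
    box = tk.Listbox(parent, listvariable=var)
    bar = ttk.Scrollbar(parent, orient=tk.VERTICAL)
    box["yscrollcommand"] = bar.set     # listbox -> scrollbar
    bar["command"] = box.yview          # scrollbar -> listbox
    return box, bar
# --- end note --------------------------------------------------------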
self.lists[current_list]['scrollbar'].set\n self.lists[current_list]['scrollbar']['command'] = \\\n self.lists[current_list]['widget'].yview\n \n # bind events\n self.frame.bind(\"\", self.update_frame)\n\n def grid(self):\n self.frame.grid(row=1, column=0, sticky='wens')\n\n for widget_type in grid:\n for widget_key in grid[widget_type]:\n self.__dict__[widget_type][widget_key]['widget'].grid(\n row=grid[widget_type][widget_key][\"row\"],\n column=grid[widget_type][widget_key][\"column\"],\n rowspan=grid[widget_type][widget_key][\"rowspan\"],\n columnspan=grid[widget_type][widget_key][\"columnspan\"],\n sticky=grid[widget_type][widget_key][\"sticky\"],\n padx=grid[widget_type][widget_key][\"padx\"],\n pady=grid[widget_type][widget_key][\"pady\"]\n )\n if widget_type == 'lists':\n self.lists[widget_key]['scrollbar'].grid(\n row=grid[widget_type][widget_key][\"row\"],\n column=grid[widget_type][widget_key][\"column\"] + 1,\n rowspan=grid[widget_type][widget_key][\"rowspan\"],\n columnspan=grid[widget_type][widget_key][\"columnspan\"],\n sticky='wns'\n )\n\n # CALLBACKS\n\n def subscribe_button_callback(self):\n if self.get_current_user() is None:\n return\n selection = \\\n self.lists['possible subjects']['widget'].get(tk.ANCHOR)\n if not selection:\n messagebox.showinfo(\n title='Inscribir Asignatura',\n message='No hay asignaturas para inscribir'\n ) \n elif selection in self.lists['subscribed subjects']['items']:\n messagebox.showinfo(\n title='Inscribir Asignatura',\n message='Esta asignatura ya fue inscrita'\n ) \n else:\n self.lists['subscribed subjects']['items'].append(selection)\n self.lists['subscribed subjects']['listvariable'].set(\n self.lists['subscribed subjects']['items']\n )\n callbacks.subscribe_subject(\n self.get_current_user(),\n selection\n )\n \n def unsubscribe_button_callback(self):\n if self.get_current_user() is None:\n return\n selection = \\\n self.lists['subscribed subjects']['widget'].get(tk.ANCHOR)\n if not selection:\n messagebox.showinfo(\n title='Inscribir Asignatura',\n message='No hay asignaturas para desinscribir'\n )\n else:\n self.lists['subscribed subjects']['items'].remove(selection)\n self.lists['subscribed subjects']['listvariable'].set(\n self.lists['subscribed subjects']['items']\n )\n callbacks.unsubscribe_subject(\n self.get_current_user(),\n selection\n )\n\n def get_current_user(self):\n return self.parent.children['user_child'].current_user\n \n def refresh_possible_subjects_list(self):\n self.lists[\"possible subjects\"][\"items\"] = \\\n callbacks.get_possible_subjects_list()\n self.lists['possible subjects']['listvariable'].set(\n self.lists[\"possible subjects\"][\"items\"]\n )\n\n def update_frame(self, ve):\n self.refresh_possible_subjects_list()","sub_path":"frontend/GUI/ROOT_AND_MAIN/USER_WINDOW/SUBJECT_FRAME/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"532904228","text":"#!/usr/bin/env python\n# coding=utf-8\n# *************************************************\n# File Name : setup.py\n# Author : Cole Smith\n# Mail : tobewhatwewant@gmail.com\n# Github : whatwewant\n# Created Time : 2016年02月12日 星期五 21时11分12秒\n# *************************************************\n\nimport os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any 
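# --- editor's note --------------------------------------------------
# The chdir call this comment introduces is a common setuptools idiom
# for making a setup.py location-independent: resolve the directory
# containing the script and switch into it, so relative paths
# (README.md, package dirs) work no matter where the build is launched
# from. An equivalent, slightly more direct spelling of the same move:
import os

os.chdir(os.path.dirname(os.path.abspath(__file__)))
# --- end note --------------------------------------------------------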
path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='mobile-address',\n version='0.0.1',\n packages=['mobile_address', ],\n # package_dir={'spherical_functions': '.'},\n include_package_data=True,\n install_requires = ['requests'],\n license='BSD License',\n description='A simple app for searching mobile address.',\n long_description=README,\n url='https://github.com/whatwewant/mobile-address',\n author='Cole Smith',\n author_email='tobewhatwewant@gmail.com',\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)\n","sub_path":"pypi_install_script/mobile-address-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"499907688","text":"# coding: utf-8\n\nimport numpy as np\nimport codecs\nfrom multiprocessing import Pool\nimport pickle\n\nbatch_size = 128\nembedding_size = 256\n\n# read_words = '../data_raw_final/first_stage/fact2word_te.npy'\n# read_ys = '../data_raw_final/first_stage/y_te.npy'\n# batch_path = '../data_raw_final/first_stage/test_s496/'\n# batch_path = '../data_raw_final/first_stage/test_s200/'\n# batch_path = '../data_raw_final/first_stage/test_0200/'\n\nread_words = './fact2word_te.npy'\nread_ys = './y_te.npy'\n# batch_path = '../data_raw_final/restData/rest_s200/' # 修改 batch id\n# batch_path = './SF200B300_wv256_bs128/' # 修改 batch id\nbatch_path = './test_p0496_wv256_bs128/' # 修改 batch id\n# batch_path = './s200_wv256_bs64/' # 修改 batch id\n\n###########################################################\n# read_words_te = './fact2word_test.npy'\n# read_ys_te = './y_test.npy'\n#\n# read_words_tr = './fact2word_train.npy'\n# read_ys_tr = './y_train.npy'\n#\n# read_words_va = './fact2word_valid.npy'\n# read_ys_va = './y_valid.npy'\n#\n# batch_path_te = './SF200B300_wv256_bs128/test/'\n# batch_path_tr = './SF200B300_wv256_bs128/train/'\n# batch_path_va = './SF200B300_wv256_bs128/valid/'\n###########################################################\n\naccu_id_dict = dict()\ni = 0\nrf = codecs.open('./accu.txt', 'r', 'utf-8')\nfor line in rf.readlines():\n line = line.replace('\\n', '').replace('\\r', '')\n accu_id_dict[line] = i\n i += 1\nprint(accu_id_dict)\n\ndef get_idaccu(accu):\n \"\"\"获取 accu 所对应的 id.\"\"\"\n if accu not in accu_id_dict:\n return 1\n else:\n return accu_id_dict[accu]\n\ndef get_id4accus(accus):\n \"\"\"把 accus 转为 对应的 id\"\"\"\n ids = list(map(get_idaccu, accus)) # 获取id\n return ids\n###########################################################\nlaw_id_dict = dict()\ni = 0\nrf = codecs.open('./law.txt', 'r', 'utf-8')\nfor line in rf.readlines():\n line = int(line.strip('\\n'))\n law_id_dict[line] = i\n i += 1\nprint(law_id_dict)\ndef get_idlaw(law):\n \"\"\"获取 law 所对应的 id.\"\"\"\n if law not in law_id_dict:\n print('law ont in dic law id')\n exit(0)\n else:\n return law_id_dict[law]\n\ndef get_id4laws(laws):\n \"\"\"把 laws 转为 对应的 id\"\"\"\n ids = list(map(get_idlaw, laws)) # 获取id\n return ids\n###########################################################\nembedding_file_name = '../../data/' + 'sr_word2id_' + str(embedding_size) + '.pkl'\nwith 
open(embedding_file_name, 'rb') as inp:\n sr_word2id = pickle.load(inp)\ndict_word2id = dict()\n\nfor i in range(len(sr_word2id)):\n dict_word2id[sr_word2id.index[i]] = sr_word2id.values[i]\n\ndef get_idword(word):\n if word not in dict_word2id:\n return 1\n else:\n return dict_word2id[word]\n\ndef get_id4words(words):\n ids = list(map(get_idword, words))\n return ids\n\n###########################################################\n###########################################################\n###########################################################\n\ntrue_dict = {True:1, False:0}\ndef get_impid(imp):\n return true_dict[imp]\n\ndef pad_X200_same(words, max_len=200):\n words_len = len(words)\n if words_len == max_len:\n return words\n if words_len > max_len:\n return words[(words_len - max_len):]\n if words_len < max_len:\n num = int(max_len / words_len)\n words_num = words * num\n words_num = np.asarray(words_num)\n end_idx = max_len - words_len * num - 1\n words_last = words[-1:-end_idx-2:-1]\n words_last = np.asarray(words_last)\n return np.hstack([words_last, words_num])\n\n\ndef pad_X500_same(words, max_len=500):\n words_len = len(words)\n if words_len == max_len:\n return words\n if words_len > max_len:\n return words[(words_len - max_len):]\n if words_len < max_len:\n num = int(max_len / words_len)\n words_num = words * num\n words_num = np.asarray(words_num)\n end_idx = max_len - words_len * num - 1\n words_last = words[-1:-end_idx-2:-1]\n words_last = np.asarray(words_last)\n return np.hstack([words_last, words_num])\n\n\ndef pad_X496_same(words, max_len=496):\n words_len = len(words)\n if words_len == max_len:\n return words\n if words_len > max_len:\n return words[(words_len - max_len):]\n if words_len < max_len:\n num = int(max_len / words_len)\n words_num = words * num\n words_num = np.asarray(words_num)\n end_idx = max_len - words_len * num - 1\n words_last = words[-1:-end_idx-2:-1]\n words_last = np.asarray(words_last)\n return np.hstack([words_last, words_num])\n\n\ndef pad_X496_0(words, max_len=496):\n words_len = len(words)\n if words_len == max_len:\n return words\n if words_len > max_len:\n return words[(words_len - max_len):]\n if words_len < max_len:\n words_last = np.zeros(shape=max_len - words_len, dtype=np.int32)\n return np.hstack([words, words_last])\n\n\ndef pad_X500_S_F200B200(words, max_len=400):\n words_len = len(words)\n if words_len == max_len:\n return words\n if words_len > max_len:\n F = words[0:200]\n B = words[words_len-200:words_len]\n return np.hstack([F, B])\n if words_len < max_len:\n num = int(max_len / words_len)\n words_num = words * num\n words_num = np.asarray(words_num)\n end_idx = max_len - words_len * num - 1\n words_last = words[-1:-end_idx-2:-1]\n words_last = np.asarray(words_last)\n return np.hstack([words_last, words_num])\n\n\ndef id_pad_length_over_sample(file_words, file_ys, batch_path, batch_num):\n p = Pool()\n words = np.load(file_words)\n accuwords, relewords, death, impr, life = np.load(file_ys)\n rel_id = np.asarray(list(p.map(get_id4laws, relewords)))\n acc_id = np.asarray(list(p.map(get_id4accus, accuwords)))\n word2id = np.asarray(list(p.map(get_id4words, words)))\n death_id = np.asarray(list(p.map(get_impid, death)))\n life_id = np.asarray(list(p.map(get_impid, life)))\n del words, accuwords, relewords, death, life\n print(rel_id[0])\n print(acc_id[0])\n print(word2id[0])\n print(death_id[0])\n print(life_id[0])\n sample_num = len(death_id)\n lengths = []\n for sample in word2id:\n lengt = len(sample)\n if lengt>16*16:\n 
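# --- editor's note --------------------------------------------------
# The clamp being written here caps every recorded length at
# 16*16 = 256 tokens, which looks like the flattened size of a 16x16
# hierarchical grid. The pad_X*_ helpers earlier in this record
# implement the companion operation: truncate from the front when a
# sequence is too long, pad when it is too short. A generic sketch of
# that pad-or-truncate contract (zero-padding variant):
import numpy as np

def pad_or_truncate(ids, max_len):
    ids = list(ids)
    if len(ids) >= max_len:
        return np.asarray(ids[len(ids) - max_len:])  # keep the tail
    pad = np.zeros(max_len - len(ids), dtype=np.int32)
    return np.hstack([np.asarray(ids, dtype=np.int32), pad])

assert pad_or_truncate([7, 8], 4).tolist() == [7, 8, 0, 0]
assert pad_or_truncate([1, 2, 3, 4, 5], 4).tolist() == [2, 3, 4, 5]
# --- end note --------------------------------------------------------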
lengths.append(16*16)\n else:\n lengths.append(lengt)\n\n lengths = np.asarray(lengths)\n new_index = np.random.permutation(sample_num)\n word2id = word2id[new_index]\n rel_id = rel_id[new_index]\n acc_id = acc_id[new_index]\n death_id = death_id[new_index]\n impr = impr[new_index]\n life_id = life_id[new_index]\n lengths = lengths[new_index]\n for start in list(range(0, sample_num, batch_size)):\n print(batch_num)\n end = min(start + batch_size, sample_num)\n batch_name = batch_path + str(batch_num) + '.npz'\n X_batch = word2id[start:end]\n # X_batch = np.asarray(list(p.map(pad_X496_same, X_batch)), dtype=np.int64)\n X_batch = np.asarray(list(p.map(pad_X496_0, X_batch)), dtype=np.int64)\n # X_batch = np.asarray(list(p.map(pad_X500_S_F200B200, X_batch)), dtype=np.int64)\n if batch_num == 0:\n print(len(X_batch[0]))\n acc_batch = acc_id[start:end]\n law_batch = rel_id[start:end]\n death_batch = death_id[start:end]\n imp_batch = impr[start:end]\n lif_batch = life_id[start:end]\n lengths_batch = lengths[start:end]\n np.savez(batch_name, X=X_batch, length=lengths_batch,\n acc=acc_batch, law=law_batch, death=death_batch, imp=imp_batch, lif=lif_batch)\n batch_num += 1\n\n\nif __name__ == '__main__':\n id_pad_length_over_sample(read_words, read_ys, batch_path, batch_num=0)\n # id_pad_length_over_sample(read_words, read_ys, batch_path, batch_num=0)\n ####################################################################\n # id_pad_length_over_sample(read_words_te, read_ys_te, batch_path_te, batch_num=0)\n # id_pad_length_over_sample(read_words_va, read_ys_va, batch_path_va, batch_num=0)\n # id_pad_length_over_sample(read_words_tr, read_ys_tr, batch_path_tr, batch_num=0)\n\n\"\"\"\nbs 128 batch rest: 5846\nbs 128 batch train: 12978\n\nbs 64 batch rest: \nbs 64 batch train: 25956\n\"\"\"\n","sub_path":"text_classification_models/models/HAN/hantoID_batch_pad.py","file_name":"hantoID_batch_pad.py","file_ext":"py","file_size_in_byte":8295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"423627493","text":"from django.core.management.base import BaseCommand, CommandError\nfrom gallery.models import GalleryImage\n\nfrom django.contrib.staticfiles import finders\nfrom os import listdir\nfrom os.path import isfile, join\n\n# Adds images from gallery_photos to gallery that aren't already there\nclass Command(BaseCommand):\n\thelp = 'Creates gallery'\n\n\tdef handle(self, *args, **options):\n\t\t# resolves the full local path of static files\n\t\tgallery_dir = finders.find('gallery_photos/') or \"None\"\n\n\n\t\timages = [\n\t\t\t# every file in the gallery directory\n\t\t\tf for f in listdir(gallery_dir)\n\t\t\tif isfile(join(gallery_dir, f))\n\n\t\t\t# that doesn't have an image already\n\t\t\tand not GalleryImage.objects.filter(filename=f).exists()\n\t\t]\n\n\t\tfor f in images:\n\t\t\ti = GalleryImage.objects.create(filename=f, caption=\"No caption\")\n\t\t\tself.stdout.write('Added {} to gallery'.format(f))\n\n\t\tself.stdout.write('All done!')\n","sub_path":"gallery/management/commands/makegallery.py","file_name":"makegallery.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"329399924","text":"from .game_object import TankGame\nimport numpy as np\nfrom tank.ammo_object import Ammo\nfrom options.video import MOVES_PER_FRAME\n\n# calculate moves from actions\n# observation, reward, done, info = env.step(actions) - must be like that\ndef step(self):\n 
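# --- editor's note --------------------------------------------------
# The hantoID record above shuffles all its arrays with one shared
# permutation and then writes fixed-size batches with np.savez. The
# essential pattern, reduced to a sketch; X and y are NumPy arrays of
# equal length, and the names are illustrative:
import numpy as np

def write_batches(X, y, batch_size, prefix):
    order = np.random.permutation(len(y))   # one permutation for all
    X, y = X[order], y[order]               # keeps rows aligned
    for num, start in enumerate(range(0, len(y), batch_size)):
        end = min(start + batch_size, len(y))
        np.savez("{}{}.npz".format(prefix, num),
                 X=X[start:end], y=y[start:end])
# --- end note --------------------------------------------------------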
info = None\n # MOVES_PER_FRAME mechanics. second part of function is at the end of function\n if self.frame_step <= 0:\n self.data = self.connection.get_actions()\n\n # win check\n team1_alive = 0\n team2_alive = 0\n\n # Move Tanks\n # coll_map, accelerate, turn_body, turn_tower, shot, skill\n for i in range(len(self.team1)):\n if self.team1[i].hp > 0:\n team1_alive += 1\n idd = self.team1[i].id_game - self.ID_START\n old_xy, old_coords, shot, skill = self.team1[i].move(self.map_coll, self.data[idd])\n self.move_obj_on_maps(self.team1[i], 1, old_xy, old_coords) # changing maps\n self.reward(idd, self.score_move, 'move')\n\n if shot:\n # tank, id\n self.bullets.append(Ammo(self.team1[i], self.id_bul))\n self.bullets_in_act.append(self.id_bul-200,)\n self.id_bul += 1 # id of bullets\n self.reward(idd, self.score_shot, 'shot')\n\n\n for i in range(len(self.team2)):\n if self.team2[i].hp > 0:\n team2_alive += 1\n idd = self.team2[i].id_game - self.ID_START\n old_xy, old_coords, shot, skill = self.team2[i].move(self.map_coll, self.data[idd])\n self.move_obj_on_maps(self.team2[i], 2, old_xy, old_coords) # changing maps\n self.reward(idd, self.score_move, 'move')\n\n if shot:\n self.bullets.append(Ammo(self.team2[i], self.id_bul))\n self.bullets_in_act.append(self.id_bul - 200,)\n self.id_bul += 1 # id of bullets\n self.reward(idd, self.score_shot, 'shot')\n\n\n # TODO Winning\n if team1_alive <= 0:\n info = {'game_done': True}\n elif team2_alive <= 0:\n info = {'game_done': True}\n\n\n # Move bullets\n for i in self.bullets_in_act:\n old_xy, old_coords, hit = self.bullets[i].move(self.map_coll)\n\n if hit:\n # check if tank damaged\n if self.bullets[i].damaged_target_id > 100:\n # move from array to new array\n self.bullets_in_act.remove(self.bullets[i].id_game - 200)\n # calculate angle of hitting target, angle 0 - down; 0.5 - up\n\n angle_diff = self.id_tanks[self.bullets[i].damaged_target_id].direction_tank - self.bullets[i].angle\n # front hit ±30°\n # TODO very simple hit place detection\n if abs(angle_diff) < 0.083:\n side = 'back'\n # 30° - 150°\n elif angle_diff < -0.083 and angle_diff > -0.4167:\n side = 'right'\n elif angle_diff > 0.083 and angle_diff < 0.4167:\n side = 'left'\n else:\n side = 'front'\n print('\\nangle:', round(angle_diff,3),\n 'dir:',round(self.id_tanks[self.bullets[i].damaged_target_id].direction_tank,3),\n 'bul:', round(self.bullets[i].angle, 3))\n # calc dmg and hp to damaged tank\n dmg_dealed = self.id_tanks[self.bullets[i].damaged_target_id].damaged(self.bullets[i].damage_dealed_potencial, side)\n\n # give reward to shooter according to dmg and hit\n self.reward(self.bullets[i].parent.id_game - self.ID_START, self.score_hit+self.score_dmg*dmg_dealed, 'hit id: {} dmg: {}'.format(\n str(int(self.bullets[i].damaged_target_id) - self.ID_START), dmg_dealed))\n # penalty to damaged tank\n self.reward(int(self.bullets[i].damaged_target_id) - self.ID_START,\n self.score_take_hit + self.score_take_dmg * dmg_dealed, 'took dmg from id: {} dmg: {}'.format(\n str(self.bullets[i].parent.id_game - self.ID_START), dmg_dealed))\n self.bullets[i].damaged_target_id\n\n # obstacles hit\n # if its wall -> broke or calc hp\n else:\n self.bullets_in_act.remove(self.bullets[i].id_game - 200)\n # if its wall and bullet type can destroy\n if self.bullets[i].damaged_target_id == self.map_obs_d['wall'] and self.bullets[i].destroy:\n # erase wall from map\n self.map[int(self.bullets[i].X)*self.PIX_CELL:int(self.bullets[i].X + 1)*self.PIX_CELL,\n 
int(self.bullets[i].Y)*self.PIX_CELL:int(self.bullets[i].Y + 1)*self.PIX_CELL, 0] = 0\n                # map environment\n                self.map_env[int(self.bullets[i].X), int(self.bullets[i].Y), 0] = 0\n                # map collision\n                self.map_coll[int(self.bullets[i].X)*self.PIX_CELL:int(self.bullets[i].X + 1)*self.PIX_CELL,\n                              int(self.bullets[i].Y)*self.PIX_CELL:int(self.bullets[i].Y + 1)*self.PIX_CELL, 0] = 0\n\n            # erasing bullet from maps\n            self.map[old_coords[0], old_coords[1], 3] = 0\n            self.map_env[round(old_xy[0]), round(old_xy[1]), 3] = 0\n        else:\n            if self.bullets[i].done:\n                # erasing from maps\n                self.map[old_coords[0], old_coords[1], 3] = 0\n                self.map_env[round(old_xy[0]), round(old_xy[1]), 3] = 0\n                self.bullets_in_act.remove(self.bullets[i].id_game - 200)\n            else:\n                self.move_obj_on_maps(self.bullets[i], 3, old_xy, old_coords)\n\n    # MOVES_PER_FRAME mechanics. first part of function is at the start of function\n    if self.frame_step <= 0:\n        self.steps += 1\n        self.send_data_to_players(info)\n        self.frame_step = MOVES_PER_FRAME - 1\n    # if MOVES_PER_FRAME = 1, frame_step always will be 0\n    else:\n        if MOVES_PER_FRAME > 1:\n            self.frame_step -= 1\n\n\nsetattr(TankGame, 'step', step)\n\n\ndef move_obj_on_maps(self, obj, layer, old_xy, old_coords):\n    self.map_coll[obj.coords_xy[0], obj.coords_xy[1], 1 if layer < 3 else 2] = obj.id_game\n    self.map[old_coords[0], old_coords[1], layer] = 0\n    self.map[obj.coords_xy[0], obj.coords_xy[1], layer] = self.tank_type_d[obj.type] if layer < 3 else 1\n    self.map_env[round(old_xy[0]), round(old_xy[1]), layer] = 0\n    self.map_env[round(obj.X): round(obj.X+max(obj.width, 1)),\n                 round(obj.Y): round(obj.Y + max(obj.height, 1)), layer] = self.tank_type_d[obj.type] if layer < 3 else 1\n\n\nsetattr(TankGame, 'move_obj_on_maps', move_obj_on_maps)","sub_path":"game/decorator_step.py","file_name":"decorator_step.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"267095923","text":"from ftplib import FTP\nimport time\nfrom util import is_csv\n\n\ninbound = '/Inbox30/clutch_batch_result'\noutbound = '/Outbox/'\n\ndef connection(folder):\n    ftp = FTP('mftstg.manheim.com')\n    ftp.login(user='cai_clutch_tec',passwd='password')\n    ftp.cwd(folder)\n    data = []\n    lines = ftp.dir('-t',data.append)\n    # entries.sort(key=lambda entry: entry[1]['modify'], reverse=True)\n    return ftp,data\n\ndef close_connection(ftp):\n    ftp.close()\n\n\n\ndef latest_file():\n    ftp,data = connection(inbound)\n    print (data)\n    l = data[0]\n    col = l.split()\n    filename = col[8]\n    month,day,t = col[5],col[6],col[7]\n    date = month+' '+day+' '+' '+t\n    close_connection(ftp)\n    return filename,date\n\ndef grabfile():\n    ftp, data = connection(inbound)\n    filename,date = latest_file()\n    handle = open(filename,'wb')\n    ftp.retrbinary(\"RETR \" +filename,handle.write,8*1024)\n    ftp.quit()\n    handle.close()\n\ndef placefile(filename):\n    ftp, data = connection(outbound)\n    ftp.storbinary('STOR ' + filename,open(filename,'rb'))\n    ftp.quit()\n\n\nprint (connection(outbound))\nprint (latest_file())\n# print (grabfile())\n\n\n\n","sub_path":"src/develop.py","file_name":"develop.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"285958597","text":"import math\r\n\r\nnum=16384\r\n\r\nseq=[]\r\n\r\nwhile num>-1:\r\n\trem=num%2\r\n\tnum=math.trunc(num/2)\r\n\tseq=[rem]+seq\r\n\tif num==1:\r\n\t\tseq=[num]+seq\r\n\t\tbreak\r\n\r\nfor i in 
range(0,len(seq)):\r\n\tprint(seq[i])","sub_path":"python/comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"142642863","text":"# -*- coding: utf-8 -*-\n\"\"\"Main entry point for OpenPype command.\n\nBootstrapping process of OpenPype is as follows:\n\n`OPENPYPE_PATH` is checked for existence - either one from environment or\nfrom user settings. Precedence takes the one set by environment.\n\nOn this path we try to find OpenPype in directories version string in their names.\nFor example: `openpype-v3.0.1-foo` is valid name, or even `foo_3.0.2` - as long\nas version can be determined from its name _AND_ file `openpype/openpype/version.py`\ncan be found inside, it is considered OpenPype installation.\n\nIf no OpenPype repositories are found in `OPENPYPE_PATH` (user data dir)\nthen **Igniter** (OpenPype setup tool) will launch its GUI.\n\nIt can be used to specify `OPENPYPE_PATH` or if it is _not_ specified, current\n*\"live\"* repositories will be used to create zip file and copy it to\nappdata dir in user home and extract it there. Version will be determined by\nversion specified in OpenPype module.\n\nIf OpenPype repository directories are found in default install location\n(user data dir) or in `OPENPYPE_PATH`, it will get list of those dirs there and\nuse latest one or the one specified with optional `--use-version` command\nline argument. If the one specified doesn't exist then latest available\nversion will be used. All repositories in that dir will be added\nto `sys.path` and `PYTHONPATH`.\n\nIf OpenPype is live (not frozen) then current version of OpenPype module will be\nused. All directories under `repos` will be added to `sys.path` and\n`PYTHONPATH`.\n\nOpenPype depends on connection to `MongoDB`_. You can specify MongoDB connection\nstring via `OPENPYPE_MONGO` set in environment or it can be set in user\nsettings or via **Igniter** GUI.\n\nSo, bootstrapping OpenPype looks like this::\n\n.. code-block:: bash\n\n+-------------------------------------------------------+\n| Determine MongoDB connection: |\n| Use `OPENPYPE_MONGO`, system keyring `openPypeMongo` |\n+--------------------------|----------------------------+\n .--- Found? --.\n YES NO\n | |\n | +------v--------------+\n | | Fire up Igniter GUI |<---------+\n | | and ask User | |\n | +---------------------+ |\n | |\n | |\n+-----------------v------------------------------------+ |\n| Get location of OpenPype: | |\n| 1) Test for `OPENPYPE_PATH` environment variable | |\n| 2) Test `openPypePath` in registry setting | |\n| 3) Test user data directory | |\n| ................................................... | |\n| If running from frozen code: | |\n| - Use latest one found in user data dir | |\n| If running from live code: | |\n| - Use live code and install it to user data dir | |\n| * can be overridden with `--use-version` argument | |\n+-------------------------|----------------------------+ |\n .-- Is OpenPype found? --. |\n YES NO |\n | | |\n | +---------------v-----------------+ |\n | | Look in `OPENPYPE_PATH`, find | |\n | | latest version and install it | |\n | | to user data dir. | |\n | +--------------|------------------+ |\n | .-- Is OpenPype found? --. 
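# --- editor's note --------------------------------------------------
# The comp.py record above extracts the binary digits of 16384 by
# repeated division, prepending each remainder and stopping once the
# quotient reaches 1 (which it also prepends). divmod makes the same
# loop explicit, and bin()/format() give the answer directly. Note the
# record's loop never terminates for num = 0, since the quotient then
# stays 0 and the num == 1 break is never reached.
def to_binary_digits(num):
    digits = []
    while num > 0:
        num, rem = divmod(num, 2)   # quotient, remainder
        digits.insert(0, rem)
    return digits or [0]

assert to_binary_digits(16384) == [int(c) for c in format(16384, "b")]
assert format(16384, "b") == "1" + "0" * 14    # 16384 == 2**14
# --- end note --------------------------------------------------------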
|\n | YES NO --------+\n | |\n |<---------+\n |\n+-------------v------------+\n| Run OpenPype |\n+--------------------------+\n\n\nTodo:\n Move or remove bootstrapping environments out of the code.\n\nAttributes:\n silent_commands (list): list of commands for which we won't print OpenPype\n logo and info header.\n\n.. _MongoDB:\n https://www.mongodb.com/\n\n\"\"\"\nimport os\nimport re\nimport sys\nimport traceback\nimport subprocess\nimport site\nfrom pathlib import Path\n\n# add dependencies folder to sys.pat for frozen code\nif getattr(sys, 'frozen', False):\n frozen_libs = os.path.normpath(\n os.path.join(os.path.dirname(sys.executable), \"dependencies\"))\n sys.path.append(frozen_libs)\n # add stuff from `/dependencies` to PYTHONPATH.\n pythonpath = os.getenv(\"PYTHONPATH\", \"\")\n paths = pythonpath.split(os.pathsep)\n paths.append(frozen_libs)\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(paths)\n\nimport igniter # noqa: E402\nfrom igniter import BootstrapRepos # noqa: E402\nfrom igniter.tools import get_openpype_path_from_db # noqa\nfrom igniter.bootstrap_repos import OpenPypeVersion # noqa: E402\n\nbootstrap = BootstrapRepos()\nsilent_commands = [\"run\", \"igniter\", \"standalonepublisher\",\n \"extractenvironments\"]\n\n\ndef set_openpype_global_environments() -> None:\n \"\"\"Set global OpenPype's environments.\"\"\"\n import acre\n\n from openpype.settings import get_environments\n\n all_env = get_environments()\n\n # TODO Global environments will be stored in \"general\" settings so loading\n # will be modified and can be done in igniter.\n env = acre.merge(\n acre.parse(all_env[\"global\"]),\n dict(os.environ)\n )\n os.environ.clear()\n os.environ.update(env)\n\n # Hardcoded default values\n os.environ[\"PYBLISH_GUI\"] = \"pyblish_pype\"\n # Change scale factor only if is not set\n if \"QT_AUTO_SCREEN_SCALE_FACTOR\" not in os.environ:\n os.environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\n\n\ndef run(arguments: list, env: dict = None) -> int:\n \"\"\"Use correct executable to run stuff.\n\n This passing arguments to correct OpenPype executable. If OpenPype is run\n from live sources, executable will be `python` in virtual environment.\n If running from frozen code, executable will be `openpype_console` or\n `openpype_gui`. 
Its equivalent in live code is `python start.py`.\n\n Args:\n arguments (list): Argument list to pass OpenPype.\n env (dict, optional): Dictionary containing environment.\n\n Returns:\n int: Process return code.\n\n \"\"\"\n if getattr(sys, 'frozen', False):\n interpreter = [sys.executable]\n else:\n interpreter = [sys.executable, __file__]\n\n interpreter.extend(arguments)\n\n p = subprocess.Popen(interpreter, env=env)\n p.wait()\n print(f\">>> done [{p.returncode}]\")\n return p.returncode\n\n\ndef set_avalon_environments():\n \"\"\"Set avalon specific environments.\n\n These are non modifiable environments for avalon workflow that must be set\n before avalon module is imported because avalon works with globals set with\n environment variables.\n \"\"\"\n from openpype import PACKAGE_DIR\n\n # Path to OpenPype's schema\n schema_path = os.path.join(\n os.path.dirname(PACKAGE_DIR),\n \"schema\"\n )\n # Avalon mongo URL\n avalon_mongo_url = (\n os.environ.get(\"AVALON_MONGO\")\n or os.environ[\"OPENPYPE_MONGO\"]\n )\n os.environ.update({\n # Mongo url (use same as OpenPype has)\n \"AVALON_MONGO\": avalon_mongo_url,\n\n \"AVALON_SCHEMA\": schema_path,\n # Mongo DB name where avalon docs are stored\n \"AVALON_DB\": \"avalon\",\n # Name of config\n \"AVALON_CONFIG\": \"openpype\",\n \"AVALON_LABEL\": \"OpenPype\"\n })\n\n\ndef set_modules_environments():\n \"\"\"Set global environments for OpenPype modules.\n\n This requires to have OpenPype in `sys.path`.\n \"\"\"\n\n from openpype.modules import ModulesManager\n import acre\n\n modules_manager = ModulesManager()\n\n module_envs = modules_manager.collect_global_environments()\n publish_plugin_dirs = modules_manager.collect_plugin_paths()[\"publish\"]\n\n # Set pyblish plugins paths if any module want to register them\n if publish_plugin_dirs:\n publish_paths_str = os.environ.get(\"PYBLISHPLUGINPATH\") or \"\"\n publish_paths = publish_paths_str.split(os.pathsep)\n _publish_paths = {\n os.path.normpath(path) for path in publish_paths if path\n }\n for path in publish_plugin_dirs:\n _publish_paths.add(os.path.normpath(path))\n module_envs[\"PYBLISHPLUGINPATH\"] = os.pathsep.join(_publish_paths)\n\n # Merge environments with current environments and update values\n if module_envs:\n parsed_envs = acre.parse(module_envs)\n env = acre.merge(parsed_envs, dict(os.environ))\n os.environ.clear()\n os.environ.update(env)\n\n\ndef _process_arguments() -> tuple:\n \"\"\"Process command line arguments.\n\n Returns:\n tuple: Return tuple with specific version to use (if any) and flag\n to prioritize staging (if set)\n \"\"\"\n # check for `--use-version=3.0.0` argument and `--use-staging`\n use_version = None\n use_staging = False\n for arg in sys.argv:\n if arg == \"--use-version\":\n print(\"!!! 
Please use option --use-version like:\")\n            print(\"    --use-version=3.0.0\")\n            sys.exit(1)\n\n        m = re.search(r\"--use-version=(?P<version>\d+\.\d+\.\d*.+?)\", arg)\n        if m and m.group('version'):\n            use_version = m.group('version')\n            sys.argv.remove(arg)\n            break\n    if \"--use-staging\" in sys.argv:\n        use_staging = True\n        sys.argv.remove(\"--use-staging\")\n\n    # handle igniter\n    # this is helper to run igniter before anything else\n    if \"igniter\" in sys.argv:\n        import igniter\n        return_code = igniter.open_dialog()\n\n        # this is when we want to run OpenPype without installing anything.\n        # or we are ready to run.\n        if return_code not in [2, 3]:\n            sys.exit(return_code)\n\n    return use_version, use_staging\n\n\ndef _determine_mongodb() -> str:\n    \"\"\"Determine mongodb connection string.\n\n    First use ``OPENPYPE_MONGO`` environment variable, then system keyring.\n    Then try to run **Igniter UI** to let user specify it.\n\n    Returns:\n        str: mongodb connection URL\n\n    Raises:\n        RuntimeError: if mongodb connection url cannot be determined.\n\n    \"\"\"\n\n    openpype_mongo = os.getenv(\"OPENPYPE_MONGO\", None)\n    if not openpype_mongo:\n        # try system keyring\n        try:\n            openpype_mongo = bootstrap.secure_registry.get_item(\n                \"openPypeMongo\")\n        except ValueError:\n            print(\"*** No DB connection string specified.\")\n            print(\"--- launching setup UI ...\")\n            import igniter\n            igniter.open_dialog()\n\n            try:\n                openpype_mongo = bootstrap.secure_registry.get_item(\n                    \"openPypeMongo\")\n            except ValueError:\n                raise RuntimeError(\"missing mongodb url\")\n\n    return openpype_mongo\n\n\ndef _initialize_environment(openpype_version: OpenPypeVersion) -> None:\n    version_path = openpype_version.path\n    os.environ[\"OPENPYPE_VERSION\"] = openpype_version.version\n    # set OPENPYPE_ROOT to point to currently used OpenPype version.\n    os.environ[\"OPENPYPE_ROOT\"] = os.path.normpath(version_path.as_posix())\n    # inject version to Python environment (sys.path, ...)\n    print(\">>> Injecting OpenPype version to running environment ...\")\n    bootstrap.add_paths_from_directory(version_path)\n\n    # Additional sys paths related to OPENPYPE_ROOT directory\n    # TODO move additional paths to `boot` part when OPENPYPE_ROOT will point\n    # to same hierarchy from code and from frozen OpenPype\n    additional_paths = [\n        os.environ[\"OPENPYPE_ROOT\"],\n        # add OpenPype tools\n        os.path.join(os.environ[\"OPENPYPE_ROOT\"], \"openpype\", \"tools\"),\n        # add common OpenPype vendor\n        # (common for multiple Python interpreter versions)\n        os.path.join(\n            os.environ[\"OPENPYPE_ROOT\"],\n            \"openpype\",\n            \"vendor\",\n            \"python\",\n            \"common\"\n        )\n    ]\n\n    split_paths = os.getenv(\"PYTHONPATH\", \"\").split(os.pathsep)\n    for path in additional_paths:\n        split_paths.insert(0, path)\n        sys.path.insert(0, path)\n\n    os.environ[\"PYTHONPATH\"] = os.pathsep.join(split_paths)\n\n\ndef _find_frozen_openpype(use_version: str = None,\n                          use_staging: bool = False) -> Path:\n    \"\"\"Find OpenPype to run from frozen code.\n\n    This will process and modify environment variables:\n    ``PYTHONPATH``, ``OPENPYPE_VERSION``, ``OPENPYPE_ROOT``\n\n    Args:\n        use_version (str, optional): Try to use specified version.\n        use_staging (bool, optional): Prefer *staging* flavor over production.\n\n    Returns:\n        Path: Path to version to be used.\n\n    Raises:\n        RuntimeError: If no OpenPype versions are found or no staging version\n            (if requested).\n\n    \"\"\"\n    openpype_version = None\n    openpype_versions = bootstrap.find_openpype(include_zips=True,\n                                                staging=use_staging)\n    if not os.getenv(\"OPENPYPE_TRYOUT\"):\n        
try:\n            # use latest one found (last in the list is latest)\n            openpype_version = openpype_versions[-1]\n        except IndexError:\n            # no OpenPype version found, run Igniter and ask for them.\n            print('*** No OpenPype versions found.')\n            print(\"--- launching setup UI ...\")\n            import igniter\n            return_code = igniter.open_dialog()\n            if return_code == 2:\n                os.environ[\"OPENPYPE_TRYOUT\"] = \"1\"\n            if return_code == 3:\n                # run OpenPype after installation\n\n                print('>>> Finding OpenPype again ...')\n                openpype_versions = bootstrap.find_openpype(\n                    staging=use_staging)\n                try:\n                    openpype_version = openpype_versions[-1]\n                except IndexError:\n                    print((\"!!! Something is wrong and we didn't \"\n                           \"find it again.\"))\n                    sys.exit(1)\n            elif return_code != 2:\n                print(f\"  . finished ({return_code})\")\n                sys.exit(return_code)\n\n    if not openpype_versions:\n        # no openpype versions found anyway, let's then use the one\n        # shipped with frozen OpenPype\n        if not os.getenv(\"OPENPYPE_TRYOUT\"):\n            print(\"*** Still no luck finding OpenPype.\")\n            print((\"*** We'll try to use the one coming \"\n                   \"with OpenPype installation.\"))\n        version_path = _bootstrap_from_code(use_version)\n        openpype_version = OpenPypeVersion(\n            version=BootstrapRepos.get_version(version_path),\n            path=version_path)\n        _initialize_environment(openpype_version)\n        return version_path\n\n    # get path of version specified in `--use-version`\n    version_path = BootstrapRepos.get_version_path_from_list(\n        use_version, openpype_versions)\n\n    if not version_path:\n        if use_version is not None:\n            if not openpype_version:\n                ...\n            else:\n                print((\"!!! Specified version was not found, using \"\n                       \"latest available\"))\n        # specified version was not found so use latest detected.\n        version_path = openpype_version.path\n        print(f\">>> Using version [ {openpype_version} ]\")\n        print(f\"    From {version_path}\")\n\n    # test if latest detected is installed (in user data dir)\n    is_inside = False\n    try:\n        is_inside = openpype_version.path.resolve().relative_to(\n            bootstrap.data_dir)\n    except ValueError:\n        # if relative path cannot be calculated, openpype version is not\n        # inside user data dir\n        pass\n\n    if not is_inside:\n        # install latest version to user data dir\n        version_path = bootstrap.install_version(\n            openpype_version, force=True)\n\n    if openpype_version.path.is_file():\n        print(\">>> Extracting zip file ...\")\n        version_path = bootstrap.extract_openpype(openpype_version)\n        openpype_version.path = version_path\n\n    _initialize_environment(openpype_version)\n    return version_path\n\n\ndef _bootstrap_from_code(use_version):\n    \"\"\"Bootstrap live code (or the one coming with frozen OpenPype).\n\n    Args:\n        use_version: (str): specific version to use.\n\n    Returns:\n        Path: path to sourced version.\n\n    \"\"\"\n    # run through repos and add them to `sys.path` and `PYTHONPATH`\n    # set root\n    if getattr(sys, 'frozen', False):\n        openpype_root = os.path.normpath(\n            os.path.dirname(sys.executable))\n        local_version = bootstrap.get_version(Path(openpype_root))\n        print(f\"  - running version: {local_version}\")\n        assert local_version\n    else:\n        openpype_root = os.path.normpath(\n            os.path.dirname(\n                os.path.dirname(\n                    os.path.realpath(igniter.__file__))))\n        # get current version of OpenPype\n        local_version = bootstrap.get_local_live_version()\n\n    os.environ[\"OPENPYPE_VERSION\"] = local_version\n    if use_version and use_version != local_version:\n        openpype_versions = bootstrap.find_openpype(include_zips=True)\n        version_path = BootstrapRepos.get_version_path_from_list(\n            use_version, 
openpype_versions)\n if version_path:\n # use specified\n bootstrap.add_paths_from_directory(version_path)\n os.environ[\"OPENPYPE_VERSION\"] = use_version\n else:\n version_path = openpype_root\n os.environ[\"OPENPYPE_ROOT\"] = openpype_root\n repos = os.listdir(os.path.join(openpype_root, \"repos\"))\n repos = [os.path.join(openpype_root, \"repos\", repo) for repo in repos]\n # add self to python paths\n repos.insert(0, openpype_root)\n for repo in repos:\n sys.path.insert(0, repo)\n\n # add venv 'site-packages' to PYTHONPATH\n python_path = os.getenv(\"PYTHONPATH\", \"\")\n split_paths = python_path.split(os.pathsep)\n # Add repos as first in list\n split_paths = repos + split_paths\n # last one should be venv site-packages\n # this is slightly convoluted as we can get here from frozen code too\n # in case when we are running without any version installed.\n if not getattr(sys, 'frozen', False):\n split_paths.append(site.getsitepackages()[-1])\n # TODO move additional paths to `boot` part when OPENPYPE_ROOT will point\n # to same hierarchy from code and from frozen OpenPype\n additional_paths = [\n # add OpenPype tools\n os.path.join(os.environ[\"OPENPYPE_ROOT\"], \"openpype\", \"tools\"),\n # add common OpenPype vendor\n # (common for multiple Python interpreter versions)\n os.path.join(\n os.environ[\"OPENPYPE_ROOT\"],\n \"openpype\",\n \"vendor\",\n \"python\",\n \"common\"\n )\n ]\n for path in additional_paths:\n split_paths.insert(0, path)\n sys.path.insert(0, path)\n\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(split_paths)\n\n return Path(version_path)\n\n\ndef boot():\n \"\"\"Bootstrap OpenPype.\"\"\"\n\n # ------------------------------------------------------------------------\n # Play animation\n # ------------------------------------------------------------------------\n\n # from igniter.terminal_splash import play_animation\n\n # don't play for silenced commands\n # if all(item not in sys.argv for item in silent_commands):\n # play_animation()\n\n # ------------------------------------------------------------------------\n # Process arguments\n # ------------------------------------------------------------------------\n\n use_version, use_staging = _process_arguments()\n\n # ------------------------------------------------------------------------\n # Determine mongodb connection\n # ------------------------------------------------------------------------\n\n try:\n openpype_mongo = _determine_mongodb()\n except RuntimeError as e:\n # without mongodb url we are done for.\n print(f\"!!! 
{e}\")\n sys.exit(1)\n\n os.environ[\"OPENPYPE_MONGO\"] = openpype_mongo\n os.environ[\"OPENPYPE_DATABASE_NAME\"] = \"openpype\" # name of Pype database\n\n # ------------------------------------------------------------------------\n # Set environments - load OpenPype path from database (if set)\n # ------------------------------------------------------------------------\n # set OPENPYPE_ROOT to running location until proper version can be\n # determined.\n if getattr(sys, 'frozen', False):\n os.environ[\"OPENPYPE_ROOT\"] = os.path.dirname(sys.executable)\n else:\n os.environ[\"OPENPYPE_ROOT\"] = os.path.dirname(__file__)\n\n # Get openpype path from database and set it to environment so openpype can\n # find its versions there and bootstrap them.\n openpype_path = get_openpype_path_from_db(openpype_mongo)\n if not openpype_path:\n print(\"*** Cannot get OpenPype path from database.\")\n\n if not os.getenv(\"OPENPYPE_PATH\") and openpype_path:\n os.environ[\"OPENPYPE_PATH\"] = openpype_path\n\n # ------------------------------------------------------------------------\n # Find OpenPype versions\n # ------------------------------------------------------------------------\n # WARNING: Environment OPENPYPE_ROOT may change if frozen OpenPype\n # is executed\n if getattr(sys, 'frozen', False):\n # find versions of OpenPype to be used with frozen code\n try:\n version_path = _find_frozen_openpype(use_version, use_staging)\n except RuntimeError as e:\n # no version to run\n print(f\"!!! {e}\")\n sys.exit(1)\n else:\n version_path = _bootstrap_from_code(use_version)\n\n # set this to point either to `python` from venv in case of live code\n # or to `openpype` or `openpype_console` in case of frozen code\n os.environ[\"OPENPYPE_EXECUTABLE\"] = sys.executable\n\n if getattr(sys, 'frozen', False):\n os.environ[\"OPENPYPE_REPOS_ROOT\"] = os.environ[\"OPENPYPE_ROOT\"]\n else:\n os.environ[\"OPENPYPE_REPOS_ROOT\"] = os.path.join(\n os.environ[\"OPENPYPE_ROOT\"], \"repos\")\n\n # delete OpenPype module and it's submodules from cache so it is used from\n # specific version\n modules_to_del = [\n sys.modules.pop(module_name)\n for module_name in tuple(sys.modules)\n if module_name == \"openpype\" or module_name.startswith(\"openpype.\")\n ]\n\n try:\n for module_name in modules_to_del:\n del sys.modules[module_name]\n except AttributeError:\n pass\n except KeyError:\n pass\n\n print(\">>> loading environments ...\")\n # Avalon environments must be set before avalon module is imported\n print(\" - for Avalon ...\")\n set_avalon_environments()\n print(\" - global OpenPype ...\")\n set_openpype_global_environments()\n print(\" - for modules ...\")\n set_modules_environments()\n\n from openpype import cli\n from openpype.lib import terminal as t\n from openpype.version import __version__\n\n assert version_path, \"Version path not defined.\"\n info = get_info()\n info.insert(0, f\">>> Using OpenPype from [ {version_path} ]\")\n\n t_width = 20\n try:\n t_width = os.get_terminal_size().columns - 2\n except (ValueError, OSError):\n # running without terminal\n pass\n\n _header = f\"*** OpenPype [{__version__}] \"\n\n info.insert(0, _header + \"-\" * (t_width - len(_header)))\n for i in info:\n # don't show for running scripts\n if all(item not in sys.argv for item in silent_commands):\n t.echo(i)\n\n try:\n cli.main(obj={}, prog_name=\"openpype\")\n except Exception: # noqa\n exc_info = sys.exc_info()\n print(\"!!! 
OpenPype crashed:\")\n traceback.print_exception(*exc_info)\n sys.exit(1)\n\n\ndef get_info() -> list:\n \"\"\"Print additional information to console.\"\"\"\n from openpype.lib.mongo import get_default_components\n from openpype.lib.log import PypeLogger\n\n components = get_default_components()\n\n inf = []\n if not getattr(sys, 'frozen', False):\n inf.append((\"OpenPype variant\", \"staging\"))\n else:\n inf.append((\"OpenPype variant\", \"production\"))\n inf.append((\"Running OpenPype from\", os.environ.get('OPENPYPE_ROOT')))\n inf.append((\"Using mongodb\", components[\"host\"]))\n\n if os.environ.get(\"FTRACK_SERVER\"):\n inf.append((\"Using FTrack at\",\n os.environ.get(\"FTRACK_SERVER\")))\n\n if os.environ.get('DEADLINE_REST_URL'):\n inf.append((\"Using Deadline webservice at\",\n os.environ.get(\"DEADLINE_REST_URL\")))\n\n if os.environ.get('MUSTER_REST_URL'):\n inf.append((\"Using Muster at\",\n os.environ.get(\"MUSTER_REST_URL\")))\n\n # Reinitialize\n PypeLogger.initialize()\n\n log_components = PypeLogger.log_mongo_url_components\n if log_components[\"host\"]:\n inf.append((\"Logging to MongoDB\", log_components[\"host\"]))\n inf.append((\" - port\", log_components[\"port\"] or \"\"))\n inf.append((\" - database\", PypeLogger.log_database_name))\n inf.append((\" - collection\", PypeLogger.log_collection_name))\n inf.append((\" - user\", log_components[\"username\"] or \"\"))\n if log_components[\"auth_db\"]:\n inf.append((\" - auth source\", log_components[\"auth_db\"]))\n\n maximum = max(len(i[0]) for i in inf)\n formatted = []\n for info in inf:\n padding = (maximum - len(info[0])) + 1\n formatted.append(\n \"... {}:{}[ {} ]\".format(info[0], \" \" * padding, info[1]))\n return formatted\n\n\nif __name__ == \"__main__\":\n boot()\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":25907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"163097656","text":"from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.db import models\nfrom django.forms import ModelForm, TextInput, Textarea, EmailInput\nfrom crispy_forms.helper import FormHelper\n\n# Create your models here.\n\nclass Setting(models.Model):\n STATUS = (\n ('True', 'True'),\n ('False', 'False')\n )\n\n title = models.CharField(max_length=150)\n keywords = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n company = models.CharField(max_length=50)\n address = models.CharField(blank=True, max_length=150)\n phone = models.IntegerField()\n email = models.EmailField(blank=True)\n smtpserver = models.CharField(blank=True, max_length=255)\n smtpemail = models.CharField(blank=True, max_length=150)\n smtppassword = models.CharField(blank=True, max_length=255)\n icon = models.ImageField(blank=True, upload_to='images/')\n facebook = models.CharField(blank=True, max_length=50)\n instagram = models.CharField(blank=True, max_length=50)\n twitter = models.CharField(blank=True, max_length=50)\n youtube = models.CharField(blank=True, max_length=50)\n aboutus = RichTextUploadingField(blank=True)\n contactus = RichTextUploadingField(blank=True)\n references = RichTextUploadingField(blank=True)\n status = models.CharField(max_length=15, choices=STATUS)\n create_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.title\n\n\n# contact us model\nclass Contact(models.Model):\n STATUS = (\n ('New', 'New'),\n ('Read', 'Read'),\n 
('Closed', 'Closed'),\n )\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=20)\n email = models.EmailField(max_length=50)\n subject = models.CharField(max_length=100)\n message = models.TextField(max_length=255)\n status = models.CharField(max_length=25, choices=STATUS, default='New')\n ip = models.CharField(blank=True, max_length=25)\n note = models.CharField(blank=True, max_length=100)\n create_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.first_name + '_' + self.last_name\n\n\nclass ContactForm(ModelForm):\n\n class Meta:\n model = Contact\n fields = ['first_name', 'last_name', 'email', 'subject', 'message']\n labels = {\n 'first_name':'','last_name':'','email':'','subject':'','message':'',\n\n }\n\n widgets = {\n 'first_name': TextInput(attrs={'class': 'input', 'placeholder': 'First Name','label':'None'}),\n 'last_name': TextInput(attrs={'class': 'input', 'placeholder': 'Last Name'}),\n 'email': EmailInput(attrs={'class': 'input', 'placeholder': 'Email'}),\n 'subject': TextInput(attrs={'class': 'input', 'placeholder': 'Subject'}),\n 'message': Textarea(attrs={'class': 'input', 'placeholder': 'Write your message','rows':4}),\n }\n\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"98519484","text":"import pprint\r\n\r\n\r\ndef get_word_pattern(word):\r\n # Returns a string of the pattern form of the given word.\r\n # e.g. '0.1.2.3.4.1.2.3.5.6' for 'DUSTBUSTER'\r\n word = word.upper()\r\n next_num = 0\r\n letter_nums = {}\r\n word_pattern = []\r\n\r\n for letter in word:\r\n if letter not in letter_nums:\r\n letter_nums[letter] = str(next_num)\r\n next_num += 1\r\n\r\n word_pattern.append(letter_nums[letter])\r\n\r\n return '.'.join(word_pattern)\r\n\r\n\r\ndef main():\r\n all_patterns = {}\r\n\r\n fo = open('dictionary.txt')\r\n word_list = fo.read().split('\\n')\r\n fo.close()\r\n\r\n for word in word_list:\r\n pattern = get_word_pattern(word)\r\n\r\n if pattern not in all_patterns:\r\n all_patterns[pattern] = [word]\r\n else:\r\n all_patterns[pattern].append(word)\r\n\r\n # This is code that writes code. 
The word_patterns.py file contains\r\n    # one very, very large assignment statement.\r\n    fo = open('word_patterns.py', 'w')\r\n    fo.write('all_patterns = ')\r\n    fo.write(pprint.pformat(all_patterns))\r\n    fo.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"simple_substitution_cipher/make_word_patterns.py","file_name":"make_word_patterns.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"629923310","text":"# -- coding: utf-8 --\nimport dbaccess\nimport re\n\n\ndef speratefeatures(inputstring):\n    regex1 = \"【(.*?)】\"\n    # Pitfall hit here: http://www.crifan.com/python_re_search_vs_re_findall/\n    regex2 = \"】(.*?)(?:【|$)\"\n    pat1 = re.compile(regex1)\n    pat2 = re.compile(regex2)\n    reviewid = inputstring[0]\n    reviewsentence = inputstring[1]\n    para = reviewsentence.strip()\n    # The two patterns split the review into its individual features\n    features = re.findall(pat1, para)\n    features_details = re.findall(pat2, para)\n    sqlinsertfeatureclass = \"INSERT INTO featureclass (feature,detail,reviewid) VALUES (%s,%s,%s)\"\n\n    # If the input contains no feature tags\n    with con.cursor() as cursor:\n        if len(features) == 0:\n            feature = \"无\"\n            features_detail = reviewsentence\n            cursor.execute(sqlinsertfeatureclass, (feature, features_detail, reviewid))\n            lastrowid = cursor.lastrowid\n            breakintosentence(features_detail, lastrowid)\n        else:\n            for feature, features_detail in zip(features, features_details):\n                cursor.execute(sqlinsertfeatureclass, (feature, features_detail, reviewid))\n                lastrowid = cursor.lastrowid\n                breakintosentence(features_detail, lastrowid)\n\n\ndef breakintosentence(inputstring, lastrowid):\n    regex = \"[。?!]+\"\n    pattern = re.compile(regex)\n    sentences = re.split(pattern, inputstring)\n    sql = \"INSERT INTO reviewsentiment (sentence, featureclassid) VALUES (%s,%s)\"\n    with con.cursor() as cursor:\n        for sentence in sentences:\n            if sentence.strip() == \"\":\n                continue\n            cursor.execute(sql, (sentence, lastrowid))\n\n\ncon = dbaccess.getcon(\"other\")\ntry:\n    with con.cursor() as cursor:\n        processnum = 0\n        sqltotal = \"SELECT count(*) FROM koubei\"\n        cursor.execute(sqltotal)\n        total = cursor.fetchone()[0]\n    with con.cursor() as cursor:\n        sql = \"SELECT id,neirong FROM koubei WHERE id > 76975\"\n        cursor.execute(sql)\n        while True:\n            one = cursor.fetchone()\n            if one is None:\n                break\n            speratefeatures(one)\n            if processnum % 5 == 0:\n                con.commit()\n            processnum += 1\n            print(\"processing:\" + str(processnum) + \"/\" + str(total))\nexcept Exception as e:\n    print(e)\nfinally:\n    con.commit()\n    con.close()\n\n\n# file = open(\"test.txt\", 'r', encoding=\"utf8\")\n# string = file.read()\n# breakreviewintosentence(string)\n","sub_path":"breakintosentence.py","file_name":"breakintosentence.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"644105273","text":"#\n# Project: PROJECT\n# http://www.edna-site.org\n#\n# File: \"$Id$\"\n#\n# Copyright (C) 2010, European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jerome Kieffer\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR 
PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\n__author__ = \"Jerome Kieffer\"\n__license__ = \"GPLv3+\"\n__copyright__ = \"2010, European Synchrotron Radiation Facility, Grenoble, France\"\n\nimport os\n\nfrom EDUtilsArray import EDUtilsArray\nfrom EDVerbose import EDVerbose\nfrom EDAssert import EDAssert\nfrom EDTestCasePluginExecute import EDTestCasePluginExecute\nfrom XSDataNormalizeImage import XSDataResultNormalize\nfrom EDFactoryPluginStatic import EDFactoryPluginStatic\n\nEDFactoryPluginStatic.loadModule(\"EDInstallNumpyv1_3\")\nEDFactoryPluginStatic.loadModule(\"EDInstallPILv1_1_7\")\nEDFactoryPluginStatic.loadModule(\"EDInstallFabio_v0_0_7\")\n\n\n\nclass EDTestCasePluginExecuteExecNormalizeImagev1_1(EDTestCasePluginExecute):\n    \"\"\"\n    Those are all execution tests for the EDNA Exec plugin NormalizeImagev1_0\n    \"\"\"\n\n    def __init__(self, _strTestName=None):\n        \"\"\"\n        \"\"\"\n        EDTestCasePluginExecute.__init__(self, \"EDPluginExecNormalizeImagev1_1\")\n#        self.setConfigurationFile(os.path.join(self.getPluginTestsDataHome(),\n#                                               \"XSConfiguration_NormalizeImage.xml\"))\n        self.setDataInputFile(os.path.join(self.getPluginTestsDataHome(), \\\n                                           \"XSDataInputNormalizeImage_reference_array.xml\"))\n        self.setReferenceDataOutputFile(os.path.join(self.getPluginTestsDataHome(), \\\n                                                     \"XSDataResultNormalizeImage_array.xml\"))\n\n    def preProcess(self):\n        \"\"\"\n        PreProcess of the execution test: download an EDF file from http://www.edna-site.org\n        and remove any existing output file, i.e. /tmp/diff6105.edf \n        \"\"\"\n        EDTestCasePluginExecute.preProcess(self)\n        self.loadTestImage([ \"normaliseImageDark10.edf\", \"normaliseImageDark15.edf\", \"normaliseImageDark2.edf\", \"normaliseImageDark3.edf\", \"normaliseImageData10.edf\", \"normaliseImageData15.edf\", \"normaliseImageFlat2.edf\", \"normaliseImageFlat3.edf\"])\n        strExpectedOutput = self.readAndParseFile (self.getReferenceDataOutputFile())\n#        xsDataResultReference = XSDataResultNormalize.parseString(strExpectedOutput)\n#        outputFileName = xsDataResultReference.getThumbnailPath().getPath().getValue()\n#        EDVerbose.DEBUG(\" Output file is %s\" % outputFileName)\n#        if os.path.isfile(outputFileName):\n#            EDVerbose.DEBUG(\" Output file exists %s, I will remove it\" % outputFileName)\n#            os.remove(outputFileName)\n\n\n\n    def testExecute(self):\n        \"\"\"\n        \"\"\"\n        self.run()\n#        plugin = self.getPlugin()\n\n        strExpectedOutput = self.readAndParseFile (self.getReferenceDataOutputFile())\n        strObtainedOutput = self.readAndParseFile (self.m_edObtainedOutputDataFile)\n        EDVerbose.DEBUG(\"Checking obtained result...\")\n        xsDataResultReference = XSDataResultNormalize.parseString(strExpectedOutput)\n        xsDataResultObtained = XSDataResultNormalize.parseString(strObtainedOutput)\n\n        #EDAssert.strAlmostEqual(xsDataResultReference.marshal(), xsDataResultObtained.marshal(), \"Result XML are the same\")\n\n        npaReference = EDUtilsArray.getArray(xsDataResultReference.output)\n        npaObtained = EDUtilsArray.getArray(xsDataResultObtained.output)\n        EDAssert.arraySimilar(npaReference, npaObtained, \"Arrays are the same\", _fAbsMaxDelta=1e-6)\n        EDAssert.equal(npaReference.dtype, npaObtained.dtype, \"Datatypes are the same\")\n#        strExpectedOutput = self.readAndParseFile (self.getReferenceDataOutputFile())\n#        xsDataResultReference = XSDataResultExecThumbnail.parseString(strExpectedOutput)\n#        outputFileName = 
xsDataResultReference.getThumbnailPath().getPath().getValue()\n#        outputImage = Image.open(outputFileName)\n#\n#        referenceFileName = os.path.join(self.getPluginTestsDataHome(), os.path.basename(outputFileName))\n#        referenceImage = Image.open(referenceFileName)\n#\n\n\n\n    def process(self):\n        \"\"\"\n        \"\"\"\n        self.addTestMethod(self.testExecute)\n\n\n\nif __name__ == '__main__':\n\n    testNormalizeImagev1_0instance = EDTestCasePluginExecuteExecNormalizeImagev1_1(\"EDTestCasePluginExecuteExecNormalizeImagev1_1\")\n    testNormalizeImagev1_0instance.execute()\n","sub_path":"execPlugins/plugins/EDPluginGroupNormalize-v1.0/tests/testsuite/EDTestCasePluginExecuteExecNormalizeImagev1_1.py","file_name":"EDTestCasePluginExecuteExecNormalizeImagev1_1.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"132821107","text":"import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Categorical\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Environment:\n\n    \"\"\"An epidemic infection simulation model.\n\n    Attributes\n    ----------\n    is_terminal:\n\n    _start:\n\n    Methods\n    -------\n    init_state: Prepare the environment for the starting state of a trial.\n\n    step: Make a state transition.\n\n    obeserved_state: To transform a global state to an observable state for\n    agent.\n\n    start(property): Return the starting state of a trial.\n\n    \"\"\"\n\n    def __init__(self):\n\n        self.is_terminal = False\n\n        self._start = torch.tensor([0, 0, 0, 0, 0], device=device)\n\n    def init_state(self):\n        \"\"\" We need to define this function to initialize the environment.\n\n        Returns\n        -------\n        The starting state of the environment\n\n        \"\"\"\n\n        self.is_terminal = False\n\n        return self.start\n\n    def step(self, s, a, t):\n\n        \"\"\" We need to define this function as the transition function (SEIR, ...).\n\n        Args\n        ----\n        s: current state\n        a: action taken by agent in current state\n        t: time\n\n        Returns\n        -------\n        next_state (torch.Tensor): The next state (aka s').\n        reward (int) : reward of (s, a, s')\n        is_terminal (boolean) : whether next_state is a terminal state\n\n        \"\"\"\n        next_state = torch.tensor([0, 1, 2, 3, 4, 6, 7], device=device).float()\n\n        reward = 1\n\n        return next_state, reward, self.is_terminal\n\n    def obeserved_state(self, state):\n        \"\"\" To transform a global state to an observable state for agent.\n\n        Args\n        ----\n        state : global state\n\n        Returns\n        -------\n        observed_state : observable state for agent\n\n        \"\"\"\n\n        observed_state = torch.tensor([0, 1, 2, 3, 4], device=device).float()\n\n        return observed_state\n\n    @property\n    def start(self):\n        return self._start\n\n\nclass Agent(nn.Module):\n    \"\"\" The decision-making policy network for the simulation.\n\n    Attributes\n    ----------\n    init_legal_actions:\n\n    legal_actions:\n\n    log_probs:\n\n    rewards:\n\n    action_embeddings:\n\n    net:\n\n    Methods\n    -------\n    init_agent: reset the history of the log probabilities and rewards\n\n    forward: the inference of the policy network\n\n    select_actions: return the action sampled from the policy network\n\n    \"\"\"\n    def __init__(self, dim_input, dim_output, max_actions, init_legal_actions):\n        \"\"\" Initialize the parameters of the policy network\n\n        Args\n        ----\n        dim_input:\n        dim_output:\n        max_actions:\n        init_legal_actions:\n\n        \"\"\"\n\n        super(Agent, self).__init__()\n\n        self.init_legal_actions = 
init_legal_actions\n\n        self.legal_actions = self.init_legal_actions.copy()\n\n        self.log_probs = []\n\n        self.rewards = []\n\n        self.action_embeddings = nn.Embedding(max_actions, dim_output)\n\n        self.net = nn.Sequential(\n            nn.Linear(dim_input, 8),\n            nn.Dropout(0.1),\n            nn.Linear(8, dim_output)\n        )\n\n    def init_agent(self):\n\n        self.legal_actions = self.init_legal_actions.copy()\n\n        self.log_probs = []\n\n        self.rewards = []\n\n    def forward(self, state):\n\n        state_vector = self.net(state)\n\n        actions = torch.tensor(self.legal_actions, device=device)\n\n        actions_vectors = self.action_embeddings(actions)\n\n        scores = torch.matmul(actions_vectors, state_vector.unsqueeze(1)).squeeze()\n\n        return F.softmax(scores, dim=0)\n\n    def select_actions(self, scores):\n\n        distribution = Categorical(scores)\n\n        action = distribution.sample()\n\n        self.log_probs.append(distribution.log_prob(action))\n        \n        return action.item()\n\n\nclass Simulation:\n    \"\"\" The interaction between environment (epidemic infection model) and agent (policy network)\n\n    Attributes\n    ----------\n    agent\n    environment\n    gamma\n    optimizer\n\n    Methods\n    -------\n    policy_gradient_update:\n\n    episodes:\n    \"\"\"\n\n    def __init__(self, agent: Agent, environment: Environment, optimizer=None):\n\n        self.agent = agent\n\n        self.environment = environment\n\n        self.gamma = 0.9\n\n        # a default argument is evaluated at definition time and cannot reference\n        # the agent, so the default optimizer is built here instead of in the signature\n        self.optimizer = optimizer if optimizer is not None else optim.Adam(agent.parameters(), lr=1e-3)\n    \n    def policy_gradient_update(self):\n\n        policy_loss = 0\n        accumulated_rewards = []\n\n        # v_t = r_t + gamma*v_{t+1}\n        # v_T = 0\n        # agent.rewards : [r_0, r_1, r_2, ........ r_T]\n\n        v = 0 # V_T = 0\n\n        for r in self.agent.rewards[::-1]: # r_T, r_{T-1}, .....r_0\n\n            v = r + self.gamma * v\n\n            accumulated_rewards.insert(0, v)\n\n        accumulated_rewards = torch.tensor(accumulated_rewards, device=device)\n\n        accumulated_rewards = (accumulated_rewards - accumulated_rewards.mean()) / (accumulated_rewards.std() + 1e-9)\n\n        for log_prob, r in zip(self.agent.log_probs, accumulated_rewards):\n\n            policy_loss += (-log_prob * r)\n\n        self.optimizer.zero_grad()\n        \n        policy_loss.backward()\n\n        self.optimizer.step()\n\n    def episodes(self, max_episodes=3, max_steps=10):\n\n        for episode in range(max_episodes):\n\n            state = self.environment.init_state()\n\n            state_observed = self.environment.obeserved_state(state)\n\n            reward_episode = 0\n\n            for t in range(max_steps):\n\n                actions_probs = self.agent.forward(state_observed)\n\n                action = self.agent.select_actions(actions_probs)\n\n                state, reward, is_terminal = self.environment.step(state, action, t=t)\n\n                state_observed = self.environment.obeserved_state(state)\n\n                self.agent.rewards.append(reward)\n\n                reward_episode += reward\n\n                if is_terminal:\n\n                    break\n\n            self.policy_gradient_update()\n\n            self.agent.init_agent()\n\n\ndef main():\n\n    init_legal_actions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n    args_agent = {\n        \"dim_input\": 5, \n        \"dim_output\": 3,\n        \"max_actions\": 10,\n        \"init_legal_actions\": init_legal_actions\n    }\n\n    agent = Agent(**args_agent).to(device)\n\n    env = Environment()\n\n    game = Simulation(agent, env)\n\n    game.episodes()\n\n\nif __name__ == \"__main__\":\n\n    main()\n","sub_path":"rl_reinforce.py","file_name":"rl_reinforce.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"539091089","text":"from math import inf as infinity\nimport logging\nimport json\nimport requests\nimport sseclient\n\n\nfrom flask import request, jsonify\n\nfrom codeitsuisse import app\n\nlogger = 
logging.getLogger(__name__)\n\n@app.route('/tic-tac-toe', methods=['POST'])\ndef evaluateTic():\n # Global setup\n ME = -1\n COMP = +1\n\n board = [\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]\n ]\n\n actionToCoord = {\n 'NW': (0, 0),\n 'N': (0, 1),\n 'NE': (0, 2),\n 'W': (1, 0),\n 'C': (1, 1),\n 'E': (1, 2),\n 'SW': (2, 0),\n 'S': (2, 1),\n 'SE': (2, 2)\n }\n\n def getBoardPosStr(coord):\n if coord == (0, 0):\n return 'NW'\n elif coord == (0, 1):\n return 'N'\n elif coord == (0, 2):\n return 'NE'\n elif coord == (1, 0):\n return 'W'\n elif coord == (1, 1):\n return 'C'\n elif coord == (1, 2):\n return 'E'\n elif coord == (2, 0):\n return 'SW'\n elif coord == (2, 1):\n return 'S'\n elif coord == (2, 2):\n return 'SE'\n else:\n return 'Invalid'\n\n\n def getCoordPos(direction, actionToCoord):\n try:\n t = actionToCoord[direction]\n return t\n except:\n return (-1, -1)\n\n def game_over(state):\n \"\"\"\n This function test if the human or computer wins\n :param state: the state of the current board\n :return: True if the human or computer wins\n \"\"\"\n return wins(state, ME) or wins(state, COMP)\n\n def wins(board, player):\n \"\"\"\n This function tests if a specific player wins. Possibilities:\n * Three rows [X X X] or [O O O]\n * Three cols [X X X] or [O O O]\n * Two diagonals [X X X] or [O O O]\n :param board: the board of the current board\n :param player: a human or a computer\n :return: True if the player wins\n \"\"\"\n win_board = [\n [board[0][0], board[0][1], board[0][2]],\n [board[1][0], board[1][1], board[1][2]],\n [board[2][0], board[2][1], board[2][2]],\n [board[0][0], board[1][0], board[2][0]],\n [board[0][1], board[1][1], board[2][1]],\n [board[0][2], board[1][2], board[2][2]],\n [board[0][0], board[1][1], board[2][2]],\n [board[2][0], board[1][1], board[0][2]],\n ]\n if [player, player, player] in win_board:\n return True\n else:\n return False\n\n def evaluate(board):\n if wins(board, ME):\n return 1\n elif wins(board, COMP):\n return -1\n else:\n return 0\n\n def empty_cells(state):\n \"\"\"\n Each empty cell will be added into cells' list\n :param state: the state of the current board\n :return: a list of empty cells\n \"\"\"\n cells = []\n\n for x, row in enumerate(state):\n for y, cell in enumerate(row):\n if cell == 0:\n cells.append((x, y))\n\n return cells\n\n def isInvalidMove(coord, board):\n return board[coord[0]][coord[1]] != 0\n\n def minimax(board, depth, player):\n if player == ME:\n best = [-1, -1, -infinity]\n else:\n best = [-1, -1, +infinity]\n\n if depth == 0 or game_over(board):\n score = evaluate(board)\n return [-1, -1, score]\n\n for cell in empty_cells(board):\n x, y = cell[0], cell[1]\n board[x][y] = player # make my move\n score = minimax(board, depth - 1, -player)\n board[x][y] = 0 # return to initial state\n score[0], score[1] = x, y\n\n if player == ME:\n if score[2] > best[2]:\n best = score # max value\n else:\n if score[2] < best[2]:\n best = score # min value\n\n return best\n\n\n battleId_raw = request.get_json()\n battleId = battleId_raw['battleId']\n logging.info(f\"session_id={battleId}\")\n startUrl = f\"https://cis2021-arena.herokuapp.com/tic-tac-toe/start/{battleId}\"\n playUrl = f\"https://cis2021-arena.herokuapp.com/tic-tac-toe/play/{battleId}\"\n logging.info(f\"sending get request to ={startUrl}\")\n response = requests.get(startUrl, stream=True, params={'Accept': 'text/event-stream', 'Connection': 'keep-alive'})\n client = sseclient.SSEClient(response)\n myRole = None\n for event in client.events():\n logging.info(f\"Recevied 
Events. {json.dumps(event.data)}\")\n gameEvent = json.loads(event.data)\n # primary move\n if 'youAre' in gameEvent and gameEvent['youAre'] == \"O\":\n logging.info(f\"I am O.\")\n myRole = \"O\"\n board[1][1] = ME\n requests.post(playUrl, data={\"action\": \"putSymbol\", \"position\": \"C\"})\n elif 'youAre' in gameEvent and gameEvent['youAre'] == \"X\":\n myRole = \"X\"\n logging.info(f\"I am the second player.\")\n continue\n\n elif 'player' in gameEvent and 'action' in gameEvent:\n if gameEvent['player'] == myRole:\n logging.info(\"Safely ignored the game news.\")\n continue\n if 'position' not in gameEvent:\n logging.info(f\"Flip Table.\")\n requests.post(playUrl, data={\"action\": \"(╯°□°)╯︵ ┻━┻\"})\n break\n boardNewMove = getCoordPos(gameEvent['position'], actionToCoord)\n # Flip Table\n if boardNewMove == (-1, -1) or isInvalidMove(boardNewMove, board):\n logging.info(f\"Flip Table.\")\n requests.post(playUrl, data={\"action\": \"(╯°□°)╯︵ ┻━┻\"})\n break\n else:\n row, col = boardNewMove\n board[row][col] = COMP\n boardForCal = board.copy()\n logging.info(f\"Before MiniMax after component moved {board}\")\n move = minimax(boardForCal, 9, ME)\n logging.info(f\"myNewMove {move}\")\n myMove_row, myMove_col = move[0], move[1]\n board[myMove_row][myMove_col] = ME\n logging.info(f\"After MiniMax after I moved {board}\")\n # update to server\n boardPosStr = getBoardPosStr((myMove_row, myMove_col))\n logging.info({\"action\": \"putSymbol\", \"position\": boardPosStr})\n requests.post(playUrl, data={\"action\": \"putSymbol\", \"position\": boardPosStr})\n elif 'winner' in gameEvent:\n break\n \n else:\n logging.info(f\"Unknown incoming response {event.data}\")\n requests.post(playUrl, data={\"action\": \"(╯°□°)╯︵ ┻━┻\"})\n break\n return json.dumps({'result': \"ended\"})\n\n\n\n","sub_path":"codeitsuisse/routes/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":6879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"440574653","text":"import logging\nfrom time import sleep\n\nfrom bitmex_api import BitMEX\nfrom bitmex_websocket import BitMEXWebsocket\nfrom config import ENDPOINT, API_KEY, API_SECRET, API_KEY_2, API_SECRET_2\n\n\ndef run():\n logger = setup_logger()\n\n # Instantiating the WS will make it connect. 
Be sure to add your api_key/api_secret.\n ws = BitMEXWebsocket(endpoint=ENDPOINT, symbol=\"\",\n api_key=API_KEY, api_secret=API_SECRET)\n\n logger.info(\"Instrument data: %s\" % ws.get_instrument())\n\n api = BitMEX(base_url=ENDPOINT, apiKey=API_KEY_2, apiSecret=API_SECRET_2)\n executions_copied = {}\n\n # Run forever\n while(ws.ws.sock.connected):\n # logger.info(\"Market Depth: %s\" % ws.market_depth())\n executions = ws.executions()\n\n for execution in executions:\n execID = execution['execID']\n if execID not in executions_copied:\n executions_copied[execID] = execution\n if execution['execType'] == 'New':\n if execution['ordType'] == 'Limit':\n api.place_order_limit(execution['orderQty'],execution['price'],execution['symbol'],execution['side'],execution['orderID'])\n elif execution['ordType'] == 'Market':\n api.place_order_market(execution['orderQty'],execution['symbol'],execution['side'],execution['orderID'])\n elif execution['execType'] == 'Canceled':\n api.cancelByCl([execution['orderID']])\n\n\n sleep(1)\n\n\ndef setup_logger():\n # Prints logger info to terminal\n logger = logging.getLogger()\n logger.setLevel(logging.INFO) # Change this to DEBUG if you want a lot more info\n ch = logging.StreamHandler()\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\nif __name__ == \"__main__\":\n run()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"162696150","text":"import cv2;\nimport math;\nimport numpy as np;\n\ndef DarkChannel(im,sz):\n # b,g,r = cv2.split(im)\n # dc = cv2.min(cv2.min(r,g),b);\n # kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(sz,sz))\n # dark = cv2.erode(dc,kernel)\n # return dark\n dark = np.zeros(im.shape[:-1])\n pad_w = math.floor(sz/2)\n padded_image = np.pad(im, ((pad_w, pad_w), (pad_w, pad_w), (0, 0)), 'edge')\n\n for i, j in np.ndindex(im.shape[:-1]):\n dark[i, j] = np.min(padded_image[i:i + sz, j:j + sz, :]) \n return dark\n\ndef AtmLight(im,dark):\n [h,w] = im.shape[:2]\n imsz = h*w\n # numpx = int(max(math.floor(imsz/1000),1))\n numpx = int(0.01 * imsz)\n darkvec = dark.reshape(1,imsz);\n\n \n\n indices = darkvec.argsort();\n max_10_per = indices[0,imsz-numpx:]\n A = np.zeros((1,3))\n for j in range(3):\n for i in range(numpx):\n x = int(max_10_per[i] / w)\n y = max_10_per[i] % w\n A[0,j] += im[x,y,j]\n A = A / numpx\n\n # imvec = im.reshape(imsz,3);\n # atmsum = np.zeros([1,3])\n # for ind in range(1,numpx):\n # atmsum = atmsum + imvec[indices[ind]]\n\n # A = atmsum / numpx;\n # A = np.array([[1, 1, 1]])\n return A\n\ndef TransmissionEstimate(im,A,sz):\n omega = 0.95;\n im3 = np.empty(im.shape,im.dtype);\n\n for ind in range(0,3):\n im3[:,:,ind] = im[:,:,ind]/A[0,ind]\n\n transmission = 1 - omega*DarkChannel(im3,sz);\n return transmission\n\ndef Guidedfilter(im,p,r,eps):\n mean_I = cv2.boxFilter(im,cv2.CV_64F,(r,r));\n mean_p = cv2.boxFilter(p, cv2.CV_64F,(r,r));\n mean_Ip = cv2.boxFilter(im*p,cv2.CV_64F,(r,r));\n cov_Ip = mean_Ip - mean_I*mean_p;\n\n mean_II = cv2.boxFilter(im*im,cv2.CV_64F,(r,r));\n var_I = mean_II - mean_I*mean_I;\n\n a = cov_Ip/(var_I + eps);\n b = mean_p - a*mean_I;\n\n mean_a = cv2.boxFilter(a,cv2.CV_64F,(r,r));\n mean_b = cv2.boxFilter(b,cv2.CV_64F,(r,r));\n\n q = mean_a*im + mean_b;\n return q;\n\ndef TransmissionRefine(im,et):\n gray 
= cv2.cvtColor(im,cv2.COLOR_BGR2GRAY);\n gray = np.float64(gray)/255;\n r = 600;\n eps = 0.0001;\n t = Guidedfilter(gray,et,r,eps);\n\n return t;\n\ndef Recover(im,t,A,tx = 0.1):\n res = np.empty(im.shape,im.dtype);\n t = cv2.max(t,tx);\n\n for ind in range(0,3):\n res[:,:,ind] = (im[:,:,ind]-A[0,ind])/t + A[0,ind]\n\n return res\n\nif __name__ == '__main__':\n import sys\n try:\n fn = sys.argv[1]\n except:\n fn = '1.jpg'\n\n def nothing(*argv):\n pass\n\n src = cv2.imread('./image/'+fn);\n\n I = src.astype('float64')/255;\n \n dark = DarkChannel(I,15);\n A = AtmLight(I,dark);\n te = TransmissionEstimate(I,A,15);\n t = TransmissionRefine(src,te);\n J = Recover(I,t,A,0.3);\n J = J * 1.1 + 0.05\n\n # cv2.imshow(\"dark\",dark);\n # cv2.imshow(\"t\",t);\n # cv2.imshow('I',src);\n # cv2.imshow('J',J);\n cv2.imwrite('./image/'+'1_p.jpg',J*255);\n cv2.waitKey();\n \n","sub_path":"201904/image_dehaze/dehaze.py","file_name":"dehaze.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"442884507","text":"from os.path import join, abspath, dirname, normpath, basename\nfrom os import getenv\nimport dj_database_url\nfrom sys import path\n\n# PATH vars\n\n\nBASE_DIR = dirname(dirname(abspath(__file__)))\n\n# PATH CONFIGURATION\n# Absolute filesystem path to the Django project directory:\nDJANGO_ROOT = dirname(dirname(dirname(abspath(__file__))))\n\n# Absolute filesystem path to the top-level project folder:\nSITE_ROOT = dirname(DJANGO_ROOT)\n\n# Site name:\nSITE_NAME = basename(DJANGO_ROOT)\n\npath.append(DJANGO_ROOT)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = getenv(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nDJANGO_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n)\n\nTHIRD_PARTY_APPS = (\n 'corsheaders',\n # 'social.apps.django_app.default',\n # 'storages',\n # 'bootstrapform',\n # 'corsheaders',\n # 'payments',\n # 'django_forms_bootstrap',\n # 'tagging',\n # 'mptt',\n # 'rest_framework',\n)\n\n\n# Apps specific for this project go here.\nLOCAL_APPS = (\n)\n\nPROJECT_APPS = ()\n\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n)\n\nROOT_URLCONF = '{{cookiecutter.repo_name}}.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = '{{cookiecutter.repo_name}}.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.config(\n default=\"postgres://localhost/{{cookiecutter.repo_name}}\"\n)\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-gb'\n\nTIME_ZONE = 'UTC' # 'Europe/London'\n\nUSE_I18N = False\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# 
Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n# MEDIA CONFIGURATION\nMEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))\nMEDIA_URL = '/media/'\n# END MEDIA CONFIGURATION\n\n\n# STATIC FILE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\nSTATIC_ROOT = normpath(join(DJANGO_ROOT, 'assets'))\nSTATIC_URL = '/assets/'\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/\n# #std:setting-STATICFILES_DIRS\n# Note: There is a presumption that the first entry here is 'static' so that\n# trash dirs work.\nSTATICFILES_DIRS = (\n normpath(join(DJANGO_ROOT, 'static')),\n)\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [normpath(join(DJANGO_ROOT, 'templates'))],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n ],\n },\n },\n]\n","sub_path":"{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"578124164","text":"import numpy\nfrom scipy import misc\n\ndef get_picture(paths):\n address = 0\n fo = open(\"texture\",\"w\")\n for path in paths:\n image = misc.imread(path,mode=\"RGBA\")\n the_shape = image.shape\n new_shape = (the_shape[0],the_shape[1])\n shape_array = numpy.ones(new_shape, dtype=numpy.int)\n tri = numpy.tril(shape_array)\n for i in range(the_shape[0]):\n for j in range(the_shape[1]):\n if i == 0 and j == 0:\n print(str(hex(address))[2:])\n if tri[i][j] == 1:\n fo.write(\"@{0}\\n\".format(str(hex(address))[2:]))\n address = address + 1\n fo.write(numpy.binary_repr(image[the_shape[0] - i - 1][j][0],width=8))\n fo.write(numpy.binary_repr(image[the_shape[0] - i - 1][j][1],width=8))\n fo.write(numpy.binary_repr(image[the_shape[0] - i - 1][j][2],width=8))\n fo.write(\"\\n\")\n fo.close()\n\nif __name__ == \"__main__\":\n get_picture([\"5_Target.png\",\"5_Target.png\"])","sub_path":"demo_scripts/Texture_format.py","file_name":"Texture_format.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"190069812","text":"import re\n\n\nclass Grid(object):\n def __init__(self, num_rows, num_cols, ascii_blob):\n self.num_rows = num_rows\n self.num_cols = num_cols\n self.ascii_blob = ascii_blob\n self.binary = [\n [1 if char == 'x' else 0 for char in line]\n for line in self.ascii_blob.splitlines()\n ]\n #print(\"self.binary = {}\".format(self.binary))\n self.points = [\n [i, j] for i in range(num_rows) for j in range(num_cols)\n if self.binary[i][j] == 1\n ]\n #print(\"self.points = {}\".format(self.points))\n self.num_points = sum([sum(row) for row in self.binary])\n\n @classmethod\n def from_file(cls, file_input):\n \"\"\"\n Reads the input as specified in the problem, and populates the attributes in ``__init__``.\n :param file_input: A readable file object (either STDIN or an opened file).\n :return: A 
corresponding ``Grid`` object.\n        \"\"\"\n\n        lines = file_input.read().splitlines()\n\n        num_lines, num_cols = [int(x) for x in lines[0].strip().split(\" \")]\n        ascii_blob = \"\\n\".join(lines[1:])\n\n        assert (len(lines) == num_lines + 1)\n        assert (len(set([len(x) for x in ascii_blob.split(\"\\n\")])) == 1)\n        assert (len((ascii_blob.split(\"\\n\"))[0]) == num_cols)\n\n        return cls(num_lines, num_cols, ascii_blob)\n\n    def count_chains(self):\n\n        result = 0\n\n        points_accounted_for = []\n        frontier = []\n\n        while len(points_accounted_for) < len(self.points):\n\n            # If the frontier is empty, find a point that we haven't\n            # accounted for, yet, and toss it in there. This forms a new chain.\n\n            if not frontier:\n                others = [p for p in self.points if p not in points_accounted_for]\n                frontier.append(others[0])\n            # Can we move one unit up, down, or sideways\n            # and get a new point?\n\n            new_points = []\n\n            #print(\"frontier = {}\".format(frontier))\n\n            for point in frontier:\n                #print(\"point = {}\".format(point))\n                row, col = point\n\n                for candidate in [[row - 1, col], [row + 1, col], [row, col - 1], [row, col + 1]]:\n                    c_row, c_col = candidate\n\n                    if c_row not in range(self.num_rows) or c_col not in range(self.num_cols) or \\\n                            candidate in frontier:\n                        continue\n\n                    if self.binary[c_row][c_col] and candidate not in new_points and \\\n                            candidate not in points_accounted_for and candidate not in frontier:\n                        new_points.append(candidate)\n\n            if not new_points:\n                # If we didn't find any new points, then we've completed our chain\n                result += 1\n\n            # The points we just expanded are now accounted for; without this\n            # the while condition above would never be satisfied.\n            points_accounted_for.extend(frontier)\n\n            # Set the frontier to our newly-found set of points.\n            frontier = list(new_points)\n\n        return result\n\n    def __str__(self):\n        return self.ascii_blob\n\nif __name__ == \"__main__\":\n    import io\n\n    f = io.StringIO(\"\"\"2 8\nxxxxxxxx\nx      x\"\"\")\n\n    g = Grid.from_file(f)\n\n    print(g)\n\n    print(g.count_chains())\n","sub_path":"227_intermediate_2015-08-12_contiguous_chains/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"202635628","text":"from typing import List\nfrom typing import Optional\n\nimport numpy as np\nimport pytest\n\nfrom optuna.study import create_study\nfrom optuna.testing.visualization import prepare_study_with_trials\nfrom optuna.trial import Trial\nfrom optuna.visualization.matplotlib import plot_contour\nfrom optuna.visualization.matplotlib._contour import _create_zmap\nfrom optuna.visualization.matplotlib._contour import _interpolate_zmap\n\n\ndef test_create_zmap() -> None:\n\n    x_values = np.arange(10)\n    y_values = np.arange(10)\n    z_values = list(np.random.rand(10))\n\n    # we are testing for exact placement of z_values\n    # so also passing x_values and y_values as xi and yi\n    zmap = _create_zmap(x_values, y_values, z_values, x_values, y_values)\n\n    assert len(zmap) == len(z_values)\n    for coord, value in zmap.items():\n        # test if value under coordinate\n        # still refers to original trial value\n        xidx = coord[0]\n        yidx = coord[1]\n        assert xidx == yidx\n        assert z_values[xidx] == value\n\n\ndef test_interpolate_zmap() -> None:\n\n    contour_point_num = 2\n    zmap = {(0, 0): 1.0, (1, 1): 4.0}\n    expected = np.array([[1.0, 2.5], [2.5, 4.0]])\n\n    actual = _interpolate_zmap(zmap, contour_point_num)\n\n    assert np.allclose(expected, actual)\n\n\ndef test_target_is_none_and_study_is_multi_obj() -> None:\n\n    study = create_study(directions=[\"minimize\", \"minimize\"])\n    with pytest.raises(ValueError):\n        
plot_contour(study)\n\n\ndef test_target_is_not_none_and_study_is_multi_obj() -> None:\n\n # Multiple sub-figures.\n study = prepare_study_with_trials(more_than_three=True, n_objectives=2, with_c_d=True)\n plot_contour(study, target=lambda t: t.values[0])\n\n # Single figure.\n study = prepare_study_with_trials(more_than_three=True, n_objectives=2, with_c_d=False)\n plot_contour(study, target=lambda t: t.values[0])\n\n\n@pytest.mark.parametrize(\n \"params\",\n [\n [],\n [\"param_a\"],\n [\"param_a\", \"param_b\"],\n [\"param_b\", \"param_d\"],\n [\"param_a\", \"param_b\", \"param_c\"],\n [\"param_a\", \"param_b\", \"param_d\"],\n [\"param_a\", \"param_b\", \"param_c\", \"param_d\"],\n None,\n ],\n)\ndef test_plot_contour(params: Optional[List[str]]) -> None:\n\n # Test with no trial.\n study_without_trials = prepare_study_with_trials(no_trials=True)\n figure = plot_contour(study_without_trials, params=params)\n assert len(figure.get_lines()) == 0\n\n # Test whether trials with `ValueError`s are ignored.\n\n def fail_objective(_: Trial) -> float:\n\n raise ValueError\n\n study = create_study()\n study.optimize(fail_objective, n_trials=1, catch=(ValueError,))\n figure = plot_contour(study, params=params)\n assert len(figure.get_lines()) == 0\n\n # Test with some trials.\n study = prepare_study_with_trials(more_than_three=True)\n\n # Test ValueError due to wrong params.\n with pytest.raises(ValueError):\n plot_contour(study, [\"optuna\", \"Optuna\"])\n\n figure = plot_contour(study, params=params)\n if params is not None and len(params) < 3:\n if len(params) <= 1:\n assert len(figure.get_lines()) == 0\n elif len(params) == 2:\n assert len(figure.get_lines()) == 0\n elif params is None:\n assert figure.shape == (len(study.best_params), len(study.best_params))\n for i in range(len(study.best_params)):\n assert figure[i][0].yaxis.label.get_text() == list(study.best_params)[i]\n else:\n assert figure.shape == (len(params), len(params))\n for i in range(len(params)):\n assert figure[i][0].yaxis.label.get_text() == list(params)[i]\n\n\n@pytest.mark.parametrize(\n \"params\",\n [\n [\"param_a\", \"param_b\"],\n [\"param_a\", \"param_b\", \"param_c\"],\n ],\n)\ndef test_plot_contour_customized_target(params: List[str]) -> None:\n\n study = prepare_study_with_trials(more_than_three=True)\n with pytest.warns(UserWarning):\n figure = plot_contour(study, params=params, target=lambda t: t.params[\"param_d\"])\n if len(params) == 2:\n assert len(figure.get_lines()) == 0\n else:\n assert figure.shape == (len(params), len(params))\n for i in range(len(params)):\n assert figure[i][0].yaxis.label.get_text() == list(params)[i]\n\n\n@pytest.mark.parametrize(\n \"params\",\n [\n [\"param_a\", \"param_b\"],\n [\"param_a\", \"param_b\", \"param_c\"],\n ],\n)\ndef test_plot_contour_customized_target_name(params: List[str]) -> None:\n\n study = prepare_study_with_trials(more_than_three=True)\n figure = plot_contour(study, params=params, target_name=\"Target Name\")\n if len(params) == 2:\n assert len(figure.get_lines()) == 0\n else:\n assert figure.shape == (len(params), len(params))\n for i in range(len(params)):\n assert figure[i][0].yaxis.label.get_text() == list(params)[i]\n","sub_path":"tests/visualization_tests/matplotlib_tests/test_contour.py","file_name":"test_contour.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"153729058","text":"import numpy as np\r\nimport pandas as pd\r\nfrom mapcreator import 
MapGenerator\r\nfrom test_data import RandomDataGenerator\r\nimport math\r\n\r\nclass DataWorker():\r\n\tdef __init__(self):\r\n\t\tself.dataframe = pd.DataFrame(columns=['UniqueID', 'TrackID', 'Timestamp', 'Geopos', 'Accdata'])\r\n\t\tself.mapgen = MapGenerator()\r\n\r\n\r\n\tdef append_data(self, datastring):\r\n\t\tself.dataframe = self.dataframe.append(pd.Series(datastring), ignore_index=True)\r\n\r\n\tdef append_batch(self, batch):\r\n\t\tfor datastring in batch:\r\n\t\t\tself.append_data(datastring)\r\n\r\n\tdef save_data(self, filename, dataframe=None, path='data/', inplace=False):\r\n\t\tif inplace:\r\n\t\t\tself.dataframe.to_csv(path + filename, index=False)\r\n\t\telse:\r\n\t\t\tdataframe.to_csv(path + filename, index=False)\r\n\r\n\tdef load_data(self, filename='data.csv', path='data/'):\r\n\t\tself.dataframe = pd.read_csv(path + filename)\r\n\t\tself.dataframe['TrackID'] = self.dataframe.TrackID.apply(str)\r\n\t\tself.dataframe['UniqueID'] = self.dataframe.UniqueID.apply(str)\r\n\t\tprint(self.dataframe.info())\r\n\r\n\tdef calc_power(self, x):\r\n\t\treturn np.linalg.norm(np.array(eval(str(x))))\r\n\r\n\tdef limit_check(self, dataframe, limit):\r\n\t\tdataframe['Power'] = dataframe.loc[:, 'Accdata'].apply(self.calc_power)\r\n\t\treturn dataframe[dataframe['Power'] >= limit]\r\n\r\n\tdef get_destination(self, x1, x2):\r\n\t\t# Haversine great-circle distance (in km) between two [lat, lon] points.\r\n\t\t# Degrees are converted to radians with pi/180 (the original multiplied by\r\n\t\t# pi*180 and computed dlon as x2[1] - x2[1], which is always zero).\r\n\t\tdlat = (x2[0] - x1[0]) * math.pi / 180\r\n\t\tdlon = (x2[1] - x1[1]) * math.pi / 180\r\n\r\n\t\tlat1 = x1[0] * math.pi / 180\r\n\t\tlat2 = x2[0] * math.pi / 180\r\n\r\n\t\ta = math.sin(dlat/2) * math.sin(dlat/2) + math.sin(dlon/2) * math.sin(dlon/2) * math.cos(lat1) * math.cos(lat2)\r\n\t\t# 6371 km is the mean Earth radius.\r\n\t\treturn 2 * 6371 * math.asin(math.sqrt(a))\r\n\r\n\tdef k_nearest_check(self, data, radius=0.01, k=10):\r\n\t\t# NB: radius is in kilometres, since get_destination returns a true distance.\r\n\t\tprint(len(data))\r\n\t\tdata = data.reset_index()\r\n\t\tdata['IsTruth'] = pd.Series([i for i in range(0, len(data))])\r\n\t\tfor index in range(0, len(data) - 1):\r\n\t\t\tgeopos = data.loc[index, 'Geopos']\r\n\t\t\tk_ = 0\r\n\t\t\tfor index_ in range(index + 1, len(data)):\r\n\t\t\t\tgeopos_ = data.loc[index_, 'Geopos']\r\n\t\t\t\tif self.get_destination(eval(geopos), eval(geopos_)) <= radius:#np.sqrt(np.sum((np.asarray(eval(geopos)) - np.asarray(eval(geopos_))) ** 2)) <= radius:\r\n\t\t\t\t\t#print(self.get_destination(eval(geopos), eval(geopos_)))\r\n\t\t\t\t\tk_ += 1\r\n\t\t\tif k_ >= k:\r\n\t\t\t\tdata.loc[index, 'IsTruth'] = 1\r\n\t\t\telse:\r\n\t\t\t\tdata.loc[index, 'IsTruth'] = 0\r\n\t\treturn data\r\n\r\n\t# Helper functions for sorting and selecting data\r\n\r\n\tdef get_tracks(self, dataframe):\r\n\t\t# Label-based column access needs .loc (.iloc only takes integer positions).\r\n\t\treturn dataframe.loc[:, 'TrackID'].unique()\r\n\t\r\n\tdef sort_by_track(self, dataframe, track_id, inplace=True):\r\n\t\tif not inplace:\r\n\t\t\treturn dataframe[dataframe['TrackID'] == track_id]\r\n\t\telse:\r\n\t\t\tself.dataframe = self.dataframe[self.dataframe['TrackID'] == track_id]\r\n\t\t\tself.dataframe.reset_index(drop=True, inplace=True)\r\n\r\n\tdef get_users(self, dataframe):\r\n\t\treturn dataframe.loc[:, 'UniqueID'].unique()\r\n\t\r\n\tdef sort_by_users(self, dataframe, user_id, inplace=True):\r\n\t\tif not inplace:\r\n\t\t\treturn dataframe[dataframe['UniqueID'] == user_id]\r\n\t\telse:\r\n\t\t\tself.dataframe = self.dataframe[self.dataframe['UniqueID'] == user_id]\r\n\t\t\tself.dataframe.reset_index(drop=True, inplace=True)\r\n\r\n\t# Final visualizations\r\n\tdef graph_data(self, data, filename):\r\n\t\tdata['Power'] = data.loc[:, 'Accdata'].apply(self.calc_power)\r\n\t\tdata = data.loc[:, ['Geopos', 'Power', 'UniqueID', 'TrackID', 'IsTruth']]\r\n\t\t#data.to_csv('svd.csv', index=False)\r\n\t\tdata = 
data.values\r\n\t\tself.mapgen.get_map(data, filename)\r\n\r\n\tdef prod_get_graph(self, limit, filename, radius=0.01, k=6):\r\n\t\tdata = self.limit_check(self.dataframe, limit)\r\n\t\tdata = self.k_nearest_check(data, radius=radius, k=k)\r\n\t\tself.graph_data(data, filename)\r\n\r\n\r\nif __name__ == '__main__':\r\n\td = DataWorker()\r\n\tr = RandomDataGenerator()\r\n\td.append_batch(r.create_line_data(np.array([55.517298, 36.993981]), np.array([55.667247, 37.426248])))\r\n\td.append_batch(r.create_line_data(np.array([55.608965, 49.300274]), np.array([55.623160, 49.260022]), trid='Kazan'))\r\n\td.append_batch(r.create_line_data(np.array([55.692110, 49.192970]), np.array([55.623160, 49.260022]), trid='Kazan'))\t\r\n\td.save_data(filename='data.csv', inplace=True)","sub_path":"Server/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"506879712","text":"import cv2\nimport time\nimport contrast\n\nimg = cv2.imread('tiger_low_constrast.jpg', cv2.IMREAD_GRAYSCALE)\n\nstart_time = time.time()\n\ncontrast.adjust_contrast(img, 3)\n\nelapsed_time = time.time() - start_time\nprint(elapsed_time)\n\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"contrast3.py","file_name":"contrast3.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"308725753","text":"from django.db.models import Manager, F, Q\n\n\nclass PostManager(Manager):\n\n def get_home_posts(self):\n queryset = self.get_queryset()\n queryset = queryset.filter(\n Q(owner='$'),\n ).order_by(\n '-price_per_thousand'\n )\n return queryset\n","sub_path":"coursatch/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"127110939","text":"# -*-coding:utf-8 -*-\nfrom db import basic_db\nfrom logger.log import storage\n\n\ndef save(sos):\n ins_count = 0\n insert_sql = (\n 'insert into weibo_spread_other (user_id,user_screenname,user_province,user_city,user_location,'\n 'user_description,user_url,user_profileimageurl,user_gender,user_followerscount,user_friendscount,'\n 'user_statusescount,user_createdat,user_verifiedtype,user_verifiedreason,status_createdat,'\n 'status_mid,status_source,status_repostscount,status_commentscount,upper_user_id,'\n 'original_status_id,status_url) '\n \" values (:user_id,:user_screenname,:user_province,:user_city,:user_location,\"\n \":user_description,:user_url,:user_profileimageurl,:user_gender,:user_followerscount,\"\n \":user_friendscount,:user_statusescount,:user_createdat,:user_verifiedtype,:user_verifiedreason,\"\n \":status_createdat,:status_mid,:status_source,:status_repostscount,:status_commentscount,\"\n \":upper_user_id,:original_status_id,:status_url)\"\n )\n\n with basic_db.db_execute() as conn:\n\n for item in sos:\n if item.verify_type == '':\n item.verify_type = 0\n try:\n args = {\n 'user_id': item.id,\n 'user_url': item.blog_url,\n 'user_profileimageurl': item.headimg_url,\n 'user_screenname': item.screen_name.encode('gbk', 'ignore').decode('gbk'),\n 'user_province': item.province.encode('gbk', 'ignore').decode('gbk'),\n 'user_city': item.city.encode('gbk', 'ignore').decode('gbk'),\n 'user_location': item.location.encode('gbk', 'ignore').decode('gbk'),\n 'user_description': item.description.encode('gbk', 
'ignore').decode('gbk'),\n                    'user_gender': item.gender.encode('gbk', 'ignore').decode('gbk'),\n                    'user_verifiedreason': item.verify_info.encode('gbk', 'ignore').decode('gbk'),\n                    'status_source': item.device.encode('gbk', 'ignore').decode('gbk'),\n                    'user_followerscount': int(item.followers_count),\n                    'user_friendscount': int(item.friends_count),\n                    'user_statusescount': int(item.status_count),\n                    'status_repostscount': int(item.reposts_count),\n                    'status_commentscount': int(item.comments_count),\n                    'user_verifiedtype': item.verify_type,\n                    'user_createdat': item.register_time,\n                    'status_createdat': item.status_post_time,\n                    'status_mid': item.mid,\n                    'upper_user_id': item.upper_user_id,\n                    'original_status_id': item.original_status_id,\n                    'status_url': item.status_url,\n                }\n                basic_db.db_dml_parms(conn, insert_sql, args)\n            except Exception as why:\n                storage.error(item.__dict__)\n                storage.error(why)\n            else:\n                ins_count += 1\n    storage.info('Inserted {ins} records in total'.format(ins=ins_count))\n\n\n\n","sub_path":"db/spread_other_dao.py","file_name":"spread_other_dao.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"369589022","text":"import cairo\nimport gtk\n\nclass Canvas(gtk.DrawingArea):\n    def __init__(self, drawingInstructions):\n        \"\"\" Build a Canvas, stretching it to 800x600 and connecting the expose\n        event to the provided drawingInstructions method. 
\n\n Args:\n drawingInstructions - drawing instructions for the Canvas\n \"\"\"\n \n super(Canvas, self).__init__()\n\n # Hook expose_event to the onExposure() method\n self.connect(\"expose_event\", self.onExposure)\n\n # Size the Window to 800 x 600\n self.set_size_request(800, 600)\n\n self.drawingInstructions = drawingInstructions\n\n def onExposure(self, widget, event):\n \"\"\" React to screen exposure. \"\"\"\n\n # Get the Rectangle of this DrawingArea.\n rect = self.get_allocation()\n\n # Build and Normalize a Cairo Context.\n context = widget.window.cairo_create()\n context.scale (rect.width, rect.width)\n\n # Delegate drawing to the provided drawingInstructions.\n self.drawingInstructions(context)\n","sub_path":"canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"347409998","text":"\"\"\"\ndistribution.py\nAuthor: Nathan Subrahmanian\nCredit: https://stackoverflow.com/questions/4978787/how-to-split-a-string-into-array-of-characters\n\nAssignment:\n\nWrite and submit a Python program (distribution.py) that computes and displays \nthe distribution of characters in a given sample of text.\n\nOutput of your program should look like this:\n\nPlease enter a string of text (the bigger the better): The rain in Spain stays mainly in the plain.\nThe distribution of characters in \"The rain in Spain stays mainly in the plain.\" is:\niiiiii\nnnnnnn\naaaaa\nsss\nttt\nee\nhh\nll\npp\nyy\nm\nr\n\nNotice about this example:\n\n* The text: 'The rain ... plain' is provided by the user as input to your program.\n* Uppercase characters are converted to lowercase\n* Spaces and punctuation marks are ignored completely.\n* Characters that are more common appear first in the list.\n* Where the same number of characters occur, the lines are ordered alphabetically. 
\n For example, in the printout above, the letters e, h, l, p and y both occur twice \n in the text and they are listed in the output in alphabetical order.\n* Letters that do not occur in the text are not listed in the output at all.\n\"\"\"\nwords=input(\"Please enter a string of text (the bigger the better): \")\nprint(\"The distribution of characters in \\\"{0}\\\" is: \".format(words))\n\nwords = words.lower()\n\nlines = []\n\nallletters = \"abcdefghijklmnopqrstuvwxyz\"\nfor i in allletters:\n lines.append(words.count(i)*i)\n \nmaxlen = len(words)\nwhile maxlen > 0:\n for line in lines:\n if len(line) == maxlen:\n print(line)\n maxlen -= 1","sub_path":"distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"239153288","text":"from rest_framework import permissions\nfrom django.contrib.auth.models import User\n\nclass MyPermissions(permissions.BasePermission):\n \"\"\"\n Custom permission to only allow owners of an object to edit it.\n \"\"\"\n\n message = \"Only some users have permissions\"\n\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n if request.method == 'POST':\n return request.user.groups.filter(name='Comercials').exists()\n\n return request.user.groups.filter(name='Comercials').exists()\n\n def has_permission(self,request,view):\n if request.method in permissions.SAFE_METHODS:\n return True\n return request.user.groups.filter(name='Comercials').exists()\n","sub_path":"ykea/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"158522925","text":"import math\nimport gym\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nimport numpy as np\n\nclass BallControlEnv(gym.Env):\n \n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 50\n }\n \n\n\n def __init__(self):\n\n self.debug = [\"!method_calls\", \"!misc\", \"!kinematics\", \"!action\"]\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: __INIT__\")\n\n\n self.gravity = -9.8\n self.cartmass = 1.0\n self.force_mag = 20.0\n\n self.tau = 0.02 # seconds between updates\n\n self.y_threshold = 0.8\n self.x_threshold = 0.8\n\n high = np.array([self.y_threshold*2, np.finfo(np.float32).max, self.x_threshold*2, np.finfo(np.float32).max], dtype=np.float32)\n \n self.action_space = spaces.Discrete(4)\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n\n self.seed()\n self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n\n\n\n\n\n def seed(self, seed=None):\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: SEED\")\n\n self.np_random, seed = seeding.np_random(seed)\n return[seed]\n\n\n\n\n def step(self, action):\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: STEP\")\n \n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n \n state = self.state\n y, y_vel, x, x_vel = state\n\n if (\"action\" in self.debug): print(\"action: \" + str(action))\n\n if(action==1):\n y_force = self.force_mag\n x_force = 0\n\n elif(action==2):\n y_force = 0\n x_force = self.force_mag\n\n elif(action==3):\n y_force = 0\n x_force = -self.force_mag\n else:\n y_force = 0\n x_force = 0\n\n if(\"kinematics\" in self.debug): print(\"y-pos: \" + str(y) + \", y-vel: \" 
+ str(y_vel))\n if(\"kinematics\" in self.debug): print(\"x-pos: \" + str(x) + \", x-vel: \" + str(x_vel))\n\n y_acc = (y_force/self.cartmass) + self.gravity\n x_acc = (x_force/self.cartmass)\n\n y_vel = y_vel + (y_acc * self.tau)\n y = y + (y_vel * self.tau)\n\n x_vel = x_vel + (x_acc * self.tau)\n x = x + (x_vel * self.tau)\n\n self.state = (y, y_vel, x, x_vel)\n\n done = y < -self.y_threshold \\\n or y > self.y_threshold \\\n or x < -self.x_threshold \\\n or x > self.x_threshold\n\n done = bool(done)\n\n\n if not done:\n if(\"misc\" in self.debug): print(\"not done\")\n reward = 1.0\n elif self.steps_beyond_done is None:\n if(\"misc\" in self.debug): print(\"not done and steps beyond done is none\")\n self.steps_beyond_done = 0\n reward = 1.0\n else:\n if(\"misc\" in self.debug): print(\"done\")\n if self.steps_beyond_done == 0:\n if(\"misc\" in self.debug): print(\"done and steps beyond done is none\")\n logger.warn(\"env has returned 'done'. no more steps to take. please call 'reset()'\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n return np.array(self.state), reward, done, {}\n\n\n\n\n def reset(self):\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: RESET\")\n\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n self.steps_beyond_done = None\n return np.array(self.state)\n\n\n\n\n def render(self, mode=\"human\"):\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: RENDER\")\n\n screen_width = 600\n screen_height = 600\n\n #world_width = self.y_threshold*2\n world_width = 6\n #world_width = 60\n \n scale = screen_width/world_width\n \n cart_x = screen_width/2\n cart_y = 100 # top of cart\n cart_width = 40\n cart_height = 40\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n\n l,r,t,b = -cart_width/2, cart_width/2, cart_height/2, -cart_height/2\n\n cart = rendering.FilledPolygon([\n (l,b),\n (l,t),\n (r,t),\n (r,b)\n ])\n\n self.carttrans = rendering.Transform()\n cart.add_attr(self.carttrans)\n self.viewer.add_geom(cart)\n\n #self.track = rendering.Line((screen_width/2, 0), (screen_width/2, screen_height))\n #self.track.set_color = (0,0,0)\n #self.viewer.add_geom(self.track)\n\n self.top_bound = rendering.Line((0, (screen_height/2 + (self.y_threshold*scale))),(screen_width, (screen_height/2 + (self.y_threshold*scale))))\n self.top_bound.set_color = (1,0,0)\n self.viewer.add_geom(self.top_bound)\n\n self.bot_bound = rendering.Line((0, (screen_height/2 - (self.y_threshold*scale))),(screen_width, (screen_height/2 - (self.y_threshold*scale))))\n self.bot_bound.set_color = (1,0,0)\n self.viewer.add_geom(self.bot_bound)\n\n self.left_bound = rendering.Line((screen_width/2-(self.x_threshold*scale), 0),(screen_width/2-(self.x_threshold*scale), screen_height))\n\n self.left_bound.set_color = (1,0,0)\n self.viewer.add_geom(self.left_bound)\n\n self.right_bound = rendering.Line((screen_width/2+(self.x_threshold*scale), 0), (screen_width/2+(self.x_threshold*scale), screen_height))\n self.right_bound.set_color = (1,0,0)\n self.viewer.add_geom(self.right_bound)\n\n if self.state is None: return None\n \n alpha = self.state\n\n cart_y = alpha[0] * scale + screen_height/2.0\n cart_x = alpha[2] * scale + screen_width/2.0\n\n \n self.carttrans.set_translation(cart_x, cart_y)\n\n return self.viewer.render(return_rgb_array = mode==\"rgb_array\")\n\n\n\n\n def close(self):\n\n if(\"method_calls\" in self.debug): print(\"ball_hover_env: CLOSE\")\n\n if 
self.viewer:\n self.viewer.close()\n self.viewer = None\n\n\n","sub_path":"ball_hover/envs/ball_control_env_dir/ball_control_env.py","file_name":"ball_control_env.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"19115533","text":"#!/usr/bin/env python\n\"\"\" lbinstall package\n\"\"\"\n\nfrom setuptools import setup, find_packages\nimport sys\n\ndependencies = {\n '3': ['influxdb', 'pyrabbit2'],\n '2': ['influxdb', 'pyrabbit2']\n}\n\n\nsetup(name='lbCVMFSchecker',\n use_scm_version=True,\n description='LHCb CVMFS-Stratum0 checker tool',\n long_description='LHCb CVMFS-Stratum0 checker tool',\n url='https://gitlab.cern.ch/lhcb-core/cvmfs-stratum0-checker',\n\n author='CERN - LHCb Core Software',\n author_email='lhcb-core-soft@cern.ch',\n license='GPL',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n # 'Intended Audience :: Users',\n # 'Topic :: Software Installation :: Installation',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n\n ],\n\n # What does your project relate to?\n keywords='LHCb',\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[]),\n\n # Alternatively, if you want to distribute just a my_module.py, uncomment\n # this:\n # py_modules=[\"my_module\"],\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n # install_requires=['pyyaml','argparse'],\n install_requires=dependencies[str(sys.version_info[0])],\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # for example:\n # $ pip install -e .[dev,test]\n extras_require={\n },\n\n setup_requires=['setuptools_scm'],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and\n # allow pip to create the appropriate form of executable for the target\n # platform.\n scripts=[\"bin/lbCVMFSchecker\"])\n","sub_path":"pypi_install_script/lbCVMFSchecker-1.0.10.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"81061922","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/4 13:55\n# @Author : SELF-T-YY\n# @Site : \n# @File : our_sample_gai5_for_CH.py\n# @Software: PyCharm\n\n\nimport math\nimport json\nimport random\nimport copy\nfrom threading import Thread\n\nposition = 'cit-HepTh'\nfile_name = 'CH'\n\nall_data_dict = json.load(open('../data/' + position + '/' + file_name + '_forceData.json'))\nall_edges_dict = all_data_dict['edges']\nall_nodes_dict = json.load((open('../data/' + position + '/' + file_name + '_id_x_y_kde.json')))\n\nfile_path = r'../data/' + position + '/' + file_name + '_id_x_y_kde_edges_betweenness.json'\nfile_write_path = r'../data/' + position + '/' + file_name + '_id_x_y_kde.json'\n\nprint('file_path: {}'.format(file_path))\nprint('file_write_path: {}'.format(file_write_path))\n\n\ndef calculate_r(fpi, bi, beta=1):\n beta = 1 - alpha\n temp = (alpha * fpi * 1000 + beta * bi)\n if temp == 0:\n temp = 1\n r = ra/temp\n return r\n\n\ndef calculate_r_for_all(p_dict):\n for temp_p in p_dict:\n p_dict[temp_p].update({\"pr\": calculate_r(fpi=float(p_dict[temp_p][\"kde\"]), bi=int(p_dict[temp_p][\"betweenness\"]))})\n\n\ndef distance(p1, p2):\n\n return math.sqrt(math.pow(float(p1['x']) - float(p2['x']), 2) + math.pow(float(p1['y']) - float(p2['y']), 2))\n\n\ndef ns_around_p(p, p_dict):\n ans_dict = {}\n for temp_p in p_dict:\n if distance(p, p_dict[temp_p]) <= float(p['pr']):\n ans_dict.update({p_dict[temp_p]['id']: p_dict[temp_p]})\n return ans_dict\n\n\ndef ns_around_p_inR2_outR1_both(p, p_dict):\n ans_dict = {}\n for temp_p in p_dict:\n dis = distance(p, p_dict[temp_p])\n if dis <= float(p['pr']):\n continue\n\n if dis <= float(p['pr']*2):\n if dis > float(p_dict[temp_p]['pr']):\n ans_dict.update({p_dict[temp_p]['id']: p_dict[temp_p]})\n return ans_dict\n\n\n# remove-----True\n# save-------False\ndef remove_or_save(p, pd_p):\n max_r = max(float(p['pr']), float(pd_p['pr']))\n if distance(p, pd_p) < max_r:\n return True\n else:\n return False\n\n\ndef dict_key_value_max(all_dict, p_dict):\n edges_list = p_dict[\"edges\"]\n max_betweenness_temp = 0\n max_betweenness_key = -1\n for edge in edges_list:\n if edge not in all_dict:\n continue\n if all_dict[edge][\"betweenness\"] > max_betweenness_temp:\n max_betweenness_temp = all_dict[edge][\"betweenness\"]\n max_betweenness_key = edge\n return max_betweenness_key\n\n\ndef max_betweenness(all_dict):\n max_betweeness = 0\n max_betweeness_key = -1\n temp_p_list = []\n for edge in all_dict:\n if all_dict[edge][\"betweenness\"] > max_betweeness:\n max_betweeness = all_dict[edge][\"betweenness\"]\n max_betweeness_key = edge\n temp_p_list = [edge]\n elif all_dict[edge][\"betweenness\"] == max_betweeness:\n temp_p_list.append(edge)\n return random.choice(temp_p_list)\n\n\ndef choose_p_key(data_list, p_dict, all_dict):\n temp_p_list = []\n p_list = []\n for p in data_list:\n for edge in all_dict[p]['edges']:\n if edge in p_dict:\n p_list.append(edge)\n if not p_list:\n return -1\n else:\n max_betweeness = 0\n for p in p_list:\n if all_dict[p][\"betweenness\"] > max_betweeness:\n max_betweeness = 
all_dict[p][\"betweenness\"]\n temp_p_list = [p]\n elif all_dict[p][\"betweenness\"] == max_betweeness:\n temp_p_list.append(p)\n return random.choice(temp_p_list)\n\n\ndef poisson_disc(p_dict, per):\n pan_stack = Stack()\n ans_list = []\n p_temp_dict = {}\n temp_dict = copy.deepcopy(p_dict)\n flag = True\n while temp_dict:\n if pan_stack.empty() and flag:\n flag = False\n p_key = max_betweenness(temp_dict)\n # p_key = random.choice(list(temp_dict))\n else:\n if len(ans_list) / len(p_dict) * 100 > per:\n break\n p_key = choose_p_key(ans_list, temp_dict, p_dict)\n if p_key == -1:\n break\n\n pan_stack.push(p_key)\n p_temp_dict.update({p_key: temp_dict[p_key]})\n temp_dict.pop(p_key)\n ans_list.append(p_key)\n while p_key != -1:\n around_p = ns_around_p(p_temp_dict[p_key], temp_dict)\n if not around_p == {}:\n for remove_p in around_p:\n temp_dict.pop(remove_p)\n\n if_next = False\n for edge in p_temp_dict[p_key][\"edges\"]:\n if edge not in temp_dict:\n p_temp_dict[p_key][\"edges\"].remove(edge)\n else:\n if_next = True\n if not if_next:\n p_key = pan_stack.pop()\n continue\n if p_key == -1:\n continue\n p_key = dict_key_value_max(ns_around_p_inR2_outR1_both(p_temp_dict[p_key], temp_dict), p_temp_dict[p_key])\n pan_stack.push(p_key)\n if p_key == -1:\n continue\n p_temp_dict.update({p_key: temp_dict[p_key]})\n temp_dict.pop(p_key)\n ans_list.append(p_key)\n ans_list = list(set(ans_list))\n return ans_list\n\n\nclass Stack:\n def __init__(self):\n self.data = []\n\n def empty(self):\n if not self.data:\n return True\n else:\n return False\n\n def size(self):\n return len(self.data)\n\n def pop(self):\n if self.empty():\n return -1\n else:\n return self.data.pop()\n\n def push(self, data):\n if data != -1:\n self.data.append(data)\n\n def top(self):\n return self.data[-1]\n\n\ndef reflash(p_dict):\n ans_nodes = []\n ans_edges = []\n\n check_nodes =[]\n for edge in all_edges_dict:\n n1 = edge['source']\n n2 = edge['target']\n if n1 in p_dict and n2 in p_dict:\n ans_edges.append(edge)\n if n1 not in check_nodes:\n check_nodes.append(n1)\n ans_nodes.append({'id': n1,\n 'x': all_nodes_dict[n1]['x'],\n 'y': all_nodes_dict[n1]['y']})\n if n2 not in check_nodes:\n check_nodes.append(n2)\n ans_nodes.append({'id': n2,\n 'x': all_nodes_dict[n2]['x'],\n 'y': all_nodes_dict[n2]['y']})\n return {'nodes': ans_nodes, 'edges': ans_edges}\n\n\ndef test():\n with open(file_path) as f:\n\n data_dict = json.load(f)\n len1 = len(list(data_dict.keys()))\n calculate_r_for_all(data_dict)\n\n max_len = 0\n for i in range(1):\n final_list = poisson_disc(data_dict, per)\n final_list = reflash(final_list)\n len2 = len(final_list['nodes'])\n len_fin = len2 / len1 * 100\n print(len_fin, \"%\", sep='')\n if max_len < len_fin:\n ans_list = final_list\n max_len = len_fin\n # print(ans_list)\n print(max_len, '%', sep='')\n\n\ndef run_do(times):\n with open(file_path) as f:\n\n data_dict = json.load(f)\n len1 = len(list(data_dict.keys()))\n calculate_r_for_all(data_dict)\n\n max_len = 0\n for i in range(1):\n final_list = poisson_disc(data_dict, per)\n final_list = reflash(final_list)\n len2 = len(final_list['nodes'])\n len_fin = len2 / len1 * 100\n # print(len_fin, \"%\", sep='')\n if max_len < len_fin:\n ans_list = final_list\n max_len = len_fin\n # print(ans_list)\n print(max_len, '%', sep='')\n f_file_path = r'../data/' + position + '/our_sample_adv/our_sample_times_' + \\\n str(times) + '_a_0.5_b_0.5_rata_' + str(per) + '.json'\n print(f_file_path)\n f_file = open(f_file_path, 'w+')\n ans_json = json.dumps(final_list)\n 
f_file.write(ans_json)\n f_file.close()\n\n\ndef run():\n for t in range(0, 5):\n Thread(target=run_do, args=(t, )).start()\n\n\n# for i in range(5, 41, 5):\n# alpha = 0.5\n# ra = 1\n# per = i\n# # ra = ra/100\n#\n# # test()\n# run()\n\nalpha = 0.5\nra = 1/10\nper = 40\n# ra = ra/100\n\n# test()\nrun()\n\n\n# 5\n# 10\n# 15\n# 20\n# 25\n# 30\n# 35\n# 40\n","sub_path":"Python/our_sample_gai5_for_CH.py","file_name":"our_sample_gai5_for_CH.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"161383718","text":"'''\r\nCreated on Mar 12, 2015\r\n\r\n@author: mohiuda\r\n'''\r\n\r\nimport math\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nINFRA_AVG_ANNUAL_CPI = 0.0174681286906602\r\nINFRA_ACCRUAL = 0.055\r\n\r\nPELBO_ACCRUAL = 0.03\r\nPEDEBT_ACCRUAL = 0.05\r\n\r\ndef generateAggregatedHierarchy(dfHedged, dfUnhedged, dfCCYBreakdown, dfStat, dfRelationships):\r\n for bmName in dfHedged.columns.values:\r\n if bmName in redefinedBMs:\r\n redefinedBMs[bmName](dfHedged, dfUnhedged, dfCCYBreakdown, dfStat)\r\n \r\n #We've changed the children. If any parent nodes in hierarchy, we can't use them anymore\r\n parentNodes = dfRelationships.values()\r\n dfHedged.drop([i for i in parentNodes if i in dfHedged], axis=1, inplace=True)\r\n dfUnhedged.drop([i for i in parentNodes if i in dfUnhedged], axis=1, inplace=True)\r\n dfCCYBreakdown.drop([i for i in parentNodes if i in dfCCYBreakdown], axis=1, inplace=True)\r\n dfStat.drop([i for i in parentNodes if i in dfStat], axis=0, inplace=True)\r\n \r\n dfStat = dfStat.T\r\n for node in parentNodes:\r\n if node not in dfHedged:\r\n dfHedged[node] = getAggregatedStat(node, dfHedged,dfRelationships)\r\n if node not in dfUnhedged:\r\n dfUnhedged[node] = getAggregatedStat(node, dfUnhedged,dfRelationships)\r\n if node not in dfCCYBreakdown:\r\n dfCCYBreakdown[node] = getAggregatedStat(node, dfCCYBreakdown,dfRelationships)\r\n if node not in dfStat:\r\n dfStat[node] = getAggregatedStat(node, dfStat,dfRelationships)\r\n \r\n dfStat = dfStat.T\r\n \r\n dfHedged.sort_index(axis=1, kind='heapsort', inplace=True)\r\n dfUnhedged.sort_index(axis=1, kind='heapsort', inplace=True)\r\n dfCCYBreakdown.sort_index(axis=1, kind='heapsort', inplace=True)\r\n dfStat.sort_index(axis=0, kind='heapsort', inplace=True)\r\n\r\n return dfHedged, dfUnhedged, dfCCYBreakdown, dfStat\r\n\r\ndef getAggregatedStat(node, statDF, relationShips):\r\n if node in statDF:\r\n return statDF[node]\r\n else:\r\n sumSeries = pd.Series(np.zeros(len(statDF)), index=statDF.index, name=node, dtype=np.float64)\r\n for child in [i for i in relationShips if relationShips[i]==node]:\r\n sumSeries = sumSeries + getAggregatedStat(child, statDF, relationShips)\r\n \r\n statDF[node] = sumSeries\r\n return statDF[node]\r\n \r\n\r\ndef genInfraBM(dfHedged, dfUnhedged, dfCCYBreakdown, dfStat):\r\n dfHedged['Infrastructure Benchmark'] = dfStat['PV']['Infrastructure Benchmark']*(INFRA_AVG_ANNUAL_CPI+INFRA_ACCRUAL)/math.sqrt(52)\r\n dfUnhedged['Infrastructure Benchmark'] = dfHedged['Infrastructure Benchmark']\r\n\r\n \r\n\r\ndef genPELBOBM(dfHedged, dfUnhedged, dfCCYBreakdown, dfStat):\r\n dfHedged['Private Equity LBO Benchmark'] = dfHedged['Private Equity LBO Benchmark'] + PELBO_ACCRUAL*dfStat['PV']['Private Equity LBO Benchmark']/math.sqrt(52)\r\n dfUnhedged['Private Equity LBO Benchmark'] = dfUnhedged['Private Equity LBO Benchmark'] + PELBO_ACCRUAL*dfStat['PV']['Private Equity LBO Benchmark']/math.sqrt(52)\r\n \r\ndef 
genPEDebtBM(dfHedged, dfUnhedged, dfCCYBreakdown, dfStat):\r\n dfHedged['Private Equity Debt Benchmark'] = dfHedged['Private Equity Debt Benchmark'] + PEDEBT_ACCRUAL*dfStat['PV']['Private Equity Debt Benchmark']/math.sqrt(52)\r\n dfUnhedged['Private Equity Debt Benchmark'] = dfUnhedged['Private Equity Debt Benchmark'] + PEDEBT_ACCRUAL*dfStat['PV']['Private Equity Debt Benchmark']/math.sqrt(52)\r\n\r\n\r\nredefinedBMs = {\"Infrastructure Dex Benchmark\": genInfraBM,\r\n \"Infrastructure Benchmark\": genInfraBM,\r\n \"Private Equity LBO Benchmark\": genPELBOBM,\r\n \"Private Equity Debt Benchmark\": genPEDebtBM}","sub_path":"AggEngine/src/DataProcessors/BMHierarchyAggregator.py","file_name":"BMHierarchyAggregator.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"73436049","text":"import pandas as pd \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \nimport numpy as np \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \nimport datetime as dt \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \nimport os \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \nfrom util import get_data, plot_data \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \ndef compute_portvals(orders, start_val = 1000000, commission=9.95, impact=0.005): \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n '''\n Computes portfolio values for a given orders file with a starting portfolio value.\n Default commission rates of $9.95 and impact of 0.005 of transaction value.\n '''\n # Process order file\n orders, start_date, end_date, symbols, prices = process_orders(orders)\n # Process trades\n trades = process_trades(orders, prices, commission, impact)\n # Process shares\n shares = process_shares(orders, prices, trades, start_val)\n # Calculate portfolio values over time\n return calculate_portfolio(prices, shares)\n\ndef process_shares(orders, prices, trades, start_val):\n '''\n Helper function for processing shares based on trades.\n '''\n shares = pd.DataFrame(np.zeros((prices.shape)), index = prices.index, columns = prices.columns)\n shares.iloc[0,:-1] = trades.iloc[0,:-1].copy()\n shares.iloc[0,-1] = trades.iloc[0,-1] + start_val\n for row in range(1, len(shares)):\n shares.iloc[row] = shares.iloc[row-1] + trades.iloc[row]\n return shares\n\ndef process_trades(orders, prices, commission, impact):\n '''\n Helper function for processing all listed trades. Calculates for each trade in the order book:\n 1. Change in asset values as a result of the trade.\n 2. Transaction cost as a result of the trade.\n 3. 
Modifies trades dataframe as a result of orders.\n    '''\n    trades = pd.DataFrame(np.zeros((prices.shape)), index = prices.index, columns = prices.columns)\n    for index, row in orders.iterrows():\n        trade_delta = prices.loc[index, row[\"Symbol\"]] * row[\"Shares\"]\n        transaction_cost = impact*trade_delta + commission\n        buy_or_sell = 1 if row[\"Order\"]==\"BUY\" else -1\n        trades.loc[index, row[\"Symbol\"]] += buy_or_sell*row[\"Shares\"]\n        trades.loc[index, \"Cash\"] += -buy_or_sell*trade_delta - transaction_cost\n    return trades\n\ndef calculate_portfolio(prices, shares):\n    '''\n    Helper function for calculating portfolio values given prices for all traded symbols\n    and number of shares over time.\n    '''\n    share_value = prices*shares\n    portvals = pd.DataFrame(share_value.sum(axis = 1), share_value.index, [\"Portfolio Value\"])\n    return portvals\n\ndef process_orders(orders):\n    '''\n    Helper function for processing the given orders, with NaN replacement.\n    Returns a dataframe with all of the orders, sorted by ascending date.\n    Returns a start_date, end_date, and list of symbols traded.\n    Returns adjusted close prices for each of the symbols through the time window.\n    '''\n    orders.sort_index(ascending = True, inplace=True)\n    sd = orders.index[0]\n    ed = orders.index[-1]\n    sym = orders[\"Symbol\"].unique().tolist()\n    p_d = get_data(sym, pd.date_range(sd, ed), addSPY=True)\n    del p_d[\"SPY\"]\n    p_d.fillna(method='ffill', inplace = True)\n    p_d.fillna(method='bfill', inplace=True)\n    p_d[\"Cash\"] = 1.0\n    return orders, sd, ed, sym, p_d\n\ndef test_code(): \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    # this is a helper function you can use to test your code \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    # note that during autograding this function will not be called. \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    # Define input parameters \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    of = \"./orders/orders-01.csv\" \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    sv = 1000000 \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n\n    # Process orders \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    # compute_portvals expects an orders DataFrame, so read the CSV first\n    # (assumes the standard orders-file format with a 'Date' column).\n    orders = pd.read_csv(of, index_col='Date', parse_dates=True, na_values=['nan'])\n    portvals = compute_portvals(orders=orders, start_val=sv) \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    if isinstance(portvals, pd.DataFrame): \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n        portvals = portvals[portvals.columns[0]] # just get the first column \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    else: \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n        \"warning, code did not return a DataFrame\" \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n\n    # Get portfolio stats \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n    # Here we just fake the data. You should use your code from previous assignments. 
\t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n start_date = dt.datetime(2008,1,1) \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n end_date = dt.datetime(2008,6,1) \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = [0.2,0.01,0.02,1.5] \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = [0.2,0.01,0.02,1.5] \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n\n # Compare portfolio against $SPX \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Date Range: {start_date} to {end_date}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Sharpe Ratio of Fund: {sharpe_ratio}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Sharpe Ratio of SPY : {sharpe_ratio_SPY}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Cumulative Return of Fund: {cum_ret}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Cumulative Return of SPY : {cum_ret_SPY}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Standard Deviation of Fund: {std_daily_ret}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Standard Deviation of SPY : {std_daily_ret_SPY}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Average Daily Return of Fund: {avg_daily_ret}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Average Daily Return of SPY : {avg_daily_ret_SPY}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n print(f\"Final Portfolio Value: {portvals[-1]}\") \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n\nif __name__ == \"__main__\": \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n test_code() \t\t \t \t\t\t \t \t\t \t\t \t\t \t \t\t \t\t \t\t \t\t \n","sub_path":"Strategy Learner Evaluation/MarketSimulator.py","file_name":"MarketSimulator.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"377188847","text":"from openpyxl import Workbook\nimport os, search2, openpyxl\n\n\ndef main():\n\tfile_name = input('Имя файла?: ') +'.xlsx'\n\tif search2.chek_file(file_name):\n\t\tdt = search2.read_exel_in_dict(file_name)\n\t\tprint('В файле ', file_name, len(dt), ' номеров.')\n\t\tlist_key = list(dt.keys())\n\t\tdel_list = []\n\t\tfor n in list_key:\n\t\t\tif n[2] != '9':\n\t\t\t\tdel_list.append(n)\n\t\tif del_list:\n\t\t\tprint(del_list)\n\t\t\tprint('Найдено ', len(del_list), ' городских номеров.')\n\t\t\tif input('Удалить: ') == 'y':\n\t\t\t\tfor n in del_list:\n\t\t\t\t\tdel dt[n]\n\t\tdel_list = []\n\t\tfor k in dt:\n\t\t\tif 'натяжн' in dt[k]['Заголовок'].lower():\n\t\t\t\tdel_list.append(k)\n\t\t\t\tdel_list.append(dt[k]['Заголовок'])\n\t\tif del_list:\n\t\t\tprint(del_list)\n\t\t\tprint('Найдено ', int(len(del_list)/2), ' конкурентов')\n\t\t\tif input('Удалить: ') == 'y':\n\t\t\t\ti = 1\n\t\t\t\tfor data in del_list:\n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tdel dt[data]\n\t\t\t\t\ti+=1\n\t\tdel_list = []\n\t\tfeed_dt = 
search2.init_feedback()\n\t\tfor k in dt:\n\t\t\tif k in feed_dt['black']:\n\t\t\t\tdel_list.append(k)\n\t\t\t\tdel_list.append(feed_dt['black'][k])\n\t\t\n\t\tif del_list:\n\t\t\tprint('Найдено ', int(len(del_list)/2), ' номеров из черного списка')\n\t\t\tif input('Удалить: ') == 'y':\n\t\t\t\ti = 1\n\t\t\t\tfor data in del_list:\n\t\t\t\t\tif i % 2 != 0:\n\t\t\t\t\t\tdel dt[data]\n\t\t\t\t\ti+=1\n\t\t\n\t\tdt = testing_word(dt)\n\n\t\trec_dt = {}\n\t\tfor k in dt:\n\t\t\trec_dt[k] = {}\n\t\t\trec_dt[k]['Продавец'] = dt[k]['Продавец']\n\n\t\tresp = int(input('Количество файлов?: '))\n\t\tcount = len(rec_dt) // resp\n\t\tl_key = list(rec_dt.keys())\n\t\trec_list = []\n\t\tfor i in range(1, resp + 1):\n\t\t\tif i == 1:\n\t\t\t\trec_list.append(l_key[:count])\n\t\t\t\tcontinue\n\t\t\tif i == resp:\n\t\t\t\trec_list.append(l_key[(i * count) - count :])\n\t\t\t\tbreak\n\t\t\trec_list.append(l_key[(i * count) - count : i * count])\n\t\t\n\t\tfor i, r_lict in enumerate(rec_list, 1):\n\t\t\td = {}\n\t\t\tfor k in r_lict:\n\t\t\t\td[k] = rec_dt[k]\n\t\t\tsearch2.writin_new_exele(str(i)+'.xlsx', d, writin_keys=False)\n\t\t\n\n\telse:\n\t\tprint('Not found', file_name)\n\n\ndef testing_word(dt):\n\t\n\tdef read_txt(p):\n\t\tf = open(p)\n\t\tl = []\n\t\tl = f.read().splitlines()\n\t\tf.close()\n\t\tl2 = []\n\t\tfor n in l:\n\t\t\tl2.append(n.title())\n\t\treturn l2\n\n\tdef searcher(client_n):\n\t\tclient_n = client_n.title()\n\t\tfl = False\n\t\tfor tn in test_name:\n\t\t\tif client_n == tn:\n\t\t\t\tdt[k]['Продавец'] = client_n\n\t\t\t\tfl = True\n\t\t\t\tbreak\n\t\treturn fl\n\n\ttest_name = read_txt('name.txt')\n\n\tfor k in dt:\n\t\tseller = dt[k]['Продавец']\n\t\tcont_name = dt[k]['Контактное лицо']\n\t\tif type(seller) == int:\n\t\t\tseller = str(seller)\n\t\tif type(cont_name) == int:\n\t\t\tcont_name = str(cont_name)\n\t\tfl = False\n\n\t\tif seller is None:\n\t\t\tif cont_name is None:\n\t\t\t\tcontinue\n\t\t\tif ' ' in cont_name:\n\t\t\t\tl_name = cont_name.split(' ')\n\t\t\t\tfor name in l_name:\n\t\t\t\t\tfl = searcher(name)\n\t\t\t\t\tif fl:\n\t\t\t\t\t\tbreak\n\t\t\t\tif fl:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\tdt[k]['Контактное лицо'] = cont_name\n\t\t\t\n\t\t\telse:\n\t\t\t\tfl = searcher(cont_name)\n\t\n\t\t\t\tif fl:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\tdt[k]['Контактное лицо'] = cont_name\n\t\telse:\n\t\t\tif ' ' in seller:\n\t\t\t\tl_name = seller.split(' ')\n\t\t\t\tfor ln in l_name:\n\t\t\t\t\tfl = searcher(ln)\n\t\t\t\t\tif fl:\n\t\t\t\t\t\tbreak\n\t\t\t\tif fl:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif cont_name != None:\n\t\t\t\t\tif ' ' in cont_name:\n\t\t\t\t\t\tl_name = cont_name.split(' ')\n\t\t\t\t\t\tfor ln in l_name:\n\t\t\t\t\t\t\tfl = searcher(ln)\n\t\t\t\t\t\t\tif fl:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif fl:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\t\t\tdt[k]['Контактное лицо'] = seller + ' ' + cont_name\n\t\t\t\t\telse:\n\t\t\t\t\t\tfl = searcher(cont_name)\n\t\t\t\t\t\tif fl: \n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\t\t\tdt[k]['Контактное лицо'] = seller + ' ' + cont_name\n\t\t\t\telse:\n\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\tdt[k]['Контактное лицо'] = seller\n\t\t\t\n\t\t\telse:\n\t\t\t\tfl = searcher(seller)\n\t\t\t\tif fl:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tif cont_name != None:\n\t\t\t\t\t\tif ' ' in cont_name:\n\t\t\t\t\t\t\tl_name = cont_name.split(' 
')\n\t\t\t\t\t\t\tfor ln in l_name:\n\t\t\t\t\t\t\t\tfl = searcher(ln)\n\t\t\t\t\t\t\t\tif fl:\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tif fl:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\t\t\t\tdt[k]['Контактное лицо'] = seller + ' ' + cont_name\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfl = searcher(cont_name)\n\t\t\t\t\t\t\tif fl:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\t\t\t\tdt[k]['Контактное лицо'] = seller + ' ' + cont_name\n\t\t\t\t\telse:\n\t\t\t\t\t\tdt[k]['Продавец'] = None\n\t\t\t\t\t\tdt[k]['Контактное лицо'] = seller\n\treturn dt\n","sub_path":"name2.py","file_name":"name2.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"136335988","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.conf import settings\nimport pymongo\n\nclass KsbbsPipeline(object):\n def __init__(self):\n host=settings['MONGODB_HOST']\n port=settings['MONGODB_PORT']\n dbName=settings['MONGODB_DBNAME']\n docName=settings['MONGODB_DOCNAME']\n\n client = pymongo.MongoClient(host,port)\n tdb = client[dbName]\n self.post = tdb[docName]\n def process_item(self, item, spider):\n articleInfo=dict(item)\n #print '=============pipline============='\n self.post.insert(articleInfo)\n return item\n","sub_path":"ksbbs/ksbbs/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"584115836","text":"import math\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm\nfrom numpy import random\nfrom torch.autograd import Variable\nimport pickle\nfrom utils import *\n\n\nclass GATNEModel(nn.Module):\n def __init__(self, num_courses, num_students, embedding_size, embedding_u_size, edge_type_count, dim_a, dim_course_descrip, hidden_size):\n super(GATNEModel, self).__init__()\n self.num_courses = num_courses\n self.num_students = num_students\n self.embedding_size = embedding_size\n self.embedding_u_size = embedding_u_size\n self.edge_type_count = edge_type_count\n self.dim_a = dim_a\n self.course_embed = None\n self.course_des_embed = None\n\n self.course_h1 = nn.Linear(dim_course_descrip, hidden_size).cuda()\n self.course_h_relu = nn.ReLU().cuda()\n self.course_h2 = nn.Linear(hidden_size, embedding_size).cuda()\n self.course_trans = nn.Parameter(torch.FloatTensor(dim_course_descrip, embedding_size).cuda(), requires_grad=True)\n\n # base embeddings for courses, if not include course features\n #self.course_embeddings = nn.Parameter(torch.FloatTensor(num_courses, embedding_size).cuda(), requires_grad=True) # (*, d)\n # base embeddings for students\n self.student_embeddings = nn.Parameter(torch.FloatTensor(num_students, embedding_size).cuda(), requires_grad=True) # (*, d)\n # course has grade type embeddings\n self.course_type_embeddings = nn.Parameter(torch.FloatTensor(num_courses, edge_type_count, embedding_u_size).cuda(), requires_grad=True) # (*, m, s)\n # student has a type for embeddings\n self.student_type_embeddings = nn.Parameter(torch.FloatTensor(num_students, 1, embedding_u_size).cuda(), requires_grad=True)\n\n # courses\n 
self.trans_weights_c = nn.Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size).cuda(), requires_grad=True) # (m, s, d)\n # students\n self.trans_weights_s = nn.Parameter(torch.FloatTensor(1, embedding_u_size, embedding_size).cuda(), requires_grad=True) # (1, s, d)\n\n self.trans_weights_s1 = nn.Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, dim_a).cuda(), requires_grad=True) # (m, s, da)\n self.trans_weights_s2 = nn.Parameter(torch.FloatTensor(edge_type_count, dim_a, 1).cuda(), requires_grad=True) # (m, da, 1)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n #self.course_embeddings.data.uniform_(-1.0, 1.0)\n self.course_trans.data.uniform_(-1.0, 1.0)\n self.student_embeddings.data.uniform_(-1.0, 1.0)\n self.course_type_embeddings.data.uniform_(-1.0, 1.0)\n self.student_type_embeddings.data.uniform_(-1.0, 1.0)\n self.trans_weights_c.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n self.trans_weights_s.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n\n def forward(self, input_type, train_inputs, train_types, node_neigh, course_descrip=None):\n\n if input_type == 'c': # course as input, self-attention\n\n course_descrip = Variable(torch.FloatTensor(course_descrip), requires_grad=False).cuda()\n\n #course_embed = self.course_embeddings[train_inputs]\n self.course_embed = self.course_h1(course_descrip)\n self.course_embed = self.course_h_relu(self.course_embed)\n self.course_embed = self.course_h2(self.course_embed)\n\n self.course_des_embed = torch.matmul(course_descrip, self.course_trans)\n\n args = parse_args()\n course_embed_neighbors = Variable(torch.FloatTensor(len(train_inputs), self.edge_type_count, args.neighbor_samples, self.embedding_u_size), requires_grad=True).cuda()\n for i in range(self.edge_type_count):\n neigh_i = node_neigh[:, i].reshape(1, len(train_inputs) * args.neighbor_samples).squeeze(0).tolist()\n temp_stu = self.student_type_embeddings[:, 0]\n temp_course = self.course_type_embeddings[:, i]\n temp = torch.cat((temp_stu, temp_course), 0)\n neigh_i_emb = temp[neigh_i].contiguous().view(len(train_inputs), args.neighbor_samples, self.embedding_u_size)\n course_embed_neighbors[:, i] = neigh_i_emb\n course_type_embed = torch.mean(course_embed_neighbors, dim=2)\n\n trans_w = self.trans_weights_c[train_types]\n trans_w_s1 = self.trans_weights_s1[train_types]\n trans_w_s2 = self.trans_weights_s2[train_types]\n\n attention = F.softmax(torch.matmul(torch.tanh(torch.matmul(course_type_embed, trans_w_s1)), trans_w_s2).squeeze(2), dim=1).unsqueeze(1)\n course_type_embed = torch.matmul(attention, course_type_embed)\n course_embed = self.course_embed + torch.matmul(course_type_embed, trans_w).squeeze(1) + self.course_des_embed\n\n last_course_embed = F.normalize(course_embed, dim=1)\n return last_course_embed\n\n elif input_type == 's': # student as input, skip-gram\n student_embed = self.student_embeddings[train_inputs]\n args = parse_args()\n student_embed_neighbors = Variable(torch.FloatTensor(len(train_inputs), self.edge_type_count, args.neighbor_samples, self.embedding_u_size), requires_grad=True).cuda()\n for i in range(self.edge_type_count):\n neigh_i = node_neigh[:, i].reshape(1, len(train_inputs) * args.neighbor_samples).squeeze(0).tolist()\n temp_course = self.course_type_embeddings[:, i]\n temp_stu = self.student_type_embeddings[:, 0]\n temp = 
torch.cat((temp_course, temp_stu), 0)\n neigh_i_emb = temp[neigh_i].contiguous().view(len(train_inputs), args.neighbor_samples, self.embedding_u_size)\n student_embed_neighbors[:, i] = neigh_i_emb\n student_type_embed = torch.mean(torch.mean(student_embed_neighbors, dim=2), dim=1)\n trans_w = self.trans_weights_s[0]\n student_embed = student_embed + torch.matmul(student_type_embed, trans_w).squeeze(1)\n last_student_embed = F.normalize(student_embed, dim=1)\n return last_student_embed\n\n\nclass NSLoss(nn.Module):\n def __init__(self, num_nodes, node_freq, num_sampled, embedding_size):\n super(NSLoss, self).__init__()\n self.num_nodes = num_nodes # courses + students\n self.num_sampled = num_sampled\n self.embedding_size = embedding_size\n self.weights = nn.Parameter(torch.FloatTensor(num_nodes, embedding_size).cuda()) # context weights (*, d)\n # node freq is a list of node frequency for all nodes\n self.node_freq = np.array(node_freq) ** (3/4)\n total_freq = np.sum(node_freq)\n self.sample_weights = torch.Tensor([self.node_freq[k] / total_freq for k in range(num_nodes)])\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))\n\n def forward(self, n, embs, label):\n #n = len(input)\n log_target = torch.log(torch.sigmoid(torch.sum(torch.mul(embs, self.weights[label]), 1)))\n negs = torch.multinomial(self.sample_weights, self.num_sampled * n, replacement=True).cuda()\n noise = torch.neg(self.weights[negs].view(n, self.num_sampled, -1))\n sum_log_sampled = torch.sum(torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1).squeeze()\n\n loss = log_target + sum_log_sampled\n return -loss.sum() / n\n\n\n","sub_path":"src_attri/GraphModel.py","file_name":"GraphModel.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"165776715","text":"import numpy as np\nimport theano\nimport theano.tensor as T\n\n\"\"\"Jorg Bornschein's implementation of attention was used as a reference\"\"\"\nclass SelectiveAttentionModel(object):\n \"\"\"Haven't tested the cases when A != B\"\"\"\n def __init__(self, A, B, N):\n self.A = A\n self.B = B\n self.N = N\n\n def internal_dot_product(self, matrix1, matrix2):\n '''Matrix1 has dimension dim1 x dim2 x dim3 ; Matrix2 has dimension dim1 x dim3 x dim4\n Result is dim1 x dim2 x dim4'''\n\n matrix3 = matrix1.dimshuffle([0,1,2,'x']) * matrix2.dimshuffle([0,'x',1,2]) \n return matrix3.sum(axis=2)\n\n def matrix2att(self, matrix):\n '''Input is vector of size (batch_size,5) in theano terms'''\n g_hat_x = matrix[:,0]\n g_hat_y = matrix[:,1]\n log_delta = matrix[:,2]\n log_sigma_sqr = matrix[:,3]\n log_gamma = matrix[:,4]\n\n g_x = (self.A + 1.0) / 2.0 * (g_hat_x + 1.0)\n g_y = (self.B + 1.0) / 2.0 * (g_hat_y + 1.0)\n\n delta = (max(self.A,self.B) - 1.0) / (self.N - 1) * T.exp(log_delta)\n gamma = T.exp(log_gamma).dimshuffle(0, 'x')\n sigma = T.exp(log_sigma_sqr/2.0)\n\n return g_y, g_x, delta, sigma, gamma\n\n def matrix2att_cpu(self, matrix):\n '''Input is vector of size (batch_size,5) in numpy terms'''\n g_hat_x = matrix[:,0]\n g_hat_y = matrix[:,1]\n log_delta = matrix[:,2]\n log_sigma_sqr = matrix[:,3]\n log_gamma = matrix[:,4]\n\n g_x = (self.A + 1.0) / 2.0 * (g_hat_x + 1.0)\n g_y = (self.B + 1.0) / 2.0 * (g_hat_y + 1.0)\n\n delta = (max(self.A,self.B) - 1.0) / (self.N - 1) * np.exp(log_delta)\n gamma = np.exp(log_gamma)\n sigma = np.exp(log_sigma_sqr/2.0)\n\n return g_y, g_x, delta, sigma, 
gamma\n\n def get_filterbank_matrices(self, g_y, g_x, delta, sigma):\n\n tol = 1e-04\n mu_x = g_x.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(self.N)-self.N/2-0.5) # dimension (batch_size, N)\n mu_y = g_y.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(self.N)-self.N/2-0.5)\n\n a = T.arange(self.A)\n b = T.arange(self.B)\n\n f_x = T.exp( -(a-mu_x.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 ) # dimension (batch_size, N, A)\n f_y = T.exp( -(b-mu_y.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 )\n\n f_x = f_x / (f_x.sum(axis=2).dimshuffle(0, 1, 'x') + tol) # dimension (batch_size, N, A)\n f_y = f_y / (f_y.sum(axis=2).dimshuffle(0, 1, 'x') + tol)\n return f_y, f_x\n\n def get_mean_filters_cpu(self, g_y, g_x, delta, sigma):\n mu_x = g_x + delta * (np.arange(self.N) - self.N/2 - 0.5)\n mu_y = g_y + delta * (np.arange(self.N) - self.N/2 - 0.5)\n\n return mu_y, mu_x\n\n def read(self, images, g_y, g_x, delta, sigma):\n f_y, f_x = self.get_filterbank_matrices(g_y, g_x, delta, sigma)\n batch_size = images.shape[0]\n reshaped_images = images.reshape( (batch_size, self.A, self.B) ) # dimension (batch_size, A, B)\n\n w = self.internal_dot_product(self.internal_dot_product(f_y, reshaped_images), f_x.transpose([0,2,1]))\n return w.reshape((batch_size, self.N*self.N))\n\n def write(self, windows, g_y, g_x, delta, sigma):\n f_y, f_x = self.get_filterbank_matrices(g_y, g_x, delta, sigma)\n batch_size = windows.shape[0]\n reshaped_windows = windows.reshape( (batch_size, self.N, self.N) ) # dimension (batch_size, N, N)\n\n im = self.internal_dot_product(self.internal_dot_product(f_y.transpose([0,2,1]), reshaped_windows), f_x)\n return im.reshape((batch_size, self.A * self.B))","sub_path":"mnist-captions/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"406900724","text":"# chat/consumers.py\n\nimport json\n\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom django.contrib.auth.models import User\n\nfrom .models import ChatGroup, ChatGroupMessage\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n # print()\n # print(\"connect method called\")\n # print()\n user_id = self.scope[\"session\"][\"_auth_user_id\"]\n self.group_name = \"{}\".format(user_id)\n # Join room group\n # print(\"the Channel Name is \", self.channel_name)\n\n await self.channel_layer.group_add(self.group_name, self.channel_name)\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room group\n await self.channel_layer.group_discard(self.group_name, self.channel_name)\n\n # Receive message from WebSocket\n # async def receive(self, text_data=None,bytes_data = None):\n\n # text_data_json = json.loads(text_data)\n # message = text_data_json['message']\n # print(\"the id is \",message)\n # # Send message to room group(channel layer)\n # print(\"The chat_group_name is \",self.chat_group_name)\n # await self.channel_layer.group_send(\n # self.chat_group_name,\n # {\n # 'type': 'recieve_group_message',\n # 'message': message\n # }\n # )\n\n async def recieve_group_message(self, event):\n message = event['message']\n # print(\"The message id is \", message)\n\n # Send message to WebSocket\n\n await self.send(text_data=json.dumps({'message': message}))\n\n\nclass GroupChatConsumer(AsyncWebsocketConsumer):\n async def 
connect(self):\n print(\"connect called\")\n # self.room_name = self.scope['url_route']['kwargs']['room_name']\n # self.room_group_name = 'chat_%s' % self.room_name\n self.room_group_name = 'test'\n\n # Join room group\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room group\n await self.channel_layer.group_discard(self.room_group_name, self.channel_name)\n\n # Receive message from WebSocket\n async def receive(self, text_data):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n # model_name =ChatGroupMessage.objects.get()\n\n # Send message to room group\n await self.channel_layer.group_send(\n self.room_group_name, {'type': 'chat_message', 'message': message}\n )\n\n # Receive message from room group\n async def chat_message(self, event):\n message = event['message']\n # group_name = ChatGroup.objects.all()[1]\n # author_name = User.objects.all()[1]\n # ChatGroupMessage(group_name=group_name, author=author_name, message=message)\n # ChatGroupMessage.objects.create(group_name=group_name, author=author_name, message=message)\n group_name = await self.group_name()\n author_name = await self.author_name()\n await self.post_message(group_name=group_name, author_name=author_name, message=message)\n\n # Send message to WebSocket\n await self.send(text_data=json.dumps({'message': message}))\n\n @database_sync_to_async\n def group_name(self):\n return ChatGroup.objects.all()[1]\n\n\n @database_sync_to_async\n def author_name(self):\n return User.objects.all()[1]\n\n\n @database_sync_to_async\n def post_message(self, group_name, author_name, message):\n # GroupMessage.objects.create(group=self.room_group_name, message=message)\n ChatGroupMessage.objects.create(group_name=group_name, author=author_name, message=message)\n","sub_path":"core/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"164098022","text":"import yaml\nimport logging\n\n_log = logging.getLogger(__name__)\n\n\nclass WhiteListRule(object):\n\n MATCHABLE = ['cve', 'name', 'version', 'vendor', 'product']\n version = None\n status = None\n\n def __init__(self, cve=None, name=None, version=None, vendor=None,\n product=None, comment=None, status='ignore'):\n self.cve = cve\n self.name = name\n self.comment = comment\n self.vendor = vendor\n self.product = product\n if version is not None and not isinstance(version, str):\n raise RuntimeError(\n '{}: version must be a string (try quotes)'.format(self))\n self.version = version\n if status not in ['ignore', 'inprogress', 'notfixed']:\n raise RuntimeError(\n '{}: status must be one of ignore, inprogress, notfixed'.\n format(self))\n self.status = status\n if not any((getattr(self, m) for m in self.MATCHABLE)):\n raise RuntimeError(\n \"Whitelist rules must specify at least one of the matchable \"\n \"attributes {}: {}/{}\".format(\n ', '.join(self.MATCHABLE), comment, status))\n\n def matches(self, vulnerability, cpe, derivation):\n \"\"\"A rule matches when a vulnerability/derivation combination\n is whitelisted by this rule.\n \"\"\"\n if self.cve and vulnerability.cve_id != self.cve:\n return False\n if self.name and derivation.pname != self.name:\n return False\n if self.version and derivation.version != self.version:\n return False\n if self.vendor and cpe.vendor != self.vendor:\n return False\n if self.product 
and cpe.product != self.product:\n return False\n # NOTE: rules with status 'inprogress' or 'notfixed' annotate the\n # derivation's status but deliberately return None (falsy), so the\n # finding is reported rather than whitelisted.\n if self.status in ('inprogress', 'notfixed'):\n derivation.status = self.status\n return\n return True\n\n def __str__(self):\n base = '-'.join(filter(None, [self.vendor, self.product, self.name,\n self.version]))\n if not base and self.cve:\n return self.cve\n elif self.cve:\n return '{} ({})'.format(base, self.cve)\n return base\n\n\nclass WhiteList(object):\n\n def __init__(self):\n self.rules = []\n\n def parse(self, fobj):\n \"\"\"Extends whitelist with rules read from fobj.\"\"\"\n prep_rules = []\n\n if hasattr(fobj, 'name'):\n _log.debug('reading whitelist from %s', fobj.name)\n\n # safe_load refuses arbitrary object construction and avoids the\n # PyYAML deprecation warning for load() without an explicit Loader\n whitelist = yaml.safe_load(fobj)\n for line in whitelist:\n # special case: use cve key for more than one cve\n if 'cve' in line.keys() and type(line['cve']) == list:\n for cve_id in line['cve']:\n prep_rules.append(dict(**line))\n prep_rules[-1]['cve'] = cve_id\n else:\n prep_rules.append(line)\n for rule in prep_rules:\n self.rules.append(WhiteListRule(**rule))\n\n def __contains__(self, spec):\n (vuln, cpe, derivation) = spec\n return any(rule.matches(vuln, cpe, derivation) for rule in self.rules)\n","sub_path":"src/vulnix/whitelist.py","file_name":"whitelist.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"303623159","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. / np.sqrt(fan_in)\n return (-lim, lim)\n\nclass Actor(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=256):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(Actor, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n self.fc23 = nn.Linear(fc2_units, fc2_units)\n self.fc34 = nn.Linear(fc2_units, fc2_units)\n self.bc1 = nn.BatchNorm1d(fc1_units, affine=False)\n self.bc2 = nn.BatchNorm1d(fc2_units, affine=False)\n self.ln = nn.LayerNorm(33)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n def forward(self, state):\n \"\"\"Build an actor (policy) network that maps states -> actions.\"\"\"\n #print(state.size())\n x = F.leaky_relu(self.fc1((state)))\n #print(x.size())\n #x = self.bc1(x)\n x = F.leaky_relu(self.fc2(x))\n #x = F.leaky_relu(self.fc23(x))\n #x = F.leaky_relu(self.fc34(x))\n #x = self.bc2(x)\n return torch.tanh(self.fc3(x)) # torch.tanh: F.tanh is deprecated\n\n\nclass Critic(nn.Module):\n \"\"\"Critic (Value) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=128):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fcs1_units (int): Number of nodes in the first hidden layer\n fc2_units (int): Number of nodes in the second hidden layer\n \"\"\"\n super(Critic, 
self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fcs1 = nn.Linear(state_size, fcs1_units)\n self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)\n self.fc3 = nn.Linear(fc2_units, 1)\n #self.fc3 = nn.Linear(fc2_units, action_size)\n self.fc23 = nn.Linear(fc2_units,fc2_units)\n #self.fc34 = nn.Linear(fc2_units,fc2_units)\n #self.fc3 = nn.Linear(fc2_units, 1)\n self.bc1 = nn.BatchNorm1d(fcs1_units, affine=False)\n self.bc2 = nn.BatchNorm1d(fc2_units, affine=False)\n self.ln = nn.LayerNorm(33)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-3e-3, 3e-3)\n\n \n def discretize(self, ts, bins, global_min=None, global_max=None): \n \n num_bins = bins\n\n min_value = ts.min()\n max_value = ts.max()\n if min_value == max_value:\n min_value = global_min\n max_value = global_max\n step = (max_value - min_value) / num_bins\n ts_bins = np.arange(min_value, max_value, step)\n \n ts_bins = bins\n\n inds = np.digitize(ts, ts_bins)\n #binned_ts = tuple(str(i - 1) for i in inds)\n binned_ts = inds*step + min_value\n return binned_ts \n \n def forward(self, state, action):\n \"\"\"Build a critic (value) network that maps (state, action) pairs -> Q-values.\"\"\" \n #new_state = np.digitize(state,np.linspace(state.min(), state.max(),20))*20+state.min() #self.discretize(state,5.0,-10.0,10.0)\n #xs = F.relu(self.fcs1(new_state.cuda().float()))\n xs = F.leaky_relu(self.fcs1((state)))\n #xs = self.bc1(xs)\n x = torch.cat((xs, action), dim=1)\n x = F.leaky_relu(self.fc2(x))\n x = F.leaky_relu(self.fc23(x))\n #x = F.leaky_relu(self.fc34(x))\n #x = self.bc2(x)\n return self.fc3(x)\n\n ","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"273307562","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 29 14:18:16 2020\n\n@author: jakem\n\"\"\"\n\n\nfrom monodisperse_box_xform import Monodisperse2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 1000 \nB = 40\nBx = 40 #box length (x)\nBy = 40 #box length (y)\nseed = 125 \n\narea_frac = 0.5\nkick_list = [0.5, 0.3, 0.1, 0.05,.01]\ncycle_number = 30000\nxform = .75\narea_frac_array = np.array(np.linspace(0,1,150))\n\n\nscale_x = 1\nscale_y = xform\nfor kick in kick_list:\n m = Monodisperse2(N,Bx,By,seed)\n count = m.train_xform(xform, 1, area_frac, kick, cycle_number, noise=0)\n print('Cycles:', count)\n m.transform_centers(scale_x, scale_y) #Transform centers along readout axis\n func_count_x = m.tag_count_xform \n func_rate_x = m.tag_rate_xform\n func_curve_x = m.tag_curve_xform\n data_count_x = func_count_x(area_frac_array, scale_x, scale_y)\n data_rate_x = func_rate_x(area_frac_array, scale_x, scale_y)\n data_curve_x = func_curve_x(area_frac_array, scale_x, scale_y)\n m.inv_transform_centers(scale_x, scale_y) #Transform centers back\n mem2 = m.detect_memory_xform(0, 1, .005, scale_x,scale_y)\n print('x-axis:', mem2)\n plt.plot(area_frac_array, data_rate_x)\n \nplt.ylim([0,.9])\n#plt.xlim([.4,.75])\nplt.ylabel('Rate')\nplt.xlabel('Area Fraction')\nplt.legend(['kick = 0.5','kick = 0.3','kick = 0.1','kick = 0.05','kick = 0.01'])\nplt.show()\n\n\n\n \n# scale_x = 1\n# scale_y = xform\n# m.transform_centers(scale_x, scale_y) #Transform centers along readout axis \n# func_count_y = m.tag_count_xform\n# func_rate_y = m.tag_rate_xform\n# func_curve_y = 
m.tag_curve_xform\n# data_count_y = func_count_y(area_frac_array, scale_x, scale_y)\n# data_rate_y = func_rate_y(area_frac_array, scale_x, scale_y)\n# data_curve_y = func_curve_y(area_frac_array, scale_x, scale_y)\n# m.inv_transform_centers(scale_x, scale_y) #Transform centers back\n# mem3 = m.detect_memory_xform(0, 1, .005, scale_x,scale_y)\n# print('y-axis:',mem3)\n\n\n# plt.plot(area_frac_array, data_count_x)\n# plt.plot(area_frac_array, data_rate_x)\n# plt.plot(area_frac_array, data_curve_x)\n# plt.plot(area_frac_array, data_count_y)\n# plt.ylabel('Count')\n# plt.xlabel('Area Fraction')\n# plt.legend(['X-axis', 'Y-axis'])\n# plt.show()\n\n# plt.plot(area_frac_array, data_rate_y)\n# plt.ylabel('Rate')\n# plt.xlabel('Area Fraction')\n# plt.legend(['X-axis', 'Y-axis'])\n# plt.show()\n\n# plt.plot(area_frac_array, data_curve_y)\n# plt.ylabel('Curve')\n# plt.xlabel('Area Fraction')\n# plt.legend(['Isotropic', 'X-axis', 'Y-axis'])\n# plt.show()","sub_path":"swellpy/kick_lim.py","file_name":"kick_lim.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"254797936","text":"import re\r\nimport unicodedata,sys\r\n\r\ns = ' hello world \\n'\r\nprint(s.strip())\r\nprint(s.lstrip())\r\nprint(s.rstrip())\r\n\r\nt = '-----hello====='\r\nprint(t.lstrip('-'))\r\nprint(t.strip('-='))\r\n\r\n\r\ns = ' hello world \\n'\r\nprint(s.replace(' ',''))\r\nprint(re.sub('\\s+',' ',s))\r\n\r\n'''\r\nwith open('2.9unicode.py') as f:\r\n lines = (line.strip() for line in f)\r\n for line in lines:\r\n print(line)\r\n'''\r\n\r\nprint('-'*40)\r\ns = 'pýtĥöñ\\fis\\tawesome\\r\\n'\r\nprint(s)\r\n\r\nremap = {\r\n ord('\\t') : ' ',\r\n ord('\\f') : ' ',\r\n ord('\\r') : None # Deleted\r\n}\r\nprint(s.translate(remap))\r\n\r\ncmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode) if unicodedata.combining(chr(c)))\r\nb=unicodedata.normalize('NFD',s)\r\nprint(b)\r\nprint(b.translate(cmb_chrs))\r\n\r\ndef clean_spaces(s):\r\n s = s.replace('\\r', '')\r\n s = s.replace('\\t', ' ')\r\n s = s.replace('\\f', ' ')\r\n return s\r\n\r\n","sub_path":"python3/C2/2.11clean_str.py","file_name":"2.11clean_str.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"125589343","text":"import os\nfrom argparse import ArgumentParser, Namespace\nfrom dataclasses import asdict, dataclass, field\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any, Dict, Sequence, Tuple, Union\n\nimport numpy as np\nfrom numpy import ndarray\n\n\n# noinspection PyArgumentList\nclass Channel(Enum):\n ALL = slice(None)\n LAST = slice(-1, None)\n NONE = None\n\n\n@dataclass\nclass _HyperParameters:\n # devices\n device: Union[int, str, Sequence[str], Sequence[int]] = (0, 1, 2, 3)\n out_device: Union[int, str] = 2\n num_workers: int = 4\n\n # select dataset\n # feature: str = 'IV'\n # feature: str = 'DirAC'\n # feature: str = 'mulspec'\n # room_train: str = 'room1+2+3'\n # room_test: str = 'room1+2+3'\n # room_create: str = ''\n\n # model_name: str = 'UNet'\n\n # feature parameters\n fs: int = 16000\n n_fft: int = 512\n l_frame: int = 512\n n_freq: int = 257\n l_hop: int = 256\n num_snr: int = 1\n\n\n use_mel: bool = True\n sampling_rate: int = 22050\n## filter_length: int = 1024\n## win_length: int = 1024\n mel_fmin: float = 0.0\n mel_fmax: float = 8000.0\n max_wav_value: float = 32768.0\n\n\n # training\n n_data: int = 0 # <=0 to use all data\n 
train_ratio: float = 0.90\n n_epochs: int = 200\n batch_size: int = 4\n learning_rate: float = 0.01\n thr_clip_grad: float = 4.\n weight_decay: float = 1e-3 # Adam weight_decay\n optimizer: str = \"novograd\"\n\n #validation\n num_stoi: int = 200\n stoi_iters: int = 32\n stoi_iters_rate: int = 10\n\n # summary\n period_save_state: int = 10\n draw_test_fig: bool = False\n n_save_block_outs: int = 0\n n_glim_iter: int = 100\n repeat_train: int = 2\n repeat_test: int = 256\n noisy_init: bool = True\n speed_test: bool = False\n\n # paths\n # logdir will be converted to type Path in the init_dependent_vars function\n logdir: str = f'/result/deq3'\n path_speech: Path = Path('/data/TIMIT')\n path_feature: Path = Path('/data')\n path_mel: Path = Path('./data')\n dest_test: str = ''\n # path_feature: Path = Path('./backup')\n sfx_featuredir: str = ''\n\n\n use_deqGLI: bool = False\n\n ## deq_gli parameters\n use_deq: bool = False\n\n\n ##model\n model_type: str = \"vanilla\"\n use_fp16: bool = False\n\n # layers : int = 4\n # k_x: int = 3\n # k_y: int = 3\n # s_x:int = 1\n # s_y: int = 2\n # widening: int = 16 \n # use_bn: bool = False\n\n ed_model: Dict[str, Channel] = field(init=False)\n\n # file names\n form_feature: str = '{:04d}_{:+.2f}dB.npz' # i_speech, snr_db\n form_result: str = 'spec_{}.mat'\n\n\n # defined in __post_init__\n channels: Dict[str, Channel] = field(init=False)\n model: Dict[str, Any] = field(init=False)\n scheduler: Dict[str, Any] = field(init=False)\n spec_data_names: Dict[str, str] = field(init=False)\n\n deq_config: Dict[str, Any] = field(init=False)\n vanilla_model: Dict[str, Any] = field(init=False)\n\n\n # dependent variables\n dummy_input_size: Tuple = None\n dict_path: Dict[str, Path] = None\n kwargs_stft: Dict[str, Any] = None\n kwargs_istft: Dict[str, Any] = None\n\n\n def __post_init__(self):\n self.channels = dict(path_speech=Channel.NONE,\n wav=Channel.ALL,\n x=Channel.ALL,\n y=Channel.ALL,\n y_mag=Channel.ALL,\n length=Channel.ALL,\n )\n\n self.model = dict(n_fft=self.n_fft,\n hop_length=self.l_hop,\n depth=1,\n out_all_block=True\n )\n\n\n self.ed_model = dict(layers= 6,\n k_x = 3, k_y = 3,\n s_x = 1, s_y = 2,\n widening = 16, \n use_bn = True,\n lamb = 0.1,\n linear_finalizer = True,\n convGlu = False,\n act = \"relu\",\n act2 = \"selu\",\n glu_bn = True\n )\n\n self.vanilla_model = dict(k_x1 = 11, k_y1 = 11,\n k_x2 = 7, k_y2 = 7, num_channel=16,\n act = \"sigmoid\")\n\n self.deq_config = dict( wnorm=False,\n num_branches = 1,\n base_channels = 16,\n ratio2head = 2,\n fuse_method = \"SUM\",\n droprate = 0.0,\n final_multiplier = 2,\n pretrain_steps = 10000,\n f_thres = 24,\n b_thres = 24,\n num_layers = 3,\n ch_hidden= 16,\n k1 = 11, \n k2 = 7, \n p2 = 3,\n freq_embedding = 256\n )\n\n\n self.scheduler = dict(mode='min',\n factor=0.6,\n patience=5,\n verbose=False,\n threshold=0.01,\n threshold_mode='rel',\n cooldown=0,\n min_lr=1e-5,\n eps=1e-08\n )\n\n self.spec_data_names = dict(x='spec_noisy', y='spec_clean',\n wav='speech',\n y_mag='mag_clean',\n path_speech='path_speech',\n length='length',\n out='spec_estimated',\n res='spec_dnn_output',\n )\n\n def init_dependent_vars(self):\n self.logdir = Path(self.logdir)\n\n self.dummy_input_size = [\n (2,\n self.n_freq,\n int(2**np.floor(np.log2(4 / 3 * self.n_freq)))),\n (1,\n self.n_freq,\n int(2**np.floor(np.log2(4 / 3 * self.n_freq))))\n ]\n\n # path\n self.dict_path = dict(\n speech_train=Path(self.path_speech) / 'TRAIN',\n speech_test=Path(self.path_speech) / 'TEST',\n speech_valid=Path(self.path_speech) / 
'VALID',\n feature_train=Path(self.path_feature) / 'TRAIN',\n feature_test=Path(self.path_feature) / 'TEST',\n feature_valid=Path(self.path_feature) / 'VALID',\n\n # normconst_train=path_feature_train / 'normconst.npz',\n\n figures=Path('./figures'),\n )\n\n # dirspec parameters\n self.kwargs_stft = dict(hop_length=self.l_hop, window='hann', center=True,\n n_fft=self.n_fft, dtype=np.complex64)\n self.kwargs_istft = dict(hop_length=self.l_hop, window='hann', center=True,\n dtype=np.float32)\n\n @staticmethod\n def is_featurefile(f: os.DirEntry) -> bool:\n return (f.name.endswith('.npz')\n and not f.name.startswith('metadata')\n and not f.name.startswith('normconst'))\n\n # Function for parsing argument and set hyper parameters\n def parse_argument(self, parser=None, print_argument=True) -> Namespace:\n def set_attr_to_parsed(obj: Any, attr_name: str, attr_type: type, parsed: str):\n if parsed == '':\n return\n try:\n v = eval(parsed)\n except:\n v = None\n if attr_type == str or v is None or type(v) != attr_type:\n if (parsed.startswith(\"'\") and parsed.endswith(\"'\")\n or parsed.startswith('\"') and parsed.endswith('\"')):\n parsed = parsed[1:-1]\n if isinstance(obj, dict):\n obj[attr_name] = parsed\n else:\n setattr(obj, attr_name, parsed)\n else:\n if isinstance(obj, dict):\n obj[attr_name] = v\n else:\n setattr(obj, attr_name, v)\n\n if not parser:\n parser = ArgumentParser()\n args_already_added = [a.dest for a in parser._actions]\n dict_self = asdict(self)\n for k in dict_self:\n if hasattr(args_already_added, k):\n continue\n if isinstance(dict_self[k], dict):\n for sub_k in dict_self[k]:\n parser.add_argument(f'--{k}--{sub_k}', default='')\n else:\n parser.add_argument(f'--{k}', default='')\n\n args = parser.parse_args()\n for k in dict_self:\n if isinstance(dict_self[k], dict):\n for sub_k, sub_v in dict_self[k].items():\n parsed = getattr(args, f'{k}__{sub_k}')\n set_attr_to_parsed(getattr(self, k), sub_k, type(sub_v), parsed)\n else:\n parsed = getattr(args, k)\n set_attr_to_parsed(self, k, type(dict_self[k]), parsed)\n\n self.init_dependent_vars()\n if print_argument:\n print(repr(self))\n\n return args\n\n def __repr__(self):\n result = ('-------------------------\\n'\n 'Hyper Parameter Settings\\n'\n '-------------------------\\n')\n\n result += '\\n'.join(\n [f'{k}: {v}' for k, v in asdict(self).items() if not isinstance(v, ndarray)])\n result += '\\n-------------------------'\n return result\n\n\nhp = _HyperParameters()\n","sub_path":"hparams1.py","file_name":"hparams1.py","file_ext":"py","file_size_in_byte":9832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"72092514","text":"\"\"\"\nCreated on Fri May 1 2020\n\n@author: landengozashti\n\"\"\"\n\n\"\"\"\nAssociate high parsimony mutations with submitting and originating labs\n\n\n\nInputs: parsimony BED file, corresponding VCF, GISAID Metadata file\n\n\n\nOutputs: \n\nflagged_snps_by_lab.out --- Flagged snps by submitting and originating lab\n\nflagged_snps_summary.tsv --- flagged mutations and respective reasoning\n\n\"\"\" \n\nimport scipy.stats as stats\nimport os\nimport numpy as np\nfrom statistics import mean \nimport argparse\nimport time\nimport pandas as pd\n\n\"\"\"\n\nArguments\n\nmetafilter = False\nif True, program will check metadata for possible errors and return file containing metadata flags\n\noutput = ''\nUser specified output directory\n\nNextstrain Metadata = ''\nPath to metadata file\n\nNextstrain Parsimony Bed File = ''\nPath to nextrain 
parsimony bed file\n\nNextstrain VCF = ''\nPath to Nextstrain VCF file\n\n\n\"\"\"\n\n\nparser = argparse.ArgumentParser(description='Flag suspicious lab-associated variants in COVID-19 genomic data')\nparser.add_argument('-m', nargs='?', required=True,\n help='Path to GISAID metadata file')\nparser.add_argument('-p', nargs='?', required=True,\n help='Path to parsimony file')\nparser.add_argument('-v', nargs='?', required=True,\n help='Path to VCF file')\nparser.add_argument('-o', nargs='?', required=True,\n help='Path to output directory')\nparser.add_argument('-b', nargs='?',const='T', default=None,\n help='Program will also flag suspicious variants that exhibit low minor allele frequency and are significantly associated with one or more particular labs')\nparser.add_argument('-min_parsimony', nargs='?',\n help='Minimum parsimony; must be an integer; default = 4 (beware that reducing this parameter from the default can obscure accuracy)')\nparser.add_argument('-include', nargs='?',const='T', default=None,\n help='Include all mutations with parsimony > min_parsimony regardless of flags')\nparser.add_argument('-min_contribution', nargs='?',\n help='Minimum percent of alternate alleles contributed by a specific lab for variants to be considered highly suspect; default = 80 (beware that reducing this parameter from the default can obscure accuracy)') \nparser.add_argument('-track', nargs='?',const='T', default=None,\n help='Program will output a custom track annotating highly suspect lab-associated mutations and a custom track annotating Artic primers that overlap or are within 10bp of lab-associated mutations in BED Detail format. Both can be directly uploaded to the UCSC Genome Browser. It will also output a bed file containing more information on each lab-associated mutation that can be converted to a binary bigBed file and uploaded as a custom track on the UCSC Genome Browser.')\nparser.add_argument('-fast', nargs='?',const='T', default=None,\n help='(highly recommended) Program will only consider highly suspicious lab-associated mutations and will not output flagged_snps_by_lab.tsv. 
However, it will run much faster!')\n\nargs = vars(parser.parse_args())\nif args['min_parsimony'] == None:\n args['min_parsimony'] = 4\nif args['min_contribution'] == None:\n args['min_contribution'] = .8\nelse:\n args['min_contribution'] = float(args['min_contribution'])/100.0\nif args['fast'] != None:\n args['fast'] = True\n\nrefChrom = 'NC_045512v2'\nparsimonyDic = {}\nsourceList = []\nsourceDic = {}\ntotalParsimoniousmutationCount = 0\nparsimonySourceDic = {}\naaChangeDic = {}\nglobalRefCountDic = {}\nglobalAltCountDic = {}\n\nprint('Reading parsimony file')\nwith open('{0}'.format(args['p']), 'r') as f:\n for line in f:\n sp = line.split('\\t')\n if int(sp[-1]) >= int(args['min_parsimony']):\n parsimonyDic[sp[2]] = sp[-1]\n\nstart_time = time.time()\nprint('Reading VCF')\nmafDic = {}\n\nwith open(args['v'], 'r') as vcf:\n for line in vcf:\n if '##' in line:\n pass\n elif '#' in line:\n sourceList = line.split('\\t')\n else:\n cols = line.split('\\t')\n (pos, mut) = (cols[1], cols[2])\n if pos in parsimonyDic:\n total = int(cols[7].split(';')[1].split('=')[1])\n preAlt = cols[7].split(';')[0].split('=')[1]\n if ',' in preAlt:\n alt = sum([int(i) for i in preAlt.split(',')])\n else:\n alt = int(preAlt)\n ref = total - alt\n globalRefCountDic[mut] = ref\n globalAltCountDic[mut] = alt\n MAF = float(alt)/(float(alt)+ float(ref))\n mafDic[mut] = MAF\n\n\n parsimonySourceDic[mut] = {}\n for i in range(9,len(cols)):\n epiId = sourceList[i].split('|')[0]\n parsimonySourceDic[mut][epiId] = cols[i].split(':')[0]\n info = cols[7]\n infoParts = dict([ part.split('=') for part in info.split(';') ])\n if ('AACHANGE' in infoParts):\n aaChangeDic[mut] = infoParts['AACHANGE']\n\n\nprint('VCF took {0} seconds'.format(time.time() - start_time))\n\noriParsCountDic = {}\nsubParsCountDic = {}\nsubAccessionDic = {}\noriAccessionDic = {}\nstart_time2 = time.time()\nprint('Reading metadata')\naccessionToOri = {}\naccessionToSub = {}\n\nwith open('{0}'.format(args['m']), 'r') as meta:\n for line in meta:\n sp = line.split('\\t')\n if 'strain' == sp[0]:\n indexPosOri = sp.index('originating_lab')\n indexPosSub = sp.index('submitting_lab')\n\n else:\n try:\n if sp[indexPosOri] not in oriAccessionDic:\n oriAccessionDic[sp[indexPosOri]] = [sp[2]]\n accessionToOri[sp[2]] = sp[indexPosOri] \n else:\n oriAccessionDic[sp[indexPosOri]].append(sp[2])\n accessionToOri[sp[2]] = sp[indexPosOri]\n\n\n if sp[indexPosSub] not in subAccessionDic:\n subAccessionDic[sp[indexPosSub]] = [sp[2]]\n accessionToSub[sp[2]] = sp[indexPosOri]\n\n else:\n subAccessionDic[sp[indexPosSub]].append(sp[2])\n accessionToSub[sp[2]] = sp[indexPosOri]\n\n except IndexError:\n pass\nprint('metadata took {0} seconds'.format((time.time() - start_time2)))\n\n\nprint('Working...')\nif args['fast'] == None:\n with open('{0}/flagged_snps_by_lab.tsv'.format(args['o']), 'w') as f:\n f.write('bin\\tsource\\tlab\\tsnp\\tref\\talt\\tglobal ref\\tglobal alt\\tfisher exact\\tproportion of calls\\tMAF\\n')\ntrashDic = {}\nsusDic = {}\ncompDic = {}\notherDic = {}\naltDic = {}\nrefDic = {}\nstart_time3 = time.time()\n\nif args['fast'] == True:\n for ori in subAccessionDic:\n for snp in parsimonySourceDic:\n refCount = 0\n altCount = 0\n for accession in subAccessionDic[ori]:\n try:\n if int(parsimonySourceDic[snp][accession]) == 0:\n refCount += 1\n elif int(parsimonySourceDic[snp][accession]) > 0:\n altCount += 1\n except KeyError:\n pass\n try:\n if (refCount > 0 or altCount > 0) and (float(altCount)/float(globalAltCountDic[snp]) > args['min_contribution']):\n 
oddsratio, pvalue = stats.fisher_exact(np.array([[globalRefCountDic[snp]-refCount,refCount],[globalAltCountDic[snp]-altCount,altCount]]))\n\n if snp not in compDic or float(altCount)/float(globalAltCountDic[snp]) > compDic[snp]:\n compDic[snp] = float(altCount)/float(globalAltCountDic[snp])\n\n trashDic[snp] = '{0}% of alternate allele calls stem from {1}'.format(round(float(altCount)/float(globalAltCountDic[snp])*100,2), ori)\n\n except KeyError:\n pass\n for ori in oriAccessionDic:\n \n for snp in parsimonySourceDic:\n refCount = 0\n altCount = 0\n for accession in oriAccessionDic[ori]:\n try:\n if int(parsimonySourceDic[snp][accession]) == 0:\n refCount += 1\n elif int(parsimonySourceDic[snp][accession]) > 0:\n altCount += 1\n except KeyError:\n pass\n try:\n\n if (refCount > 0 or altCount > 0) and (float(altCount)/float(globalAltCountDic[snp]) > args['min_contribution']):\n \n oddsratio, pvalue = stats.fisher_exact(np.array([[globalRefCountDic[snp]-refCount,refCount],[globalAltCountDic[snp]-altCount,altCount]]))\n\n if snp not in compDic or float(altCount)/float(globalAltCountDic[snp]) > compDic[snp]:\n compDic[snp] = float(altCount)/float(globalAltCountDic[snp])\n\n trashDic[snp] = '{0}% of alternate allele calls stem from {1}'.format(round(float(altCount)/float(globalAltCountDic[snp])*100,2), ori)\n except KeyError:\n pass\n\n\n print('Finished association in {0} seconds'.format(time.time() - start_time3))\n\n\n\n\nelse:\n\n for ori in subAccessionDic:\n altDic[ori] = {}\n refDic[ori] = {}\n for snp in parsimonySourceDic:\n refCount = 0\n altCount = 0\n for accession in subAccessionDic[ori]:\n try:\n if int(parsimonySourceDic[snp][accession]) == 0:\n refCount += 1\n elif int(parsimonySourceDic[snp][accession]) > 0:\n altCount += 1\n except KeyError:\n pass\n\n if refCount > 0 or altCount > 0:\n refDic[ori][snp] = refCount\n altDic[ori][snp] = altCount\n\n trashDic = {}\n susDic = {}\n compDic = {}\n otherDic = {}\n mafDic = {}\n\n\n\n with open('{0}/flagged_snps_by_lab.tsv'.format(args['o']), 'a') as f:\n for ori in refDic:\n for snp in refDic[ori]:\n oddsratio, pvalue = stats.fisher_exact(np.array([[globalRefCountDic[snp]-refDic[ori][snp],refDic[ori][snp]],[globalAltCountDic[snp]-altDic[ori][snp],altDic[ori][snp]]]))\n MAF = float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp]))\n mafDic[snp] = MAF\n\n if (float(altDic[ori][snp])/float(globalAltCountDic[snp]) > args['min_contribution']):\n if snp not in compDic or float(altDic[ori][snp])/float(globalAltCountDic[snp]) > compDic[snp]:\n compDic[snp] = float(altDic[ori][snp])/float(globalAltCountDic[snp])\n\n trashDic[snp] = '{0}% of alternate allele calls stem from {1}'.format(round(float(altDic[ori][snp])/float(globalAltCountDic[snp])*100,2), ori)\n f.write('highly suspect\\tsubmitting lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\n\n elif (pvalue < 0.05 and MAF < 0.01 and float(altDic[ori][snp])/float(globalAltCountDic[snp]) > 0.1 and args['b']=='T' and snp not in trashDic):\n f.write('suspect\\tsubmitting lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], 
globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\n susDic[snp] = 'Minor allele frequency is {0} and {1} contributes {3}% of minor allele calls (p = {2})'.format(MAF, ori, pvalue,round(float(altDic[ori][snp])/float(globalAltCountDic[snp])*100,2))\n elif args['include']=='T' and snp not in trashDic and snp not in susDic:\n otherDic[snp] = 'parsimony > {0}'.format(int(args['min_parsimony'])-1)\n f.write('no flag\\tsubmitting lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\n\n \n\n altDic = {}\n refDic = {}\n\n for ori in oriAccessionDic:\n refDic[ori] = {}\n altDic[ori] = {}\n for snp in parsimonySourceDic:\n refCount = 0\n altCount = 0\n for accession in oriAccessionDic[ori]:\n try:\n if int(parsimonySourceDic[snp][accession]) == 0:\n refCount += 1\n elif int(parsimonySourceDic[snp][accession]) > 0:\n altCount += 1\n except KeyError:\n pass\n\n if refCount > 0 or altCount > 0:\n refDic[ori][snp] = refCount\n altDic[ori][snp] = altCount\n\n\n with open('{0}/flagged_snps_by_lab.tsv'.format(args['o']), 'a') as f:\n for ori in refDic:\n for snp in refDic[ori]:\n MAF = float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp]))\n oddsratio, pvalue = stats.fisher_exact(np.array([[globalRefCountDic[snp]-refDic[ori][snp],refDic[ori][snp]],[globalAltCountDic[snp]-altDic[ori][snp],altDic[ori][snp]]]))\n\n if (float(altDic[ori][snp])/float(globalAltCountDic[snp]) > args['min_contribution']):\n if snp not in compDic or float(altDic[ori][snp])/float(globalAltCountDic[snp]) > compDic[snp]:\n compDic[snp] = float(altDic[ori][snp])/float(globalAltCountDic[snp])\n \n trashDic[snp] = '{0}% of alternate allele calls stem from {1}'.format(round(float(altDic[ori][snp])/float(globalAltCountDic[snp])*100,2), ori)\n f.write('highly suspect\\toriginating lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\n\n elif (pvalue < 0.05 and MAF < 0.01 and float(altDic[ori][snp])/float(globalAltCountDic[snp]) > 0.1 and args['b']=='T' and snp not in trashDic):\n f.write('suspect\\toriginating lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\n susDic[snp] = 'Minor allele frequency is {0} and {1} contributes {3}% of minor allele calls (p = {2})'.format(MAF, ori, pvalue,round(float(altDic[ori][snp])/float(globalAltCountDic[snp])*100,2))\n elif args['include']=='T' and snp not in trashDic and snp not in susDic:\n otherDic[snp] = 'parsimony > {0}'.format(int(args['min_parsimony'])-1)\n f.write('no flag\\toriginating lab\\t{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(ori, snp, refDic[ori][snp], altDic[ori][snp], globalRefCountDic[snp], 
globalAltCountDic[snp],pvalue,float(altDic[ori][snp])/float(globalAltCountDic[snp]),float(globalAltCountDic[snp])/(float(globalAltCountDic[snp])+ float(globalRefCountDic[snp])) ))\nprint('Writing output files')\nwith open('{0}/flagged_snps_summary.tsv'.format(args['o']), 'w') as f:\n f.write('snp\\tbin\\treasoning\\n')\n\n for snp in trashDic:\n f.write('{0}\\thighly suspect\\t{1}\\n'.format(snp, trashDic[snp]))\n if args['b']=='T':\n for snp in susDic:\n f.write('{0}\\tsuspect\\t{1}\\n'.format(snp, susDic[snp]))\n if args['include']=='T':\n for snp in otherDic:\n if snp not in susDic and snp not in trashDic:\n f.write('{0}\\tno flag\\t{1}\\n'.format(snp, otherDic[snp]))\n\nif args['track'] != None:\n primerTrackList = []\n primerDic = {}\n with open('primers.txt', 'r') as primeFile:\n for line in primeFile:\n\n for snp in trashDic:\n\n sp = line.split('\\t')\n if ',' in snp:\n pos = snp.split(',')[0][1:-1]\n else:\n pos = snp[1:-1]\n if int(pos) > (int(sp[1]) - 10) and int(pos) < (int(sp[2]) + 10):\n primerTrackList.append(line)\n if snp not in primerDic:\n primerDic[snp] = sp[3]\n else:\n primerDic[snp] += (',' + sp[3])\n\n with open('{0}/lab_associated_error.bed'.format(args['o']), 'w') as f:\n for snp in trashDic:\n if ',' in snp:\n pos = snp.split(',')[0][1:-1]\n primer = primerDic[snp] if snp in primerDic else 'NA'\n aaChange = aaChangeDic[snp] if snp in aaChangeDic else 'NA'\n f.write('\\t'.join([ refChrom, str(int(pos)-1), pos, snp.replace('T', 'U'),\n parsimonyDic[pos].strip('\\n'), str(globalAltCountDic[snp]),\n str(mafDic[snp]), primer, aaChange, trashDic[snp] ]) + '\\n')\n\n with open('{0}/Artic_primers.bed'.format(args['o']), 'w') as f:\n for line in primerTrackList:\n f.write('{0}\\t{1}\\t{2}\\t{3}\\n'.format(refChrom, line.split('\\t')[1], line.split('\\t')[2], line.split('\\t')[3]))\n os.system('sort -k1,1 -k2,2n {0}/Artic_primers.bed > {0}/Artic_primers_sorted.bed'.format(args['o']))\n os.system('sort -k1,1 -k2,2n {0}/lab_associated_error.bed > {0}/lab_associated_error_sorted.bed'.format(args['o']))\n\n os.system('rm -r {0}/lab_associated_error.bed'.format(args['o']))\n os.system('rm -r {0}/Artic_primers.bed'.format(args['o']))\n\n\n with open('{0}/lab_associated_error_sorted.bed'.format(args['o']),'r') as sorted:\n lineList = []\n for line in sorted:\n lineList.append(line)\n\n with open('{0}/lab_associated_error_final.bedDetail'.format(args['o']),'w') as final:\n final.write('browser position NC_045512v2:0-29903\\ntrack name=lab type=bedDetail description=\"Lab-associated Mutations\" visibility=pack\\n#chrom\\tchromStart\\tchromStop\\tname\\tparsimony score\\tnumber of alt alleles\\tminor allele frequency\\tPrimer within 10bp\\tcomment\\n')\n for line in lineList:\n cols = line.split(\"\\t\")\n bedDetailCols = cols[0:4] + cols[8:10]\n final.write('\\t'.join(bedDetailCols))\n\n with open('{0}/lab_associated_error_final.bedForBigBed'.format(args['o']),'w') as final:\n for line in lineList:\n final.write(line)\n\n os.system('rm -r {0}/lab_associated_error_sorted.bed'.format(args['o']))\n\n with open('{0}/Artic_primers_sorted.bed'.format(args['o']),'r') as sorted:\n lineList = []\n for line in sorted:\n lineList.append(line)\n with open('{0}/Artic_primers_final.bed'.format(args['o']),'w') as final:\n final.write('browser position NC_045512v2:0-29903\\ntrack name=primers type=bed description=\"Artic Primers\" visibility=pack\\n#chrom\\tchromStart\\tchromStop\\tname\\n')\n for line in lineList:\n final.write(line)\n os.system('rm -r 
{0}/Artic_primers_sorted.bed'.format(args['o']))\n\n\n\n\nprint('That all took like {0} minutes... sorry for the wait'.format(int(round((time.time() - start_time)/float(60),0))))\n\n","sub_path":"parsimony_per_site_analysis3.py","file_name":"parsimony_per_site_analysis3.py","file_ext":"py","file_size_in_byte":19835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"116832241","text":"experiment = {'bkg_img':None,#None gives default of video_filename[:-4] + '_bkgimg.png'\n 'sample':'Normal Bacteria + 500nm colloids in buffer just bright',\n 'fps':30\n }\n\ncrop = {'crop_method': 'crop_box',\n 'crop_coords': (76, 119, 944, 900),\n 'mask': None\n }\n\npreprocess = {\n 'preprocess_method': ('variance','grayscale','medianblur','adaptive_threshold',),#'variance'\n 'load_bkg_img':True,\n 'grayscale':{},\n 'threshold':{'threshold':[1,0,255,1],\n 'th_mode':[1,0,1,1]},\n 'adaptive_threshold':{'block_size': [29,1,300,2],\n 'C': [-23, -30, 30, 1],\n 'ad_mode': [0, 0, 1, 1]\n },\n 'distance':{},\n 'blur':{'kernel':[1,1,15,2]},\n 'medianblur':{'kernel':[3,1,15,2]},\n 'gamma':{'gamma':[1,0,100,1]},\n 'resize':{'scale':[1,0,500,1]},\n 'subtract_bkg':{'subtract_bkg_type':'img',\n 'subtract_bkg_blur_kernel': 3,\n 'subtract_bkg_invert':[1,0,1,1],\n 'subtract_bkg_norm':True\n },\n 'variance':{'variance_type':'img',\n 'variance_blur_kernel': 3,\n 'variance_bkg_norm':True\n },\n 'flip':{},\n }\n\ntrack = {\n 'track_method':('trackpy',),\n 'trackpy':{'size_estimate':[7,1, 1001,2],\n 'invert':[0,0,1,1]\n },\n 'hough':{'min_dist':[10,1,201,2],\n 'p1':[10, 1, 201,2],\n 'p2':[10, 1, 201,2],\n 'min_rad':[10, 1, 201,2],\n 'max_rad':[10, 1, 201,2]\n },\n 'contours':{'noise_cutoff':[2,1,50,1],\n 'area':[20, 1, 200, 1],\n 'aspect':[1,1,20,1]},\n 'distance':{}\n }\n\nlink = {\n 'link_method':('default',),\n 'default':{ 'pos_columns':None,\n 'max_frame_displacement': 10,\n 'memory': 3,\n 'min_frame_life': 10\n #\n }\n }\n\npostprocess = {\n 'postprocess_method': ('subtract_drift','difference','difference*y','magnitude','max','classify',),\n 'smooth':{'column_name':'y',\n 'output_name':'y_smooth',\n 'span':5,\n 'method':'default'\n },\n 'subtract_drift':{},\n 'difference':{'column_name':'x_drift',\n 'output_name':'x_diff',\n 'span':10\n },\n 'difference*y':{'column_name':'y_drift',\n 'output_name':'y_diff',\n 'span':20\n },\n 'magnitude':{'column_names':('x_diff','y_diff'),\n 'output_name':'r_diff'\n },\n 'median':{'column_name':'r_diff',\n 'output_name':'median_r',},\n\n 'max':{'column_name':'r_diff',\n 'output_name':'max_r',},\n\n 'classify':{'column_name':'max_r',\n 'output_name':'classifier',\n 'value':[18, 1, 100, 1]\n },\n\n 'angle':{'column_names':('x','y'),\n 'output_name':'theta',\n 'units':'degrees'\n\n },\n 'rate':{'column_name':'x',\n 'output_name':'vx',\n 'fps':50.0,\n 'method':'finite_difference'\n },\n 'rate*2':{'column_name':'y',\n 'output_name':'vy',\n 'fps':50.0,\n 'method':'finite_difference'\n },\n 'neighbours':{'method':'delaunay',\n 'neighbours':6,\n 'cutoff':[50,1,200,1],\n }\n\n }\n\nannotate = {\n 'annotate_method': ('circles','circles*2','circles*nans','text_label','trajectories','trajectories*bacteria',),\n 'videowriter':'opencv',\n 'subsection':(200,600),#(start,stop) frame numbers\n 'text_label':{'text':'BP1',\n 'position':(100,100),\n 'font_colour':(255,0,0),\n 'font_size':3,\n 'font_thickness':2\n },\n 'var_label':{'var_column':'index',\n 'position':(100,100),\n 'font_colour':(255,0,255),\n 'font_size':4,\n 'font_thickness':3\n },\n 
'particle_values': {'values_column': 'particle',\n 'font_colour': (255, 0, 255),\n 'font_size': 1,\n 'font_thickness': 1\n },\n 'circles':{'radius':6,\n 'cmap_type':'static',#'continuous',\n 'cmap_column':'x',#For continuous\n 'cmap_max':[470,1,2000,1],#For continuous\n 'cmap_scale':1,\n 'colour': (0,0,255),#For static\n 'classifier_column': 'classifier',#For discrete or continuous\n 'classifier': 1,#For discrete or continuous\n 'thickness':2\n },\n 'circles*2': {'radius': 6,\n 'cmap_type': 'static', # 'continuous',\n 'cmap_column': 'x', # For continuous\n 'cmap_max': [470, 1, 2000, 1], # For continuous\n 'cmap_scale': 1,\n 'colour': (255, 0, 0), # For static\n 'classifier_column': 'classifier',\n 'classifier': 2, # For discrete or continuous\n 'thickness': 2\n },\n 'circles*nans':{'radius': 10,\n 'cmap_type': 'static', # 'continuous',\n 'cmap_column': 'x', # For continuous\n 'cmap_max': [470, 1, 2000, 1], # For continuous\n 'cmap_scale': 1,\n 'colour': (255, 255, 0), # For static\n 'classifier_column': 'classifier',\n 'classifier': 2, # For discrete or continuous\n 'thickness': 2\n },\n 'boxes':{'radius':10,\n 'cmap_type':'continuous',\n 'cmap_column':'x',#None\n 'cmap_max':[1,1,2000,1],\n 'thickness':2\n },\n 'contours':{'radius':10,\n 'cmap_type':'continuous',\n 'cmap_column':'x',#Nonedf['neighbours'].loc(particle)\n 'cmap_max':[1,1,2000,1],\n 'thickness':2\n },\n 'networks':{'colour':(0,255,0),\n 'thickness':2\n },\n\n 'vectors':{'dx_column':'x',\n 'dy_column':'y',\n 'thickness':2,\n 'line_type':8,\n 'tip_length':[1,1,100,1],\n 'vector_scale':[1,1,2000,1],\n 'cmap_type':'static',#'continuous',\n 'cmap_column':'x',#For continuous\n 'cmap_max':[470,1,2000,1],#For continuous\n 'cmap_scale':1,\n 'colour': (0,0,255),#For static\n 'classifier_column':None,#For discrete or continuous\n 'classifier': None,#For discrete or continuous\n 'thickness':2\n },\n 'trajectories':{'x_column':'x',\n 'y_column':'y',\n 'traj_length': [1000,0,1000,1],\n 'cmap_type':'static',#'continuous',\n 'cmap_column':'x',#For continuous\n 'cmap_max':[470,1,2000,1],#For continuous\n 'cmap_scale':1,\n 'colour': (64,224,208),#For static\n 'classifier_column':'classifier',#For discrete or continuous\n 'classifier': 1,#For discrete or continuous\n 'thickness':1\n },\n 'trajectories*bacteria':{'x_column':'x',\n 'y_column':'y',\n 'traj_length': [1000,0,1000,1],\n 'cmap_type':'static',#'continuous',\n 'cmap_column':'x',#For continuous\n 'cmap_max':[470,1,2000,1],#For continuous\n 'cmap_scale':1,\n 'colour': (255,255,0),#For static\n 'classifier_column':'classifier',#For discrete or continuous\n 'classifier': 2,#For discrete or continuous\n 'thickness':1\n }\n\n\n }\n\nPARAMETERS = {\n 'experiment': experiment,\n 'crop': crop,\n 'preprocess':preprocess,\n 'track':track,\n 'link':link,\n 'postprocess':postprocess,\n 'annotate':annotate\n }\n\n","sub_path":"project/bacteria.py","file_name":"bacteria.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"473325593","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport requests\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"This is for establishing telegram bot\")\nparser.add_argument('-t', '--token', help=\"telegram token\", required=True)\n\nargs = parser.parse_args()\n\n#'543323547:AAHAEgEUZYRXKnMg-lUEcwv0TLxDXJ2Rib8'\nTOKEN = args.token\nURL = 'http://155.69.146.216:4444'\n\ndef predict(bot, update):\n predict_msg = {'question': 
update.message.text}\n resp = requests.post(URL+\"/predict\", json=predict_msg)\n update.message.reply_text(\"Possible topic: \" + resp.json()['result'])\n\ndef main():\n # Create Updater object and attach dispatcher to it\n updater = Updater(TOKEN)\n dispatcher = updater.dispatcher\n print(\"Bot started\")\n\n # Add command handler to dispatcher\n predict_handler = MessageHandler(Filters.text, predict)\n dispatcher.add_handler(predict_handler)\n\n # Start the bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C\n updater.idle()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"570116414","text":"#Exercise 36\r\nimport re #Regular expressions module\r\n\r\n#Read a value from the keyboard\r\nletter = input('Enter a letter: ')\r\nletter = letter.lower()\r\n\r\n#Control structure that checks\r\n#whether letter is a vowel or a consonant\r\nif re.match('[aeiou]' , letter):\r\n print(letter.title() + ' is a vowel')\r\n\r\nelif letter == 'y':\r\n print(letter.title() + ' is sometimes a vowel and sometimes a consonant')\r\n\r\nelif re.match('[0-9]',letter) or re.match('[!~@#$%^&*()_+]',letter):\r\n print('Error: you must enter a letter')\r\n\r\nelse:\r\n print(letter.title() + ' is a consonant')\r\n","sub_path":"Chapter2/vowel_or_consonat.py","file_name":"vowel_or_consonat.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"243987832","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.core.validators import validate_email\n\nfrom rest_framework import status\n\nfrom .models import *\n\nimport json\n\n\n\ndef index(request):\n \n context = {\n 'mangas': Manga.objects.all()\n }\n \n return render(request, 'main/index.html', context=context)\n\n\ndef register_email(request):\n \n if request.is_ajax():\n \n data = request.GET\n \n email = data['email']\n mangas = json.loads(data['mangas'])\n \n try:\n validate_email(email)\n \n except Exception as e:\n \n return JsonResponse(\n {\n 'error': 'This email is invalid'\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n \n subscriber, created = Subscriber.objects.get_or_create(email=email)\n \n mangas = Manga.objects.filter(name__in=mangas)\n \n subscriber.mangas.set(mangas, clear=True)\n \n return JsonResponse({\n 'created': created\n })\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"70880082","text":"import numpy as np\r\n\r\n\"\"\"\r\n ## DICTIONARY ##\r\nch = chromosome\r\nch_size = chromosome size\r\npop = population\r\npop_size = population size\r\nprob_cut = probability to cut the chromosome for crossover\r\nprob_mut = probability of mutating the chromosome\r\nprob = random prob for tests (Parent, Cut, Mutation)\r\ntotal_fit = sum of the values of the fitness from all the population\r\nparents = vector that contains the probability of every chromosome becoming a parent for new chromosomes\r\niter = number of populations created before ending the program\r\n\r\n#FUNC: fitness(chromosome) 1 0 1 1 0 => 1*(5*5) + 0*(4*4) + 1*(3*3) + 1*(2*2) + 0*(1*1)\r\n\"\"\"\r\n\r\n#GLOBAL\r\nch_size = 5\r\npop_size = 5\r\nprob_cut = 0.25\r\nprob_mut = 0.10\r\niter = 2\r\n\r\n#RANDOM\r\ndef random():\r\n 
return np.random.rand()\r\n\r\n#CREATE CHROMOSOME\r\ndef gen_chromosome(ch_size):\r\n ch = np.random.randint(2, size=ch_size)\r\n\r\n return ch\r\n\r\n#CREATE FIRST_POP\r\ndef gen_first_pop():\r\n pop = np.empty([pop_size, ch_size])\r\n\r\n for i in range(pop_size):\r\n pop[i] = gen_chromosome(ch_size)\r\n\r\n return pop\r\n\r\n#FITNESS\r\ndef fitness(ch):\r\n return sum([(i+1)*(i+1)*ch[ch_size-i-1] for i in range(ch_size)])\r\n\r\n#DEFINE PARENT_CHANCE\r\ndef parents_prob(pop):\r\n total_fit = sum(fitness(pop[i]) for i in range(pop_size))\r\n\r\n parents = np.empty([pop_size, 1])\r\n\r\n for i in range(pop_size):\r\n parents[i] = fitness(pop[i]) / total_fit\r\n\r\n \"\"\" # FIRST TEST\r\n print(\"pop:\", pop, \"\\n pop_size: \", pop_size, \" total_fit: \", total_fit, \"\\n\")\r\n for i in range (pop_size): print(i, \": fitness\", fitness(pop[i]), \": p_percentage\", parents[i])\r\n \"\"\"\r\n\r\n return parents\r\n\r\n#SELECT THE PARENT\r\ndef selection(pop, parents):\r\n prob = random()\r\n\r\n # build the cumulative distribution, then pick the chromosome whose\r\n # interval contains prob (roulette-wheel selection)\r\n for i in range(1, pop_size):\r\n parents[i] += parents[i-1]\r\n\r\n for i in range(1, pop_size):\r\n if prob>parents[i-1] and prob
0, 6]\n # # self.trim = 5\n # # self.radlist = [utils_geom.deg2rad(deg) for deg in self.deglist]\n\n\n # visX_e = []\n # for s in list(range(0, self.S, 2)):\n # visX_e.append(self.summ_writer.summ_lrtlist(\n # '', self.rgb_camXs[:,s],\n # self.lrtlist_camXs[:,s],\n # self.scorelist_s[:,s],\n # self.tidlist_s[:,s],\n # # torch.cat([self.lrtlist_camXs[:,s:s+1]], dim=1),\n # # torch.cat([self.lrtlist_camXs[:,s:s+1]], dim=1),\n # # torch.cat([torch.ones([self.B,1]).float().cuda()], dim=1),\n # # torch.arange(1,3).reshape(self.B, 2).long().cuda(),\n # self.pix_T_cams[:,s], only_return=True))\n # self.summ_writer.summ_rgbs('obj/box_camXs_g', visX_e)\n\n # print('set_name', self.set_name)\n # print('vox_size_X', self.vox_size_X)\n # print('vox_size_Y', self.vox_size_Y)\n # print('vox_size_Z', self.vox_size_Z)\n \n return True # OK\n \n def run_train(self, feed):\n results = dict()\n\n global_step = feed['global_step']\n total_loss = torch.tensor(0.0).cuda()\n\n __p = lambda x: utils_basic.pack_seqdim(x, self.B)\n __u = lambda x: utils_basic.unpack_seqdim(x, self.B)\n \n self.rgb_camXs = feed[\"rgb_camXs\"]\n self.unp_memXs = __u(self.vox_util.unproject_rgb_to_mem(\n __p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams)))\n self.unp_memX0s = self.vox_util.apply_4x4s_to_voxs(self.camX0s_T_camXs, self.unp_memXs)\n self.occ_memX0s = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camX0s), self.Z, self.Y, self.X))\n \n if hyp.do_feat3D:\n feat_memX0s_input = torch.cat([\n self.occ_memX0s,\n self.unp_memX0s*self.occ_memX0s,\n ], dim=2)\n feat3D_loss, feat_memX0s_, valid_memX0s_ = self.featnet3D(\n __p(feat_memX0s_input[:,1:]),\n self.summ_writer,\n )\n feat_memX0s = __u(feat_memX0s_)\n valid_memX0s = __u(valid_memX0s_)\n total_loss += feat3D_loss\n\n # print('feat_memX0s', feat_memX0s.shape)\n # input()\n\n feat_memX0 = utils_basic.reduce_masked_mean(\n feat_memX0s,\n valid_memX0s.repeat(1, 1, hyp.feat3D_dim, 1, 1, 1),\n dim=1)\n valid_memX0 = torch.sum(valid_memX0s, dim=1).clamp(0, 1)\n self.summ_writer.summ_feat('3D_feats/feat_memX0', feat_memX0, valid=valid_memX0, pca=True)\n # self.summ_writer.summ_feat('3D_feats/valid_memX0', valid_memX0, pca=False)\n\n if hyp.do_emb3D:\n _, altfeat_memX0, altvalid_memX0 = self.featnet3D_slow(feat_memX0s_input[:,0])\n self.summ_writer.summ_feat('3D_feats/altfeat_memX0', altfeat_memX0, valid=altvalid_memX0, pca=True)\n # self.summ_writer.summ_feat('3D_feats/altvalid_memX0', altvalid_memX0, pca=False)\n\n if hyp.do_emb3D:\n # compute 3D ML\n \n emb_loss_3D = self.embnet3D(\n feat_memX0,\n altfeat_memX0,\n valid_memX0.round(),\n altvalid_memX0.round(),\n self.summ_writer)\n total_loss += emb_loss_3D\n \n \n # feat_memXs_input = torch.cat([\n # self.occ_memXs,\n # self.unp_memXs*self.occ_memXs,\n # ], dim=2)\n # feat_memRs_input = torch.cat([\n # self.occ_memRs,\n # self.unp_memRs*self.occ_memRs,\n # ], dim=2)\n \n # feat3D_loss, feat_memXs_ = self.featnet3D(\n # __p(feat_memXs_input[:,1:]),\n # self.summ_writer,\n # )\n # feat_memXs = __u(feat_memXs_)\n # valid_memXs = torch.ones_like(feat_memXs[:,:,0:1])\n # total_loss += feat3D_loss\n \n # # warp things to R\n # feat_memRs = self.vox_util.apply_4x4s_to_voxs(self.camRs_T_camXs[:, 1:], feat_memXs)\n # valid_memRs = self.vox_util.apply_4x4s_to_voxs(self.camRs_T_camXs[:, 1:], valid_memXs)\n\n # feat_memR = utils_basic.reduce_masked_mean(\n # feat_memRs,\n # valid_memRs.repeat(1, 1, hyp.feat3D_dim, 1, 1, 1),\n # dim=1)\n # valid_memR = torch.sum(valid_memRs, dim=1).clamp(0, 1)\n\n # _, altfeat_memR = 
self.featnet3D(feat_memRs_input[:,0])\n \n # self.summ_writer.summ_feat('3D_feats/feat_memR', feat_memR, valid=valid_memR, pca=True)\n # self.summ_writer.summ_feat('3D_feats/altfeat_memR', altfeat_memR, pca=True)\n\n # if hyp.do_vq3drgb:\n # # overwrite altfeat_memR with its quantized version\n # vq3drgb_loss, altfeat_memR, _ = self.vq3drgbnet(\n # altfeat_memR,\n # self.summ_writer,\n # )\n # total_loss += vq3drgb_loss\n\n if hyp.do_occ:\n _, _, Z_, Y_, X_ = list(feat_memX0.shape)\n occ_memX0_sup, free_memX0_sup, _, _ = self.vox_util.prep_occs_supervision(\n self.camX0s_T_camXs,\n self.xyz_camXs,\n Z_, Y_, X_,\n agg=True)\n self.summ_writer.summ_occ('occ_sup/occ_sup', occ_memX0_sup)\n self.summ_writer.summ_occ('occ_sup/free_sup', free_memX0_sup)\n occ_loss, occ_memX0_pred = self.occnet(\n altfeat_memX0, \n occ_memX0_sup,\n free_memX0_sup,\n altvalid_memX0, \n self.summ_writer)\n total_loss += occ_loss\n\n # if hyp.do_view:\n # assert(hyp.do_feat3D)\n # # decode the feat volume into an image\n # view_loss, rgb_e, view_camX0 = self.viewnet(\n # self.pix_T_cams[:,0],\n # self.camX0_T_camR,\n # altfeat_memR,\n # self.rgb_camXs[:,0], \n # summ_writer=self.summ_writer)\n # total_loss += view_loss\n \n # if hyp.do_emb2D:\n # assert(hyp.do_view)\n # # create an embedding image, representing the bottom-up 2D feature tensor\n # emb_loss_2D, _ = self.embnet2D(\n # view_camX0,\n # feat_camX0,\n # torch.ones_like(view_camX0[:,0:1]),\n # self.summ_writer)\n # total_loss += emb_loss_2D\n \n # if hyp.do_emb3D:\n # # compute 3D ML\n # emb_loss_3D = self.embnet3D(\n # feat_memR,\n # altfeat_memR,\n # valid_memR.round(),\n # torch.ones_like(valid_memR),\n # self.summ_writer)\n # total_loss += emb_loss_3D\n\n # if hyp.do_linclass:\n \n # masklist_memR = self.vox_util.assemble_padded_obj_masklist(\n # self.lrtlist_camRs[:,0], self.scorelist_s[:,0], self.Z2, self.Y2, self.X2, coeff=1.2)\n # mask_memR = torch.sum(masklist_memR, dim=1)\n # self.summ_writer.summ_oned('obj/mask_memR', mask_memR.clamp(0, 1), bev=True)\n\n # occ_memRs = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camRs), self.Z2, self.Y2, self.X2))\n # occ_memR = torch.sum(occ_memRs, dim=1).clamp(0,1)\n \n # obj_memR = (mask_memR * occ_memR).view(-1)\n # bkg_memR = ((1.0 - mask_memR) * occ_memR).view(-1)\n # obj_inds = torch.nonzero(obj_memR, as_tuple=False)\n # # obj_inds = obj_inds.detach().cpu().numpy()\n # bkg_inds = torch.nonzero(bkg_memR, as_tuple=False)\n # # bkg_inds = bkg_inds.detach().cpu().numpy()\n\n # # print('altfeat_memR', altfeat_memR.shape)\n # # print('mask_memR', mask_memR.shape)\n # # print('occ_memR', occ_memR.shape)\n # # print('%d obj_inds; %d bkg_inds' % (len(obj_inds), len(bkg_inds)))\n # # input()\n \n # code_vec = altfeat_memR.detach().permute(0,2,3,4,1).reshape(-1, hyp.feat3D_dim)\n # obj_inds = obj_inds.reshape([-1])\n # bkg_inds = bkg_inds.reshape([-1])\n\n # if len(obj_inds) and len(bkg_inds):\n\n # # print('obj_inds', obj_inds.shape)\n # # print('bkg_inds', bkg_inds.shape)\n # # print('codes_flat', codes_flat.shape)\n\n # linclass_loss = self.linclassnet(\n # code_vec, obj_inds, bkg_inds, self.summ_writer)\n\n # # print('feat_memR', feat_memR.shape)\n # # print('mask_memR', mask_memR.shape)\n \n # total_loss += linclass_loss\n \n self.summ_writer.summ_scalar('loss', total_loss.cpu().item())\n return total_loss, results, False\n\n def run_test(self, feed):\n results = dict()\n\n global_step = feed['global_step']\n total_loss = torch.tensor(0.0).cuda()\n\n __p = lambda x: utils_basic.pack_seqdim(x, self.B)\n __u = 
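[editor's note] The __p/__u lambdas used throughout wrap utils_basic.pack_seqdim/unpack_seqdim, which are not shown. Given how every call merges and restores the (batch, sequence) dims, they would have to look roughly like this sketch:

import torch

def pack_seqdim(x, B):
    # (B, S, ...) -> (B*S, ...): fold the sequence dim into the batch dim
    shape = list(x.shape)
    return x.reshape([shape[0] * shape[1]] + shape[2:])

def unpack_seqdim(x, B):
    # (B*S, ...) -> (B, S, ...): undo pack_seqdim for a known batch size B
    shape = list(x.shape)
    return x.reshape([B, shape[0] // B] + shape[1:])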
lambda x: utils_basic.unpack_seqdim(x, self.B)\n\n test_linmatch = True\n\n if test_linmatch:\n\n # use inputs pre-registered to R\n feat_memRs = torch.cat([\n self.occ_memRs,\n self.unp_memRs*self.occ_memRs,\n ], dim=2)\n _, feat_memRs_ = self.featnet3D(\n __p(feat_memRs),\n self.summ_writer,\n )\n feat_memRs = __u(feat_memRs_)\n feat_memR = torch.mean(feat_memRs, dim=1)\n self.summ_writer.summ_feat('3D_feats/feat_memR', feat_memR, pca=True)\n # overwrite feat_memR with its quantized version\n _, feat_memR, code_vox = self.vq3drgbnet(\n feat_memR,\n None,\n )\n self.summ_writer.summ_lrtlist('obj/boxlist_g', self.rgb_camRs[:,0], self.lrtlist_camRs[:,0],\n self.scorelist_s[:,0], self.tidlist_s[:,0], self.pix_T_cams[:,0])\n masklist_memR = self.vox_util.assemble_padded_obj_masklist(\n self.lrtlist_camRs[:,0], self.scorelist_s[:,0], self.Z4, self.Y4, self.X4, coeff=1.2)\n mask_memR = torch.sum(masklist_memR, dim=1)\n print('mask_memR', mask_memR.shape)\n self.summ_writer.summ_oned('obj/mask_memR', mask_memR.clamp(0, 1), bev=True)\n\n occ_memRs = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camRs), self.Z4, self.Y4, self.X4))\n occ_memR = torch.sum(occ_memRs, dim=1).clamp(0,1)\n\n obj_memR = (mask_memR * occ_memR).view(-1)\n bkg_memR = ((1.0 - mask_memR) * occ_memR).view(-1)\n obj_inds = torch.nonzero(obj_memR, as_tuple=False)\n obj_inds = obj_inds.detach().cpu().numpy()\n bkg_inds = torch.nonzero(bkg_memR, as_tuple=False)\n bkg_inds = bkg_inds.detach().cpu().numpy()\n print('%d obj_inds; %d bkg_inds' % (len(obj_inds), len(bkg_inds)))\n # input()\n\n # print('feat_memR', feat_memR.shape)\n # print('mask_memR', mask_memR.shape)\n \n codes_flat = np.reshape(code_vox.detach().cpu().numpy(), [-1])\n mean_acc, mean_pool_size, num_codes_w_20 = utils_eval.linmatch(self.labelpools, obj_inds, bkg_inds, codes_flat)\n utils_misc.add_loss('vq3drgbnet/mean_acc', 0.0, mean_acc, 0.0, self.summ_writer)\n utils_misc.add_loss('vq3drgbnet/mean_pool_size', 0.0, mean_pool_size, 0.0, self.summ_writer)\n utils_misc.add_loss('vq3drgbnet/num_codes_w_20', 0.0, num_codes_w_20, 0.0, self.summ_writer)\n return total_loss, None, False\n\n def forward(self, feed):\n \n set_name = feed['set_name']\n \n # if set_name=='moc2D_init':\n # self.prepare_common_tensors(feed, prep_summ=False)\n # return self.prep_neg_emb2D(feed)\n \n # if set_name=='moc3D_init':\n # self.prepare_common_tensors(feed, prep_summ=False)\n # return self.prep_neg_emb3D(feed)\n\n ok = self.prepare_common_tensors(feed)\n if not ok:\n total_loss = torch.tensor(0.0).cuda()\n return total_loss, None, True\n else:\n if set_name=='train':\n return self.run_train(feed)\n elif set_name=='test':\n return self.run_test(feed)\n\n # # arriving at this line is bad\n # print('weird set_name:', set_name)\n # assert(False)\n","sub_path":"pytorch_disco_recovery/model_carla_ml.py","file_name":"model_carla_ml.py","file_ext":"py","file_size_in_byte":21484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"319533092","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Model, Input\nfrom keras.layers import Conv2D, Flatten, Dropout\nfrom keras.layers import Dense, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.datasets import cifar10\npath = 'C:/Users/bitcamp/Desktop/서누/Review/Model/'\nos.chdir(path)\n\nmodel_path = path + 'CNN-{epoch:02d}-{val_loss:.4f}.hdf5'\ncp = ModelCheckpoint(filepath = model_path, 
monitor = 'val_loss',\n save_best_only = True, mode = 'auto')\nes = EarlyStopping(monitor = 'val_loss', mode = 'min', patience = 10)\n\n## 데이터\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint(x_train.shape) # (50000, 32, 32, 3)\nprint(x_test.shape) # (10000, 32, 32, 3)\nprint(y_train.shape) # (50000, 1)\nprint(y_test.shape) # (10000, 1)\n\n## 정규화\nx_train = x_train.astype('float32') / 255 # Dense 모델은 2차원 와꾸를 받는다\nx_test = x_test.astype('float32') / 255 # Dense 모델은 2차원 와꾸를 받는다\nprint(x_train.shape) # (50000, 32, 32, 3)\nprint(x_test.shape) # (10000, 32, 32, 3)\n\n## 레이블 범주화\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nprint(y_train.shape) # (50000, 10)\nprint(y_test.shape) # (10000, 10)\n\n## 모델링\ninput1 = Input(shape = (32, 32, 3))\nx1 = Conv2D(filters = 32, kernel_size = (3, 3),\n padding = 'same', activation = 'relu')(input1)\nx1 = Conv2D(filters = 32, kernel_size = (3, 3),\n padding = 'same', activation = 'relu')(x1)\nx1 = MaxPooling2D(pool_size = (3, 3))(x1)\nx1 = Dropout(rate = 0.2)(x1)\nx1 = Conv2D(filters = 16, kernel_size = (3, 3),\n padding = 'same', activation = 'relu')(x1)\nx1 = Conv2D(filters = 16, kernel_size = (3, 3),\n padding = 'same', activation = 'relu')(x1)\nx1 = MaxPooling2D(pool_size = (3, 3))(x1)\nx1 = Dropout(rate = 0.2)(x1)\nx1 = Flatten()(x1)\nx1 = Dense(10, activation = 'relu')(x1)\noutput1 = Dense(10, activation = 'softmax')(x1)\n\nmodel = Model(inputs = input1, outputs = output1)\nmodel.summary()\n\n## 컴파일 및 훈련\nmodel.compile(loss = 'categorical_crossentropy',\n metrics = ['accuracy'],\n optimizer = 'adam')\nhist = model.fit(x_train, y_train, verbose = 1,\n epochs = 100, batch_size = 128,\n validation_split = 0.2, callbacks = [es, cp])\nprint(hist.history.keys())\n\n## 모델 평가\nres = model.evaluate(x_test, y_test, batch_size = 128)\nprint('loss : ', res[0])\nprint('acc : ', res[1])\n\n## 훈련 과정 시각화\nplt.figure(figsize = (10, 6))\nplt.subplot(2, 1, 1)\nplt.plot(hist.history['loss'], marker = '.', c = 'red', label = 'loss')\nplt.plot(hist.history['val_loss'], marker = '.', c = 'blue', label = 'val_loss')\nplt.grid()\nplt.title('Loss & val_Loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(loc = 'best')\n\nplt.subplot(2, 1, 2)\nplt.plot(hist.history['accuracy'], marker = '.', c = 'violet', label = 'acc')\nplt.plot(hist.history['val_accuracy'], marker = '.', c = 'green', label = 'val_acc')\nplt.grid()\nplt.title('Acc & val_Acc')\nplt.ylabel('acc')\nplt.xlabel('epoch')\nplt.ylim(0, 1)\nplt.legend(loc = 'best')\nplt.show()","sub_path":"self_study/Self_Review/keras/(re)keras63_cifar10_cnn.py","file_name":"(re)keras63_cifar10_cnn.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"434451732","text":"import json\nfrom PIL import Image\nimport base64\nimport os\n\nbasedir = \"/home/gosha20777/files/datasets/sunflower_data/images/train\"\nfiles = os.listdir(basedir)\n\nfor file_name in files:\n if 'json' not in file_name:\n continue\n\n print(file_name)\n base_mane = os.path.basename(file_name).split('.')[0]\n json_name = os.path.join(basedir, file_name)\n img_name = os.path.join(basedir, f'{base_mane}.jpg')\n \n res_points = []\n text = open(json_name).read()\n img = Image.open(img_name)\n w, h = img.size\n print(h, w)\n with open(img_name, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read())\n\n res = {\n \"version\": \"4.2.9\",\n \"flags\": {},\n \"shapes\": [],\n \"imagePath\": 
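[editor's note] Because the ModelCheckpoint callback in the CIFAR-10 script above writes the best weights to disk, a later session can restore them without retraining. The filename below is hypothetical (it depends on the epoch and val_loss at save time, per the 'CNN-{epoch:02d}-{val_loss:.4f}.hdf5' pattern):

from keras.models import load_model

best = load_model('CNN-37-0.8421.hdf5')  # hypothetical checkpoint name
loss, acc = best.evaluate(x_test, y_test, batch_size=128)
print('restored model acc:', acc)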
\"..\\\\20190731_092735.jpg\",\n \"imageData\": encoded_string.decode()\n }\n input_json = json.loads(text)\n\n for polygon in input_json:\n res_points = []\n points = polygon[\"data\"]\n\n if len(points) < 3:\n print('incorrect polygon')\n continue\n for point in points:\n x = h - point['x'] * h\n y = point['y'] * w\n p_list = []\n p_list.append(y)\n p_list.append(x)\n res_points.append(p_list)\n \n shape = {\n \"label\": \"1\",\n \"points\": res_points,\n \"group_id\": None,\n \"shape_type\": \"polygon\",\n \"flags\": {}\n }\n res['shapes'].append(shape)\n \n json.dump(res, open(json_name, 'w'))\n print('ok')","sub_path":"utils/toloka2labelme.py","file_name":"toloka2labelme.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"379862294","text":"# encoding:UTF-8\n#import urllib.request\nfrom urllib import request\n\nurlst = [ \"https://www.yandex.com/\", \"http://www.baidu.com\", \"http://www.1688.com\"]\nuf = [2]\nopenre = request.urlopen(urlst[2])\ndata = openre.read()\ntry:\n data = data.decode('UTF-8')\n Dco = data.encode( encoding = 'gbk', errors = 'backslashreplace' ) \nexcept Exception as e:\n print(e)\n Dco = 'Error'\nF = open( 'recordt.txt', 'w' )\n#UnicodeEncodeError:gbk codec can’t encode character ...\n# to avoid the above case ,use the function encode with parameter errors = 'backslashreplace'\nprint( Dco )\nF.write( str(Dco) )\nF.close()\n","sub_path":"JN_1.py","file_name":"JN_1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"554139113","text":"import yfinance as yf\nimport pandas as pd\nfrom importer import clean_frame\n\n\ntickerSymbol = 'TSLA'\n\ntickerData = yf.Ticker(tickerSymbol)\n\ntickerDF = tickerData.history(period='1d', start='2010-1-1', end='2021-6-7')\n\nprint('Do you want to update an existing file?. If you want to update introduce yes. 
\\n')\nupdate = input('Do you want to update?: ')\n\nif update == 'yes':\n tickerDF.to_csv(\"file_path\", mode='a')\n tickerDF = pd.read_csv(\"file_path\", parse_dates=[0], index_col=['Date'])\n clean_frame(tickerDF)\n tickerDF.to_csv(\"file_path\", mode='w')\n\nelse:\n tickerDF.to_csv(\"file path\", mode='w')","sub_path":"stock_importer.py","file_name":"stock_importer.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"281949874","text":"#Importing Dependencies\nimport pandas as pd\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport re\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nimport xgboost as xgb\nfrom sklearn.metrics import f1_score, classification_report, accuracy_score,confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nimport pickle\n\n#Reading the file\ndf = pd.read_csv('concated.csv')\n\n#Droping the unwanted columns\ndf.drop(['HotelRatings','HotelAddress','HotelName','Unnamed: 0','Authorlocation','AuthorName'],axis=1,inplace=True)\ndf.head()\n#checking the null values\n(df.isnull().sum(axis=0)/len(df))*100\n\n#Droping columns with nulls with high percentage\ndf.drop(['Sleep Quality','Business service (e.g., internet access)','Check in / front desk'],axis=1,inplace=True)\ndf['Overall'] =df['Overall'].astype('str')\n\n#Droping over all rating with null values\ndf.drop(df[df.Overall == '0.0'].index, inplace = True) \ndf.Overall.value_counts()\ndf[\"ReviewDetails\"]=df['title']+\" \"+df[\"Review\"]\ndf.head()\n#Removing Stop words and performing Stemming\nstemmer = PorterStemmer()\nwords=stopwords.words(\"english\")\ndf['cleaned'] = df['ReviewDetails'].apply(lambda x: \" \".join([stemmer.stem(i) for i in re.sub(\"[^a-zA-Z]\", \" \", x).split() if i not in words]).lower())\ndf['cleaned'].head()\ndf['Cleaned_with_price'] = df['cleaned']+\" \"+df['HotelPrice']\ndf['Cleaned_with_price'].to_pickle(\"cleaned.pkl\")\ndf_cleaned=pd.read_pickle(\"cleaned.pkl\")\n\n# #### Splitting to test and train \nX_train, X_test, Y_train, Y_test = train_test_split(df_cleaned,df['Overall'], test_size = 0.10, random_state = 42)\n#TF-IDF on Training set\nvectorizer=TfidfVectorizer(min_df=0.1,stop_words=\"english\",sublinear_tf=True,ngram_range=(1,1),norm='l2')\nfinal_features_train = vectorizer.fit(X_train)\nfinal_features_train = vectorizer.transform(X_train)\nfinal_features_train.shape\n#TF-IDF on Test set\nfinal_features_test = vectorizer.transform(X_test)\nfinal_features_test.shape\n\n# Save the vectorizer as a pickle string\nwith open('vectorizer.pk', 'wb') as fin:\n pickle.dump(vectorizer, fin)\n\n# ## Grid Search to avoid Overfitting\nRANDOM_STATE = 100\nparam_test = {\n'max_depth':[4,5,6,10,15,20,25,30,35,50,100],\n'learning_rate': [0.0001, 0.001, 0.01,0.5]\n}\n\ngsearch2 = GridSearchCV(estimator = xgb.XGBClassifier(learning_rate=0.1, n_estimators=1, max_depth=5, gamma=0, subsample=0.8, colsample_bytree=0.8,objective= 'multi:softmax', num_class = 6 ,scale_pos_weight=1), \nparam_grid = param_test, scoring='accuracy', cv=5)\ngsearch2.fit(final_features_train,Y_train)\ngsearch2.best_params_ , gsearch2.best_score_\n\n# # Training and testing XG Boost Model\nxgb_model = xgb.XGBClassifier(max_depth=50, random_state=RANDOM_STATE,\nlearning_rate=0.01, colsample_bytree=.7, gamma=0, alpha=0,objective='multi:softmax', 
eta=0.3,subsample=0.8)\nxgb_model.fit(final_features_train, Y_train)\n\n# Save the trained model as a pickle string.\nwith open('model.pk', 'wb') as fin:\n saved_model = pickle.dump(xgb_model, fin)\n#saved_model = pickle.dumps(xgb_model) \n# Load the pickled model \nxgb_model_from_pickle = pickle.loads(saved_model) \nxgb_prediction = xgb_model_from_pickle.predict(final_features_train)\nxgb_prediction = xgb_model_from_pickle.predict(final_features_test)\n\n## Model Evaluation\n## Training Score\nprint('training score:', f1_score(Y_train, xgb_prediction, average='macro'))\nprint(classification_report(Y_train, xgb_model.predict(final_features_train)))\n\n# Testing Score\nprint('validation score:', f1_score(Y_test, xgb_prediction, average='macro'))\nprint(classification_report(Y_test, xgb_prediction))\naccuracy = accuracy_score(Y_test, xgb_prediction)\naccuracy\n\n#Confusion Matrix\nconfusion_matrix(Y_test, xgb_prediction)\n\n\n\n","sub_path":"deployment - docker/Multi-text classification using XgBoost.py","file_name":"Multi-text classification using XgBoost.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"492365289","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\n\r\n\"\"\"\r\n多外键关联,一对多\r\n\"\"\"\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import Column,Integer,String,DateTime,ForeignKey,DATE\r\nfrom sqlalchemy.orm import sessionmaker,relationship\r\n\r\n#declared base class,create engine\r\nBase = declarative_base()\r\nengine = create_engine('mysql://root:123456@192.168.2.210/oldboydb')\r\n\r\nclass Customer(Base):\r\n __tablename__ = \"customer\"\r\n id = Column(Integer,primary_key=True)\r\n name = Column(String(64))\r\n\r\n billing_address_id = Column(Integer,ForeignKey(\"address.id\"))\r\n shipping_address_id = Column(Integer,ForeignKey(\"address.id\"))\r\n\r\n billing_address = relationship(\"Address\",foreign_keys=[billing_address_id])\r\n shipping_address = relationship(\"Address\",foreign_keys=[shipping_address_id])\r\n\r\nclass Address(Base):\r\n __tablename__ = \"address\"\r\n id = Column(Integer,primary_key=True)\r\n street = Column(String(32))\r\n city = Column(String(32))\r\n state = Column(String(32))\r\n\r\n def __repr__(self):\r\n return self.street\r\n#Base.metadata.create_all(engine) #创建表结构,如果表存在就不会创建","sub_path":"数据库/orm_many_fk.py","file_name":"orm_many_fk.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"280468859","text":"# -*- codeing = utf-8 -*-\n# -*- coding: utf-8 -*-\n# @Time : 2021/3/25 22:48\n# @Author : 谭洋 202031001951\n# @Email : 1767774755@qq.com\n# @File : get_shapes.py\n# @Software: PyCharm\n\nimport cv2\n\nimg=cv2.imread(\"img.jpeg\")\ngry_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nimg_Blur = cv2.GaussianBlur(gry_img,(5,5),2)\nimg_canny = cv2.Canny(img_Blur,20,20)\nimg_Contours=img.copy()\ndef getcontours(img):\n pass\n contours,hierarchy=cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)#contours表示找到的轮廓对象\n\n for cnt in contours:\n area = cv2.contourArea(cnt)\n print(area)\n if area>300: #除噪 过滤掉小于面积300的检测图形\n cv2.drawContours(img_Contours,cnt,-1,(255,0,0),2)\n peri=cv2.arcLength(cnt,True)#True 表示闭合\n approx = cv2.approxPolyDP(cnt,0.02*peri,True) #图形逼近算法检测拐点数字越小精度越高\n print(len(approx))\n objCor = len(approx)\n\n x, y, w, h =cv2.boundingRect(approx) #approx 是一张图片(二值图) 
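[editor's note] One bug worth flagging in the XGBoost script above: pickle.dump returns None, so pickle.loads(saved_model) would raise. The conventional round trip reloads from the files that were just written; a sketch of scoring new reviews with the persisted vectorizer and model:

import pickle

with open('vectorizer.pk', 'rb') as f:
    vec = pickle.load(f)
with open('model.pk', 'rb') as f:
    clf = pickle.load(f)

def predict_rating(reviews):
    # apply the fitted TF-IDF vocabulary, then the trained XGBoost classifier
    return clf.predict(vec.transform(reviews))

print(predict_rating(['great hotel, friendly staff']))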
返回他的左上角坐标,和宽高\n cv2.rectangle( img_Contours , (x , y) , (x + w , y + h) , (0 , 0 , 255) , 2 )#用最小的矩形框选出图像\n if objCor ==3:objectType=\"tri\"\n elif objCor == 4:\n aspRatio = w/float(h)\n if aspRatio<1.2 and aspRatio > 0.8:objectType=\"Square\"\n else:objectType=\"Rectangle\"\n elif objCor>4:objectType=\"Circle\"\n else:objectType=\"None\"\n cv2.putText(img_Contours,objectType,(x+(w//2)-20,y+(h//2)-5),\n cv2.FONT_ITALIC,0.5,(0,0,0),1)\n\ngetcontours(img_canny)\n\ncv2.imshow(\"img_canny\",img_canny)\ncv2.imshow(\"input\",img)\ncv2.imshow(\"gray\",gry_img)\ncv2.imshow(\"img_Blur\",img_Blur)\ncv2.imshow(\"img_Contours\",img_Contours)\ncv2.waitKey(0)\n\n\n\n\n","sub_path":"Opencv-python_Test/imRange_get_shapes/get_shapes.py","file_name":"get_shapes.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"501936798","text":"# Uses python3\nimport sys\n\ndef gcd_faster (a , b):\n remainder = 1\n while remainder != 0:\n if a >= b:\n big = a\n small = b\n remainder = big % small\n b = remainder\n a = small\n else:\n big = b\n small = a\n remainder = big % small\n a = remainder\n b = small\n return small\n\n\ndef lcm_naive(a, b):\n if a != 0 and b != 0:\n lcm = a * b / gcd_faster(a, b)\n else:\n lcm = 0\n return int(lcm)\n\n##if __name__ == '__main__':\n## input = sys.stdin.read()\n## a, b = map(int, input.split())\n## print(lcm_naive(a, b))\n\nif __name__ == '__main__':\n a, b = map(int, input().split())\n print(lcm_naive(a, b))\n","sub_path":"week2_algorithmic_warmup/4_least_common_multiple/lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"180536417","text":"# -*- coding: utf-8 -*-\n# Copyright (C) Shrujal Ambati(2019)\n#\n# This file is part of the LMXB package\n#\n# LMXB is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# progenitor is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with LMXB. 
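[editor's note] The hand-rolled remainder loop in the lcm record above reimplements Euclid's algorithm; the standard library already ships it (math.gcd, Python 3.5+), so the whole program reduces to a few lines with equivalent behavior, including the zero guard:

from math import gcd

def lcm(a, b):
    # lcm(a, b) * gcd(a, b) == a * b for positive integers
    return a * b // gcd(a, b) if a and b else 0

print(lcm(*map(int, input().split())))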
If not, see .\n\n__author__ = ['Shrujal Ambati']\n\nimport argparse\nimport Observed\nimport TrajTools\nimport numpy as np\nimport pickle \n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dt', type = float)\nparser.add_argument('--T', type = float)\nparser.add_argument('--n_runs', type = int)\nparser.add_argument('--sys_name', type = str)\n\nargs = parser.parse_args()\n\ntest_sys = Observed.LMXB_Sys(args.sys_name)\n\n#creates empty lists for Velocity/Position array and time array\nRarr, tarr = [], []\n\nfor run in range(args.n_runs):\n test_sys.setRandUVWXYZ()\n R_t, t = test_sys.getTrajectory(args.T, args.dt, test_sys.RK4)\n\n Rarr.append(R_t)\n tarr.append(t)\n\n\nwith open(args.sys_name + '_R_t', 'wb') as f:\n pickle.dump(Rarr, f)\n \nwith open(args.sys_name + '_t', 'wb') as f:\n pickle.dump(tarr, f)","sub_path":"SysTrajectory.py","file_name":"SysTrajectory.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"465949746","text":"# This file is part of tofbot, a friendly IRC bot.\n# You may redistribute it under the Simplified BSD License.\n# If we meet some day, and you think this stuff is worth it,\n# you can buy us a beer in return.\n#\n# Copyright (c) 2011 Etienne Millon \n# Martin Kirchgessner \nfrom toflib import Plugin\nfrom toflib import distance\nimport glob\nimport unidecode\nimport os\n\n\nclass PluginDassin(Plugin):\n\n def __init__(self, bot):\n super(PluginDassin, self).__init__(bot)\n self.songs = []\n here = os.path.dirname(os.path.realpath(__file__))\n for path in glob.glob('%s/dassin_data/*.txt' % here):\n self._load_song(path)\n\n def _load_song(self, path):\n with open(path) as f:\n song = [line.strip() for line in f]\n self.songs.append(song)\n\n def handle_msg(self, msg_text, chan, nick):\n\n searched = unidecode.unidecode(msg_text).lower()\n minDist = 9999999\n best = \"\"\n\n for song in self.songs:\n try:\n i = 0\n for line in song:\n dist = distance(line, searched)\n if dist < minDist:\n best = song[i + 1]\n minDist = dist\n i += 1\n except IndexError:\n pass\n\n if len(best) > 3 and minDist < (len(searched) / 3):\n self.say(best)\n","sub_path":"plugins/dassin.py","file_name":"dassin.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"585702134","text":"# 1. Make your API Key\n# 2. Python2/3 you can make API calls\n# 3. 
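[editor's note] The dassin plugin above imports distance from toflib, which is not shown. Given how it is used — smaller means a closer lyric match, thresholded against len(searched)/3 — a classic Levenshtein edit distance is a plausible stand-in:

def distance(a, b):
    # dynamic-programming Levenshtein distance, O(len(a) * len(b))
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]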
Json try separate elements into variables\n\n# import requests\n#\n#\n# r = requests.get(url='http://api.openweathermap.org/data/2.5/weather?q=delhi&appid=31af9666b4f84729a6fefbc21248b12a')\n# json_data= r.json()\n# print(json_data['weather'][0]['id'])\n\n\n# import tkinter\n#\n# m=tkinter.Tk(className='First window')\n# m.mainloop()\n\nimport datetime\nimport json\nimport urllib.request\n\n\ndef time_converter(time):\n converted_time = datetime.datetime.fromtimestamp(\n int(time)\n ).strftime('%I:%M %p')\n return converted_time\n\n\ndef url_builder(city_id):\n user_api = '31af9666b4f84729a6fefbc21248b12a' # Obtain yours form: http://openweathermap.org/\n unit = 'metric' # For Fahrenheit use imperial, for Celsius use metric, and the default is Kelvin.\n api = 'http://api.openweathermap.org/data/2.5/weather?id=' # Search for your city ID here: http://bulk.openweathermap.org/sample/city.list.json.gz\n\n full_api_url = api + str(city_id) + '&mode=json&units=' + unit + '&APPID=' + user_api\n return full_api_url\n# http://api.openweathermap.org/data/2.5/weather?id=1273294&mode=json&units=metric&APPID=31af9666b4f84729a6fefbc21248b12a\n\n\ndef data_fetch(full_api_url):\n url = urllib.request.urlopen(full_api_url)\n output = url.read().decode('utf-8')\n print(output)\n raw_api_dict = json.loads(output)\n print(raw_api_dict)\n url.close()\n return raw_api_dict\n\n\ndef data_organizer(raw_api_dict):\n data = dict(\n city=raw_api_dict.get('name'),\n country=raw_api_dict.get('sys').get('country'),\n temp=raw_api_dict.get('main').get('temp'),\n temp_max=raw_api_dict.get('main').get('temp_max'),\n temp_min=raw_api_dict.get('main').get('temp_min'),\n humidity=raw_api_dict.get('main').get('humidity'),\n pressure=raw_api_dict.get('main').get('pressure'),\n sky=raw_api_dict['weather'][0]['main'],\n sunrise=time_converter(raw_api_dict.get('sys').get('sunrise')),\n sunset=time_converter(raw_api_dict.get('sys').get('sunset')),\n wind=raw_api_dict.get('wind').get('speed'),\n wind_deg=raw_api_dict.get('deg'),\n dt=time_converter(raw_api_dict.get('dt')),\n cloudiness=raw_api_dict.get('clouds').get('all')\n )\n return data\n\n\ndef data_output(data):\n m_symbol = '\\xb0' + 'C'\n print('---------------------------------------')\n print('Current weather in: {}, {}:'.format(data['city'], data['country']))\n print(data['temp'], m_symbol, data['sky'])\n print('Max: {}, Min: {}'.format(data['temp_max'], data['temp_min']))\n print('')\n print('Wind Speed: {}, Degree: {}'.format(data['wind'], data['wind_deg']))\n print('Humidity: {}'.format(data['humidity']))\n print('Cloud: {}'.format(data['cloudiness']))\n print('Pressure: {}'.format(data['pressure']))\n print('Sunrise at: {}'.format(data['sunrise']))\n print('Sunset at: {}'.format(data['sunset']))\n print('')\n print('Last update from the server: {}'.format(data['dt']))\n print('---------------------------------------')\n\n\n\ntry:\n city_name= input(\"Enter a city name:\")\n data_output(data_organizer(data_fetch(url_builder(1273294))))\nexcept IOError:\n print('no internet')\n\n\n\n","sub_path":"14. Web-Api/weather_1.py","file_name":"weather_1.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"154210265","text":"\"\"\"Utility functions for schema to/from JSON.\"\"\"\nimport json\nimport typing\n\nfrom . 
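[editor's note] One loose end in the weather script above: city_name is read from the user, but url_builder is still called with a hardcoded city ID. OpenWeatherMap also accepts lookup by name via q= (the commented-out request at the top of the same file uses exactly that), so a by-name builder would look like:

def url_builder_by_name(city_name, user_api, unit='metric'):
    # same endpoint, but query by q=<city name> instead of id=<city id>
    api = 'http://api.openweathermap.org/data/2.5/weather'
    return '{}?q={}&mode=json&units={}&APPID={}'.format(api, city_name, unit, user_api)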
import types\nfrom .types import Node\n\n\ndef object_encode(node: typing.Any) -> typing.Union[dict, str]:\n \"\"\"Convert an Entity node to a dictionary\"\"\"\n if not isinstance(node, types.Entity):\n return str(node)\n\n node_dict = {\n \"type\": node.__class__.__name__\n }\n node_dict.update(node.__dict__)\n return node_dict\n\n\ndef from_dict(node_dict: dict) -> typing.Union[dict, Node]:\n \"\"\"Convert a dictionary to an Entity node (if it has a `type` item).\"\"\"\n if \"type\" not in node_dict:\n return node_dict\n\n node_type = node_dict.pop(\"type\")\n class_ = getattr(types, node_type, None)\n\n if class_ is None:\n return node_dict\n\n node_kwargs = {}\n\n for key, val in node_dict.items():\n if isinstance(val, dict):\n val = from_dict(val)\n elif isinstance(val, list):\n processed_list = []\n for sub_val in val:\n if isinstance(sub_val, dict):\n processed_list.append(from_dict(sub_val))\n else:\n processed_list.append(sub_val)\n val = processed_list\n\n node_kwargs[key] = val\n\n return class_(**node_kwargs)\n\n\ndef to_json(node: Node) -> str:\n \"\"\"Convert a node to JSON\"\"\"\n return json.dumps(node, default=object_encode, indent=2)\n\n\ndef from_json(serialized: str) -> typing.Union[dict, Node]:\n \"\"\"Convert JSON to a Node\"\"\"\n node = json.loads(serialized)\n return from_dict(node) if isinstance(node, dict) else node\n","sub_path":"py/stencila/schema/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"594469419","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 13 19:08:42 2020\n\n@author: offic\n\"\"\"\n\n\n\"\"\"\n Face Recongnition/Classification\n Find the in out database of known people who has the closest measurements to our test image,\n \n # Face Alignent\n -> face alignment plays an important role in face recognition,because it is often used as a per-processing step.\n \n -> the automatic detection of the face and eye region and align face based on translation,sclae,and rotation\n \n -> scaling means height and width\n -> Orientation means changing the angle\n # Feature Extraction\n Pass the detected and pre-Processed face into a pre-trained face recognizer\n that is capable of producing 128-D embedding\n \n # so openface library for face recognition\n \n Reusing the Image Face Detection Code\n \n Step 1: Load Sample Images and Extract the face Encodings\n \n # load a sample a picture and extract face encodings.\n # Return a list of 128- Dimensional face encodings\n # (one are getting the first face(assuming only one face ))\n \n # load next sample picture and extract face encodings.\n \n grayscale_image = face_recogntion.load_image_file(\"images/samples/gray_scale.jpg\")\n \n gray_scale_encoding = face_recognition_encodings(gray_scale_image)[0]\n \n Step 2: Create an array to save the encodings\n \n known_face_encodings = [lena_face_encoding,grayscale_encoding]\n Step 3: Create another array to held labels\n \n known_face_names =[\"Lena\",\"Gray Scale\"]\n \n \n \n\"\"\"\n#importing the required libraries\nimport cv2\nimport face_recognition\n\n#loading the image to detect\noriginal_image = cv2.imread('images/testing/irfan_unknown.jpg')\n\n#load the sample images and get the 128 face embeddings from them\nirfan_image = face_recognition.load_image_file('images/samples/irfan.jpg')\nirfan_face_encodings = face_recognition.face_encodings(irfan_image)[0]\n\n#irfan_image = 
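[editor's note] A quick round-trip illustrates the contract of the schema util above: any types.Entity subclass serializes with a "type" tag and from_json rebuilds the same class from it. The Person entity and its name kwarg are assumed for illustration; substitute any node class that types actually defines:

node = types.Person(name='Ada')          # assumed entity class, not confirmed by the source
restored = from_json(to_json(node))
assert isinstance(restored, types.Person)
assert restored.name == 'Ada'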
face_recognition.load_image_file('images/samples/irfan.jpg')\n#irfan_face_encodings = face_recognition.face_encodings(irfan_image)[0]\n\n#save the encodings and the corresponding labels in seperate arrays in the same order\nknown_face_encodings = [irfan_face_encodings]\nknown_face_names = [\"IRFAN\"]\n\n#load the unknown image to recognize faces in it\nimage_to_recognize = face_recognition.load_image_file('images/testing/irfan_unknown.jpg')\n#arguments are image,no_of_times_to_upsample, model # cnn is better to recognize face and accurate image scan but cnn is nural network and deep learning\nall_face_locations = face_recognition.face_locations(image_to_recognize,model='hog')\n#detect face encodings for all the faces detected\nall_face_encodings = face_recognition.face_encodings(image_to_recognize,all_face_locations)\n\n#print the number of faces detected\nprint('There are {} no of faces in this image'.format(len(all_face_locations)))\n\n#looping through the face locations and the face embeddings\nfor current_face_location,current_face_encoding in zip(all_face_locations,all_face_encodings):\n #splitting the tuple to get the four position values of current face\n top_pos,right_pos,bottom_pos,left_pos = current_face_location\n \n \n #find all the matches and get the list of matches\n all_matches = face_recognition.compare_faces(known_face_encodings, current_face_encoding)\n \n #string to hold the label\n name_of_person = 'Unknown face'\n \n #check if the all_matches have at least one item\n #if yes, get the index number of face that is located in the first index of all_matches\n #get the name corresponding to the index number and save it in name_of_person\n if True in all_matches:\n first_match_index = all_matches.index(True)\n name_of_person = known_face_names[first_match_index]\n \n #draw rectangle around the face \n \n cv2.rectangle(original_image,(left_pos,top_pos),(right_pos,bottom_pos),(255,0,0),2)\n #display the name as text in the image\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(original_image, name_of_person, (left_pos,bottom_pos), font, 0.5, (255,255,255),1)\n \n #display the image\n cv2.imshow(\"Faces Identified\",original_image)\n\n\n\n\n","sub_path":"Image_Face_Recognition.py","file_name":"Image_Face_Recognition.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"386035973","text":"# %load q01_feature_engineering/build.py\nimport pandas as pd\nimport numpy as np\n\npd.set_option('display.max_columns', 500)\nbattles = pd.read_csv('data/battles.csv')\ncharacter_predictions = pd.read_csv('data/character-predictions.csv')\n\ndef q01_feature_engineering(df_1, df_2):\n 'write your solution here'\n \n # initializing data with resp datasets\n data_1 = df_1\n data_2 = df_2\n \n # defining and assiging new columns\n data_1['attacker_count'] = data_1[['attacker_1', 'attacker_2', 'attacker_3', 'attacker_4']].sum(1)\n data_1['defender_count'] = data_1[['defender_1', 'defender_2', 'defender_3', 'defender_4']].sum(1)\n data_1['att_comm_count'] = data_1['attacker_commander'].map(lambda x: 0 if str(x) == 'nan' else len(str(x).split(',')))\n data_2['no_of_books'] = data_2[['book1', 'book2', 'book3', 'book4', 'book5']].sum(1)\n return(data_1, data_2)\n #print(data_2)\n\nq01_feature_engineering(battles, character_predictions)\n#df1= 
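[editor's note] compare_faces in the script above returns the first boolean hit; with several known people, ranking by embedding distance picks the closest match instead. A sketch using face_recognition.face_distance (a real API of the same library), with the 0.6 tolerance matching the library's usual default:

import numpy as np
import face_recognition

def best_match(known_encodings, known_names, encoding, tolerance=0.6):
    # distances are Euclidean in the 128-D embedding space; smaller is closer
    dists = face_recognition.face_distance(known_encodings, encoding)
    idx = int(np.argmin(dists))
    return known_names[idx] if dists[idx] <= tolerance else 'Unknown face'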
battles\n#df1.columns\n#df1.attacker_commander.value_counts()\n#df1['defender_3']\n\n\n","sub_path":"q01_feature_engineering/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"1866247","text":"#return largest connected component\ndef getLargestCC(segments):\n '''Return a mask corresponding to the largest object'''\n labels = label(segments.astype('int'),connectivity=1)\n largestCC = labels == np.argmax(np.bincount(labels.flat, weights=segments.flat))\n return largestCC\n\na=getLargestCC(binary)\n\nprint('Volume largest obj: '+str(np.count_nonzero(a)))\nprint('Volume dense phase: '+str(np.count_nonzero(binary)))\n","sub_path":"partII/solutions/ex08.py","file_name":"ex08.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"587875105","text":"from PySide.QtCore import *\nfrom PySide.QtGui import *\n\nfrom .painter import Painter\n\nclass InterlockingWidget(QWidget):\n\n\tdef __init__(self, parent, layout, bbox, interlocking):\n\t\tQWidget.__init__(self, parent)\n\t\tself._layout = layout\n\n\t\tself._interlocking = interlocking\n\t\tself._bbox = bbox\n\n\tdef paintEvent(self, e):\n\t\twith Painter(self) as qp:\n\t\t\tself.render(qp)\n\n\tdef render(self, qp):\n\t\tpen = QPen(QColor(255, 255, 255))\n\t\tpen.setWidth(0.5)\n\t\tqp.setPen(pen)\n\t\tqp.drawRect(self._bbox)\n\n\t\tf = QFont(\"Helvetica\", 12)\n\t\tf.setWeight(QFont.Bold)\n\t\tf.setStyleHint(QFont.SansSerif)\n\t\tf.setHintingPreference(QFont.HintingPreference.PreferNoHinting)\n\t\tqp.setFont(f)\n\t\tpoint = self._bbox.topLeft()\n\t\tpoint.setY(point.y() - 8)\n\t\tqp.drawText(point, self._interlocking.id)\n","sub_path":"s3client/interlocking_widget.py","file_name":"interlocking_widget.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"574579966","text":"from socket import *\nfrom time import ctime\nfrom testDataRead import forecastResult\n\nimport struct\n\nHOST = \"127.0.0.1\"\nPORT = 2341\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\ntcpSerSock = socket(AF_INET, SOCK_STREAM)\ntcpSerSock.bind(ADDR) # 绑定IP地址和端口号s\ntcpSerSock.listen(5) # 监听,使得主动变为被动\n\nrecvdata=\"testData.csv 航空货物运输合同纠纷.csv\"\nfilenamearrary = recvdata.split(\" \")\nprint(filenamearrary[0],filenamearrary[1])\nresultValue = forecastResult(\"testData.csv\", \"航空货物运输合同纠纷.csv\")\n\nwhile True:\n print('正在等待连接....')\n tcpCliSock, addr = tcpSerSock.accept() # 当来新的连接时,会产生一个的新的套接字为客户端服务\n # print('收到来自%d的连接.....tcpSerSock..' 
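[editor's note] The getLargestCC snippet above calls label without importing it or numpy. A self-contained version, assuming the skimage.measure.label it appears to rely on:

import numpy as np
from skimage.measure import label  # assumed source of `label`

def get_largest_cc(segments):
    # label components with 1-connectivity, then keep the label whose
    # foreground-weighted bincount is largest (background weighs 0, so it never wins)
    labels = label(segments.astype(int), connectivity=1)
    return labels == np.argmax(np.bincount(labels.flat, weights=segments.flat))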
% addr)\n\n while True:\n data = tcpCliSock.recv(BUFSIZ) # 接受数据,缓存区设置为1kb\n print(data)\n if not data:\n break\n # tcp 发送数据格式 \"testnamefile trainnamefile\"\n data = str(data)\n filenamearrary = data.split(\" \")\n pathname=\"/home/work/\"\n # resultValue = forecastResult( pathname + filenamearrary[0] , pathname + filenamearrary[1]) #path为服务器下存储的关键词库的路径\n tcpCliSock.send(struct.pack(resultValue)) # 加上时间戳,并对数据编码\n tcpCliSock.close()\n\n# tcpSerSock.close()","sub_path":"nbpy_NLPwiners/PYReadWriteExcelDemo-master/PYReadWriteExcelDemo-master/forecastServer.py","file_name":"forecastServer.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"470054553","text":"# libs\nimport os\nimport os.path\nimport pprint\n\n# fabric\nfrom fabric.api import abort, env, task\nfrom fabric.contrib.console import confirm\n\n# local\nimport amazon\nimport azure\nimport config\nimport google\n\ncfg = config.Config()\nproviders = {\n \"amazon\": amazon.Provider(cfg),\n \"azure\": azure.Provider(cfg),\n \"google\": google.Provider(cfg),\n}\n\npp = pprint.PrettyPrinter(indent=2)\nenv.key_filename = [\n os.path.dirname(os.path.realpath(__file__)) + \"/keys/amazon.pem\",\n os.path.dirname(os.path.realpath(__file__)) + \"/keys/google.pem\",\n os.path.dirname(os.path.realpath(__file__)) + \"/keys/azure.pem\",\n]\nenv.password = \"NOTUSED\"\n\n@task\ndef create_database(name):\n database = cfg.get_database(name)\n if not database:\n abort(\"`%s` is not in the config file\" % name)\n\n if database[\"provider\"] in providers:\n providers[database[\"provider\"]].create_database(name)\n else:\n abort(\"provider `%s` not implemented\" % (database[\"provider\"]))\n\n@task\ndef create_environment(name):\n environment = cfg.get_environment(name)\n if not environment:\n abort(\"`%s` is not in the config file\" % name)\n\n # create all the databases first\n if \"databases\" in environment:\n for dn, d in environment[\"databases\"].iteritems():\n create_database(dn)\n\n if \"machines\" in environment:\n # create all the web servers second\n for mn, m in environment[\"machines\"].iteritems():\n if \"web\" in m[\"roles\"]:\n create_instance(mn)\n\n # finally create all the other servers\n for mn, m in environment[\"machines\"].iteritems():\n if \"web\" not in m[\"roles\"]:\n create_instance(mn)\n\n\n@task\ndef create_instance(name):\n machine = cfg.get_machine(name)\n if not machine:\n abort(\"`%s` is not in the config file\" % name)\n\n if machine[\"provider\"] in providers:\n providers[machine[\"provider\"]].create_instance(name)\n else:\n abort(\"provider `%s` not implemented\" % (machine[\"provider\"]))\n\n@task\ndef destroy_database(name):\n database = cfg.get_database(name)\n if not database:\n abort(\"`%s` is not in the config file\" % name)\n\n if database[\"provider\"] in providers:\n providers[database[\"provider\"]].destroy_database(name)\n else:\n abort(\"provider `%s` not implemented\" % (database[\"provider\"]))\n\n@task\ndef destroy_instance(name):\n cfg = config.Config()\n machine = cfg.get_machine(name)\n if not machine:\n abort(\"`%s` is not in the config file\" % name)\n\n if machine[\"provider\"] in providers:\n providers[machine[\"provider\"]].destroy_instance(name)\n else:\n abort(\"provider `%s` not implemented\" % 
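[editor's note] struct.pack(resultValue) in the socket server's reply path above would raise: pack needs a format string first. If the prediction is a single float, a working exchange would be the following (the '!d' format choice is an assumption about the payload):

import struct

payload = struct.pack('!d', float(resultValue))   # 8-byte big-endian double
# ... tcpCliSock.send(payload) on the server side, then on the receiver:
value, = struct.unpack('!d', payload)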
(machine[\"provider\"]))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"370378142","text":"import mock\nimport six\n\nfrom django_mailbox.tests.base import EmailMessageTestCase\nfrom django_mailbox.transports import ImapTransport, Pop3Transport\n\n\nclass TestImapTransport(EmailMessageTestCase):\n def setUp(self):\n self.arbitrary_hostname = 'one.two.three'\n self.arbitrary_port = 100\n self.ssl = False\n self.transport = ImapTransport(\n self.arbitrary_hostname,\n self.arbitrary_port,\n self.ssl\n )\n self.transport.server = None\n super(TestImapTransport, self).setUp()\n\n def test_get_email_message(self):\n with mock.patch.object(self.transport, 'server') as server:\n server.search.return_value = (\n 'OK',\n [\n 'One', # This is totally arbitrary\n ]\n )\n server.fetch.return_value = (\n 'OK',\n [\n [\n '1 (RFC822 {8190}', # Wat?\n self._get_email_as_text('generic_message.eml')\n ],\n ')',\n ]\n )\n\n actual_messages = list(self.transport.get_message())\n\n self.assertEqual(len(actual_messages), 1)\n\n actual_message = actual_messages[0]\n expected_message = self._get_email_object('generic_message.eml')\n\n self.assertEqual(expected_message, actual_message)\n\n\n\nclass TestPop3Transport(EmailMessageTestCase):\n def setUp(self):\n self.arbitrary_hostname = 'one.two.three'\n self.arbitrary_port = 100\n self.ssl = False\n self.transport = Pop3Transport(\n self.arbitrary_hostname,\n self.arbitrary_port,\n self.ssl\n )\n self.transport.server = None\n super(TestPop3Transport, self).setUp()\n\n def test_get_email_message(self):\n with mock.patch.object(self.transport, 'server') as server:\n # Consider this value arbitrary, the second parameter\n # should have one entry per message in the inbox\n server.list.return_value = [None, ['some_msg']]\n server.retr.return_value = [\n '+OK message follows',\n [\n line.encode('ascii')\n for line in self._get_email_as_text(\n 'generic_message.eml'\n ).decode('ascii').split('\\n')\n ],\n 10018, # Some arbitrary size, ideally matching the above\n ]\n\n actual_messages = list(self.transport.get_message())\n\n self.assertEqual(len(actual_messages), 1)\n\n actual_message = actual_messages[0]\n expected_message = self._get_email_object('generic_message.eml')\n\n self.assertEqual(expected_message, actual_message)\n","sub_path":"django_mailbox/tests/test_transports.py","file_name":"test_transports.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"51331239","text":"# Copyright 2020 New Relic, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport pytest\nfrom newrelic_telemetry_sdk.metric import GaugeMetric, CountMetric, SummaryMetric\nfrom newrelic_telemetry_sdk.metric_batch import MetricBatch\n\n\nclass VerifyLockMetricBatch(MetricBatch):\n \"\"\"Verify sensitive attributes are accessed / 
assigned under lock\n\n These attributes are sensitive and should only be accessed under lock.\n NOTE: It doesn't guarantee that the values returned are only modified under\n lock; however, this provides some level of checking.\n \"\"\"\n\n @property\n def _batch(self):\n assert self._lock.locked()\n return self._internal_batch\n\n @_batch.setter\n def _batch(self, value):\n if hasattr(self, \"_internal_batch\"):\n assert self._lock.locked()\n self._internal_batch = value\n\n @property\n def _interval_start(self):\n assert self._lock.locked()\n return self._internal_interval_start\n\n @_interval_start.setter\n def _interval_start(self, value):\n if hasattr(self, \"_internal_interval_start\"):\n assert self._lock.locked()\n self._internal_interval_start = value\n\n @property\n def _common(self):\n return self._internal_common\n\n @_common.setter\n def _common(self, value):\n # This attribute should never be assigned\n assert not hasattr(self, \"_internal_common\")\n self._internal_common = value\n\n\n@pytest.mark.parametrize(\"tags\", (None, {\"foo\": \"bar\"}))\ndef test_create_identity(tags):\n metric = GaugeMetric(\"name\", 1000, tags=tags)\n\n expected_tags = frozenset(tags.items()) if tags else None\n identity = MetricBatch.create_identity(metric)\n assert len(identity) == 3\n assert identity[0] is GaugeMetric\n assert identity[1] == \"name\"\n assert identity[2] == expected_tags\n\n\n@pytest.mark.parametrize(\n \"metric_a, metric_b, expected_value\",\n (\n (GaugeMetric(\"name\", 1), GaugeMetric(\"name\", 2), 2),\n (CountMetric(\"name\", 1), CountMetric(\"name\", 2), 3),\n (\n SummaryMetric.from_value(\"name\", 1),\n SummaryMetric.from_value(\"name\", 2),\n {\"count\": 2, \"max\": 2, \"min\": 1, \"sum\": 3},\n ),\n ),\n)\ndef test_merge_metric(metric_a, metric_b, expected_value):\n batch = VerifyLockMetricBatch()\n\n batch.record(metric_a)\n batch.record(metric_b)\n\n assert metric_a.start_time_ms\n assert metric_b.start_time_ms\n\n assert len(batch._internal_batch) == 1\n _, metric = batch._internal_batch.popitem()\n\n assert metric.name == \"name\"\n assert metric.value == expected_value\n assert \"timestamp\" not in metric\n\n\n@pytest.mark.parametrize(\n \"metric_a, metric_b\",\n (\n (GaugeMetric(\"name\", 1), CountMetric(\"name\", 1)),\n (GaugeMetric(\"foo\", 1), GaugeMetric(\"bar\", 1)),\n (GaugeMetric(\"foo\", 1, {\"foo\": 1}), GaugeMetric(\"foo\", 1, {\"foo\": 2})),\n ),\n)\ndef test_different_metric(metric_a, metric_b):\n batch = VerifyLockMetricBatch()\n\n batch.record(metric_a)\n batch.record(metric_b)\n\n assert len(batch._internal_batch) == 2\n\n\n@pytest.mark.parametrize(\"tags\", (None, {\"foo\": \"bar\"},))\ndef test_flush(monkeypatch, tags):\n metric = GaugeMetric(\"name\", 1)\n\n DELTA = 4.0\n current_t = [1.0]\n\n def _time():\n # Move time forward by DELTA on every call\n current_t[0] *= DELTA\n return current_t[0]\n\n monkeypatch.setattr(time, \"time\", _time, raising=True)\n\n batch = VerifyLockMetricBatch(tags)\n batch.record(metric)\n\n # Timestamp starts at 4\n assert batch._internal_interval_start == 4000\n\n metrics, common = batch.flush()\n\n assert len(metrics) == 1\n\n assert common[\"timestamp\"] == 4000\n assert common[\"interval.ms\"] == 12000\n if tags:\n assert common[\"attributes\"] == tags\n else:\n assert \"attributes\" not in common\n\n # Verify internal state is updated\n assert batch._internal_interval_start > 0\n assert batch._internal_batch == {}\n\n # Verify that we don't return the same objects twice\n assert batch.flush()[1] is not 
common\n","sub_path":"tests/test_metric_batch.py","file_name":"test_metric_batch.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"144097927","text":"import json\nimport requests\n\nfrom middlewared.alert.base import ProThreadedAlertService, ellipsis\nfrom middlewared.schema import Dict, Str\n\n\nclass OpsGenieAlertService(ProThreadedAlertService):\n title = \"OpsGenie\"\n\n schema = Dict(\n \"opsgenie_attributes\",\n Str(\"cluster_name\"),\n Str(\"api_key\"),\n )\n\n def create_alert(self, alert):\n r = requests.post(\n \"https://api.opsgenie.com/v2/alerts\",\n headers={\"Authorization\": f\"GenieKey {self.attributes['api_key']}\",\n \"Content-type\": \"application/json\"},\n data=json.dumps({\n \"message\": ellipsis(alert.formatted, 130),\n \"alias\": self._alert_id(alert),\n \"description\": ellipsis(alert.formatted, 15000),\n }),\n timeout=15,\n )\n r.raise_for_status()\n\n def delete_alert(self, alert):\n r = requests.delete(\n \"https://api.opsgenie.com/v2/alerts/\" + self._alert_id(alert),\n params={\"identifierType\": \"alias\"},\n headers={\"Authorization\": f\"GenieKey {self.attributes['api_key']}\"},\n timeout=15,\n )\n r.raise_for_status()\n","sub_path":"src/middlewared/middlewared/alert/service/opsgenie.py","file_name":"opsgenie.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"482312293","text":"\"\"\"This module encapsulates the places365 dataset as provided by torchvision\nas a pytorch-lightning datamodule.\n\"\"\"\n\nfrom typing import Optional\n\nimport pytorch_lightning\n\nimport torch\nimport torch.utils.data\nimport torchvision\nimport torchvision.transforms\n\n\nclass PlacesDataModule(pytorch_lightning.LightningDataModule):\n def __init__(self, batch_size: int, root='/places365', num_data_workers: int=4):\n super().__init__()\n\n self.batch_size = batch_size\n self.root = root\n self.train_ds = None\n self.val_ds = None\n self.num_data_workers = num_data_workers\n\n\n def setup(self, stage: Optional[str]=None) -> None:\n # Values taken from ImageNet\n normalize = torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n self.train_ds = torchvision.datasets.Places365(\n self.root, small=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.RandomCrop(224),\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.ToTensor(),\n normalize\n ]))\n\n self.val_ds = torchvision.datasets.Places365(\n self.root, split='val', small=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.CenterCrop(224),\n torchvision.transforms.ToTensor(),\n normalize\n ]))\n\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(\n self.train_ds,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_data_workers,\n persistent_workers=True)\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(\n self.val_ds,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_data_workers)\n","sub_path":"lecture2/bootcamp/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"337659406","text":"import binascii\n\nstring1 = str(input(\"insert ciphertext for bruteforce: \"))\nstring2 = str(input(\"insert keyword for bruteforce 
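[editor's note] The merging behavior exercised by the metric-batch tests above rests on create_identity: metrics collapse into one entry exactly when class, name, and tag set all match. A small demonstration of that contract, using the same SDK imports as the test file:

from newrelic_telemetry_sdk.metric import GaugeMetric
from newrelic_telemetry_sdk.metric_batch import MetricBatch

a = GaugeMetric('temp', 1, tags={'host': 'a'})
b = GaugeMetric('temp', 2, tags={'host': 'a'})
c = GaugeMetric('temp', 2, tags={'host': 'b'})
assert MetricBatch.create_identity(a) == MetricBatch.create_identity(b)
assert MetricBatch.create_identity(a) != MetricBatch.create_identity(c)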
(optional): \"))\nscale= 16\n\nbytearray1= bytearray(string1, \"utf8\")\nbytearray2 = bytearray(string2, \"utf8\")\nbin1 = bin(int(string1, scale)).zfill(8)\nbin2 = bin(int(string2, scale)).zfill(8)\n\n#print(bin(bin1 ^ bin2))\n#print(binary_string)\n\nbyte_list1 = []\nbyte_list2 = []\nbyte_list3 = []\nfor byte in bytearray1:\n binary_representation = bin(byte)\n byte_list1.append(binary_representation)\n \nfor byte in bytearray2:\n binary_representation = bin(byte)\n byte_list2.append(binary_representation)\n\nfor x in range(0, len(byte_list1)):\n byte_list3.append(bin(byte_list1[x] ^ byte_list2[x]))\n\nprint(byte_list3)\nprint(bytearray1)","sub_path":"IT-Security/Encryption/xor/xor_encrypt.py","file_name":"xor_encrypt.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"97025135","text":"#!/usr/bin/python3\nfrom scapy.all import *\nE = Ether()\nA = ARP(\n op= \"is-at\",\n # 毒化记录中的MAC,即M的MAC地址\n hwsrc = \"d0:c5:d3:03:1d:fd\",\n # 发送方IP地址/毒化记录中的IP,B的IP地址\n psrc = \"192.168.43.49\",\n\n # 目标Mac地址/被欺骗主机MAC,A的MAC地址\n hwdst=\"00:0c:29:c8:4b:9f\",\n # 目标IP地址/被欺骗主机IP地址,A的IP地址\n pdst=\"192.168.43.8\"\n)\npkt = E/A\nprint(pkt.show())\nwhile (1):\n sendp(pkt)\n\n\n","sub_path":"arp/arp2.py","file_name":"arp2.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"307039305","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nfrom torch import nn\nimport torch\n\nclass Yolov3Predictor(nn.Module):\n def __init__(self, cfg):\n super(Yolov3Predictor, self).__init__()\n\n def forward(self, x):\n # N = x.shape[0]\n # C = x.shape[1]\n # H = x.shape[2]\n # W = x.shape[3]\n # x = x.permute(0, 2, 3, 1)\n # x = x.view(N, H, W, 3, int(C/3))\n\n # objectness = x[:, :, :, :, 0]\n # rpn_box_regression = x[:, :, :, :, 1:5]\n # cls = x[:, :, :, :, 5:85]\n\n objectness1 = x[:, 0, :, :]\n objectness2 = x[:, 85, :, :]\n objectness3 = x[:, 170, :, :]\n objectness1 = torch.unsqueeze(objectness1, 1)\n objectness2 = torch.unsqueeze(objectness2, 1)\n objectness3 = torch.unsqueeze(objectness3, 1)\n objectness = torch.cat((objectness1, objectness2, objectness3), 1)\n rpn_box_regression1 = x[:, 1:5, :, :]\n rpn_box_regression2 = x[:, 86:90, :, :]\n rpn_box_regression3 = x[:, 171:175, :, :]\n rpn_box_regression = torch.cat((rpn_box_regression1, rpn_box_regression2, rpn_box_regression3), 1)\n cls1 = x[:, 5:85, :, :]\n cls2 = x[:, 90:170, :, :]\n cls3 = x[:, 175:255, :, :]\n cls = torch.cat((cls1, cls2, cls3), 1)\n\n return [objectness, rpn_box_regression, cls]\n\n\ndef make_yolov3_predictor(cfg):\n return Yolov3Predictor(cfg)\n","sub_path":"DetectionHub/modeling/rpn/yolov3/yolov3_predictor.py","file_name":"yolov3_predictor.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"630489215","text":"#Demonstrates various functions that can be used to perform data conversion operations\n\nfruits = [\"apples\", 45, \"grapes\", 30]\n\nempId = \"150\"\n#int(x [,base]) converts the integer x in string format to the integer with base if base is specified\n#base can be 0 0-26\nprint(\"converting the integer in string format to decimal format: \", int(empId))\nprint(\"converting the integer in string format to integral formal: \", int(empId,0))\n\n\n#float(x)\n\n#To create a complex number use complex(real,imag)\nprint(\"The 
complex number created using complex(real,imag) is : \", complex(2,6))\n\n#To convert an object to string\nprint(\"Converting a list object to string using str(obj): \", str(fruits))\n\n#To convert an object to an expression string.\nrepr(x)\n\n#Evaluates a string a returns an object\neval(x)\n\n#converts s to a tuple\ntuple(s)\n\n#Converts s to a list\nlist(s)\n\n#Sets s to a set\nset(s)\n\n#creates a dictionary where d is a sequence of key value pairs\ndict(d)\n\n#converts s to a frozen set\nfrozenset(s)\n\n#converts an integer to a character\nchr(x)\n\n#Converts an integer to a unicode character\nunichr(x)\n\n#Converts a single character to its integer value\nord(x)\n\n#Converts an integer to a hexadecimal string\nhex(x)\n\n#Converts an integer to an octal strin\noct(x)\n","sub_path":"datatypes/datatype_conversions.py","file_name":"datatype_conversions.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"472913232","text":"from sklearn.preprocessing import MinMaxScaler, StandardScaler, Imputer\nimport numpy as np\n\n\ndef mm():\n \"\"\"归一化处理\n X' = (x-min)/(max-min)\n X'' = X'*(mx-mi)+mi\n \"\"\"\n m = MinMaxScaler(feature_range=(5,10)) # 默认范围为0-1\n array = [[90,2,10,40],[60,4,15,45],[75,3,13,46]]\n data = m.fit_transform(array)\n print(data)\n\ndef standard():\n \"\"\"标准化缩放\n 相比于归一化,标准化对于存在异常值而对结果的影响不大,适合大数据\n 而归一化,由于受异常点的影响,所以......\n \"\"\"\n s = StandardScaler()\n array = [[1,-1,3], [2,4,2], [4,6,-1]]\n data = s.fit_transform(array)\n print(data)\n\ndef im():\n \"\"\"缺失值处理\"\"\"\n im = Imputer(missing_values='NaN', strategy='mean', axis=0) # nan 或 NaN都可以,固定写法,填补策略(平均值),按列填充\n data = im.fit_transform([[1,2],[np.nan,3],[7,6]])\n print(data)\n\n\nif __name__ == '__main__':\n mm()\n standard()\n im()","sub_path":"02_machine_learning/20191028.py","file_name":"20191028.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"678591","text":"import time\nimport requests\n\nfrom selenium import webdriver\n\n\ndef refresh_html(jianshu_url):\n firefoxdriver =\"C:\\Program Files\\Mozilla Firefox\\firefox.exe\"\n browser = webdriver.firefox(firefoxdriver)\n browser.get(jianshu_url)\n while True:\n time.sleep(1)\n browser.refresh()\n\nif __name__ == '__main__':\n url = 'https://www.toutiao.com/a6744255163768242692/'\n try:\n refresh_html(url)\n except:\n refresh_html(url)","sub_path":"untitled1/刷今日头条阅读量.py","file_name":"刷今日头条阅读量.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"636885640","text":"# Object Detection\n\n# Importing the libraries\nimport torch\nfrom torch.autograd import Variable\nimport cv2 # to draw the rectangles\nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom ssd import build_ssd\nimport imageio\n\n# Defining a function that will do the detections\ndef detect(frame, net, transform):\n height, width = frame.shape[:2] # There are 3 parameters of frame.shape: height, width and color\n \n # Transform 1: to numpy array\n frame_t = transform(frame)[0] # transform return 2 elements\n \n # Transform 2: to torch tensor (more advanced matrix than numpy array)\n x = torch.from_numpy(frame_t).permute(2, 0, 1) # to arrange the colors from RBG to GRB\n \n # Transform 3: to CNN acceptable batch format with the 4th dimension added\n x.unsqueeze(0) # dimension # stats from 0: we added the batch 
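[editor's note] A numeric spot-check of the formula in the mm() docstring above — X' = (x-min)/(max-min), then X'' = X'*(mx-mi)+mi — against the first column of the same data. Also note that Imputer was removed from modern scikit-learn in favor of sklearn.impute.SimpleImputer:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

col = np.array([[90.], [60.], [75.]])
scaled = MinMaxScaler(feature_range=(5, 10)).fit_transform(col)
# min 60 -> 5, max 90 -> 10, and 75 -> (75 - 60)/30 * (10 - 5) + 5 = 7.5
assert np.allclose(scaled.ravel(), [10.0, 5.0, 7.5])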
dimension to position 0\n \n # Transform4: to torch variable\n x = Variable(x.unsqueeze(0))\n \n # Feed our variable to the nueral network\n y = net(x) # the network will output\n \n # As the output contains both data and more, we need to pick the data part out\n detections = y.data\n \n # To normalize the detection data\n scale = torch.Tensor([width, height, width, height])\n ''' \n the 1st width and height corresponds to the upper \n left corner of the triangle\n the 2nd width and height corresponds to the lower \n right conner of the triangle\n \n detections = [batch, number of classes, number of occurence, (score, x0, y0, x1, y1]\n class: for each class of objects, such as a bird\n number of occurence: how many times a class is detected\n score: thredhold for objection detection. If over a certain value, the detection will be valid.\n x0, y0: upper left corner of the detection triangle\n x1, y1: lower right corner of the dtection triagnel\n '''\n for i in range(detections.size(1)):\n j = 0 # number of occurence\n while detections[0, i, j, 0] >= 0.6:\n pt = (detections[0, i, j, 1:] * scale).numpy()\n \n # draw the rectangle\n cv2.rectangle(frame, (int(pt[0]), int(pt[1])), (int(pt[2]), int(pt[3])), (255, 0, 0), 2) # We draw a rectangle around the detected object.\n '''\n cv2.rectangle(frame, upperleftpoint(x,y), lowerrightpoint(x,y), color, thickness)\n '''\n # display the labels\n cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA) # We put the label of the class right above the rectangle.\n j += 1 # We increment j to get to the next occurrence.\n return frame # We return the original frame with the detector rectangle and the label around the detected object.\n \n# Creating the SDD nuerual netowork\nnet = build_ssd('test')\n\n\n# Load the previously trained weights\nnet.load_state_dict(torch.load('ssd300_mAP_77.43_v2.pth', map_location = lambda storage, loc: storage)) # We get the weights of the neural network from another one that is pretrained (ssd300_mAP_77.43_v2.pth).\n# Creating the transforms\n# 1st parameter: the figure size taken into the neural network\n# 2nd parameter: the color range\ntransform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))\n\n# Doing some Object Detection on a video\n\n#load the video\nreader = imageio.get_reader('funny_dog.mp4')\n#get the frame rate parameter\nfps = reader.get_meta_data()['fps']\n\n# output the video with detections\nwriter = imageio.get_writer('output.mp4', fps = fps)\nfor i, frame in enumerate(reader):\n # net.eval() is the function we want to pass into the detec function\n frame = detect(frame, net.eval(), transform)\n # now frame has the detection rectangles\n \n # To append a frame to the output video\n writer.append_data(frame)\n print(i)\n# close this output video \nwriter.close()\n ","sub_path":"object_detection_Fan.py","file_name":"object_detection_Fan.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"374301511","text":"from itsdangerous import TimedJSONWebSignatureSerializer as TJWSSerializer\nfrom django.conf import settings\n\n\n# 1,加密openid\ndef encode_openid(openid):\n # 1,创建加密对象\n serializer = TJWSSerializer(secret_key=settings.SECRET_KEY,expires_in=300)\n\n # 2,加密openid\n token = serializer.dumps({\"openid\":openid})\n\n # 3,返回加密的openid\n return token.decode()\n\n\n# 2,解密openid\ndef decode_openid(token):\n # 1,创建加密对象\n serializer = 
TJWSSerializer(secret_key=settings.SECRET_KEY,expires_in=300)\n\n # 2,加密openid\n try:\n data_dict = serializer.loads(token)\n except Exception as e:\n return None\n else:\n # 3,返回加密的openid\n return data_dict.get(\"openid\")\n","sub_path":"meiduo/meiduo/utils/my_openid.py","file_name":"my_openid.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"574308502","text":"#!/usr/bin/python3\n\n\nimport os\nimport config, info, mail\nfrom language import Language\nfrom cache import Cache\n\ndef main():\n\n cache = Cache()\n\n for user in config.users:\n\n # Check that all information about the user is present\n try:\n name = user['name']\n language = user['language']\n email = user['email']\n modules = user['modules']\n except:\n print(\"Some keys missing\")\n continue\n\n lang = Language(language)\n if lang.dictionary == {}:\n continue\n\n header = lang.getHeader(name)\n body = \"\"\n footer = lang.getFooter()\n\n # Date\n if \"date\" in modules:\n date = info.get_date()\n week = info.get_week()\n body += lang.getDate(str(date), str(week))\n\n # Birthdays\n if \"birthdays\" in modules:\n birthdays = info.get_birthdays(user[\"birthdayDBA\"])\n birthday_string = lang.getBirthdays(birthdays)\n if birthday_string != \"\":\n body += birthday_string + \"\\n\"\n\n # SOL information\n if \"sol\" in modules:\n try:\n places = user['sol']\n sol_info = info.get_sol_info(places, cache)\n sol_string = lang.getSol(sol_info)\n if sol_string != \"\":\n body += sol_string + \"\\n\"\n except:\n print(\"Error with the SOL module\")\n\n # Temperature information\n if \"temperature\" in modules:\n try:\n places = user['temperature']\n temperature_info = info.get_temperatures(places, cache)\n temperature_string = lang.getTemperature(temperature_info)\n if temperature_string != \"\":\n body += temperature_string + \"\\n\"\n except:\n print(\"Error with the temperature\")\n\n # Aurora information\n if \"aurora\" in modules:\n try:\n values = info.get_aurora_info(cache)\n body += lang.getAurora(values)\n except:\n print(\"Error with aurora\")\n\n # IP information\n if \"ip\" in modules:\n current_ip = info.getip()\n body += lang.getIp(current_ip)\n\n # Exchange information\n if \"exchange\" in modules:\n try:\n exchanges = user['exchange']\n exchange_values = info.get_exchange_values(exchanges)\n exchange_string = lang.getExchange(exchange_values)\n body += exchange_string + \"\\n\"\n except:\n print(\"Error with the exchange module\")\n\n text = header + body + footer\n\n # Email\n filelist = [ f for f in os.listdir(config.application_path) if f.endswith(\".png\") ]\n mail.send_mail(email, lang.getSubject(info.get_date()), text, filelist)\n\n # Remove the graphs created in the exchange module\n if \"exchange\" in modules:\n for f in filelist:\n try:\n os.remove(f)\n except:\n pass\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"myemail.py","file_name":"myemail.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"65292335","text":"import numpy as np\nfrom keras.models import Sequential\n\nfrom keras.layers.core import Dense, Dropout, Flatten, Activation\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import np_utils, generic_utils\n\nfrom keras.callbacks import 
ModelCheckpoint, EarlyStopping\nfrom dataset import Dataset\nfrom constants import *\n\n# seed - fix the random number generator so results are reproducible\nnp.random.seed(42)\n\ndset = Dataset()\ndset.load()\n(xTrain, yTrain),(xTest, yTest),(xfinTest, yfinTest) = dset.getProps()\n\n\nbatchSize = 64\nnumEpoch = 50\nnumClasses = len(EMOTIONS)\n\nprint(\"xTrain shape: \", xTrain.shape)\nprint(\"xTest shape: \", xTest.shape)\n\n# yTrain = np_utils.to_categorical(yTrain, numClasses)\n# yTest = np_utils.to_categorical(yTest, numClasses)\n# yfinTest = np_utils.to_categorical(yfinTest, numClasses)\n\nxTrain = xTrain.astype(\"float32\")\nxTest = xTest.astype(\"float32\")\nxfinTest = xfinTest.astype(\"float32\")\nxTrain /= 255\nxTest /= 255\nxfinTest /= 255\n\n# datagenerator = ImageDataGenerator(\n#     featurewise_center=False,\n#     samplewise_center=False,\n#     featurewise_std_normalization=False,\n#     samplewise_std_normalization=False,\n#     zca_whitening=False,\n#     rotation_range=None,\n#     width_shift_range=None,\n#     height_shift_range=None,\n#     horizontal_flip=True,\n#     vertical_flip=False\n# )\n# batch = 0\n# for X_batch, y_batch in datagenerator.flow(xTrain, yTrain, batch_size=2048):\n#     print(batch, end='...', flush=True)\n#     xTrain = np.vstack((xTrain, X_batch))\n#     yTrain = np.vstack((yTrain, y_batch))\n#     batch += 1\n#\n# print('X_train shape:', xTrain.shape)\n\n\n# shape = xTrain.shape[1:]\nshape = (WIDTH, HEIGHT, 3)\nprint(shape)\n\n#model\nmodel = Sequential()\n\n# first convolution block\nmodel.add(Convolution2D(48, (3, 3), input_shape=shape, padding=\"same\"))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(Convolution2D(48, (3, 3)))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\n# second convolution block\nmodel.add(Convolution2D(96, (3, 3), padding=\"same\"))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(Convolution2D(96, (3, 3)))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\n\n# third convolution block\nmodel.add(Convolution2D(192, (3, 3), padding=\"same\"))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(Convolution2D(192, (3, 3)))\nmodel.add(LeakyReLU(alpha=0.2))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.4))\n\n# fully connected part\nmodel.add(Flatten())\nmodel.add(Dense(768))\nmodel.add(Activation(\"relu\"))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(numClasses))\nmodel.add(Activation(\"softmax\"))\n\n# and compile\nmodel.compile(\n    loss='categorical_crossentropy',\n    optimizer='adam',\n    class_mode='categorical',\n    metrics=['accuracy'])\n\ncheckpointer = ModelCheckpoint(\n    filepath='cnn_weights.hdf5',\n    verbose=1,\n    save_best_only=True)\nearlystopping = EarlyStopping(\n    monitor='val_loss',\n    patience=10,\n    verbose=1)\n\nmodel.fit(xTrain, yTrain,\n          batch_size=batchSize,\n          epochs=numEpoch,\n          validation_data=(xTest, yTest),\n          callbacks=[checkpointer, earlystopping],\n          verbose=1)\n\n\nscores = model.evaluate(xTest, yTest, verbose=1)\nprint(\"Accuracy on the test data = \", scores[1] * 100)\n\nscores = model.evaluate(xfinTest, yfinTest, verbose=1)\nprint(\"Accuracy on the final test data = \", scores[1] * 100)\n\njson = model.to_json()\nmodelConfig = open(\"cnn_model.json\", \"w\")\nmodelConfig.write(json)\nmodelConfig.close()\n\nmodel.save_weights(\"cnn_final_wights.h5\")\nprint(\"END\")\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"620826061","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Stephan Rave\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport yaml\n\n\n\ndef unicode_representer(dumper, data):\n if '\\n' in data:\n return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n else:\n return dumper.represent_unicode(data)\n\ndef str_representer(dumper, data):\n if '\\n' in data:\n return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n else:\n return dumper.represent_str(data)\n\nyaml.add_representer(unicode, unicode_representer)\nyaml.add_representer(str, str_representer)\n\n\npath = sys.argv[1]\n\n\nfor ds in os.listdir(os.path.join(path, 'DATA')):\n info = yaml.load(open(os.path.join(path, 'DATA', ds, 'INFO')))\n info['experiment'] = info['experiment'] + '_' + info.pop('kind')\n yaml.dump(info, open(os.path.join(path, 'DATA', ds, 'INFO'), 'w'))\n os.rename(os.path.join(path, 'DATA', ds),\n os.path.join(path, 'DATA', '-'.join((lambda l: [l[0] + '_' + l[1]] + l[2:])(ds.split('-')))))\n\nfor run in os.listdir(os.path.join(path, 'RUNS')):\n for ds in os.listdir(os.path.join(path, 'RUNS', run)):\n if ds in ('INFO', 'SCRIPT', 'FINISHED', 'FAILED'):\n continue\n new_name = '-'.join((lambda l: [l[0] + '_' + l[1]] + l[2:])(ds.split('-')))\n os.unlink(os.path.join(path, 'RUNS', run, ds))\n os.symlink(os.path.join('..', '..', 'DATA', new_name),\n os.path.join(path, 'RUNS', run, new_name))\n","sub_path":"update_01_02.py","file_name":"update_01_02.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"620097738","text":"import uuid\nimport datetime\nimport json\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n# -*- coding: utf-8 -*-\n# this file is released under public domain and you can use without limitations\n\n#########################################################################\n## This is a sample controller\n## - index is the default action of any application\n## - user is required for authentication and authorization\n## - download is 
for downloading files uploaded in the db (does streaming)\n#########################################################################\n\n\ndef index():\n userId = session.userId\n # initialize categories list in session\n if not session.categories:\n session.categories = []\n if userId == None:\n # Generating new user\n userId = uuid.uuid4().hex\n print(\"New user: \" + userId)\n session.userId = userId\n # Selecting condition\n session.userCondition = 3 # TODO randomly select condition\n userCondition = session.userCondition\n\n # get user ideas\n ideas = db(db.idea.userId == userId).select()\n # get user categories\n category_idea = db((db.idea.id==db.category_idea.idea) & (db.category.id==db.category_idea.category))\n category_results = category_idea(db.idea.userId==userId).select(db.category.category)\n categories = [d.category for d in category_results] \n for cat in session.categories:\n categories.append(cat)\n return dict(userCondition=userCondition, ideas=ideas, categories=categories)\n\ndef test():\n return db(db.idea.userId == session.userId).select()\n\ndef add_idea():\n '''\n Endpoint for adding a new idea.\n '''\n userId = session.userId\n userCondition = session.userCondition\n if userId != None:\n idea = request.vars['idea']\n category1 = request.vars['category1']\n category2 = request.vars['category2']\n dateAdded = datetime.datetime.now()\n ideaId = db.idea.insert(userId=userId, idea=idea, dateAdded=dateAdded, userCondition=userCondition)\n # Get category Ids\n category1_id = __insert_or_retrieve_category_id(category1)\n if category2: # category 2 is optional\n category2_id = __insert_or_retrieve_category_id(category2)\n # insert relationship\n db.category_idea.insert(category=category1_id, idea=ideaId)\n if category2: # insert only if there was one\n db.category_idea.insert(category=category2_id, idea=ideaId)\n\ndef get_category():\n # calculate the least used category\n category_idea = db((db.idea.id==db.category_idea.idea) & (db.category.id==db.category_idea.category))\n idea_categories = category_idea(db.idea.userCondition == 3).select(db.category.category, db.category.id)\n count = dict()\n # get all user categories\n used_categories = session.categories # get hint categories\n category_idea = db((db.idea.id==db.category_idea.idea) & (db.category.id==db.category_idea.category))\n category_results = category_idea(db.idea.userId==session.userId).select(db.category.category) # get user generated categories\n for cat in category_results:\n used_categories.append(cat.category)\n # Count categories to find least used ones\n for cat in idea_categories:\n if not cat.category in session.categories:\n if not cat.category in count.keys():\n count[cat.category] = 0\n count[cat.category] += 1\n if len(count.keys()):\n min_value = min(count.itervalues())\n min_cat = [k for k, v in count.iteritems() if v == min_value]\n category = random.choice(min_cat)\n cat_id = [d.id for d in idea_categories if d.category == category][0] # getting category id\n session.categories.append(category)\n return json.dumps(dict(category=category, id=cat_id)) # randomly choose an element if there's a tie\n else:\n return json.dumps(dict())\n\ndef matrix():\n # get categories\n categories_result = db(db.category.id > 0).select()\n categories = [d.category for d in categories_result]\n ids = [d.id for d in categories_result]\n # this maps a category id to a matrix cell\n cat_map = {d.id : n for (d,n) in zip(categories_result, range(0, len(categories_result)))}\n\n # get map between ideas and categories\n 
idea_categories_results = db(db.category_idea.id > 0).select()\n idea_categories = [(d.idea, d.category) for d in idea_categories_results]\n idea_category_map = __build_idea_category_map(idea_categories)\n\n # build matrix\n connections = db(db.category_idea.id > 0)\n matrix = [[0 for x in xrange(len(categories))] for x in xrange(len(categories))]\n for idea in idea_category_map.keys():\n cell = idea_category_map[idea]\n print(cat_map[cell[0]])\n if len(cell) == 1:\n matrix[cat_map[cell[0]]][cat_map[cell[0]]] += 1\n else:\n matrix[cat_map[cell[0]]][cat_map[cell[1]]] += 1\n matrix[cat_map[cell[1]]][cat_map[cell[0]]] += 1\n return json.dumps(dict(categories=categories, matrix=matrix, ids=ids))\n\ndef __build_idea_category_map(idea_category):\n ic_map = dict()\n for t in idea_category:\n if not t[0] in ic_map:\n ic_map[t[0]] = []\n ic_map[t[0]].append(t[1])\n return ic_map\n\n\ndef __insert_or_retrieve_category_id(category):\n categoryResults = db(db.category.category == category).select(db.category.id)\n if categoryResults:\n return categoryResults[0].id\n else:\n return db.category.insert(category=category)\n\ndef new_ideas():\n '''\n Endpoint for retrieving most recent ideas (given a timestamp)\n '''\n # Get last update time\n rawLastUpdateTime = request.vars[\"lastUpdateTime\"]\n print(\"Last: \" + str(rawLastUpdateTime))\n if rawLastUpdateTime == None or not rawLastUpdateTime:\n lastUpdateTime = datetime.datetime.min\n else:\n lastUpdateTime = datetime.datetime.strptime(rawLastUpdateTime, '%Y-%m-%d %H:%M:%S.%f') #\"2016-02-09 16:15:59.497000\n # Subtract a couple of seconds to account for any conflicts\n lastUpdateTime = lastUpdateTime - datetime.timedelta(seconds=2)\n print(\"Last (offset): \" + str(lastUpdateTime))\n \n # Retrieving new ideas\n rows = db((db.idea.dateAdded > lastUpdateTime) & \n (db.idea.userCondition == session.userCondition) & \n (db.idea.userId != session.userId)).select()\n print(\"Returned results: \" + str(len(rows)))\n ideas = []\n for row in rows: \n ideas.append(row.idea)\n \n current_time = datetime.datetime.now()\n return json.dumps(dict(ideas=ideas, lastUpdate=str(current_time)))\n\ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n http://..../[app]/default/user/bulk_register\n use @auth.requires_login()\n @auth.requires_membership('group name')\n @auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users\n \"\"\"\n return dict(form=auth())\n\n\n@cache.action()\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request, db)\n\n\ndef call():\n \"\"\"\n exposes services. 
for example:\n http://..../[app]/default/call/jsonrpc\n decorate with @services.jsonrpc the functions to expose\n supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv\n \"\"\"\n return service()\n\n\n","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"611349608","text":"#usr/bin/env python3\n'''\n__author__ = 'abba y abdullahi '\n'''\nimport turtle\n\nturtle.pencolor('red')\nturtle.penup()\nturtle.setposition(-45, 100)\nturtle.pendown()\nfor i in range(8):\n turtle.forward(80)\n turtle.right(45)\n\n\n# Draw a blue spiral centered at (0, 0)\ndistance = 0.2\nangle = 40\nturtle.pencolor('blue')\nturtle.penup()\n# Left pen to move it\nturtle.setposition(0, 0)\n# Position the pen at coordinates (0, 0)\nturtle.pendown()\nfor i in range(100):\n turtle.forward(distance)\n turtle.left(angle)\n distance += 0.5\n\n\nturtle.hideturtle()\nturtle.exitonclick()\n","sub_path":"sharp.py","file_name":"sharp.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"60153440","text":"from imutils.video import VideoStream\nfrom imutils import face_utils\nimport numpy\nimport imutils\nimport dlib\nimport cv2\ndef euclidean_dist(point, point2):\n return numpy.linalg.norm(point - point2)\ndef eye_aspect_ratio(eye):\n return (euclidean_dist(eye[1], eye[5]) + euclidean_dist(eye[2], eye[4])) / (2.0 * euclidean_dist(eye[0], eye[3]))\ncount = 0\ndefaultear = []\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = cv2.CascadeClassifier(\"haarcascadeface.xml\")\npredictor = dlib.shape_predictor(\"facess.dat\")\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\nprint(\"[INFO] starting video stream thread...\")\nvs = VideoStream(src=0).start()\nwhile True:\n frame = imutils.resize(vs.read(), width=450)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = detector.detectMultiScale(gray, scaleFactor=1.1, \n minNeighbors=5, minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE)\n for (x, y, w, h) in rects:\n shape = face_utils.shape_to_np(predictor(gray, dlib.rectangle(int(x), int(y), int(x + w), int(y + h))))\n leftEye = shape[lStart:lEnd]\n rightEye = shape[rStart:rEnd]\n ear = (eye_aspect_ratio(leftEye) + eye_aspect_ratio(rightEye)) / 2.0\n leftEyeHull = cv2.convexHull(leftEye)\n rightEyeHull = cv2.convexHull(rightEye)\n cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n if ear < 0.3:\n count += 1\n if count >= 6:\n #alrm()\n cv2.putText(frame, \"ALERTA!\", (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n else:\n count= 0\n cv2.putText(frame, \"EAR: {:.3f}\".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\") or key == ord(\"Q\"):\n break\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"drowsiness detection.py","file_name":"drowsiness detection.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"203861671","text":"\ndatas = [{\"aaa\": 111, \"bbb\": 222}, {\"aaa\": 333, \"bbb\": 444}]\n\nfor data in datas:\n data.update({\"aaa\": 999})\n data.update({\"ccc\": 000})\n\nprint(datas)\n\n# from 
urllib.parse import quote, unquote\n# url = 'file://appform/headimg/202009/3a5c9a44da244f9a921e969cabf1fb1f_简历照片 蒋志宇.jpg?belong=pHh0xxxxxe'.encode('utf8')\n# print(url)\n# q_url = 'http://api-t2.dqprism.com/user/v4/headimg/auth?url={url}&userId=0&hrId=94968&appid=A11005&interfaceid=A11005001'.format(url=quote(url))\n# print(q_url)\n","sub_path":"test_any/test_dict.py","file_name":"test_dict.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"276253452","text":"import pygame\nimport random\nfrom variables import *\n\nclass Enemy(pygame.sprite.Sprite):\n \"\"\" This class represent the enemy sprites \"\"\"\n # Images for different directions\n down = enemy_down_img\n up = enemy_up_img\n left = enemy_left_img\n right = enemy_right_img\n \n # Directional image counter\n d_c = 0\n u_c = 0\n r_c = 0\n l_c = 0\n\t\n # Speed vector\n change_x = 0\n change_y = 0\n curr_dir=\"\"\n \n def __init__(self, x, y):\n super(Enemy,self).__init__()\n # Creating the image object\n self.image = pygame.image.load(self.right[self.r_c])\n \n # Assigning the positions and dimensions to the block and giving it a random direction to start with\n self.rect = self.image.get_rect()\n self.rect.y = y\n self.rect.x = x\n self.curr_dir = random.choice(direction)\n \n def changespeed(self):\n \"\"\" Change the speed of the Enemy. Called with a keypress. \"\"\"\n if self.curr_dir == \"LEFT\":\n self.change_x = -4\n if self.curr_dir == \"RIGHT\":\n self.change_x = 4\n if self.curr_dir == \"UP\":\n self.change_y =-4\n if self.curr_dir == \"DOWN\":\n self.change_y = 4 \n \n\n def move(self, walls):\n \"\"\" Find its new position and state \"\"\"\n if self.curr_dir ==\"RIGHT\":\n self.r_c+=1\n if self.r_c == 12:\n self.r_c = 0\n self.image = pygame.image.load(self.right[self.r_c])\n \n if self.curr_dir ==\"LEFT\":\n self.l_c+=1\n if self.l_c == 12:\n self.l_c = 0\n self.image = pygame.image.load(self.left[self.l_c])\n \n if self.curr_dir ==\"DOWN\":\n self.d_c+=1\n if self.d_c == 12:\n self.d_c = 0\n self.image = pygame.image.load(self.down[self.d_c])\n \n if self.curr_dir ==\"UP\":\n self.u_c+=1\n if self.u_c == 12:\n self.u_c = 0\n self.image = pygame.image.load(self.up[self.u_c])\n\n # Move left/right and check if hit any wall or not\n self.rect.x += self.change_x\n \n block_hit_list = pygame.sprite.spritecollide(self, walls, False)\n for block in block_hit_list:\n if self.change_x > 0:\n self.rect.right = block.rect.left\n else:\n self.rect.left = block.rect.right\n self.change_x = 0\n # Change direction after every hit\n self.curr_dir = random.choice(direction)\n \n # Move up/down and check if hit any wall or not\n self.rect.y += self.change_y\n \n block_hit_list = pygame.sprite.spritecollide(self, walls, False)\n for block in block_hit_list:\n \n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n else:\n self.rect.top = block.rect.bottom\n self.change_y =0\n # Change direction after every hit\n self.curr_dir = random.choice(direction)\n","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"432244826","text":"import kivy\nimport random\n\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.properties import NumericProperty\n\n\nclass GameSpace(RelativeLayout):\n\n def __init__(self, 
**kwargs):\n        self.acc_height = 50\n        self.current_width = 500\n        self.end = False\n        self.score = 0  # must exist before on_touch_down increments it\n        super(GameSpace, self).__init__(**kwargs)\n        Clock.schedule_once(lambda _: self.add_widget(Block(size_hint=(None, None),\n                                                             size=(500, 50),\n                                                             pos=(self.center_x - 250, 0))))\n        Clock.schedule_once(lambda _: self.add_widget(Block(size_hint=(None, None),\n                                                             size=(500, 50),\n                                                             pos=(self.center_x - 250, self.acc_height))))\n\n    def on_touch_down(self, touch):\n        self.acc_height += 50\n        current_block = self.children[0]\n        before_block = self.children[1]\n        if current_block.x + current_block.width < before_block.x or current_block.x > before_block.x + before_block.width:\n            print(\"end\")\n            self.end = True\n        elif current_block.x < before_block.x:\n            current_block.width = current_block.width - (before_block.x - current_block.x)\n            current_block.x = before_block.x\n        elif current_block.x > before_block.x:\n            current_block.width = current_block.width - (current_block.x - before_block.x)\n            current_block.x = before_block.x + before_block.width - current_block.width\n        current_block.move_event.cancel()\n        if not self.end:\n            self.add_widget(Block(size_hint=(None, None),\n                                  size=(current_block.width, 50),\n                                  pos=(random.choice([0, self.parent.width - current_block.width]), self.acc_height)))\n            self.score += 1\n            if self.acc_height > self.height * (1/3):\n                for each in self.children:\n                    each.y -= each.height\n                self.acc_height -= 50\n            Block.movement = -1.2 ** (7 - self.score) + 5\n\n\nclass Block(Widget):\n\n    movement = 1\n\n    def __init__(self, **kwargs):\n        self.move_event = Clock.schedule_interval(self.update, 0.01)\n        super().__init__(**kwargs)\n\n    def update(self, dt):\n        if self == self.parent.children[0]:\n            if self.x < 0:\n                self.movement = self.movement * -1\n            elif self.x + self.width > self.parent.width:\n                self.movement = self.movement * -1\n            self.x += self.movement\n\n\nclass OverlayApp(App):\n    title = \"Game Overlay\"\n\n\nOverlayApp().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"362418250","text":"# '''\n# Input: a List of integers\n# Returns: a List of integers\n# '''\n\n\ndef product_of_all_other_numbers(array):\n    # create a holder array to hold the result\n    result = []\n\n    # outer loop\n    for firstIndex in range(len(array)):\n        product = 1\n        # inner loop\n        for secondIndex in range(len(array)):\n\n            if firstIndex != secondIndex:\n                # calculate the product of the rest elements in the array\n                product *= array[secondIndex]\n        result.append(product)\n\n    return result\n\n\nif __name__ == '__main__':\n    # Use the main function to test your implementation\n    arr = [1, 7, 3, 4]\n\n    print(f\"Output of product_of_all_other_numbers: {product_of_all_other_numbers(arr)}\")\n","sub_path":"product_of_all_other_numbers/product_of_all_other_numbers.py","file_name":"product_of_all_other_numbers.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"141665172","text":"from .base_query import BaseQuery\nfrom database import Constants, DBFunction\n\n\nclass Delete(BaseQuery):\n    def __init__(self, table_name, where=None):\n        super().__init__(table_name=table_name, where=where)\n\n    def where(self, *args, **kwargs):\n        self._where = args, kwargs\n        return self\n\n    @property\n    def build(self):\n        query = Constants.Templates.DELETE.format(table=self._table_name)\n        query_params = ()\n\n        # check the stored clause, not the where() method (a bound method is always truthy)\n        if self._where:\n            where_query, 
where_query_params = self._build_where()\n query += where_query\n query_params = where_query_params\n\n return query, query_params\n","sub_path":"database/methods/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"207283895","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\n# time: O(nlogn)\n# space: O(n)\nclass Solution:\n def buildTree(self, preorder, inorder):\n \"\"\"\n :type preorder: List[int]\n :type inorder: List[int]\n :rtype: TreeNode\n \"\"\"\n inorderDict = {num: i for i, num in enumerate(inorder)}\n pre = iter(preorder)\n\n def helper(start, end):\n if start > end:\n return None\n val = next(pre)\n root = TreeNode(val)\n idx = inorderDict[val]\n root.left = helper(start, idx - 1)\n root.right = helper(idx + 1, end)\n return root\n\n return helper(0, len(inorder) - 1)\n","sub_path":"MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/105.py","file_name":"105.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"517793325","text":"def e9():\n c=0\n i=0\n while(i<11):\n n=int(input(\"Ingrese un numero: \"))\n if(n%2==0):\n c=c+1\n i=i+1\n print(\"Ingreso\",c,\" par(es)\")\n \n \n","sub_path":"Cuantos_pares_while.py","file_name":"Cuantos_pares_while.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"98964551","text":"import sys\r\nimport os\r\nimport cv2\r\nimport operations as op\r\n\r\ndef get_images(frame, faces_coord, shape):\r\n \"\"\" This function draws the countour around the found face given by faces_coord\r\n and also cuts the face from the original image. Returns both images.\"\"\"\r\n\r\n if shape == \"rectangle\":\r\n faces_img = op.cut_face_rectangle(frame, faces_coord\r\n )\r\n frame = op.draw_face_rectangle(frame, faces_coord)\r\n faces_img = op.normalize_intensity(faces_img)\r\n faces_img = op.resize(faces_img)\r\n return (frame, faces_img)\r\n\r\ndef detect(detector, image, biggest_only=True):\r\n \"\"\" Find the biggest face in an image and return its position and dimensions (top, left, width and height).\"\"\"\r\n is_color = len(image) == 3\r\n\r\n if is_color:\r\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n else:\r\n image_gray = image\r\n\r\n # Change to True if we want to detect only one face\r\n flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | \\\r\n cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else \\\r\n cv2.CASCADE_SCALE_IMAGE\r\n\r\n face_coord = detector.detectMultiScale(image_gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30), flags=flags)\r\n return face_coord\r\n\r\ndef add_person(face_dataset, shape):\r\n \"\"\" Funtion add faces of a person\"\"\"\r\n person_name = raw_input('Please enter your name: ').lower()\r\n folder = face_dataset + person_name\r\n if not os.path.exists(folder):\r\n os.mkdir(folder)\r\n video_capture = cv2.VideoCapture(0)\r\n detector = cv2.CascadeClassifier('classifier/frontal_face.xml')\r\n counter = 1\r\n timer = 0\r\n cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)\r\n cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)\r\n raw_input(\"I will now take 5 pictures. 
Press ENTER when ready.\")\r\n while counter < 6:\r\n ret, frame = video_capture.read()\r\n face_coord = detect(detector, frame)\r\n if len(face_coord):\r\n frame, face_img = get_images(frame, face_coord, shape)\r\n if timer % 100 == 10:\r\n cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])\r\n print ('Images Saved:' + str(counter))\r\n counter += 1\r\n cv2.imshow('Saved Face', face_img[0])\r\n cv2.imshow('Video Feed', frame)\r\n cv2.waitKey(50)\r\n timer += 10\r\n #else:\r\n # print (\"This name already exists.\")\r\n # sys.exit()\r\n\r\ndef main():\r\n FACE_DATASET = \"data/face_dataset/\"\r\n SHAPE = \"rectangle\"\r\n\r\n if not os.path.exists(FACE_DATASET):\r\n os.makedirs(FACE_DATASET)\r\n add_person(FACE_DATASET, SHAPE)\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Face Recogition/Add_faces.py","file_name":"Add_faces.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"312451268","text":"menu = ['mot', 'hai', 'ba']\nfor index, item in enumerate(menu):\n print(index+1, ', ', item, sep='')\nn = int(input(\"Position you want to update?\"))\nif n > len(menu):\n menu.append(input(\"Your replacing favorite?\"))\nelse:\n menu[n-1] = input(\"Your replacing favorite?\")\nfor index, item in enumerate(menu):\n print(index+1, ', ', item, sep='')\n","sub_path":"Fundamentals/Fun03/class/update-list.py","file_name":"update-list.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"379924168","text":"from scipy.stats import norm\nimport numpy as np\n\n# use the Black-Scholes formula to price a European call or put\ndef europ():\n CP = int(input(\"Enter 0 for call, 1 for put: \"))\n S = float(input(\"Current price: \"))\n K = float(input(\"Strike: \"))\n tau = float(input(\"Expiration: \"))\n sigma = float(input(\"Volatility: \"))\n r = float(input(\"Risk-free interest rate: \"))\n a = float(input(\"Continuous dividend rate: \"))\n\n d1 = (1/(sigma*np.sqrt(tau)))*(np.log(S/K) + (r-a+(sigma**2/2))*tau)\n d2 = (1/(sigma*np.sqrt(tau)))*(np.log(S/K) + (r-a-(sigma**2/2))*tau)\n\n if CP == 0:\n price = S*np.exp(-a*tau)*norm.cdf(d1) - K*np.exp(-r*tau)*norm.cdf(d2)\n else:\n price = K*np.exp(-r*tau)*norm.cdf(-d2) - S*np.exp(-a*tau)*norm.cdf(-d1)\n\n return price\n","sub_path":"python_versions/blackscholes.py","file_name":"blackscholes.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"523257108","text":"#!/usr/bin/env python\nfrom os import path\nfrom lxml.etree import parse, fromstring\ntry:\n from osgeo import gdal\nexcept:\n sys.exit('ERROR: cannot find GDAL/OGR modules, install gdal 2.1.3 with python bindings')\n\nclass S1GDALReader:\n\n def __init__(self, sentinel1_safe_pkg_path):\n splitted = sentinel1_safe_pkg_path.split('/')\n self.safe_package_path = sentinel1_safe_pkg_path\n self.granule_identifier = splitted[len(splitted)-1].split('.')[0]\n self.datastore = gdal.Open(sentinel1_safe_pkg_path)\n\n def get_metadata(self):\n metadata_dict = self.datastore.GetMetadata()\n metadata_dict['NAME'] = self.granule_identifier\n return metadata_dict\n #def get_footprint():\n # self.datastore.\n\n def get_footprint(self):\n GML_NS = \"{http://www.opengis.net/gml}\"\n gml_coordinates = \"\"\n with open(path.join(self.safe_package_path, \"manifest.safe\")) as f:\n obj = parse(f)\n for el in 
obj.iter(GML_NS + \"coordinates\"):\n                gml_coordinates = el.text\n        wkt_coordinates = gml_coordinates.replace(\",\", \";\")\n        wkt_coordinates = wkt_coordinates.replace(\" \", \",\")\n        wkt_coordinates = wkt_coordinates.replace(\";\", \" \")\n        wkt_coordinates = wkt_coordinates + \",\" + wkt_coordinates.split(\",\")[0]\n        return \"POLYGON ((\" + wkt_coordinates + \"))\"\n\n    def get_preview_image(self):\n        return path.join(self.safe_package_path, \"preview\", \"quick-look.png\")\n","sub_path":"metadata-ingestion/utils/S1Reader.py","file_name":"S1Reader.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"189264213","text":"from sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\nfrom sklearn.gaussian_process import GaussianProcessRegressor\r\nfrom sklearn.gaussian_process.kernels import ConstantKernel, Matern\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\nfrom scipy.optimize import minimize\r\n\r\nimport numpy as np\r\n\r\nimport time\r\n\r\nclass Data_Generator():\r\n    def __init__(self, num_clusters, data_points):\r\n\r\n        self.num_clusters = num_clusters\r\n        self.data_points = data_points\r\n        self.centers = []\r\n        self.data = []\r\n\r\n        for i in range(self.num_clusters):\r\n            cluster_center = self.pickCenters(self.centers,self.num_clusters)\r\n            new_data = self.generateData(cluster_center, int(self.data_points/self.num_clusters))\r\n            self.centers.append(cluster_center)\r\n            self.data.append(new_data)\r\n\r\n        self.data = np.vstack(self.data)\r\n\r\n    def generateData(self, cluster_center, data_points):\r\n        \r\n        mean = [cluster_center[0],cluster_center[1]]\r\n        cov = [[1,0], [0,1]]\r\n        x = np.random.multivariate_normal(mean, cov, data_points)\r\n        return x\r\n\r\n    def pickCenters(self,centers,num_clusters):\r\n        cluster_center = np.random.uniform(-50,50,2)\r\n        return [cluster_center[0], cluster_center[1]]\r\n    \r\n\r\n\r\ndef cluster_and_score(data, num_clusters, method):\r\n    if method== 'kmeans':\r\n        cluster_result = clusterKMeans(data, num_clusters)\r\n\r\n        return -1*(metrics.silhouette_score(data, cluster_result.labels_, metric='euclidean'))\r\n\r\n    if method == 'em':\r\n        cluster_result = clusterEM(data, num_clusters)\r\n\r\n        # silhouette_score needs one label per sample, not the component means\r\n        return -1*(metrics.silhouette_score(data, cluster_result.predict(data), metric = 'euclidean'))\r\n\r\n\r\ndef clusterKMeans(data, num_clusters):\r\n    kmeans = KMeans(n_clusters = num_clusters).fit(data)\r\n    return kmeans\r\n\r\n\r\ndef clusterEM(data, num_clusters):\r\n    gmm = GaussianMixture(n_components = num_clusters).fit(data)\r\n    return gmm\r\n    \r\n\r\ndef clusterVI(data):\r\n    print('TODO')\r\n\r\nclass Bayes_Optimal_Clusters():\r\n\r\n    def __init__(self, domain, data, method, n_iter):\r\n        self.domain = domain\r\n        self.data = data\r\n        self.method = method \r\n        self.noise = 0.1\r\n        self.n_iter = n_iter\r\n        self.num_clusters = None\r\n\r\n        initial_x = np.linspace(2, self.domain, num=5)\r\n        initial_x = initial_x.reshape(-1,1)\r\n        initial_y = np.asarray([cluster_and_score(data, int(i), method) for i in initial_x ])\r\n        initial_y = initial_y.reshape(-1,1)\r\n        m52 = ConstantKernel(1.0)*Matern(length_scale=1.0, nu=2.5)\r\n        self.gpmodel = GaussianProcessRegressor(kernel=m52, alpha = self.noise**2)\r\n\r\n        \r\n        self.x_sample, self.y_sample = initial_x, initial_y\r\n        for i in range(n_iter):\r\n            self.x_next = self.propose_location(self.gpmodel, self.domain)\r\n            self.y_next = cluster_and_score(self.data, int(self.x_next), self.method)\r\n            self.x_sample = np.vstack((self.x_sample, 
self.x_next))\r\n            self.y_sample = np.vstack((self.y_sample, self.y_next))\r\n            self.gpmodel.fit(self.x_sample, self.y_sample)\r\n        \r\n        self.num_clusters = self.x_next\r\n        \r\n\r\n    def lower_confidence_bound(self, x, gpmodel, kappa=2):\r\n        mu, sigma = gpmodel.predict(np.full(1,x).reshape(-1,1), return_std = True)\r\n        return mu-kappa*sigma\r\n\r\n    def propose_location(self, gpmodel, domain, n_restarts = 25):\r\n        dim = 1\r\n        min_val = 1\r\n        min_x = None\r\n        def min_obj(X):\r\n            return self.lower_confidence_bound(X, gpmodel)\r\n\r\n        for x0 in np.random.uniform(2, domain, size=(n_restarts, dim)):\r\n            res = minimize(min_obj, x0=x0, bounds = ((2,domain),), method='L-BFGS-B')\r\n            if res.fun < min_val:\r\n                min_val = res.fun[0]\r\n                min_x = res.x\r\n        return round(min_x[0])\r\n\r\n\r\nclass Brute_Force_Optimal_Clusters():\r\n\r\n    def __init__(self, domain, data, method):\r\n        self.domain = domain\r\n        self.data = data\r\n        self.method = method\r\n        self.num_clusters = None\r\n        self.result = 1\r\n        for i in range(2, self.domain):\r\n            new_result = cluster_and_score(self.data, i, self.method)\r\n            if new_result < self.result:\r\n                self.result = new_result\r\n                self.num_clusters = i\r\n        \r\nif __name__ == \"__main__\":\r\n    true_clusters = 40\r\n    sample_data_points = 100000\r\n    data_gen = Data_Generator(true_clusters, sample_data_points)\r\n    data = data_gen.data\r\n    start_bayes_opt = time.time()\r\n    opt_clusters_bayes_opt = Bayes_Optimal_Clusters(domain = 70, data = data, method = 'kmeans', n_iter = 20).num_clusters\r\n    stop_bayes_opt = time.time()\r\n\r\n    start_brute_force = time.time()\r\n    opt_clusters_brute_force = Brute_Force_Optimal_Clusters(domain = 70, data = data, method = 'kmeans').num_clusters\r\n    stop_brute_force = time.time()\r\n\r\n    print('True number of clusters is {}'.format(true_clusters))\r\n    print('Brute force found an optimal number of clusters {} in {:0.1f} seconds'.format(opt_clusters_brute_force, stop_brute_force-start_brute_force))\r\n    print('Bayes optimization found an optimal number of clusters {} in {:0.1f} seconds'.format(int(opt_clusters_bayes_opt), stop_bayes_opt - start_bayes_opt))","sub_path":"optimal_clusters.py","file_name":"optimal_clusters.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"362765863","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom douban.items import DoubanItem\n\nclass DoubanSpiderSpider(scrapy.Spider):\n    name = 'douban_spider'\n    allowed_domains = ['movie.douban.com']\n    start_urls = ['https://movie.douban.com/top250']\n\n    def parse(self, response):\n        movie_list = response.css(\".article ol.grid_view li\")\n        for i_item in movie_list:\n            douban_item = DoubanItem()\n            douban_item['serial_number'] = i_item.css('div.item em::text').extract_first()\n            douban_item['movie_name'] = i_item.css('div.info div.hd a span:first-child::text').extract_first()\n            content = i_item.css('div.info div.bd p:first-child::text').extract()\n            content_s = ''\n            for i_content in content:\n                content_s += \"\".join(i_content.split())\n            douban_item['introduce'] = content_s\n            douban_item['star'] = i_item.css('span.rating_num::text').extract_first()\n            douban_item['evaluate'] = i_item.css('div.star span:nth-child(4)::text').extract_first()\n            douban_item['describe'] = i_item.css('p.quote span::text').extract_first()\n            yield douban_item\n            print(douban_item)\n        # the pagination URL lives on the <a> tag inside span.next, not a <link> tag\n        next_link = response.css('span.next a::attr(href)').extract()\n        if next_link:\n            next_link = next_link[0]\n            yield scrapy.Request(\"https://movie.douban.com/top250\" + next_link,callback=self.parse)\n        \n        \n","sub_path":"Python/douban/douban/spiders/douban_spider.py","file_name":"douban_spider.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"183708640","text":"import pandas as pd\n\ndef time_period(x):\n    if (x >= 6) & (x < 12):\n        y = 1\n    elif (x >= 12) & (x < 18):\n        y = 2\n    elif (x >= 18) & (x < 24):\n        y = 3\n    else:\n        y = 0\n    return y\n\n\ndef time_features(data):\n    data['refresh_hour_period'] = data['refresh_hour'].apply(time_period)\n    return data\n\n\nif __name__ == '__main__':\n    train = pd.read_csv('../data/backup/train.csv', index_col=False)\n    valid = pd.read_csv('../data/backup/valid.csv', index_col=False)\n    test = pd.read_csv('../data/backup/test.csv', index_col=False)\n    print('read done')\n\n    train = time_features(train)\n    valid = time_features(valid)\n    test = time_features(test)\n    print('features done')\n\n    train.to_csv('../data/train_dataset/train.csv')\n    valid.to_csv('../data/train_dataset/valid.csv')\n    test.to_csv('../data/train_dataset/test.csv')\n\n","sub_path":"feature/add_features.py","file_name":"add_features.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"460162986","text":"import threading, queue\nimport time\n\n\ndef washer(dishes, output):\n    for dish in dishes:\n        print(\"Washing\", dish, \"dish\")\n        time.sleep(5)\n        output.put(dish)\n\n\ndef dryer(input):\n    while True:\n        dish = input.get()\n        print(\"Drying\", dish, \"dish\")\n        time.sleep(10)\n        input.task_done()\n\n\n# Introduction\n# The standard Python implementation uses the GIL (Global Interpreter Lock),\n# so CPU-bound work is not sped up by using threads.\n# CPU-bound -> waiting on the CPU, e.g. while numeric processing runs.\n# I/O-bound -> waiting on I/O, e.g. memory or device access rather than the CPU.\nif __name__ == \"__main__\":\n    dish_queue = queue.Queue()\n    for n in range(2):\n        # The book omits \"daemon=True\", in which case queue.Queue().join() never lets the process exit.\n        # To get the same behavior with daemon=False (the default), thread.join() must be called on every created thread.\n        # daemon=True --> daemon thread\n        # daemon=False -> user thread\n        dryer_thread = threading.Thread(target=dryer, args=(dish_queue,), daemon=True)\n        dryer_thread.start()\n\n    dishes = [\"salad\", \"bread\", \"entree\", \"dessert\"]\n    washer(dishes, dish_queue)\n    dish_queue.join()\n","sub_path":"sampleproject/book/BeginningPython3_O_REILLY/chapter11/thread_dishes.py","file_name":"thread_dishes.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"311814292","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport weakref\n\nfrom django.db.models.expressions import Col, Expression\nfrom django.db.models.options import Options\n\n\nclass CTEModel(object):\n\n    def __init__(self, cte, query):\n        self._meta = CTEMeta(cte, query)\n\n    def _copy_for_query(self, query):\n        return type(self)(self._meta._cte(), query)\n\n\nclass CTEMeta(Options):\n\n    def __init__(self, cte, query):\n        super(CTEMeta, self).__init__(None)\n        self.managed = False\n        self.model = None\n        self._cte = weakref.ref(cte)\n        self._query = weakref.ref(query)\n\n    @property\n    def db_table(self):\n        return self._cte().name\n\n    @db_table.setter\n    def db_table(self, value):\n        if value != '':\n            raise AttributeError(\"CTEMeta.db_table is read-only\")\n\n    @property\n    def local_fields(self):\n        cte = self._cte()\n        query = cte._queryset.query\n        opts = query.get_meta()\n        
fields = []\n if query.default_cols:\n assert not query.select, query.select\n fields.extend(opts.concrete_fields)\n else:\n fields.extend(\n CTEField(cte, col.target.column, col.output_field)\n for col in query.select\n )\n fields.extend(\n CTEField(cte, alias, annotation.output_field)\n for alias, annotation in query.annotation_select.items()\n )\n return fields\n\n @local_fields.setter\n def local_fields(self, value):\n if value != []:\n raise AttributeError(\"CTEMeta.local_fields is read-only\")\n\n @property\n def _relation_tree(self):\n return []\n\n\nclass CTEColumns(object):\n\n def __init__(self, cte):\n self._cte = weakref.ref(cte)\n\n def __getattr__(self, name):\n return CTEColumn(self._cte(), name)\n\n\nclass CTERef(object):\n\n def __init__(self, cte, name, output_field=None):\n self._cte = cte\n self.name = self.alias = name\n self._output_field = output_field\n\n def __repr__(self):\n return \"<{} {}.{}>\".format(\n self.__class__.__name__,\n self._cte.name,\n self.name,\n )\n\n @property\n def _ref(self):\n if self._cte._queryset is None:\n raise ValueError(\n \"cannot resolve '{cte}.{name}' in recursive CTE setup. \"\n \"Hint: use ExpressionWrapper({cte}.col.{name}, \"\n \"output_field=...)\".format(cte=self._cte.name, name=self.name)\n )\n ref = self._cte._resolve_ref(self.name)\n if ref is self or self in ref.get_source_expressions():\n raise ValueError(\"Circular reference: {} = {}\".format(self, ref))\n return ref\n\n @property\n def target(self):\n return self._ref.target\n\n @property\n def output_field(self):\n if self._output_field is not None:\n return self._output_field\n return self._ref.output_field\n\n\nclass CTEColumn(CTERef, Expression):\n\n def as_sql(self, compiler, connection):\n qn = compiler.quote_name_unless_alias\n ref = self._ref\n if isinstance(ref, Col) and self.name == \"pk\":\n column = ref.target.column\n else:\n column = self.name\n return \"%s.%s\" % (qn(self._cte.name), qn(column)), []\n\n\nclass CTEField(CTERef):\n\n concrete = False\n is_relation = False\n\n def get_col(self, alias, output_field=None):\n output_field = output_field or self.output_field\n return Col(alias, self, output_field)\n\n @property\n def model(self):\n query = self._cte._queryset.query\n model = query.model\n if isinstance(model, CTEModel) and model._meta._cte() is self._cte:\n return model\n return CTEModel(self._cte, query)\n\n @property\n def column(self):\n return self.name\n\n def __getattr__(self, name):\n if name == \"attname\":\n # Do not allow column reference by concrete column name.\n # Prevents concrete name masking other CTE column alias.\n raise AttributeError(\"attname\")\n return getattr(self.output_field, name)\n","sub_path":"django_cte/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"363639696","text":"# encoding:utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport numpy as np\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters\nsequence_length = 28\ninput_size = 28\nhidden_size = 128\nnum_layers = 2\nnum_classes = 10\nbatch_size = 100\nnum_epochs = 2\nlearning_rate = 0.003\n\n# MNIST dataset\nx_train_path = '/home/zt/Documents/Data_Tensorflow_keras_sklearn/mnist/mnist/x_train.npy'\ny_train_path = 
'/home/zt/Documents/Data_Tensorflow_keras_sklearn/mnist/mnist/y_train.npy'\nx_test_path = '/home/zt/Documents/Data_Tensorflow_keras_sklearn/mnist/mnist/x_test.npy'\ny_test_path = '/home/zt/Documents/Data_Tensorflow_keras_sklearn/mnist/mnist/y_test.npy'\n\n\n\"\"\"直接读取numpy的数据类型\"\"\"\nclass NumpyDataset(Dataset):\n \"\"\"直接读取numpy的数据类型\n\n transforms.ToTensor():把一个取值范围是[0,255]的PIL.Image或者shape为(H,W,C)的numpy.ndarray,转换成形状为[C,H,W],取值范围是[0,1.0]的torch.FloadTensor\n transforms.ToPILImage():将shape为(C,H,W)的Tensor或shape为(H,W,C)的numpy.ndarray转换成PIL.Image\n \"\"\"\n def __init__(self, x_path, y_path,x_transform=None):\n super(NumpyDataset, self).__init__()\n self.x = np.load(x_path)\n self.y = np.load(y_path)\n\n def __getitem__(self, index):\n x,y = self.x[index],self.y[index]\n x = x.reshape(28, 28, 1)\n x = transforms.ToTensor()(x)\n\n return x, y.astype(np.int64)\n\n def __len__(self):\n return len(self.x)\n\ntrain_dataset = NumpyDataset(x_train_path, y_train_path)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=20, shuffle=True)\ntest_dataset = NumpyDataset(x_test_path, y_test_path)\ntest_loader = DataLoader(dataset=test_dataset, batch_size=20, shuffle=False)\n\n\n# Bidirectional recurrent neural network (many-to-one)\nclass BiRNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(BiRNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)\n self.fc = nn.Linear(hidden_size * 2, num_classes) # 2 for bidirection\n\n def forward(self, x):\n # Set initial states\n h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device) # 2 for bidirection\n c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)\n\n # Forward propagate LSTM\n out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\n\n # Decode the hidden state of the last time step\n out = self.fc(out[:, -1, :])\n return out\n\n\nmodel = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train the model\ntotal_step = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n\n# Test the model\nwith torch.no_grad():\n \"\"\"torch.no_grad不更新权重\"\"\"\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))\n\n# Save the model checkpoint\ntorch.save(model.state_dict(), 
'model.ckpt')","sub_path":"ch03_RNN/bidirectional_recurrent_neural_network.py","file_name":"bidirectional_recurrent_neural_network.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"63651833","text":"import pandas as pd\r\nimport os, glob, fnmatch\r\nimport json\r\nimport random\r\nfrom PIL import Image ,ImageFilter\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\n#import cv2\r\nimport time\r\nfrom IPython import display\r\nimport json\r\nimport time\r\nfrom glob import glob\r\nfrom websocket import create_connection\r\n\r\ngetModelsURL = \"ws://127.0.0.1:4649/GetModelsFilePath\"\r\ncreateModelURL = \"ws://127.0.0.1:4649/CreateModel\"\r\ndata_folder = os.path.join('data','raw')\r\nsynthetic_data_folder = os.path.join('data','synthetic')\r\nannotations_csv = os.path.join('data','raw','synthetic_annotations.csv')\r\ncategories_to_insert = ['ConcreteTruck']\r\nuse_seamless_clone = False\r\n\r\ndef get_model_list(model_name):\r\n ws = create_connection(getModelsURL)\r\n ws.send(model_name)\r\n modelFilePath = ws.recv()\r\n # print('Received models path:\"%s\"' % modelFilePath)\r\n ws.close()\r\n\r\n with open(modelFilePath, encoding='utf8') as f:\r\n models_list = f.readlines()\r\n models_list = [x.strip() for x in models_list]\r\n return models_list\r\n\r\n\r\ndef get_models_lists(models_to_insert):\r\n models = {}\r\n for model in models_to_insert:\r\n models[model] = get_model_list(model)\r\n\r\n return models\r\n\r\n\r\ndef generate_random_configuration(model_list):\r\n model_name = model_list[random.randint(0,len(model_list) - 1 )]\r\n # print(model_name)\r\n configuration = {'modelPath': model_name,\r\n 'lightColorA': 1,\r\n 'lightColorB': random.random()*.3 + .7,\r\n 'lightColorG': random.random()*.3 + .7,\r\n 'lightColorR': random.random()*.3 + .7,\r\n 'lightXRotation': random.random()* 360,\r\n 'lightYRotation': random.random()* 360,\r\n 'lightZRotation': random.random()* 360,\r\n 'xRotation': 0,\r\n 'yRotation': 0,\r\n 'zRotation': random.random()*20 -10}\r\n return configuration\r\n\r\ndef generate_random_imageWebsocket(images_list):\r\n configuration = generate_random_configuration(images_list)\r\n ws = create_connection(createModelURL)\r\n ws.send(json.dumps(configuration))\r\n im = ws.recv()\r\n ws.close()\r\n return im\r\n\r\n\r\ndef read_images_csv(csv_file):\r\n df_full = pd.read_csv(csv_file)\r\n df_full['FileName'] = df_full['FileName'].fillna(method='ffill')\r\n mask = pd.isnull(df_full.iloc[:, 1:]).all(axis=1)\r\n df_out = df_full.loc[~mask, :]\r\n\r\n return df_out\r\n\r\n\r\ndef rotate_and_scale_image(im_path, image_after_resize = (100 * 4, 100 * 3),blur_radius = 1):\r\n im_to_insert = Image.open(im_path)\r\n\r\n im_to_insert = im_to_insert.filter(ImageFilter.GaussianBlur(radius=blur_radius))\r\n angle = 90 - row['angle']\r\n im_to_insert = im_to_insert.rotate(angle, expand=True)\r\n angle = angle % 180\r\n rad_angle = angle / 180 * np.pi\r\n\r\n if rad_angle >= 0 and rad_angle <= np.pi / 2:\r\n rotate_resize = (\r\n np.cos(rad_angle) * image_after_resize[0] + np.sin(rad_angle) * image_after_resize[1],\r\n np.sin(rad_angle) * image_after_resize[0] + np.cos(rad_angle) * image_after_resize[1])\r\n else:\r\n rotate_resize = (\r\n np.sin(rad_angle - np.pi / 2) * image_after_resize[0] + np.cos(rad_angle - np.pi / 2) *\r\n image_after_resize[1],\r\n np.cos(rad_angle - np.pi / 2) * image_after_resize[0] + np.sin(rad_angle - np.pi / 2) *\r\n 
if rad_angle >= 0 and rad_angle <= np.pi / 2:\r\n        rotate_resize = (\r\n            np.cos(rad_angle) * image_after_resize[0] + np.sin(rad_angle) * image_after_resize[1],\r\n            np.sin(rad_angle) * image_after_resize[0] + np.cos(rad_angle) * image_after_resize[1])\r\n    else:\r\n        rotate_resize = (\r\n            np.sin(rad_angle - np.pi / 2) * image_after_resize[0] + np.cos(rad_angle - np.pi / 2) *\r\n            image_after_resize[1],\r\n            np.cos(rad_angle - np.pi / 2) * image_after_resize[0] + np.sin(rad_angle - np.pi / 2) *\r\n            image_after_resize[1])\r\n\r\n    im_to_insert = im_to_insert.resize(size=(int(rotate_resize[0]), int(rotate_resize[1])))\r\n    return im_to_insert\r\n\r\n\r\ndef create_metadata(file_name, category, mask, location_in_background):\r\n    min_max_x = np.array([np.min(np.where(mask.any(axis=0))), np.max(np.where(mask.any(axis=0)))]) + \\\r\n                location_in_background[0]\r\n    min_max_y = np.array([np.min(np.where(mask.any(axis=1))), np.max(np.where(mask.any(axis=1)))]) + \\\r\n                location_in_background[1]\r\n\r\n    addedAnnotation = {}\r\n    if min_max_y[0] < 0:\r\n        min_max_y[0] = 0\r\n    if min_max_y[1] < 0:\r\n        min_max_y[1] = 0\r\n    if min_max_x[0] < 0:\r\n        min_max_x[0] = 0\r\n    if min_max_x[1] < 0:\r\n        min_max_x[1] = 0\r\n\r\n    im = Image.open(file_name)\r\n    width, height = im.size\r\n\r\n    if min_max_y[0] > height:\r\n        min_max_y[0] = height-1\r\n    if min_max_y[1] > height:\r\n        min_max_y[1] = height-1\r\n    if min_max_x[0] > width:\r\n        min_max_x[0] = width-1\r\n    if min_max_x[1] > width:\r\n        min_max_x[1] = width-1\r\n\r\n    addedAnnotation['FileName'] = os.path.basename(file_name)\r\n    addedAnnotation['Entities EntityType'] = category\r\n    addedAnnotation['Entities P4 Y'] = min_max_y[0]\r\n    addedAnnotation['Entities P4 X'] = min_max_x[0]\r\n    addedAnnotation['Entities P3 Y'] = min_max_y[0]\r\n    addedAnnotation['Entities P3 X'] = min_max_x[1]\r\n    addedAnnotation['Entities P2 Y'] = min_max_y[1]\r\n    addedAnnotation['Entities P2 X'] = min_max_x[1]\r\n    addedAnnotation['Entities P1 Y'] = min_max_y[1]\r\n    addedAnnotation['Entities P1 X'] = min_max_x[0]\r\n    return addedAnnotation\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    subdirs = [o for o in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder,o))]\r\n    models_dict = get_models_lists(categories_to_insert)\r\n    df_spots = pd.read_csv(annotations_csv, index_col=0)  # pd.DataFrame.from_csv is deprecated\r\n    print(\"total items to insert: %d\" % df_spots.shape[0])\r\n\r\n    if not os.path.exists(synthetic_data_folder):\r\n        os.mkdir(synthetic_data_folder)\r\n\r\n    for subdir in subdirs:\r\n        synth_subdir = os.path.join(synthetic_data_folder, subdir)\r\n        if not os.path.exists(synth_subdir):\r\n            os.mkdir(synth_subdir)\r\n        images = glob(os.path.join(data_folder, subdir, \"*.jpg\"))\r\n        csv_file = os.path.join(data_folder, subdir, \"images.csv\")\r\n        addedAnnotations = []\r\n        for image in images:\r\n            # print('synthing image %s' % image)\r\n            df_image_spots = df_spots.loc[df_spots['filename'] == image, :]\r\n\r\n            background = Image.open(image)\r\n\r\n            if use_seamless_clone:\r\n                out_cv = np.array(background)\r\n                out_cv = out_cv[:, :, ::-1].copy()\r\n            else:\r\n                out = background.copy()\r\n\r\n            for index, row in df_image_spots.iterrows():\r\n                # pick a category\r\n                category = categories_to_insert[random.randint(0, len(categories_to_insert) - 1)]\r\n                im_path = generate_random_imageWebsocket(models_dict[category])\r\n                im_to_insert = rotate_and_scale_image(im_path, row['angle'])\r\n\r\n                # get mask from the alpha channel (RGBA)\r\n                mask = np.array(im_to_insert)[:, :, 3].copy()\r\n                mask[mask > 0] = 255\r\n\r\n                location_in_background = (int(round(row['center_x'] - im_to_insert.width / 2)),\r\n                                          int(round(row['center_y'] - im_to_insert.height / 2)))\r\n\r\n                addedAnnotations.append(create_metadata(image, category, mask, location_in_background))\r\n\r\n                if use_seamless_clone:\r\n                    cv_insert = np.array(im_to_insert)\r\n                    location_in_background_cv = (int(round(row['center_x'])), int(round(row['center_y'])))\r\n                    cv_insert = cv_insert[:, :, :3].copy()  # RGBA to RGB\r\n                    cv_insert = cv_insert[:, :, ::-1].copy()  # Convert RGB to BGR\r\n\r\n                    
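# cv2.seamlessClone's position argument is the CENTER of the pasted region in the destination image (not the top-left corner), hence the uncorrected center coordinates here.\r\n                    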
out_cv = cv2.seamlessClone(cv_insert, out_cv, mask, location_in_background_cv, cv2.NORMAL_CLONE)\r\n                    # out_cv = cv2.seamlessClone(cv_insert, out_cv, mask, location_in_background_cv, cv2.MIXED_CLONE)\r\n                else:\r\n                    out.paste(im_to_insert, location_in_background, im_to_insert)\r\n\r\n            time.sleep(1)  # to prevent the debris-bug\r\n            out_image_name = os.path.join(synth_subdir, os.path.basename(image))\r\n            if use_seamless_clone:\r\n                cv2.imwrite(out_image_name, out_cv)\r\n            else:\r\n                out.save(out_image_name)\r\n\r\n        df_annotations = read_images_csv(csv_file)\r\n        df_new_annotations = pd.DataFrame(addedAnnotations)\r\n        df_annotations = pd.concat([df_new_annotations, df_annotations], axis=0)\r\n        df_annotations = df_annotations.sort_values('FileName')\r\n        df_annotations.to_csv(os.path.join(synth_subdir, 'images.csv'), index=False)\r\n        print('%d items added to %s' % (df_new_annotations.shape[0], subdir))\r\n\r\n","sub_path":"create_synth_images.py","file_name":"create_synth_images.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"61515","text":"# -*- coding:utf-8 -*-\r\n# Anaconda 4.3.0 environment (with TensorFlow installed)\r\n\r\n\"\"\"\r\n    Change log\r\n    [17/10/14] : created\r\n    [17/11/19] : changed to inherit from the scikit-learn estimator base classes `BaseEstimator`, `ClassifierMixin`\r\n               : added the various abstract methods\r\n    [17/12/01] : added write_tensorboard_graph(...), which writes the file used to display the computation graph in TensorBoard\r\n    [18/02/12] : added save_model(...) and load_model(...) for saving and restoring the Variables of a trained NN model\r\n    [18/05/03] : changed to import the modules in the ./model folder\r\n               : renamed the checkpoint-file directory from ./model_session to ./_model_session\r\n    [xx/xx/xx] : \r\n\r\n\"\"\"\r\n\r\nfrom abc import ABCMeta, abstractmethod  # ABC classes used to build abstract classes\r\n\r\nimport os\r\n\r\n# scikit-learn related\r\nfrom sklearn.base import BaseEstimator  # base class of scikit-learn estimators; defines get_params() and set_params().\r\nfrom sklearn.base import ClassifierMixin  # mix-in base class of scikit-learn estimators; defines score().\r\n\r\n
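# NOTE (added): this module is written against the TensorFlow 1.x API (tf.Session, tf.train.Saver, tf.summary.FileWriter); it will not run unmodified on TensorFlow 2.x.\r\n\r\n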
# TensorFlow library\r\nimport tensorflow as tf\r\nfrom tensorflow.python.framework import ops\r\n\r\n# own modules\r\nfrom model.NNActivation import NNActivation  # class representing a neural-network activation function\r\nfrom model.NNActivation import Sigmoid\r\nfrom model.NNActivation import Relu\r\nfrom model.NNActivation import Softmax\r\n\r\nfrom model.NNLoss import NNLoss  # class representing a neural-network loss function\r\nfrom model.NNLoss import L1Norm\r\nfrom model.NNLoss import L2Norm\r\nfrom model.NNLoss import BinaryCrossEntropy\r\nfrom model.NNLoss import CrossEntropy\r\nfrom model.NNLoss import SoftmaxCrossEntropy\r\nfrom model.NNLoss import SparseSoftmaxCrossEntropy\r\n\r\nfrom model.NNOptimizer import NNOptimizer  # class representing a neural-network optimization algorithm (Optimizer)\r\nfrom model.NNOptimizer import GradientDecent\r\nfrom model.NNOptimizer import GradientDecentDecay\r\nfrom model.NNOptimizer import Momentum\r\nfrom model.NNOptimizer import NesterovMomentum\r\nfrom model.NNOptimizer import Adagrad\r\nfrom model.NNOptimizer import Adadelta\r\nfrom model.NNOptimizer import Adam\r\n\r\n\r\nclass NeuralNetworkBase( BaseEstimator, ClassifierMixin ):\r\n    \"\"\"\r\n    Base class for neural networks (own implementation).\r\n    Inherits from the scikit-learn estimator base classes BaseEstimator and ClassifierMixin.\r\n    Uses the TensorFlow library.\r\n    An abstract class made of virtual methods that frame the basic workflow of a neural network;\r\n    concrete neural-network classes are expected to inherit from this class and override them.\r\n    \r\n    ----------------------------------------------------------------------------------------------------\r\n    [public] publicly accessible instance variables carry, by convention, a trailing underscore _ in their names.\r\n        _session : tf.Session()\r\n            the object's own Session\r\n        _init_var_op : tf.global_variables_initializer()\r\n            operator that initializes all Variables\r\n\r\n        _loss_op : Operator\r\n            operator representing the loss function\r\n        _optimizer : Optimizer\r\n            the model's optimization algorithm\r\n        _train_step : \r\n            the training step\r\n        _y_out_op : Operator\r\n            operator holding the model's output\r\n\r\n        _model_saver : tf.train.Saver object\r\n            the model's saver, used when saving the model.\r\n\r\n    [protected] intended for protected use\r\n\r\n    [private] variable names get a leading double underscore __ (Python convention)\r\n\r\n    \"\"\"\r\n\r\n    def __init__( self, session = tf.Session() ):\r\n        \"\"\"\r\n        Constructor (strictly speaking, the initializer)\r\n        \"\"\"\r\n        # initialize member variables\r\n        self._session = session\r\n        self._init_var_op = None\r\n\r\n        self._loss_op = None\r\n        self._optimizer = None\r\n        self._train_step = None\r\n        self._y_out_op = None\r\n\r\n        self._model_saver = None\r\n\r\n        return\r\n\r\n\r\n    def print( self, text ):\r\n        print( \"NeuralNetworkBase\" )\r\n        print( self )\r\n        print( text )\r\n\r\n        print( \"_session : \\n\", self._session )\r\n        print( \"_init_var_op : \\n\", self._init_var_op )\r\n\r\n        print( \"_y_out_op :\", self._y_out_op )\r\n\r\n        print( \"_loss_op :\", self._loss_op )\r\n        print( \"_optimizer :\", self._optimizer )\r\n        print( \"_train_step :\", self._train_step )\r\n\r\n        return\r\n\r\n\r\n    @abstractmethod\r\n    def model( self ):\r\n        \"\"\"\r\n        Defines the model and sets the operator holding its final output, self._y_out_op.\r\n        (abstract method)\r\n\r\n        [Output]\r\n            self._y_out_op : Operator\r\n                operator holding the model's output\r\n        \"\"\"\r\n        return self._y_out_op\r\n\r\n    \r\n    @abstractmethod\r\n    def loss( self, nnLoss ):\r\n        \"\"\"\r\n        Defines the loss (error / cost) function.\r\n        (abstract method)\r\n\r\n        [Input]\r\n            nnLoss : an object of the NNLoss class\r\n        \r\n        [Output]\r\n            self._loss_op : Operator\r\n                operator representing the loss function\r\n        \"\"\"\r\n        return self._loss_op\r\n\r\n    \r\n    @abstractmethod\r\n    def optimizer( self, nnOptimizer ):\r\n        \"\"\"\r\n        Sets the model's optimization algorithm. (abstract method)\r\n\r\n        [Input]\r\n            nnOptimizer : an object of the NNOptimizer class\r\n\r\n        [Output]\r\n            the optimizer's train_step\r\n\r\n        \"\"\"\r\n        return self._train_step\r\n\r\n    \r\n    @abstractmethod\r\n    def fit( self, X_train, y_train ):\r\n        \"\"\"\r\n        Fits the model on the given training data. (abstract method)\r\n\r\n        [Input]\r\n            X_train : numpy.ndarray ( shape = [n_samples, n_features] )\r\n                training data (feature matrix)\r\n        \r\n            y_train : numpy.ndarray ( shape = [n_samples] )\r\n                list of class labels (teacher data) for the training data\r\n\r\n        [Output]\r\n            self : this object\r\n        \"\"\"\r\n        return self\r\n\r\n\r\n    @abstractmethod\r\n    def predict( self, X_features ):\r\n        \"\"\"\r\n        Runs inference with the fitted model and returns the predicted values. (abstract method)\r\n\r\n        [Input]\r\n            X_features : numpy.ndarray ( shape = [n_samples, n_features] )\r\n                feature matrix to predict on\r\n\r\n        [Output]\r\n            results : numpy.ndarray ( shape = [n_samples] )\r\n                prediction results (class labels for a classification model)\r\n        \"\"\"\r\n        return\r\n\r\n\r\n    @abstractmethod\r\n    def predict_proba( self, X_test ):\r\n        \"\"\"\r\n        Runs inference with the fitted model and returns the predicted class-membership probabilities. (abstract method)\r\n        proba : probability\r\n\r\n        [Input]\r\n            X_test : numpy.ndarray ( shape = [n_samples, n_features] )\r\n                feature matrix to predict on\r\n        \"\"\"\r\n        return\r\n\r\n    @abstractmethod\r\n    def accuracy( self, X_test, y_test ):\r\n        \"\"\"\r\n        Computes the accuracy on the given data.\r\n        \"\"\"\r\n        return\r\n\r\n    @abstractmethod\r\n    def accuracy_labels( self, X_test, y_test ):\r\n        \"\"\"\r\n        Computes the per-label accuracy on the given data.\r\n        \"\"\"\r\n        return\r\n\r\n\r\n    def write_tensorboard_graph( self, dir = \"./TensorBoard\" ):\r\n        \"\"\"\r\n        Writes the file used to display the computation graph in TensorBoard.\r\n        [Input]\r\n            dir : str\r\n                path of the directory in which the TensorBoard files are created\r\n        \"\"\"\r\n        # create the files (folder) for TensorBoard\r\n        merged = tf.summary.merge_all()  # Add summaries to tensorboard\r\n        summary_writer = tf.summary.FileWriter( dir, graph = self._session.graph )  # tensorboard --logdir=${PWD}\r\n        \r\n        return\r\n\r\n\r\n    def save_model( self, dir = \"./_model_session\", file_name = \"model_variables\", saver = None, global_step = None ):\r\n        \"\"\"\r\n        Saves the Variables (weights etc.) of a trained NN model.\r\n        [Input]\r\n\r\n        [Note]\r\n            The number of variables saved must match the number of variables declared before tf.train.Saver() is called.\r\n        \"\"\"\r\n        # create the output directory\r\n        if ( os.path.isdir( dir ) == False ):\r\n            os.makedirs( dir )\r\n\r\n        # create a tf.train.Saver() object if one has not been created yet\r\n        if ( (saver == None) & (self._model_saver == None ) ):\r\n            self._model_saver = tf.train.Saver()\r\n\r\n        # save\r\n        self._model_saver.save( \r\n            self._session, \r\n            os.path.join( dir, file_name ), \r\n            global_step = global_step \r\n        )\r\n\r\n        print( \"save model data at : %s\" % os.path.join( dir, file_name ) )\r\n\r\n        return\r\n\r\n\r\n    def load_model( self, dir = \"./_model_session\", file_name = \"model_variables\", saver = None ):\r\n        \"\"\"\r\n        Loads the previously saved Variables (weights etc.) of a trained NN model.\r\n        \"\"\"\r\n        check_point = tf.train.get_checkpoint_state( dir )\r\n        if ( (os.path.isdir( dir ) == False ) | ( check_point == None ) ):\r\n            print( \"error : file is not found at : %s\" % os.path.join( dir, file_name ) )\r\n        \r\n        else:\r\n            # create a tf.train.Saver() object if one has not been created yet\r\n            if ( (saver == None) & (self._model_saver == None ) ):\r\n                self._model_saver = tf.train.Saver()\r\n\r\n            self._model_saver.restore( self._session, os.path.join( dir, file_name ) )\r\n            print( \"load model data from : %s\" % os.path.join( dir, file_name ) )\r\n\r\n        return\r\n","sub_path":"ObjectDetection_SSD_TensorFlow/model/NeuralNetworkBase.py","file_name":"NeuralNetworkBase.py","file_ext":"py","file_size_in_byte":10797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"432931293","text":"import numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom misc 
import load_data\n\n\nclass EvaluationFunction(object):\n def __init__(self, classifier, k_folds=3, complexity_penalty=0.05):\n self._classifier = classifier\n self._n_splits = k_folds\n self._complexity_penalty = complexity_penalty\n\n @staticmethod\n def kfold_train_and_predict(X, Y, classifier, n_splits=5, indices=None, features=None):\n if indices is None:\n indices = np.array(list(range(X.shape[0])))\n if features is None:\n features = np.array(list(range(X.shape[1])))\n\n features = np.array(list(features))\n kfolds = KFold(n_splits=n_splits, random_state=42)\n scores = []\n\n for train_index, test_index in kfolds.split(X, Y):\n train_index = indices[train_index].astype(\"int\")\n test_index = indices[test_index].astype(\"int\")\n\n X_train = X[train_index, :][:, features]\n y_train = Y[train_index]\n X_test = X[test_index, :][:, features]\n y_test = Y[test_index]\n\n classifier.fit(X_train, y_train)\n scores += [classifier.score(X_test, y_test)]\n\n scores = np.array(scores)\n\n return np.mean(scores), np.std(scores)\n\n def evaluate_feature_set_size_penalty(self, X, Y, indices, feature_set):\n # Balances the accuracy achieved with a set with the size of the set.\n accuracy, stdev = self.kfold_train_and_predict(X, Y, self._classifier, self._n_splits, indices, feature_set)\n score = accuracy + self._complexity_penalty * (1. - float(len(feature_set)) / X.shape[1])\n return score\n\n\nclass SequentialFeatureSelector(object):\n def __init__(self, X, Y, evaluation_function):\n if X.shape[0] != len(Y):\n raise ValueError(\"X and Y must have have same amount of samples\")\n self._X = X\n self._Y = Y\n self._evaluation_function = evaluation_function\n\n @staticmethod\n def __apply_operation_to_feature_set(feature_set, feature_id, operation):\n assert isinstance(feature_set, set)\n assert operation in [-1, 1]\n\n feature_set = set(feature_set)\n\n if operation == 1:\n if feature_id in feature_set:\n print(\"Warning: adding of feature {}: feature is already present in feature set {}\".format(\n feature_id, str(feature_set)))\n else:\n feature_set.add(feature_id)\n else:\n if feature_id not in feature_set:\n print(\"Warning: removing feature {}: feature is not present in feature set {}\".format(\n feature_id, str(feature_set)))\n else:\n feature_set.remove(feature_id)\n\n return feature_set\n\n def run(self, **kwargs):\n return self.__sequential_feature_selection(**kwargs)\n\n def __sequential_feature_selection(self, k_best=None, overshoot=3, epsilon=0.):\n n_features = self._X.shape[1]\n n_samples = self._X.shape[0]\n\n if n_samples != len(self._Y):\n raise AttributeError(\"Y must have the same length as X has rows (n_samples)\")\n\n indices = np.arange(n_samples)\n\n initial_features = set([])\n remaining_features = set(list(np.arange(n_features)))\n current_features = initial_features\n\n add_to_set_operation = 1\n remove_from_set_operation = - add_to_set_operation\n\n score_of_current_set = float(\"-inf\")\n overall_best_score = score_of_current_set\n overall_best = initial_features\n\n best_not_changed_in = 0\n\n while (best_not_changed_in <= overshoot) or (k_best is not None and len(overall_best) < k_best):\n print(\"current best feature set {}\".format(str(overall_best)))\n\n score_of_best_feat_to_modify = float(\"-inf\")\n best_feat_to_modify = None\n\n # determine which features to look at in this iteration\n look_at = set(remaining_features)\n\n for index, feature in enumerate(look_at):\n if index % 1000 == 0:\n print(\"Iteration: \", index)\n\n new_feature_set = 
self.__apply_operation_to_feature_set(current_features, feature, add_to_set_operation)\n\n                if len(new_feature_set) == 0:\n                    continue\n\n                score_with_new_set = self._evaluation_function(self._X, self._Y, indices, new_feature_set)\n\n                if score_with_new_set > score_of_best_feat_to_modify:\n                    best_feat_to_modify = feature\n                    score_of_best_feat_to_modify = score_with_new_set\n\n            if best_feat_to_modify is not None:\n                remaining_features = self.__apply_operation_to_feature_set(remaining_features, best_feat_to_modify,\n                                                                           remove_from_set_operation)\n                current_features = self.__apply_operation_to_feature_set(current_features, best_feat_to_modify,\n                                                                         add_to_set_operation)\n                score_of_current_set = score_of_best_feat_to_modify\n\n                print(\"curr set is now: {}\".format(str(current_features)))\n\n            print(\"local best score is {}, overall best score is {}\".format(score_of_current_set, overall_best_score))\n\n            if score_of_current_set > (overall_best_score - epsilon):\n                overall_best_score = score_of_current_set\n                best_not_changed_in = 0\n                overall_best = current_features\n            else:\n                best_not_changed_in += 1\n                print(\"best set has not changed in {} iterations\".format(best_not_changed_in))\n\n        return np.sort(list(overall_best)).astype(\"int\"), overall_best_score\n\n\nif __name__ == \"__main__\":\n    X_train, y_train, X_valid, y_valid = load_data()\n\n    knn_clf = KNeighborsClassifier(n_jobs=-1)\n    eval_function = EvaluationFunction(knn_clf, 3)\n\n    feature_selector = SequentialFeatureSelector(X_train, y_train,\n                                                 eval_function.evaluate_feature_set_size_penalty)\n    selected_features, score = feature_selector.run(k_best=30)\n\n    with open(\"sfs_output.txt\", \"w\") as file:\n        file.write(np.array2string(selected_features, separator='\\n').strip(\"[]\").strip())\n","sub_path":"feature-selection/sequential_feature_forward_selection.py","file_name":"sequential_feature_forward_selection.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"640329053","text":"import random\n\n\nclass Word:\n    \"\"\"A successor word together with its transition count.\"\"\"\n    def __init__(self, word, tu=1):\n        self.word = word\n        self.tu = tu\n\n    def incr(self):\n        self.tu += 1\n\n\ndef linear_search(arr, item):\n    \"\"\"Return the index of item in arr, or -1 if it is absent.\"\"\"\n    for i in range(len(arr)):\n        if arr[i] == item:\n            return i\n    return -1\n\n\nprint(\"rendering punctuation\")\npunctuation = ['.', ',', \"?\", \"!\", \";\", \":\", \"'\", \"\\\"\"]\nraw_lines = open(\"text.txt\", \"r\").read().split('\\n')\nlines = []\nfor raw_line in raw_lines:\n    line = \"\"\n    for chtr in raw_line.split():\n        if linear_search(punctuation, chtr) < 0:\n            line += chtr + \" \"\n        else:\n            # attach a punctuation token to the preceding word\n            line = line.rstrip() + chtr + \" \"\n    lines.append(line)\n\nprint(\"rendering text\")\n\nwords = []\nrendered_text = []\nfor line in lines:\n    for word in line.split(\" \"):\n        if word != \"\":\n            if linear_search(words, word) < 0:\n                words.append(word)\n            rendered_text.append(word)\n\nprint(\"indexing dictionary\")\n# map each distinct word to the list of words that follow it, with transition counts\ndata = {}\ncounter = 0\ntotal_counter = len(words)\nlast_percent = 0\nfor word in words:\n    word_list = []\n    for i in range(len(rendered_text) - 1):\n        if rendered_text[i] == word:\n            for entry in word_list:\n                if entry.word == rendered_text[i + 1]:\n                    entry.incr()\n                    break\n            else:\n                word_list.append(Word(rendered_text[i + 1]))\n    data[word] = word_list\n\n    counter += 1\n    if round(100 * counter / total_counter) > last_percent + 9:\n        last_percent = round(100 * counter / total_counter)\n        print(\"%d%%\" % last_percent)\n\nprint(\"generating output\")\nlenn = 1000\noutput = \"\"\nkeyword = rendered_text[0]\nwhile True:\n    word_list = data.get(keyword, [])\n    if not word_list:\n        break\n\n    # weighted random choice of the successor, proportional to its count\n    total = 0\n    for w in word_list:\n        total += w.tu\n    num = random.randint(1, total)\n    nextt = \"\"\n    for w in word_list:\n        if num <= w.tu:\n            nextt = w.word\n            break\n        num -= w.tu\n    output += keyword + \" \"\n    keyword = nextt\n    if len(output) > lenn:\n        break\nprint(output)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"302138719","text":"\"\"\"Some simple tests for practracker metrics\"\"\"\n\nimport unittest\n\nimport StringIO\n\nimport metrics\n\nfunction_file = \"\"\"static void\nfun(directory_request_t *req, const char *resource)\n{\n  time_t if_modified_since = 0;\n  uint8_t or_diff_from[DIGEST256_LEN];\n}\n\nstatic void\nfun(directory_request_t *req,\n    const char *resource)\n{\n  time_t if_modified_since = 0;\n  uint8_t or_diff_from[DIGEST256_LEN];\n}\n\nMOCK_IMPL(void,\nfun,(\n    uint8_t dir_purpose,\n    uint8_t router_purpose,\n    const char *resource,\n    int pds_flags,\n    download_want_authority_t want_authority))\n{\n  const routerstatus_t *rs = NULL;\n  const or_options_t *options = get_options();\n}\n\"\"\"\n\nclass TestFunctionLength(unittest.TestCase):\n    def test_function_length(self):\n        funcs = StringIO.StringIO(function_file)\n        # All functions should have length 2\n        for name, lines in metrics.function_lines(funcs):\n            self.assertEqual(name, \"fun\")\n\n        funcs.seek(0)\n\n        for name, lines in metrics.function_lines(funcs):\n            self.assertEqual(lines, 2)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"src/tor/scripts/maint/practracker/practracker_tests.py","file_name":"practracker_tests.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"286707859","text":"# File: interpret_cls_library.py\n# Encoding: UTF-8\n# Project: IIP project\n# Author: Jakub Frýz (xfryzj01)\n\nclass InstructionLibrary:\n    # entries with a single operand are written as one-element tuples ('x',);\n    # a bare ('x') would be a plain string, not a tuple\n    library = {\n        # frame operations, function calls\n        'MOVE' : ('v', 's'),\n        'CREATEFRAME' : (),\n        'PUSHFRAME' : (),\n        'POPFRAME' : (),\n        'DEFVAR' : ('v',),\n        'CALL' : ('l',),\n        'RETURN': (),\n        # data-stack operations\n        'PUSHS' : ('s',),\n        'POPS' : ('v',),\n        # arithmetic, relational, boolean and conversion instructions\n        'ADD' : ('v', 's', 's'),\n        'SUB' : ('v', 's', 's'),\n        'MUL' : ('v', 's', 's'),\n        'IDIV' : ('v', 's', 's'),\n        'LT' : ('v', 's', 's'),\n        'GT' : ('v', 's', 's'),\n        'EQ' : ('v', 's', 's'),\n        'AND' : ('v', 's', 's'),\n        'OR' : ('v', 's', 's'),\n        'NOT' : ('v', 's'),\n        'INT2CHAR' : ('v', 's'),\n        'STRI2INT' : ('v', 's', 's'),\n        # input/output instructions\n        'READ' : ('v', 't'),\n        'WRITE' : ('s',),\n        # string operations\n        'CONCAT' : ('v', 's', 's'),\n        'STRLEN' : ('v', 's'),\n        'GETCHAR' : ('v', 's', 's'),\n        'SETCHAR' : ('v', 's', 's'),\n        # type operations\n        'TYPE' : ('v', 's'),\n        # program-flow control instructions\n        'LABEL' : ('l',),\n        'JUMP' : ('l',),\n        'JUMPIFEQ' : ('l', 's', 's'),\n        'JUMPIFNEQ' : ('l', 's', 's'),\n        # debugging instructions\n        'DPRINT' : ('s',),\n        'BREAK' : ()\n    }\n\n
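    # Usage sketch (added; operand codes assumed to follow the IPPcode convention of this project:\n    # v = variable, s = symbol, l = label, t = type), e.g. InstructionLibrary().instr_arg_count('MOVE') == 2.\n\n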
    def instr_exists(self, instruction):\n        return instruction.upper() in self.library\n\n    def instr_arg_count(self, instruction):\n        return len(self.library[instruction])\n\n    def instr_argno_type(self, instruction, argno):\n        return self.library[instruction][argno]\n\n    def get_instructions(self):\n        return self.library.keys()\n","sub_path":"6. semestr/IPP/interpret_cls_library.py","file_name":"interpret_cls_library.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"299878181","text":"# -*- coding: utf-8 -*-\n# Log analysis\n\ndef testlongchuant():\n\n    import datetime\n    line = '''183.60.212.153 - - [19/Feb/2013:10:23:29 +0800] \\\n    \"GET /o2o/media.html?menu=3 HTTP/1.1\" 200 16691 \"-\" \\\n    \"Mozilla/5.0 (compatible; EasouSpider; +http://www.easou.com/search/spider.html)\"'''\n    CHARS = set(' \\'\"[]')\n    # print(CHARS)\n    def makekey(line: str):  # split the raw log line into fields, honouring [...] and \"...\" groups\n        start = 0\n        flag = False\n        stopchar = ''\n        for i, c in enumerate(line):\n            if c in CHARS:\n                if c == '[':\n                    flag = True\n                    start = i + 1\n                if c == ']':\n                    flag = False\n                if c == '\"':\n                    flag = not flag\n                    if flag:\n                        start = i + 1\n                if flag:\n                    continue\n                if start == i:\n                    start = i + 1\n                    continue\n                yield line[start:i]\n                start = i + 1\n        else:\n            if start < len(line):\n                yield line[start:]\n    print(list(makekey(line)))\n\n    names = ('remote', '', '', 'datetime', 'request','status', 'length', '', 'useragent')\n\n    ops = (None, None, None, lambda timestr: datetime.datetime.strptime(timestr, '%d/%b/%Y:%H:%M:%S %z'),\n       lambda request: dict(zip(['method', 'url', 'protocol'], request.split())),\n       int, int, None, None)\n\n\n    def extract(line: str):\n        return dict(\n            map(\n                lambda item: (item[0], item[2](item[1]) if item[2] is not None else item[1]),\n                zip(names, makekey(line), ops)\n            )\n        )\n\n\n    print(extract(line))\n\n\nimport datetime\nimport re\nline = '''183.60.212.153 - - [19/Feb/2019:10:23:29 +0800] \\\n\"GET /o2o/media.html?menu=3 HTTP/1.1\" 200 16691 \"-\" \\\n\"Mozilla/5.0 (compatible; EasouSpider; +http://www.easou.com/search/spider.html)\"'''\n\nops = {\n    'datetime': lambda timestr: datetime.datetime.strptime(timestr, '%d/%b/%Y:%H:%M:%S %z'),\n    'status': int,\n    'length': int\n}\n\n# named groups reconstructed (they were stripped during extraction): remote, datetime, method, url, protocol, status, length, useragent\npattern = '''(?P<remote>[\\d.]{7,}) - - \\[(?P<datetime>[/\\w +:]+)\\] \\\n\"(?P<method>\\w+) (?P<url>\\S+) (?P<protocol>[\\w/\\d.]+)\" \\\n(?P<status>\\d+) (?P<length>\\d+) .+ \"(?P<useragent>.+)\"'''\n\n\n\ndef extract(line:str) -> dict:  # take the regex named groups as a dict, converting each value with ops[k] when present, otherwise keeping it unchanged\n    regex = re.compile(pattern)\n    matcher = regex.match(line)\n    # print(matcher.groupdict().items())\n    if matcher:\n        return {k:ops.get(k, lambda x:x)(v) for k,v in matcher.groupdict().items()}\n    else:\n        return 'error'\n    \n# print(extract(line))\n\n\ndef load(filename:str):  # read the log file line by line and extract the fields with the regex\n    with open(filename, encoding='utf-8') as f:\n        for line in f:\n            # print(line)\n            # print(type(line))\n            # print(extract(line))\n            fields = extract(line)\n            if isinstance(fields, (dict,)):\n                yield fields\n            else:\n                print('No match.{}'.format(fields))\n\nfor x in load(r'E:\\python-mage\\mag363\\configurefile\\test.log'):\n    print(x)\n\n","sub_path":"mag363/Completed/200829_logfenxi.py","file_name":"200829_logfenxi.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"494987256","text":"# -- coding: utf-8 --\r\nimport pytesseract\r\nfrom PIL import Image\r\nimport os\r\nimport xlwt\r\nimport argparse\r\n\r\n# import future_builtins\r\n\r\n# import sys\r\n# reload(sys)\r\n# sys.setdefaultencoding('utf8')\r\n\r\ntesseract_dir = 'C:/Program Files (x86)/Tesseract-OCR/tesseract'\r\nimg_dir = 'sampleImg'  # directory containing the images\r\nresult_list = []  # results recognized by the OCR pass\r\n\r\n\r\ndef convert_to_excel():\r\n    print('converting to excel file')\r\n    book = xlwt.Workbook(encoding='utf-8', style_compression=0)\r\n    sheet = book.add_sheet('Result', cell_overwrite_ok=True)\r\n    first_col = sheet.col(0)\r\n    second_col = sheet.col(1)\r\n    first_col.width = 256 * 30\r\n    second_col.width = 256 * 20\r\n    sheet.write(0, 0, '企业名称')\r\n    sheet.write(0, 1, '企业注册号')\r\n    for i in range(0, len(result_list)):\r\n        sheet.write(i + 1, 0, result_list[i][1])\r\n        sheet.write(i + 1, 1, result_list[i][0])\r\n    if os.path.exists('./result.xls'):\r\n        os.remove('./result.xls')\r\n    book.save('./result.xls')\r\n    print(\"Finished\")\r\n\r\n\r\ndef compare(list1, list2):\r\n    for i in range(0, len(list1)):\r\n        if list1[i] != list2[i]:\r\n            return False\r\n    return True\r\n\r\n\r\ndef ocr(img_file):  # crop a single image and run recognition on the crops\r\n
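    # config=\"--psm 7\" below selects Tesseract page-segmentation mode 7, i.e. treat the cropped strip as a single line of text.\r\n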
    pytesseract.pytesseract.tesseract_cmd = tesseract_dir\r\n    img = Image.open(img_file)\r\n    box1 = (0, 0, 1200, 40)\r\n    img_one = img.crop(box1)\r\n    result = pytesseract.image_to_string(img_one, lang='chi_sim', config=\"--psm 7\")\r\n    if result.find(\"企 业\") == -1:\r\n        print(img_file + \": fail on this img\")\r\n        return\r\n    box2 = (0, 40, 1200, 80)\r\n    img_two = img.crop(box2)\r\n    result = result + pytesseract.image_to_string(img_two, lang='chi_sim', config=\"--psm 7\")\r\n    print(result)\r\n    ocr_rs = []\r\n    index1 = result.find(':')\r\n    index2 = result.rfind(':')\r\n    ocr_rs.append(result[index1 + 1:index1 + 18].replace(' ', ''))  # the business registration number is 18 digits\r\n    ocr_rs.append(result[index2 + 1:].replace(' ', ''))\r\n    result_list.append(ocr_rs)\r\n\r\n\r\ndef sort_file(file_name):\r\n    file = file_name.split('.')\r\n    if file[0].find('png') != -1:\r\n        return int(file_name[0:2])\r\n    return int(file[0])\r\n\r\n\r\ndef batch_process():  # process the images one after another\r\n    root_dir = './' + img_dir + '/'\r\n    file_list = os.listdir(root_dir)\r\n\r\n    for file in sorted(file_list, key=sort_file):\r\n        print(\"deal with \" + file)\r\n        ocr(root_dir + file)\r\n\r\n\r\nimport threadpool\r\nfrom multiprocessing import cpu_count\r\nmaximum_threads = cpu_count()  # maximum number of worker threads\r\n\r\n\r\ndef batch_with_multithreading():\r\n    root_dir = './' + img_dir + '/'\r\n    file_list = os.listdir(root_dir)\r\n    file_list = sorted(file_list, key=sort_file)\r\n    for i in range(len(file_list)):\r\n        file_list[i] = root_dir + file_list[i]\r\n\r\n    pool = threadpool.ThreadPool(maximum_threads)\r\n    requests = threadpool.makeRequests(ocr, file_list)\r\n    [pool.putRequest(req) for req in requests]\r\n    pool.wait()\r\n\r\n\r\nimport time\r\n\r\nif __name__ == '__main__':\r\n    start_time = time.time()\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('-dir', type=str, help='specify the img directory')\r\n    args = parser.parse_args()\r\n    if args.dir is not None:\r\n        img_dir = args.dir\r\n    else:\r\n        print(\"Using the default directory sampleImg. You can use -dir to specify the img directory\")\r\n\r\n    ocr_start_time = time.time()\r\n\r\n    # batch_process()\r\n    batch_with_multithreading()\r\n\r\n    ocr_end_time = time.time()\r\n    print(\"OCR time: %f s\" % (ocr_end_time - ocr_start_time))\r\n\r\n    write_start_time = time.time()\r\n    convert_to_excel()\r\n    write_end_time = time.time()\r\n    print(\"xls write time: %f s\" % (write_end_time - write_start_time))\r\n\r\n    end_time = time.time()\r\n    print(\"Total program time: %f s\" % (end_time - 
start_time))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"516860402","text":"import torch\nimport numpy as np\n\n\ndef tit_for_tat(agent1, agent2, env, log, tb_writer, args, iteration):\n # Log tit-for-tat\n for key, value in env.states_dict.items():\n # Agent 1\n cooperate_prob = agent1.get_action_prob(value[0])\n log[args.log_name].info(\n \"Agent1 at {}: Cooperate prob {:.2f} at iteration {}\".format(key, cooperate_prob, iteration))\n tb_writer.add_scalars(\"debug/\" + key, {\"agent1\": cooperate_prob}, iteration)\n\n # Agent 2\n cooperate_prob = agent2.get_action_prob(value[1])\n log[args.log_name].info(\n \"Agent2 at {}: Cooperate prob {:.2f} at iteration {}\".format(key, cooperate_prob, iteration))\n tb_writer.add_scalars(\"debug/\" + key, {\"agent2\": cooperate_prob}, iteration)\n\n\ndef evaluate(agent1, agent2, env, args):\n actor1, critic1 = agent1.actor, agent1.critic\n actor2, critic2 = agent2.actor, agent2.critic\n\n score1, score2 = 0., 0.\n obs1, obs2 = env.reset()\n for timestep in range(args.ep_max_timesteps):\n # Get actions\n action1, logprop1, value1 = agent1.act(obs1, actor1, critic1)\n action2, logprop2, value2 = agent2.act(obs2, actor2, critic2)\n \n # Take step in the environment\n (next_obs1, next_obs2), (reward1, reward2), _, _ = env.step((action1, action2))\n\n # For next timestep\n obs1, obs2 = next_obs1, next_obs2\n\n # Accumulate reward\n score1 += np.mean(reward1) / float(args.ep_max_timesteps)\n score2 += np.mean(reward2) / float(args.ep_max_timesteps)\n\n return score1, score2\n\n\ndef train(agent1, agent2, env, log, tb_writer, args):\n for iteration in range(200):\n # Copy other agent's parameters\n actor2_ = torch.tensor(agent2.actor.detach(), requires_grad=True)\n critic2_ = torch.tensor(agent2.critic.detach(), requires_grad=True)\n\n actor1_ = torch.tensor(agent1.actor.detach(), requires_grad=True)\n critic1_ = torch.tensor(agent1.critic.detach(), requires_grad=True)\n\n # Perform inner-loop update\n for _ in range(args.n_lookahead):\n actor2_ = agent1.in_lookahead(\n opponent_actor=actor2_, \n opponent_critic=critic2_)\n actor1_ = agent2.in_lookahead(\n opponent_actor=actor1_, \n opponent_critic=critic1_)\n\n # Perform outer-loop update\n agent1.out_lookahead(actor2_, critic2_)\n agent2.out_lookahead(actor1_, critic1_)\n score1, score2 = evaluate(agent1, agent2, env, args)\n\n # Log performance\n if iteration % 10 == 0:\n log[args.log_name].info(\"At iteration {}, returns: {:.3f}, {:.3f}\".format(\n iteration, score1, score2))\n tb_writer.add_scalars(\"train_reward\", {\"agent1\": score1}, iteration)\n tb_writer.add_scalars(\"train_reward\", {\"agent2\": score2}, iteration)\n \n tit_for_tat(agent1, agent2, env, log, tb_writer, args, iteration)\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"163852729","text":"import unittest\nimport os\nimport threading\nfrom datetime import datetime\nimport pandas\nimport cv2\nimport numpy\n\nfrom src.attention_calculator import AttentionCalc\nfrom src.class_details import ClassDetails\nfrom src.frame_saver import FrameSaver\nfrom src.statistics_data_loader import Statistics\nfrom src.face_detector import FaceDetector\nfrom src.frame_provider import FrameProvider\n\n\nclass FrameProviderTestCase(unittest.TestCase):\n fp = 
FrameProvider(0)\n\n def test_get_frame(self):\n frame = self.fp.get_frame()\n expected_type = type(numpy.ndarray([]))\n\n self.assertEqual(expected_type, type(frame))\n\n\nclass FaceDetectorTestCase(unittest.TestCase):\n fd = FaceDetector()\n\n main_path = os.path.dirname(os.getcwd())\n img_with_face = cv2.imread(main_path + \"\\public\\\\test_img\\with_face.jpg\")\n img_without_face = cv2.imread(main_path + \"\\public\\\\test_img\\without_face.jpg\")\n\n def test_get_face(self):\n result = self.fd.get_face(self.img_with_face)\n expected = True\n\n self.assertEqual(expected, len(result) > 0)\n\n def test_get_no_face(self):\n result = self.fd.get_face(self.img_without_face)\n expected = 0\n\n self.assertEqual(expected, len(result))\n\n def test_has_face(self):\n result = self.fd.has_face(self.img_with_face)\n expected = True\n self.assertEqual(expected, len(result) > 0)\n\n def test_has_no_face(self):\n result = self.fd.has_face(self.img_without_face)\n expected = False\n self.assertEqual(expected, result)\n\n def test_start_timer(self):\n result = self.fd.start_timer()\n expected = type(datetime.now())\n self.assertEqual(expected, type(result))\n\n def test_stop_timer(self):\n result = self.fd.stop_timer()\n expected = None\n\n self.assertEqual(expected, result)\n\n\nclass StatisticsTestCase(unittest.TestCase):\n class_data = ClassDetails(lecture_value=1, lecturer_value='Tamar Shrot')\n fd = FaceDetector()\n stat = Statistics(class_data, fd)\n\n def test_add_value(self):\n val = self.stat.values.copy()\n time = self.stat.times.copy()\n self.stat.add_value(datetime.now(), 10)\n\n expected_time = len(time) + 1\n expected_val = len(val) + 1\n\n time_result = len(self.stat.times)\n val_result = len(self.stat.values)\n\n self.assertEqual(expected_time, time_result)\n self.assertEqual(expected_val, val_result)\n\n def test_save_emotion(self):\n emotion_counter = self.stat.emotion_counter.copy()\n self.stat.save_emotion(['Affection'])\n\n expected = emotion_counter['Affection'] + 1\n result = self.stat.emotion_counter['Affection']\n\n self.assertEqual(expected, result)\n\n def test_get_top_3(self):\n emotions1 = ['Affection', 'Disconnection', 'Annoyance', 'Fatigue', 'Aversion']\n emotions2 = ['Affection', 'Disconnection', 'Annoyance', 'Doubt/Confusion']\n emotions3 = ['Disconnection', 'Excitement', 'Affection', 'Fear', 'Happiness', ]\n self.stat.save_emotion(emotions1)\n self.stat.save_emotion(emotions2)\n self.stat.save_emotion(emotions3)\n\n expected = sorted(['Disconnection', 'Affection', 'Annoyance'])\n result = sorted(self.stat.get_top_3())\n\n self.assertEqual(expected, result)\n\n def test_get_data_frame(self):\n expected = pandas.DataFrame()\n result = self.stat.get_data_frame()\n\n self.assertEqual(type(expected), type(result))\n\n\nclass FrameSaverTestCase(unittest.TestCase):\n # locks creation\n p_lock = threading.Lock()\n c_lock = threading.Lock()\n locks = [p_lock, c_lock]\n\n main_path = os.path.dirname(os.getcwd())\n img_path = main_path + \"\\debug_exp\\\\frames\\\\frame.jpg\"\n img = cv2.imread(main_path + \"\\public\\\\test_img\\\\frame.jpg\")\n\n fs = FrameSaver(2, None, locks, None, None)\n\n def test_save_frame(self):\n\n try:\n os.remove(self.img_path)\n except FileNotFoundError:\n pass\n\n self.fs.save_frame(self.img)\n result = os.path.isfile(self.img_path)\n\n self.assertTrue(result)\n\n\nclass AttentionCalcTestCase(unittest.TestCase):\n # set parameters for attention calculator\n cont_weights = [0.3, 0.3, 0.4]\n ratio = 0.6\n alpha = 0.3\n weights = [cont_weights, 
ratio, alpha]\n\n attention_calculator = AttentionCalc(weights[0], weights[1], weights[2])\n\n # emotion_value function test-case\n def test_emotion_value_positive(self):\n emotion = 'Surprise'\n expected = 1\n result = self.attention_calculator.emotion_value(emotion)\n\n self.assertEqual(expected, result)\n\n def test_emotion_value_negative(self):\n emotion = 'Anger'\n expected = -1\n result = self.attention_calculator.emotion_value(emotion)\n\n self.assertEqual(expected, result)\n\n def test_emotion_value_neutral(self):\n emotion = 'Affection'\n expected = 0\n result = self.attention_calculator.emotion_value(emotion)\n\n self.assertEqual(expected, result)\n\n def test_emotion_value_empty(self):\n emotion = None\n result = self.attention_calculator.emotion_value\n\n self.assertRaises(ValueError, result, emotion)\n\n def test_emotion_value_other(self):\n emotion = 'Other'\n result = self.attention_calculator.emotion_value\n\n self.assertRaises(ValueError, result, emotion)\n\n # emotion_calc function test-case\n def test_emotion_calc(self):\n emotions = ['Anticipation', 'Confidence', 'Engagement', 'Esteem', 'Excitement',\n 'Happiness', 'Peace', 'Surprise', 'Anger', 'Annoyance', 'Aversion', 'Disapproval', 'Disconnection', 'Disquietment',\n 'Doubt/Confusion', 'Embarrassment', 'Fear', 'Sadness',\n 'Sensitivity', 'Affection', 'Pleasure', 'Sympathy', 'Fatigue', 'Pain', 'Suffering', 'Yearning']\n\n expected = 2\n result = self.attention_calculator.emotion_calc(emotions)\n\n self.assertEqual(expected, result)\n\n def test_emotion_calc_empty(self):\n emotions = []\n\n expected = 5\n result = self.attention_calculator.emotion_calc(emotions)\n\n self.assertEqual(expected, result)\n\n def test_emotion_calc_other(self):\n emotions = ['Not', 'Emotion']\n\n expected = 5\n result = self.attention_calculator.emotion_calc\n\n self.assertRaises(ValueError, result, emotions)\n\n # cont_calc function test-case\n def test_cont_calc(self):\n cont = [5, 5, 5]\n expected = 5\n result = self.attention_calculator.cont_calc(cont)\n\n self.assertEqual(expected, result)\n\n # attention_calc function test-case\n def test_attention_calc(self):\n emotions_cont_list = [['Affection', 'Disconnection', 'Disquietment', 'Engagement', 'Esteem', 'Happiness',\n 'Peace', 'Pleasure', 'Sympathy'],\n [5.9388, 4.8419, 6.4948]\n ]\n expected = 1.9596\n result = self.attention_calculator.attention_calc(emotions_cont_list)\n\n self.assertEqual(expected, result)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"88971996","text":"#!/usr/bin/python\nfrom db_conn import connect\nfrom db_conn import close\nimport requests\nimport re\n\n# Sample of Grocery Merchants\nMERCHANTS = ['Loblaw', 'Walmart', 'Metro', 'Costco', 'Freshco', 'NoFrills', 'FoodBasics', 'Independent', 'Zehrs','ShoppersDrugMart', 'ValuMart', 'Sobeys', 'IGA']\n# Central Postal Code in Ottawa\nLOCATION = ['K1P1J1']\n\n# Initialize variables\nproduct_list = []\nclassification_list = []\nglobal data\n\ndef flipp_request(location, merchant):\n req = requests.get('https://backflipp.wishabi.com/flipp/items/search?locale=en-ca&postal_code='+ location + '&radius=50km' + '&q=' + merchant)\n res = req.json()['items']\n return res\n\ndef get_data():\n # retrieve flyer data\n for loc in LOCATION:\n for mer in MERCHANTS:\n data = flipp_request(loc,mer)\n\n # loop over data, format, and extract 
values of interest\n            for i in data:\n                flyer_id = i['flyer_item_id']\n                product_name = i['name'].upper()\n                merchant = i['merchant_name'].upper()\n                sale_price = i['current_price']\n                price_units = i['post_price_text']\n                start_date = i['valid_from']\n                end_date = i['valid_to']\n\n                # add each row to product list for insert\n                product_list.append((price_units, start_date, sale_price, product_name, merchant, end_date, flyer_id))\n\ndef broad_category_classify():\n    # assign each product exactly one category; the original used independent `if`s,\n    # so every product without \"DUCK\" in its name was also classified as None\n    for i in product_list:\n        name = i[3]\n        flyer_id = i[6]\n\n        if \"CHICKEN\" in name:\n            add_classification(flyer_id, \"CHICKEN\")\n        elif \"TURKEY\" in name:\n            add_classification(flyer_id, \"TURKEY\")\n        elif \"DUCK\" in name:\n            add_classification(flyer_id, \"DUCK\")\n        else:\n            add_classification(flyer_id, None)\n\ndef add_classification(flyer_id, classification_string):\n    classification_list.append((flyer_id, classification_string))\n\ndef insert_data(cursor, connection, products, classifications):\n    # execute and commit insert statements\n    cursor.executemany(\"INSERT INTO flyer_item (price_units, start_date, sale_price, product_name, merchant, end_date, flyer_id) VALUES(%s,%s,%s,%s,%s,%s,%s)\", products)\n    connection.commit()\n    cursor.executemany(\"INSERT INTO classification VALUES(%s,%s)\", classifications)\n    connection.commit()\n\nif __name__ == \"__main__\":\n    # gather data, connect to db, insert data, terminate db connection\n    get_data()\n    broad_category_classify()\n    cursor, connection = connect()\n    insert_data(cursor, connection, product_list, classification_list)\n    close(cursor, connection)\n","sub_path":"sprigs.py","file_name":"sprigs.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"145099796","text":"# Runs a flask web app that allows pictures to be taken on trigger click\nfrom flask import Flask, request, render_template, redirect\nfrom picamera import PiCamera\nfrom datetime import datetime\n\napp = Flask(__name__)\ncamera = PiCamera()  # instantiate the camera once at startup; `camera` was used below but never created\n\n@app.route('/image-capture', methods=['POST', 'GET'])\ndef image_capture():\n    try:\n        take_picture = request.form.get(\"take_picture\")\n        print('Take picture? 
= ', take_picture)\n if take_picture == \"t\":\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n image_src = \"raw_images/\" + timestamp + \".png\"\n camera.capture(image_src)\n # print('bong')\n return render_template('image-capture.html')\n finally:\n return render_template('image-capture.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')","sub_path":"for_pi/image_capture_host_trigger.py","file_name":"image_capture_host_trigger.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"45241753","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nfrom features import get_between, get_point, get_mode, get_bright_session_num, get_apps_between_with_duration, get_app_num\r\n\r\nstime, etime = 'formatted_start_time', 'formatted_end_time'\r\nltime = 'last_bright_start_time'\r\n\r\ndef load_user_brit(enSN):\r\n df = pd.read_csv('./data/db_brightness_detail.csv')\r\n return df[df['enSN'] == enSN].sort_values(by=[stime]).reset_index(drop=True).copy()\r\n\r\ndef load(filename, enSN):\r\n df = pd.read_csv(os.path.join('./data', filename))\r\n return df[df['enSN'] == enSN].reset_index(drop=True).copy()\r\n\r\ndef load_app_class(enSN, typ):\r\n df = pd.read_csv('./data/gpu_detail_type(1)(1).csv')\r\n return df['name'][(df['enSN'] == enSN) & (df['type'] == typ)].unique()\r\n\r\ndef build_dark_session(enSN):\r\n df = load_user_brit(enSN)\r\n df_sess = pd.DataFrame()\r\n sess_s, sess_e, sess_flag = None, None, None\r\n for i, row in df.iterrows():\r\n if i == 0:\r\n sess_s, sess_e, sess_flag = row[stime], row[etime], row['brightness'] > 0\r\n else:\r\n curr_s, curr_e, curr_flag = row[stime], row[etime], row['brightness'] > 0\r\n if curr_s < sess_e:\r\n continue\r\n elif curr_s == sess_e:\r\n if curr_flag == sess_flag:\r\n sess_e = curr_e\r\n else:\r\n df_sess = df_sess.append({stime: sess_s, etime: sess_e, 'is_bright': sess_flag}, ignore_index=True)\r\n sess_s, sess_e, sess_flag = curr_s, curr_e, curr_flag\r\n else:\r\n if not sess_flag:\r\n if curr_flag == sess_flag:\r\n sess_e = curr_e\r\n else:\r\n df_sess = df_sess.append({stime: sess_s, etime: curr_s, 'is_bright': sess_flag}, ignore_index=True)\r\n sess_s, sess_e, sess_flag = curr_s, curr_e, curr_flag\r\n else:\r\n df_sess = df_sess.append({stime: sess_s, etime: sess_e, 'is_bright': sess_flag}, ignore_index=True)\r\n if not curr_flag:\r\n sess_s, sess_e, sess_flag = sess_e, curr_e, curr_flag\r\n else:\r\n df_sess = df_sess.append({stime: sess_e, etime: curr_s, 'is_bright': False}, ignore_index=True)\r\n sess_s, sess_e, sess_flag = curr_s, curr_e, curr_flag\r\n df_sess[ltime] = ''\r\n for i, row in df_sess.iterrows():\r\n if i > 0 and not row['is_bright']:\r\n df_sess[ltime].iat[i] = df_sess[stime][i-1]\r\n df_sess = df_sess[(df_sess['is_bright'] == False) & (df_sess[ltime] != '')]\r\n df_sess[stime] = pd.to_datetime(df_sess[stime])\r\n df_sess[etime] = pd.to_datetime(df_sess[etime])\r\n df_sess[ltime] = pd.to_datetime(df_sess[ltime])\r\n return df_sess.drop(columns=['is_bright']).reset_index(drop=True).copy()\r\n\r\ndef build_series(enSN, dt=pd.Timedelta(seconds=10)):\r\n df_sess = build_dark_session(enSN)\r\n df_wifi = load('db_app_wifi_data_detail.csv', enSN)\r\n df_cell = load('db_app_modem_data_detail.csv', enSN)\r\n # df_audio = load('db_app_audio_detail.csv', enSN)\r\n # df_volume = load('db_audio_volume.csv', enSN) \r\n # df_light = load('db_ambient_light_detail.csv', enSN)\r\n df_power_saving = 
load('db_power_saving.csv', enSN)\r\n df_battery = load('db_battery_detail.csv', enSN)\r\n df_disp = load('db_app_display_detail.csv', enSN)\r\n # df_tp = load('db_app_tp_detail.csv', enSN)\r\n # df_bt = load('db_app_bt_detail.csv', enSN)\r\n # df_camera = load('db_app_camera_detail.csv', enSN)\r\n # df_cpu = load('db_app_cpu_detail.csv', enSN)\r\n # df_gnss = load('db_app_gnss_detail.csv', enSN)\r\n # df_gpu = load('db_app_gpu_detail.csv', enSN)\r\n # df_sensor = load('db_app_sensor_detail.csv', enSN)\r\n series_dict = {}\r\n apps_work = load_app_class(enSN, '工作')\r\n apps_game = load_app_class(enSN, '游戏')\r\n apps_social = load_app_class(enSN, '社交')\r\n apps_video = load_app_class(enSN, '视频')\r\n apps_call = load_app_class(enSN, '通话')\r\n for i, row in df_sess.iterrows():\r\n series_dict[i] = pd.DataFrame()\r\n curr = row[stime]\r\n while curr <= row[etime]:\r\n mode = get_mode(df_power_saving, 'mode', curr)\r\n w_e, w_d = get_apps_between_with_duration(df_disp, 'energy', 'sum', row[ltime], row[stime], apps_work)\r\n g_e, g_d = get_apps_between_with_duration(df_disp, 'energy', 'sum', row[ltime], row[stime], apps_game)\r\n s_e, s_d = get_apps_between_with_duration(df_disp, 'energy', 'sum', row[ltime], row[stime], apps_social)\r\n v_e, v_d = get_apps_between_with_duration(df_disp, 'energy', 'sum', row[ltime], row[stime], apps_video) \r\n c_e, c_d = get_apps_between_with_duration(df_disp, 'energy', 'sum', row[ltime], row[stime], apps_call)\r\n item = {\r\n # GENERAL\r\n 'hour': curr.hour,\r\n 'minute': curr.minute,\r\n 'second': curr.second,\r\n # 'day of week': curr.dayofweek,\r\n \r\n # DURING LAST SESSION\r\n 'D duration': (row[stime] - row[ltime]).total_seconds(),\r\n 'D battery used': get_between(df_battery, 'screen_on_gas_gauge', 'sum', row[ltime], row[stime]),\r\n 'D work duration': w_d,\r\n 'D work energy': w_e,\r\n 'D game duration': g_d,\r\n 'D game energy': g_e,\r\n 'D social duration': s_d,\r\n 'D social energy': s_e,\r\n 'D video duration': v_d,\r\n 'D video energy': v_e,\r\n 'D call duration': c_d,\r\n 'D call energy': c_e,\r\n # 'D audio energy': get_between(df_audio, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n # 'D display energy': get_between(df_disp, 'energy', 'sum', row[ltime], row[stime]),\r\n # 'D tp energy': get_between(df_tp, 'energy', 'sum', row[ltime], row[stime]),\r\n # 'D bt energy': get_between(df_bt, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n # 'D camera energy': get_between(df_camera, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n # 'D cpu energy': get_between(df_cpu, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n # 'D gnss energy': get_between(df_gnss, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n # 'D gpu energy': get_between(df_gpu, 'energy', 'sum', row[ltime], row[stime]),\r\n # 'D sensor energy': get_between(df_sensor, 'screen_on_energy', 'sum', row[ltime], row[stime]),\r\n \r\n # SINCE LAST SESSION \r\n 'S time': (curr - row[stime]).total_seconds(),\r\n 'S battery used': get_between(df_battery, 'screen_off_gas_gauge', 'sum', row[stime], curr),\r\n 'S wifi data upload': get_between(df_wifi, 'screen_off_tx_bytes', 'sum', row[stime], curr),\r\n 'S wifi data download': get_between(df_wifi, 'screen_off_rx_bytes', 'sum', row[stime], curr),\r\n 'S cellular data upload': get_between(df_cell, 'screen_off_tx_bytes', 'sum', row[stime], curr),\r\n 'S cellular data download': get_between(df_cell, 'screen_off_rx_bytes', 'sum', row[stime], curr),\r\n # 'S bt energy': get_between(df_bt, 'screen_off_energy', 'sum', 
row[stime], curr),\r\n # 'S audio energy': get_between(df_audio, 'screen_off_energy', 'sum', row[stime], curr),\r\n # 'S gnss energy': get_between(df_gnss, 'screen_off_energy', 'sum', row[stime], curr),\r\n # 'S gpu energy': get_between(df_gpu, 'energy', 'sum', row[stime], curr),\r\n # 'S sensor energy': get_between(df_sensor, 'screen_off_energy', 'sum', row[stime], curr),\r\n\r\n # NOW\r\n # 'C audio device':get_point(df_audio, 'device', curr),\r\n # 'C audio volume': get_point(df_volume, 'state', curr),\r\n # 'C ambient light': get_point(df_light, 'level', curr),\r\n 'C mode 1': mode[0],\r\n 'C mode 2': mode[1],\r\n 'C mode 3': mode[2],\r\n 'C mode 4': mode[3],\r\n 'C charge': get_point(df_power_saving, 'charge', curr),\r\n 'C # bright sessions': get_bright_session_num(df_sess, curr),\r\n 'C # apps': get_app_num(df_disp, curr),\r\n \r\n # LABEL\r\n 'time before bright': (row[etime] - curr).total_seconds()\r\n }\r\n series_dict[i] = series_dict[i].append(item, ignore_index=True)\r\n curr += dt\r\n return series_dict\r\n\r\nif __name__ == '__main__':\r\n #for enSN in ['ELS000040']:\r\n for enSN in ['ELS000040', 'ELS000043', 'ELS000063']:\r\n result = build_series(enSN)\r\n np.save('x_dict_%s.npy' % enSN, result)","sub_path":"series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":8959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"582863199","text":"import sqlite3\nimport datetime\n\ndef init_db(db_name='data/cycles.db'):\n table = 'cycles'\n conn = sqlite3.connect(db_name, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n db = conn.cursor()\n create_database(db)\n conn.commit()\n return (conn, db)\n\ndef create_database(db):\n query = '''CREATE TABLE IF NOT EXISTS cycles\n (current_timestamp timestamp, cycles integer, disk text)'''\n db.execute(query)\n\ndef insert_cycles(db, cycles, disk):\n query = 'insert into cycles(current_timestamp, cycles, disk) values (?, ?, ?)'\n values = (datetime.datetime.now(), cycles, disk)\n db.execute(query, values)\n\ndef select_disk(db, disk):\n query = 'SELECT \"current_timestamp\", cycles, disk FROM cycles WHERE disk = ?'\n db.execute(query, (disk,))\n rows = db.fetchall()\n if len(rows) > 0:\n dates, cycles, _ = zip(*rows)\n return {'disk': disk, 'date': dates, 'cycles': cycles}\n\ndef select_all(db, disks):\n results = []\n\n for disk in disks:\n result = select_disk(db, disk)\n if result:\n results.append(result)\n return results","sub_path":"data/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"121270476","text":"import geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport random\nimport timeit\nimport multiprocessing\n\nprint('Reading Data...')\n\n#load data\nadult = pd.read_csv('/scratch/njiang8/Parents/NY_Adults.csv')\nkids = pd.read_csv('/scratch/njiang8/Parents/NY_kids.csv')\n\n\ndef check_parents(kid):\n \n parents = adult.loc[adult.hhold == kid.hhold].copy()\n #if wp is null means parents stay at home and take care of babies\n if parents.wp.isnull().all():\n return False\n else:\n #babies go to daycare\n return True\n\ndef go(kid):\n kid['Stay'] = kid.apply(check_parents,axis=1)\n return kid\n\ndef parallelize_kid(kid, func):\n #Cores\n num_cores = multiprocessing.cpu_count()\n num_partitions = num_cores\n #Pools\n pool = multiprocessing.Pool(num_cores)\n \n kid_split = np.array_split(kid, num_partitions)\n \n df 
= pd.concat(pool.map(func, kid_split))\n\n    # close the pool before returning; the original returned first, leaving close()/join() unreachable\n    pool.close()\n    pool.join()\n    return df\n\n\nif __name__ == '__main__':\n    print('Start running...')\n    # set timer\n    start_time = timeit.default_timer()\n\n    # apply the check function\n    df = parallelize_kid(kids, go)\n    # babies go to daycare\n    df_go = df[df['Stay'] == True].copy()\n\n    print('Saving...')\n    df_go.to_csv('/scratch/njiang8/Parents/kids_go_daycare.csv')\n\n    # end timer\n    elapsed = timeit.default_timer() - start_time\n\n    print('job total time (seconds):', elapsed)\n    print('End program')\n","sub_path":"Daycare/2_2ny_check.py","file_name":"2_2ny_check.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"386438742","text":"s=input()\nl=list(s)\nc=0\nfor i in range(1,len(l)):\n\tif l[i]==l[i-1]:\n\t\tc=c+1\nif c==0:\n\tprint(\"Yes\")\nelse:\n\tprint(\"No\")\n","sub_path":"86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"101819077","text":"#-*- coding: utf-8 -*-\nimport re\nfrom django.db.models.fields.files import ImageFieldFile,ImageField\nimport os\nfrom django.conf import settings\nfrom django.db.models import signals\n\nclass TImageFieldFile(ImageFieldFile):\n    \n    def delete(self, save=True):\n        # remove all existing thumbnails\n        filehead, filetail = os.path.split(self.name)\n        basename, format = os.path.splitext(filetail)\n        miniatures_dir = os.path.join(self.storage.location,filehead,settings.THUMBNAILS_SUBDIR)\n        pattern = re.compile(r'^%s_(\\d+x\\d+)c?%s$' % (basename, format))\n        for file in os.listdir(miniatures_dir):\n            m = re.match(pattern, file)\n            if m is not None:\n                self.storage.delete(os.path.join(filehead,settings.THUMBNAILS_SUBDIR,file))\n        \n        # super(self.__class__, ...) would recurse infinitely in a subclass; name the class explicitly\n        super(TImageFieldFile, self).delete(save)\n\nclass TImageField(ImageField):\n    attr_class = TImageFieldFile\n\n    def contribute_to_class(self, cls, name):\n        super(ImageField, self).contribute_to_class(cls, name)\n        signals.post_delete.connect(self.delete_file, sender=cls)\n    \n    def delete_file(self, instance, sender, **kwargs): \n        file = getattr(instance, self.attname) \n        # If no other object of this type references the file, \n        # and it's not the default value for future objects, \n        # delete it from the backend. \n        if file and file.name != self.default and \\\n            not sender._default_manager.filter(**{self.name: file.name}): \n            file.delete(save=False) \n        elif file: \n            # Otherwise, just close the file, so it doesn't tie up resources. 
\n            file.close() \n\ntry:\n\tfrom south.modelsinspector import add_introspection_rules\n\tadd_introspection_rules([], [\"^thumbnails\\.fields\\.TImageField\"])\nexcept ImportError:\n\tpass\n","sub_path":"apps/thumbnails/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"225104876","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('camo', '0022_auto_20150403_2122'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='aircraft',\n            name='status',\n            field=models.CharField(choices=[('flying', 'W użytkowaniu'), ('parked', 'Hangarowany')], max_length=10, default='flying'),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='crs',\n            name='done_date',\n            field=models.DateField(default=datetime.date(2015, 4, 4)),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='work_order',\n            name='date',\n            field=models.DateField(default=datetime.date(2015, 4, 4)),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"camo/migrations/0023_auto_20150404_1144.py","file_name":"0023_auto_20150404_1144.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
{"seq_id":"130760118","text":"import numpy\n\nclass WindyGridworld:\n\t\"\"\"\n\tThe class encapsulating the grid along with its functions.\n\t\"\"\"\n\n\tdef __init__(self, rows, columns, start, goal, wind_speeds, task):\n\t\t\"\"\"\n\t\trows = number of rows in the gridworld\n\t\tcolumns = number of columns in the gridworld\n\t\tstart = the start state of the agent (row, column)\n\t\tgoal = the goal state of the agent (row, column)\n\t\twind_speeds = the array of wind speeds, one entry per column\n\t\ttask 1 is deterministic with 4 actions\n\t\ttask 2 is deterministic with 8 actions\n\t\ttask 3 adds stochastic wind (8 actions)\n\t\ttask 4 has 9 actions (the 8 moves plus a stay-in-place action)\n\t\ttask 5 is stochastic with 4 actions\n\t\t\"\"\"\n\n\t\t# initialize the properties of the gridworld\n\t\tself.rows = rows\n\t\tself.columns = columns\n\t\tself.start = start\n\t\tself.goal = goal\n\t\tself.wind_speeds = wind_speeds\n\t\tself.task = task\n\n\t\t# make sure the initialization is correct\n\t\tassert self.task in [1,2,3,4,5], \"Invalid Task\"\n\t\tassert all([self.start[0] < self.rows, self.start[1] < self.columns]), \"Incorrect Start State\"\n\t\tassert all([self.goal[0] < self.rows, self.goal[1] < self.columns]), \"Incorrect Goal State\"\n\t\tassert len(self.wind_speeds) == self.columns, \"Wind Speeds length Incorrect\"\n\n\t\t# initialize the possible actions with their results\n\t\t# (fixed: \"SW\" was (-1,1), which duplicated SE; south-west is (-1,-1))\n\t\tif self.task == 1:\n\t\t\tself.actions = {\"N\": (1,0), \"S\": (-1,0), \"E\": (0,1), \"W\": (0,-1)}\n\t\telif self.task == 2 or self.task == 3:\n\t\t\tself.actions = {\"N\": (1,0), \"S\": (-1,0), \"E\": (0,1), \"W\": (0,-1), \"NE\": (1,1), \"NW\": (1,-1), \"SE\": (-1,1), \"SW\": (-1,-1)}\n\t\telif self.task == 4:\n\t\t\tself.actions = {\"N\": (1,0), \"S\": (-1,0), \"E\": (0,1), \"W\": (0,-1), \"NE\": (1,1), \"NW\": (1,-1), \"SE\": (-1,1), \"SW\": (-1,-1), \"0\":(0,0)}\n\t\telif self.task == 5:\n\t\t\tself.actions = {\"N\": (1,0), \"S\": (-1,0), \"E\": (0,1), \"W\": (0,-1)}\n\n\tdef take_step(self, state, action):\n\t\t\"\"\"\n\t\taction is one of the actions in self.actions\n\t\tupdate the current state and return the reward\n\t\t\"\"\"\n\n\t\t# get the overall effect of the action and get the 
new state\n\t\teffect = [self.actions[action][0] + self.wind_speeds[state[1]], self.actions[action][1]]\n\n\t\t# consider the effect of stochasticity for task 3\n\t\tif self.task == 3 or self.task==5:\n\t\t\trnd = numpy.random.uniform(0,1)\n\t\t\tif rnd < 1./3.:\n\t\t\t\teffect[0] -= 1\n\t\t\telif rnd > 2./3.:\n\t\t\t\teffect[0] += 1\n\n\n\t\tnew_state = [None, None]\n\t\tnew_state[0] = max(0, min(state[0] + effect[0], self.rows-1))\n\t\tnew_state[1] = max(0, min(state[1] + effect[1], self.columns-1))\n\n\t\t# return the corresponding next state and reward\n\t\tif new_state == list(self.goal):\n\t\t\treturn new_state, 100\n\t\telse:\n\t\t\treturn new_state, -1\n\ndef getDefaultGridworld(task):\n\t\"\"\"\n\treturn the default gridworld as described in example 6.5\n\t\"\"\"\n\trows = 7\n\tcolumns = 10\n\tstart = (3,0)\n\tgoal = (3,7)\n\twind_speeds = (0,0,0,1,1,1,2,2,1,0)\n\n\treturn WindyGridworld(rows, columns, start, goal, wind_speeds, task)","sub_path":"A3/submission/WindyGridworld.py","file_name":"WindyGridworld.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"620444422","text":"# ===============================================================================\n#\n# Copyright (c) 2013-2017 Qualcomm Technologies, Inc.\n# All Rights Reserved.\n# Confidential and Proprietary - Qualcomm Technologies, Inc.\n#\n# ===============================================================================\n\"\"\"Factory module for crypto discovery.\n\"\"\"\n\nfrom sectools.common.core.plugin import SecPluginMgr\n\n\nclass ModuleNotFound(object):\n \"\"\"Class to be used as return value by discovery implementations to specify\n that the module was not found.\n \"\"\"\n pass\n\n\nclass DiscoveryFactory(SecPluginMgr):\n \"\"\"Factory class for the discovery classes. Allows plugging in new classes.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes modules variable with the IDs that have registered with\n the factory.\n \"\"\"\n self.modules = {}\n\n def supports(self, module_id):\n \"\"\"Returns True if the module ID (str) is supported for discovery.\n \"\"\"\n return module_id in self.modules\n\n def get_impl(self, module_id):\n \"\"\"Returns the module specific information for the module ID (str)\n provided. Tries to discover the module if it hasn't already been\n discovered.\n\n :param str module_id: module\n :raises KeyError: if the module is not registered for discovery.\n :raises RuntimeError: if the module could not be discovered.\n \"\"\"\n # Check if module is supported for discovery\n if module_id not in self.get_map():\n raise KeyError('Module \"' + str(module_id) + '\" cannot be discovered. ' +\n 'Discovery logic is not available.')\n\n # Check if module impl is available\n module_impl = self.modules.get(module_id, None)\n\n # Discover if module has not already been searched\n if module_impl is None:\n module_impl = self.discover(module_id)\n self.modules[module_id] = module_impl\n\n # Check if module was found\n if module_impl == ModuleNotFound:\n raise RuntimeError('Module \"' + str(module_id) + '\" was not found.')\n\n # Return the discovered module\n return module_impl\n\n def discover(self, module_id):\n \"\"\"Discovers the module based on the module ID (str) provided. If module\n was discovered before, then rediscovers the module and overrides\n previous module info.\n\n :param str module_id: module\n :returns: Module specific information & data structure. 
If module is not\n found, returns ModuleNotFound\n \"\"\"\n # Discover a module\n module_impl = self.get_map()[module_id]().discover()\n return module_impl\n\n\n# Create instance of the discovery manager\ndiscovery_factory = DiscoveryFactory()\n","sub_path":"QCA4020_SDK/target/sectools/qdn/sectools/common/crypto/discovery/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"632477860","text":"# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../\") \nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom common import BaseNet\nfrom time import time\nimport numpy as np\nfrom itertools import combinations\n\nclass FM(nn.Module, BaseNet):\n def __init__(self, total_num_feature, feature_len, device, embedding_size=4, emb_decay=1e-4, \n drop_embedding=0, embedding_norm=False, **params):\n \"\"\"\n Initialize a new network\n Inputs:\n - total_num_feature: A list of integer giving the size of features for each field.\n - embedding_size: An integer giving size of feature embedding.\n - hidden_dims: A list of integer giving the size of each hidden layer.\n - num_classes: An integer giving the number of classes to predict. For example,\n someone may rate 1,2,3,4 or 5 stars to a film.\n - batch_size: An integer giving size of instances used in each interation.\n \"\"\"\n super().__init__()\n self.total_num_feature = total_num_feature\n self.feature_len = feature_len\n self.embedding_size = embedding_size\n self.emb_decay = emb_decay\n self.drop_embedding = drop_embedding\n self.device = device\n self.loss_fn = F.binary_cross_entropy\n self.embedding_norm = embedding_norm\n if self.embedding_norm:\n self.emb_norm = nn.BatchNorm1d(feature_len)\n self.Embedding_1d = nn.Embedding(total_num_feature, 1, sparse=False)\n self.Embedding = nn.Embedding(total_num_feature, self.embedding_size, sparse=False)\n self.embedding_dropout = nn.Dropout2d(drop_embedding)\n\n self.apply(self.init_weights)\n \n def FM(self, feature_emb1d, feature_emb):\n '''\n feature_emb1d : b x k x 1\n '''\n \n # solution-A\n lr_score = torch.sum(feature_emb1d, dim=1)\n sum_square = torch.sum(feature_emb, dim=1) ** 2 # sum then square\n square_sum = torch.sum(feature_emb ** 2, dim=1) # square than sum\n cross_score = torch.sum((sum_square - square_sum) / 2, dim=1).view(-1, 1)\n print(cross_score)\n \n # solution-B\n sum_of_square = torch.sum(feature_emb, dim=0) ** 2\n square_of_sum = torch.sum(feature_emb ** 2, dim=0)\n bi_interaction_vector = (sum_of_square - square_of_sum) * 0.5\n print(torch.sum(bi_interaction_vector, dim=-1).view(-1, 1))\n \n # solution-C\n dot_val = 0\n for i in range(feature_emb.size()[0]):\n for j in range(feature_emb.size()[1]):\n if i'''.format(_version)\n\n## ==============================================\n## General utility functions - utils.py\n## ==============================================\n\ndef con_dec(x, dec):\n '''Return a float string with n decimals\n (used for ascii output).'''\n if x is None: return(x)\n return(\"%.\" + str(dec) + \"f\" % x)\n\ndef inc2str_inc(inc):\n '''convert a WGS84 geographic increment to a str_inc (e.g. 
0.0000925 ==> `13`)'''\n import fractions\n return(str(fractions.Fraction(str(inc * 3600)).limit_denominator(10)).replace('/', ''))\n\ndef this_year():\n '''return the current year'''\n import datetime\n return(datetime.datetime.now().strftime('%Y'))\n\ndef remove_glob(glob_str):\n '''glob `glob_str` and os.remove results'''\n import glob\n globs = glob.glob(glob_str)\n if len(globs) > 0:\n for g in globs:\n try:\n os.remove(g)\n except: return(None)\n return(0)\n else: return(None)\n\ncmd_exists = lambda x: any(os.access(os.path.join(path, x), os.X_OK) for path in os.environ[\"PATH\"].split(os.pathsep))\n\ndef run_cmd(cmd, data_fun = None, verbose = False):\n '''Run a command with or without a progress bar while passing data'''\n if verbose: echo_msg('running cmd: \\033[1m{}\\033[m...'.format(cmd.rstrip())) \n if data_fun is not None:\n pipe_stdin = subprocess.PIPE\n else: pipe_stdin = None\n p = subprocess.Popen(cmd, shell = True, stdin = pipe_stdin, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True) \n\n if data_fun is not None:\n if verbose: echo_msg('piping data to cmd subprocess...')\n data_fun(p.stdin)\n p.stdin.close()\n \n while p.poll() is None:\n if verbose:\n rl = p.stderr.readline()\n sys.stderr.write('\\x1b[2K\\r')\n sys.stderr.write(rl.decode('utf-8'))\n if verbose: sys.stderr.write(p.stderr.read().decode('utf-8'))\n\n out = p.stdout.read()\n p.stderr.close()\n p.stdout.close()\n if verbose: echo_msg('ran cmd: \\033[1m{}\\033[m and returned {}.'.format(cmd.rstrip(), p.returncode))\n return(out, p.returncode)\n\ndef cmd_check(cmd_str, cmd_vers_str):\n '''check system for availability of 'cmd_str' and return its version'''\n if cmd_exists(cmd_str): \n cmd_vers, status = run_cmd('{}'.format(cmd_vers_str))\n return(cmd_vers.split()[-1].rstrip())\n else: return(None)\n\ndef config_check(chk_vdatum = False, verbose = False):\n '''check for needed geomods external software'''\n _waff_co = {}\n py_vers = str(sys.version_info[0])\n host_os = sys.platform\n _waff_co['platform'] = host_os\n _waff_co['python'] = py_vers\n if host_os == 'win32': ae = '.exe'\n else: ae = ''\n\n #if chk_vdatum: _waff_co['VDATUM'] = vdatum(verbose=verbose).vdatum_path\n _waff_co['GDAL'] = cmd_check('gdal_grid{}'.format(ae), 'gdal_grid --version')\n _waff_co['GMT'] = cmd_check('gmt{}'.format(ae), 'gmt --version')\n _waff_co['MBGRID'] = cmd_check('mbgrid{}'.format(ae), 'mbgrid -version | grep Version')\n _waff_co['BOUNDS'] = cmd_check('bounds{}'.format(ae), 'bounds --version')\n \n return(_waff_co)\n \n## ==============================================\n## stderr messaging\n## ==============================================\n\ndef echo_error_msg(msg, prefix = 'geomods'):\n '''echo error msg to stderr'''\n sys.stderr.write('\\x1b[2K\\r')\n sys.stderr.flush()\n sys.stderr.write('{}: error, {}\\n'.format(prefix, msg))\n\ndef echo_msg(msg, prefix = 'geomods'):\n '''echo msg to stderr'''\n sys.stderr.write('\\x1b[2K\\r')\n sys.stderr.flush()\n sys.stderr.write('{}: {}\\n'.format(prefix, msg))\n \n### End\n","sub_path":"geomods-old/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"115375838","text":"import numpy as np\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ape import helper, Constants\n\n\nclass ResBlock(nn.Module):\n def __init__(self, d_model):\n super(ResBlock, self).__init__()\n\n 
self.res_block = nn.Sequential(\n nn.ReLU(True),\n nn.Conv1d(d_model, d_model, 5, padding=2), # nn.Linear(DIM, DIM),\n nn.ReLU(True),\n nn.Conv1d(d_model, d_model, 5, padding=2), # nn.Linear(DIM, DIM),\n )\n\n def forward(self, x):\n y = self.res_block(x)\n return x + (0.3 * y)\n\n\nclass BinaryClassifierCNN(nn.Module):\n '''\n Tells whether a seq is real or machine-generated\n\n Args:\n\n Inputs: input_var\n - **input_var** (batch, seq_length)\n\n Outputs: prob\n - **prob** (batch, 1) outputs a value in [0, 1]\n '''\n\n def __init__(self, num_vocab, embed_dim, num_kernel, kernel_sizes, dropout_p):\n super(BinaryClassifierCNN, self).__init__()\n self.min_len = max(kernel_sizes) # not used directly, but some training setups need this parameter\n\n self.embed = nn.Embedding(num_vocab, embed_dim)\n self.convs = nn.ModuleList(\n [nn.Conv2d(1, num_kernel, (k, embed_dim)) for k in kernel_sizes])\n self.dropout = nn.Dropout(dropout_p)\n self.conv2out = nn.Linear(len(kernel_sizes) * num_kernel, 1)\n\n def forward(self, x, input_onehot=False):\n # x = self.check_input(x) # (batch, seq_len)\n if input_onehot:\n batch_size = x[0].size(0)\n bos = self.embed(autograd.Variable(\n torch.LongTensor([Constants.BOS] * batch_size))).unsqueeze(1)\n x = [token.mm(self.embed.weight).unsqueeze(1) for token in x]\n x.insert(0, bos)\n x = torch.cat(x, dim=1)\n else:\n x = self.embed(x) # (batch, input_size, embed_dim)\n x = x.unsqueeze(1) # (batch, 1, input_size, embed_dim)\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs] # [(batch, C_out, Width), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(batch,C_out), ...]*len(Ks)\n x = torch.cat(x, 1) # (batch, len(kernal_sizes) * kernal_num)\n x = self.dropout(x) # N x len(Ks)*Co\n logit = self.conv2out(x) # (batch, class_num)\n logit.squeeze_(1) # (batch)\n out = F.sigmoid(logit) # out lies in [0, 1], representing a probability\n return out\n # return logit\n\n def check_input(self, x):\n if x.size(1) < self.min_len:\n x = helper.pad_seq(x.data, self.min_len, Constants.PAD)\n return x\n\n\nclass TestDiscriminator(nn.Module):\n def __init__(self, n_vocab, d_model, max_len):\n super(TestDiscriminator, self).__init__()\n\n self.max_len = max_len\n self.d_model = d_model\n\n self.vocab2embed = nn.Linear(n_vocab, d_model)\n self.block = nn.Sequential(\n ResBlock(d_model),\n ResBlock(d_model),\n ResBlock(d_model),\n ResBlock(d_model),\n ResBlock(d_model), )\n self.conv2logit = nn.Linear(max_len * d_model, 1)\n # self.conv1d = nn.Conv1d(n_vocab, d_model, 1)\n # self.pool = nn.MaxPool1d()\n\n def forward(self, x):\n x = self.vocab2embed(x) # (batch, max_len, n_vocab)\n x = x.transpose(1, 2) # (batch, n_vocab, max_len)\n x = self.block(x) # (batch, model_dim, max_len)\n x = x.view(-1, self.max_len * self.d_model) # (batch, model_dim * seq_len)\n logit = self.conv2logit(x) # (batch, 1)\n return logit\n\n\nclass ReinforceGenerator(nn.Module):\n \"\"\"\n Adds the extra functionality required by a GAN generator. The cleanest approach would be to modify the original Seq2seq class directly, but that would affect the original repository.\n\n Args:\n encoder (EncoderRNN): object of EncoderRNN\n decoder (DecoderRNN): object of DecoderRNN\n decode_function (func, optional): function to generate symbols from output hidden states (default: F.log_softmax)\n\n Inputs: input_variable, input_lengths, target_variable, teacher_forcing_ratio, volatile\n - **input_variable** (list, optional): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. 
This information is forwarded to the encoder.\n - **input_lengths** (list of int, optional): A list that contains the lengths of sequences\n in the mini-batch, it must be provided when using variable length RNN (default: `None`)\n - **target_variable** (list, optional): list of sequences, whose length is the batch size and within which\n each sequence is a list of token IDs. This information is forwarded to the decoder.\n - **teacher_forcing_ratio** (int, optional): The probability that teacher forcing will be used. A random number\n is drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,\n teacher forcing would be used (default is 0)\n\n Outputs: decoder_outputs, decoder_hidden, ret_dict\n - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the\n outputs of the decoder.\n - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden\n state of the decoder.\n - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers\n representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of\n predicted token IDs, *KEY_INPUT* : target outputs if provided for decoding, *KEY_ATTN_SCORE* : list of\n sequences, where each list is of attention weights }.\n \"\"\"\n\n def __init__(self, encoder, decoder, decode_function=F.log_softmax):\n super(ReinforceGenerator, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.decode_function = decode_function\n self.max_len = self.decoder.max_len\n\n def flatten_parameters(self):\n self.encoder.rnn.flatten_parameters()\n self.decoder.rnn.flatten_parameters()\n\n def forward(self, input_variable, input_lengths=None, target_variable=None,\n teacher_forcing_ratio=0):\n encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)\n decoder_outputs, decoder_hidden, other = self.decoder(\n inputs=target_variable, encoder_hidden=encoder_hidden, encoder_outputs=encoder_outputs,\n function=self.decode_function, teacher_forcing_ratio=teacher_forcing_ratio)\n\n step_probs = decoder_outputs\n\n # ignore each seq's individual length and concatenate directly to max_length\n # length = other['length'][0]\n # tgt_id_seq = [other['sequence'][di][0].data[0] for di in range(length)]\n # tgt_seq = [self.tgt_vocab.itos[tok] for tok in tgt_id_seq]\n\n out_seqs = torch.cat(other[DecoderRNN.KEY_SEQUENCE], dim=1) # batch x max_length\n out_lengths = other[DecoderRNN.KEY_LENGTH]\n\n return out_seqs, out_lengths, step_probs\n\n def rollout(self, input_variable, input_lengths=None, num_rollout=64):\n if input_variable.size(0) != 1:\n raise Exception('rollout only accepts batch_size = 1, i.e. one real_seq at a time')\n\n encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)\n\n inputs, batch_size, max_length = self.decoder._validate_args(\n None, encoder_hidden, encoder_outputs, function=None, teacher_forcing_ratio=0)\n h_n, c_n = self.decoder._init_state(encoder_hidden)\n\n # stack tensors for batch sampling: (1, *) -> (num_rollout, *)\n inputs = torch.cat([inputs for _ in range(num_rollout)], dim=0)\n h_n = torch.cat([h_n for _ in range(num_rollout)], dim=1)\n c_n = torch.cat([c_n for _ in range(num_rollout)], dim=1)\n encoder_outputs = torch.cat([encoder_outputs for _ in range(num_rollout)], dim=0)\n decoder_hidden = (h_n, c_n)\n\n # a list of tensor_token (num_rollout x 1)\n seq_symbols, log_probs, entropies = [], [], []\n _indices = 
list(range(num_rollout))\n decoder_input = inputs[:, 0].unsqueeze(1)\n\n for i in range(max_length):\n decoder_output, decoder_hidden, step_attn = self.decoder.forward_step(\n decoder_input, decoder_hidden, encoder_outputs, function=F.softmax)\n # step_output = decoder_output.squeeze(1)\n # symbols = decode(di, step_output, step_attn)\n # decoder_input = symbols\n\n # decode\n probs = decoder_output.squeeze(1)\n symbol = probs.multinomial(1).data # select an action\n prob = probs[_indices, symbol.squeeze(1).tolist()] # get the probability of the action\n entropy = -(probs * probs.log()).sum(dim=1)\n\n seq_symbols.append(symbol)\n log_probs.append(prob.log())\n entropies.append(entropy)\n\n rollouts = torch.cat(seq_symbols, 1)\n return rollouts, seq_symbols, log_probs, entropies\n\n def batch_loss(self, step_probs, target_variable=None, criterion=nn.NLLLoss()):\n if target_variable is None:\n raise ValueError('target_variable is required to compute the loss')\n\n target = self._validate_variables(target=target_variable)\n\n batch_size = target_variable.size(0)\n\n loss = 0\n for step, step_prob in enumerate(step_probs):\n loss = criterion(step_prob.contiguous().view(batch_size, -1), target[:, step])\n loss /= batch_size\n yield loss\n\n def _validate_variables(self, target=None):\n if target is not None:\n # if the first column contains the SOS token -> drop the first column\n if target[:, 0].eq(self.decoder.sos_id).sum().data[0] > 0:\n target = target[:, 1:]\n\n # if the length is below max_len -> pad\n # max_len = self.decoder.max_length\n # if target.size(1) < max_len:\n # target = helper.pad_sequence(target, max_len, seq2seq.pad_id)\n return target\n\n\nclass CycleGAN(nn.Module):\n def __init__(self, g_a, g_b, d_a, d_b):\n super(CycleGAN, self).__init__()\n self.g_a = g_a\n self.g_b = g_b\n self.d_a = d_a\n self.d_b = d_b\n\n def flatten_parameters(self):\n self.g_a.flatten_parameters()\n self.g_b.flatten_parameters()\n\n def pg_loss(self, log_probs, entropies, rewards):\n ''' Given one real_seq, run the REINFORCE algorithm with it\n Steps:\n 1. feed in the real_seq\n 2. generate several samples with the seq2seq model\n 3. use the discriminator to judge how realistic each sample is, outputting a probability; this prob is each sample's final_reward\n 4. intermediate_reward (the reward of each action) = log(prob_of_action) * final_reward\n 5. normalization\n 6. 
loss = -(sum of all intermediate_rewards)\n\n Args:\n model a generator model\n dis a discriminator model\n input_var a real sequence from source\n '''\n loss = 0\n for log_prob, entropy in zip(log_probs, entropies):\n loss += -(log_prob * rewards).sum()\n\n # normalize\n num_rollout = rewards.size(0)\n loss /= len(log_probs) * num_rollout\n\n return loss\n","sub_path":"ape/model/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":11358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"102478279","text":"import csv\nimport unicodedata\nimport re\nimport utils\n\nwith open('initial/clip_links.csv', encoding=\"utf8\") as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n with open('cleaned/clip_links_cleaned.csv', 'w', encoding=\"utf8\") as out:\n wr = csv.writer(out)\n added = set()\n clips = utils.get_clip_set()\n for row in reader:\n if row[0] in clips and row[1] in clips:\n new_row = (row[0], row[1], row[2])\n if len(row[0]) != 0 and row[0].lower() != 'null' and len(row[1]) != 0 and row[1].lower() != 'null' and new_row not in added:\n wr.writerow(new_row)\n added.add(new_row)\n","sub_path":"data/scripts/clean_clip_links.py","file_name":"clean_clip_links.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"177832698","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2012 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import api\nfrom horizon import exceptions\nfrom horizon import forms\nfrom .forms import CreateSubnet, UpdateSubnet\n\nLOG = logging.getLogger(__name__)\n\n\nclass CreateView(forms.ModalFormView):\n form_class = CreateSubnet\n template_name = 'admin/networks/subnets/create.html'\n success_url = 'horizon:admin:networks:detail'\n\n def get_success_url(self):\n return reverse(self.success_url,\n args=(self.kwargs['network_id'],))\n\n def get_object(self):\n if not hasattr(self, \"_object\"):\n try:\n network_id = self.kwargs[\"network_id\"]\n self._object = api.quantum.network_get(self.request,\n network_id)\n except:\n redirect = reverse('horizon:project:networks:index')\n msg = _(\"Unable to retrieve network.\")\n exceptions.handle(self.request, msg, redirect=redirect)\n return self._object\n\n def get_context_data(self, **kwargs):\n context = super(CreateView, self).get_context_data(**kwargs)\n context['network'] = self.get_object()\n return context\n\n def get_initial(self):\n network = self.get_object()\n return {\"network_id\": self.kwargs['network_id'],\n \"network_name\": network.name}\n\n\nclass UpdateView(forms.ModalFormView):\n form_class = UpdateSubnet\n template_name = 'admin/networks/subnets/update.html'\n context_object_name = 'subnet'\n success_url = 'horizon:admin:networks:detail'\n\n def get_success_url(self):\n return reverse(self.success_url,\n args=(self.kwargs['network_id'],))\n\n def _get_object(self, *args, **kwargs):\n if not hasattr(self, \"_object\"):\n subnet_id = self.kwargs['subnet_id']\n try:\n self._object = api.quantum.subnet_get(self.request, subnet_id)\n except:\n redirect = reverse(\"horizon:admin:networks:detail\",\n args=(self.kwargs['network_id'],))\n msg = _('Unable to retrieve subnet details')\n exceptions.handle(self.request, msg, redirect=redirect)\n return self._object\n\n def get_context_data(self, **kwargs):\n context = super(UpdateView, self).get_context_data(**kwargs)\n subnet = self._get_object()\n context['subnet_id'] = subnet['id']\n context['network_id'] = subnet['network_id']\n context['cidr'] = subnet['cidr']\n context['ip_version'] = {4: 'IPv4', 6: 'IPv6'}[subnet['ip_version']]\n return context\n\n def get_initial(self):\n subnet = self._get_object()\n return {'network_id': self.kwargs['network_id'],\n 'subnet_id': subnet['id'],\n 'tenant_id': subnet['tenant_id'],\n 'cidr': subnet['cidr'],\n 'ip_version': subnet['ip_version'],\n 'name': subnet['name'],\n 'gateway_ip': subnet['gateway_ip']}\n","sub_path":"stack/horizon/horizon/dashboards/admin/networks/subnets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"125143492","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\"\"\"Test deprecated functionality which will be removed in v1.8.0.\"\"\"\nimport time\nfrom unittest.mock import Mock\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import optim\n\nfrom pytorch_lightning import Callback, Trainer\nfrom pytorch_lightning.loggers import CSVLogger, LightningLoggerBase, LoggerCollection\nfrom pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin\nfrom pytorch_lightning.plugins.training_type.ddp import DDPPlugin\nfrom pytorch_lightning.plugins.training_type.ddp2 import DDP2Plugin\nfrom pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin\nfrom pytorch_lightning.plugins.training_type.deepspeed import DeepSpeedPlugin\nfrom pytorch_lightning.plugins.training_type.dp import DataParallelPlugin\nfrom pytorch_lightning.plugins.training_type.fully_sharded import DDPFullyShardedPlugin\nfrom pytorch_lightning.plugins.training_type.ipu import IPUPlugin\nfrom pytorch_lightning.plugins.training_type.sharded import DDPShardedPlugin\nfrom pytorch_lightning.plugins.training_type.sharded_spawn import DDPSpawnShardedPlugin\nfrom pytorch_lightning.plugins.training_type.single_device import SingleDevicePlugin\nfrom pytorch_lightning.plugins.training_type.single_tpu import SingleTPUPlugin\nfrom pytorch_lightning.plugins.training_type.tpu_spawn import TPUSpawnPlugin\nfrom pytorch_lightning.profiler import AbstractProfiler, AdvancedProfiler, SimpleProfiler\nfrom pytorch_lightning.trainer.configuration_validator import _check_datamodule_checkpoint_hooks\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.enums import DeviceType, DistributedType\nfrom pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn\nfrom tests.deprecated_api import no_deprecated_call\nfrom tests.helpers.boring_model import BoringDataModule, BoringModel\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.torchtext_utils import get_dummy_torchtext_data_iterator\n\n\ndef test_v1_8_0_deprecated_distributed_type_enum():\n\n with pytest.deprecated_call(match=\"has been deprecated in v1.6 and will be removed in v1.8.\"):\n _ = DistributedType.DDP\n\n\ndef test_v1_8_0_deprecated_device_type_enum():\n\n with pytest.deprecated_call(match=\"has been deprecated in v1.6 and will be removed in v1.8.\"):\n _ = DeviceType.CPU\n\n\n@pytest.mark.skipif(not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\ndef test_v1_8_0_deprecated_torchtext_batch():\n\n with pytest.deprecated_call(match=\"is deprecated and Lightning will remove support for it in v1.8\"):\n data_iterator, _ = get_dummy_torchtext_data_iterator(num_samples=3, batch_size=3)\n batch = next(iter(data_iterator))\n _ = move_data_to_device(batch=batch, device=torch.device(\"cpu\"))\n\n\ndef test_v1_8_0_on_init_start_end(tmpdir):\n class TestCallback(Callback):\n def on_init_start(self, trainer):\n print(\"Starting to init trainer!\")\n\n def on_init_end(self, trainer):\n print(\"Trainer is init now\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n 
with pytest.deprecated_call(\n match=\"The `on_init_end` callback hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.validate(model)\n\n\ndef test_v1_8_0_deprecated_call_hook():\n trainer = Trainer(\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n enable_progress_bar=False,\n logger=False,\n )\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8.\"):\n trainer.call_hook(\"test_hook\")\n\n\ndef test_v1_8_0_deprecated_warning_positional_category():\n with pytest.deprecated_call(match=r\"use `category=FutureWarning.\"):\n rank_zero_warn(\"foo\", FutureWarning)\n\n\ndef test_v1_8_0_deprecated_on_hpc_hooks(tmpdir):\n class TestModelSave(BoringModel):\n def on_hpc_save(self):\n print(\"on_hpc_save override\")\n\n class TestModelLoad(BoringModel):\n def on_hpc_load(self):\n print(\"on_hpc_load override\")\n\n save_model = TestModelSave()\n load_model = TestModelLoad()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, fast_dev_run=True)\n\n with pytest.deprecated_call(\n match=r\"Method `LightningModule.on_hpc_save` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(save_model)\n with pytest.deprecated_call(\n match=r\"Method `LightningModule.on_hpc_load` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(load_model)\n\n\ndef test_v1_8_0_deprecated_run_stage():\n trainer = Trainer()\n trainer._run_stage = Mock()\n with pytest.deprecated_call(match=\"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8.\"):\n trainer.run_stage()\n\n\ndef test_v1_8_0_trainer_verbose_evaluate():\n trainer = Trainer()\n with pytest.deprecated_call(match=\"verbose_evaluate` property has been deprecated and will be removed in v1.8\"):\n assert trainer.verbose_evaluate\n\n with pytest.deprecated_call(match=\"verbose_evaluate` property has been deprecated and will be removed in v1.8\"):\n trainer.verbose_evaluate = False\n\n\n@pytest.mark.parametrize(\"fn_prefix\", [\"validated\", \"tested\", \"predicted\"])\ndef test_v1_8_0_trainer_ckpt_path_attributes(fn_prefix: str):\n test_attr = f\"{fn_prefix}_ckpt_path\"\n trainer = Trainer()\n with pytest.deprecated_call(match=f\"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8\"):\n _ = getattr(trainer, test_attr)\n with pytest.deprecated_call(match=f\"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8\"):\n setattr(trainer, test_attr, \"v\")\n\n\ndef test_v1_8_0_deprecated_trainer_should_rank_save_checkpoint(tmpdir):\n trainer = Trainer()\n with pytest.deprecated_call(\n match=r\"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n _ = trainer.should_rank_save_checkpoint\n\n\ndef test_v1_8_0_deprecated_lr_scheduler():\n trainer = Trainer()\n with pytest.deprecated_call(match=r\"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8.\"):\n assert trainer.lr_schedulers == []\n\n\ndef test_v1_8_0_trainer_optimizers_mixin():\n trainer = Trainer()\n model = BoringModel()\n trainer.strategy.connect(model)\n trainer.lightning_module.trainer = trainer\n\n with pytest.deprecated_call(\n match=r\"`TrainerOptimizersMixin.init_optimizers` was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.init_optimizers(model)\n\n with pytest.deprecated_call(\n match=r\"`TrainerOptimizersMixin.convert_to_lightning_optimizers` was deprecated in v1.6 and will be removed in \"\n \"v1.8.\"\n ):\n 
trainer.convert_to_lightning_optimizers()\n\n\ndef test_v1_8_0_deprecate_trainer_callback_hook_mixin():\n methods_with_self = [\n \"on_before_accelerator_backend_setup\",\n \"on_configure_sharded_model\",\n \"on_init_start\",\n \"on_init_end\",\n \"on_fit_start\",\n \"on_fit_end\",\n \"on_sanity_check_start\",\n \"on_sanity_check_end\",\n \"on_train_epoch_start\",\n \"on_train_epoch_end\",\n \"on_validation_epoch_start\",\n \"on_validation_epoch_end\",\n \"on_test_epoch_start\",\n \"on_test_epoch_end\",\n \"on_predict_epoch_start\",\n \"on_epoch_start\",\n \"on_epoch_end\",\n \"on_train_start\",\n \"on_train_end\",\n \"on_pretrain_routine_start\",\n \"on_pretrain_routine_end\",\n \"on_batch_start\",\n \"on_batch_end\",\n \"on_validation_start\",\n \"on_validation_end\",\n \"on_test_start\",\n \"on_test_end\",\n \"on_predict_start\",\n \"on_predict_end\",\n \"on_after_backward\",\n ]\n methods_with_stage = [\n \"setup\",\n \"teardown\",\n ]\n methods_with_batch_batch_idx_dataloader_idx = [\n \"on_train_batch_start\",\n \"on_validation_batch_start\",\n \"on_test_batch_start\",\n \"on_predict_batch_start\",\n ]\n methods_with_outputs_batch_batch_idx_dataloader_idx = [\n \"on_train_batch_end\",\n \"on_validation_batch_end\",\n \"on_test_batch_end\",\n \"on_predict_batch_end\",\n ]\n methods_with_checkpoint = [\"on_save_checkpoint\", \"on_load_checkpoint\"]\n trainer = Trainer(\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n enable_progress_bar=False,\n logger=False,\n )\n model = BoringModel()\n # need to attach model to trainer for testing of `on_pretrain_routine_start`\n trainer.strategy.connect(model)\n for method_name in methods_with_self:\n fn = getattr(trainer, method_name, None)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn()\n for method_name in methods_with_stage:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(stage=\"test\")\n for method_name in methods_with_batch_batch_idx_dataloader_idx:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(batch={}, batch_idx=0, dataloader_idx=0)\n for method_name in methods_with_outputs_batch_batch_idx_dataloader_idx:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]), batch={}, batch_idx=0, dataloader_idx=0)\n for method_name in methods_with_checkpoint:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(checkpoint={})\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_predict_epoch_end(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_exception(exception=Exception)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_before_backward(loss=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_before_optimizer_step(\n optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9), optimizer_idx=0\n )\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n 
trainer.on_before_zero_grad(optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9))\n\n\ndef test_v1_8_0_deprecated_training_type_plugin_property():\n trainer = Trainer()\n with pytest.deprecated_call(match=\"in v1.6 and will be removed in v1.8\"):\n trainer.training_type_plugin\n\n\ndef test_v1_8_0_deprecate_trainer_data_loading_mixin():\n trainer = Trainer(max_epochs=1)\n model = BoringModel()\n dm = BoringDataModule()\n trainer.fit(model, datamodule=dm)\n\n with pytest.deprecated_call(\n match=r\"`TrainerDataLoadingMixin.prepare_dataloader` was deprecated in v1.6 and will be removed in v1.8.\",\n ):\n trainer.prepare_dataloader(dataloader=model.train_dataloader, shuffle=False)\n with pytest.deprecated_call(\n match=r\"`TrainerDataLoadingMixin.request_dataloader` was deprecated in v1.6 and will be removed in v1.8.\",\n ):\n trainer.request_dataloader(stage=RunningStage.TRAINING)\n\n\ndef test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys():\n from pytorch_lightning.callbacks.device_stats_monitor import prefix_metric_keys\n\n with pytest.deprecated_call(match=\"in v1.6 and will be removed in v1.8\"):\n prefix_metric_keys({\"foo\": 1.0}, \"bar\")\n\n\n@pytest.mark.parametrize(\n \"cls\",\n [\n DDPPlugin,\n DDP2Plugin,\n DDPSpawnPlugin,\n pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)),\n DataParallelPlugin,\n DDPFullyShardedPlugin,\n pytest.param(IPUPlugin, marks=RunIf(ipu=True)),\n DDPShardedPlugin,\n DDPSpawnShardedPlugin,\n TPUSpawnPlugin,\n ],\n)\ndef test_v1_8_0_deprecated_training_type_plugin_classes(cls):\n old_name = cls.__name__\n new_name = old_name.replace(\"Plugin\", \"Strategy\")\n with pytest.deprecated_call(\n match=f\"{old_name}` is deprecated in v1.6 and will be removed in v1.8. Use .*{new_name}` instead.\"\n ):\n cls()\n\n\ndef test_v1_8_0_deprecated_single_device_plugin_class():\n with pytest.deprecated_call(\n match=(\n \"SingleDevicePlugin` is deprecated in v1.6 and will be removed in v1.8.\"\n \" Use `.*SingleDeviceStrategy` instead.\"\n )\n ):\n SingleDevicePlugin(\"cpu\")\n\n\n@RunIf(tpu=True)\ndef test_v1_8_0_deprecated_single_tpu_plugin_class():\n with pytest.deprecated_call(\n match=(\n \"SingleTPUPlugin` is deprecated in v1.6 and will be removed in v1.8.\" \" Use `.*SingleTPUStrategy` instead.\"\n )\n ):\n SingleTPUPlugin(0)\n\n\ndef test_v1_8_0_deprecated_lightning_optimizers():\n trainer = Trainer()\n with pytest.deprecated_call(\n match=\"Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8\"\n ):\n assert trainer.lightning_optimizers == {}\n\n\ndef test_v1_8_0_remove_on_batch_start_end(tmpdir):\n class TestCallback(Callback):\n def on_batch_start(self, *args, **kwargs):\n print(\"on_batch_start\")\n\n model = BoringModel()\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class TestCallback(Callback):\n def on_batch_end(self, *args, **kwargs):\n print(\"on_batch_end\")\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_on_configure_sharded_model(tmpdir):\n class TestCallback(Callback):\n def on_configure_sharded_model(self, trainer, model):\n print(\"Configuring 
sharded model\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir):\n class CustomModel(BoringModel):\n def on_epoch_start(self, *args, **kwargs):\n print(\"on_epoch_start\")\n\n model = CustomModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class CustomModel(BoringModel):\n def on_epoch_end(self, *args, **kwargs):\n print(\"on_epoch_end\")\n\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n\n model = CustomModel()\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir):\n class CustomModel(BoringModel):\n def on_pretrain_routine_start(self, *args, **kwargs):\n print(\"foo\")\n\n model = CustomModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class CustomModel(BoringModel):\n def on_pretrain_routine_end(self, *args, **kwargs):\n print(\"foo\")\n\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n\n model = CustomModel()\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_rank_zero_imports():\n\n import warnings\n\n from pytorch_lightning.utilities.distributed import rank_zero_debug, rank_zero_info\n from pytorch_lightning.utilities.warnings import LightningDeprecationWarning, rank_zero_deprecation, rank_zero_warn\n\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.distributed.rank_zero_debug has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_debug(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.distributed.rank_zero_info has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_info(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.rank_zero_warn has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_warn(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.rank_zero_deprecation has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_deprecation(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.LightningDeprecationWarning has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n warnings.warn(\"foo\", LightningDeprecationWarning, stacklevel=5)\n\n\ndef test_v1_8_0_on_before_accelerator_backend_setup(tmpdir):\n class TestCallback(Callback):\n def on_before_accelerator_backend_setup(self, *args, **kwargs):\n print(\"on_before_accelerator_backend called.\")\n\n model = 
BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6\"\n \" and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_logger_agg_parameters():\n class CustomLogger(LightningLoggerBase):\n @rank_zero_only\n def log_hyperparams(self, params):\n pass\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n pass\n\n @property\n def name(self):\n pass\n\n @property\n def version(self):\n pass\n\n with pytest.deprecated_call(\n match=\"The `agg_key_funcs` parameter for `LightningLoggerBase` was deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n CustomLogger(agg_key_funcs={\"mean\", np.mean})\n\n with pytest.deprecated_call(\n match=\"The `agg_default_func` parameter for `LightningLoggerBase` was deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n CustomLogger(agg_default_func=np.mean)\n\n # Should have no deprecation warning\n logger = CustomLogger()\n\n with pytest.deprecated_call(\n match=\"`LightningLoggerBase.update_agg_funcs` was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n logger.update_agg_funcs()\n\n\ndef test_v1_8_0_deprecated_agg_and_log_metrics_override(tmpdir):\n class AggregationOverrideLogger(CSVLogger):\n @rank_zero_only\n def agg_and_log_metrics(self, metrics, step):\n self.log_metrics(metrics=metrics, step=step)\n\n logger = AggregationOverrideLogger(tmpdir)\n logger2 = CSVLogger(tmpdir)\n logger3 = CSVLogger(tmpdir)\n\n # Test single loggers\n with pytest.deprecated_call(\n match=\"`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed\"\n \" in v1.8. `Trainer` will directly call `LightningLoggerBase.log_metrics` so custom\"\n \" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`.\"\n ):\n Trainer(logger=logger)\n # Should have no deprecation warning\n Trainer(logger=logger2)\n\n # Test multiple loggers\n with pytest.deprecated_call(\n match=\"`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed\"\n \" in v1.8. 
`Trainer` will directly call `LightningLoggerBase.log_metrics` so custom\"\n \" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`.\"\n ):\n Trainer(logger=[logger, logger3])\n # Should have no deprecation warning\n Trainer(logger=[logger2, logger3])\n\n\ndef test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir):\n class TestCallback(Callback):\n def on_pretrain_routine_start(self, trainer, pl_module):\n print(\"on_pretrain_routine_start called.\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n enable_progress_bar=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class TestCallback(Callback):\n def on_pretrain_routine_end(self, trainer, pl_module):\n print(\"on_pretrain_routine_end called.\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n enable_progress_bar=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_weights_save_path(tmpdir):\n with pytest.deprecated_call(match=r\"Setting `Trainer\\(weights_save_path=\\)` has been deprecated in v1.6\"):\n trainer = Trainer(weights_save_path=tmpdir)\n with pytest.deprecated_call(match=r\"`Trainer.weights_save_path` has been deprecated in v1.6\"):\n _ = trainer.weights_save_path\n\n\ndef test_deprecated_epoch_outputs_format(tmpdir):\n class DeprecationModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.truncated_bptt_steps = 1\n\n def training_step(self, batch, batch_idx, optimizer_idx, hiddens):\n output = super().training_step(batch, batch_idx)\n output[\"hiddens\"] = hiddens\n return output\n\n def tbptt_split_batch(self, batch, split_size):\n return [batch, batch]\n\n def training_epoch_end(self, outputs):\n ...\n\n def on_train_batch_end(self, outputs, batch, batch_idx) -> None:\n ...\n\n def configure_optimizers(self):\n return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = DeprecationModel()\n batch_match = r\"on_train_batch_end.*will change in version v1.8 to \\(tbptt_steps, n_optimizers\\)\"\n with pytest.deprecated_call(match=batch_match):\n trainer.fit(model)\n\n class DeprecationModel2(DeprecationModel):\n def on_train_batch_end(self, *args, new_format=True):\n ...\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = DeprecationModel()\n epoch_match = r\"training_epoch_end.*will change in version v1.8 to \\(n_batches, tbptt_steps, n_optimizers\\)\"\n with pytest.deprecated_call(match=epoch_match):\n trainer.fit(model)\n\n class NoDeprecationModel(DeprecationModel2):\n def training_epoch_end(self, outputs, new_format=True):\n ...\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = NoDeprecationModel()\n with no_deprecated_call(match=\"will change in version v1.8.*new_format=True\"):\n trainer.fit(model)\n\n\n@pytest.mark.flaky(reruns=3)\n@pytest.mark.parametrize([\"action\", \"expected\"], [(\"a\", [3, 1]), (\"b\", [2]), (\"c\", [1])])\ndef test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):\n \"\"\"Ensure the reported durations are reasonably accurate.\"\"\"\n\n def 
_sleep_generator(durations):\n \"\"\"the profile_iterable method needs an iterable in which we can ensure that we're properly timing how long\n it takes to call __next__\"\"\"\n for duration in durations:\n time.sleep(duration)\n yield duration\n\n def _get_python_cprofile_total_duration(profile):\n return sum(x.inlinetime for x in profile.getstats())\n\n simple_profiler = SimpleProfiler()\n iterable = _sleep_generator(expected)\n\n with pytest.deprecated_call(\n match=\"`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n for _ in simple_profiler.profile_iterable(iterable, action):\n pass\n\n # we exclude the last item in the recorded durations since that's when StopIteration is raised\n np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)\n\n advanced_profiler = AdvancedProfiler(dirpath=tmpdir, filename=\"profiler\")\n\n iterable = _sleep_generator(expected)\n\n with pytest.deprecated_call(\n match=\"`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n for _ in advanced_profiler.profile_iterable(iterable, action):\n pass\n\n recorded_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])\n expected_total_duration = np.sum(expected)\n np.testing.assert_allclose(recorded_total_duration, expected_total_duration, rtol=0.2)\n\n\ndef test_v1_8_0_logger_collection(tmpdir):\n logger1 = CSVLogger(tmpdir)\n logger2 = CSVLogger(tmpdir)\n\n trainer1 = Trainer(logger=logger1)\n trainer2 = Trainer(logger=[logger1, logger2])\n\n # Should have no deprecation warning\n trainer1.logger\n trainer1.loggers\n trainer2.loggers\n trainer2.logger\n\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n LoggerCollection([logger1, logger2])\n\n\ndef test_v1_8_0_precision_plugin_checkpoint_hooks(tmpdir):\n class PrecisionPluginSaveHook(PrecisionPlugin):\n def on_save_checkpoint(self, checkpoint):\n print(\"override on_save_checkpoint\")\n\n class PrecisionPluginLoadHook(PrecisionPlugin):\n def on_load_checkpoint(self, checkpoint):\n print(\"override on_load_checkpoint\")\n\n model = BoringModel()\n\n precplugin_save = PrecisionPluginSaveHook()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_save])\n with pytest.deprecated_call(\n match=\"`PrecisionPlugin.on_save_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `state_dict` instead.\"\n ):\n trainer.fit(model)\n\n precplugin_load = PrecisionPluginLoadHook()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_load])\n with pytest.deprecated_call(\n match=\"`PrecisionPlugin.on_load_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `load_state_dict` instead.\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_abstract_profiler():\n assert \"`AbstractProfiler` was deprecated in v1.6\" in AbstractProfiler.__doc__\n\n\ndef test_v1_8_0_datamodule_checkpointhooks():\n class CustomBoringDataModuleSave(BoringDataModule):\n def on_save_checkpoint(self, checkpoint):\n print(\"override on_save_checkpoint\")\n\n class CustomBoringDataModuleLoad(BoringDataModule):\n def on_load_checkpoint(self, checkpoint):\n print(\"override on_load_checkpoint\")\n\n trainer = Mock()\n\n trainer.datamodule = CustomBoringDataModuleSave()\n with pytest.deprecated_call(\n match=\"`LightningDataModule.on_save_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. 
Use `state_dict` instead.\"\n ):\n _check_datamodule_checkpoint_hooks(trainer)\n\n trainer.datamodule = CustomBoringDataModuleLoad()\n with pytest.deprecated_call(\n match=\"`LightningDataModule.on_load_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `load_state_dict` instead.\"\n ):\n _check_datamodule_checkpoint_hooks(trainer)\n","sub_path":"tests/deprecated_api/test_remove_1-8.py","file_name":"test_remove_1-8.py","file_ext":"py","file_size_in_byte":29931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"349311818","text":"class BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # adds the input value to the binary search tree\n def insert(self, value):\n # if value is less we go left\n # if there is no left child, we can park this node here\n if value < self.value:\n if not self.left:\n self.left = BinarySearchTree(value)\n\n # recurse on the left child\n else:\n self.left.insert(value)\n elif value >= self.value:\n if not self.right:\n self.right = BinarySearchTree(value)\n else:\n self.right.insert(value)\n\n # searches tree for the input value, returning a boolean if it exists or not\n def contains(self, target):\n # use a while loop to check self.values against target - if it finds a match return True\n while True:\n # check if the target is equal to the current node\n if target == self.value:\n return True\n # check if target is less than the node value and that a left node exists\n elif target < self.value and self.left:\n # set self to the new node down the tree\n self = self.left\n\n # if target is > self.value and a right node exists\n elif target > self.value and self.right:\n # set self to the new node down the tree\n self = self.right\n\n # if no match is found return False\n else:\n return False\n # returns the maximum value in the binary search tree\n\n def get_max(self):\n # traverse down the right side of the tree until self.right is None with a while loop\n while True:\n if self.right is not None:\n self = self.right\n\n # Once self.right is None return the current nodes value\n else:\n break\n return self.value\n\n # performs a traversal of every node, executing the cb on every tree node value\n def for_each(self, cb):\n # use recursion to make this easy\n # invoke the callback on every node\n cb(self.value)\n\n # check if there is a left\n if self.left:\n # if so call for_each with the callback on the left nodes\n self.left.for_each(cb)\n\n # check if there is a right\n if self.right:\n # if so call for_each with the callback on the right nodes\n self.right.for_each(cb)\n","sub_path":"binary_search_tree/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"105843383","text":"\"\"\"\nModule with ftp related helper functions\n\"\"\"\n\nimport ftplib\nimport os\nfrom urllib.parse import ParseResult\n\n\nclass FtpSession:\n \"\"\" Represent an FTP session \"\"\"\n\n def __init__(self, host: ParseResult):\n \"\"\"\n Instantiate the ftp session using given host\n :param host: host details\n \"\"\"\n self.files_cache = {}\n self.ftp = ftplib.FTP()\n\n port = 21 if host.port is None else host.port\n self.ftp.connect(host.hostname, port, 5)\n self.ftp.login(host.username, host.password)\n\n if host.path is not None:\n self.ftp.cwd(host.path)\n\n def __del__(self):\n \"\"\"\n Close the 
ftp connection upon destruction\n \"\"\"\n self.ftp.quit()\n\n def directory_exists(self, haystack: str, needle: str) -> bool:\n \"\"\"\n Determine if the given directory needle exists in the haystack parent directory\n :param haystack: directory to look into\n :param needle: directory to find\n :return: true if needle is found in haystack\n \"\"\"\n\n if os.path.join(haystack, needle) in self.files_cache:\n return True\n\n for file, facts in self.ftp.mlsd(haystack, facts=['type']):\n if file == needle and facts['type'] == 'dir':\n self.files_cache[os.path.join(haystack, needle)] = True\n return True\n return False\n\n def make_directories(self, path: str):\n \"\"\"\n Make any missing directories that will be needed to allow storage on given path\n :param path: the path\n \"\"\"\n current_dir = \"\"\n\n for directory in os.path.split(path)[0].split(os.sep):\n next_dir = os.path.join(current_dir, directory)\n if not self.directory_exists(current_dir, directory):\n self.ftp.mkd(next_dir)\n current_dir = next_dir\n\n def delete(self, file):\n \"\"\"\n Delete given file\n :param file: file path\n \"\"\"\n self.ftp.delete(file)\n\n def upload(self, path: str, content):\n \"\"\"\n Upload given content to given path\n :param path: remote path where to store the content\n :param content: content to be uploaded\n \"\"\"\n self.ftp.storbinary(\"STOR {}\".format(path), content)\n","sub_path":"ftpsync/ftp.py","file_name":"ftp.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"391272741","text":"from kivy.graphics import Color, Rectangle\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\nfrom kivy.core.audio import SoundLoader\nimport note\nimport wave\n\nBLOCK_LEN = 15\n\n\nclass ChildWidget(FloatLayout):\n\n def __init__(self, **kwargs):\n # make sure we aren't overriding any important functionality\n super(ChildWidget, self).__init__(**kwargs)\n with self.canvas:\n Color(0, 0, 0, 1.0)\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.bind(pos=self.update_rect)\n self.bind(size=self.update_rect)\n self.audio = kwargs.get(\"audio\")\n self.audio_len = self.audio.getnframes()/self.audio.getframerate()\n # Number of BLOCK_LEN sec blocks\n if self.audio_len <= BLOCK_LEN:\n self.blocks = 0\n else:\n self.blocks = int(round(self.audio_len/(BLOCK_LEN)))\n if self.audio_len % BLOCK_LEN > 0:\n self.blocks += 1\n\n self.curr_block = 0\n _, _, _, p = zip(*(kwargs.get(\"f\")))\n self.max_freq = max(p)\n self.init_notes(kwargs.get(\"f\"))\n self.show_notes()\n\n init_t = Button(\n id=\"start\",\n background_normal='',\n background_color=[0, 0, 0, 1],\n text=self.setTime(0),\n size_hint=(0.05, .01),\n pos_hint={'center_x': 0.05, 'center_y': 0.06}\n )\n final_t = Button(\n id=\"end\",\n background_normal='',\n background_color=[0, 0, 0, 1],\n text=self.setTime(BLOCK_LEN),\n size_hint=(0.05, .01),\n pos_hint={'center_x': 0.95, 'center_y': 0.06}\n )\n self.add_widget(init_t)\n self.add_widget(final_t)\n\n \"\"\"\n [update_rect] updates the positioning of the rectangle that gives this\n widget its background color when the widget size has been changed.\n \"\"\"\n\n def update_rect(self, *args):\n self.rect.pos = self.pos\n self.rect.size = self.size\n\n def map_time(self, t):\n factor = int(t / BLOCK_LEN)\n adj = t - (BLOCK_LEN * factor)\n return (adj/BLOCK_LEN)\n\n \"\"\"\n [map_coord] converts information from analysis.py into the appropriate\n Note information\n \"\"\"\n\n def 
map_coord(self, t_start, t_end, p):\n x_l = self.map_time(t_start)\n x_len = self.map_time(t_end)\n y = round(p/self.get_maxf(), 4)\n y = y - 0.2 # top margin\n return x_l, x_len, y\n\n \"\"\"\n [init_notes] initalizes a Note for each detected note from analysis.py\n and adds it to the parent widget\n \"\"\"\n\n def init_notes(self, f):\n i = 0\n pad = 0.1\n for n in f:\n x_l, x_len, y = self.map_coord(n[0], n[1], n[3])\n x_l = x_l + i\n x_len = x_len/10\n n = note.Note(\n start=n[0],\n end=n[0]+n[1],\n background_normal='',\n background_color=n[2],\n text='',\n size_hint=(x_len, .1),\n pos_hint={'x': x_l, 'y': y})\n\n n.set_color(0)\n self.add_widget(n, index=0)\n i = x_len\n\n \"\"\"\n [show_notes] shows all the notes within the time span of [self.block]\n \"\"\"\n\n def show_notes(self):\n min_time = self.get_curr_block() * BLOCK_LEN\n max_time = (self.get_curr_block() + 1) * BLOCK_LEN\n for widget in self.walk(restrict=True):\n if (type(widget)is note.Note):\n if min_time <= widget.get_end()and widget.get_end() <= max_time and min_time <= widget.get_start()and widget.get_start() <= max_time:\n widget.set_color(widget.get_alpha())\n else:\n widget.set_color(0)\n\n elif (type(widget) is Button):\n if widget.id == \"start\":\n widget.text = self.setTime(min_time)\n elif widget.id == \"end\":\n widget.text = self.setTime(max_time)\n\n def get_curr_block(self):\n return self.curr_block\n\n def get_max_block(self):\n return self.blocks\n\n def get_maxf(self):\n return self.max_freq\n\n def get_audio_len(self):\n return self.audio_len\n\n \"\"\" if d = 1 then left button was pressed, if d = 0 then right button was pressed \"\"\"\n\n def set_block(self, d):\n if d and self.get_curr_block() > 0:\n self.curr_block -= 1\n self.show_notes()\n elif not d and self.get_curr_block() < self.get_max_block():\n self.curr_block += 1\n self.show_notes()\n\n \"\"\" Convert time in seconds to minutes in the format of x:yz\"\"\"\n\n def setTime(self, t):\n if t < 10:\n return \"0:0\"+str(t)\n elif t < 60:\n return \"0:\"+str(t)\n else:\n m = t/60\n s = 60 % t\n if s == 0:\n return str(m)+\":00\"\n elif s < 10:\n return str(m) + \":0\"+str(s)\n else:\n return str(m)+\":\"+str(s)\n","sub_path":"demo/child.py","file_name":"child.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"159088605","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass SubscriptionCreationParameters(Model):\n \"\"\"Subscription Creation Parameters required to create a new Azure\n subscription.\n\n :param display_name: The display name of the subscription.\n :type display_name: str\n :param owners: The list of principals that should be granted Owner access\n on the subscription. Principals should be of type User, Service Principal\n or Security Group.\n :type owners: list[~azure.mgmt.subscription.models.AdPrincipal]\n :param offer_type: The offer type of the subscription. 
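The setTime helper at the end of the child.py record above hand-rolls the minute/second split, and its `s = 60 % t` line computes 60 mod t rather than the intended t mod 60, so for example setTime(90) returns "1.5:60". A compact divmod-based alternative, shown only as a sketch (the name format_mmss is mine, and it assumes whole seconds):

# Sketch of a divmod-based replacement for setTime (assumes whole seconds).
def format_mmss(t: int) -> str:
    minutes, seconds = divmod(int(t), 60)   # e.g. 90 -> (1, 30)
    return "{}:{:02d}".format(minutes, seconds)

assert format_mmss(0) == "0:00"
assert format_mmss(9) == "0:09"
assert format_mmss(90) == "1:30"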
For example,\n MS-AZR-0017P (EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement\n devTest) are available. Only valid when creating a subscription in a\n enrollment account scope. Possible values include: 'MS-AZR-0017P',\n 'MS-AZR-0148P'\n :type offer_type: str or ~azure.mgmt.subscription.models.OfferType\n :param additional_parameters: Additional, untyped parameters to support\n custom subscription creation scenarios.\n :type additional_parameters: dict[str, object]\n \"\"\"\n\n _attribute_map = {\n 'display_name': {'key': 'displayName', 'type': 'str'},\n 'owners': {'key': 'owners', 'type': '[AdPrincipal]'},\n 'offer_type': {'key': 'offerType', 'type': 'str'},\n 'additional_parameters': {'key': 'additionalParameters', 'type': '{object}'},\n }\n\n def __init__(self, **kwargs):\n super(SubscriptionCreationParameters, self).__init__(**kwargs)\n self.display_name = kwargs.get('display_name', None)\n self.owners = kwargs.get('owners', None)\n self.offer_type = kwargs.get('offer_type', None)\n self.additional_parameters = kwargs.get('additional_parameters', None)\n","sub_path":"src/subscription/azext_subscription/subscription/models/subscription_creation_parameters.py","file_name":"subscription_creation_parameters.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"41518128","text":"import cv2\nimport sys\nimport datetime\nimport numpy\nimport subprocess\n\nbasedir = os.path.dirname(sys.argv[0])\n\ndef addItems(frame,faces,mouths,eyes):\n glasses = cv2.imread(basedir+\"/glasses.png\",-1)\n joint = cv2.imread(basedir+\"/joint.png\",-1)\n \n t=datetime.datetime.now().time()\n filename=basedir+\"/Saved/\"+str(t.hour)+str(t.minute)+str(t.second)+\".jpg\"\n #print(frame[0,0])\n #print(joint)\n for (x, y, w, h) in faces:\n for (x2, y2, w2, h2) in eyes:\n if(x2>x and x2+w2y and y2+h2x+h/3 and x2+w2/2x and x2y+h/2 and y2+h2x+h/3 and x2+w2/2 pipe[1][1]):\n if pipe[0][0] - bird.birdRadius < bird.birdx < pipe[0][0] + self.pipeWidth:\n bird.score -= (abs(bird.birdy - (pipe[0][1] + self.pipeHeight + self.gap/2)))/4\n self.allBirds += [bird]\n self.birds.remove(bird)\n break\n if (pipe[0][0] + self.pipeWidth) == (bird.birdx + bird.birdRadius):\n self.score += 1\n bird.gameScore += 1\n bird.score += 150\n if len(self.birds) == 1:\n tempBest = self.birds[0]\n if self.bestBird == None:\n self.bestBird = Bird(self.width, self.height, tempBest.network)\n self.allBestBirds += [Bird(self.width, self.height, tempBest.network)]\n if tempBest.gameScore > self.bestBird.gameScore:\n self.bestBird = Bird(self.width, self.width, tempBest.network)\n self.allBestBirds += [Bird(self.width, self.height, tempBest.network)]\n if tempBest.network.biasO[0] != self.bestBird.network.biasO[0]:\n print('weightI', self.bestBird.network.weightsI)\n print('weightsO', self.bestBird.network.weightsO)\n print('biasI',self.bestBird.network.biasI)\n print('biasO',self.bestBird.network.biasO)\n print(\"#######################\")\n elif len(self.birds) == 0:\n self.birds = nextGeneration(self.allBirds, self.width, self.height, self.total, self.rate)\n self.generation += 1\n PygameGame.init(self, self.birds, self.generation, self.allBirds, self.bestBird, self.allBestBirds) \n def redrawAll(self, screen):\n self.win.blit(self.background, (0,0))\n for pipe in self.pipe:\n self.win.blit(self.topPipe, pipe[0])\n self.win.blit(self.bottomPipe, pipe[1])\n for bird in self.birds:\n self.win.blit(bird.birds[bird.birdImage], (bird.birdx, 
bird.birdy))\n myfont = pygame.font.SysFont('LCD Solid', 60)\n textsurface = myfont.render(str(int(self.score)), False, (255, 255, 255))\n if len(str(self.score)) == 1:\n screen.blit(textsurface,(self.width/2 - 20, 30))\n if len(str(self.score)) == 2:\n screen.blit(textsurface,(self.width/2 - 40, 30))\n if len(str(self.score)) == 3:\n screen.blit(textsurface,(self.width/2 - 60, 30))\n if len(str(self.score)) == 4:\n screen.blit(textsurface,(self.width/2 - 80, 30))\n myfont = pygame.font.SysFont('LCD Solid', 10)\n textsurface = myfont.render(\"Generation \" + str(int(self.generation)), False, (255, 255, 255))\n screen.blit(textsurface,(self.width/2 - 40, 10)) \n if self.over:\n self.win.blit(self.gameover, (self.width/6 , self.height/2))\n\n def isKeyPressed(self, key):\n return self._keys.get(key, False)\n\n def __init__(self, width=288, height=490, fps=50, title=\"112 Pygame Game\"):\n self.width = width\n self.height = height\n self.fps = fps\n self.title = title\n self.bgColor = (255, 255, 255)\n pygame.init()\n\n def run(self):\n pygame.font.init() \n clock = pygame.time.Clock()\n screen = pygame.display.set_mode((self.width, self.height))\n # set the title of the window\n pygame.display.set_caption(self.title)\n\n # stores all the keys currently being held down\n self._keys = dict()\n\n # call game-specific initialization\n self.init()\n playing = True\n while playing:\n time = clock.tick(self.fps)\n self.timerFired(time)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.mousePressed(*(event.pos))\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.mouseReleased(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons == (0, 0, 0)):\n self.mouseMotion(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons[0] == 1):\n self.mouseDrag(*(event.pos))\n elif event.type == pygame.KEYDOWN:\n self._keys[event.key] = True\n self.keyPressed(event.key, event.mod)\n elif event.type == pygame.KEYUP:\n self._keys[event.key] = False\n self.keyReleased(event.key, event.mod)\n elif event.type == pygame.QUIT:\n playing = False\n screen.fill(self.bgColor)\n self.redrawAll(screen)\n pygame.display.flip()\n\n pygame.quit()\n sys.exit()\n\n\ndef main():\n game = PygameGame()\n game.run()\n\nif __name__ == '__main__':\n main()","sub_path":"flappygenetic.py","file_name":"flappygenetic.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"95812548","text":"# create anomaly set by concatenating two unique clips from different patients \n\nimport torch\nfrom torch.utils.data import Dataset\nfrom .utils import load_eeg_file, get_recordings_df\nfrom .eeg_dataset import EEGDataset\nfrom dynaconf import settings\n\nclass AnomalyDatasetByConcat(EEGDataset):\n \n def __init__(self, first_recording_length, second_recording_length, *args, **kwargs):\n \n # not concatenating clips in the begining (only checking length in super constructor)\n self.concat_clips = False\n \n super().__init__(*args, **kwargs)\n \n self.first_recording_length = first_recording_length\n self.second_recording_length = second_recording_length\n \n assert((self.first_recording_length + self.second_recording_length) == self.length), f\"Length of the two concatenated segments {self.first_recording_length} and {self.second_recording_length} should equal length param {self.length}\"\n \n self.concat_clips = True\n \n def __getitem__(self, 
first_idx):\n        \n        # if not concatenating clips, default to the super method\n        if not self.concat_clips:\n            return super().__getitem__(first_idx)\n        \n        second_idx = torch.randint(0, super().__len__(), (1,))[0].item()\n        if second_idx == first_idx:\n            # don't want the same clip concatenated to itself\n            return self.__getitem__(first_idx)\n        \n        first_tensor = super().__getitem__(first_idx)\n        second_tensor = super().__getitem__(second_idx)\n        \n        out = torch.cat((first_tensor[:,:self.first_recording_length], second_tensor[:,-self.second_recording_length:]), dim=1)\n        return out\n    \n    \nif __name__ == \"__main__\":\n    dataset = AnomalyDatasetByConcat(\n        first_recording_length = 800,\n        second_recording_length = 200,\n        csv_file = settings.TRAIN_DATASET_CSV,\n        length = 1000,\n        select_channels = [0,1,2],\n        max_num_examples = 100,\n    )\n\n    print(\"Length of the dataset\", dataset.__len__())\n    print(\"Shape of dataset item\", dataset[0].shape)","sub_path":"data/anomaly_by_concat.py","file_name":"anomaly_by_concat.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"339904916","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: xiezuoru\n### Call the GET method to read the level of pin A0.\nParameters that can be changed in this example:\n - url: set to the IP address of the vvBoard (虚谷号)\n - pin: the target pin, A0-A5 or D0-D13\nNote: this method needs an external sensor attached; otherwise digital pins read low by default and analog pins return random values.\n\"\"\"\nimport requests\n\nvvboardip='192.168.3.14'\npin='A1'\npayload = {\"pin\":pin}\nre = requests.get(url='http://'+ vvboardip +':1024/',params=payload)\nif (re.status_code==200):\n    r=re.json()\n    print('Raw response:')\n    print(re.text)\n    print('Successfully read pin '+ str(r.get(\"pin\")) + ' state: '+ str(r.get(\"msg\")))\n","sub_path":"Python_lib/webgpio/python/获取引脚状态.py","file_name":"获取引脚状态.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"51842154","text":"'''\n    The entry into your code. 
This file should include a training function and an evaluation function.\n'''\n\nfrom csv2numpy import *\nfrom bin_data import *\nfrom model import Net\nfrom normalize_data import *\nfrom plot_train import *\nfrom dataset import GestureDataset\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nimport argparse\nimport scipy.signal\nimport torch\nimport time\n\nseed = 0\ntry:\n orig_label = np.load(\"data/labels.npy\")\nexcept FileNotFoundError:\n unnormed_instances, orig_label = csv2numpy()\ntry:\n normed_instances = np.load(\"data/normalized_data.npy\")\nexcept FileNotFoundError:\n normed_instances = normalize_train_data()\n\ninstances = np.transpose(normed_instances, (0, 2, 1))\nlabels = []\nfor label in orig_label:\n num_dict = dict((key, i) for i, key in enumerate(string.ascii_lowercase))\n labels.append(num_dict[label])\n\ntrain_data, val_data, train_label, val_label = train_test_split(instances, np.array(labels), test_size=0.2, random_state=seed)\n\ndef load_data(batch_size):\n train_set = GestureDataset(train_data, train_label)\n val_set = GestureDataset(val_data, val_label)\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n return train_loader, val_loader\n\ndef load_model(lr):\n model = Net()\n loss_fnc = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.ASGD(model.parameters(), lr = lr)\n\n return model, loss_fnc, optimizer\n\ndef evaluate(net, val_loader):\n total_err = 0.0\n total_epoch = 0\n\n for i, data in enumerate(val_loader, 0):\n inputs, labels = data\n outputs = net(inputs.type(torch.FloatTensor))\n _, predicted = torch.max(outputs, 1)\n corr = predicted == labels.type(torch.LongTensor)\n total_err += int(corr.sum())\n total_epoch += len(labels)\n\n return float(total_err) / len(val_loader.dataset)\n\ndef arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help='batch_size')\n parser.add_argument('--lr', type=float, help='learning rate')\n parser.add_argument('--epochs', type=int, default=20, help='# of epochs')\n parser.add_argument('--eval_every', type=int, default=10)\n\n args = parser.parse_args()\n batch_size = args.batch_size\n lr = args.lr\n epochs = args.epochs\n eval_every = args.eval_every\n return batch_size, lr, epochs, eval_every\n\ndef save_model_plot(config, model, num_steps, train_acc, val_acc):\n\n model_name = get_model_name(config)\n MODEL_DIR = os.path.abspath(os.path.join(os.getcwd(), \"Model_Data\"))\n try:\n os.mkdir(MODEL_DIR)\n except:\n pass\n model_path = os.path.join(MODEL_DIR, model_name)\n torch.save((model).state_dict(), model_path)\n train_acc = scipy.signal.savgol_filter(train_acc, 5, 1)\n val_acc = scipy.signal.savgol_filter(val_acc, 5, 1)\n # Write the train/test loss/err into CSV file for plotting later\n steps = np.arange(1, num_steps + 1)\n df = pd.DataFrame({\"step\": steps, \"train_acc\": train_acc})\n df.to_csv(os.path.join(MODEL_DIR, \"train_acc_{}.csv\".format(model_name)), index=False)\n df = pd.DataFrame({\"step\": steps, \"val_acc\": val_acc})\n df.to_csv(os.path.join(MODEL_DIR, \"val_acc_{}.csv\".format(model_name)), index=False)\n plot_graph(model_name, \"acc\", config)\n\ndef evaluate_test_set():\n try:\n test_instances = np.load(\"data/normalized_test_data.npy\")\n except FileNotFoundError:\n test_instances = normalize_test_data()\n model = torch.load('best_model.pt')\n test_instances = 
torch.Tensor(np.transpose(test_instances, (0, 2, 1)))\n result = model(test_instances)\n _, predicted = torch.max(result, 1)\n np.savetxt('predictions.txt', predicted)\n\ndef main():\n #csv2numpy()\n #get_normal()\n #batch_size, lr, epochs, eval_every = arg_parse()\n batch_size, lr, epochs, eval_every = 32, 0.01, 100, 100\n config = {\"batch_size\": batch_size, \"lr\": lr, \"epochs\": epochs, \"eval_every\": eval_every}\n start_time = time.time()\n\n train_loader, val_loader = load_data(config[\"batch_size\"])\n model, loss_fnc, optimizer = load_model(config[\"lr\"])\n train_acc = []\n val_acc = []\n num_steps = 0\n best_val_acc = 0\n time_total = []\n time_start = time.time()\n for epoch in range(config[\"epochs\"]):\n accu_loss = 0\n total_corr = 0\n for i, data in enumerate(train_loader):\n inputs, labels = data\n inputs = inputs.type(torch.FloatTensor)\n labels = labels.type(torch.LongTensor)\n # print(inputs,label)\n optimizer.zero_grad()\n #model = Net().cuda()\n outputs = model(inputs)\n # print(outputs, labels)\n batch_loss = loss_fnc(input=outputs, target=labels)\n batch_loss.backward()\n optimizer.step()\n accu_loss += batch_loss\n _, predicted = torch.max(outputs, 1)\n corr = predicted == labels\n total_corr += int(corr.sum())\n\n eval_every = config[\"eval_every\"]\n if i % eval_every == eval_every - 1:\n num_steps += 1\n print('Epoch %d, Step %5d loss: %.3f' %\n (epoch + 1, epoch*(i + 1), accu_loss / (batch_size * eval_every)))\n\n train_accuracy = total_corr / (batch_size * eval_every)\n print(\"training accuracy is:\", train_accuracy)\n train_acc.append(train_accuracy)\n val_accuracy = evaluate(model, val_loader)\n val_acc.append(val_accuracy)\n if val_accuracy > best_val_acc:\n best_val_acc = val_accuracy\n torch.save(model, 'best_model.pt')\n print(\"Accuracy of current model is: \", val_accuracy)\n accu_loss = 0.0\n total_corr = 0\n time_total.append(time.time() - time_start)\n\n end_time = time.time()\n print(\"total running time is\", end_time-start_time)\n\n save_model_plot(config, model, num_steps, train_acc, val_acc)\n\nif __name__ == \"__main__\":\n main()\n #evaluate_test_set()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"478173879","text":"from django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nimport cv2\nimport re\nfrom .predictDisease import imagePred\n \nregex = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n\ndef home(request):\n if request.method == 'POST' and request.FILES['myfile']:\n\n \n myfile = request.FILES['myfile']\n print(myfile.size)\n print(myfile.content_type)\n \n if myfile.content_type.split(\"/\")[0] != \"image\":\n return render(request, 'core/CottonDiseasePrediction.html', {\n 'error_file': \"Error : Please Upload a Image\",\n 'uploaded_file_url': \"\"\n })\n if myfile.size > 23068672:\n return render(request, 'core/CottonDiseasePrediction.html', {\n 'error_file': \"Error : File size Exceeded 25 MB\",\n 'uploaded_file_url': \"\"\n })\n \n try:\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n output_pred = imagePred(uploaded_file_url)\n # attach_file_name = output_file\n # Open the file as binary mode\n \n return render(request, 'core/CottonDiseasePrediction.html', {\n 'error_file': output_pred,\n # 'uploaded_file_url': output_file\n })\n except Exception 
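In the gesture-classification record above, evaluate() counts correct predictions but accumulates them in a variable named total_err, which is easy to misread. The same computation, isolated as a sketch; the function name batch_accuracy and the tensor values are mine, not the record's:

import torch

def batch_accuracy(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    # Predicted class = index of the largest logit in each row.
    _, predicted = torch.max(outputs, 1)
    correct = (predicted == labels).sum().item()
    return correct / labels.size(0)

logits = torch.tensor([[0.1, 2.0], [3.0, 0.2], [0.0, 1.0]])
labels = torch.tensor([1, 0, 0])
print(batch_accuracy(logits, labels))  # 2 of 3 correct -> ~0.667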
as e:\n return render(request, 'core/CottonDiseasePrediction.html', {\n # 'error_file': \"Error : Some Error Occured\",\n 'error_file': str(e),\n 'uploaded_file_url': \"\"\n })\n return render(request, 'core/CottonDiseasePrediction.html', {\n 'uploaded_file_url': \"\"\n })\n","sub_path":"cottonDiseaseApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"642581447","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_policy import policy\n\nfrom watcher.common.policies import base\n\nSTRATEGY = 'strategy:%s'\n\nrules = [\n policy.DocumentedRuleDefault(\n name=STRATEGY % 'detail',\n check_str=base.RULE_ADMIN_API,\n description='List strategies with detail.',\n operations=[\n {\n 'path': '/v1/strategies/detail',\n 'method': 'GET'\n }\n ]\n ),\n policy.DocumentedRuleDefault(\n name=STRATEGY % 'get',\n check_str=base.RULE_ADMIN_API,\n description='Get a strategy.',\n operations=[\n {\n 'path': '/v1/strategies/{strategy_uuid}',\n 'method': 'GET'\n }\n ]\n ),\n policy.DocumentedRuleDefault(\n name=STRATEGY % 'get_all',\n check_str=base.RULE_ADMIN_API,\n description='List all strategies.',\n operations=[\n {\n 'path': '/v1/strategies',\n 'method': 'GET'\n }\n ]\n ),\n policy.DocumentedRuleDefault(\n name=STRATEGY % 'state',\n check_str=base.RULE_ADMIN_API,\n description='Get state of strategy.',\n operations=[\n {\n 'path': '/v1/strategies{strategy_uuid}/state',\n 'method': 'GET'\n }\n ]\n )\n]\n\n\ndef list_rules():\n return rules\n","sub_path":"watcher/common/policies/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6978426","text":"\ndef my_abs(x):\n\tif not isinstance(x,(int,float)):\n\t\traise TypeError('bad opened type')\n\tif x>0:\n\t\treturn x\n\telse:\n\t\treturn -x\nprint(my_abs(10))\n\nimport math\n\n#坐标、偏移量、角度\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\nr=move(100,100,60,math.pi/6)\nprint(r)\n#计算一元二次方程两个跟\n\ndef quadratic(a,b,c):\n\td=b*b-4*a*c\n\tif a==0 and b!=0:\n\t\treturn (-c/b)\n\telif a!=0 and d>0:\n\t\te=(-b+math.sqrt(d))/(2*a)\n\t\tf=(-b-math.sqrt(d))/(2*a)\n\t\treturn(e,f)\n\telif d<0:\n\t\t#return ('无视跟')\n\t\te=(-b+math.sqrt(abs(d)))/(2*a)\n\t\tf=(-b-math.sqrt(abs(d)))/(2*a)\n\t\t#输出须根\n\t\treturn(complex(e,f),complex(e,-f))\n\t\t#return(complex('e+f*j'),complex('e-f*j'))错误(直接将字符串转换为复数)\n\t\t\nr=quadratic(3,5,6)\nprint(r)\n#计算x的n次方,n的默认值为2\ndef power(x, n=2):\n\ts=1\n\twhile n>0:\n\t\tn=n-1 \n\t\ts=s*x\n\t\n\treturn s\nvalue=power(5,3)\nprint(value)\n\n#只能调用一次,重复调用将记住L的值'END'\ndef add_endl1(L=[]):\n\tL.append('END')\n\treturn L\n\t\nli=add_endl1(['1','2','3'])\n\nlist1=add_endl1()\nlist2=add_endl1()\nprint(li)\nprint(list1)\nprint(list2)\n\n#任意调用不会改变\ndef add_endl2(L=None):\n\tif L==None:\n\t\tL=[]\n\tL.append('END')\n\treturn 
L\n\nlist1=add_endl2()\nlist2=add_endl2()\nprint(list1)\nprint(list2)\n#默认参数\ndef enroll(name,gender,age=6,city='beijing'):\n\tprint('name',name)\n\tprint('gender',gender)\n\tprint('age',age)\n\tprint('city',city)\nenroll('bill','boy')\n\n#请计算a2 + b2 + c2 + ……\ndef calc(numbers):\n\tsum=0\n\tfor n in numbers:\n\t\tsum=sum+n*n\n\treturn sum\n\nresult_value=calc([1,3,5,7,9])\nprint(result_value)\n#利用可变参数,可以传入任意类型\ndef calc_change(*numbers):\n\tsum=0\n\tfor n in numbers:\n\t\tsum=sum+n*n\n\treturn sum\nnum=[1,3,5,7,9]\nresult2=calc_change(*num)\nprint(result2)\n#关键字参数,调用person时可以传入任意个参数.KV可变参数在函数调用时自动组装为一个tuple\ndef person(name,age,**kv):\n\tprint('name:',name,'age:',age,'other:',kv)\nperson('jack',16,city='beijing',gender='M')\n\n\nextra={'city':'beijing','job':'engenier'}\nperson('jack',16,**extra)\n#命名关键字参数、限制关键字命名的参数,加*\ndef person_limit(name,age,*,city,job):\n\tprint(name,age,city,job)\nperson_limit('jack',16,city='beijing',job='engenier')\n\n#参数组合\n#参数定义的顺序必须是:必选参数、默认参数、可变参数、命名关键字参数和关键字参数。\n#**kv默认为一个dict(类似于c的map(键、值))\n#*args是可变参数,args接收的是一个tuple;\ndef f1(a,b,c=0,*args,**kv):\n\tprint('a=',a,'b=',b,'c=',c,'args=',args,'kv=',kv)\nf1(1,2)\ndef f2(a,b,c=0,*,d,**kv):\n\tprint('a=',a,'b=',b,'c=',c,'d=',d,'kv=',kv)\nf2(1,2,d=99,exit=None)\n\n\n#可变参数既可以直接传入:func(1, 2, 3),又可以先组装list或tuple,再通过*args传入:func(*(1, 2, 3));\n\n#关键字参数既可以直接传入:func(a=1, b=2),又可以先组装dict,再通过**kw传入:func(**{'a': 1, 'b': 2})。\nargs=(1,2,3,4)\nkv={'d':99,'x':'#'}\nf1(*args,**kv)\nargs=(1,2,3)\nkv={'d':99,'x':'#'}\nf2(*args,**kv)\n\n#递归函数\ndef fact(n):\n\tif n==1:\n\t\treturn 1\n\treturn n*fact(n-1)\n\nn_value=fact(5)\nprint(n_value)\n\n#尾递归\ndef fact_iter(num,product):\n\tif num==1:\n\t\treturn product\n\t\n\treturn fact_iter(num-1,num*product)\nn_value=fact_iter(5,1)\nprint(n_value)\n\n#尾递归解决汉诺塔问题\n#编写move(n, a, b, c)函数,它接收参数n,表示3个柱子A、B、C中第1个柱子A的盘子数量,然后打印出把所有盘子从A借助B移动到C的方法\n#思路以n=3考虑\ndef move(n,a,b,c):\n\tif n==1:\n\t\tprint('%s-->%s'%(a,c))#递归到一个盘子直接到c\n\telse:\n\t\tmove(n-1,a,c,b)#n-1个盘子a放到b\n\t\tmove(1,a,b,c)#a中的这个盘子放到c \n\t\tmove(n-1,b,a,c)#b中n-1个盘子放到c然后递归\nmove(3,'a','b','c')\n\n#高级特性\nL=[]\nn=1\nwhile n<=99:\n\tL.append(n)\n\tn=n+2\nprint(L)\nm=0\nhalf_L=[]\nwhile m<=len(L)/2:\n\thalf_L.append(L[m])\n\tm=m+1\nprint(half_L)\n\n\n#切片提取\nL=['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']\nprint(L[0:3])\nprint(L[-1])#取倒数第一个\nprint(L[-3:])#取倒数3个\nprint(L[:4:2])#前4个每两个娶一个\n\n#迭代\n#用于dict\nd={'a':1,'b':2,'c':3,'d':4}\nfor key in d:\n\tprint(key)\nfor value in d.values():\n\tprint(value)\nfor k,v in d.items():\n\tprint(k,v)\n#用于字符串\nfor ch in 'ABC':\n\tprint(ch)\n#用于tuple\nfor x,y in [(1,2),(3,4),(5,6)]:\n\tprint(x,y)\n\n#列表生成式\nL=[]\nfor x in range(1,11):\n\tL.append(x*x)\nprint(L)\n\n#一部实现\nprint([x * x for x in range(1, 11)])\n#筛选偶数平方\nprint([x * x for x in range(1, 11)if x%2==0])\n\nimport os#列出当前目录下所有文件\nprint([d for d in os.listdir('.')])\n\n#把list字符串变小写\nL=['HELLO','OK','IAM','FINE']\nprint([s.lower() for s in L])\nl=['HELLO','OK','IAM','FINE',35]\nprint([s.lower() for s in L if isinstance(s,str)]) #判断s 是不是字符串\n#生成器\n#在Python中,这种一边循环一边计算的机制,称为生成器:generator。\n\ng=(x * x for x in range(1, 11))\nprint(g)\nfor n in g:\n\tprint(n)\n\n#著名的斐波拉契数列(Fibonacci)\ndef fib(max):\n\tn,a,b=0,0,1\n\twhile n 0#lambda是匿名函数,如果重新命名一个这样的函数会污染空间\ndef primers():\n\tyield 2#初始化序列\n\tit=_old_filter()\n\twhile True:\n\t\tn=next(it)#返回序列中的第一个数\n\t\tyield n\n\t\tit=filter(_not_devilible_(n),it)#构造新序列\nfor n in primers():\n\tif n < 1000:\n\t\tprint(n)\n\telse:\n\t\tbreak\n# print(list(primers))\n#判断回文数\ndef 
is_palindrome(n):\n\tl1=list(str(n))\n\tl2=list(str(n))\n\tl1.reverse()#翻转列表\n\tif l1==l2:\n\t\treturn True\n\telse:\n\t\treturn False\noutput = filter(is_palindrome, range(100, 1000))\nprint(list(output))\n#排序\nL = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\ndef by_name(n):\n\treturn n[0].lower()#将字符串统一转化成小写比较\ndef by_score(n):\n\treturn n[1]\nr1=sorted(L,key=by_name)\nr2=sorted(L,key=by_score)\nprint (r1)\nprint(list(r2))\t\n\n\n\t\n\n\t\n\n\n\n\n\n\n\n\n\t\t\n\t\t\n\t\n\t","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"200763464","text":"from django.test import TestCase\nfrom thestar.models import Competitor\nfrom thestar.models import Vote\nfrom factory import create_competitor\n\nclass TestVoteScreen(TestCase):\n def test_get_vote_screen(self):\n create_competitor()\n\n url = '/'\n response = self.client.get(url)\n self.assertEqual(200, response.status_code)\n self.assertContains(response, 'Kangsom')\n self.assertContains(response, 'Tanatat Chaiyaat')\n self.assertContains(response, '8')\n\n def test_vote(self):\n create_competitor()\n\n url = '/thestar/vote/?no=8'\n response = self.client.get(url)\n self.assertEqual(302, response.status_code)\n\n votes_for_ks = Vote.objects.all().count()\n self.assertEqual(1, votes_for_ks)","sub_path":"day2/4_django_admin/my_first_app/thestar/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"529662542","text":"import re\n\ndef calcchecksum(name):\n letters = {}\n llist = []\n for c in name:\n if c == '-': continue\n if c in letters:\n letters[c] = letters[c] + 1\n else:\n letters[c] = 1\n\n # convert to list of pairs\n for c in letters:\n llist.append( (c, letters[c]) )\n # sort the list\n llist.sort(key = lambda x: (-x[1], x[0]))\n # build result\n return \"\".join(list(map(lambda x: x[0], llist[:5])))\n\nresult = 0\n\nwith open(\"input.txt\", mode=\"r\") as f:\n allrooms = f.readlines()\n\n for room in allrooms:\n m = re.match(r\"(\\S+)-(\\d+)\\[(\\w+)\\]\", room)\n if m is None:\n print(\"error: {}\", room)\n exit(1)\n roomname = m.group(1)\n sectorid = int(m.group(2))\n checksum = m.group(3)\n if checksum == calcchecksum(roomname):\n result += sectorid\n\nprint(result)\n","sub_path":"2016/04/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"518464635","text":"import random\nimport unittest\n\nfrom django.conf import settings\nfrom django.contrib.sessions.models import Session\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom django.test.client import Client\n\nfrom mock import Mock\n\nfrom nose.exc import SkipTest\n\nfrom faker.generators.utils import numerify\nfrom paypal.standard.ipn.models import PayPalIPN\nfrom pytz import timezone\n\nfrom tiger.accounts.models import Site, Location\nfrom tiger.core.forms import get_order_form\nfrom tiger.core.middleware import Cart\nfrom tiger.core.models import Item, Order, Coupon\nfrom tiger.core.tests.shrapnel import TEST_PAYPAL_TRANSACTION, FakeSession, FakeCoupon\nfrom tiger.fixtures import FakeOrder\nfrom tiger.sales.models import Plan\nfrom tiger.utils.test import TestCase\n\n\n\ndef data_for_order_form(item):\n data = {'quantity': 
random.randint(1, 5)}\n if item.variant_set.count():\n data['variant'] = item.variant_set.all()[0].id\n for sidegroup in item.sidedishgroup_set.all():\n if sidegroup.sidedish_set.count() > 1:\n data['side_%d' % sidegroup.id] = sidegroup.sidedish_set.order_by('?')[0].id \n return data\n\ndef random_order_form():\n no_variants = True\n while no_variants: \n item = Item.objects.order_by('?')[0]\n if item.variant_set.count():\n v = item.variant_set.all()[0]\n v.schedule = None\n v.save()\n break\n item.taxable = True\n item.save()\n form_class = get_order_form(item)\n data = data_for_order_form(item)\n bound_form = form_class(data, location=Location.objects.all()[0])\n bound_form.full_clean()\n return item, bound_form\n\ndef create_order():\n FakeOrder.generate(count=1)\n order = Order.objects.all()[0]\n session = Session.objects.all()[0]\n cart = Cart(session)\n cart.add(*random_order_form())\n order.cart = cart.contents\n order.save()\n return order\n \ndef deliver_paypal(order, **kwargs):\n TEST_PAYPAL_TRANSACTION.update({\n 'invoice': unicode(order.id),\n 'txn_id': numerify('########'),\n 'payment_status': 'Completed'\n })\n TEST_PAYPAL_TRANSACTION.update(kwargs)\n paypal = PayPalIPN.objects.create(**TEST_PAYPAL_TRANSACTION)\n paypal.send_signals()\n\n\nclass CouponDisplayTestCase(unittest.TestCase):\n def setUp(self):\n self.site = Mock(Site())\n\n def test_dollar_display(self):\n dollar_coupon = Coupon(\n site=self.site, \n short_code='', \n discount_type=Coupon.DISCOUNT_DOLLARS,\n dollars_off='1.00'\n )\n self.assertEquals(dollar_coupon.discount, '$1.00')\n\n def test_percent_display(self):\n percent_coupon = Coupon(\n site=self.site, \n short_code='', \n discount_type=Coupon.DISCOUNT_PERCENT,\n percent_off=10\n )\n self.assertEquals(percent_coupon.discount, '10%')\n\n\nclass CouponUsageTestCase(TestCase):\n fixtures = ['plans.json']\n poseur_fixtures = 'tiger.fixtures'\n\n def setUp(self):\n site = Site.objects.all()[0]\n site.plan = Plan.objects.filter(has_online_ordering=True)[0]\n site.save()\n self.site = site\n\n def test_add_remove_coupon(self):\n coupon = Coupon.objects.create(\n site=self.site, \n short_code='TEST', \n discount_type=Coupon.DISCOUNT_DOLLARS,\n dollars_off='1.00'\n )\n self.client.get(reverse('menu_home'))\n response = self.client.post(reverse('preview_order'), {'coupon_code': coupon.short_code}, follow=True)\n self.assertContains(response, 'Coupon TEST')\n response = self.client.get(reverse('clear_coupon'), follow=True)\n self.assertContains(response, 'Your coupon has been removed.')\n\n\nclass CartTestCase(TestCase):\n fixtures = ['plans.json']\n poseur_fixtures = 'tiger.fixtures'\n\n def setUp(self):\n FakeSession.generate(count=1)\n self.session = Session.objects.all()[0]\n self.cart = Cart(self.session)\n FakeCoupon.generate(count=1)\n self.coupon = Coupon.objects.all()[0]\n site = Site.objects.all()[0]\n location = site.location_set.all()[0]\n location.tax_rate = '6.25'\n location.save()\n\n def tearDown(self):\n Session.objects.all()[0].delete()\n\n def test_adding_to_total(self):\n cart = self.cart\n self.assertEquals(cart.subtotal(), 0)\n self.assertEquals(cart.taxes(), 0)\n self.assertEquals(cart.total(), 0)\n cart.add(*random_order_form())\n self.assertNotEquals(cart.subtotal(), 0)\n self.assertNotEquals(cart.taxes(), 0)\n self.assertNotEquals(cart.total(), 0)\n self.assertEquals(cart.subtotal() - cart.discount(), cart.total())\n self.assertEquals(cart.total() + cart.taxes(), cart.total_plus_tax())\n \n def test_coupons(self):\n cart = self.cart\n 
self.assertFalse(cart.has_coupon)\n cart.add_coupon(self.coupon)\n self.assertTrue(cart.has_coupon)\n self.assertTrue(cart.coupon_display().startswith('Coupon')) \n self.assertTrue(cart.coupon_display().endswith('off')) \n cart.remove_coupon()\n self.assertFalse(cart.has_coupon)\n self.assertEquals(cart.coupon_display(), '') \n\n\nclass OrderPropertiesTest(TestCase):\n fixtures = ['plans.json']\n poseur_fixtures = 'tiger.fixtures'\n\n def test_display_hours(self):\n \"\"\"Time that the order was placed should be localized to reflect the\n timezone selected by the restaurant.\n \"\"\"\n site = Site.objects.all()[0]\n site.plan = Plan.objects.get(has_online_ordering=True)\n site.save()\n # site is using default timezone\n location = site.location_set.all()[0]\n self.assertEquals(settings.TIME_ZONE, location.timezone)\n FakeOrder.generate(count=1)\n order = Order.objects.all()[0]\n server_tz = timezone(settings.TIME_ZONE)\n server_timestamp = server_tz.localize(order.timestamp)\n self.assertEquals(order.localized_timestamp(), server_timestamp)\n order.delete()\n # set site timezone to PST\n location.timezone = 'US/Pacific'\n location.save()\n FakeOrder.generate(count=1)\n order = Order.objects.all()[0]\n server_tz = timezone(settings.TIME_ZONE)\n server_timestamp = server_tz.localize(order.timestamp)\n self.assertEquals(order.localized_timestamp(), server_timestamp)\n self.assertNotEquals(order.localized_timestamp().tzinfo, server_timestamp.tzinfo)\n\n\nclass PayPalOrderTest(TestCase):\n fixtures = ['plans.json']\n poseur_fixtures = 'tiger.fixtures'\n\n def setUp(self):\n site = Site.objects.all()[0]\n site.plan = Plan.objects.filter(has_online_ordering=True)[0]\n site.save()\n location, = site.location_set.all()\n location.receive_via = Location.RECEIPT_EMAIL\n location.tax_rate = '6.25'\n location.save()\n FakeSession.generate(count=1)\n\n def teardown(cls):\n Session.objects.all()[0].delete()\n\n def create_paypal_order(self, **kwargs):\n # no good way to hook into the PayPal interaction and stub it out,\n # so we start by saving an already processed object. 
The ``verify``\n # method of ``PayPalIPN`` instances performs the postback, sets flags\n # on the instance, saves it to the database, and dispatches signals.\n # we're picking up with the call to ``save``.\n order = create_order()\n deliver_paypal(order, **kwargs)\n return order\n\n def test_paypal_transaction(self):\n order = self.create_paypal_order()\n order = Order.objects.get(id=order.id)\n # assert that Order is flagged as paid\n self.assertEquals(order.status, Order.STATUS_PAID)\n # assert that e-mail is sent\n self.assertEquals(len(mail.outbox), 1)\n\n def test_paypal_email_display(self):\n \"\"\"Ensure that payer e-mail is displayed on order detail page if order\n was paid for via PayPal.\n \"\"\"\n order = self.create_paypal_order()\n client = Client(HTTP_HOST='foo.takeouttiger.com')\n self.assertTrue(client.login(email='test@test.com', password='password', site=Site.objects.all()[0]))\n response = client.get(reverse('order_detail', args=[order.id]))\n self.assertContains(response, PayPalIPN.objects.all()[0].payer_email)\n self.assertContains(response, 'Paid online')\n\n def test_paypal_refunded(self):\n order = self.create_paypal_order()\n order = Order.objects.get(id=order.id)\n self.create_paypal_order(invoice=unicode(order.id), payment_status='Refunded')\n client = Client(HTTP_HOST='foo.takeouttiger.com')\n self.assertTrue(client.login(email='test@test.com', password='password', site=Site.objects.all()[0]))\n response = client.get(reverse('order_detail', args=[order.id]))\n self.assertContains(response, 'Refunded')\n\n def test_payment_pending(self):\n client = Client(HTTP_HOST='foo.takeouttiger.com')\n self.assertTrue(client.login(email='test@test.com', password='password', site=Site.objects.all()[0]))\n order = create_order()\n order.status = Order.STATUS_PENDING\n order.save()\n response = client.get(reverse('order_detail', args=[order.id]))\n self.assertContains(response, 'Payment pending')\n deliver_paypal(order)\n response = client.get(reverse('order_detail', args=[order.id]))\n self.assertNotContains(response, 'Payment pending')\n self.assertContains(response, 'Paid online')\n\n\nclass OrderListTest(TestCase):\n def test_jonathan_has_replaced_this_with_a_stubbed_javascript_test(self):\n raise SkipTest\n\n\nclass OrderScreensAvailabilityTestCase(TestCase):\n fixtures = ['plans.json']\n poseur_fixtures = 'tiger.fixtures'\n\n def setUp(self):\n self.site = Site.objects.all()[0]\n self.client = Client(HTTP_HOST='foo.takeouttiger.com')\n urls = map(reverse, [\n 'preview_order',\n 'send_order',\n 'order_success',\n 'payment_paypal',\n 'payment_authnet'])\n item = Item.objects.all()[0]\n urls.append(reverse('order_item', kwargs={\n 'section_id': item.section.id,\n 'section_slug': item.section.slug,\n 'item_id': item.id,\n 'item_slug': item.slug\n }))\n self.ordering_urls = urls\n\n def test_site_with_online_ordering_has_ordering_screens(self):\n self.site.plan = Plan.objects.get(has_online_ordering=True)\n self.site.save()\n # hit menu screen just to set cookie\n self.client.get(reverse('menu_home'))\n for url in self.ordering_urls:\n response = self.client.get(url)\n self.assertNotEquals(response.status_code, 404)\n\n def test_site_without_online_ordering_has_no_ordering_screens(self):\n self.site.plan = Plan.objects.filter(has_online_ordering=False)[0]\n self.site.save()\n # hit menu screen just to set cookie\n self.client.get(reverse('menu_home'))\n for url in self.ordering_urls:\n response = self.client.get(url)\n self.assertEquals(response.status_code, 
404)\n","sub_path":"tiger/core/tests/test_orders.py","file_name":"test_orders.py","file_ext":"py","file_size_in_byte":11274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"76464158","text":"from selenium import webdriver\nimport os\n\nbrowser = webdriver.Firefox()\n\nhtml_file = os.getcwd() + \"//\" + \"index.html\"\nbrowser.get(\"file:///\" + html_file)\n\n#assert 'hej' in browser.title\ntext = browser.find_element_by_id('text-box')\npostbutton = browser.find_element_by_id('send-btn')\n\n#Test_1 - Skriva meddelande 0 < chars < 141\nprint(\"------Skriva meddelande 0 < chars < 141------\")\nst = 'Testar att skriva något i rutan'\ntext.send_keys(st)\npostbutton.click()\nmsg = browser.find_element_by_class_name(\"msg\")\nassert msg.text == st\n\n#Test_2 - Skriva meddelande char = 0\nprint(\"\")\nprint(\"------Skriva meddelande char = 0------\")\ntext.send_keys(\"\")\npostbutton.click()\nassert browser.find_element_by_id(\"errorbox\")\nrm = browser.find_element_by_name('okaybtn')\nrm.click()\n\n#Test_3 - Skriva meddelande char > 140\nprint(\"\")\nprint(\"------Skriva meddelande char > 140------\")\ntext.send_keys(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\")\npostbutton.click()\nassert browser.find_element_by_id(\"errorbox\")\nrm = browser.find_element_by_name('okaybtn')\nrm.click()\n\n#Test_4 - Ta bort error\nprint(\"\")\nprint(\"------Ta bort error------\")\nfor i in range(4):\n text.send_keys(\"\")\n postbutton.click()\nremovebuttons = browser.find_elements_by_name('okaybtn')\n\nfor button in removebuttons:\n button.click()\n\n\ntry:\n assert browser.find_element_by_id(\"errorbox\")\nexcept:\n print(\"Success!\")\n\n\n#Test_5 - Kronologiskt fallande\nprint(\"\")\nprint(\"------Kronologiskt fallande------\")\n\ntext.send_keys(\"Ett meddelande\")\npostbutton.click()\ntext.send_keys(\"Senaste meddelandet\")\npostbutton.click()\nmsg = browser.find_element_by_class_name(\"msg\")\nassert msg.text == \"Senaste meddelandet\"\n\n\n#Test_6 - Läsa meddelanden\nprint(\"\")\nprint(\"------Läsa meddelanden------\")\nremovebuttons = browser.find_elements_by_name('btn')\nn = 0\nfor button in removebuttons:\n n += 1\n print(\"Read message\",n)\n button.click()\n#Test_7 - Tydlig skillnad mellan lästa och olästa\n\nprint(\"\")\nprint(\"------Skillnad mellan lästa och olästa------\")\n\ntext.send_keys(\"Fler meddelanden\")\npostbutton.click()\ntext.send_keys(\"Fler meddelanden\")\npostbutton.click()\nr_msg = browser.find_elements_by_name('read_msg')\nu_msg = browser.find_elements_by_name('unread_msg')\nprint(\"Lästa meddelanden:\",len(r_msg),\"Olästa meddelanden:\", len(u_msg))\nassert len(r_msg) == 3 and len(u_msg) == 2\n\n#Test_8 - Alla meddelanden försvinner vid refresh\nprint(\"\")\nprint(\"------Laddar om sida(refresh)------\")\nbrowser.refresh()\nr_msg = browser.find_elements_by_name('read_msg')\nu_msg = browser.find_elements_by_name('unread_msg')\nprint(\"Lästa meddelanden:\",len(r_msg),\", Olästa meddelanden:\", len(u_msg))\n\n\nbrowser.close()\n\n\"\"\"\n#text.send_keys('Testar att skriva något i rutan')\npostbutton.click()\n\n\npostbutton.click()\n\nremovebuttons = browser.find_elements_by_name('okaybtn')\n\nfor button in removebuttons:\n button.click()\n\"\"\"\n","sub_path":"Labb 
1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"376641143","text":"# -*- coding: utf-8 -*-\ndef maximum(y, x):\n '''\n Devuelve el máximo al comparar x o y\n '''\n if (x > y):\n return x\n else:\n return y\nx='geasg'\ny=5\n\nz = maximum(x,y)","sub_path":"Maximum-function example.py","file_name":"Maximum-function example.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"484472853","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport pandas as pd\nimport csv, os\n\nTOTAL_ELECTORAL_VOTES = 538\n\napp = dash.Dash()\n\nnon_voters_map = {}\nwith open('turnouts.csv') as csvfile, open('vote_counts.csv') as f:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n if len(row[-1]) != 2:\n continue\n non_voters = int(row[8].replace(',', '')) - int(row[7].replace(',', ''))\n state_code = row[-1]\n non_voters_map[state_code] = non_voters\n voter_opp_map = {}\n for line in [string.split(',') for string in f.read().split('\\n')]:\n sc = line[0]\n if len(sc) != 2:\n continue\n electoral_votes = int(line[1])\n margin = abs(int(line[2]) - int(line[3]))\n voter_opp = (non_voters_map[sc] / float(margin)) * (electoral_votes / TOTAL_ELECTORAL_VOTES)\n voter_opp_map[sc] = voter_opp\n\nwith open('voter_opp.csv', 'w') as voter_opp_file:\n voter_opp_file.write('State, Voter Turnout Opportunity\\n')\n voter_opp_file.write('string, number\\n')\n for k,v in sorted(voter_opp_map.items(), key=lambda x: x[1], reverse=True):\n voter_opp_file.write(k + \",\" + (\"%.3f\" % v) + '\\n')\n\n\napp.layout = html.Div([\n html.Title(\"Voting opportunity\"),\n dcc.Graph(\n id='voting-opportunity',\n figure={\n 'data': [\n go.Bar(\n x=list(voter_opp_map.keys()),\n y=list(voter_opp_map.values()),\n )\n ],\n 'layout': go.Layout(\n xaxis={'title': 'State'},\n yaxis={'title': 'Turnout opportunity'},\n title=\"Voter turnout opportunity by state for the 2016 presidential election\"\n )\n }\n ),\n html.Div([\n html.P('''A state's turnout opportunity is calculated by taking the ratio of non-voters to the margin of victory,\n and then scaling that value by the proportion of total electoral votes granted to that state. It is designed\n to capture how worthwhile it would be to focus energies to GOTV strategies in that state.'''),\n html.P(\"turnout_opportunity = (eligible_non_voters / margin_of_victory) * (state_electoral_votes / 538)\"),\n html.P('''The top five 'turnout opportunity' states are perhaps unsurprising: Michigan, Pennsylvania,\n Florida, Wisconsin, and New Hampshire are among the known 'swing states' in US elections. After that, however,\n we see some not so obvious candidates for targeted GOTV strategies. Texas comes in at number 6. A very interesting\n result from the 2016 election that got sort of drowned out amidst the Trump-mania was that Texas saw a rather\n substantial shift in favor of the Democrats. Although the state still went red, the margin of victory dropped from\n 16% to 9%. Moreover, with a turnout rate of 43.2%, Texas had the second lowest voter participation of all 50 states\n (only Hawaii is lower). For every 10 eligible voters who did not vote, Democrats needed only convince one of them\n to show up to vote Democrat, and Texas would have gone blue. 
Given the sheer size of the state, and the number of\n            people who did not vote, Texas should be under serious consideration as a potential target for voter turnout opportunity in future campaigns. Arizona comes next, and it\n            too exhibited a sizeable shift towards being Democratic in 2016 - its margin dropped from 9% to 3.5%, making it\n            a bona fide swing state. For comparison, the margins of victory in both Ohio and North Carolina were larger than\n            Arizona's. '''),\n        html.P([\"Code and data for this project available \", html.A(\"here\", href='http://github.com/rmacqueen/govdash')])\n    ]),\n\n])\n\nif __name__ == '__main__':\n    app.run_server(host='0.0.0.0', port=int(os.getenv('PORT', 5000)))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}{"seq_id":"36425589","text":"'''\r\nCreated on 03/mar/2014\r\n\r\n@author:Sonya\r\n'''\r\nfrom numpy.random.mtrand import np\r\n\r\nimport random\r\nfrom FCM.Class_File_Manager import FileManager\r\nimport numpy\r\n\r\n################################################################################\r\n# Peach - Computational Intelligence for Python\r\n# Jose Alexandre Nalon\r\n#\r\n# This file: fuzzy/cmeans.py\r\n# Fuzzy C-Means algorithm\r\n################################################################################\r\n\r\n# Doc string, reStructuredText formatted:\r\n__doc__ = \"\"\"\r\nFuzzy C-Means\r\n\r\nFuzzy C-Means is a clustering algorithm based on fuzzy logic.\r\n\r\nThis package implements the fuzzy c-means algorithm for clustering and\r\nclassification. This algorithm is very simple, yet very efficient. From a\r\ntraining set and an initial condition which gives the membership values of each\r\nexample in the training set to the clusters, it converges very quickly to crisper\r\nsets.\r\n\r\nThe initial conditions, i.e., the starting membership, must follow some rules.\r\nPlease, refer to any bibliography about the subject to see why. Those rules are:\r\nno example might have membership 1 in every class, and the sum of the membership\r\nof every component must be equal to 1. This means that the initial condition is\r\na fuzzy partition of the universe.\r\n\"\"\"\r\n\r\n\r\n################################################################################\r\n\r\nfrom numpy import dot, array, sum, zeros, outer, any, identity\r\nfrom numpy import linalg as LA\r\n\r\n\r\n################################################################################\r\n# Fuzzy C-Means class\r\n################################################################################\r\nclass FuzzyCMeans(object):\r\n    '''\r\n    Fuzzy C-Means convergence.\r\n\r\n    Use this class to instantiate a fuzzy c-means object. The object must be\r\n    given a training set and initial conditions. The training set is a list or\r\n    an array of N-dimensional vectors; the initial conditions are a list of the\r\n    initial membership values for every vector in the training set -- thus, the\r\n    length of both lists must be the same. The number of columns in the initial\r\n    conditions must be the same number of classes. 
That is, if you are, for\r\n example, classifying in ``C`` classes, then the initial conditions must have\r\n ``C`` columns.\r\n\r\n There are restrictions in the initial conditions: first, no column can be\r\n all zeros or all ones -- if that happened, then the class described by this\r\n column is unnecessary; second, the sum of the memberships of every example\r\n must be one -- that is, the sum of the membership in every column in each\r\n line must be one. This means that the initial condition is a perfect\r\n partition of ``C`` subsets.\r\n\r\n Notice, however, that *no checking* is done. If your algorithm seems to be\r\n behaving strangely, try to check these conditions.\r\n '''\r\n EUCLIDEAN_NORM = 1\r\n DIAGONAL_NORM = 2\r\n MAHALONOBIS_NORM = 3\r\n FUZZY_COVARIANCE_NORM=4\r\n \r\n \r\n INIT_MU_RANDOM = 1\r\n INIT_MU_CONSTANT = 2\r\n INIT_MU_FCM = 3\r\n \r\n def __init__(self, training_set, c, m=2., distType=EUCLIDEAN_NORM, initMu=INIT_MU_RANDOM):\r\n '''\r\n Initializes the algorithm.\r\n\r\n :Parameters:\r\n training_set\r\n A list or array of vectors containing the data to be classified.\r\n Each of the vectors in this list *must* have the same dimension, or\r\n the algorithm won't behave correctly. Notice that each vector can be\r\n given as a tuple -- internally, everything is converted to arrays.\r\n initial_conditions\r\n A list or array of vectors containing the initial membership values\r\n associated to each example in the training set. Each column of this\r\n array contains the membership assigned to the corresponding class\r\n for that vector. Notice that each vector can be given as a tuple --\r\n internally, everything is converted to arrays.\r\n m\r\n This is the aggregation value. The bigger it is, the smoother will\r\n be the classification. Please, consult the bibliography about the\r\n subject. ``m`` must be bigger than 1. Its default value is 2\r\n '''\r\n \r\n '''\r\n metodo shape restituisce il numero di riga e il numero di colonna\r\n metodo array converte una t-upla oppure una lista in un array\r\n \r\n \r\n '''\r\n \r\n self.__x = array(training_set)\r\n self.m = m\r\n self.setDistanceType(distType)\r\n '''The fuzzyness coefficient. Must be bigger than 1, the closest it is\r\n to 1, the smoother the membership curves will be.'''\r\n self.initMu = initMu\r\n self.__mu = array(self.initMembership(self.__x.shape[0], c))\r\n numpy.set_printoptions(threshold=numpy.nan)\r\n FileManager.writeTxt('mu_iniziale.txt', str(self.__mu))\r\n \r\n self.__centr = self.centers()\r\n \r\n \r\n \r\n \r\n \r\n def getcentr(self):\r\n return self.__centr\r\n \r\n def setcentr(self, centr):\r\n self.__centr = array(centr).reshape(self.__centr.shape)\r\n centr = property(getcentr, setcentr)\r\n '''A ``numpy`` array containing the centers of the classes in the algorithm.\r\n Each line represents a center, and the number of lines is the number of\r\n classes. This property is read and write, but care must be taken when\r\n setting new centers: if the dimensions are not exactly the same as given in\r\n the instantiation of the class (*ie*, *C* centers of dimension *N*, an\r\n exception will be raised.'''\r\n\r\n def getmu(self):\r\n return self.__mu\r\n def setmu(self, mu):\r\n self.__mu = array(mu).reshape(self.__mu.shape)\r\n mu = property(getmu, None)\r\n '''The membership values for every vector in the training set. This property\r\n is modified at each step of the execution of the algorithm. 
This property is\r\n not writable.'''\r\n\r\n def getx(self):\r\n return self.__x\r\n \r\n def setx(self,newX):\r\n self.__x = newX\r\n x = property(getx, setx)\r\n '''The vectors in which the algorithm bases its convergence. This property\r\n is not writable.'''\r\n\r\n def centers(self):\r\n '''\r\n Given the present state of the algorithm, recalculates the centers, that\r\n is, the position of the vectors representing each of the classes. Notice\r\n that this method modifies the state of the algorithm if any change was\r\n made to any parameter. This method receives no arguments and will seldom\r\n be used externally. It can be useful if you want to step over the\r\n algorithm. *This method has a colateral effect!* If you use it, the\r\n ``centr`` property (see above) will be modified.\r\n\r\n :Returns:\r\n A vector containing, in each line, the position of the centers of the\r\n algorithm.\r\n '''\r\n \r\n numpy.set_printoptions(threshold=numpy.nan)\r\n FileManager.writeTxt('mu_finale.txt', str(self.__mu))\r\n mm = self.__mu ** self.m \r\n \r\n # FileManager.writeTxt('mm.txt', str(mm)) \r\n \r\n # FileManager.writeTxt('sum.txt', str(sum(mm, axis=0))) \r\n \r\n # FileManager.writeTxt('dot.txt', str(dot(self.__x.T, mm))) \r\n c = dot(self.__x.T, mm) / sum(mm, axis=0)\r\n self.__centr = c.T\r\n return self.__centr\r\n\r\n def membership(self):\r\n '''\r\n Given the present state of the algorithm, recalculates the membership of\r\n each example on each class. That is, it modifies the initial conditions\r\n to represent an evolved state of the algorithm. Notice that this method\r\n modifies the state of the algorithm if any change was made to any\r\n parameter.\r\n\r\n :Returns:\r\n A vector containing, in each line, the membership of the corresponding\r\n example in each class.\r\n '''\r\n x = self.__x\r\n centr = self.__centr\r\n M, _ = x.shape\r\n C, _ = centr.shape\r\n r = zeros((M, C))\r\n m1 = 1. / (self.m - 1.)\r\n for k in range(M):\r\n '''den = sum((x[k] - centr) ** 2., axis=1)'''\r\n den = self.computeDistance(x[k],self.centr)\r\n if any(den == 0):\r\n return self.__mu\r\n frac = outer(den, 1. / den) ** m1\r\n r[k, :] = 1. / sum(frac, axis=1)\r\n self.__mu = r\r\n return self.__mu\r\n \r\n def computeDistance(self, datapoint, prototypes):\r\n c = prototypes\r\n A = self.__A\r\n distance = zeros(c.shape[0])\r\n # distance=sum((datapoint - centr) ** 2., axis=1)\r\n for i in range(c.shape[0]):\r\n d = datapoint - c[i]\r\n if (self.distanceType==FuzzyCMeans.FUZZY_COVARIANCE_NORM):\r\n A=self.computeCovarianceMatrix(i)\r\n distance[i] = dot(dot(d, A), d)\r\n \r\n return distance\r\n \r\n def setDistanceType (self, type):\r\n ''' imposta il tipo di norma per calcolare la matrice di appartenenza\r\n type =1 norma euclidea\r\n type =2 norma diagonale\r\n type =3 norma di Mahalonobis\r\n '''\r\n self.distanceType = type\r\n if(type == self.EUCLIDEAN_NORM):\r\n self.__A = identity(self.__x.shape[1])\r\n elif (type == FuzzyCMeans.DIAGONAL_NORM):\r\n self.__A = LA.inv(self.computeCyMatrix())\r\n elif (type == self.MAHALONOBIS_NORM):\r\n self.__A = LA.inv(np.diag(LA.eigvals(self.computeCyMatrix())))\r\n elif (type==self.FUZZY_COVARIANCE_NORM):\r\n self.__A=zeros((self.__x.shape[1],self.__x.shape[1]))\r\n \r\n \r\n \r\n \r\n \r\n\r\n def step(self):\r\n '''\r\n This method runs one step of the algorithm. It might be useful to track\r\n the changes in the parameters.\r\n\r\n :Returns:\r\n The norm of the change in the membership values of the examples. 
It\r\n can be used to track convergence and as an estimate of the error.\r\n '''\r\n old = self.__mu\r\n self.membership()\r\n self.centers()\r\n return sum((self.__mu - old) ** 2.) ** 1 / 2.\r\n\r\n def __call__(self, emax=0.001, imax=50):\r\n '''\r\n The ``__call__`` interface is used to run the algorithm until\r\n convergence is found.\r\n\r\n :Parameters:\r\n emax\r\n Specifies the maximum error admitted in the execution of the\r\n algorithm. It defaults to 1.e-10. The error is tracked according to\r\n the norm returned by the ``step()`` method.\r\n imax\r\n Specifies the maximum number of iterations admitted in the execution\r\n of the algorithm. It defaults to 20.\r\n\r\n :Returns:\r\n An array containing, at each line, the vectors representing the\r\n centers of the clustered regions.\r\n '''\r\n error = 1.\r\n i = 0\r\n while error > emax and i < imax:\r\n error = self.step()\r\n i = i + 1\r\n print('Iterazione '+str(i))\r\n print(\"Numero di iterazioni eseguite: \" + str(i))\r\n return self.centr\r\n \r\n \r\n def initMembership(self, n, c):\r\n u = zeros((n, c))\r\n if(c != 0):\r\n if(self.initMu == self.INIT_MU_RANDOM):\r\n for i in range(n):\r\n z = np.random.rand(c)\r\n u[i, :] = np.random.dirichlet(z, size=1)\r\n elif(self.initMu == self.INIT_MU_CONSTANT):\r\n for i in range(n): \r\n for j in range(c):\r\n u[i, j] = 1.0 / c\r\n elif(self.initMu == self.INIT_MU_FCM):\r\n p1 = FuzzyCMeans(self.__x, c, self.m, self.distanceType)\r\n p1(imax=5)\r\n u = p1.getmu() \r\n return u \r\n \r\n \r\n def getClusters(self):\r\n return np.argmax(self.__mu, axis=1)\r\n \r\n \r\n def computeCyMatrix(self):\r\n x = self.__x\r\n M = x.shape[0]\r\n n = x.shape[1]\r\n cy = sum(self.__x, axis=0) / M\r\n Cy = zeros((n, n))\r\n for k in range(M):\r\n z = x[k] - cy\r\n den = outer(z, z.T)\r\n Cy = Cy + den\r\n return Cy\r\n \r\n def computeCovarianceMatrix(self,y): \r\n c=self.__centr\r\n x=self.__x\r\n n=x.shape[1]\r\n mu=self.__mu\r\n n=x.shape[1]\r\n num=zeros((n,n))\r\n for k in range(len(x)):\r\n z = x[k] - c[y]\r\n num += ((mu[k,y])**2)*(outer(z, z.T))\r\n den=sum(mu[:, y])\r\n Py=num/den\r\n (sign, logdet) = LA.slogdet(Py)\r\n coeff=sign*numpy.exp(-logdet/n)\r\n My=LA.inv(coeff*Py)\r\n return My\r\n \r\n def getMedoids(self, trueLabelPoints):\r\n cluster_point = array(self.getClusters())\r\n medoids = []\r\n for k in range(len(self.__centr)):\r\n l = []\r\n for j in range(len(cluster_point)):\r\n if (k == cluster_point[j]):\r\n l.append(j)\r\n medoids.append(self.x[l[np.argmax(self.mu[l, k])]]) \r\n \r\n return array(medoids) \r\n \r\n def getMedoidsTrueLabel(self, trueLabelCluster, trueLabelPoints):\r\n cluster_point = array(self.getClusters())\r\n medoids = []\r\n for k in range(len(self.__centr)):\r\n l = []\r\n for j in range(len(cluster_point)):\r\n if (k == cluster_point[j] and trueLabelCluster[k] == trueLabelPoints[j]):\r\n l.append(j)\r\n medoids.append(self.x[l[np.argmax(self.mu[l, k])]]) \r\n \r\n return array(medoids) \r\n \r\n \r\n def getClusterLabel(self, trueLabels):\r\n ind = np.argmax(self.mu, axis=0)\r\n labels = trueLabels[ind]\r\n return array(labels) \r\n\r\n \r\n \r\n \r\n \r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"Tesi/FCM/Class_FCM.py","file_name":"Class_FCM.py","file_ext":"py","file_size_in_byte":13969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"6709207","text":"import functools\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport models.archs.arch_util as arch_util\n\n\nclass 
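The Class_FCM.py record above implements the standard fuzzy c-means updates: centers() recomputes prototypes from fuzzified memberships, and membership() recomputes memberships from inverse distances to those prototypes. Below is a compact NumPy sketch of one such iteration with the plain Euclidean norm; it illustrates the textbook equations, not the Peach/Class_FCM code itself, and the name fcm_step and the sample data are mine:

import numpy as np

def fcm_step(x, mu, m=2.0):
    # Fuzzified memberships, shape (N, C).
    mm = mu ** m
    # Centers are membership-weighted means of the data, shape (C, d).
    centers = (mm.T @ x) / mm.sum(axis=0)[:, None]
    # Squared Euclidean distance of every point to every center, shape (N, C).
    d2 = ((x[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    d2 = np.maximum(d2, 1e-12)  # guard points sitting exactly on a center
    # u_ik is proportional to d_ik^(-2/(m-1)), normalized over the C clusters.
    inv = d2 ** (-1.0 / (m - 1.0))
    return centers, inv / inv.sum(axis=1, keepdims=True)

rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0, 0.1, (20, 2)), rng.normal(3, 0.1, (20, 2))])
mu = rng.dirichlet(np.ones(2), size=40)  # random fuzzy partition, rows sum to 1
for _ in range(20):
    centers, mu = fcm_step(x, mu)
print(np.round(centers, 2))  # one center near (0, 0), the other near (3, 3)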
MSRResNet(nn.Module):\n ''' modified SRResNet'''\n\n def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n super(MSRResNet, self).__init__()\n self.upscale = upscale\n\n self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n basic_block = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)\n self.recon_trunk_1 = arch_util.make_layer(basic_block, nb // 2)\n self.recon_trunk_2 = arch_util.make_layer(basic_block, nb // 2)\n # upsampling\n # self.upconv1_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.HRconv_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.conv_last_1 = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n self.upconv1_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.HRconv_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.conv_last_2 = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n # initialization\n arch_util.initialize_weights([self.conv_first, self.upconv1_2, self.HRconv_2, self.conv_last_2, self.HRconv_1, self.conv_last_1],\n 0.1)\n # note: the original MSRResNet initialized self.upconv2 here when upscale == 4,\n # but that upsampling layer was removed in this modified architecture, so the\n # branch is dropped to avoid an AttributeError.\n\n def forward(self, x):\n fea = self.lrelu(self.conv_first(x))\n out1 = self.recon_trunk_1(fea)\n out2 = self.recon_trunk_2(out1)\n\n # out1 = self.lrelu(self.upconv1_1(out1))\n out1 = self.conv_last_1(self.lrelu(self.HRconv_1(out1)))\n\n out2 = self.lrelu(self.upconv1_2(out2))\n out2 = self.conv_last_2(self.lrelu(self.HRconv_2(out2)))\n\n out1 += x\n out2 += x\n return out1, out2\n","sub_path":"deblock/codes/models/archs/archs_sub/SRResNet_arch_mid.py","file_name":"SRResNet_arch_mid.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"504034910","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n\"\"\"\nFile: .py\nAuthor: Lijiacai (v_lijiacai@baidu.com)\nDate: 2018-xx-xx\nDescription:\n\"\"\"\nimport logging\nimport os\nimport sys\nimport time\n\ncur_dir = os.path.split(os.path.realpath(__file__))[0]\nsys.path.append(\"%s/../\" % cur_dir)\nfrom manager.spider import *\nfrom login.login import Loginer\n\n\nclass Addr(object):\n \"\"\"Shipping address\"\"\"\n\n def __init__(self, username=\"\", password=\"\", lastname=\"\", firstname=\"\", province=\"\", city=\"\",\n district=\"\", detail_address=\"\", phone_num=\"\", ):\n \"\"\"\n Initialize the relevant parameters\n :param username: user name\n :param password: password\n :param lastname: last name (surname)\n :param firstname: first name (given name)\n :param province: province\n :param city: city\n :param district: district (county)\n :param detail_address: detailed address\n :param phone_num: phone number\n \"\"\"\n self.username = username\n self.password = password\n self.lastname = lastname\n self.firstname = firstname\n self.province = province\n self.city = city\n self.district = district\n self.detail_address = detail_address\n self.phone_num = phone_num\n\n def login(self):\n \"\"\"Log in before configuring the address\"\"\"\n loginer = Loginer(username=self.username, password=self.password, headless=False)\n if loginer.login(url=\"https://www.nike.com/cn/launch/\"):\n return loginer.B\n\n def setting_addr(self, url):\n \"\"\"\n Set the shipping address\n :param url: url\n :return:\n \"\"\"\n B = self.login()\n if B.get(\"status\", \"-1\"):\n try:\n # Open the requested url\n B.get(url)\n # Select the address settings\n B.wait_for_element_loaded(\"addresses\", By.CLASS_NAME)\n elem_addresses = B.browser.find_element_by_class_name(\"addresses\")\n B.click_elem(elem_addresses)\n # Edit the address\n B.wait_for_element_loaded(\"edit-button-container\", By.CLASS_NAME)\n elem_edit = 
B.browser.find_element_by_class_name(\"edit-button-container\")\n B.click_elem(elem_edit)\n # Edit the name; note: given name first, surname last\n B.wait_for_element_loaded(\"address-lastname\", By.ID)\n elem_lastname = B.browser.find_element_by_id(\"address-lastname\")\n elem_firstname = B.browser.find_element_by_id(\"address-firstname\")\n elem_lastname.clear()\n elem_firstname.clear()\n elem_lastname.send_keys(self.lastname)\n elem_firstname.send_keys(self.firstname)\n # Edit the province\n elem_province = B.browser.find_element_by_class_name(\"state-container\")\n B.click_elem(elem_province)\n state_province = B.browser.find_elements_by_xpath(\n \"//div[@class='input-wrapper state-container container2 js-addressState']/div/ul/li\")\n for one in state_province:\n if one.text == self.province:\n B.click_elem(one)\n # Edit the city\n city = B.browser.find_element_by_class_name(\"city-container\")\n B.click_elem(city)\n citys = B.browser.find_elements_by_xpath(\n \"//div[@class='input-wrapper city-container container1 js-addressCity']/div/ul/li\")\n for one in citys:\n if one.text == self.city:\n B.click_elem(one)\n # Edit the district or city\n district = B.browser.find_element_by_class_name(\"district-container\")\n B.click_elem(district)\n districts = B.browser.find_elements_by_xpath(\n \"//div[@class='input-wrapper district-container container2 js-addressDistrict']/div/ul/li\")\n for one in districts:\n if one.text == self.district:\n B.click_elem(one)\n # Edit the detailed address\n detail_address = B.browser.find_element_by_id(\"address-addressone\")\n detail_address.clear()\n detail_address.send_keys(self.detail_address)\n # Edit the phone number\n phonenum = B.browser.find_element_by_id(\"address-phonenumber\")\n phonenum.clear()\n phonenum.send_keys(self.phone_num)\n # Save the settings\n save_button = B.browser.find_element_by_xpath(\n \"//button[@data-qa='my_account.settings.addresses.shipping_address.save_button']\")\n save_button.click()\n # todo: verify that a successful save really means the address was configured\n B.close()\n return {\"username\": self.username, \"status\": \"1\", \"item\": \"address\"}\n except Exception as e:\n logging.exception(\n \"%s :(address failed %s) %s\" % (time.asctime(), self.username, str(e)))\n B.close()\n return {\"username\": self.username, \"status\": \"-1\", \"item\": \"address\"}\n\n\ndef test():\n \"\"\"unittest\"\"\"\n addr = Addr(username=\"18404983790\", password=\"Ljc19941108\", lastname=\"lee\", firstname=\"jack\",\n province=\"黑龙江省\", city=\"绥化市\",\n district=\"安达市\", detail_address=\"中国黑龙江绥化市安达市栖霞小区9栋505\",\n phone_num=\"00000000000\")\n print(addr.setting_addr(url=\"https://www.nike.com/cn/zh_cn/p/settings\"))\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"snkrs_backserver/address/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"153759234","text":"#!/usr/bin/env python3\nimport argparse\nimport glob\nimport math\nimport os\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom astropy import constants as const\nfrom astropy import units as u\n\nimport artistools as at\n\n\ndef main():\n \"\"\"\n Plot the radiation field estimators and the fitted radiation field\n based on the fitted field parameters (temperature and scale factor W\n for a diluted blackbody)\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Plot ARTIS radiation field.')\n parser.add_argument('-path', action='store', default='./',\n help='Path to radfield_nnnn.out files')\n 
parser.add_argument('-listtimesteps', action='store_true', default=False,\n help='Show the times at each timestep')\n parser.add_argument('-timestep', type=int, default=-1,\n help='Timestep number to plot, or -1 for last')\n parser.add_argument('-timestepmax', type=int, default=-1,\n help='Make plots for all timesteps up to this timestep')\n parser.add_argument('-modelgridindex', type=int, default=0,\n help='Modelgridindex to plot')\n parser.add_argument('--nospec', action='store_true', default=False,\n help='Don\\'t plot the emergent spectrum')\n parser.add_argument('-xmin', type=int, default=1000,\n help='Plot range: minimum wavelength in Angstroms')\n parser.add_argument('-xmax', type=int, default=20000,\n help='Plot range: maximum wavelength in Angstroms')\n parser.add_argument('--normalised', default=False, action='store_true',\n help='Normalise the spectra to their peak values')\n parser.add_argument('-o', action='store', dest='outputfile',\n default='plotradfield_cell{0:03d}_{1:03d}.pdf',\n help='Filename for PDF file')\n args = parser.parse_args()\n\n if args.listtimesteps:\n at.showtimesteptimes('spec.out')\n else:\n radfield_files = (\n glob.glob('radfield_????.out', recursive=True) +\n glob.glob('*/radfield_????.out', recursive=True) +\n glob.glob('radfield-????.out', recursive=True) + glob.glob('radfield.out', recursive=True))\n\n if not radfield_files:\n print(\"No radfield files found\")\n return\n else:\n radfielddata = at.radfield.read_files(radfield_files, args.modelgridindex)\n\n if not args.timestep or args.timestep < 0:\n timestepmin = max(radfielddata['timestep'])\n else:\n timestepmin = args.timestep\n\n if not args.timestepmax or args.timestepmax < 0:\n timestepmax = timestepmin + 1\n else:\n timestepmax = args.timestepmax\n\n specfilename = 'spec.out'\n\n if not os.path.isfile(specfilename):\n specfilename = '../example_run/spec.out'\n\n if not os.path.isfile(specfilename):\n print(f'Could not find {specfilename}')\n return\n\n for timestep in range(timestepmin, timestepmax):\n radfielddata_currenttimestep = radfielddata.query('timestep==@timestep')\n\n if len(radfielddata_currenttimestep) > 0:\n outputfile = args.outputfile.format(args.modelgridindex, timestep)\n make_plot(radfielddata_currenttimestep, specfilename, timestep, outputfile,\n xmin=args.xmin, xmax=args.xmax, modelgridindex=args.modelgridindex, nospec=args.nospec,\n normalised=args.normalised)\n else:\n print(f'No data for timestep {timestep:d}')\n\n\ndef make_plot(radfielddata, specfilename, timestep, outputfile, xmin, xmax, modelgridindex, nospec=False,\n normalised=False):\n \"\"\"\n Draw the bin edges, fitted field, and emergent spectrum\n \"\"\"\n time_days = at.get_timestep_time(specfilename, timestep)\n\n print(f'Plotting timestep {timestep:d} (t={time_days})')\n\n fig, axis = plt.subplots(1, 1, sharex=True, figsize=(8, 4),\n tight_layout={\"pad\": 0.2, \"w_pad\": 0.0, \"h_pad\": 0.0})\n\n ymax1 = at.radfield.plot_field_estimators(axis, radfielddata)\n ymax2 = at.radfield.plot_fitted_field(axis, radfielddata, xmin, xmax)\n\n ymax = max(ymax1, ymax2)\n\n if len(radfielddata) < 400:\n binedges = [const.c.to('angstrom/s').value / radfielddata['nu_lower'].iloc[1]] + \\\n list(const.c.to('angstrom/s').value / radfielddata['nu_upper'][1:])\n axis.vlines(binedges, ymin=0.0, ymax=ymax, linewidth=0.5,\n color='red', label='', zorder=-1, alpha=0.4)\n if not nospec:\n if not normalised:\n modeldata, t_model_init = at.get_modeldata('model.txt')\n v_surface = 
modeldata.loc[int(radfielddata.modelgridindex.max())].velocity * u.km / u.s # outer velocity\n r_surface = (327.773 * u.day * v_surface).to('km')\n r_observer = u.megaparsec.to('km')\n scale_factor = (r_observer / r_surface) ** 2 / (2 * math.pi)\n print(f'Scaling emergent spectrum flux at 1 Mpc to specific intensity '\n f'at surface (v={v_surface:.3e}, r={r_surface:.3e})')\n plot_specout(axis, specfilename, timestep, scale_factor=scale_factor) # peak_value=ymax)\n else:\n plot_specout(axis, specfilename, timestep, peak_value=ymax)\n\n axis.annotate(f'Timestep {timestep:d} (t={time_days})\\nCell {modelgridindex:d}',\n xy=(0.02, 0.96), xycoords='axes fraction',\n horizontalalignment='left', verticalalignment='top', fontsize=8)\n\n axis.set_xlabel(r'Wavelength ($\\AA$)')\n axis.set_ylabel(r'J$_\\lambda$ [erg/s/cm$^2$/$\\AA$]')\n axis.xaxis.set_minor_locator(ticker.MultipleLocator(base=100))\n axis.set_xlim(xmin=xmin, xmax=xmax)\n axis.set_ylim(ymin=0.0, ymax=ymax)\n\n axis.legend(loc='best', handlelength=2,\n frameon=False, numpoints=1, prop={'size': 13})\n\n print(f'Saving to {outputfile:s}')\n fig.savefig(outputfile, format='pdf')\n plt.close()\n\n\ndef plot_specout(axis, specfilename, timestep, peak_value=None, scale_factor=None):\n \"\"\"\n Plot the ARTIS spectrum\n \"\"\"\n\n print(f\"Plotting {specfilename}\")\n\n dfspectrum = at.spectra.get_spectrum(specfilename, timestep)\n if scale_factor:\n dfspectrum['f_lambda'] = dfspectrum['f_lambda'] * scale_factor\n if peak_value:\n dfspectrum['f_lambda'] = dfspectrum['f_lambda'] / dfspectrum['f_lambda'].max() * peak_value\n\n dfspectrum.plot(x='lambda_angstroms', y='f_lambda', ax=axis, linewidth=1.5, color='black', alpha=0.7,\n label='Emergent spectrum (normalised)')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"artistools/plot/radfield.py","file_name":"radfield.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"608930464","text":"from flask import Flask, flash, render_template, request, session, redirect, url_for\nimport urllib2, json\nimport os\nfrom datetime import datetime\nimport movie_api\n\napp = Flask(__name__)\napp.secret_key = os.urandom(32)\n\n@app.route('/')\ndef index():\n recommended = movie_api.sort(movie_api.get_family())\n return render_template('index.html', recommended=recommended)\n\n@app.route('/schedule')\ndef schedule():\n return render_template('schedule.html')\n\n@app.route('/groups')\ndef groups():\n return render_template('groups.html')\n\n@app.route('/genres')\ndef genres():\n genres = ['action', 'adventure', 'anamation', 'comedy', 'crime', 'drama', 'family', 'fantasy']\n g = ''\n recommended = []\n if request.args.get('genre'):\n g = request.args.get('genre')\n recommended = eval('movie_api.sort(movie_api.get_%s())'%g)\n return render_template('genres.html', genres=genres, recommended=recommended, g=g)\n\n@app.route('/watch')\ndef watch():\n return render_template('single.html')\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"532167359","text":"import json\nimport itertools\nwith open('eerstekamer.json', 'r') as data_file:\n data = json.load(data_file)\nwith open('functies_1.json', 'r') as data_file:\n functies_1 = json.load(data_file)\n\nwith open('functies_2.json', 'r') as data_file:\n 
functies_2 = json.load(data_file)\n\nwith open('functies_3.json', 'r') as data_file:\n functies_3 = json.load(data_file)\n\nwith open('functies_4.json', 'r') as data_file:\n functies_4 = json.load(data_file)\n\nwith open('functies_5.json', 'r') as data_file:\n functies_5 = json.load(data_file)\nfor person in data[\"value\"]:\n for fs in functies_1[\"value\"]:\n if person[\"Id\"] == fs[\"PersoonId\"]:\n print(fs[\"Omschrijving\"])\n x = 'c128a46d-ebd9-4ee9-80cc-16bfe505e539'\n if x == fs[\"PersoonId\"]:\n print(\"noob\")\n","sub_path":"json/Extra/Inkomsten Experiment and Eerste Kamer/eerstekamergenerator.py","file_name":"eerstekamergenerator.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"420696640","text":"from ctypes import windll, WINFUNCTYPE, c_int, c_char_p, c_ulong, c_uint32\nfrom model.input_data import *\n\np1 = c_char_p(local_host_ip.encode(\"utf-8\"))\np2 = c_char_p(iidk_port.encode(\"utf-8\"))\np3 = c_char_p(iidk_id.encode(\"utf-8\"))\n# message = c_char_p(\"CORE\", \"RANDOM\", \"CREATE_OBJECT\", \"objtype\", \"CAM\", \"objid\", \"999999\", \"parent_id\", \"2\", \"name\", \"Test Camera\")\np5 = 0\np6 = 0\np7 = 0\n\n\nclass DllHelper:\n def __init__(self, message=None, cb1=None, cb2=None, cb3=None, obj_name=None, obj_id=None):\n self.my_dll = windll.LoadLibrary(\"C:\\Program Files (x86)\\ISS\\SecurOS\\iidk.dll\")\n self.ConnectEx = self.my_dll.ConnectEx\n self.Disconnect = self.my_dll.Disconnect\n self.SendDoReact = self.my_dll.SendDoReact\n self.message = message\n self.objName = obj_name\n self.objId = obj_id\n self.cb1 = cb1\n self.cb2 = cb2\n self.cb3 = cb3\n\n\n def callback(self, cb1, cb2, cb3):\n # save the parameter in a shared variable\n # print(cb1, cb2, cb3)\n self.cb1 = cb1\n return 0\n\n def callback_proto(self):\n global CallbackProto\n CallbackProto = WINFUNCTYPE(c_int, c_char_p, c_char_p, c_ulong)\n\n def callback_wrapper(self):\n global CallbackWrapper\n CallbackWrapper = CallbackProto(self.callback)\n\n def connect(self):\n self.my_dll.ConnectEx.argtypes = [c_char_p, c_char_p, c_char_p, CallbackProto, c_uint32, c_int, c_uint32]\n self.my_dll.ConnectEx(p1, p2, p3, CallbackWrapper, p5, p6, p7)\n \"\"\"check if connected\n if x == 0:\n print(\"error\")\n else:\n print(\"ok\")\n \"\"\"\n\n def send_react(self, message):\n self.connect_to_dll()\n # message = (\"CORE\", \"RANDOM\", \"CREATE_OBJECT\", \"objtype\", \"CAM\", \"objid\", \"999999\", \"parent_id\", \"2\", \"name\", \"Test Camera\")\n msg = c_char_p(message)\n self.my_dll.SendDoReact.argtypes = [c_char_p, c_char_p]\n self.my_dll.SendDoReact(p3, msg)\n self.disconnect()\n\n def send_event(self, message):\n self.connect_to_dll()\n # message = \"CORE||CREATE_OBJECT|objtype,objid<99>,parent_id<2>,name\".encode(\"utf-8\")\n msg = c_char_p(message)\n self.my_dll.SendMsg.argtypes = [c_char_p, c_char_p]\n self.my_dll.SendMsg(p3, msg)\n self.disconnect()\n\n def disconnect(self):\n self.my_dll.Disconnect(iidk_id)\n\n\n def connect_to_dll(self):\n self.callback_proto()\n self.callback_wrapper()\n self.connect()\n\n\"\"\"\n def is_object_exist(self, obj_name, obj_id):\n self.my_dll.\n\"\"\"\n\n\n\n# print(my_dll)\n# print(vars(my_dll))\n\n# message = (p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20)\n# message = \"CORE||CREATE_OBJECT|objtype,objid<99>,parent_id<2>,name\".encode(\"utf-8\")\n# message = 
\"CAM|40|REC\".encode(\"utf-8\")","sub_path":"fixture/load_dll.py","file_name":"load_dll.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"637029327","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom Tkinter import *\nfrom tkFileDialog import askopenfilename\nimport tkMessageBox\nimport zipfile\nimport zlib\nimport datetime\nimport os\nimport sys\n\n\nclass iZip:\n\n def __init__(self, master):\n self.master = master\n\n self.master.geometry(\"250x250\")\n self.master.resizable(False, False)\n self.master.title(\"iZip\")\n\n self.message = Label(text='Welcome to iZip', pady=20, padx=10).pack()\n self.compact = Button(text='Go Zip!', pady=10,\n command=self.go_compact).pack()\n self.extract = Button(text='Go to Extract!', pady=10,\n command=self.go_extract).pack()\n self.exit = Button(text='Exit', padx=30, command=self.quit).pack()\n\n def go_compact(self):\n\n name_file = askopenfilename()\n list_file = []\n\n if name_file != \"\":\n list_file.append(name_file)\n\n new_zip = zipfile.ZipFile('file.zip', 'w')\n\n for file in list_file:\n\n if (os.path.isfile(file) and os.path.exists(file)):\n base_path = os.path.basename(file)\n new_zip.write(file, base_path,\n compress_type=zipfile.ZIP_DEFLATED)\n tkMessageBox.showinfo(\"Message\", \"Compressed successfully!\")\n new_zip.close()\n else:\n tkMessageBox.showwarning(\"Message\", \"Error!\")\n else:\n tkMessageBox.showinfo(\"Message\", \"Select an item!\")\n\n def go_extract(self):\n\n name_file = askopenfilename()\n\n if name_file != \"\" and zipfile.is_zipfile(name_file):\n\n new_file = zipfile.ZipFile(name_file)\n new_file.extractall()\n\n tkMessageBox.showinfo(\"Message\", \"Extracted successfully!\")\n else:\n tkMessageBox.showinfo(\"Message\", \"Select an item!\")\n\n def quit(self):\n root.destroy()\n\n\nif __name__ == '__main__':\n root = Tk()\n screen = iZip(root)\n root.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"108624291","text":"from flask import Flask, jsonify, request\nimport json\n\n\napp = Flask(__name__)\n\ndef okulan():\n r = open(r\"data.json\", \"r\", encoding=\"UTF-8\").read()\n r = json.loads(r)\n\n return r\n\ndef arakardes(key):\n data = okulan()\n\n bulundu = list()\n\n for _ in data:\n if key in _[\"search\"]:\n bulundu.append(_)\n\n return bulundu\n \n\n@app.route(\"/stations\")\ndef stations():\n return jsonify(okulan())\n\n@app.route(\"/arama\", methods=[\"GET\"])\ndef ara():\n key = request.args.get(\"search\")\n \n kim_o = request.headers.get('client')\n\n if kim_o == \"lps-project\":\n\n result = arakardes(key)\n return jsonify(result)\n return \"sorry friend, you can't get in\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"dosya.py","file_name":"dosya.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"493773017","text":"\n# coding: utf-8\n\n# # Lecture 16 - Index Objects\n\n# ## Robert Esteves\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\nimport numpy as np\n\n\n# In[3]:\n\n\nfrom pandas import Series, DataFrame\n\n\n# In[4]:\n\n\nmy_ser = Series([1,2,3,4], index=['A', 'B', 'C', 'D'])\n\n\n# In[5]:\n\n\nmy_ser\n\n\n# In[7]:\n\n\nmy_index = my_ser.index\n\n\n# In[8]:\n\n\nmy_index\n\n\n# 
In[9]:\n\n\nmy_index[2]\n\n\n# In[10]:\n\n\nmy_index[2:]\n\n\n# In[12]:\n\n\n# What happens when you try to change an index\nmy_index[0]\n\n# Note: Index objects are immutable and don't support item assignment\n# You have to pass an entire new index\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Lecture_16_Index_Objects.py","file_name":"Lecture_16_Index_Objects.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"649546037","text":"import logging\n\nimport pytest\nimport time\nimport copy\nfrom utils import *\nfrom constants import *\n\nuid = \"list_collection\"\n\n\nclass TestListCollections:\n \"\"\"\n ******************************************************************\n The following cases are used to test `list_collections` function\n ******************************************************************\n \"\"\"\n\n def test_list_collections(self, client, collection):\n '''\n target: test list collections\n method: create collection, assert the value returned by list_collections method\n expected: True\n '''\n collections = map(lambda x: x['collection_name'], client.list_collections())\n assert collection in collections\n\n def test_list_collections_not_existed(self, client):\n '''\n target: test a collection that was not created\n method: generate a random collection name that does not exist in the db, assert the value returned by list_collections method\n expected: False\n '''\n collection_name = gen_unique_str(uid)\n collections = map(lambda x: x['collection_name'], client.list_collections())\n assert collection_name not in collections\n\n def test_list_collections_no_collection(self, client):\n '''\n target: test list collections when no collection in db\n method: delete all collections and list collections\n expected: status is ok and len of result is 0\n '''\n client.clear_db()\n result = client.list_collections()\n assert len(result) == 0\n\n def test_list_collections_multi_collections(self, client, collection):\n '''\n target: test list collections with multi collections\n method: create multi collections and list them\n expected: len of list results is equal to the number of collections\n '''\n collection_name = gen_unique_str(uid)\n fields = copy.deepcopy(default_fields)\n assert client.create_collection(collection_name, fields)\n collections = map(lambda x: x['collection_name'], client.list_collections())\n assert collection_name in collections\n assert collection in collections\n\n def test_list_collections_offset(self, client, collection):\n '''\n target: test list collections with offset parameter\n method: create multi collections and list them with offset\n expected: first collection with offset=1 equals second collection with offset=0\n '''\n collection_num = 2\n fields = copy.deepcopy(default_fields)\n for i in range(collection_num):\n collection_name = gen_unique_str(uid)\n assert client.create_collection(collection_name, fields)\n collections = list(map(lambda x: x['collection_name'], client.list_collections()))\n collections_new = list(map(lambda x: x['collection_name'], client.list_collections(offset=1)))\n assert collections[1] == collections_new[0]\n\n def test_list_collections_page_size(self, client, collection):\n '''\n target: test list collections with page_size parameter\n method: create multi collections and list them with page_size\n expected: number of collections returned equals page_size\n '''\n collection_num = 6\n page_size = 5\n fields = copy.deepcopy(default_fields)\n for i in range(collection_num):\n collection_name = 
gen_unique_str(uid)\n assert client.create_collection(collection_name, fields)\n collections = list(map(lambda x: x['collection_name'], client.list_collections(page_size=page_size)))\n c = list(map(lambda x: x['collection_name'], client.list_collections()))\n assert len(collections) == page_size\n\n","sub_path":"tests/milvus_http_test/collections/test_list.py","file_name":"test_list.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"9659654","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport re\nimport string\nimport json\nimport numpy as np\n\n\ndef compute_prediction(examples,\n features,\n predictions,\n version_2_with_negative=False,\n n_best_size=20,\n max_answer_length=30,\n null_score_diff_threshold=0.0):\n \"\"\"\n Post-processes the predictions of a question-answering model to convert \n them to answers that are substrings of the original contexts. This is \n the base postprocessing function for models that only return start and \n end logits.\n\n Args:\n examples (list): List of raw squad-style data (see `run_squad.py \n `__ for more \n information).\n features (list): List of processed squad-style features (see \n `run_squad.py `__\n for more information).\n predictions (tuple): The predictions of the model. Should be a tuple\n of two lists containing the start logits and the end logits.\n version_2_with_negative (bool, optional): Whether the dataset contains\n examples with no answers. Defaults to False.\n n_best_size (int, optional): The total number of candidate predictions\n to generate. Defaults to 20.\n max_answer_length (int, optional): The maximum length of a predicted answer.\n Defaults to 30.\n null_score_diff_threshold (float, optional): The threshold used to select\n the null answer. 
Only useful when `version_2_with_negative` is True.\n Defaults to 0.0.\n \n Returns:\n A tuple of three dictionaries containing final selected answer, all n_best \n answers along with their probability and scores, and the score_diff of each \n example.\n \"\"\"\n assert len(\n predictions\n ) == 2, \"`predictions` should be a tuple with two elements (start_logits, end_logits).\"\n all_start_logits, all_end_logits = predictions\n\n assert len(predictions[0]) == len(\n features), \"Number of predictions should be equal to number of features.\"\n\n # Build a map example to its corresponding features.\n features_per_example = collections.defaultdict(list)\n for i, feature in enumerate(features):\n features_per_example[feature[\"example_id\"]].append(i)\n\n # The dictionaries we have to fill.\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n\n scores_diff_json = collections.OrderedDict()\n\n # Let's loop over all the examples!\n for example_index, example in enumerate(examples):\n # Those are the indices of the features associated to the current example.\n feature_indices = features_per_example[example['id']]\n\n min_null_prediction = None\n prelim_predictions = []\n\n # Looping through all the features associated to the current example.\n for feature_index in feature_indices:\n # We grab the predictions of the model for this feature.\n start_logits = all_start_logits[feature_index]\n end_logits = all_end_logits[feature_index]\n # This is what will allow us to map some the positions in our logits to span of texts in the original\n # context.\n offset_mapping = features[feature_index][\"offset_mapping\"]\n # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context\n # available in the current feature.\n token_is_max_context = features[feature_index].get(\n \"token_is_max_context\", None)\n\n # Update minimum null prediction.\n feature_null_score = start_logits[0] + end_logits[0]\n if min_null_prediction is None or min_null_prediction[\n \"score\"] > feature_null_score:\n min_null_prediction = {\n \"offsets\": (0, 0),\n \"score\": feature_null_score,\n \"start_logit\": start_logits[0],\n \"end_logit\": end_logits[0],\n }\n\n # Go through all possibilities for the `n_best_size` greater start and end logits.\n start_indexes = np.argsort(start_logits)[-1:-n_best_size - 1:\n -1].tolist()\n end_indexes = np.argsort(end_logits)[-1:-n_best_size - 1:-1].tolist(\n )\n for start_index in start_indexes:\n for end_index in end_indexes:\n # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond\n # to part of the input_ids that are not in the context.\n if (start_index >= len(offset_mapping) or\n end_index >= len(offset_mapping) or\n offset_mapping[start_index] is None or\n offset_mapping[end_index] is None or\n offset_mapping[start_index] == (0, 0) or\n offset_mapping[end_index] == (0, 0)):\n continue\n # Don't consider answers with a length that is either < 0 or > max_answer_length.\n if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n continue\n # Don't consider answer that don't have the maximum context available (if such information is\n # provided).\n if token_is_max_context is not None and not token_is_max_context.get(\n str(start_index), False):\n continue\n prelim_predictions.append({\n \"offsets\": (offset_mapping[start_index][0],\n offset_mapping[end_index][1]),\n \"score\":\n start_logits[start_index] + end_logits[end_index],\n 
\"start_logit\": start_logits[start_index],\n \"end_logit\": end_logits[end_index],\n })\n if version_2_with_negative:\n # Add the minimum null prediction\n prelim_predictions.append(min_null_prediction)\n null_score = min_null_prediction[\"score\"]\n\n # Only keep the best `n_best_size` predictions.\n predictions = sorted(\n prelim_predictions, key=lambda x: x[\"score\"],\n reverse=True)[:n_best_size]\n\n # Add back the minimum null prediction if it was removed because of its low score.\n if version_2_with_negative and not any(p[\"offsets\"] == (0, 0)\n for p in predictions):\n predictions.append(min_null_prediction)\n\n # Use the offsets to gather the answer text in the original context.\n context = example[\"context\"]\n for pred in predictions:\n offsets = pred.pop(\"offsets\")\n pred[\"text\"] = context[offsets[0]:offsets[1]]\n\n # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid\n # failure.\n if len(predictions) == 0 or (len(predictions) == 1 and\n predictions[0][\"text\"] == \"\"):\n predictions.insert(0, {\n \"text\": \"empty\",\n \"start_logit\": 0.0,\n \"end_logit\": 0.0,\n \"score\": 0.0\n })\n\n # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using\n # the LogSumExp trick).\n scores = np.array([pred.pop(\"score\") for pred in predictions])\n exp_scores = np.exp(scores - np.max(scores))\n probs = exp_scores / exp_scores.sum()\n\n # Include the probabilities in our predictions.\n for prob, pred in zip(probs, predictions):\n pred[\"probability\"] = prob\n\n # Pick the best prediction. If the null answer is not possible, this is easy.\n if not version_2_with_negative:\n all_predictions[example[\"id\"]] = predictions[0][\"text\"]\n else:\n # Otherwise we first need to find the best non-empty prediction.\n i = 0\n while predictions[i][\"text\"] == \"\":\n i += 1\n best_non_null_pred = predictions[i]\n\n # Then we compare to the null prediction using the threshold.\n score_diff = null_score - best_non_null_pred[\n \"start_logit\"] - best_non_null_pred[\"end_logit\"]\n scores_diff_json[example[\"id\"]] = float(\n score_diff) # To be JSON-serializable.\n if score_diff > null_score_diff_threshold:\n all_predictions[example[\"id\"]] = \"\"\n else:\n all_predictions[example[\"id\"]] = best_non_null_pred[\"text\"]\n\n # Make `predictions` JSON-serializable by casting np.float back to float.\n all_nbest_json[example[\"id\"]] = [{\n k: (float(v)\n if isinstance(v, (np.float16, np.float32, np.float64)) else v)\n for k, v in pred.items()\n } for pred in predictions]\n\n return all_predictions, all_nbest_json, scores_diff_json\n\n\ndef make_qid_to_has_ans(examples):\n qid_to_has_ans = {}\n for example in examples:\n qid_to_has_ans[example['id']] = not example.get('is_impossible', False)\n return qid_to_has_ans\n\n\ndef normalize_answer(s):\n #Lower text and remove punctuation, articles and extra whitespace.\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n if not s:\n return ''\n else:\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\n\ndef compute_f1(a_gold, a_pred, 
is_whitespace_splited=True):\n gold_toks = normalize_answer(a_gold).split()\n pred_toks = normalize_answer(a_pred).split()\n\n if not is_whitespace_splited:\n gold_toks = gold_toks[0] if gold_toks else \"\"\n pred_toks = pred_toks[0] if pred_toks else \"\"\n\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef get_raw_scores(examples, preds, is_whitespace_splited=True):\n exact_scores = {}\n f1_scores = {}\n for example in examples:\n qid = example['id']\n gold_answers = [\n text for text in example['answers'] if normalize_answer(text)\n ]\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = ['']\n if qid not in preds:\n print('Missing prediction for %s' % qid)\n continue\n a_pred = preds[qid]\n # Take max over all gold answers\n exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\n f1_scores[qid] = max(\n compute_f1(a, a_pred, is_whitespace_splited) for a in gold_answers)\n\n return exact_scores, f1_scores\n\n\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\n new_scores = {}\n for qid, s in scores.items():\n pred_na = na_probs[qid] > na_prob_thresh\n if pred_na:\n new_scores[qid] = float(not qid_to_has_ans[qid])\n else:\n new_scores[qid] = s\n return new_scores\n\n\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\n if not qid_list:\n total = len(exact_scores)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores.values()) / total),\n ('f1', 100.0 * sum(f1_scores.values()) / total),\n ('total', total),\n ])\n else:\n total = len(qid_list)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\n ('total', total),\n ])\n\n\ndef merge_eval(main_eval, new_eval, prefix):\n for k in new_eval:\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\n\n\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n return 100.0 * best_score / len(scores), best_thresh\n\n\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs,\n qid_to_has_ans):\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs,\n qid_to_has_ans)\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs,\n qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n\n\ndef squad_evaluate(examples,\n preds,\n na_probs=None,\n na_prob_thresh=1.0,\n is_whitespace_splited=True):\n '''\n Computes and prints the f1 score and em score of input prediction.\n\n Args:\n 
examples (list): List of raw squad-style data (see `run_squad.py\n `__ for more\n information).\n preds (dict): Dictionary of final predictions. Usually generated by\n `compute_prediction`.\n na_probs (dict, optional): Dictionary of score_diffs of each example.\n Used to decide if answer exits and compute best score_diff\n threshold of null. Defaults to None.\n na_prob_thresh (float, optional): The threshold used to select the\n null answer. Defaults to 1.0.\n is_whitespace_splited (bool, optional): Whether the predictions and references\n can be tokenized by whitespace. Usually set True for English and\n False for Chinese. Defaults to True.\n '''\n\n if not na_probs:\n na_probs = {k: 0.0 for k in preds}\n\n qid_to_has_ans = make_qid_to_has_ans(examples) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(examples, preds, is_whitespace_splited)\n exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n na_prob_thresh)\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(\n exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(\n exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs,\n qid_to_has_ans)\n\n print(json.dumps(out_eval, indent=2))\n","sub_path":"paddlenlp/metrics/squad.py","file_name":"squad.py","file_ext":"py","file_size_in_byte":17378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"613245993","text":"from google.appengine.ext import ndb\nimport logging\nfrom model import user,project\nfrom login import BaseHandler,check_permission\nimport json as json\nfrom model import sprint,task,time_log\nfrom datetime import datetime, timedelta,date\nimport time\nfrom common import checkdomain\n\n\nclass Timesheet(BaseHandler):\n @checkdomain\n def show_timesheet(self,from_date,to_date):\n self.response.write('under construction')\n \n @checkdomain\n def get(self,*args,**kargs):\n week_number=0\n day_number=7\n projectKey=self.session['current_project']\n currentUser=self.auth.get_user_by_session()\n currentUser=self.user_model.get_by_id(currentUser['user_id']).key\n if self.request.get('from') and self.request.get('to'):\n logging.info('get')\n monday1=datetime.strptime(self.request.get('from'),'%d/%m/%Y').date()\n monday2=datetime.strptime(self.request.get('to'),'%d/%m/%Y').date()\n #monday1 = (start_date - timedelta(days=start_date.weekday()))\n #monday2 = (end_date - timedelta(days=end_date.weekday()))\n week_number=(monday2 - monday1).days/7\n day_number=(monday2 - monday1).days%7\n start=monday1\n prev_date=datetime.strftime(monday1 - timedelta(days=1),'%d/%m/%Y')\n next_date=datetime.strftime(monday2 + timedelta(days=8),'%d/%m/%Y')\n elif self.request.get('from') and not self.request.get('to'):\n dt=datetime.strptime(self.request.get('from'),'%d/%m/%Y').date()\n start = dt - timedelta(days=dt.weekday())\n week_number=0\n day_number=6\n prev_date=datetime.strftime(start - timedelta(days=1),'%d/%m/%Y')\n next_date=datetime.strftime(start + timedelta(days=8),'%d/%m/%Y')\n else: \n dt=date.today()\n start = dt - timedelta(days=dt.weekday())\n 
week_number=0\n day_number=6\n prev_date=datetime.strftime(start - timedelta(days=1),'%d/%m/%Y')\n next_date=datetime.strftime(start + timedelta(days=8),'%d/%m/%Y')\n\n #w=(monday2 - monday1).days/7\n week=[]\n week_total=0\n for j in range(0,week_number+1):\n if j==week_number:\n d=day_number+1\n else:\n d=7\n for i in range(0,d):\n end = start + timedelta(days=i)\n \n if i == 6:\n start=start + timedelta(days=7)\n #range_to = end.strftime('%d/%m/%Y')\n week.append({'week_number':j+1,'date':end,'items':[],'total':0})\n \n logs=time_log.Time_Log().getByProjectUser(projectKey,currentUser)\n for w in week:\n for name in logs:\n key = name.today_date\n if key == w['date']:\n w['items'].append(name)\n w['total']=w['total']+name.total_effort\n week_total=week_total+ name.total_effort\n self.render_template(\"user_new/timesheet.html\",{\"logs\":week,'week_total':week_total,'previous':prev_date,'next':next_date})\n \n \n ","sub_path":"src/controller/timesheet.py","file_name":"timesheet.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"436678218","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.7\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Setup\n\n# ## Imports\n\n# +\n# Import Scripts\n# %reload_ext autoreload\n# %autoreload 2\nimport sys\nfrom pathlib import Path\nimport os\nsource_path = str(Path(os.path.abspath('joaoc-experiment-checks')).parent.parent.parent / 'src')\nif source_path not in sys.path:\n sys.path.insert(0, source_path)\n\n\n# Packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\npd.options.display.max_columns = 999\nimport seaborn as sns\nimport yaml\nfrom collections import defaultdict\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom model_selection import plotting, select_model\nfrom utils.utils import connect_to_database, path_to_shared\n\n# %matplotlib inline\n# -\n\ncon = connect_to_database()\n\n# ## Matplotlib functions and config\n\n# +\nfontsize = 20 # General fontsize\n\ndef get_index_of_line(line, df, forbiden='forest'):\n line = [i for i in df.unique() if ((line in i) & (forbiden not in i))][0]\n return [i.get_text() for i in axis.lines[0].axes.get_legend().texts].index(line) - 1\n\n\n# -\n\n# ## >> Insert Macro Experiment ID >>\n\n# Add here your parameters:\nmacro_experiment_id = 8216636716\nargs = dict(experiment_id=macro_experiment_id)\n\n# ## Count Errors\n\nquery = f\"\"\"\nselect *\nfrom experiments.errors\nwhere experiment_id in (\n select experiment_id \n from experiments.parallelization \n where macro_experiment_id = {macro_experiment_id})\norder by created_on desc\n\"\"\"\nerrors = pd.read_sql_query(query, con)\nprint('number of errors: ', len(pd.read_sql_query(query, con)))\n\n# ## Download Data\n\n# + {\"active\": \"\"}\n# # This is just for this experiment\n# # It adds baselines to dataframe\n#\n# query = f\"\"\" \n# select evaluation.*, approach.name\n# from (select *\n# from experiments.evaluations\n# where experiment_id in (\n# select experiment_id \n# from experiments.parallelization \n# where macro_experiment_id = 5431381010)) evaluation\n# left join experiments.approaches approach\n# on evaluation.experiment_id = approach.experiment_id\n# and evaluation.approach_id = approach.approach_id\"\"\"\n#\n# data1 = pd.read_sql_query(query, con)\n# data1 = 
data1.query('name in (\"higher_than_x\", \"random\")')\n\n# +\nquery = f\"\"\" \n select evaluation.*, approach.name\n from (select *\n from experiments.evaluations\n where experiment_id in (\n select experiment_id \n from experiments.parallelization \n where macro_experiment_id = {macro_experiment_id})) evaluation\n left join experiments.approaches approach\n on evaluation.experiment_id = approach.experiment_id\n and evaluation.approach_id = approach.approach_id\"\"\"\n\ndata = pd.read_sql_query(query, con)\n\n# data = pd.concat([data, data1])\n\ndef explode(df):\n\n df['metric'] = df['eval_metric'].apply(lambda x: x.split('@')[0])\n df['at'] = df['eval_metric'].apply(lambda x: float(x.split('@')[1]))\n df['type_procurement'] = df['eval_metric'].apply(lambda x: x.split('@')[2])\n \n return df\n\ndata = explode(data)\n# -\n\n# # First Data Selection: Performance Metrics\n#\n# It selects models according to overall performance metrics such as `recall` and `precision`.\n\n# +\n# List of rules to filter the table experiments.evaluations.\n#\n# One rule orders a list of learners according to a statistic that is \n# applied to all folds of a given experiment. Then, it orders it given\n# the higher_is_better parameter. Finally, the top is used to cut the list.\n#\n# Parameters:\n# ----------\n# metric: string\n# It accepts any implemented metric, e.g., precision, recall, accuracy.\n# See pipeline/evaluation.py.\n#\n# filter: dict\n# type: str\n# It can be `threshold` or `top`\n# value: number\n# A number to filter by\n#\n# It filters the statistic of the metric by threshold or top. \n#\n# statistic: string\n# Any statistic accepted by pd.Groupby.agg function.\n# Some examples are: mean, median, std, sum, min, max, skew, kurt, sem, var\n#\n# higher_is_better: boolean\n# Tells the algorithm how to order the list given the statistic of the metric.\n# If the metric is better with high values, then this parameter should be true\n\n# Selector is a set of rules to be applied to an experiment result. \n# One can build multiple selectors with different rules. \n# \n# To build a selector, you need to declare the order that you want the rules\n# above to be applied. For instance, a selector can use the rules in the following order:\n# 1 -> 3 -> 2. 
Or it can be as simple as one rule: 1\n#\n# This chains are defined by a list, [1, 3, 2] or [1], that has to be added to the order key\n# for each selector.\n# -\n\nselector = \\\n\"\"\"\nrules:\n - \n metric: 'recall@30@overall'\n filter: \n type: threshold\n value: 0.7\n statistic: mean\n higher_is_better: true\n - \n metric: 'recall@30@overall'\n filter: \n type: top\n value: 150\n statistic: mean\n higher_is_better: true\n - \n metric: 'recall@30@overall'\n filter: \n type: top\n value: 30\n statistic: mean\n higher_is_better: true\n - \n metric: 'recall@30@overall'\n filter: \n type: top\n value: 10\n statistic: std\n higher_is_better: false\n \n\nselectors:\n -\n order: [2] \n\"\"\"\n\nres = select_model.run(args['experiment_id'], yaml.load(selector), data.query('fold != \"2017-12-26\"'))\n\n\n# ## Recall by Quality Reviews\n\ndata.query('name in (\"higher_than_x\", \"random\")').query('fold == \"2017-12-26\"').query('eval_metric == \"recall@30@overall\"')\n\ndata.query('learner_id == 6183597133')\n\n# +\nselector = res['selectors'][0]\n\nselector['data'] = selector['data'].query('learner_id == 6183597133')\n\nfor selector in res['selectors']:\n\n fig, axis = plt.subplots(figsize=(20, 12),\n ncols=1, nrows=1)\n \n df = data[data['learner_id'].isin(selector['data']['learner_id'].unique())].query('fold == \"2017-12-26\"')\n df = pd.concat([df, data.query('name in (\"higher_than_x\", \"random\")').query('fold == \"2017-12-26\"')])\n \n df['new_name'] = df.apply(lambda x: x['name'] + '_' + str(x['learner_id']), 1)\n defaultdict\n translate = defaultdict(lambda: 'Selected Model')\n translate.update({'higher_than_x': 'Ordered By Value',\n 'random': 'Random Baseline'\n })\n \n df['Models'] = df['name'].apply(lambda x: translate[x])\n\n axis = sns.lineplot(x=\"at\", \n y=\"score\",\n hue=\"Models\",\n hue_order=['Random Baseline', 'Ordered By Value', 'Selected Model',],\n palette=['grey', '#1f497d', '#f3b32a'],\n ci=100,\n data=df.query('type_procurement == \"overall\"')\\\n .query('metric == \"recall\"'),\n linewidth=15,\n# dashes=dict(zip(['Random Baseline', 'Ordered By Value', 'Selected Models',], [':', '--', ':'])),\n ax=axis)\n \n leg = axis.legend(prop={'size': 40})\n \n linewidth = 20\n \n higher_idx = get_index_of_line('Ordered', df['Models'])\n axis.lines[higher_idx].set_linestyle(\"--\")\n axis.lines[higher_idx].set_linewidth(linewidth)\n\n\n leg.get_lines()[higher_idx+1].set_linestyle(\"--\")\n leg.get_lines()[higher_idx+1].set_linewidth(linewidth)\n\n random_idx = get_index_of_line('Random', df['Models'])\n axis.lines[random_idx].set_linestyle(\":\")\n axis.lines[random_idx].set_linewidth(linewidth)\n\n\n leg.get_lines()[random_idx+1].set_linestyle(\":\")\n leg.get_lines()[random_idx+1].set_linewidth(linewidth)\n \n \n random_idx = get_index_of_line('Selected', df['Models'])\n axis.lines[random_idx].set_alpha(0.7)\n \n leg.get_lines()[random_idx+1].set_linewidth(linewidth)\n leg.get_lines()[random_idx+1].set_alpha(0.7)\n \n\n axis.set_ylabel('Recall', fontsize=55)\n axis.set_xlabel('% of quality reviews', fontsize=55)\n axis.tick_params(axis='both', labelsize=45)\n# axis.set_title(f\"\"\"\n# Best Recall at 30% of Quality Reviews\"\"\".format(selector['name']), fontdict={'fontsize': 55})\n \n fig.tight_layout()\n path = path_to_shared('user', 'imgs', selector['name'], 'png')\n fig.savefig(path) \n\n# -\n\n# ## Precision by Quality Reviews\n\n# +\nselector = res['selectors'][0]\n\nfor selector in res['selectors']:\n\n fig, axis = plt.subplots(figsize=(15, 10),\n ncols=1, nrows=1)\n 
\n df = explode(selector['data'])\n df = pd.concat([df, data.query('name in (\"higher_than_x\", \"random\")')])\n \n df['new_name'] = df.apply(lambda x: x['name'] + '_' + str(x['learner_id']), 1)\n defaultdict\n translate = defaultdict(lambda: 'Selected Models')\n translate.update({'higher_than_x': 'Ordered by Value Baseline',\n 'random': 'Random Baseline'\n })\n \n df['Models'] = df['name'].apply(lambda x: translate[x])\n\n axis = sns.lineplot(x=\"at\", \n y=\"score\",\n hue=\"Models\",\n # hue_order=['Random Baseline', model_name, 'Ordered by Value Baseline'],\n ci=100,\n data=df.query('type_procurement == \"overall\"')\\\n .query('metric == \"precision\"'),\n ax=axis)\n\n higher_idx = get_index_of_line('Ordered', df['Models'])\n axis.lines[higher_idx].set_linestyle(\"--\")\n axis.lines[higher_idx].set_linewidth(10)\n axis.lines[higher_idx].set_color('black')\n\n random_idx = get_index_of_line('Random', df['Models'])\n axis.lines[random_idx].set_linestyle(\":\")\n axis.lines[random_idx].set_linewidth(10)\n axis.lines[random_idx].set_color('black')\n\n axis.set_ylabel('Precision', fontsize=fontsize)\n axis.set_xlabel('% of quality reviews', fontsize=fontsize)\n axis.tick_params(axis='both', labelsize=fontsize)\n axis.set_title(f\"\"\"\n Best models for {selector['name']}\"\"\".format(selector['name']), fontdict={'fontsize': fontsize})\n axis.legend(prop={'size': fontsize})\n fig.tight_layout()\n path = path_to_shared('user', 'imgs', selector['name'], 'png')\n fig.savefig(path) \n\n# -\n\n# # Second Data Selection: Bias and Fairness Metrics\n#\n# In development\n\n# # Feature Importance\n\nlearner_ids = res['selectors'][0]['learner_stats'].reset_index()['learner_id'].tolist()\n\n\ndef get_features(learner_ids):\n \n query = \"\"\"\n select learner_id, feature, importance\n from experiments.feature_importances\n where learner_id in ({learner_ids})\n \"\"\".format(learner_ids=','.join(map(lambda x: str(x), learner_ids)))\n \n return pd.read_sql_query(query, con)\n\n\ndef compare_best_features_per_learner(features):\n \n features['importance'] = features['importance'].apply(abs)\n a = features.groupby(['learner_id', 'feature']).mean().reset_index()\n \n features_imp = pd.DataFrame()\n for lid in a['learner_id'].unique():\n features_imp[lid] = a[a['learner_id'] == lid].sort_values(by='importance', ascending=False)\\\n .reset_index()['feature']\n \n return features_imp\n\n\ndef get_feature_importance_distribution_for_learner(features, learner_id, top=20):\n \n f = features[features['learner_id'] == learner_id].groupby(['learner_id', 'feature']).mean().reset_index()\n f.nlargest(10, 'importance')\n\n return f.nlargest(top, 'importance').plot(y='importance', x='feature', kind='barh', figsize=(8, top*0.5))\n\n\nfeatures = get_features(learner_ids)\n\n# ## Feature distribution for given learner\n\nget_feature_importance_distribution_for_learner(features, list(learner_ids)[0], top=10)\n\n\n# ## Compare Feature Importance Across Models\n\ncompare_best_features_per_learner(features).head(10).T\n\n# ## Useless Features\n\nuseless = features.groupby(['learner_id', 'feature']).mean().query('importance == 0')\\\n .reset_index().groupby('feature').count()['importance'].sort_values(ascending=False).to_frame()\n\nprint('Number of Useless Features: ', len(useless))\n\nuseless\n\n\n","sub_path":"experiments/experiments-results/Final Selection.py","file_name":"Final 
Selection.py","file_ext":"py","file_size_in_byte":12355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"306994265","text":"from django.shortcuts import render,redirect\nfrom .models import Comments\nfrom django.core.paginator import Paginator\nfrom .forms import CommentsForm\nfrom django.views.generic import View\nfrom django.views.generic import TemplateView\nfrom django.contrib import messages\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nclass CommentsCreate(TemplateView):\n\n #Пост запрос на создания комментария\n def post(self,request, *args, **kwargs):\n form = CommentsForm(request.POST, request.FILES) #Передаем форму из forms.py\n if form.is_valid():\n comment = form.save(commit=False) # Не даем сохраниться комментарию\n comment.user = self.request.user # Заполняем поля таблицы не переданными значениями\n comment.save() # Сохраняем\n username = self.request.user\n mail_send = 'Имя пользователя: {0}\\nКомменатрий: {1}'.format(comment.user, comment.content)\n send_mail('Новый комментарий на сайте',\n mail_send, settings.EMAIL_HOST_USER,\n ['alexanderdemure@gmail.com'], fail_silently=False)\n\n messages.success(request, 'Уважаемый {} благодарим вас за комментарий!'.format(username))\n return redirect('comment_page_url')\n\n return redirect('comment_page_url')\n\n\n\n template_name = 'coment/coment_page.html'\n def get_context_data(self, **kwargs): # Метод для вытягивание комментариев + пагинации\n context = super(CommentsCreate, self).get_context_data(**kwargs) #Вызываем базовый класс\n comments = Comments.objects.all() #QuarySet comments\n paginator = Paginator(comments, 4) #Пагинация на 4 комментария\n page_number = self.request.GET.get('page', 1) #Начинать с 1 страницы\n page = paginator.get_page(page_number)\n\n is_paginated = page.has_other_pages()\n\n if page.has_previous():\n prev_url = '?page={}'.format(page.previous_page_number())\n else:\n prev_url = ''\n\n if page.has_next():\n next_url = '?page={}'.format(page.next_page_number())\n else:\n next_url = ''\n #Вытягиваем необходимые данные\n context['form'] = CommentsForm\n context['page_object'] = page\n context['is_paginated'] = is_paginated\n context['next_url'] = next_url\n context['prev_url'] = prev_url\n\n return context\n","sub_path":"buspoint/app/coment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"599950024","text":"import numpy as np\n\n\ndef linear_weight(pixel_value):\n \n # calculate weights\n z_min, z_max = 0., 255.\n if pixel_value > (z_min+z_max)/2:\n pixel_value = z_max - pixel_value\n else:\n pixel_value = pixel_value-z_min\n \n return pixel_value\n\n# sample intensities from images\ndef sample_rgb_images(images):\n \n num_images = len(images)\n num_intensities = 256\n intensities = np.zeros((num_intensities, num_images), dtype=np.uint8)\n\n mid_img = images[num_images // 2] # as reference for picking up values\n\n for i in range(num_intensities):\n rows, cols = np.where(mid_img==i)\n if len(rows) > 0:\n sample = np.random.randint(0,len(rows))\n for j in range(num_images):\n sample_row, sample_col = rows[sample], cols[sample]\n intensities[i, j] = images[j][sample_row, sample_col]\n \n return intensities\n\n# Debevec paper, chapter 2.1 and Abstract A\ndef gsolve(intensities, log_exposures, llambda, weighting_function): \n \n intensity_range = 255\n num_samples = intensities.shape[0]\n 
num_images = len(log_exposures)\n\n A = np.zeros((num_images * num_samples + intensity_range, num_samples + intensity_range + 1), dtype=np.float64)\n b = np.zeros((A.shape[0], 1), dtype=np.float64)\n \n # data fitting equation\n k = 0\n for i in range(num_samples):\n for j in range(num_images):\n z_ij = intensities[i, j] # only integers can be used as index, i.e. A[k,z_ij]\n w_ij = weighting_function(z_ij)\n A[k, z_ij] = w_ij \n A[k, num_samples+i] = -w_ij\n b[k,0] = w_ij*log_exposures[j] # in the paper is [i, j] but is defined as [j] only\n k += 1\n \n #smoothness equation\n for z_k in range(1,intensity_range):\n w_k = weighting_function(z_k)\n A[k, z_k-1] = w_k * llambda\n A[k, z_k] = -2 * w_k * llambda\n A[k, z_k+1] = w_k * llambda\n k += 1\n \n #fix curve - set middle value to 0\n A[-1, intensity_range//2 ] = 1\n \n #svd\n A_inv = np.linalg.pinv(A)\n x = np.dot(A_inv, b)\n\n g = x[0: intensity_range + 1]\n #lE = x[n+1: x.shape[0]]\n \n return g[:, 0]\n\n# Debevec paper, chaper 2.2\ndef compute_hdr_map(images, log_exposures, response_curve, weighting_function):\n \n shapes = images[0].shape\n img_rad_map = np.zeros(shapes, dtype=np.float64)\n\n g = np.zeros(len(images))\n w = np.zeros(len(images))\n \n for i in range(shapes[0]):\n for j in range(shapes[1]):\n for k in range(len(images)):\n g[k] = response_curve[images[k][i,j]]\n w[k] = weighting_function(images[k][i,j])\n Sum_w = np.sum(w)\n if Sum_w>0:\n img_rad_map[i,j] = np.sum(w*(g-log_exposures)/Sum_w)\n else:\n img_rad_map[i,j] = g[len(images)//2] - log_exposures[len(images)//2]\n \n return img_rad_map\n\nif __name__ == '__main__':\n pass","sub_path":"Homework1/Part2/rapo_final/hdr.py","file_name":"hdr.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"531096502","text":"from ast import walk\nimport os\nimport sys\n\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), \"..\", \"..\")))\n\n\nfrom testrunner import CodersLabTestSuite, CodersLabException, p, dedent\n\n\ntc = CodersLabTestSuite(\"Pętla while\")\n\n\n@tc.test(\"Skrypt wypisuje tekst na ekranie\")\ndef test_printing(invoke, stdout, **kwargs):\n invoke()\n\n if \"Jestem programistą Pythona\".lower() in \"\".join(stdout).lower():\n return\n\n if \"Jestem programistką Pythona\".lower() in \"\".join(stdout).lower():\n return\n\n tc.assert_print_called(stdout)\n\n raise CodersLabException(\n \"Nie znaleziono oczekiwanej frazy w tekście, \"\n \"który skrypt wypisuje na ekranie\"\n )\n\n\n@tc.test(\"Skrypt wypisuje tekst na ekranie 10 razy\")\ndef test_printing_10_times(invoke, stdout, **kwargs):\n invoke()\n\n string = \"\".join(stdout).lower()\n count = string.count(\"Jestem programistą Pythona\".lower()) or string.count(\n \"Jestem programistką Pythona\".lower()\n )\n\n if count != 10:\n raise CodersLabException(\n \"Tekst został wypisany {} razy, oczekiwano 10\".format(count)\n )\n\n\n@tc.test(\"Pętla while była użyta do wykonania zadania\")\ndef test_while_used(ast, **kwargs):\n for node in walk(ast):\n print(node)\n\n\ntc.run()\n","sub_path":"04_Kontrola_przeplywu_programu/01_Petla_while/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"72503559","text":"from django.views.decorators.csrf import csrf_exempt\nimport json\nfrom auth.utils import response, token_required\nfrom base.log import logger\nfrom base.udemy import 
UdemyAnalysis\n\n\n@csrf_exempt\ndef title(request):\n    if request.method == 'POST' or request.method == 'GET':\n        titles = UdemyAnalysis().get_total_titles()\n        return response({\n            'titles': titles\n        })\n    elif request.method == 'OPTIONS':\n        return response({})\n    else:\n        return response({\n            'error': 'Invalid Method'\n        }, status=405)\n\n","sub_path":"back/udemy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"368774856","text":"# Quick sort algorithm\ndef quick_sort(start, end, arr, sort):\n    # If start >= end, the whole subarray has already been recursed over\n    if not start < end:\n        return\n\n    \"\"\"\n    Take the first element as the pivot, then move everything smaller than it to the left\n    and everything larger to the right, splitting the array in two. Quick-sorting those\n    two partitions again gives the complete quick sort -- a divide-and-conquer idea\n    \"\"\"\n    baseValue = arr[start]\n\n    # Offset marking values larger than the pivot (moves left to right)\n    little = start\n    # Offset marking values smaller than the pivot (moves right to left)\n    big = end\n\n    if sort == \"<\":\n        # As long as big and little have not met, the array is not fully traversed\n        while little < big:\n\n            \"\"\"\n            While the array is not fully traversed and the current right-side value is\n            larger than the pivot, the right side is where values larger than the pivot\n            belong. Once a value smaller than the pivot appears, leave the loop and copy\n            it over arr[little]; on the first pass this overwrites the pivot's slot\n            \"\"\"\n            while little < big and arr[big] > baseValue:\n                big -= 1\n\n            # Leaving the loop means arr[big] is smaller than arr[little], so overwrite\n            arr[little] = arr[big]\n\n            \"\"\"\n            While the array is not fully traversed and the current left-side value is no\n            larger than the pivot, the left side is where values smaller than the pivot\n            belong. Once a value larger than the pivot appears, leave the loop and copy\n            it over arr[big]\n            \"\"\"\n            while little < big and arr[little] <= baseValue:\n                little += 1\n\n            # Leaving the loop means arr[little] is larger than arr[big], so overwrite\n            arr[big] = arr[little]\n    else:\n        # As long as big and little have not met, the array is not fully traversed\n        while little < big:\n\n            \"\"\"\n            Same principle as the ascending traversal, only the outcome is reversed\n            \"\"\"\n            while little < big and arr[big] < baseValue:\n                big -= 1\n\n            arr[little] = arr[big]\n\n            \"\"\"\n            Same principle as the ascending traversal, only the outcome is reversed\n            \"\"\"\n            while little < big and arr[little] >= baseValue:\n                little += 1\n\n            arr[big] = arr[little]\n\n    # When this while loop exits, little and big have met, so this position acts as\n    # the pivot's final, median-like slot\n    arr[little] = baseValue\n\n    \"\"\"\n    With the pivot in place, everything to its left is smaller and everything to its\n    right is larger. Applying the same algorithm to both sides yields a sorted array\n    \"\"\"\n    # Subarray left of the pivot\n    quick_sort(start, little - 1, arr, sort)\n    # Subarray right of the pivot\n    quick_sort(little + 1, end, arr, sort)\n\n\nif __name__ == '__main__':\n    arr = [5, 4, 7, 2, 1, 5, 3, 2, 9, 6, 7]\n    quick_sort(0, len(arr) - 1, arr, \"<\")\n    print(arr)\n    arr = [5, 4, 7, 2, 1, 5, 3, 2, 9, 6, 7]\n\n    quick_sort(0, len(arr) - 1, arr, \">\")\n    print(arr)\n","sub_path":"arithmetic/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"315642702","text":"#!/usr/bin/python3\n# https://github.com/googleapis/python-dialogflow/blob/main/samples/snippets/detect_intent_texts.py\n\nimport uuid\nimport os\nimport sys\nimport snowboydecoder\nimport signal\nimport speech_recognition as sr\nimport urllib\nimport json\nimport requests\nimport RPi.GPIO as GPIO\nimport time\nimport configparser\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(25, GPIO.OUT, initial=GPIO.LOW)\n\ninterrupted = False\n\nconfig = configparser.ConfigParser()\nconfig.read('../../smart_speaker.conf')\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = config.get('dialogflow', 'google_app_credential')\nproject_id = config.get('dialogflow', 'project_id')\nsession_id = str(uuid.uuid4())\nlanguage_code = \"zh-TW\"\n\ndef audioRecorderCallback(fname):\n    print(\"converting audio to text\")\n    r = sr.Recognizer()\n    r.pause_threshold = 1\n    r.phrase_threshold = 0.3\n    r.non_speaking_duration = 0.5\n    r.dynamic_energy_threshold = False\n\n    with sr.AudioFile(fname) as source:\n        r.adjust_for_ambient_noise(source, duration=1) \n        audio = 
r.record(source) # read the entire audio file\n try:\n texts = r.recognize_google(audio, language=\"zh-TW\")\n print(texts)\n\n from google.cloud import dialogflow\n\n session_client = dialogflow.SessionsClient()\n session = session_client.session_path(project_id, session_id)\n print('Session path: {}'.format(session))\n\n text_input = dialogflow.TextInput(text=texts, language_code=language_code)\n query_input = dialogflow.QueryInput(text=text_input)\n response = session_client.detect_intent(\n request={\"session\": session, \"query_input\": query_input}\n )\n\n print('=' * 20)\n print('Query text: {}'.format(response.query_result.query_text))\n print('Detected intent: {} (confidence: {})'.format(\n response.query_result.intent.display_name,\n response.query_result.intent_detection_confidence))\n print('Fulfillment text: {}'.format(\n response.query_result.fulfillment_text))\n\n if response.query_result.fulfillment_text == 'turn_on_light_ok':\n GPIO.output(25, GPIO.HIGH)\n # elif response.query_result.fulfillment_text == 'turn_off_light_ok':\n else:\n GPIO.output(25, GPIO.LOW)\n\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n finally:\n os.remove(fname)\n\n\ndef detectedCallback():\n snowboydecoder.play_audio_file()\n sys.stdout.write(\"Say something>>> \")\n sys.stdout.flush()\n\ndef signal_handler(signal, frame):\n global interrupted\n interrupted = True\n GPIO.cleanup()\n\ndef interrupt_callback():\n global interrupted\n return interrupted\n\nif len(sys.argv) == 1:\n print(\"Error: need to specify model name\")\n print(\"Usage: python demo.py your.model\")\n sys.exit(-1)\n\nmodel = sys.argv[1]\n\n# capture SIGINT signal, e.g., Ctrl+C\nsignal.signal(signal.SIGINT, signal_handler)\n\ndetector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)\nprint(\"Listening... 
Press Ctrl+C to exit\")\n\n# main loop\ndetector.start(detected_callback=detectedCallback,\n audio_recorder_callback=audioRecorderCallback,\n interrupt_check=interrupt_callback,\n sleep_time=0.01)\n\ndetector.terminate()\n","sub_path":"03-stt/snowboy/nlu_turn_on_light_v2.py","file_name":"nlu_turn_on_light_v2.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"527563060","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport torch\n\n'''\nhttps://github.com/wei-mao-2019/HisRepItself/blob/master/utils/ang2joint.py\n'''\n\ndef ang2joint(p3d0, pose,\n parent={0: -1, 1: 0, 2: 0, 3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9, 13: 9, 14: 9,\n 15: 12, 16: 13, 17: 14, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21}):\n \"\"\"\n :param p3d0:[batch_size, joint_num, 3]\n :param pose:[batch_size, joint_num, 3]\n :param parent:\n :return:\n \"\"\"\n # model_path = './model.npz'\n # params = np.load(model_path, allow_pickle=True)\n # kintree_table = params['kintree_table']\n batch_num = p3d0.shape[0]\n # id_to_col = {kintree_table[1, i]: i\n # for i in range(kintree_table.shape[1])}\n # parent = {\n # i: id_to_col[kintree_table[0, i]]\n # for i in range(1, kintree_table.shape[1])\n # }\n # parent = {1: 0, 2: 0, 3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7, 11: 8, 12: 9, 13: 9, 14: 9, 15: 12, 16: 13,\n # 17: 14, 18: 16, 19: 17, 20: 18, 21: 19, 22: 20, 23: 21}\n jnum = len(parent.keys())\n # v_shaped = torch.tensordot(betas, self.shapedirs, dims=([1], [2])) + self.v_template\n # J = torch.matmul(self.J_regressor, v_shaped)\n # face_J = v_shaped[:, [333, 2801, 6261], :]\n J = p3d0\n R_cube_big = rodrigues(pose.contiguous().view(-1, 1, 3)).reshape(batch_num, -1, 3, 3)\n results = []\n results.append(\n with_zeros(torch.cat((R_cube_big[:, 0], torch.reshape(J[:, 0, :], (-1, 3, 1))), dim=2))\n )\n # for i in range(1, kintree_table.shape[1]):\n for i in range(1, jnum):\n results.append(\n torch.matmul(\n results[parent[i]],\n with_zeros(\n torch.cat(\n (R_cube_big[:, i], torch.reshape(J[:, i, :] - J[:, parent[i], :], (-1, 3, 1))),\n dim=2\n )\n )\n )\n )\n\n stacked = torch.stack(results, dim=1)\n J_transformed = stacked[:, :, :3, 3]\n return J_transformed\n\n\n# In[ ]:\n\n\ndef rodrigues(r):\n \"\"\"\n Rodrigues' rotation formula that turns axis-angle tensor into rotation\n matrix in a batch-ed manner.\n Parameter:\n ----------\n r: Axis-angle rotation tensor of shape [batch_size * angle_num, 1, 3].\n Return:\n -------\n Rotation matrix of shape [batch_size * angle_num, 3, 3].\n \"\"\"\n eps = r.clone().normal_(std=1e-8)\n theta = torch.norm(r + eps, dim=(1, 2), keepdim=True)\n # theta = torch.norm(r, dim=(1, 2), keepdim=True) # dim cannot be tuple\n theta_dim = theta.shape[0]\n r_hat = r / theta\n cos = torch.cos(theta)\n z_stick = torch.zeros(theta_dim, dtype=torch.float).to(r.device)\n m = torch.stack(\n (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,\n -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)\n m = torch.reshape(m, (-1, 3, 3))\n i_cube = (torch.eye(3, dtype=torch.float).unsqueeze(dim=0) + torch.zeros((theta_dim, 3, 3), dtype=torch.float)).to(r.device)\n A = r_hat.permute(0, 2, 1)\n dot = torch.matmul(A, r_hat)\n R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m\n return R\n\n\n# In[ ]:\n\n\ndef with_zeros(x):\n \"\"\"\n Append a [0, 0, 0, 1] tensor to a [3, 4] tensor.\n Parameter:\n ---------\n x: Tensor to be appended.\n Return:\n 
------\n Tensor after appending of shape [4,4]\n \"\"\"\n ones = torch.tensor(\n [[[0.0, 0.0, 0.0, 1.0]]], dtype=torch.float\n ).expand(x.shape[0], -1, -1).to(x.device)\n ret = torch.cat((x, ones), dim=1)\n return ret\n\n\ndef pack(x):\n \"\"\"\n Append zero tensors of shape [4, 3] to a batch of [4, 1] shape tensor.\n Parameter:\n ----------\n x: A tensor of shape [batch_size, 4, 1]\n Return:\n ------\n A tensor of shape [batch_size, 4, 4] after appending.\n \"\"\"\n zeros43 = torch.zeros(\n (x.shape[0], x.shape[1], 4, 3), dtype=torch.float).to(x.device)\n ret = torch.cat((zeros43, x), dim=3)\n return ret\n\n","sub_path":"utils/ang2joint.py","file_name":"ang2joint.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"} +{"seq_id":"437631911","text":"import os\nimport sys\nimport time\nfrom typing import Tuple, List\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchsummary import summary\n\nimport config\nimport search_space\nfrom cifar10_loader import get_cifar10_sets\nfrom config import device\nfrom modules.cell import Cell\nfrom modules.model import Model\nfrom pnas import pnas_utilities\nfrom pnas.pnas_utilities import _expand_cells, _get_normal_and_reduction_cells, PNASDataset, _prediction_accuracy, \\\n _to_architecture_tensor\nfrom pnas.surrogate_function import Surrogate\n\nRATIO = 0.8\nEPOCHS = 0\nSURROGATE_BATCH_SIZE = 32\nSURROGATE_EPOCHS = 250\n\nCHILD_LR_MAX = 1e-5\nCHILD_L2_REG = 3e-4\n\nSURROGATE_LAYERS = 1\nSURROGATE_HIDDEN_SIZE = 64\nSURROGATE_DROPOUT = 0\nSURROGATE_MLP_DROPOUT = 0.1\nSURROGATE_MLP_LAYERS = 1\nSURROGATE_MLP_HIDDEN_LAYERS = 2\nSURROGATE_L2_REG = 0\nSURROGATE_LR = 0.001\nSURROGATE_GRAD_BOUND = 5.0\n\ncriterion = nn.CrossEntropyLoss()\n\ntrain_loader, test_loader, classes = get_cifar10_sets()\n\nOUTPUT_DIRECTORY = 'output'\n\n\ndef train_and_evaluate_network(net: Model) -> float:\n \"\"\"\n\n :param net:\n :return:\n \"\"\"\n net.to(device)\n\n summary(net, (3, 32, 32))\n\n train_network(net)\n return evaluate_architecture(net)\n\n\ndef train_network(net: nn.Module):\n \"\"\"\n Train the network on cifar 10\n :param net: the network\n :return:\n \"\"\"\n optimizer = optim.SGD(net.parameters(), lr=CHILD_LR_MAX, momentum=0.9, weight_decay=CHILD_L2_REG)\n start = time.perf_counter()\n\n print('Training architecture for {} epochs.'.format(EPOCHS))\n for epoch in range(EPOCHS): # loop over the dataset multiple times\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n # inputs, labels = data[0], data[1]\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 64 == 63: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n end = time.perf_counter()\n print('Finished Training. 
It took: {}'.format((end - start)))\n\n\ndef evaluate_architecture(net: Model) -> float:\n \"\"\"\n evaluate the architecture performance\n :param net: the network\n :return: accuracy\n \"\"\"\n correct = 0\n total = 0\n with torch.no_grad():\n net.eval()\n for data in test_loader:\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n outputs = net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = correct / total\n\n print(\"---\")\n\n normal_cell_str = net.normal_cell.__str__()\n reduction_cell_str = net.reduction_cell.__str__()\n\n print('Normal: [{}], Reduction: [{}]'.format(normal_cell_str, reduction_cell_str))\n print('The network achieved the following accuracy: {}'.format(accuracy))\n\n with open('{}/architectures.csv'.format(OUTPUT_DIRECTORY), 'a+') as fd:\n fd.write('{} {},{}\\n'.format(normal_cell_str, reduction_cell_str, accuracy))\n\n print(\"---\")\n\n return accuracy\n\n\ndef _build_datasets(models: List[List[Model]], targets: List[List[float]]) -> (DataLoader, DataLoader):\n \"\"\"\n build a training and a validation data set\n :param models:\n :param targets:\n :return:\n \"\"\"\n assert len(models) == len(targets)\n\n model_accuracies = []\n\n for i in range(len(models)):\n model_accuracies.extend(\n zip([_to_architecture_tensor(m) for m in models[i]], targets[i]))\n\n np.random.shuffle(model_accuracies)\n\n split = int(len(model_accuracies) * RATIO)\n\n train_model_accuracies = model_accuracies[:split]\n train_model = [m_a[0] for m_a in train_model_accuracies]\n train_accuracies = [m_a[1] for m_a in train_model_accuracies]\n\n validation_model_accuracies = model_accuracies[split:]\n validation_model = [m_a[0] for m_a in validation_model_accuracies]\n validation_accuracies = [m_a[1] for m_a in validation_model_accuracies]\n\n train_dataset = PNASDataset(train_model, train_accuracies)\n validation_dataset = PNASDataset(validation_model, validation_accuracies, train=False)\n\n print(\"Surrogate train={}, valid={}\".format(len(train_dataset.inputs), len(validation_dataset.inputs)))\n\n train_queue = torch.utils.data.DataLoader(\n train_dataset, batch_size=SURROGATE_BATCH_SIZE, shuffle=True, pin_memory=True)\n validation_queue = torch.utils.data.DataLoader(\n validation_dataset, batch_size=SURROGATE_BATCH_SIZE, shuffle=True, pin_memory=True)\n\n return train_queue, validation_queue\n\n\ndef _validate_surrogate_function(model: Surrogate, validate_queue: DataLoader) -> float:\n \"\"\"\n validate the surrogate function on unknown cells\n :param model:\n :param validate_queue:\n :return: the accuracy\n \"\"\"\n\n targets = []\n predictions = []\n\n with torch.no_grad():\n model.eval()\n for step, sample in enumerate(validate_queue):\n surrogate_input = sample['surrogate_input'].to(device)\n surrogate_target = sample['surrogate_target'].to(device)\n\n predict_value = model(surrogate_input)\n\n targets += surrogate_target.data.squeeze().tolist()\n predictions += predict_value.data.squeeze().tolist()\n\n return _prediction_accuracy(targets, predictions)\n\n\ndef _train_surrogate_function(model: Surrogate, models: List[List[Model]], accuracies: List[List[float]],\n optimizer: torch.optim.Adam) -> None:\n \"\"\"\n\n :param model:\n :return:\n \"\"\"\n train_queue, validate_queue = _build_datasets(models, accuracies)\n loss = sys.maxsize\n\n for epoch in range(SURROGATE_EPOCHS):\n model.train()\n\n for step, sample in enumerate(train_queue):\n surrogate_input = 
sample['surrogate_input'].to(device)\n surrogate_target = sample['surrogate_target'].to(device)\n\n optimizer.zero_grad()\n predict_value = model(surrogate_input)\n\n loss = F.mse_loss(predict_value.squeeze(), surrogate_target.squeeze())\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), SURROGATE_GRAD_BOUND)\n\n optimizer.step()\n\n print('Ep {} Surrogate loss: {}'.format(epoch, loss))\n\n if epoch % 100 == 0:\n acc = _validate_surrogate_function(model, validate_queue)\n print(\"Ep {} Validation accuracy: {}\".format(epoch, acc))\n\n\ndef _surrogate_infer(surrogate: Surrogate, models: List[Model]) -> List[float]:\n \"\"\"\n\n :param model:\n :param models:\n :return:\n \"\"\"\n surrogate.eval()\n predictions = []\n\n model_inputs = PNASDataset([_to_architecture_tensor(model) for model in models], train=False)\n infer_set = torch.utils.data.DataLoader(\n model_inputs, batch_size=SURROGATE_BATCH_SIZE, shuffle=False, pin_memory=True)\n\n for step, sample in enumerate(infer_set):\n surrogate_in = sample['surrogate_input'].to(device)\n acc = surrogate(surrogate_in)\n\n acc = acc.view(-1).tolist()\n predictions.extend(acc)\n\n return predictions\n\n\ndef _cell_combinations_to_cnn(cell_combinations: List[Tuple[Cell, Cell]]) -> List[Model]:\n \"\"\"\n Build a model from the cell combination and return it\n :param cell_combinations: the cell combinations\n :return: the models\n \"\"\"\n models = []\n\n for normal_cell, reduction_cell in cell_combinations:\n m = Model()\n m.normal_cell = normal_cell\n m.reduction_cell = reduction_cell\n\n m.setup_modules()\n\n models.append(m)\n\n return models\n\n\ndef _normalize_accuracy(accuracies: List[float]) -> List[float]:\n \"\"\"\n\n :param accuracies:\n :return:\n \"\"\"\n min_acc = min(accuracies)\n max_acc = max(accuracies)\n return [(acc - min_acc) / (max_acc - min_acc) for acc in accuracies]\n\n\ndef _get_best_models(cells: List[Cell], models: List[Model], accuracies: List[float], beam_size: int) -> List[\n Tuple[Cell, Model, float]]:\n \"\"\"\n\n :param models:\n :param accuracies:\n :param beam_size:\n :return:\n \"\"\"\n assert len(cells) == len(models) == len(accuracies), 'len(cells)={}, len(models)={}, len(accuracies)={}'.format(\n len(cells), len(models), len(accuracies))\n models_with_acc = sorted(zip(cells, models, accuracies), key=lambda x: x[-1], reverse=True)\n\n return models_with_acc[:beam_size]\n\n\ndef progressive_neural_architecture_search(max_num_blocks: int, max_epochs: int, number_of_filters_in_first_layer: int,\n beam_size: int, normal_cells_per_stack: int, stack_count: int) -> List[\n Tuple[Cell, float]]:\n search_space.NUMBER_OF_BLOCKS_PER_CELL = 1\n pnas_utilities.MAX_NUMBER_OF_BLOCKS_PER_CELL = max_num_blocks\n search_space.STACK_COUNT = stack_count\n search_space.NUMBER_OF_NORMAL_CELLS_PER_STACK = normal_cells_per_stack\n search_space.NUMBER_OF_FILTERS = number_of_filters_in_first_layer\n\n global EPOCHS\n EPOCHS = max_epochs\n\n cells = [_expand_cells([])]\n normal_and_reduction_cell_combinations = _get_normal_and_reduction_cells(cells[0])\n\n models = []\n accuracies = []\n targets = []\n\n models.append(_cell_combinations_to_cnn(normal_and_reduction_cell_combinations))\n accuracies.append([train_and_evaluate_network(m) for m in models[0]])\n targets.append(_normalize_accuracy(accuracies[0]))\n\n surrogate = Surrogate(SURROGATE_HIDDEN_SIZE, SURROGATE_LAYERS, SURROGATE_DROPOUT, SURROGATE_MLP_DROPOUT,\n SURROGATE_MLP_LAYERS,\n SURROGATE_MLP_HIDDEN_LAYERS)\n surrogate.to(device)\n\n surrogate_optimizer = 
torch.optim.Adam(surrogate.parameters(), lr=SURROGATE_LR, weight_decay=SURROGATE_L2_REG)\n\n    print(\"Training surrogate.\")\n    _train_surrogate_function(surrogate, models, targets, surrogate_optimizer)\n\n    print(\"Increasing block_size now.\")\n\n    for block_size in range(1, max_num_blocks):\n        search_space.NUMBER_OF_BLOCKS_PER_CELL = block_size\n\n        print(\"Beginning block_size={}\".format(block_size + 1))\n        cells.append(_expand_cells(cells[block_size - 1]))\n\n        print(\"After expanding, there are {} cells.\".format(len(cells[block_size])))\n\n        normal_and_reduction_cell_combinations = _get_normal_and_reduction_cells(cells[block_size])\n\n        models.append(_cell_combinations_to_cnn(normal_and_reduction_cell_combinations))\n        predictions = _surrogate_infer(surrogate, models[block_size])\n\n        remaining_models = _get_best_models(cells[block_size], models[block_size], predictions, beam_size)\n        print(\"Reduced to {} models according to their accuracy.\".format(len(remaining_models)))\n\n        cells[block_size] = list(map(lambda x: x[0], remaining_models))\n        models[block_size] = list(map(lambda x: x[1], remaining_models))\n\n        print(\"Training these architectures in order to update the surrogate.\")\n        accuracies.append([train_and_evaluate_network(m) for m in models[block_size]])\n        targets.append(_normalize_accuracy(accuracies[block_size]))\n\n        print(\"Updating predictor\")\n        _train_surrogate_function(surrogate, models, targets, surrogate_optimizer)\n\n        print(\"End of block_size={}\".format(block_size + 1))\n\n    return [(cell, acc) for cell, _, acc in\n            _get_best_models(cells[-1], models[-1], accuracies[-1], beam_size)]\n\n\ndef main():\n    config.OUTPUT_DIRECTORY = 'output'\n\n    if not os.path.exists(config.OUTPUT_DIRECTORY):\n        os.makedirs(config.OUTPUT_DIRECTORY)\n\n    print(\"Performing PNAS.\")\n    cell_accs = progressive_neural_architecture_search(5, 1, 32, search_space.NUMBER_OF_FILTERS,\n                                                       search_space.NUMBER_OF_NORMAL_CELLS_PER_STACK,\n                                                       search_space.STACK_COUNT)\n\n    print(\"We've found the following architectures:\")\n\n    for c, acc in cell_accs:\n        print(\"----\")\n        print(\"Normal/Reduction: [{}]\".format(c.__str__()))\n        print(\"Top-1: {:.5f}\".format(acc))\n        print(\"----\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pnas/search_strategy.py","file_name":"search_strategy.py","file_ext":"py","file_size_in_byte":12684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
+{"seq_id":"180652931","text":"# Dict iteration: how to access keys and values?\n\ntagebuch = {'montag':'doofer tag', 'dienstag':'besserer tag'}\n\ntagebuch2 = {'montag':['wochenbeginn', 'planung woche'],\n             'dienstag':['wochenmitte','planungsabschluss']}\n\ntagebuch3 = {'montag':{'morgens':'brot','mittags':'spaghetti','abends':'brot'},\n             'dienstag':{'morgens':'müsli','mittags':'pizza','abends':'auflauf'}}\n\n\n# Bad variant, since it can be confused with iterating a list\nfor val in tagebuch3:\n    print(val)\n\n# Better, since the dict is recognisable\nfor k in tagebuch3.keys():\n    print(k)\n\n\n# Returning keys and values\nfor k, v in tagebuch3.items():\n    print(k,' ',v)\n\n# Compare with enumerate\nfor k, v in enumerate(tagebuch3):\n    print(k, ' ',v)\n\n# When only the value is needed\nfor val in tagebuch.values():\n    print(val)\n\nfor k, liste in tagebuch2.items():\n    for val in liste:\n        print(val)","sub_path":"13_Dict_Iteration.py","file_name":"13_Dict_Iteration.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"69"}
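The dict-iteration lesson above contrasts enumerate() with dict.items() but never shows what each call actually yields. A minimal, self-contained illustration (the diary dict below is a stand-in invented for this sketch, not the original tagebuch data):

# enumerate(d) walks the dict's KEYS and pairs each with a running counter,
# whereas d.items() yields (key, value) pairs directly.
diary = {'monday': 'rough day', 'tuesday': 'better day'}

for i, key in enumerate(diary):
    print(i, key)        # 0 monday / 1 tuesday  (counter plus key, values untouched)

for key, value in diary.items():
    print(key, value)    # monday rough day / tuesday better day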
+{"seq_id":"229074435","text":"from datetime import datetime\nimport os\nimport re\n\nimport pandas as pd\n\nfrom app.forms import blueprint\nfrom flask import jsonify, render_template, redirect, request, url_for\nfrom flask_login import current_user, login_required\nfrom werkzeug.utils import secure_filename\n\nfrom app import db\nfrom app.base.forms import AddUrlForm\nfrom app.base.models import Document, Url, User\n\nfrom prepo.prepo.scraper import scrap\nfrom prepo.prepo.preprocessor import preprocessing, summarize\nfrom prepo.submodules.kakaotalk_msg_preprocessor import kakaotalk_msg_preprocessor\n\nfrom prepo.submodules.Top2Vec.top2vec import Top2Vec\n\n@blueprint.route('/
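The final (truncated) record wires a Flask blueprint to scraping, preprocessing, and topic-modelling helpers, including a vendored copy of Top2Vec. As a hedged sketch of how such a model is typically trained and queried -- the corpus helper, the parameter values, and the plain top2vec import path are illustrative assumptions, not taken from the record above:

# Minimal sketch, assuming docs is a list of several hundred raw text strings
# (Top2Vec needs a reasonably large corpus before it can form topic clusters).
from top2vec import Top2Vec  # the record instead imports a vendored prepo.submodules copy

docs = load_corpus()  # hypothetical helper, e.g. pages collected by scrap()

model = Top2Vec(documents=docs, speed="learn", workers=4)
print(model.get_num_topics())
topic_words, word_scores, topic_nums = model.get_topics(1)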